Dataset schema (column | type/range):

| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (3–281 chars) |
| content_id | string (length 40) |
| detected_licenses | list (0–57 items) |
| license_type | string (2 classes) |
| repo_name | string (6–116 chars) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (313 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (18.2k–668M, nullable) |
| star_events_count | int64 (0–102k) |
| fork_events_count | int64 (0–38.2k) |
| gha_license_id | string (17 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (107 classes) |
| src_encoding | string (20 classes) |
| language | string (1 value) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (4–6.02M) |
| extension | string (78 classes) |
| content | string (2–6.02M chars) |
| authors | list (1 item) |
| author | string (0–175 chars) |
412e8795ac75a224f067ce73eeb0bb392bea7095 | 1f4d3a1e7a18a18f5621a0e623f814df31784a13 | /Exercise/22.return_function.py | 35996ad1bfb32813ff365804f7d23b7816612841 | []
| no_license | skylarzhs/learn-python | 9b681a22f4fe96b894ede91f2967b5b6fc8c092e | b7a243121515ff95e6ca249c296596aef294d9c8 | refs/heads/master | 2021-07-07T07:20:05.139496 | 2020-07-25T07:57:38 | 2020-07-25T07:57:38 | 155,383,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,789 | py | # -*- coding:utf-8 -*-
# Summing a variable number of arguments
# def calc_sum(*args):
# ax = 0
# for i in args:
# ax = ax + i
# return ax
# def lazy_sum(*args):
# def sum():
# ax = 0
# for i in args:
# ax = ax + i
# return ax
# return sum
# print(calc_sum(1,2,3,4,5))
# f = lazy_sum(1,2,3,4,5)
# print(f)
# print(f())
# Closures
# def count():
# fs = []
# for i in range(1, 4):
# # print(i)
# def f():
# return i * i
# # print(f)
# fs.append(f)
# # print(fs)
# return fs
# f1, f2, f3 = count()
# print(11111111111)
# print(f1())
# print(2222222222222)
# print(f2())
# print(3333333333333)
# print(f3())
# All three print 9! The returned functions reference the variable i, but they are not executed immediately. By the time all three functions have been returned, the i they reference has already become 3, so the final result is 9.
# 1
# <function count.<locals>.f at 0x000002D93D70E280>
# [<function count.<locals>.f at 0x000002D93D70E280>]
# 2
# <function count.<locals>.f at 0x000002D93D70E310>
# [<function count.<locals>.f at 0x000002D93D70E280>, <function count.<locals>.f at 0x000002D93D70E310>]
# 3
# <function count.<locals>.f at 0x000002D93D70E3A0>
# [<function count.<locals>.f at 0x000002D93D70E280>, <function count.<locals>.f at 0x000002D93D70E310>, <function count.<locals>.f at 0x000002D93D70E3A0>]
# 11111111111
# 9
# 2222222222222
# 9
# 3333333333333
# 9
# def count():
# def f(j):
# def g():
# return j * j
# return g
# fs = []
# for i in range(1, 4):
# print(fs)
# print(i)
# print(f(i))
# fs.append(f(i)) # f(i) is executed immediately, so the current value of i is passed into f()
# print(9999999)
# print(fs)
# return fs
# f1, f2, f3 = count()
# print(111111)
# print(f1)
# print(f2)
# print(f3)
# print(f1())
# print(f2())
# print(f3())
# PS python .\Exercise\22.return_function.py
# []
# 1
# <function count.<locals>.f.<locals>.g at 0x000001E53793E310>
# 9999999
# [<function count.<locals>.f.<locals>.g at 0x000001E53793E310>]
# 2
# <function count.<locals>.f.<locals>.g at 0x000001E53793E3A0>
# 9999999
# [<function count.<locals>.f.<locals>.g at 0x000001E53793E310>, <function count.<locals>.f.<locals>.g at 0x000001E53793E3A0>]
# 3
# <function count.<locals>.f.<locals>.g at 0x000001E53793E430>
# 9999999
# [<function count.<locals>.f.<locals>.g at 0x000001E53793E310>, <function count.<locals>.f.<locals>.g at 0x000001E53793E3A0>, <function count.<locals>.f.<locals>.g at
# 0x000001E53793E430>]
# 111111
# 1
# 4
# 9
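# A common alternative (a sketch, not part of the original exercise): bind the
# loop variable through a default argument, which is evaluated at definition
# time, so each f keeps its own copy of i.
# def count():
#     fs = []
#     for i in range(1, 4):
#         def f(i=i):
#             return i * i
#         fs.append(f)
#     return fs
# f1, f2, f3 = count()
# print(f1(), f2(), f3())  # 1 4 9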
# Using lambda to shorten the code
# def count():
# def f(j):
# return lambda : j * j
# fs = []
# for i in range(1, 4):
# fs.append(f(i))
# return fs
# f1, f2, f3 = count()
# print(f1())
# print(f2())
# print(f3())
# Exercise
# Use a closure to return a counter function that returns an increasing integer on each call:
def createCounter():
    i = 0 # first define a variable to hold the initial value
def counter():
        nonlocal i # declare that i is not local to the inner function; otherwise the inner function could only read it, and assigning to it would make it local, raising "local variable referenced before assignment".
        i = i + 1 # each call of the inner function adds 1 to i ======= the key point is modifying the enclosing variable's value!
return i
return counter
# f1 = createCounter()
# print(777777)
# print(f1)
# print(f1(), f1(), f1())
# f2 = createCounter()
# print(f2)
# print(f2(),f2(),f2())
# Test:
counterA = createCounter()
print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5
counterB = createCounter()
if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:
    print('Test passed!')
else:
    print('Test failed!')
| [
"[email protected]"
]
| |
eecf1d418155f7ae27b4dce7f4c4aa384bb413b4 | 808e7d23eb345724d9bc06804f352a7ec40fcf3b | /testDjango/views.py | e15286f548f80dd7333e50fab0e1e1c27c4199aa | []
| no_license | fxma/testDjango | 3718c184d62698e7c8b7e61f75d7c5a4d294fce4 | da2bb8e647d5987e3a7ca3fd58f904e88d2bcf11 | refs/heads/master | 2020-04-11T18:42:05.876201 | 2018-12-16T14:39:23 | 2018-12-16T14:39:23 | 162,008,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | from django.http import HttpResponse,Http404
import time
import datetime
from django.shortcuts import render
from django.shortcuts import render_to_response
import MySQLdb
# def hello(request):
# return HttpResponse("hello world!")
def hello(request):
context = dict()
context['hello'] = 'Hello World!'
return render(request, 'hello.html', context)
def current_time(request):
# return HttpResponse("Current time is: "+time.strftime('%Y-%m-%d %H:%M:%S'))
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def hours_ahead(request, offset):
try:
offset = int(offset)
except ValueError:
raise Http404()
dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
html = "<html><body>In %s hour(s), it will be %s.</body></html>" % (offset, dt)
return HttpResponse(html)
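# Hypothetical URL wiring for the views in this module (a sketch; the real
# urls.py is not part of this file, and the route patterns below are made up
# to match the Django 1.x-era imports already used here):
# from django.conf.urls import url
# from testDjango import views
# urlpatterns = [
#     url(r'^hello/$', views.hello),
#     url(r'^time/$', views.current_time),
#     url(r'^time/plus/(\d{1,2})/$', views.hours_ahead),
#     url(r'^books/$', views.book_list),
# ]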
def book_list(request):
db = MySQLdb.connect(user='test', db='testdb', passwd='test', host='localhost')
cursor = db.cursor()
cursor.execute('SELECT name FROM books ORDER BY name')
names = [row[0] for row in cursor.fetchall()]
db.close()
return render_to_response('book_list.html', {'names': names}) | [
"[email protected]"
]
| |
13d1d80e09ac2d722d4552d168bf1f004a37ebcc | 9f527d2c7378758ee1d1d3d99257f05c55272447 | /web/models.py | 207b02b1d20031e74ef9c43c6eec7be40c64e696 | []
| no_license | coolestcat/higweb | 7dd4048df252221c761520e7a54ad29bc6105343 | 70667711aca7fd28325a3143ee090c45411f3cbc | refs/heads/master | 2021-01-19T00:30:15.817902 | 2015-04-19T22:57:10 | 2015-04-19T22:57:10 | 32,948,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,217 | py | from __future__ import unicode_literals
from tastypie.utils.timezone import now
from django.db import models
class Alias(models.Model):
aliassernum = models.IntegerField(db_column='AliasSerNum', primary_key=True) # Field name made lowercase.
aliasname = models.CharField(db_column='AliasName', unique=True, max_length=100) # Field name made lowercase.
aliastype = models.CharField(db_column='AliasType', max_length=25) # Field name made lowercase.
aliasupdate = models.IntegerField(db_column='AliasUpdate') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Alias'
class Aliasexpression(models.Model):
aliasexpressionsernum = models.IntegerField(db_column='AliasExpressionSerNum', primary_key=True) # Field name made lowercase.
aliassernum = models.IntegerField(db_column='AliasSerNum') # Field name made lowercase.
expressionname = models.CharField(db_column='ExpressionName', max_length=100) # Field name made lowercase.
class Meta:
managed = False
db_table = 'AliasExpression'
class Appointment(models.Model):
appointmentsernum = models.IntegerField(db_column='AppointmentSerNum', primary_key=True) # Field name made lowercase.
patientsernum = models.IntegerField(db_column='PatientSerNum') # Field name made lowercase.
appointmentid = models.IntegerField(db_column='AppointmentId', blank=True, null=True) # Field name made lowercase.
diagnosissernum = models.IntegerField(db_column='DiagnosisSerNum') # Field name made lowercase.
prioritysernum = models.IntegerField(db_column='PrioritySerNum') # Field name made lowercase.
aliassernum = models.IntegerField(db_column='AliasSerNum', blank=True, null=True) # Field name made lowercase.
aliasexpressionsernum = models.IntegerField(db_column='AliasExpressionSerNum') # Field name made lowercase.
status = models.CharField(db_column='Status', max_length=50, blank=True) # Field name made lowercase.
scheduledstarttime = models.DateTimeField(db_column='ScheduledStartTime', blank=True, null=True) # Field name made lowercase.
scheduledendtime = models.DateTimeField(db_column='ScheduledEndTime', blank=True, null=True) # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Appointment'
class Cron(models.Model):
profile = models.IntegerField(db_column='Profile') # Field name made lowercase.
nextcron = models.DateField(db_column='NextCron') # Field name made lowercase.
repeatoption = models.CharField(db_column='RepeatOption', max_length=25) # Field name made lowercase.
repeattime = models.TimeField(db_column='RepeatTime') # Field name made lowercase.
lastcron = models.DateTimeField(db_column='LastCron') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Cron'
class Diagnosis(models.Model):
diagnosissernum = models.IntegerField(db_column='DiagnosisSerNum', primary_key=True) # Field name made lowercase.
patientsernum = models.IntegerField(db_column='PatientSerNum') # Field name made lowercase.
diagnosisid = models.CharField(db_column='DiagnosisId', max_length=25, blank=True) # Field name made lowercase.
diagnosiscreationdate = models.DateTimeField(db_column='DiagnosisCreationDate', blank=True, null=True) # Field name made lowercase.
diagnosiscode = models.CharField(db_column='DiagnosisCode', max_length=25, blank=True) # Field name made lowercase.
description = models.TextField(db_column='Description') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Diagnosis'
class Doctor(models.Model):
doctorsernum = models.IntegerField(db_column='DoctorSerNum', primary_key=True) # Field name made lowercase.
oncologistflag = models.IntegerField(db_column='OncologistFlag') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Doctor'
class Document(models.Model):
documentsernum = models.IntegerField(db_column='DocumentSerNum', primary_key=True) # Field name made lowercase.
patientsernum = models.IntegerField(db_column='PatientSerNum') # Field name made lowercase.
documentid = models.CharField(db_column='DocumentId', max_length=30) # Field name made lowercase.
diagnosissernum = models.IntegerField(db_column='DiagnosisSerNum') # Field name made lowercase.
prioritysernum = models.IntegerField(db_column='PrioritySerNum') # Field name made lowercase.
approvalstatus = models.CharField(db_column='ApprovalStatus', max_length=11) # Field name made lowercase.
approvedbysernum = models.IntegerField(db_column='ApprovedBySerNum', blank=True, null=True) # Field name made lowercase.
approvedtimestamp = models.DateTimeField(db_column='ApprovedTimeStamp', blank=True, null=True) # Field name made lowercase.
authoredbysernum = models.IntegerField(db_column='AuthoredBySerNum') # Field name made lowercase.
dateofservice = models.DateTimeField(db_column='DateOfService') # Field name made lowercase.
aliassernum = models.IntegerField(db_column='AliasSerNum') # Field name made lowercase.
aliasexpressionsernum = models.IntegerField(db_column='AliasExpressionSerNum') # Field name made lowercase.
printed = models.CharField(db_column='Printed', max_length=5, blank=True) # Field name made lowercase.
signedbysernum = models.IntegerField(db_column='SignedBySerNum', blank=True, null=True) # Field name made lowercase.
signedtimestamp = models.DateTimeField(db_column='SignedTimeStamp', blank=True, null=True) # Field name made lowercase.
supervisedbysernum = models.IntegerField(db_column='SupervisedBySerNum', blank=True, null=True) # Field name made lowercase.
createdbysernum = models.IntegerField(db_column='CreatedBySerNum') # Field name made lowercase.
createdtimestamp = models.DateTimeField(db_column='CreatedTimeStamp') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Document'
class Field(models.Model):
fieldsernum = models.IntegerField(db_column='FieldSerNum', primary_key=True) # Field name made lowercase.
plansernum = models.IntegerField(db_column='PlanSerNum') # Field name made lowercase.
fieldid = models.IntegerField(db_column='FieldId') # Field name made lowercase.
fieldcreationdate = models.DateTimeField(db_column='FieldCreationDate') # Field name made lowercase.
gantryrtn = models.FloatField(db_column='GantryRtn') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Field'
class Patient(models.Model):
patientsernum = models.IntegerField(db_column='PatientSerNum', primary_key=True) # Field name made lowercase.
dateofbirth = models.TextField(db_column='DateOfBirth', blank=True) # Field name made lowercase. This field type is a guess.
sex = models.CharField(db_column='Sex', max_length=11, blank=True) # Field name made lowercase.
postalcode = models.CharField(db_column='PostalCode', max_length=25) # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Patient'
class Patientdoctor(models.Model):
patientdoctorsernum = models.IntegerField(db_column='PatientDoctorSerNum', primary_key=True) # Field name made lowercase.
patientsernum = models.IntegerField(db_column='PatientSerNum') # Field name made lowercase.
doctorsernum = models.IntegerField(db_column='DoctorSerNum') # Field name made lowercase.
oncologistflag = models.IntegerField(db_column='OncologistFlag') # Field name made lowercase.
primaryflag = models.IntegerField(db_column='PrimaryFlag') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'PatientDoctor'
class Patientlocation(models.Model):
patientlocationsernum = models.IntegerField(db_column='PatientLocationSerNum', primary_key=True) # Field name made lowercase.
appointmentsernum = models.IntegerField(db_column='AppointmentSerNum') # Field name made lowercase.
patientlocationid = models.IntegerField(db_column='PatientLocationId') # Field name made lowercase.
resourceser = models.IntegerField(db_column='ResourceSer') # Field name made lowercase.
revcount = models.IntegerField(db_column='RevCount') # Field name made lowercase.
checkedinflag = models.IntegerField(db_column='CheckedInFlag') # Field name made lowercase.
arrivaldatetime = models.DateTimeField(db_column='ArrivalDateTime') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'PatientLocation'
class Patientlocationmh(models.Model):
patientlocationmhsernum = models.IntegerField(db_column='PatientLocationMHSerNum', primary_key=True) # Field name made lowercase.
appointmentsernum = models.IntegerField(db_column='AppointmentSerNum') # Field name made lowercase.
patientlocationid = models.IntegerField(db_column='PatientLocationId') # Field name made lowercase.
resourceser = models.IntegerField(db_column='ResourceSer') # Field name made lowercase.
revcount = models.IntegerField(db_column='RevCount') # Field name made lowercase.
checkedinflag = models.IntegerField(db_column='CheckedInFlag') # Field name made lowercase.
arrivaldatetime = models.DateTimeField(db_column='ArrivalDateTime') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'PatientLocationMH'
class Plan(models.Model):
plansernum = models.IntegerField(db_column='PlanSerNum', primary_key=True) # Field name made lowercase.
patientsernum = models.IntegerField(db_column='PatientSerNum') # Field name made lowercase.
planid = models.IntegerField(db_column='PlanId') # Field name made lowercase.
diagnosissernum = models.IntegerField(db_column='DiagnosisSerNum') # Field name made lowercase.
prioritysernum = models.IntegerField(db_column='PrioritySerNum') # Field name made lowercase.
aliassernum = models.IntegerField(db_column='AliasSerNum') # Field name made lowercase.
aliasexpressionsernum = models.IntegerField(db_column='AliasExpressionSerNum') # Field name made lowercase.
plancreationdate = models.DateTimeField(db_column='PlanCreationDate') # Field name made lowercase.
status = models.CharField(db_column='Status', max_length=100) # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Plan'
class Priority(models.Model):
prioritysernum = models.IntegerField(db_column='PrioritySerNum', primary_key=True) # Field name made lowercase.
patientsernum = models.IntegerField(db_column='PatientSerNum') # Field name made lowercase.
priorityid = models.CharField(db_column='PriorityId', max_length=25, blank=True) # Field name made lowercase.
prioritydatetime = models.DateTimeField(db_column='PriorityDateTime', blank=True, null=True) # Field name made lowercase.
prioritycode = models.CharField(db_column='PriorityCode', max_length=25, blank=True) # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Priority'
class Staff(models.Model):
staffsernum = models.IntegerField(db_column='StaffSerNum', primary_key=True) # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Staff'
class Study(models.Model):
studysernum = models.IntegerField(db_column='StudySerNum', primary_key=True) # Field name made lowercase.
usersernum = models.IntegerField(db_column='UserSerNum') # Field name made lowercase.
studyname = models.CharField(db_column='StudyName', max_length=100) # Field name made lowercase.
relativeplot = models.IntegerField(db_column='RelativePlot') # Field name made lowercase.
binwidth = models.IntegerField(db_column='BinWidth') # Field name made lowercase.
thresholdtype = models.CharField(db_column='ThresholdType', max_length=100) # Field name made lowercase.
thresholdpercent = models.IntegerField(db_column='ThresholdPercent', blank=True, null=True) # Field name made lowercase.
histdataseriestype = models.CharField(db_column='HistDataSeriesType', max_length=25) # Field name made lowercase.
histdatastartdate = models.DateField(db_column='HistDataStartDate') # Field name made lowercase.
histdataenddate = models.DateField(db_column='HistDataEndDate') # Field name made lowercase.
currdataseriestype = models.CharField(db_column='CurrDataSeriesType', max_length=25) # Field name made lowercase.
currdatastartdate = models.DateField(db_column='CurrDataStartDate') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Study'
class Studydiagnosisfilter(models.Model):
studydiagnosisfiltersernum = models.IntegerField(db_column='StudyDiagnosisFilterSerNum', primary_key=True) # Field name made lowercase.
studysernum = models.IntegerField(db_column='StudySerNum') # Field name made lowercase.
filtername = models.TextField(db_column='FilterName') # Field name made lowercase.
class Meta:
managed = False
db_table = 'StudyDiagnosisFilter'
class Studypriorityfilter(models.Model):
studypriorityfiltersernum = models.IntegerField(db_column='StudyPriorityFilterSerNum', primary_key=True) # Field name made lowercase.
studysernum = models.IntegerField(db_column='StudySerNum') # Field name made lowercase.
filtername = models.CharField(db_column='FilterName', max_length=1000) # Field name made lowercase.
class Meta:
managed = False
db_table = 'StudyPriorityFilter'
class Studythreshold(models.Model):
thresholdsernum = models.IntegerField(db_column='ThresholdSerNum', primary_key=True) # Field name made lowercase.
studysernum = models.IntegerField(db_column='StudySerNum') # Field name made lowercase.
minimum = models.IntegerField(db_column='Minimum') # Field name made lowercase.
maximum = models.IntegerField(db_column='Maximum') # Field name made lowercase.
class Meta:
managed = False
db_table = 'StudyThreshold'
class Task(models.Model):
tasksernum = models.IntegerField(db_column='TaskSerNum', primary_key=True) # Field name made lowercase.
patientsernum = models.IntegerField(db_column='PatientSerNum') # Field name made lowercase.
taskid = models.IntegerField(db_column='TaskId', blank=True, null=True) # Field name made lowercase.
diagnosissernum = models.IntegerField(db_column='DiagnosisSerNum') # Field name made lowercase.
prioritysernum = models.IntegerField(db_column='PrioritySerNum') # Field name made lowercase.
aliassernum = models.IntegerField(db_column='AliasSerNum') # Field name made lowercase.
aliasexpressionsernum = models.IntegerField(db_column='AliasExpressionSerNum') # Field name made lowercase.
status = models.CharField(db_column='Status', max_length=50) # Field name made lowercase.
duedatetime = models.DateTimeField(db_column='DueDateTime', blank=True, null=True) # Field name made lowercase.
creationdate = models.DateTimeField(db_column='CreationDate', blank=True, null=True) # Field name made lowercase.
completiondate = models.DateTimeField(db_column='CompletionDate', blank=True, null=True) # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Task'
class Timedelaystudy(models.Model):
timedelaystudysernum = models.IntegerField(db_column='TimeDelayStudySerNum', primary_key=True) # Field name made lowercase.
studysernum = models.IntegerField(db_column='StudySerNum') # Field name made lowercase.
startaliassernum = models.IntegerField(db_column='StartAliasSerNum') # Field name made lowercase.
starttimestampname = models.CharField(db_column='StartTimeStampName', max_length=100) # Field name made lowercase.
startstatuses = models.CharField(db_column='StartStatuses', max_length=100, blank=True) # Field name made lowercase.
endaliassernum = models.IntegerField(db_column='EndAliasSerNum') # Field name made lowercase.
endtimestampname = models.CharField(db_column='EndTimeStampName', max_length=100) # Field name made lowercase.
endstatuses = models.CharField(db_column='EndStatuses', max_length=100) # Field name made lowercase.
class Meta:
managed = False
db_table = 'TimeDelayStudy'
class Treatmentfieldhstry(models.Model):
treatmentfieldhstrysernum = models.IntegerField(db_column='TreatmentFieldHstrySerNum', primary_key=True) # Field name made lowercase.
plansernum = models.IntegerField(db_column='PlanSerNum') # Field name made lowercase.
treatmentfieldhstryid = models.IntegerField(db_column='TreatmentFieldHstryId') # Field name made lowercase.
treatmentdatetime = models.DateTimeField(db_column='TreatmentDateTime') # Field name made lowercase.
gantryrtn = models.FloatField(db_column='GantryRtn') # Field name made lowercase.
lastupdated = models.DateTimeField(db_column='LastUpdated') # Field name made lowercase.
class Meta:
managed = False
db_table = 'TreatmentFieldHstry'
class Treatmentparameterstudy(models.Model):
treatmentparameterstudysernum = models.IntegerField(db_column='TreatmentParameterStudySerNum', primary_key=True) # Field name made lowercase.
studysernum = models.IntegerField(db_column='StudySerNum') # Field name made lowercase.
aliassernum = models.IntegerField(db_column='AliasSerNum') # Field name made lowercase.
treatmentparameterfield = models.CharField(db_column='TreatmentParameterField', max_length=100) # Field name made lowercase.
treatmentparameterdisplayname = models.CharField(db_column='TreatmentParameterDisplayName', max_length=100) # Field name made lowercase.
treatmentparameterunits = models.CharField(db_column='TreatmentParameterUnits', max_length=100) # Field name made lowercase.
planstatus = models.CharField(db_column='PlanStatus', max_length=100) # Field name made lowercase.
polarplot = models.IntegerField(db_column='PolarPlot') # Field name made lowercase.
class Meta:
managed = False
db_table = 'TreatmentParameterStudy'
class User(models.Model):
usersernum = models.IntegerField(db_column='UserSerNum', primary_key=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'User'
class Venue(models.Model):
venuesernum = models.IntegerField(db_column='VenueSerNum', primary_key=True) # Field name made lowercase.
venuename = models.CharField(db_column='VenueName', max_length=50) # Field name made lowercase.
resourceser = models.IntegerField(db_column='ResourceSer') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Venue'
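# Illustrative query against these unmanaged tables (a sketch; the filter
# values are made up):
# recent = (Appointment.objects
#           .filter(patientsernum=42, status='Open')
#           .order_by('-scheduledstarttime')[:10])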
class Entry(models.Model):
pub_date = models.DateTimeField(default=now)
title = models.CharField(max_length=200)
def __unicode__(self):
return self.title
| [
"[email protected]"
]
| |
1c44758ce87bbe708afbe87796414cc6e00ce10f | 6486fab69a89645a45d71c482d84d7de49dd1659 | /stringAnalyzer.py | 2a911af1d675b50769431efa47ac084db85e95a0 | []
| no_license | elmerjaen/Lexical-Parser | 1c7e625137e2d97a8bbb7e380ce1f46f9e61dbdc | a1b2c36324c6e313f4973a81216731ff468b0047 | refs/heads/main | 2023-09-04T20:13:56.696921 | 2021-11-21T16:21:18 | 2021-11-21T16:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,228 | py | # Analizador Léxico by Elmer Jaén
import matplotlib.pyplot as plt
def table(list_show):
fig, ax = plt.subplots(1,1)
    plt.rcParams.update({'font.size': 18}) #change font size
# row_labels is optional
    row_labels=['Reserved words:', 'Identifiers:', 'Mathematical/logical operators:','Positive and negative numbers:']
ax.axis('tight')
ax.axis('off')
the_table = ax.table(cellText=list_show, rowLabels=row_labels, loc="center", cellLoc='center')
the_table.scale(2,3) #change table scale
for i in range(0, 4):
the_table[(i, -1)].set_facecolor("#56b5fd")
plt.show()
reserved_keywords = ['If', 'Else', 'Declare', 'Dim', 'Integer']
operators = ['+', '-', '*', '/', '=', '==', 'and', 'or', 'not']
def show_results(data_list):
list_show = []
k = 0
for i in data_list:
string = ""
list_show.append([])
for j in i:
string += str(j) + ", "
string = string[:-2]
if list_show:
list_show[k].append(string)
else:
list_show.append(string)
k += 1
table(list_show)
def classify(data):
keywords_in_data = []
operators_in_data = []
numbers_in_data = []
identifiers_in_data = []
IDENTIFIERS = []
    # get all reserved keywords
for i in reserved_keywords:
for j, k in enumerate(data):
if i in k:
keywords_in_data.append(i)
# get all the possible identifiers that are neither in
# reserved_keywords nor in operators
for i in data:
if i.isidentifier() == True and i not in reserved_keywords and i not in operators:
identifiers_in_data.append(i)
for i, j in enumerate(identifiers_in_data):
if j[0] != "_":
IDENTIFIERS.append(j)
# get all the operators
for i in operators:
for j, k in enumerate(data):
if i == k:
operators_in_data.append(i)
# get all the negative and positive numbers
for i, j in enumerate(data):
if j == "" or j == "-":
continue
elif j.isnumeric() == True:
numbers_in_data.append(int(j))
# for negative numbers
elif j[0] == "-" and j[1].isnumeric():
numbers_in_data.append(int(j))
return keywords_in_data, IDENTIFIERS, operators_in_data, numbers_in_data
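# Illustrative call (a sketch): for the tokens of "Dim x = -3 + y",
# classify(['Dim', 'x', '=', '-3', '+', 'y'])
# returns (['Dim'], ['x', 'y'], ['+', '='], [-3])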
# extract word for word
def extract_words(data):
data2 = []
string = ""
data_size = len(data)-1
for i, j in enumerate(data):
j_size = len(j)-1
for k, m in enumerate(j):
# delete " " and \n
if m != " " and m != "\n":
if m == "\t":
continue
else:
string += m
else:
data2.append(string)
string = ""
return data2
def run():
data = []
print("\nA continuación ingrese una cadena. Escriba 'exit' al terminar.\n")
while True:
string = input()
if string == 'exit':
break
else:
data.append(string+'\n')
data_list = classify(extract_words(data))
show_results(data_list)
if __name__ == '__main__':
run() | [
"[email protected]"
]
| |
643795f151072295d587adb43db76d88e1683f49 | 1d95859dd9154fa2e7f087f111cbb1e1441fb619 | /Chapter4Lists/Proj1.py | c3b1041acd647d9d30c6072fd7842acc785e9d48 | []
| no_license | mrkruoff/Work_with_Automate_the_boring_stuff | d54906b983269e34106fed2966daf2f4f42f8af6 | 2bcd7e45830301511cf414a2d46db9927520402f | refs/heads/master | 2021-07-06T13:34:41.573065 | 2017-09-26T22:21:46 | 2017-09-27T16:56:37 | 104,943,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | def comma(inputlist):
    # join the items as 'a, b, ..., and z'; a one-item list is returned as-is
    if len(inputlist) == 1:
        return inputlist[0]
    outputstring = inputlist[0]
    for i in range(1, len(inputlist) - 1):
        outputstring += ', ' + inputlist[i]
    outputstring += ', and ' + inputlist[len(inputlist) - 1]
    return outputstring
mylist=['apples','bananas', 'tofu', 'cats', 'dogs', 'mice', 'yogurt']
string=comma(mylist)
print(string)
| [
"[email protected]"
]
| |
ab64df24d2ab5994bbd12576db574fa3037e90ec | 22b555ed4a5ce9256e81e78977ae5ab50d4af80c | /apps/profile/migrations/0027_auto_20150916_1417.py | 0bd02d9c79f72dcaeb606eb99bb3ece5e03e0e10 | []
| no_license | kcole16/jobfire | 334cfe3a8a3a7028db543a794387e58414e95548 | f55b719f64312bbc3542624d45cf2fa2802779a4 | refs/heads/master | 2021-07-11T06:44:32.295415 | 2015-09-26T21:20:50 | 2015-09-26T21:20:50 | 39,473,036 | 0 | 0 | null | 2021-06-10T20:27:29 | 2015-07-21T22:30:29 | JavaScript | UTF-8 | Python | false | false | 404 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('profile', '0026_auto_20150915_2314'),
]
operations = [
migrations.AlterField(
model_name='company',
name='about',
field=models.CharField(max_length=1400),
),
]
| [
"[email protected]"
]
| |
d6f88a51961dfbaff72ba17a232a5e048f814825 | e108a45d456b9e3a2e0f1b0ddd51f552d397f7b2 | /TocTable_algorithm.py | f61e2884603f2ed6d814000d303ddfbd990c93b1 | [
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0"
]
| permissive | agiudiceandrea/TocTable | 66efb92195350f9da97d2b19b0d86644224ed9f1 | 7f99457970ac1eef1d8343519bf9cc6511964ed2 | refs/heads/main | 2023-02-24T21:23:14.061388 | 2020-11-30T15:46:29 | 2020-11-30T15:46:29 | 334,705,074 | 0 | 0 | null | 2021-01-31T16:45:06 | 2021-01-31T16:45:05 | null | UTF-8 | Python | false | false | 11,746 | py | # -*- coding: utf-8 -*-
"""
/***************************************************************************
TocTable
A QGIS plugin
TocTable
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2020-11-23
copyright : (C) 2020 by Giulio Fattori
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Giulio Fattori'
__date__ = '2020-11-23'
__copyright__ = '(C) 2020 by Giulio Fattori'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsProject,
QgsField,
QgsFields,
QgsFeature,
QgsFeatureSink,
QgsMapLayerType,
QgsWkbTypes,
QgsLayerTreeGroup,
QgsLayerTreeLayer,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterField,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink)
import datetime
#this import block is for the processing algorithm's icon
import os
import inspect
from qgis.PyQt.QtGui import QIcon
class TocTableAlgorithm(QgsProcessingAlgorithm):
"""
TOC algorithm retrieve info from Metadata and some
attributes of each layer and collect it's in a table.
"""
INPUT_F = 'INPUT_F'
OUTPUT = 'OUTPUT'
def tr(self, string):
"""
Returns a translatable string with the self.tr() function.
"""
return QCoreApplication.translate('Processing', string)
    #icon for the processing algorithm
def icon(self):
cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]
icon = QIcon(os.path.join(os.path.join(cmd_folder, 'icon.png')))
return icon
def createInstance(self):
return TocTableAlgorithm()
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'Toc Table'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('Toc Table')
def group(self):
"""
Returns the name of the group this algorithm belongs to. This string
should be localised.
"""
return ''
def groupId(self):
"""
Returns the unique ID of the group this algorithm belongs to. This
string should be fixed for the algorithm, and must not be localised.
The group id should be unique within each provider. Group id should
contain lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return ''
def shortHelpString(self):
"""
Returns a localised short helper string for the algorithm. This string
should provide a basic description about what the algorithm does and the
        parameters and outputs associated with it.
"""
return self.tr("The algorithm retrieves some properties and metadata of the project layers and \
inserts them in a table so that they can be inserted in the prints. Keeps track\
of the order of layers in the project and any groups\n \
Questo algoritmo recupera alcuni metadati e proprietà dei layer del progetto e\
li raccoglie in una tabella così da poterli inserire nelle stampe.\
Tiene traccia dell'ordine dei layer nel progetto e degli eventuali gruppi")
def initAlgorithm(self, config=None):
"""
Here we define the inputs and output of the algorithm, along
with some other properties.
"""
# We add a feature sink in which to store our processed features (this
# usually takes the form of a newly created vector layer when the
# algorithm is run in QGIS).
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Project_Layers_Table ' + str(datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")))
)
)
self.addParameter(
QgsProcessingParameterField(
self.INPUT_F,
self.tr('Campi da inserire nella TocTable'),
'Layer_N;Layer_Group_Level;Layer_Storage;Layer_Name;Geometry_Not_Valid;Layer_Crs;Layer_Type;Layer_Type_Name;Layer_Source;Raster_type;Raster_data_type;Raster_Info_dim;Raster_extent;Raster_Info_res;Raster_NoDataValue;Layer_Feature_Count;Layer_Meta_Parent_Id;Layer_Meta_Identifier;Layer_Meta_Title;Layer_Meta_Type;Layer_Meta_Language;Layer_Meta_Abstract',
allowMultiple = True
)
)
def processAlgorithm(self, parameters, context, feedback):
"""
Here is where the processing itself takes place.
"""
i_fields = self.parameterAsMatrix(
parameters,
self.INPUT_F,
context)
        #BUILD THE PROJECT CONTENTS TABLE
        #for other fields, first decide which ones are worth adding
        #recursive helper giving each layer's position in the TOC
def get_group_layers(group, level):
level = level + group.name() + ' - '#' >> '
for child in group.children():
if isinstance(child, QgsLayerTreeGroup):
get_group_layers(child, level)
else:
TOC_dict [child.name()] = level
#print(lev)
        #dictionary of positions
TOC_dict ={}
root = QgsProject.instance().layerTreeRoot()
for child in root.children():
level = 'root - ' #' >> '
if isinstance(child, QgsLayerTreeGroup):
get_group_layers(child, level)
elif isinstance(child, QgsLayerTreeLayer):
#lev = level #+ child.name())
TOC_dict[child.name()] = level
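        # After the walk, TOC_dict maps layer name -> group path; e.g. a layer
        # "roads" inside group "transport" plus a top-level layer "dem" give
        # (illustrative values): {'roads': 'root - transport - ', 'dem': 'root - '}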
#abort if TOC is empty
#feedback.pushInfo (str(TOC_dict))
#feedback.pushInfo (str(not bool(TOC_dict)))
if not bool(TOC_dict):
            raise QgsProcessingException('Invalid input value: EMPTY PROJECT')
        #name of the resulting table parameter
report = 'Project_Layers_Table'
fields = QgsFields()
for item in i_fields:
if item in ('Layer_N','Geometry_Not_Valid','Layer_Type','Layer_Feature_Count'):
fields.append(QgsField(item, QVariant.Int))
else:
fields.append(QgsField(item, QVariant.String))
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context, fields)
# If sink was not created, throw an exception to indicate that the algorithm
# encountered a fatal error. The exception text can be any string, but in this
# case we use the pre-built invalidSinkError method to return a standard
# helper text for when a sink cannot be evaluated
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
feat = QgsFeature()
count = 1
for layer in QgsProject.instance().mapLayers().values():
if layer.name().find("Project_Layers_Table") == -1:
Layer_N = count
count += 1
Layer_Name = layer.name()
Layer_Group_Level = TOC_dict.get(Layer_Name)
Layer_Crs = layer.crs().authid()
Layer_Source = layer.source()
Layer_Meta_Parent_Id = layer.metadata().parentIdentifier()
Layer_Meta_Identifier = layer.metadata().identifier()
Layer_Meta_Title = layer.metadata().title()
Layer_Meta_Type = layer.metadata().type()
Layer_Meta_Language = layer.metadata().language()
Layer_Meta_Abstract = layer.metadata().abstract()
Raster_type = Raster_data_type = Raster_Info_dim = '-'
Raster_extent = Raster_Info_res = Raster_NoDataValue = '-'
if layer.type() is not QgsMapLayerType.RasterLayer:
Layer_Feature_Count = layer.featureCount()
Layer_Type = layer.wkbType()
Layer_Storage = layer.storageType()
Layer_Type_Name = QgsWkbTypes.displayString(layer.wkbType())
Geometry_Not_Valid = 0
for f in layer.getFeatures():
if not f.geometry().isGeosValid():
Geometry_Not_Valid += 1
else:
Layer_Type = (0)
Layer_Type_Name = QgsMapLayerType.RasterLayer.name
Layer_Storage = ''
Layer_Feature_Count = 'nan'
Geometry_Not_Valid = 0
gh = layer.height()
gw = layer.width()
Raster_extent = layer.extent().toString()
provider = layer.dataProvider()
gpx = layer.rasterUnitsPerPixelX()
gpy = layer.rasterUnitsPerPixelY()
block = provider.block(1, layer.extent(), gpy, gpx)
for band in range(1, layer.bandCount()+1):
#print('Band ', band, layer.dataProvider().sourceNoDataValue(band))
Raster_NoDataValue = Raster_NoDataValue + 'Band ' + str(band) + ': ' + str(layer.dataProvider().sourceNoDataValue(band)) + ' '
Raster_data_type = type(provider.sourceNoDataValue(band)).__name__
Raster_type = layer.renderer().type()
#feedback.pushInfo(str(gh)+' x '+str(gw)+' - '+ str(gpx)+' x '+str(gpy))
Raster_Info_dim = str(gh) + ' x '+ str(gw)
Raster_Info_res = str(gpx) + ' x ' + str(gpy)
campi = []
for item in i_fields:
campi.append(vars()[item])
feat.setAttributes(campi)
sink.addFeature(feat, QgsFeatureSink.FastInsert)
return {self.OUTPUT: dest_id} | [
"[email protected]@libero.it"
]
| [email protected]@libero.it |
9731bfb1731f6bb96de9696493701447b90e6fd0 | b3be7c8358e07191571c009cdf2d341d7b075a48 | /media.py | 964e09100bae4d59e91c436b7fcd339bd780ea54 | []
| no_license | jassie-rangar/IPND-3rd-Project | c2112eaecf19e745cfd589408f3c38137d286479 | 74c3cb56eccf118933425003e1b7feeff1a5daf9 | refs/heads/master | 2021-01-22T05:11:01.622516 | 2017-02-11T07:29:40 | 2017-02-11T07:29:40 | 81,638,001 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | import webbrowser
class Movie():
"""This class provides a way to store movie related information"""
def __init__(self,movie_title,movie_storyline,poster_image,trailer_youtube):
""" This docstring explains the constructor method, it's inputs and outputs if any """
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
def show_trailer(self):
""" This docstring explains what the show_trailer function does """
webbrowser.open(self.trailer_youtube_url)
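# Illustrative usage (a sketch; the movie data and URLs are made up):
# toy_story = Movie('Toy Story',
#                   'A cowboy doll is jealous of the new spaceman figure.',
#                   'https://example.com/toy_story_poster.jpg',
#                   'https://www.youtube.com/watch?v=example')
# toy_story.show_trailer()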
| [
"[email protected]"
]
| |
50388300e027e2679a18ee7082721eaca1d73ad6 | 2fe2f5d4a7f9649f1dc01cb41dac7e13e62d6b5e | /08/demos/Assignment/cust_objects/null_cust.py | 8f5d46438a5e6e31231c4d2a48285f967bf3cade | []
| no_license | ankitagarwal/pluralsight_design_patterns | 2edc6de68484801dcf1edfa6bd81221e7545c75b | 9ded77e17898b2be0223dbc5c3736459efe6ef81 | refs/heads/main | 2023-05-01T21:46:03.407125 | 2021-05-25T15:44:44 | 2021-05-25T15:44:44 | 370,723,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from .abs_cust import AbsCust
class NullCust(AbsCust):
def __init__(self,cust_type):
self._cust_type = cust_type
@property
def name(self):
return None
@name.setter
def name(self, name):
pass
def send_invoice(self):
print('Customer type "%s" not found.' % self._cust_type)
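# Illustrative use of the null object (a sketch; get_customer stands in for
# a hypothetical factory that falls back to NullCust for unknown types):
# cust = get_customer('platinum') or NullCust('platinum')
# cust.send_invoice()  # -> Customer type "platinum" not found.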
| [
"[email protected]"
]
| |
2c04d227f256a556a0f089ee255feac18760ec34 | c63f226c88826675ad6a87e1acf2a7511d0ea0c3 | /apps/login/models.py | 6bf1130b58c6cbbc87f2524e83575874f57b3cc1 | []
| no_license | qwerji/login_app | 1024041395c0c73f81060178856d06d14aa94771 | 64da2f584c2339d124cfef3cb89a53d49e313c36 | refs/heads/master | 2020-01-24T20:49:20.709105 | 2016-11-17T22:59:44 | 2016-11-17T22:59:44 | 73,858,722 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py | from __future__ import unicode_literals
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
import bcrypt, re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
def validate_reg(self, request):
errors = self.validate_inputs(request)
if errors:
return (False, errors)
pw_hash = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())
user = self.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=pw_hash)
return (True, user)
def validate_login(self, request):
try:
user = User.objects.get(email=request.POST['email'])
password = request.POST['password'].encode()
            # hashing the candidate password with the stored hash as salt must
            # reproduce the stored hash exactly; a bare truthiness check would
            # accept any password
            if bcrypt.hashpw(password, user.password.encode()) == user.password.encode():
                return (True, user)
except ObjectDoesNotExist:
pass
return (False, ["Invalid login."])
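    # Illustrative view-side usage (a sketch; the session handling shown here
    # is assumed, not part of this app):
    # valid, result = User.objects.validate_login(request)
    # if valid:
    #     request.session['user_id'] = result.id
    # else:
    #     errors = result  # list of error strings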
def validate_inputs(self, request):
errors = []
if not request.POST['first_name']:
errors.append('First name cannot be blank.')
if not request.POST['email']:
errors.append('Please enter an email.')
elif not EMAIL_REGEX.match(request.POST['email']):
errors.append('Invalid email.')
if len(request.POST['password']) < 8:
errors.append('Password must be at least 8 characters.')
if request.POST['password'] != request.POST['confirm']:
errors.append('Password and password confirm must match.')
return errors
class User(models.Model):
first_name = models.CharField(max_length = 50)
last_name = models.CharField(max_length = 50)
email = models.CharField(max_length = 50)
password = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
| [
"[email protected]"
]
| |
41e74dfbc4c3d4cce2a45291771017f36b7c0f9f | f7c987afa021b028b8308a8fbf8fad08e6a24b21 | /app/models.py | 98323996c6f80ebf57d517a8375b3de4575f2add | [
"MIT"
]
| permissive | Evance23/News-Zone | 3e3586da195e259471c192f0455216a3e1273997 | 7b4ea65f1d1a8d6eed75901e7624c405a8fc0944 | refs/heads/master | 2023-04-23T09:31:45.001078 | 2021-05-07T12:34:11 | 2021-05-07T12:34:11 | 358,629,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | class News:
'''
news class to define news Objects
'''
def __init__(self, id, title, overview):
self.id = id
self.title = title
self.overview = overview
| [
"[email protected]"
]
| |
642e3e99d1201a885cf557bfd47a7c5f3c6ed08b | c2a7ba3176ef3a6d698b3390c942f219bfeda762 | /archiving_assistant/urls.py | 88d5103175ae59abfbb3389073c968c98e4fcdee | []
| no_license | SuperbWebDeveloper11/archiving-assistant-api | b64aabc20ce158a2364d0ed6c7e45004aff14815 | f9211f7749ebcc221ca6261f62c7136dd6c791bc | refs/heads/main | 2023-01-29T23:07:13.818559 | 2020-12-05T20:06:51 | 2020-12-05T20:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework.authtoken import views
schema_view = get_schema_view(
openapi.Info(
title="Snippets API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="[email protected]"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path('accounts/', include('accounts.urls')),
path('blog/', include('blog.urls')),
path('edoc/', include('edoc.urls')),
path('admin/', admin.site.urls),
# urls for documenting the api
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
# serving media files only during development
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
]
| |
1bcbcbfe92659458a764c39a0f71f668340971fc | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /accounts/perms.py | bd00bb6b63018efa7cc39d7709ce8ee5829b7d04 | []
| no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,451 | py | # -*- coding: utf-8 -*-
from simple_perms import PermissionLogic, register
from helpers.mixins import BasicPermissionLogicMixin
class UserPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, user_to_view, *args):
if user_to_view == user:
return True
if user.is_client or user.is_professional:
return False
if user.is_administrator or user.is_advisor or user.is_manager:
return True
return self.admin_permission(user, user_to_view, *args)
def change(self, user, user_to_modify, *args):
if user_to_modify == user:
return True
if user.is_client or user.is_professional:
return False
if user.is_administrator:
return True
# Allow same group modifications
if user_to_modify.group is not None and user_to_modify.group.is_member(user):
if user.is_advisor and user_to_modify.is_advisor:
return True
if user.is_manager and (
user_to_modify.is_advisor or user_to_modify.is_manager
):
return True
if (user.is_advisor or user.is_manager) and user_to_modify.is_client:
return True
if (
user.is_manager
and user_to_modify.is_advisor
and user_to_modify.group.admin_group == user.group
and user.group.is_admin
):
return True
if (
user.is_manager
and user_to_modify.is_manager
and user_to_modify.group == user.group
):
return True
return self.admin_permission(user, user_to_modify, *args)
def change_user_type(self, user, *args):
"""
Perm for user to change user_type for user_modified
Parameters
----------
user : User
args : Dict(user_modified, to_user_type)
"""
user_modified = args[0]["user_modified"]
to_user_type = args[0]["to_user_type"]
if user.is_client or user.is_professional:
return False
if user_modified.is_client or user_modified.is_professional:
return False
if to_user_type == "client" or to_user_type == "professional":
return False
if user.is_administrator:
return True
if user.is_manager:
if (
user_modified.is_advisor
or user_modified.is_superadvisor
or user_modified.is_manager
and user_modified.group.is_member(user)
):
if to_user_type in ["advisor", "superadvisor", "manager"]:
return True
if (
user.is_superadvisor
and to_user_type in ["advisor", "superadvisor"]
and user_modified.is_advisor
):
return True
return self.admin_permission(user, user_modified, *args)
register("user", UserPermissionLogic)
register("accounts/user", UserPermissionLogic)
class RgpdConsentPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, rgpdconsent, *args):
if rgpdconsent.user == user:
return True
return self.admin_permission(user, rgpdconsent, *args)
change = view
register("rgpdconsent", RgpdConsentPermissionLogic)
register("accounts/rgpdconsent", RgpdConsentPermissionLogic)
class GroupPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, group, *args):
if user.is_anonymous:
return False
if user.is_administrator:
return True
if user.is_advisor or user.is_manager:
return True
return self.admin_permission(user, group, *args)
def create(self, user, group, group_data, *args):
if user.is_anonymous:
return False
if user.is_administrator:
return True
if user.is_manager:
if not group_data:
return False
if user.group is not None:
if group is not None:
if group.admin_group.pk == user.group.pk:
return True
return self.admin_permission(user, None, *args)
def change(self, user, group, *args):
if user.is_anonymous:
return False
if user.is_administrator:
return True
if (
user.is_manager
and user.group is not None
and group.admin_group == user.group
):
return True
return self.admin_permission(user, group, *args)
def partial_change(self, user, group, *args):
"""
change only some fiels on group
"""
if user.is_advisor and user.group is not None and group == user.group:
return True
return self.admin_permission(user, group, *args)
register("group", GroupPermissionLogic)
register("accounts/group", GroupPermissionLogic)
class GroupPlacePermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, group, *args):
if user.is_anonymous:
return False
if user.is_expert:
return True
return self.admin_permission(user, group, *args)
register("group_place", GroupPlacePermissionLogic)
register("accounts/group_place", GroupPlacePermissionLogic)
| [
"[email protected]"
]
| |
aacc12eabb0b4eb5e62e7da5668c3ba88bb40c61 | 2f5797309b741938dca213353f042c77825b0936 | /server_run.py | 35559759cb860a0476b02c5e749109bf2aeb1303 | []
| no_license | electramite/RPi_dashboard | 0def396c04ea99a5f8345363e37ffd421dad8067 | 02cb5a959e9ad86e15184283602b10407264cba7 | refs/heads/main | 2022-12-30T13:44:01.199658 | 2020-10-20T08:36:06 | 2020-10-20T08:36:06 | 305,641,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from flask import render_template, url_for, request
import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
trig = 17
echo = 27
GPIO.setup(trig, GPIO.OUT)
GPIO.setup(echo, GPIO.IN)
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
distance = sensor_1()
return render_template("sensor.html", distance=distance)
def sensor_1():
GPIO.output(trig, True)
time.sleep(0.00001)
GPIO.output(trig, False)
    pulse_s = pulse_e = time.time()  # fallbacks in case an echo edge is missed
    while GPIO.input(echo)==0:
        pulse_s = time.time()
    while GPIO.input(echo)==1:
        pulse_e = time.time()
    pulse_d = pulse_e - pulse_s
    d = 34000*pulse_d/2  # speed of sound ~34000 cm/s, halved for the round trip
return int(d)
if __name__ == "__main__":
app.run(host = '0.0.0.0',port=4556,debug=True)
| [
"[email protected]"
]
| |
03692f50ed9e909b7858f410645b5c90ff1c95ed | c385a69705301f50b45d46f71b808654d7450ad6 | /python_wheel/lbj_db/lbj_db/entity/ret_find.py | 23bc1de9ae084c4d4a0213afab1b5e780b3032c5 | []
| no_license | libaojie/python_package | c411c60c84be1f42221f98c5f140486dc5508b21 | 4bb0ab793c119153e9ee476274d8908c23e33a30 | refs/heads/master | 2023-05-26T12:23:07.226332 | 2023-05-22T06:19:06 | 2023-05-22T06:19:06 | 159,101,700 | 0 | 0 | null | 2023-02-16T06:52:26 | 2018-11-26T03:00:49 | Python | UTF-8 | Python | false | false | 1,609 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Comment : result of a paged SQL list query
@Time : 2020/02/08 20:47
@Author : libaojie
@File : ret_find.py
@Software : PyCharm
"""
class RetFind(object):
"""
    List query result
"""
def __init__(self):
        self._page_size = None # current page
        self._page_num = None # number of items per page
        self._page_total = None # total number of pages
        self._total = None # total number of records
        self._data = None # data list
@property
def page_size(self):
"""
        Current page
:return:
"""
return self._page_size
@page_size.setter
def page_size(self, _page_size):
self._page_size = _page_size
@property
def page_num(self):
"""
        Number of items per page
:return:
"""
return self._page_num
@page_num.setter
def page_num(self, _page_num):
self._page_num = _page_num
@property
def page_total(self):
"""
        Total number of pages
:return:
"""
return self._page_total
@page_total.setter
def page_total(self, _page_total):
self._page_total = _page_total
@property
def data(self):
"""
        Data list
:return:
"""
return self._data
@data.setter
def data(self, _data):
self._data = _data
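    # Illustrative usage (a sketch): packaging one page of query results.
    # ret = RetFind()
    # ret.page_size, ret.page_num = 1, 20
    # ret.total = 135
    # ret.page_total = (ret.total + ret.page_num - 1) // ret.page_num  # 7
    # ret.data = rows  # whatever list the SQL query produced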
@property
def total(self):
"""
        Total number of records
:return:
"""
return self._total
@total.setter
def total(self, _total):
self._total = _total | [
"[email protected]"
]
| |
9cc161c306a2b642a3822cfd73e8ce21d28327e7 | 6d26b320905ba93ee02f9ba95a76b3839ae3e2c1 | /archinstaller.py | e09ce0bff0dab7b10df5c2841484901e01a5e273 | []
| no_license | accountDBBackup/arch | 3ecfb39adce321e5874a8963e4c9c923c7d4848e | 3c6d51198746e5bbc769055223297abbeae4e334 | refs/heads/main | 2023-07-06T20:34:36.264595 | 2021-08-05T21:13:04 | 2021-08-05T21:13:04 | 388,939,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | import os
import subprocess
import fileinput
import pwd
import afterchroot
def welcome():
print("Welcome to to the Arch Installer!")
system_clock_sync = "timedatectl set-ntp true"
print(f"Running `{system_clock_sync}` command to sync the system clock!")
subprocess.run(system_clock_sync, shell=True)
def format_disks():
pass
def mount_partitions():
pass
def update_mirrors():
print("Refreshing mirrorlist...")
subprocess.run(
"reflector --latest 30 --sort rate --save /etc/pacman.d/mirrorlist", shell=True)
def install_arch_essentails():
kernels = ["linux", "linux-lts", "linux linux-lts"]
    # input() returns a string, so compare against string choices; testing
    # membership in [1, 2, 3] could never succeed and would loop forever
    while (choice := input("\t(1) linux\n\t(2) linux-lts\n\t(3) both\nChoose a kernel: ")) not in ["1", "2", "3"]:
        pass
    choice = int(choice)
print(f"Installing: {kernels[choice-1].replace(' ', ' and ')}")
subprocess.run(
f"pacstrap /mnt base {kernels[choice -1]} linux-firmware git python", shell=True)
def generate_fstab():
subprocess.run("genfstab -U /mnt >> /mnt/etc/fstab", shell=True)
def chroot():
subprocess.run("arch-chroot /mnt /bin/bash", shell=True)
def main():
afterchroot.main()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
f0704c277601046e7ecff140c4ce76723f895a6f | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/python2.7/test/outstanding_bugs.py | 5a947e5deea9d551dd5f2994869ab7dd70a83e94 | []
| no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | /home/action/.parts/packages/python2/2.7.6/lib/python2.7/test/outstanding_bugs.py | [
"[email protected]"
]
| |
abe1990e419bae2b1a5e5c3b386ec31e385780da | 679bf42c14ef2b7ea5d682ad6f1ffc6e488028c0 | /server_tcp.py | 1b23cfc1e10d15eb48863613e93cb0d2bb774fa0 | []
| no_license | pers9727/lrs_protocol | 0b67086adbcaae271037989fd3e28667f30b72bc | 330d758fc3d7546709e15714a0a303a8320d1f8e | refs/heads/master | 2023-02-05T03:41:57.031536 | 2020-12-28T12:08:54 | 2020-12-28T12:08:54 | 324,732,792 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,907 | py | import socket
import threading
from _thread import *
import pickle
import os
import work_with_files
module_file = '/home/roman/lrs_protocol/module_information/'
commands_file = '/home/roman/lrs_protocol/commands.txt'
ip_type_file = f'{os.getcwd()}/ip_type.txt'
print_lock = threading.Lock()
port = 65432
def threaded(conn, ip_addr):
ip_family = '10.42.43.'
conn.send(pickle.dumps('0'))
module_data = f'{module_file}{ip_addr[len(ip_family):]}.txt'
while True:
# Get data from client
data_to_receive = conn.recv(4096)
# If data is None -> close connection
if not data_to_receive:
print('[LOG] Connection closed')
print_lock.release()
break
# Data is getting
else:
# Write new data to new_data
new_data = pickle.loads(data_to_receive)
# Write ip_type to ip_type_data
ip_type_data = f'{ip_addr} {new_data[0]}'
            # Append the ip/type pair only if it is not already recorded in
            # the file (read the file's lines; list() over the path string
            # only iterated its characters)
            existing_ip_types = []
            if os.path.exists(ip_type_file):
                with open(ip_type_file, 'r', encoding='utf-8') as ip_type_f:
                    existing_ip_types = [l.rstrip('\n') for l in ip_type_f]
            if ip_type_data not in existing_ip_types:
                work_with_files.write(ip_type_file, ip_type_data + '\n', 'a')
            # Write commands to the commands file, skipping commands that are
            # already present (read the file's lines; the old list() over a
            # hard-coded path string only iterated its characters)
            existing_commands = []
            if os.path.exists(commands_file):
                with open(commands_file, 'r', encoding='utf-8') as commands_f:
                    existing_commands = [c.rstrip('\n') for c in commands_f]
            for i in new_data[2]:
                command = str(i).rstrip('\n')
                if command and command not in existing_commands:
                    work_with_files.write(commands_file, command + '\n', 'a')
                    existing_commands.append(command)
# Write new_data to .txt file for new module
for i in new_data:
work_with_files.write(module_data, str(i) + '\n', 'a')
# Create file with ip_type list of modules
''' if os.path.exists(ip_type_file) and os.stat(ip_type_file).st_size > 0:
work_with_files.write(ip_type_file, ip_type_data, 'a')
else:
work_with_files.write(ip_type_file, ip_type_data, 'w')'''
conn.close()
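# A minimal sketch of a compatible client for the protocol handled by
# threaded() above. The payload layout [module_type, module_info, commands]
# is an assumption inferred from the uses of new_data[0] and new_data[2];
# adapt it to the real module message format.
def example_module_client(server_ip='10.42.43.2', port=65432):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((server_ip, port))
        # the server sends a pickled '0' handshake on connect
        pickle.loads(sock.recv(1024))
        payload = ['lrs_module', 'fw 1.0', ['cmd_status', 'cmd_reset']]
        sock.sendall(pickle.dumps(payload))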
def main_client(port):
host = ''
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("[LOG] Socket created")
try:
sock.bind((host, port))
except socket.error as msg:
print("[LOG] ", msg)
print("[LOG] Socket binded to port: ", port)
sock.listen(5)
while True:
conn, addr = sock.accept()
print_lock.acquire()
print('[LOG] Connected with client: ', addr[0])
start_new_thread(threaded, (conn, addr[0]))
if __name__ == '__main__':
try:
main_client(port)
except KeyboardInterrupt:
print('[LOG] Server stopped! Exit from protocol')
exit()
| [
"[email protected]"
]
| |
2d800fba44b77304483de1a550b1a877daeeda5d | fe61c7ac89c2a486066d36bdc99b70c3a7098e59 | /Message.py | 1bbc1d8e16a6b71e8ede7e0835d3272922516cdc | []
| no_license | figarodoe/MauMau | 9f149508a1b504c88847a2b5da5fa495fd01a09c | 592bb1c6339735383a639a9c4e333e491fb2f611 | refs/heads/master | 2021-01-12T02:22:04.037120 | 2017-01-10T06:46:58 | 2017-01-10T06:46:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | '''Markup but not'''
def Clear():
import os
from sys import platform
if platform == "win32":
os.system('cls') # For Windows
else:
os.system('clear') # For Linux/OS X
| [
"[email protected]"
]
| |
0eab4d6b9baf06f0c6515a1c93b578c02de52a07 | ce68ff8c06a47cb5a26dce7f42f5a80c35ef5409 | /06-Faces/detection.py | 70a971742b20deba4af9dda6be9a17a70b01ce43 | []
| no_license | lyukov/computer_vision_intro | 9421062c0ad77ab96b2c79e5879744b78f4c0c54 | 008ed8705bd98259691110413579a5afd87e0ab5 | refs/heads/master | 2022-03-27T05:50:18.456057 | 2019-12-17T12:02:20 | 2019-12-17T12:02:20 | 225,220,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | from skimage.io import imread, imshow
import numpy as np
import pandas as pd
import os
from skimage.transform import resize
from skimage import img_as_float
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Dense, Activation, Flatten, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
import keras
def normalize(img):
img -= img.mean()
img /= np.sqrt((img**2).mean())
def prepare_img(image, tg_size):
img = resize(image, (tg_size, tg_size))
if len(img.shape) == 2:
normalize(img)
return np.array([img, img, img]).transpose((1,2,0))
else:
for i in range(img.shape[2]):
normalize(img[:,:,i])
return img
def get_data_shapes_filenames(directory, tg_size=128):
filenames = sorted(os.listdir(directory))
result = np.zeros((len(filenames), tg_size, tg_size, 3))
shapes = np.zeros((len(filenames), 2))
for i, filename in enumerate(filenames):
file_path = os.path.join(directory, filename)
img = img_as_float(imread(file_path))
prepared = prepare_img(img, tg_size)
result[i] = prepared
shapes[i] = img.shape[:2]
return result, shapes, filenames
def train_detector(train_gt, train_img_dir, fast_train=True):
y = pd.DataFrame(train_gt).transpose().values
data, shapes, filenames = get_data_shapes_filenames(train_img_dir)
model = Sequential([
Convolution2D(
64, (3, 3),
activation='relu',
input_shape=(128, 128, 3),
kernel_initializer='normal'),
MaxPooling2D(
pool_size=(2,2),
strides=(2,2)),
Convolution2D(
128, (3, 3),
activation='relu',
kernel_initializer='normal'),
MaxPooling2D(
pool_size=(2,2),
strides=(2,2)),
Convolution2D(
256, (3, 3),
activation='relu',
kernel_initializer='normal'),
MaxPooling2D(
pool_size=(2,2),
strides=(2,2)),
Flatten(),
Dense(64, activation='relu'),
Dropout(0.25),
Dense(28)
])
adam = Adam(lr=0.0003)
model.compile(loss='mean_squared_error',
optimizer=adam,
metrics=['mean_absolute_error'])
model.fit(data, y, epochs=1)
return model
# returns dict: {filename -> [number]}
def detect(model, test_img_dir):
data, shapes, filenames = get_data_shapes_filenames(test_img_dir)
answers = []
batch_size = 500
for i in range((len(data) + batch_size - 1) // batch_size):
answers.extend(model.predict(data[i*batch_size : min((i+1)*batch_size, len(data))]))
return {filenames[i] : answers[i] * shapes[i, 0] for i in range(len(filenames))}
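# A minimal usage sketch (the paths and ground-truth layout are assumptions;
# train_gt must be a mapping {filename -> 28 keypoint coordinates}, as
# implied by the Dense(28) output layer and pd.DataFrame(train_gt) above):
#
#   model = train_detector(train_gt, 'data/train/images')
#   predictions = detect(model, 'data/test/images')
#   # predictions maps each filename to 28 predicted coordinates,
#   # rescaled by the original image height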
| [
"[email protected]"
]
| |
e79168b08e0286fa92b3cb329528a55e4ca1e1de | 94d653498dc75690b847df9f560ee75a1cb177d5 | /calculator.py | 50f1e373bc4efee1b8e9fea4a26efd6d5b10ca1b | []
| no_license | weelis/shiyanlouplus | 28bf09eb422ab1cb73b363ce6df4b36f945ed124 | 9ba7b9006221d017656670e620d12b1f4c2909fc | refs/heads/master | 2020-04-27T23:36:07.823231 | 2019-03-10T06:45:56 | 2019-03-10T06:45:56 | 174,782,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | #!/usr/bin/env python3
import sys
try:
salary = int(sys.argv[1])
salary_tax = salary - 3500
if salary_tax <= 1500:
tax = salary_tax * 0.03
elif salary_tax > 1500 and salary_tax <= 4500:
tax = salary_tax * 0.1 - 105
elif salary_tax > 4500 and salary_tax <= 9000:
tax = salary_tax * 0.2 - 555
elif salary_tax > 9000 and salary_tax <= 35000:
tax = salary_tax * 0.25 - 1005
    elif salary_tax > 35000 and salary_tax <= 55000:
        tax = salary_tax * 0.3 - 2755
    elif salary_tax > 55000 and salary_tax <= 80000:
        tax = salary_tax * 0.35 - 5505
else:
tax = salary_tax * 0.45 - 13505
print(format(tax, ".2f"))
except:
print("Parameter Error")
| [
"[email protected]"
]
| |
a6d93d5e249b23f47e659301e4c8403aef94ee45 | 63f1c3161651ba76434ef241eed933788a0836c5 | /autorally/autorally_core/src/chronyStatus/chronyStatus.py | 0fda2de26ce3e42f0713501d7722d321a597a7cd | []
| no_license | 27Apoorva/RBE502Project | 0bd64706a5ff26cb791c11eff96d75bd41e024be | 056330cd91667a3eeceddb668672cf4e5e2bc3cd | refs/heads/master | 2021-08-30T04:30:09.326153 | 2017-12-13T04:00:38 | 2017-12-13T04:00:38 | 112,425,430 | 1 | 0 | null | 2017-12-16T01:50:43 | 2017-11-29T04:13:25 | Makefile | UTF-8 | Python | false | false | 5,012 | py | #!/usr/bin/env python
# Software License Agreement (BSD License)
# Copyright (c) 2016, Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@package chronyStatus
Acquires and publishes to /diagnostics the current state of chrony time sychronization
and information about available timesyn sources. On startup the node verifies that the
installed version of chrony is at least chronyMinVersion.
"""
import os
import socket
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from subprocess import check_output
import subprocess
"""
@return chrony version number, if available
Get current chrony version number by using chronyc interface
"""
def checkChronyVersion():
try:
        versionText = check_output("chronyc -v", shell=True)
lines = versionText.split(' ')
if lines[2] == 'version':
return lines[3]
except subprocess.CalledProcessError as e:
rospy.logerr('chronyStatus: subprocess error:' + e.output)
except ValueError:
rospy.logerr('chrony version check failed, version unkown')
"""
@param status the diganostic array to add information to
Queries and adds to diganostics the current tracking status of chronyd using chronyc
"""
def getTracking(status):
try:
        trackingText = check_output("chronyc tracking", shell=True)
for line in trackingText.split('\n'):
if len(line):
#split on first : to separate data field name from value because some values can have : in them
info = line.split(':', 1)
status.values.append(KeyValue(key=info[0], value=info[1]))
except subprocess.CalledProcessError as e:
rospy.logerr(e.output)
status.values.append(KeyValue(key=e.output, value=chr(2)))
"""
@param status the diagnostic array to add information to
Queries and adds to diagnostics the current sources information from chronyd using chronyc
"""
def getSources(status):
try:
        sourcesText = check_output("chronyc sources", shell=True)
lines = sourcesText.split('\n')
status.level = 1
for line in lines[3:]:
if len(line):
tok = line.split()
text = 'ModeState:' + tok[0] + ' Stratum:' + tok[2] + ' Poll:' + tok[3] + ' Reach:' + tok[4] +\
' LastRx:' + tok[5] + ' Last Sample:' + ''.join(tok[6:])
status.values.append(KeyValue(key='source '+tok[1], value=text))
#M = tok[0][0]
#S = tok[0][1]
#all is good if we are synchronizing to a source
if tok[0][1] == '*':
status.level = 0
#print M, S
except subprocess.CalledProcessError as e:
rospy.logerr(e.output)
status.values.append(KeyValue(key=e.output, value=chr(2)))
if __name__ == '__main__':
hostname = socket.gethostname()
rospy.init_node('chronyStatus_'+hostname)
pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=1, latch=True)
array = DiagnosticArray()
status = DiagnosticStatus(name='ChronyStatus',\
level=0,\
hardware_id=hostname)
array.status = [status]
rate = rospy.Rate(0.2) # query and publish chrony information once every 5 seconds
chronyVersion = checkChronyVersion()
#chronyMinVersion = 1.29
#publish error and exit if chronyMinVersion is not satisfied
#if chronyVersion < chronyMinVersion:
# rospy.logerr('ChronyStatus requires chrony version ' + str(chronyMinVersion) + \
# ' or greater, version ' + str(chronyVersion) + ' detected, exiting')
#else:
while not rospy.is_shutdown():
status.values = []
status.values.append(KeyValue(key='chrony version', value=chronyVersion) )
getTracking(status)
getSources(status)
pub.publish(array)
rate.sleep()
| [
"[email protected]"
]
| |
d19d3271cd6125027f3d50770dd7a814ce0ebf43 | af9d2aa777f9a311f309f1036ebc141e7f936c2f | /core/migrations/0002_auto_20200929_1344.py | fc83f09aaa626ed540f263173034c781930cf548 | []
| no_license | oopaze/testes-unitarios-django | d20c0de8f565c2f0e3f557159af8a6912d401fc9 | 1b31b9cfa3641ffa4cf5dcc1d9fb8299c9b27734 | refs/heads/master | 2022-12-24T13:32:36.460162 | 2020-10-07T04:50:37 | 2020-10-07T04:50:37 | 301,927,563 | 0 | 0 | null | 2020-10-07T04:50:38 | 2020-10-07T04:35:34 | Python | UTF-8 | Python | false | false | 353 | py | # Generated by Django 3.1.1 on 2020-09-29 13:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='sorvete',
old_name='unidade',
new_name='unidades',
),
]
| [
"[email protected]"
]
| |
1670cf15584af1d803a46c989f7bbbd0b31521a2 | acb8eb49908d0d8417dfd08ddb5340f938d34214 | /pretrain_data/aliagn_trained_vecs.py | f066a1d3594f355d74784e4b76da16e720f71a8b | []
| no_license | zhuxiangru/multimudal-bert | 11577b783150754ff3e01bd03d915f51a7407ec2 | ef05fccb2315a6feaadab5f162a72a105f06092a | refs/heads/master | 2022-10-31T02:34:53.874507 | 2020-06-15T09:23:27 | 2020-06-15T09:23:27 | 268,139,353 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | import sys
from urllib import parse
import os
import json
from multiprocessing import Pool
import html5lib
import time
import re
def get_entity_id_dict(infilename):
entity2id_dict = {}
index = 0
with open(infilename, "r", encoding = "utf-8") as infile:
for line in infile:
line_list = line.strip().split()
if line_list[0] not in entity2id_dict:
entity2id_dict[line_list[0]] = None
return entity2id_dict
def generate_alias_entity2index_file(entity2id_dict, alias_entity_file, \
output_alias_name2uri_file, output_alias_uri2index_file, type_option = ""):
index = -1
with open(output_alias_name2uri_file, "w", encoding = "utf-8") as output_name2uri_file:
with open(output_alias_uri2index_file, "w", encoding = "utf-8") as output_uri2index_file:
with open(alias_entity_file, "r", encoding = "utf-8") as infile:
for line in infile:
index += 1
if index == 0:
continue
line_list = line.strip().split()
if line_list[0] in entity2id_dict:
output_name2uri_file.write("%s\t%s%s\n" % (line_list[0], type_option, str(index)))
output_uri2index_file.write("%s%s\t%s\n" % (type_option, str(index), str(index - 1)))
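# Assumed file layouts, inferred from the split() calls above:
#   all_entity2id file: '<entity> <id>' per line
#   alias_entity file:  a header line, then one alias entity per line
# An alias on file line i (the header is line 0) is written out as
# '<alias>\t<type_option><i>' and '<type_option><i>\t<i-1>'.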
if __name__ == '__main__':
if len(sys.argv) < 6:
print ("Usage: python3 aliagn_trained_vecs.py all_entity2id_infilename alias_entity_file output_name2uri_file output_uri2index_file type_option")
exit(0)
all_entity2id_infilename = sys.argv[1]
alias_entity_file = sys.argv[2]
output_name2uri_file = sys.argv[3]
output_uri2index_file = sys.argv[4]
type_option = sys.argv[5]
if type_option == "entity":
type_option = "Q"
elif type_option == "image":
type_option = "I"
else:
type_option = ""
all_entity2id_dict = get_entity_id_dict(all_entity2id_infilename)
generate_alias_entity2index_file(all_entity2id_dict, alias_entity_file, \
output_name2uri_file, output_uri2index_file, type_option) | [
"[email protected]"
]
| |
cccb674d3a60d939b4eefbb72d10130d2db2932b | 96d14f70deb5164e294475402ef50f6e39712a1c | /ex27.py | 3483e6dea39c2be6c4ec4c23d9e6292bed2e1cf0 | []
| no_license | jagdishrenuke/Project_python | bafc1b4cf058d36a990c9b4857b0cd635492d919 | 79284d5f6f05c24aff7181c53c8d8318739f86db | refs/heads/master | 2020-04-23T12:22:55.461500 | 2015-03-16T17:25:03 | 2015-03-16T17:25:03 | 32,340,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py |
ten_things = "apple orange crows telephone light sugar"
print "wait there are not 10 things in that list."
stuff = ten_things.split(' ')
more_stuff = ["day","night","song","girl","frisbee","cool","banana","boy"]
while len(stuff) != 10:
next_one = more_stuff.pop()
print "Adding: ",next_one
stuff.append(next_one)
print "there are %d items now." % len(stuff)
print "We have : ",stuff
print "Let's do something with the stuff..."
print stuff[1]
print stuff[-1]
print stuff.pop()
print ' '.join(stuff)
print '#'.join(stuff[3:5])
| [
"[email protected]"
]
| |
c5599204c2088d413cd3a990459617d0b80417da | cefa2d235896b31f84456160787eebf55f3ccc84 | /Generate_code.py | 180024b5ee42331eefcc14afe34458337c3410be | []
| no_license | Unbeaten123/Take-others-as-mine | eaebb4bd5595a81183a106a3968fc235955e8998 | 26227cd558b52259dce45fb7d586a5fe172c44aa | refs/heads/master | 2021-01-10T15:20:53.147238 | 2016-04-27T14:55:21 | 2016-04-27T14:55:21 | 53,040,946 | 0 | 0 | null | 2016-04-04T06:06:12 | 2016-03-03T10:13:29 | JavaScript | UTF-8 | Python | false | false | 720 | py | from elaphe import barcode
def get_code39(info):
bc = barcode('code39', info, options=dict(includetext=True), scale=3, margin=10)
bc.save('code39.png', quality=95)
def get_QRcode(info):
bc = barcode('QRcode', info, options=dict(version=9, eclevel='M'), margin=10, scale=5)
bc.save('QRcode.png', quality=95)
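# Usage sketch: get_code39('ABC-123') writes code39.png and
# get_QRcode('hello world') writes QRcode.png in the current directory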
choice = raw_input('''Choose what kind of code you want to generate(input a number):
1.Code39
2.QRcode
''')
info = raw_input("Input a string that you want to generate: ")
if int(choice)==1:
try:
get_code39(info)
print "Done!"
except:
print "Error occurred!"
else:
try:
get_QRcode(info)
print "Done!"
except:
print "Error occurred!"
| [
"[email protected]"
]
| |
b3fb5072be2c9803b039ffc66f3bf3a06a4247b1 | 4755dabdcff6a45b9c15bf9ea814c6b8037874bd | /devel/lib/python2.7/dist-packages/snakebot_position_control/msg/__init__.py | 7e50b3d802aa4cf5d4063bde91254d3fba75ff3c | []
| no_license | Rallstad/RobotSnake | 676a97bdfde0699736d613e73d539929a0c2b492 | 37ee6d5af0458b855acf7c2b83e0ee17833dbfd1 | refs/heads/master | 2023-01-03T05:46:46.268422 | 2018-05-27T16:01:47 | 2018-05-27T16:01:47 | 308,665,980 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | from ._PositionControlEffort import *
| [
"[email protected]"
]
| |
976cddf10f6864ba5c9a7a761545d47337c3af20 | 4789ee577801e55bb6209345df6ddd1adff58aa9 | /skyline/boundary/boundary_alerters.py | 0f63c5c74e6d30f89ea03ffca79842f2fafdab45 | [
"MIT"
]
| permissive | bastienboutonnet/skyline | 76767fdad5eb9b9ee9bb65bfcee05e2551061fbe | 7f19fcc7ac1177b4a0a4663d6e645be63ceea452 | refs/heads/master | 2023-04-25T01:57:17.955874 | 2021-04-11T09:20:30 | 2021-04-11T09:20:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62,389 | py | from __future__ import division
import logging
import traceback
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# hashlib not used
# import hashlib
from smtplib import SMTP
# @added 20200122: Feature #3396: http_alerter
from ast import literal_eval
import requests
import boundary_alerters
try:
import urllib2
except ImportError:
import urllib.request
import urllib.error
# @added 20191023 - Task #3290: Handle urllib2 in py3
# Branch #3262: py3
# Use urlretrieve
try:
import urllib2 as urllib
except ImportError:
from urllib import request as urllib
import re
from requests.utils import quote
from time import time
import datetime
import os.path
import sys
# @added 20181126 - Task #2742: Update Boundary
# Feature #2618: alert_slack
# Added dt, redis, gmtime and strftime
import datetime as dt
# import redis
from time import (gmtime, strftime)
# @added 20201127 - Feature #3820: HORIZON_SHARDS
from os import uname
python_version = int(sys.version_info[0])
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# charset no longer used
# from email import charset
if python_version == 2:
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
if python_version == 3:
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
if True:
import settings
# @added 20181126 - Task #2742: Update Boundary
# Feature #2034: analyse_derivatives
# Feature #2618: alert_slack
from skyline_functions import (
write_data_to_file, in_list,
is_derivative_metric, get_graphite_graph_image,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn_decoded,
# @modified 20191105 - Branch #3002: docker
# Branch #3262: py3
get_graphite_port, get_graphite_render_uri, get_graphite_custom_headers,
# @added 20200122: Feature #3396: http_alerter
get_redis_conn,
# @added 20200825 - Feature #3704: Add alert to anomalies
add_panorama_alert,
# @added 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
encode_graphite_metric_name)
# @added 20201127 - Feature #3820: HORIZON_SHARDS
try:
HORIZON_SHARDS = settings.HORIZON_SHARDS.copy()
except:
HORIZON_SHARDS = {}
this_host = str(uname()[1])
HORIZON_SHARD = 0
if HORIZON_SHARDS:
HORIZON_SHARD = HORIZON_SHARDS[this_host]
skyline_app = 'boundary'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
"""
Create any alerter you want here. The function is invoked from trigger_alert.
7 arguments will be passed in as strings:
datapoint, metric_name, expiration_time, metric_trigger, algorithm,
metric_timestamp and alert_threshold (alert_http is additionally passed
the alerter name as its first argument)
"""
# FULL_DURATION to hours so that Boundary surfaces the relevant timeseries data
# in the graph
try:
full_duration_seconds = int(settings.FULL_DURATION)
except:
full_duration_seconds = 86400
full_duration_in_hours = full_duration_seconds / 60 / 60
try:
graphite_previous_hours = int(settings.BOUNDARY_SMTP_OPTS['graphite_previous_hours'])
except:
graphite_previous_hours = full_duration_in_hours
try:
graphite_graph_line_color = int(settings.BOUNDARY_SMTP_OPTS['graphite_graph_line_color'])
except:
graphite_graph_line_color = 'pink'
# @added 20200122 - Branch #3002: docker
try:
DOCKER_FAKE_EMAIL_ALERTS = settings.DOCKER_FAKE_EMAIL_ALERTS
except:
DOCKER_FAKE_EMAIL_ALERTS = False
def alert_smtp(datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold):
sender = settings.BOUNDARY_SMTP_OPTS['sender']
matched_namespaces = []
for namespace in settings.BOUNDARY_SMTP_OPTS['recipients']:
CHECK_MATCH_PATTERN = namespace
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(metric_name)
if pattern_match:
matched_namespaces.append(namespace)
matched_recipients = []
for namespace in matched_namespaces:
for recipients in settings.BOUNDARY_SMTP_OPTS['recipients'][namespace]:
matched_recipients.append(recipients)
def unique_noHash(seq):
seen = set()
return [x for x in seq if str(x) not in seen and not seen.add(str(x))]
recipients = unique_noHash(matched_recipients)
# Backwards compatibility
if type(recipients) is str:
recipients = [recipients]
# @added 20180524 - Task #2384: Change alerters to cc other recipients
# The alerters did send an individual email to each recipient. This would be
# more useful if one email was sent with the first smtp recipient being the
# to recipient and the subsequent recipients were add in cc.
primary_recipient = False
cc_recipients = False
if recipients:
for i_recipient in recipients:
if not primary_recipient:
primary_recipient = str(i_recipient)
if primary_recipient != i_recipient:
if not cc_recipients:
cc_recipients = str(i_recipient)
else:
new_cc_recipients = '%s,%s' % (str(cc_recipients), str(i_recipient))
cc_recipients = str(new_cc_recipients)
logger.info(
'alert_smtp - will send to primary_recipient :: %s, cc_recipients :: %s' %
(str(primary_recipient), str(cc_recipients)))
alert_algo = str(algorithm)
alert_context = alert_algo.upper()
# @added 20191008 - Feature #3194: Add CUSTOM_ALERT_OPTS to settings
try:
main_alert_title = settings.CUSTOM_ALERT_OPTS['main_alert_title']
except:
main_alert_title = 'Skyline'
try:
app_alert_context = settings.CUSTOM_ALERT_OPTS['boundary_alert_heading']
except:
app_alert_context = 'Boundary'
# @modified 20191002 - Feature #3194: Add CUSTOM_ALERT_OPTS to settings
# Use alert_context
# unencoded_graph_title = 'Skyline Boundary - %s at %s hours - %s - %s' % (
# alert_context, graphite_previous_hours, metric_name, datapoint)
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# unencoded_graph_title = '%s %s - %s at %s hours - %s - %s' % (
# main_alert_title, app_alert_context, alert_context, graphite_previous_hours, metric_name, datapoint)
unencoded_graph_title = '%s %s - %s %s %s times - %s' % (
main_alert_title, app_alert_context, alert_context, str(metric_trigger),
str(alert_threshold), str(datapoint))
# @added 20181126 - Task #2742: Update Boundary
# Feature #2034: analyse_derivatives
    # Added derivative functions to convert the values of metrics strictly
    # increasing monotonically to their derivative products in alert graphs and
# specify it in the graph_title
known_derivative_metric = False
try:
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Use get_redis_conn_decoded
# if settings.REDIS_PASSWORD:
# # @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# # Branch #3262: py3
# # REDIS_ALERTER_CONN = redis.StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# REDIS_ALERTER_CONN = redis.StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH, charset='utf-8', decode_responses=True)
# else:
# # REDIS_ALERTER_CONN = redis.StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
# REDIS_ALERTER_CONN = redis.StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH, charset='utf-8', decode_responses=True)
REDIS_ALERTER_CONN = get_redis_conn_decoded(skyline_app)
except:
logger.error('error :: alert_smtp - redis connection failed')
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
try:
derivative_metrics = list(REDIS_ALERTER_CONN.smembers('derivative_metrics'))
except:
derivative_metrics = []
redis_metric_name = '%s%s' % (settings.FULL_NAMESPACE, str(metric_name))
if redis_metric_name in derivative_metrics:
known_derivative_metric = True
if known_derivative_metric:
try:
non_derivative_monotonic_metrics = settings.NON_DERIVATIVE_MONOTONIC_METRICS
except:
non_derivative_monotonic_metrics = []
skip_derivative = in_list(redis_metric_name, non_derivative_monotonic_metrics)
if skip_derivative:
known_derivative_metric = False
known_derivative_metric = is_derivative_metric(skyline_app, metric_name)
if known_derivative_metric:
# @modified 20191002 - Feature #3194: Add CUSTOM_ALERT_OPTS to settings
# unencoded_graph_title = 'Skyline Boundary - %s at %s hours - derivative graph - %s - %s' % (
# alert_context, graphite_previous_hours, metric_name, datapoint)
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# unencoded_graph_title = '%s %s - %s at %s hours - derivative graph - %s - %s' % (
# main_alert_title, app_alert_context, alert_context, graphite_previous_hours, metric_name, datapoint)
unencoded_graph_title = '%s %s - %s %s %s times - derivative graph - %s' % (
main_alert_title, app_alert_context, alert_context, str(metric_trigger),
str(alert_threshold), str(datapoint))
graph_title_string = quote(unencoded_graph_title, safe='')
graph_title = '&title=%s' % graph_title_string
# @added 20181126 - Bug #2498: Incorrect scale in some graphs
# Task #2742: Update Boundary
# If -xhours is used the scale is incorrect if x hours > than first
# retention period, passing from and until renders the graph with the
# correct scale.
graphite_port = '80'
if settings.GRAPHITE_PORT != '':
graphite_port = str(settings.GRAPHITE_PORT)
until_timestamp = int(time())
from_seconds_ago = graphite_previous_hours * 3600
from_timestamp = until_timestamp - from_seconds_ago
graphite_from = dt.datetime.fromtimestamp(int(from_timestamp)).strftime('%H:%M_%Y%m%d')
logger.info('graphite_from - %s' % str(graphite_from))
graphite_until = dt.datetime.fromtimestamp(int(until_timestamp)).strftime('%H:%M_%Y%m%d')
logger.info('graphite_until - %s' % str(graphite_until))
# @modified 20191022 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# graphite_target = 'target=cactiStyle(%s)'
# @added 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
encoded_graphite_metric_name = encode_graphite_metric_name(skyline_app, metric_name)
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
# graphite_target = 'target=cactiStyle(%s,%%27si%%27)' % metric_name
graphite_target = 'target=cactiStyle(%s,%%27si%%27)' % encoded_graphite_metric_name
if known_derivative_metric:
# @modified 20191022 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# graphite_target = 'target=cactiStyle(nonNegativeDerivative(%s))'
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
# graphite_target = 'target=cactiStyle(nonNegativeDerivative(%s),%%27si%%27)' % metric_name
graphite_target = 'target=cactiStyle(nonNegativeDerivative(%s),%%27si%%27)' % encoded_graphite_metric_name
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# link = '%s://%s:%s/render/?from=%s&until=%s&%s%s%s&colorList=%s' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, graphite_port,
# str(graphite_from), str(graphite_until), graphite_target,
# settings.GRAPHITE_GRAPH_SETTINGS, graph_title,
# graphite_graph_line_color)
link = '%s://%s:%s/%s/?from=%s&until=%s&%s%s%s&colorList=%s' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
graphite_port, settings.GRAPHITE_RENDER_URI, str(graphite_from),
str(graphite_until), graphite_target, settings.GRAPHITE_GRAPH_SETTINGS,
graph_title, graphite_graph_line_color)
content_id = metric_name
image_data = None
image_file = '%s/%s.%s.%s.alert_smtp.png' % (
settings.SKYLINE_TMP_DIR, skyline_app, str(until_timestamp),
metric_name)
if settings.BOUNDARY_SMTP_OPTS.get('embed-images'):
image_data = get_graphite_graph_image(skyline_app, link, image_file)
if settings.BOUNDARY_SMTP_OPTS.get('embed-images_disabled3290'):
# @modified 20191021 - Task #3290: Handle urllib2 in py3
# Branch #3262: py3
if python_version == 2:
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
# image_data = urllib2.urlopen(link).read() # nosec
image_data = None
except urllib2.URLError:
image_data = None
if python_version == 3:
try:
# image_data = urllib.request.urlopen(link).read() # nosec
image_data = None
except:
logger.error(traceback.format_exc())
logger.error('error :: boundary_alerters :: alert_smtp :: failed to urlopen %s' % str(link))
image_data = None
# If we failed to get the image or if it was explicitly disabled,
# use the image URL instead of the content.
if image_data is None:
img_tag = '<img src="%s"/>' % link
else:
img_tag = '<img src="cid:%s"/>' % content_id
# @modified 20191002 - Feature #3194: Add CUSTOM_ALERT_OPTS to settings
# body = '%s :: %s <br> Next alert in: %s seconds <br> skyline Boundary alert - %s <br><a href="%s">%s</a>' % (
# datapoint, metric_name, expiration_time, alert_context, link, img_tag)
body = '%s :: %s <br> Next alert in: %s seconds <br> %s %s alert - %s <br><a href="%s">%s</a>' % (
main_alert_title, app_alert_context, expiration_time, datapoint, metric_name, alert_context, link, img_tag)
# @added 20200122 - Branch #3002: docker
# Do not try to alert if the settings are default
send_email_alert = True
if 'your_domain.com' in str(sender):
logger.info('alert_smtp - sender is not configured, not sending alert')
send_email_alert = False
if 'your_domain.com' in str(primary_recipient):
logger.info('alert_smtp - sender is not configured, not sending alert')
send_email_alert = False
if 'example.com' in str(sender):
logger.info('alert_smtp - sender is not configured, not sending alert')
send_email_alert = False
if 'example.com' in str(primary_recipient):
logger.info('alert_smtp - sender is not configured, not sending alert')
send_email_alert = False
if DOCKER_FAKE_EMAIL_ALERTS:
logger.info('alert_smtp - DOCKER_FAKE_EMAIL_ALERTS is set to %s, not executing SMTP command' % str(DOCKER_FAKE_EMAIL_ALERTS))
send_email_alert = False
# @added 20200122 - Feature #3406: Allow for no_email SMTP_OPTS
no_email = False
if str(sender) == 'no_email':
send_email_alert = False
no_email = True
if str(primary_recipient) == 'no_email':
send_email_alert = False
no_email = True
if no_email:
logger.info('alert_smtp - no_email is set in BOUNDARY_SMTP_OPTS, not executing SMTP command')
# @modified 20180524 - Task #2384: Change alerters to cc other recipients
# Do not send to each recipient, send to primary_recipient and cc the other
# recipients, thereby sending only one email
# for recipient in recipients:
# @modified 20200122 - Feature #3406: Allow for no_email SMTP_OPTS
# if primary_recipient:
if primary_recipient and send_email_alert:
logger.info(
'alert_smtp - will send to primary_recipient :: %s, cc_recipients :: %s' %
(str(primary_recipient), str(cc_recipients)))
msg = MIMEMultipart('alternative')
# @modified 20191002 - Feature #3194: Add CUSTOM_ALERT_OPTS to settings
# msg['Subject'] = '[Skyline alert] ' + 'Boundary ALERT - ' + alert_context + ' - ' + datapoint + ' - ' + metric_name
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# msg['Subject'] = '[' + main_alert_title + ' alert] ' + app_alert_context + ' ALERT - ' + alert_context + ' - ' + datapoint + ' - ' + metric_name
        email_subject = '[%s alert] %s ALERT - %s - %s' % (
            main_alert_title, app_alert_context, alert_context, metric_name)
msg['Subject'] = email_subject
msg['From'] = sender
# @modified 20180524 - Task #2384: Change alerters to cc other recipients
# msg['To'] = recipient
msg['To'] = primary_recipient
# @added 20180524 - Task #2384: Change alerters to cc other recipients
# Added Cc
if cc_recipients:
msg['Cc'] = cc_recipients
msg.attach(MIMEText(body, 'html'))
if image_data is not None:
# msg_attachment = MIMEImage(image_data)
fp = open(image_file, 'rb')
msg_attachment = MIMEImage(fp.read())
fp.close()
msg_attachment.add_header('Content-ID', '<%s>' % content_id)
msg.attach(msg_attachment)
s = SMTP('127.0.0.1')
# @modified 20180524 - Task #2384: Change alerters to cc other recipients
# Send to primary_recipient and cc_recipients
# s.sendmail(sender, recipient, msg.as_string())
try:
if cc_recipients:
s.sendmail(sender, [primary_recipient, cc_recipients], msg.as_string())
else:
s.sendmail(sender, primary_recipient, msg.as_string())
except:
logger.error(traceback.format_exc())
logger.error(
'error :: alert_smtp - could not send email to primary_recipient :: %s, cc_recipients :: %s' %
(str(primary_recipient), str(cc_recipients)))
s.quit()
# @added 20200825 - Feature #3704: Add alert to anomalies
if settings.PANORAMA_ENABLED:
added_panorama_alert_event = add_panorama_alert(skyline_app, int(metric_timestamp), metric_name)
if not added_panorama_alert_event:
logger.error(
'error :: failed to add Panorama alert event - panorama.alert.%s.%s' % (
str(metric_timestamp), metric_name))
def alert_pagerduty(datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold):
if settings.PAGERDUTY_ENABLED:
import pygerduty
pager = pygerduty.PagerDuty(settings.BOUNDARY_PAGERDUTY_OPTS['subdomain'], settings.BOUNDARY_PAGERDUTY_OPTS['auth_token'])
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# pager.trigger_incident(settings.BOUNDARY_PAGERDUTY_OPTS['key'], 'Anomalous metric: %s (value: %s) - %s' % (metric_name, datapoint, algorithm))
pager.trigger_incident(settings.BOUNDARY_PAGERDUTY_OPTS['key'], 'Anomalous metric: %s (value: %s) - %s %s %s times' % (
metric_name, str(datapoint), algorithm, str(metric_trigger),
str(alert_threshold)))
# @added 20200825 - Feature #3704: Add alert to anomalies
if settings.PANORAMA_ENABLED:
added_panorama_alert_event = add_panorama_alert(skyline_app, int(metric_timestamp), metric_name)
if not added_panorama_alert_event:
logger.error(
'error :: failed to add Panorama alert event - panorama.alert.%s.%s' % (
str(metric_timestamp), metric_name))
else:
return False
def alert_hipchat(datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp):
if settings.HIPCHAT_ENABLED:
sender = settings.BOUNDARY_HIPCHAT_OPTS['sender']
import hipchat
hipster = hipchat.HipChat(token=settings.BOUNDARY_HIPCHAT_OPTS['auth_token'])
        # Allow for absolute path metric namespaces, but also match
        # wildcard namespaces if there is no absolute path metric namespace
rooms = 'unknown'
notify_rooms = []
matched_rooms = []
try:
rooms = settings.BOUNDARY_HIPCHAT_OPTS['rooms'][metric_name]
notify_rooms.append(rooms)
except:
for room in settings.BOUNDARY_HIPCHAT_OPTS['rooms']:
CHECK_MATCH_PATTERN = room
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(metric_name)
if pattern_match:
matched_rooms.append(room)
if matched_rooms != []:
for i_metric_name in matched_rooms:
rooms = settings.BOUNDARY_HIPCHAT_OPTS['rooms'][i_metric_name]
notify_rooms.append(rooms)
alert_algo = str(algorithm)
alert_context = alert_algo.upper()
unencoded_graph_title = 'Skyline Boundary - %s at %s hours - %s - %s' % (
alert_context, graphite_previous_hours, metric_name, datapoint)
graph_title_string = quote(unencoded_graph_title, safe='')
graph_title = '&title=%s' % graph_title_string
# @modified 20170706 - Support #2072: Make Boundary hipchat alerts show fixed timeframe
graphite_now = int(time())
target_seconds = int((graphite_previous_hours * 60) * 60)
from_timestamp = str(graphite_now - target_seconds)
until_timestamp = str(graphite_now)
graphite_from = datetime.datetime.fromtimestamp(int(from_timestamp)).strftime('%H:%M_%Y%m%d')
graphite_until = datetime.datetime.fromtimestamp(int(until_timestamp)).strftime('%H:%M_%Y%m%d')
if settings.GRAPHITE_PORT != '':
# link = '%s://%s:%s/render/?from=-%shours&target=cactiStyle(%s)%s%s&colorList=%s' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, settings.GRAPHITE_PORT,
# graphite_previous_hours, metric_name, settings.GRAPHITE_GRAPH_SETTINGS,
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# link = '%s://%s:%s/render/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=%s' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, settings.GRAPHITE_PORT,
# graphite_from, graphite_until, metric_name, settings.GRAPHITE_GRAPH_SETTINGS,
# graph_title, graphite_graph_line_color)
# @modified 20200417 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# link = '%s://%s:%s/%s/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=%s' % (
link = '%s://%s:%s/%s/?from=%s&until=%s&target=cactiStyle(%s,%%27si%%27)%s%s&colorList=%s' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, settings.GRAPHITE_PORT,
settings.GRAPHITE_RENDER_URI, graphite_from, graphite_until,
metric_name, settings.GRAPHITE_GRAPH_SETTINGS, graph_title,
graphite_graph_line_color)
else:
# link = '%s://%s/render/?from=-%shour&target=cactiStyle(%s)%s%s&colorList=%s' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, graphite_previous_hours,
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# link = '%s://%s/render/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=%s' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, graphite_from, graphite_until,
# metric_name, settings.GRAPHITE_GRAPH_SETTINGS, graph_title,
# graphite_graph_line_color)
# @modified 20200417 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# link = '%s://%s/%s/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=%s' % (
link = '%s://%s/%s/?from=%s&until=%s&target=cactiStyle(%s,%%27si%%27)%s%s&colorList=%s' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
settings.GRAPHITE_RENDER_URI, graphite_from, graphite_until,
metric_name, settings.GRAPHITE_GRAPH_SETTINGS, graph_title,
graphite_graph_line_color)
embed_graph = "<a href='" + link + "'><img height='308' src='" + link + "'>" + metric_name + "</a>"
for rooms in notify_rooms:
for room in rooms:
hipster.method('rooms/message', method='POST', parameters={'room_id': room, 'from': 'skyline', 'color': settings.BOUNDARY_HIPCHAT_OPTS['color'], 'message': '%s - Boundary - %s - Anomalous metric: %s (value: %s) at %s hours %s' % (sender, algorithm, metric_name, datapoint, graphite_previous_hours, embed_graph)})
else:
return False
def alert_syslog(datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold):
if settings.SYSLOG_ENABLED:
import sys
import syslog
syslog_ident = settings.SYSLOG_OPTS['ident']
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# message = str('Boundary - Anomalous metric: %s (value: %s) - %s' % (metric_name, datapoint, algorithm))
message = 'Boundary - Anomalous metric: %s (value: %s) - %s with %s %s times' % (
metric_name, str(datapoint), algorithm, str(metric_trigger),
str(alert_threshold))
if sys.version_info[:2] == (2, 6):
syslog.openlog(syslog_ident, syslog.LOG_PID, syslog.LOG_LOCAL4)
elif sys.version_info[:2] == (2, 7):
syslog.openlog(ident='skyline', logoption=syslog.LOG_PID, facility=syslog.LOG_LOCAL4)
        elif sys.version_info[:1] == (3,):
syslog.openlog(ident='skyline', logoption=syslog.LOG_PID, facility=syslog.LOG_LOCAL4)
else:
syslog.openlog(syslog_ident, syslog.LOG_PID, syslog.LOG_LOCAL4)
syslog.syslog(4, message)
else:
return False
# @added 20181126 - Task #2742: Update Boundary
# Feature #2618: alert_slack
def alert_slack(datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold):
if not settings.SLACK_ENABLED:
return False
# @modified 20200701 - Task #3612: Upgrade to slack v2
# Task #3608: Update Skyline to Python 3.8.3 and deps
# Task #3556: Update deps
# slackclient v2 has a version function, < v2 does not
# from slackclient import SlackClient
try:
from slack import version as slackVersion
slack_version = slackVersion.__version__
except:
slack_version = '1.3'
if slack_version == '1.3':
from slackclient import SlackClient
else:
from slack import WebClient
metric = metric_name
logger.info('alert_slack - anomalous metric :: metric: %s - %s' % (metric, algorithm))
base_name = metric
alert_algo = str(algorithm)
alert_context = alert_algo.upper()
    # The known_derivative_metric state is determined in case we need to surface
# the png image from Graphite if the Ionosphere image is not available for
# some reason. This will result in Skyline at least still sending an alert
# to slack, even if some gear fails in Ionosphere or slack alerting is used
# without Ionosphere enabled. Yes not DRY but multiprocessing and spawn
# safe.
known_derivative_metric = False
# try:
# if settings.REDIS_PASSWORD:
# # @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# # Branch #3262: py3
# # REDIS_ALERTER_CONN = redis.StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# REDIS_ALERTER_CONN = redis.StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH, charset='utf-8', decode_responses=True)
# else:
# # REDIS_ALERTER_CONN = redis.StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
# REDIS_ALERTER_CONN = redis.StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH, charset='utf-8', decode_responses=True)
# except:
# logger.error('error :: alert_slack - redis connection failed')
# try:
# derivative_metrics = list(REDIS_ALERTER_CONN.smembers('derivative_metrics'))
# except:
# derivative_metrics = []
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# redis_metric_name not used
# redis_metric_name = '%s%s' % (settings.FULL_NAMESPACE, str(base_name))
# if redis_metric_name in derivative_metrics:
# known_derivative_metric = True
known_derivative_metric = is_derivative_metric(skyline_app, str(base_name))
# if known_derivative_metric:
# try:
# non_derivative_monotonic_metrics = settings.NON_DERIVATIVE_MONOTONIC_METRICS
# except:
# non_derivative_monotonic_metrics = []
# skip_derivative = in_list(redis_metric_name, non_derivative_monotonic_metrics)
# if skip_derivative:
# known_derivative_metric = False
# @added 20191008 - Feature #3194: Add CUSTOM_ALERT_OPTS to settings
try:
main_alert_title = settings.CUSTOM_ALERT_OPTS['main_alert_title']
except:
main_alert_title = 'Skyline'
try:
app_alert_context = settings.CUSTOM_ALERT_OPTS['boundary_alert_heading']
except:
app_alert_context = 'Boundary'
if known_derivative_metric:
# @modified 20191008 - Feature #3194: Add CUSTOM_ALERT_OPTS to settings
# unencoded_graph_title = 'Skyline Boundary - ALERT %s at %s hours - derivative graph - %s' % (
# alert_context, str(graphite_previous_hours), metric)
# slack_title = '*Skyline Boundary - ALERT* %s on %s at %s hours - derivative graph - %s' % (
# alert_context, metric, str(graphite_previous_hours), datapoint)
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# unencoded_graph_title = '%s %s - ALERT %s at %s hours - derivative graph - %s' % (
# main_alert_title, app_alert_context, alert_context, str(graphite_previous_hours), metric)
# slack_title = '*%s %s - ALERT* %s on %s at %s hours - derivative graph - %s' % (
# main_alert_title, app_alert_context, alert_context, metric, str(graphite_previous_hours), datapoint)
unencoded_graph_title = '%s %s - ALERT %s %s %s times - derivative graph - %s' % (
main_alert_title, app_alert_context, alert_context,
str(metric_trigger), str(alert_threshold), metric)
slack_title = '*%s %s - ALERT* %s %s %s times on %s - derivative graph - %s' % (
main_alert_title, app_alert_context, alert_context,
str(metric_trigger), str(alert_threshold), metric, str(datapoint))
else:
# unencoded_graph_title = 'Skyline Boundary - ALERT %s at %s hours - %s' % (
# alert_context, str(graphite_previous_hours), metric)
# slack_title = '*Skyline Boundary - ALERT* %s on %s at %s hours - %s' % (
# alert_context, metric, str(graphite_previous_hours), datapoint)
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# unencoded_graph_title = '%s %s - ALERT %s at %s hours - %s' % (
# main_alert_title, app_alert_context, alert_context, str(graphite_previous_hours), metric)
# slack_title = '*%s %s - ALERT* %s on %s at %s hours - %s' % (
# main_alert_title, app_alert_context, alert_context, metric, str(graphite_previous_hours), datapoint)
unencoded_graph_title = '%s %s - ALERT %s %s %s times - %s' % (
main_alert_title, app_alert_context, alert_context,
str(metric_trigger), str(alert_threshold), metric)
slack_title = '*%s %s - ALERT* %s %s %s times on %s - %s' % (
main_alert_title, app_alert_context, alert_context,
str(metric_trigger), str(alert_threshold), metric, str(datapoint))
graph_title_string = quote(unencoded_graph_title, safe='')
graph_title = '&title=%s' % graph_title_string
until_timestamp = int(time())
target_seconds = int((graphite_previous_hours * 60) * 60)
from_timestamp = str(until_timestamp - target_seconds)
graphite_from = dt.datetime.fromtimestamp(int(from_timestamp)).strftime('%H:%M_%Y%m%d')
logger.info('graphite_from - %s' % str(graphite_from))
graphite_until = dt.datetime.fromtimestamp(int(until_timestamp)).strftime('%H:%M_%Y%m%d')
logger.info('graphite_until - %s' % str(graphite_until))
# @added 20181025 - Feature #2618: alert_slack
# Added date and time info so you do not have to mouseover the slack
# message to determine the time at which the alert came in
timezone = strftime("%Z", gmtime())
# @modified 20181029 - Feature #2618: alert_slack
# Use the standard UNIX data format
# human_anomaly_time = dt.datetime.fromtimestamp(int(until_timestamp)).strftime('%Y-%m-%d %H:%M:%S')
human_anomaly_time = dt.datetime.fromtimestamp(int(until_timestamp)).strftime('%c')
slack_time_string = '%s %s' % (human_anomaly_time, timezone)
# @added 20191106 - Branch #3262: py3
# Branch #3002: docker
graphite_port = get_graphite_port(skyline_app)
graphite_render_uri = get_graphite_render_uri(skyline_app)
graphite_custom_headers = get_graphite_custom_headers(skyline_app)
# @added 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
encoded_graphite_metric_name = encode_graphite_metric_name(skyline_app, metric_name)
if settings.GRAPHITE_PORT != '':
if known_derivative_metric:
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# link = '%s://%s:%s/render/?from=%s&until=%s&target=cactiStyle(nonNegativeDerivative(%s))%s%s&colorList=orange' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# settings.GRAPHITE_PORT, str(graphite_from), str(graphite_until),
# metric, settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
# @modified 20191022 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# link = '%s://%s:%s/%s/?from=%s&until=%s&target=cactiStyle(nonNegativeDerivative(%s))%s%s&colorList=orange' % (
link = '%s://%s:%s/%s/?from=%s&until=%s&target=cactiStyle(nonNegativeDerivative(%s),%%27si%%27)%s%s&colorList=orange' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# Branch #3262: py3
# Branch #3002: docker
# settings.GRAPHITE_PORT, settings.GRAPHITE_RENDER_URI,
graphite_port, graphite_render_uri,
str(graphite_from), str(graphite_until),
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
# metric, settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
encoded_graphite_metric_name, settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
else:
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# link = '%s://%s:%s/render/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=orange' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# settings.GRAPHITE_PORT, str(graphite_from), str(graphite_until),
# metric, settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
# @modified 20191022 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# link = '%s://%s:%s/%s/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=orange' % (
link = '%s://%s:%s/%s/?from=%s&until=%s&target=cactiStyle(%s,%%27si%%27)%s%s&colorList=orange' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# Branch #3262: py3
# Branch #3002: docker
# settings.GRAPHITE_PORT, settings.GRAPHITE_RENDER_URI,
graphite_port, graphite_render_uri,
# str(graphite_from), str(graphite_until), metric,
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
str(graphite_from), str(graphite_until), encoded_graphite_metric_name,
settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
else:
if known_derivative_metric:
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# link = '%s://%s/render/?from=%s&until=%s&target=cactiStyle(nonNegativeDerivative(%s))%s%s&colorList=orange' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# str(graphite_from), str(graphite_until), metric,
# settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
# @modified 20191022 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# link = '%s://%s/%s/?from=%s&until=%s&target=cactiStyle(nonNegativeDerivative(%s))%s%s&colorList=orange' % (
link = '%s://%s/%s/?from=%s&until=%s&target=cactiStyle(nonNegativeDerivative(%s),%%27si%%27)%s%s&colorList=orange' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
settings.GRAPHITE_RENDER_URI, str(graphite_from),
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
# str(graphite_until), metric, settings.GRAPHITE_GRAPH_SETTINGS,
str(graphite_until), encoded_graphite_metric_name, settings.GRAPHITE_GRAPH_SETTINGS,
graph_title)
else:
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# link = '%s://%s/render/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=orange' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# str(graphite_from), str(graphite_until), metric,
# settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
# @modified 20191022 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
# link = '%s://%s/%s/?from=%s&until=%s&target=cactiStyle(%s)%s%s&colorList=orange' % (
link = '%s://%s/%s/?from=%s&until=%s&target=cactiStyle(%s,%%27si%%27)%s%s&colorList=orange' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
settings.GRAPHITE_RENDER_URI, str(graphite_from),
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
# str(graphite_until), metric, settings.GRAPHITE_GRAPH_SETTINGS,
str(graphite_until), encoded_graphite_metric_name, settings.GRAPHITE_GRAPH_SETTINGS,
graph_title)
# slack does not allow embedded images, nor will it fetch links behind
# authentication so Skyline uploads a png graphite image with the message
image_file = None
# Fetch the png from Graphite
# @modified 20191021 - Task #3290: Handle urllib2 in py3
# Branch #3262: py3
image_file = '%s/%s.%s.graphite.%sh.png' % (
settings.SKYLINE_TMP_DIR, base_name, skyline_app,
str(int(graphite_previous_hours)))
if python_version == 22:
try:
# image_data = urllib2.urlopen(link).read() # nosec
image_data = None
# except urllib2.URLError:
except:
logger.error(traceback.format_exc())
logger.error('error :: alert_slack - failed to get image graph')
logger.error('error :: alert_slack - %s' % str(link))
image_data = None
if python_version == 33:
try:
image_file = '%s/%s.%s.graphite.%sh.png' % (
settings.SKYLINE_TMP_DIR, base_name, skyline_app,
str(int(graphite_previous_hours)))
# urllib.request.urlretrieve(link, image_file)
image_data = 'retrieved'
image_data = None
except:
try:
# @added 20191022 - Task #3294: py3 - handle system parameter in Graphite cactiStyle
image_data = None
original_traceback = traceback.format_exc()
if 'cactiStyle' in link:
metric_replace = '%s,%%27si%%27' % metric
original_link = link
link = link.replace(metric, metric_replace)
logger.info('link replaced with cactiStyle system parameter added - %s' % str(link))
urllib.request.urlretrieve(link, image_file)
image_data = 'retrieved'
except:
new_trackback = traceback.format_exc()
logger.error(original_traceback)
logger.error('error :: boundary_alerters :: alert_slack :: failed to urlopen %s' % str(original_link))
logger.error(new_trackback)
logger.error('error :: boundary_alerters :: alert_slack :: failed to urlopen with system parameter added %s' % str(link))
image_data = None
# @added 20191025 -
image_data = get_graphite_graph_image(skyline_app, link, image_file)
if image_data == 'disabled_for_testing':
image_file = '%s/%s.%s.graphite.%sh.png' % (
settings.SKYLINE_TMP_DIR, base_name, skyline_app,
str(int(graphite_previous_hours)))
if image_data != 'retrieved':
try:
write_data_to_file(skyline_app, image_file, 'w', image_data)
logger.info('alert_slack - added Graphite image :: %s' % (
image_file))
except:
logger.info(traceback.format_exc())
logger.error(
'error :: alert_slack - failed to add %s Graphite image' % (
image_file))
image_file = None
try:
filename = os.path.basename(image_file)
except:
filename = None
try:
bot_user_oauth_access_token = settings.BOUNDARY_SLACK_OPTS['bot_user_oauth_access_token']
except:
logger.error('error :: alert_slack - could not determine bot_user_oauth_access_token')
return False
    # Allow for absolute path metric namespaces, but also match
    # wildcard namespaces if there is no absolute path metric namespace
channels = 'unknown'
notify_channels = []
matched_channels = []
try:
channels = settings.BOUNDARY_SLACK_OPTS['channels'][metric_name]
notify_channels.append(channels)
except:
for channel in settings.BOUNDARY_SLACK_OPTS['channels']:
CHECK_MATCH_PATTERN = channel
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(metric_name)
if pattern_match:
matched_channels.append(channel)
if matched_channels != []:
for i_metric_name in matched_channels:
channels = settings.BOUNDARY_SLACK_OPTS['channels'][i_metric_name]
notify_channels.append(channels)
if not notify_channels:
logger.error('error :: alert_slack - could not determine channel')
return False
else:
channels = notify_channels
try:
icon_emoji = settings.BOUNDARY_SLACK_OPTS['icon_emoji']
except:
icon_emoji = ':chart_with_upwards_trend:'
try:
# @modified 20200701 - Task #3612: Upgrade to slack v2
# Task #3608: Update Skyline to Python 3.8.3 and deps
# Task #3556: Update deps
# sc = SlackClient(bot_user_oauth_access_token)
if slack_version == '1.3':
sc = SlackClient(bot_user_oauth_access_token)
else:
sc = WebClient(bot_user_oauth_access_token, timeout=10)
except:
logger.info(traceback.format_exc())
logger.error('error :: alert_slack - could not initiate SlackClient')
return False
# @added 20200815 - Bug #3676: Boundary slack alert errors
# Task #3608: Update Skyline to Python 3.8.3 and deps
# Task #3612: Upgrade to slack v2
# Strange only Boundary slack messages are erroring on a tuple or part
# thereof, mirage_alerters using the same method are fine???
# The server responded with: {'ok': False, 'error': 'invalid_channel', 'channel': "('#skyline'"}
# This fix handles converting tuple items into list items where the channel
# is a tuple.
channels_list = []
for channel in channels:
if type(channel) == tuple:
for ichannel in channel:
channels_list.append(str(ichannel))
else:
channels_list.append(str(channel))
if channels_list:
channels = channels_list
for channel in channels:
initial_comment = slack_title + ' :: <' + link + '|graphite image link>\nFor anomaly at ' + slack_time_string
# @added 20201127 - Feature #3820: HORIZON_SHARDS
# Add the origin and shard for debugging purposes
if HORIZON_SHARDS:
initial_comment = initial_comment + ' - from ' + this_host + ' (shard ' + str(HORIZON_SHARD) + ')'
try:
# slack does not allow embedded images, nor links behind authentication
            # or color text, so we have to jump through all the API hoops to end up
# having to upload an image with a very basic message.
if os.path.isfile(image_file):
# @modified 20200701 - Task #3612: Upgrade to slack v2
# Task #3608: Update Skyline to Python 3.8.3 and deps
# Task #3556: Update deps
if slack_version == '1.3':
slack_file_upload = sc.api_call(
'files.upload', filename=filename, channels=channel,
initial_comment=initial_comment, file=open(image_file, 'rb'))
else:
slack_file_upload = sc.files_upload(
filename=filename, channels=channel,
initial_comment=initial_comment, file=open(image_file, 'rb'))
if not slack_file_upload['ok']:
logger.error('error :: alert_slack - failed to send slack message with file upload')
logger.error('error :: alert_slack - slack_file_upload - %s' % str(slack_file_upload))
try:
os.remove(image_file)
except OSError:
logger.error('error - failed to remove %s, continuing' % image_file)
pass
else:
send_text = initial_comment + ' :: error :: there was no graph image to upload'
send_message = sc.api_call(
'chat.postMessage',
channel=channel,
icon_emoji=icon_emoji,
text=send_text)
if not send_message['ok']:
logger.error('error :: alert_slack - failed to send slack message')
else:
logger.info('alert_slack - sent slack message')
except:
logger.info(traceback.format_exc())
logger.error('error :: alert_slack - could not upload file')
return False
# @added 20200825 - Feature #3704: Add alert to anomalies
if settings.PANORAMA_ENABLED:
added_panorama_alert_event = add_panorama_alert(skyline_app, int(metric_timestamp), metric_name)
if not added_panorama_alert_event:
logger.error(
'error :: failed to add Panorama alert event - panorama.alert.%s.%s' % (
str(metric_timestamp), metric_name))
# @added 20200122: Feature #3396: http_alerter
def alert_http(alerter, datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold):
"""
    Called by :func:`~trigger_alert`; sends and resends anomalies to an HTTP
    endpoint.
"""
if settings.HTTP_ALERTERS_ENABLED:
alerter_name = alerter
alerter_enabled = False
try:
alerter_enabled = settings.HTTP_ALERTERS_OPTS[alerter_name]['enabled']
except:
logger.error(traceback.format_exc())
logger.error('error :: alert_http failed to determine the enabled from settings.HTTP_ALERTERS_OPTS for alerter - %s and metric %s with algorithm %s' % (
str(alerter), str(metric_name), algorithm))
if not alerter_enabled:
logger.info('alert_http - %s enabled %s, not alerting' % (
str(alerter_name), str(alerter_enabled)))
return
alerter_endpoint = False
try:
alerter_endpoint = settings.HTTP_ALERTERS_OPTS[alerter_name]['endpoint']
except:
logger.error(traceback.format_exc())
logger.error('error :: alert_http failed to determine the endpoint from settings.HTTP_ALERTERS_OPTS for alert - %s and metric %s with algorithm %s' % (
str(alerter), str(metric_name), algorithm))
if not alerter_endpoint:
logger.error('alert_http - no endpoint set for %s, not alerting' % (
str(alerter_name)))
return
alerter_token = None
try:
alerter_token = settings.HTTP_ALERTERS_OPTS[alerter_name]['token']
except:
pass
source = 'boundary'
metric_alert_dict = {}
alert_data_dict = {}
try:
timestamp_str = str(metric_timestamp)
value_str = str(datapoint)
full_duration_str = str(int(full_duration_seconds))
expiry_str = str(expiration_time)
metric_alert_dict = {
"metric": metric_name,
"algorithm": algorithm,
"timestamp": timestamp_str,
"value": value_str,
"full_duration": full_duration_str,
"expiry": expiry_str,
# @added 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
"metric_trigger": metric_trigger,
"alert_threshold": alert_threshold,
"source": str(source),
"token": str(alerter_token)
}
# @modified 20200302: Feature #3396: http_alerter
# Add the token as an independent entity from the alert
# alert_data_dict = {"status": {}, "data": {"alert": metric_alert_dict}}
alerter_token_str = str(alerter_token)
# @modified 20201127 - Feature #3820: HORIZON_SHARDS
# Add the origin and shard to status for debugging purposes
if not HORIZON_SHARDS:
alert_data_dict = {"status": {}, "data": {"token": alerter_token_str, "alert": metric_alert_dict}}
else:
alert_data_dict = {"status": {"origin": this_host, "shard": HORIZON_SHARD}, "data": {"token": alerter_token_str, "alert": metric_alert_dict}}
logger.info('alert_http :: alert_data_dict to send - %s' % str(alert_data_dict))
except:
logger.error(traceback.format_exc())
logger.error('error :: alert_http failed to construct the alert data for %s from alert - %s and metric - %s' % (
str(alerter_name), str(algorithm), str(metric_name)))
return
in_resend_queue = False
redis_set = '%s.http_alerter.queue' % str(source)
resend_queue = None
previous_attempts = 0
REDIS_HTTP_ALERTER_CONN_DECODED = get_redis_conn_decoded(skyline_app)
try:
resend_queue = REDIS_HTTP_ALERTER_CONN_DECODED.smembers(redis_set)
except Exception as e:
logger.error('error :: alert_http :: could not query Redis for %s - %s' % (redis_set, e))
if REDIS_HTTP_ALERTER_CONN_DECODED:
try:
del REDIS_HTTP_ALERTER_CONN_DECODED
except:
pass
if resend_queue:
try:
for index, resend_item in enumerate(resend_queue):
resend_item_list = literal_eval(resend_item)
# resend_alert = literal_eval(resend_item_list[0])
# resend_metric = literal_eval(resend_item_list[1])
resend_metric_alert_dict = literal_eval(resend_item_list[2])
if resend_metric_alert_dict['metric'] == metric_name:
if int(resend_metric_alert_dict['timestamp']) == int(metric_timestamp):
previous_attempts = int(resend_metric_alert_dict['attempts'])
in_resend_queue = True
break
except:
logger.error(traceback.format_exc())
            logger.error('error :: alert_http failed to iterate resend_queue')
# REDIS_HTTP_ALERTER_CONN = None
# if in_resend_queue:
# REDIS_HTTP_ALERTER_CONN = get_redis_conn(skyline_app)
REDIS_HTTP_ALERTER_CONN = get_redis_conn(skyline_app)
add_to_resend_queue = False
fail_alerter = False
if alert_data_dict and alerter_endpoint:
# @modified 20200403 - Feature #3396: http_alerter
# Changed timeouts from 2, 2 to 5, 20
connect_timeout = 5
read_timeout = 20
if requests.__version__ >= '2.4.0':
use_timeout = (int(connect_timeout), int(read_timeout))
else:
use_timeout = int(connect_timeout)
if settings.ENABLE_DEBUG:
logger.debug('debug :: use_timeout - %s' % (str(use_timeout)))
response = None
try:
response = requests.post(alerter_endpoint, json=alert_data_dict, timeout=use_timeout)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to post alert to %s - %s' % (
str(alerter_name), str(alert_data_dict)))
add_to_resend_queue = True
response = None
if in_resend_queue:
try:
REDIS_HTTP_ALERTER_CONN.srem(redis_set, str(resend_item))
logger.info('alert_http :: alert removed from %s' % (
str(redis_set)))
except:
logger.error(traceback.format_exc())
                logger.error('error :: alert_http :: failed to remove %s from Redis set %s' % (
str(resend_item), redis_set))
# @added 20200310 - Feature #3396: http_alerter
# When the response code is 401 the response object appears to be
# False, although the response.code and response.reason are set
try:
if response.status_code != 200:
logger.error('error :: alert_http :: %s %s responded with status code %s and reason %s' % (
str(alerter_name), str(alerter_endpoint),
str(response.status_code), str(response.reason)))
add_to_resend_queue = True
fail_alerter = True
except:
logger.error(traceback.format_exc())
            logger.error('error :: alert_http :: failed to determine response.status_code')
if response:
if response.status_code == 200:
logger.info('alert_http :: alert sent to %s - %s' % (
str(alerter_endpoint), str(alert_data_dict)))
if in_resend_queue:
logger.info('alert_http :: alert removed from %s after %s attempts to send' % (
str(redis_set), str(previous_attempts)))
try:
del REDIS_HTTP_ALERTER_CONN
except:
pass
# @added 20200825 - Feature #3704: Add alert to anomalies
if settings.PANORAMA_ENABLED:
added_panorama_alert_event = add_panorama_alert(skyline_app, int(metric_timestamp), metric_name)
if not added_panorama_alert_event:
logger.error(
'error :: failed to add Panorama alert event - panorama.alert.%s.%s' % (
str(metric_timestamp), metric_name))
return
else:
logger.error('error :: alert_http :: %s %s responded with status code %s and reason %s' % (
str(alerter_name), str(alerter_endpoint),
str(response.status_code), str(response.reason)))
add_to_resend_queue = True
fail_alerter = True
else:
logger.error('error :: alert_http :: %s %s did not respond' % (
str(alerter_name), str(alerter_endpoint)))
add_to_resend_queue = True
fail_alerter = True
number_of_send_attempts = previous_attempts + 1
metric_alert_dict['attempts'] = number_of_send_attempts
if add_to_resend_queue:
data = [alerter, datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, str(metric_alert_dict)]
logger.info('alert_http :: adding alert to %s after %s attempts to send - %s' % (
str(redis_set), str(number_of_send_attempts), str(metric_alert_dict)))
try:
# redis_conn.sadd(redis_set, str(metric_alert_dict))
REDIS_HTTP_ALERTER_CONN.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
                logger.error('error :: alert_http :: failed to add %s to Redis set %s' % (
str(metric_alert_dict), redis_set))
        # Create a Redis key if there was a bad or no response from the
# alerter_endpoint, to ensure that Boundary does not loop through
# every alert in the queue for an alerter_endpoint, if the
# alerter_endpoint is down
if fail_alerter:
alerter_endpoint_cache_key = 'http_alerter.down.%s' % str(alerter_name)
logger.error('error :: alert_http :: alerter_endpoint %s failed adding Redis key %s' % (
str(alerter_endpoint), str(alerter_endpoint_cache_key)))
if REDIS_HTTP_ALERTER_CONN:
try:
failed_timestamp = int(time())
REDIS_HTTP_ALERTER_CONN.setex(alerter_endpoint_cache_key, 60, failed_timestamp)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to set Redis key %s' % alerter_endpoint_cache_key)
try:
del REDIS_HTTP_ALERTER_CONN
except:
pass
else:
        logger.info('alert_http :: settings.HTTP_ALERTERS_ENABLED is not enabled, nothing to do')
return
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
# def trigger_alert(alerter, datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp):
def trigger_alert(alerter, datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold):
if alerter == 'smtp':
strategy = 'alert_smtp'
# @added 20200122: Feature #3396: http_alerter
# Added http_alerter
elif 'http_alerter' in alerter:
strategy = 'alert_http'
else:
strategy = 'alert_%s' % alerter
try:
if strategy == 'alert_http':
# @modified 20201207 - Task #3878: Add metric_trigger and alert_threshold to Boundary alerts
getattr(boundary_alerters, strategy)(alerter, datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold)
else:
getattr(boundary_alerters, strategy)(datapoint, metric_name, expiration_time, metric_trigger, algorithm, metric_timestamp, alert_threshold)
except:
logger.error(traceback.format_exc())
logger.error('error :: alerters - %s - getattr error' % strategy)
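# A minimal dispatch sketch (hypothetical values, illustration only;
# trigger_alert resolves the alert_* strategy name and calls it via getattr above):
# trigger_alert('http_alerter-ops', 95.0, 'skyline.test.metric', 3600, 90.0, 'detect_drop_off_cliff', 1607299200, 95.0)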
| [
"[email protected]"
]
| |
fc0054ef638181b7cdaa31bdd9b2c7d6c701b84a | 8bbe2351bbd157a46ccf8530cde4e4cc7b0bd3b7 | /main.py | 94f313395298785422b7caeae12f2b205f2ce590 | []
| no_license | airuibel/py_code | 8dc98d71e79a4c0f785ad5cf81b2ca2073061ebf | 1da9a9dcd37475dd14bab6ae58bca1e2dff4c251 | refs/heads/master | 2020-06-18T03:47:43.754204 | 2018-03-20T09:31:00 | 2018-03-20T09:31:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,021 | py | # import package
import numpy as np
from pandas import DataFrame
import pandas as pd
import re
from dateutil import relativedelta
import datetime as dt
# 1.1
def df_groupby(df, groupkey, col, func, res_col_name, asint=False, dup=False):
"""
    :param df: a DataFrame covering one or more users
    :param groupkey: column name in df to group by
    :param col: column name(s) in df to aggregate, a string or a list
    :param func: aggregation method; supports sum / max / min / avg / std / count / distinct_count
    :param res_col_name: column name(s) for the aggregated result, a string or a list
    :param asint: if asint=True, cast the aggregated result to int; default asint=False
    :param dup: if dup=True, drop duplicate rows first; default dup=False
    :return: df_res, a DataFrame
"""
    # drop rows where all values are NaN
df = df.dropna(axis=0, how='all')
    # cast the aggregation columns to numeric
try:
if func != 'count' and func != 'distinct_count':
df[col] = df[col].astype('float32')
except ValueError:
        print('could not convert column values from string to float!')
    # drop duplicate rows
if dup:
df = df.drop_duplicates(df.columns)
    # accept both a single string and a list
if type(col) != list:
col = [col]
if type(res_col_name) != list:
res_col_name = [res_col_name]
if type(func) != list:
func = [func]
    # aggregate per group key
df_res = DataFrame(df[groupkey].unique(), columns=[groupkey])
for i in func:
if i == 'sum':
df_res_ago = DataFrame(df.groupby(groupkey)[col].sum())
elif i == 'max':
df_res_ago = DataFrame(df.groupby(groupkey)[col].max())
elif i == 'min':
df_res_ago = DataFrame(df.groupby(groupkey)[col].min())
elif i == 'avg':
df_res_ago = DataFrame(df.groupby(groupkey)[col].mean())
elif i == 'std':
df_res_ago = DataFrame(df.groupby(groupkey)[col].std())
elif i == 'count':
df_res_ago = DataFrame(df.groupby(groupkey)[col].count())
elif i == 'distinct_count':
df_res_ago = DataFrame(df.groupby(groupkey)[col].nunique())
else:
print('input func error!')
df_res_ago = df_res_ago.reset_index()
df_res = pd.merge(df_res, df_res_ago, how='left', on=groupkey)
columns_list = [groupkey]
columns_list.extend(res_col_name)
df_res.columns = columns_list
if asint:
df_res[res_col_name] = df_res[res_col_name].astype(int)
return df_res
# use example
# df_groupby(df,'appl_no', 'phone_gray_score', 'sum', 'phone_gray_score_sum', dup=False, asint=False)
# df_groupby(df,'appl_no', ['phone_gray_score'], ['sum'], ['phone_gray_score_sum'], dup=False, asint=False)
# df_groupby(df,'appl_no', ['register_cnt','phone_gray_score'], ['sum'], ['register_cnt_sum','phone_gray_score_sum'], dup=False, asint=False)
# df_groupby(df,'appl_no', ['register_cnt','phone_gray_score'], ['sum','avg','count'], ['register_cnt_sum','phone_gray_score_sum','register_cnt_avg','phone_gray_score_avg','register_cnt_count','phone_gray_score_count'], dup=False, asint=False)
# 1.2.1
def col_dummy(x, col, dummy_dict=[]):
"""
    function about: variable encoding helpers (dummy/one-hot encoding)
    by boysgs @20171103
    :param x: a single value
    :param col: name of the df column to re-encode
    :param dummy_dict: list of all possible values of the variable, e.g. ['value_1','value_2']
    :return: col_dummy_dict
"""
dummy_dict_sorted = sorted(dummy_dict)
dummy_dict_sorted_key = np.array(['_'.join(['if', col, i]) for i in dummy_dict_sorted])
dummy_dict_sorted_value = [0] * len(dummy_dict_sorted_key)
col_dummy_zip = zip(dummy_dict_sorted_key, dummy_dict_sorted_value)
col_dummy_dict = dict((a, b) for a, b in col_dummy_zip)
#
if x in dummy_dict_sorted:
col_dummy_dict['_'.join(['if', col, x])] = 1
return col_dummy_dict
# use example
# df = pd.DataFrame({'col1': [1, np.nan, 2, 3], 'col2': [3, 4, 5, 1], 'col3': ['s', 'a', 'c', 'd']})
# dummy_dict = ['a', 'b', 'c', 'd', 's']
# col = 'col3'
# DataFrame(list(df[col].apply(lambda x: col_dummy(x, col, dummy_dict))))
# 1.2.2
def col_dummy_lb(x, lb_trans, sorted_dummy_varname_list=[]):
"""
    function about: variable encoding helpers (using the LabelBinarizer method)
    by boysgs @20171103
    :param x: a single value
    :param lb_trans: an object produced by preprocessing.LabelBinarizer for one variable
    :param sorted_dummy_varname_list: list of all dummy variable names sorted ascending, e.g. ['value_1','value_2']
    :return: col_dummy_dict, a dict
"""
dummy_value = lb_trans.transform(str([x]))
col_dummy_dict = dict(zip(sorted_dummy_varname_list, dummy_value[0]))
return col_dummy_dict
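# use example (a hypothetical sketch; it assumes the binarizer was fitted on
# str([value]) representations, to match the transform call above)
# from sklearn import preprocessing
# lb = preprocessing.LabelBinarizer()
# lb.fit([str([v]) for v in ['a', 'b', 'c']])
# sorted_names = ['if_col3_a', 'if_col3_b', 'if_col3_c']
# col_dummy_lb('b', lb, sorted_names)  # -> {'if_col3_a': 0, 'if_col3_b': 1, 'if_col3_c': 0}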
# 2.1
def meetOneCondition(x,symbol = '=',threshold = ('None','b')):
"""
    # Input:
    # variable: e.g. age
    # symbol: =, !=, >, <, >=, <=, in, not in, like, not like
    # threshold: 10, (10,11), '%10%'
    # Output:
    # returns 1 if the condition is met, otherwise 0
"""
if pd.isnull(x) or x == '':
if symbol in ['!=','not in ','not like'] and threshold!='None':
return 1
elif threshold=='None':
if symbol == '=':
return 1
elif symbol == '!=':
return 0
else:
return 0
elif symbol == '=':
if threshold=='None':
return 0
elif x == threshold:
return 1
else:
return 0
elif symbol == '!=':
if threshold=='None':
return 1
elif x != threshold:
return 1
else:
return 0
elif symbol == '>':
if x > threshold:
return 1
else:
return 0
elif symbol == '<':
if x < threshold:
return 1
else:
return 0
elif symbol == '>=':
if x >= threshold:
return 1
else:
return 0
elif symbol == '<=':
if x <= threshold:
return 1
else:
return 0
elif symbol == 'in':
if x in threshold:
return 1
else:
return 0
elif symbol == 'not in':
if x not in threshold:
return 1
else:
return 0
elif symbol == 'like':
if threshold[0] == '%' and threshold[-1] == '%':
if threshold[1:-1] in x:
return 1
else:
return 0
if threshold[0] == '%' and threshold[-1] != '%':
if threshold[1:] == x[len(x)-len(threshold[1:]):]:
return 1
else:
return 0
if threshold[0] != '%' and threshold[-1] == '%':
if threshold[0:-1] == x[0:len(threshold[0:-1])]:
return 1
else:
return 0
else:
            return 'you need to check your "like" threshold'
elif symbol == 'not like':
if threshold[0] == '%' and threshold[-1] == '%':
if threshold[1:-1] not in x:
return 1
else:
return 0
if threshold[0] == '%' and threshold[-1] != '%':
if threshold[1:] != x[len(x)-len(threshold[1:]):]:
return 1
else:
return 0
if threshold[0] != '%' and threshold[-1] == '%':
if threshold[0:-1] != x[0:len(threshold[0:-1])]:
return 1
else:
return 0
else:
            return 'you need to check your "not like" threshold'
elif symbol =='regex':
if re.search(threshold,x):
return 1
else:
return 0
else:
        return 'please contact the developer to add support for this type of symbol'
# test:
# x = 'abcde'
# meetOneCondition(x,'=','abcd2')
# meetOneCondition(x,'like','abc%')
# meetOneCondition(x,'like','%abc')
# meetOneCondition(x,'regex','b|adz|z')
# 2.2
def meetMultiCondition(condition = ((),'and',())):
"""
    # Input:
    # multiple conditions; each single condition follows the meetOneCondition format
    # example: condition = ( ('age','>=',18), 'and', ( ('age','<=',40),'or',('gender','=','female') ) )
    # Output:
    # returns 1 if the condition is met, otherwise 0
"""
if 'and' in condition:
a = [k for k in condition if k!='and']
b = []
for l in range(len(a)):
b.append(meetMultiCondition(a[l]))
if 0 in b:
return 0
else:
return 1
if 'or' in condition:
a = [k for k in condition if k != 'or']
b = []
for l in range(len(a)):
b.append(meetMultiCondition(a[l]))
if 1 in b:
return 1
else:
return 0
else:
return meetOneCondition(condition[0],condition[1],condition[2])
# test
# zz ='abcde'
# yy = 10
# xx = 5
# meetMultiCondition(((zz,'=','abc'),'or',(yy,'>',7)))
# 2.3
def singleConditionalAssignment(conditon =('z','=',('None','b')),assig1=1, assig2=0):
"""
    # Single-condition assignment
    # Input:
    # same as the input of meetOneCondition
    # e.g.: conditon = ('age','>=',18)
    # Output:
    # assig1 if the condition is met
    # assig2 otherwise
"""
if meetOneCondition(conditon[0],conditon[1],conditon[2])==1:
return assig1
elif meetOneCondition(conditon[0], conditon[1], conditon[2]) == 0:
return assig2
else:
return meetOneCondition(conditon[0],conditon[1],conditon[2])
# test
# singleConditionalAssignment((x, '=', 'abcde'), 5, 1)
# 2.4
def multiConditionalAssignment(condition = (),assig1 = 1,assig2 = 0):
"""
    # Multi-condition assignment
    ### Input
    ## multiple conditions shaped like the input of meetMultiCondition
    ### Output:
    ## assig1 if the condition is met
    ## assig2 otherwise
"""
if meetMultiCondition(condition)==1:
return assig1
else:
return assig2
# test
# xx=5
# multiConditionalAssignment(condition =((zz,'=','abcde'),'and',( (yy,'>',10), 'or', (xx,'=',5) )),assig1 = 999,assig2 = 0)
# 2.5
def multiConditionalMultAssignment(condition = ((('zz','not in', ('硕士','博士')),1),(('zz','not in', ('硕士','博士')),2)),assig = 0):
"""
    #### Multiple conditions with multiple assignments
    ### Input
    ## multiple conditions shaped like meetMultiCondition's input, each paired with the value to return
    ### Output:
    ## the paired value of the first condition that is met
    ## assig otherwise
"""
for l in condition:
if meetMultiCondition(l[0])==1:
return l[1]
return assig
# test
# multiConditionalMultAssignment((((zz,'=','abcdef'),1),((zz,'=','abcde'),2)),3)
# 3.1
def substring(string,length,pos_start=0):
"""
    function about : substring extraction
    by dabao @20171106
    :param string: the string to slice
    :param length: number of characters to take
    :param pos_start: position to start from, default=0
    :return: a string :substr
"""
pos_end = length + pos_start
if string is np.NaN:
return np.NaN
else:
str_type = type(string)
if str_type==str:
substr = string[pos_start:pos_end]
else:
string = str(string)
substr = string[pos_start:pos_end]
return substr
# test
# string=370321199103050629
# length=4
# pos_start=6
# substring(string,length,pos_start)
# string=np.NaN
# 3.2
def charindex(substr,string,pos_start=0):
"""
    function about : find the position of a substring
    by dabao @20171106
    :param substr
    :param string: the string in which substr's start position is searched
    :param pos_start: position to start searching for substr, default=0
    :return: an int :substr_index
"""
if string is np.NaN:
return np.NaN
else:
substr = str(substr)
string = str(string)
substr_index = string.find(substr,pos_start)
return substr_index
# test
# string='370321199103050629'
# substr='1991'
# charindex(substr,string)
# string.find(substr,0)
# 3.3
def trim(string,substr=' ',method='both'):
"""
    function about : strip whitespace or another given substring
    by dabao @20171106
    :param string: a string
    :param substr: the substring to strip from the ends of string, default=' '
    :param method: strip mode: left strips the left end, right the right end, both strips both ends
    :return: a string :string_alter
"""
if string is np.NaN:
return np.NaN
else:
substr = str(substr)
string = str(string)
if method in ['left','right','both']:
if method =='left':
string_alter = string.lstrip(substr)
elif method == 'right':
string_alter = string.rstrip(substr)
elif method == 'both':
string_alter = string.strip(substr)
else:
string_alter = string.strip(substr)
print("Warning: method must be in ['left','right','both']! If not, the function will be acting as 'both'")
return string_alter
# test:
# string=' OPPO,HUAWEI,VIVO,HUAWEI '
# trim(string)
# (4) string length: SQL's LEN() function; Python's built-in len()
# (5) upper/lower case conversion: SQL's LOWER/UPPER; Python's built-in string.upper(), string.lower()
# 3.4
def OnlyCharNum(s,oth=''):
    # keep only letters and digits
    s2 = s.lower()
    charset = 'abcdefghijklmnopqrstuvwxyz0123456789'
    for c in s2:
        if not c in charset:
            s = s.replace(c,'')
return s
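# test (a quick illustration):
# OnlyCharNum('Ab,12!')   # -> 'Ab12'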
# 4.1
def dateformat(date,symbol):
"""
    Input:
    variable: a datetime string, accepted as 10 or 19 characters
    options: 'year','month','day','hour','minute','second'
    Output
    returns the selected component if symbol is valid, otherwise warns
"""
if pd.isnull(date):
return np.NaN
date = str(date)
if len(date)==10:
date=date+' 00:00:00'
date=dt.datetime.strptime(date,'%Y-%m-%d %H:%M:%S')
if symbol in ['year','month','day','hour','minute','second']:
if symbol =='year':
datetime_elect = date.year
elif symbol == 'month':
datetime_elect = date.month
elif symbol == 'day':
datetime_elect = date.day
elif symbol == 'hour':
datetime_elect = date.hour
elif symbol == 'minute':
datetime_elect = date.minute
elif symbol == 'second':
datetime_elect = date.second
else:
datetime_elect = np.NaN
        print("Warning: symbol must be in ['year','month','day','hour','minute','second']! If not, the function returns NaN")
return datetime_elect
# test1:
# dateformat('2017-09-25 12:58:45','day')
# dateformat('2017-09-25 12:58:45','hour')
# dateformat('2017-09-25','day')
# dateformat(null,'hour')
# 4.2
def datediff(symbol,date_begin,date_end):
"""
    Input:
    variables: datetime strings, accepted as 4, 7, 10 or 19 characters
    options: 'year','month','day','hour','minute','second'
    Output
    returns the difference in the selected unit if symbol is valid, otherwise warns
"""
if pd.isnull(date_begin) or pd.isnull(date_end):
return np.NaN
date_begin = str(date_begin)
date_end = str(date_end)
if len(date_begin)==4:
date_begin=date_begin+'-01-01 00:00:00'
if len(date_end)==4:
date_end=date_end+'-01-01 00:00:00'
if len(date_begin)==7:
date_begin=date_begin+'-01 00:00:00'
if len(date_end)==7:
date_end=date_end+'-01 00:00:00'
if len(date_begin)==10:
date_begin=date_begin+' 00:00:00'
if len(date_end)==10:
date_end=date_end+' 00:00:00'
date_begin=dt.datetime.strptime(date_begin,'%Y-%m-%d %H:%M:%S')
date_end=dt.datetime.strptime(date_end,'%Y-%m-%d %H:%M:%S')
if symbol in ['year','month','day','hour','minute','second']:
r = relativedelta.relativedelta(date_end,date_begin)
if symbol =='year':
datetime_diff=r.years
elif symbol == 'month':
datetime_diff=r.years*12+r.months
elif symbol == 'day':
datetime_diff = (date_end-date_begin).days
elif symbol == 'hour':
datetime_days = (date_end-date_begin).days
datetime_seconds = (date_end-date_begin).seconds
datetime_diff = datetime_seconds/3600+datetime_days*24
elif symbol == 'minute':
datetime_days = (date_end-date_begin).days
datetime_seconds = (date_end-date_begin).seconds
datetime_diff=datetime_seconds/60+datetime_days*24*60
elif symbol == 'second':
datetime_days = (date_end-date_begin).days
datetime_seconds = (date_end-date_begin).seconds
datetime_diff=datetime_seconds+datetime_days*24*60*60
else:
datetime_diff = np.NaN
        print("Warning: symbol must be in ['year','month','day','hour','minute','second']! If not, the function returns NaN")
return datetime_diff
# test
# datediff('month','2013','2017-09-25 12:58:45')
# datediff('day','2017-09-25','2017-12-30')
# datediff('hour','2017-09-15 10:58:45','2017-09-25 12:58:45')
# datediff('day','2017-09-25','2017-12-30 12:58:45') | [
"l"
]
| l |
828540d2bb15e92786f7d4e9d29d60f51087bb38 | 908cf8e6ef52033bbf3d5afbb29637a25f5d66f8 | /test/test_codat_data_contracts_datasets_journal_entry_paged_response_model.py | 2fed4e4b1c7bd6ead4aef71f66240ef4f130e40b | []
| no_license | procurify/codat-python-sdk | 074769a2d9e72640741689b6f51e880d35b88095 | 3c8f664998427bda32bad8062c3bf324f39506da | refs/heads/master | 2023-08-25T03:55:19.817085 | 2021-10-22T22:14:34 | 2021-10-22T22:14:34 | 395,381,471 | 1 | 0 | null | 2021-10-20T21:10:31 | 2021-08-12T16:31:03 | Python | UTF-8 | Python | false | false | 1,570 | py | """
Codat API
[What's changed in our Swagger](https://docs.codat.io/docs/new-swagger-ui) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import codat_python_sdk
from codat_python_sdk.model.codat_data_contracts_datasets_journal_entry import CodatDataContractsDatasetsJournalEntry
from codat_python_sdk.model.codat_data_contracts_datasets_journal_entry_paged_response_links_model import CodatDataContractsDatasetsJournalEntryPagedResponseLinksModel
globals()['CodatDataContractsDatasetsJournalEntry'] = CodatDataContractsDatasetsJournalEntry
globals()['CodatDataContractsDatasetsJournalEntryPagedResponseLinksModel'] = CodatDataContractsDatasetsJournalEntryPagedResponseLinksModel
from codat_python_sdk.model.codat_data_contracts_datasets_journal_entry_paged_response_model import CodatDataContractsDatasetsJournalEntryPagedResponseModel
class TestCodatDataContractsDatasetsJournalEntryPagedResponseModel(unittest.TestCase):
"""CodatDataContractsDatasetsJournalEntryPagedResponseModel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCodatDataContractsDatasetsJournalEntryPagedResponseModel(self):
"""Test CodatDataContractsDatasetsJournalEntryPagedResponseModel"""
# FIXME: construct object with mandatory attributes with example values
# model = CodatDataContractsDatasetsJournalEntryPagedResponseModel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
cc9c3f6f2afd9774fe87a3ed7411d529af8e5707 | 5bb5e25009a10de97d400fdd124754cb0d6509bd | /clips/rigol-fix-png.py | d6c7b9bce5c52ae7e4afdbdbb54e472edbec3cae | []
| no_license | NovaCentrix/chris-resistance-tester | 938890d4811c1a0b0d8a764b604dc5395b405962 | c90e053f8ee465889b89100c10833ecbce064549 | refs/heads/master | 2023-07-11T03:33:11.764303 | 2021-08-15T21:37:31 | 2021-08-15T21:37:31 | 399,392,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | #!/usr/bin/env python3
# The MAIN code takes a bunch of images
# (typically oscope screen-shots, for example)
# and adds some text to the screen and
# saves it in an output file with a new name.
# The FIX_ONLY code solves some quirk with
# the Rigol PNG format which causes imagemagick
# to hiccup. Simply reading it and saving it
# in Python solves the problem.
import sys
from PIL import Image, ImageDraw, ImageFont
def fix_only(argv):
if len(argv) < 2:
print('Usage: rigol-fix-png <fname>')
exit(0)
fname = argv[1]
outfname = 'fix-' + fname
img = Image.open(fname)
img.save(outfname)
fontdir = '/Users/rclott/fonts/d2codingfont/D2Coding-Ver1.3.2-20180524/D2Coding/'
fontfile = 'D2Coding-Ver1.3.2-20180524.ttf'
fontpath = fontdir + fontfile
def main(argv):
if len(argv) < 4:
print('Usage: rigol-fix-png <input-file> <label> <output-file>')
exit(0)
fname_in = argv[1]
label = argv[2].upper()
fname_out = argv[3]
img = Image.open(fname_in)
w,h = img.size
font = ImageFont.truetype("Keyboard", 28)
draw = ImageDraw.Draw(img)
#label = 'xmt-075'.upper()
xpos = 0.125 * w
ypos = 0.75 * h
xoff = 7
yoff = 7
textposn = (xpos, ypos)
box = draw.textbbox( textposn, label, font=font )
bbox = ( box[0]-xoff, box[1]-yoff, box[2]+xoff, box[3]+yoff )
draw.rectangle(bbox, fill='gray', outline='gold', width=3)
draw.text( textposn, label , fill='white' , font=font)
img.save(fname_out)
if __name__ == "__main__":
#main(sys.argv)
fix_only(sys.argv)
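# Example invocations (hypothetical file names, shown for illustration):
#   python rigol-fix-png.py scope.png                  # fix_only: writes fix-scope.png
#   python rigol-fix-png.py scope.png xmt-075 out.png  # main(): writes a labeled copy (enable main above)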
| [
"[email protected]"
]
| |
32bfab98c76b271ada678fab1707be3299455126 | 624a13c06fec22ae36a3a96a9d5eefd9faddc424 | /class/cp05_class_01.py | 7a25c33a0ef2a66a758f65ed08a22625125c8eb6 | []
| no_license | ederortega/python_t01 | 747ea8966cbcc075c9bc6d9c1dd0d756731dabe0 | e8ecc8a71fc56b94ee56ac5c6f70882ec284eded | refs/heads/main | 2023-01-12T23:54:39.623150 | 2020-11-19T03:44:46 | 2020-11-19T03:44:46 | 314,127,847 | 0 | 0 | null | 2020-11-19T03:44:47 | 2020-11-19T03:39:12 | null | UTF-8 | Python | false | false | 59 | py | class ASimpleClass:
pass
# constructor
# print type
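# a possible completion sketch for the hints above (hypothetical, not the exercise solution):
# class ASimpleClass:
#     def __init__(self, name):   # constructor
#         self.name = name
# obj = ASimpleClass('demo')
# print(type(obj))                # -> <class '__main__.ASimpleClass'>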
| [
"[email protected]"
]
| |
ea7af78e34c8acc6637fb1902a7c88c16081361f | 0daa78054f5d5b505047aaa28ecbbea1662f9c53 | /loop.py | 047a171e01830a7b9011b773017a51188177d3eb | []
| no_license | WindWalker19/Python_for_everybody | 22d8e616ce19a0259a56f4a30048700c142cbc63 | a2302f2ed4fcc334a096dda22b4ff6e7603c7c22 | refs/heads/master | 2022-07-11T16:39:38.038333 | 2020-05-13T21:17:40 | 2020-05-13T21:17:40 | 263,165,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #A while loop with break.
while True:
line = input("> ")
if line == "done":
print(line)
break
print("Blastoff")
while True:
line = input("> ")
if line[0] == "#":
continue # The continue would ask to go to the top of the loop without executing the code after it.
print("hello")
if line == "done":
break
print("Blastoff")
| [
"[email protected]"
]
| |
afef1d3a45280972b38bb94aebbe092d929eea1a | 193d84db39e014990c171b09a592b944e91cb524 | /sendmail.py | 981308e8c194eb9ddcb8a095182054098b8297db | []
| no_license | trungdungit45/idsoftware | 66543e1d0731b08e260ba586c6ec3964b53ddc61 | 59acddea1a3dedfe0835faea46334db2c58bac5e | refs/heads/master | 2020-03-21T12:00:38.184951 | 2018-07-14T02:56:57 | 2018-07-14T02:56:57 | 138,532,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | from Alert.sendmail import send_message
from Detection.comparetime import compareTime
import datetime
def readIP():
f = open("Log/snifflog.log","r")
list = []
f1 = f.readlines()
for x in f1:
list.append(x)
f.close()
return list
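# Expected log line shape (a hypothetical example; checkLog below splits each
# line on ':' into exactly eight fields, so the fields themselves must not contain ':'):
# "WARNING:root:192.168.0.2:192.168.0.9:SYN flood:5:103000:2018-07-14"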
def checkLog(_listAlert, _listAlertStack):
ip = readIP()
for ift in ip:
_lineLog = ift
_warning, _root, _ipsource, _iptarget, _attack, _time, _timeStart, _date = _lineLog.split(':')
strcontent = _timeStart +' WA' + _attack + ' ' + _time + ' from '+ _ipsource + ' to ' + _iptarget + ' ' + _date
if (strcontent not in _listAlert and strcontent not in _listAlertStack):
_listAlert.append(strcontent)
if (compareTime(_timeStart, datetime.datetime.now().strftime('%H%M%S'))._time <= 60
and strcontent in _listAlert
and strcontent not in _listAlertStack):
try:
send_message(strcontent, 'Warning System')
_listAlert.remove(strcontent)
_listAlertStack.append(strcontent)
except:
print('') | [
"[email protected]"
]
| |
6fae90865636fc55b71131aa16bf7e887bde5e2e | f888c61c80af47a2ae8d374ce7a9282d134e5987 | /pyautest/golden_file_test.py | 08158c10facc645d04e4d43d7e1f262a4834b955 | [
"MIT"
]
| permissive | higumachan/pyautest | 1de272832f93aa7ca629b3ee8b8588b8186bb1a0 | 3f7fb55570397349e6dce77d49cff8ac1b965bf4 | refs/heads/master | 2020-04-22T09:53:46.973050 | 2019-03-06T13:49:56 | 2019-03-06T13:49:56 | 170,286,860 | 0 | 1 | MIT | 2019-04-03T15:35:17 | 2019-02-12T09:09:38 | Python | UTF-8 | Python | false | false | 1,738 | py | import inspect
from pathlib import Path
from typing import List, Any
from pyautest.adapter import basic_adapters
from pyautest.adapter.base_adapter import BaseAdapter
class GoldenFileTest:
def __init__(self, file_directory: Path, adapters: List[BaseAdapter]):
self.file_directory = file_directory
self.adapters = adapters
def __call__(self, name: str, obj: Any) -> bool:
test_function_name = self._find_code_stack()
if test_function_name is None:
raise Exception("not found test function in call stack")
adapter = self._find_adapter(obj)
if adapter is None:
raise Exception(f'not found adapter "{type(obj)}"')
filepath = self.file_directory / test_function_name / f"{name}.{adapter.file_extension}"
if not filepath.exists():
filepath.parent.mkdir(parents=True, exist_ok=True)
adapter.save(obj, filepath)
return True
other = adapter.load(filepath)
return adapter.equal(obj, other)
def _find_adapter(self, obj: Any) -> BaseAdapter:
for adapter in self.adapters:
for cls in adapter.target_classes:
if isinstance(obj, cls):
return adapter
return None
@staticmethod
def _find_code_stack():
framerecords = inspect.stack()
for framerecord in framerecords:
name = framerecord[0].f_code.co_name # type: str
if name.startswith("test"):
return name
return None
_default_gold_file_test = GoldenFileTest(Path('.') / "pyautest_assets", basic_adapters)
def golden_file_test(name: str, obj: Any) -> bool:
return _default_gold_file_test(name, obj)
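# A minimal usage sketch (hypothetical test module; assumes basic_adapters
# includes an adapter whose target_classes cover the object being checked --
# _find_code_stack() discovers the enclosing test_* function name):
# def test_model_metrics():
#     metrics = compute_metrics()  # hypothetical helper
#     assert golden_file_test("metrics", metrics)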
| [
"[email protected]"
]
| |
b5abfe01419986db825a86397318c45516c2d8f0 | 814df4c836843382dc9aecc907da7e2d8e824b53 | /Decryption_Server.py | 96d28a363a72f441e1d8b007236ed04a4704894e | []
| no_license | Aditya-Ramachandran/RSA_Cryptography | ed6909dc359a6f949f0a91d24ed047354918df63 | 18f6b1a30250573286488244cc832d0883ebba10 | refs/heads/master | 2022-12-09T21:31:37.320591 | 2020-09-08T16:23:11 | 2020-09-08T16:23:11 | 289,639,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | from __future__ import unicode_literals
import socket
from math import sqrt
import random
from random import randint as rand
import pickle
host = socket.gethostname()
port = 5000
s = socket.socket()
s.bind((host, port))
s.listen(2)
def gcd(a, b):
if b == 0:
return a
else:
return gcd(b, a % b)
def mod_inverse(a, m):
for x in range(1, m):
if (a * x) % m == 1:
return x
return -1
def isprime(n):
if n < 2:
return False
elif n == 2:
return True
else:
        for i in range(2, int(sqrt(n)) + 1):
if n % i == 0:
return False
return True
#initial two random numbers p,q
p = rand(1, 1000)
q = rand(1, 1000)
def generate_keypair(p, q,keysize):
# keysize is the bit length of n so it must be in range(nMin,nMax+1).
# << is bitwise operator
# x << y is same as multiplying x by 2**y
# i am doing this so that p and q values have similar bit-length.
# this will generate an n value that's hard to factorize into p and q.
nMin = 1<<(keysize-1)
nMax = (1<<keysize) - 1
primes=[2]
# we choose two prime numbers in range(start, stop) so that the difference of bit lengths is at most 2.
start = 1<<(keysize//2-1)
stop = 1<<(keysize//2+1)
if start >= stop:
return []
for i in range(3, stop + 1, 2):
for p in primes:
if i % p == 0:
break
else:
primes.append(i)
while(primes and primes[0] < start):
del primes[0]
#choosing p and q from the generated prime numbers.
while primes:
p = random.choice(primes)
primes.remove(p)
q_values = [q for q in primes if nMin <= p * q <= nMax]
if q_values:
q = random.choice(q_values)
break
n = p * q
phi = (p - 1) * (q - 1)
    #generate public key e with 1 < e < phi(n)
    e = random.randrange(1, phi)
    g = gcd(e, phi)
    #as long as gcd(e, phi(n)) is not 1, keep generating e
while True:
e = random.randrange(1, phi)
g = gcd(e, phi)
#generate private key
d = mod_inverse(e, phi)
if g==1 and e!=d:
break
#public key (e,n)
#private key (d,n)
return ((e, n), (d, n))
def decrypt(msg_ciphertext, package):
d, n = package
msg_plaintext = [chr(pow(c, d, n)) for c in msg_ciphertext]
# No need to use ord() since c is now a number
# After decryption, we cast it back to character
# to be joined in a string for the final result
return (''.join(msg_plaintext))
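# For reference, the matching client-side encryption is the inverse mapping
# (a sketch only -- the client is not part of this server script; it assumes
# the client received the same (e, n) public key sent below):
# def encrypt(msg_plaintext, package):
#     e, n = package
#     return [pow(ord(c), e, n) for c in msg_plaintext]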
public, private = generate_keypair(p, q, 8)
print(host)
conn, address = s.accept()
print("Connected to: " + str(address))
conn.send(str(public[0]).encode())
conn.send(str(public[1]).encode())
print("Public Key: ",public)
while True:
encoded_data = pickle.loads(conn.recv(1024*4))
for i in range(len(encoded_data)):
encoded_data[i]=int(encoded_data[i])
if not encoded_data:
break
#print(''.join(map(lambda x: str(x), encoded_data)))
decoded_data = decrypt(encoded_data, private)
print("Client : " + str(decoded_data))
conn.close() | [
"[email protected]"
]
| |
9d8e6c4e68e730a0a0efe827ef579808eb65591a | d9a84de5d6abc39a6a0352695e1495108f9a677e | /hab/cutdown/nichromeCutdownController.py | 2452cb4839d832112fd60d9081bf145860a4abab | []
| no_license | wmsi/hab-scripts | 9f60169881937ad1efb399902a70c6c08171a188 | 1d2e6756ab3a18e79d55ba09e6be9352d4cf71b8 | refs/heads/master | 2021-01-01T19:41:14.389885 | 2018-03-02T13:53:24 | 2018-03-02T13:53:24 | 98,650,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | #!/usr/bin/env python
"""
################################################################################
Written by Nicholas Sullo while working at WMSI 8/17/2015
Modified heavily by Mckenna Cisler ([email protected])
while working at WMSI 7/31/2017, to use the nichrome cutdown method
Refer to http://abyz.co.uk/rpi/pigpio/ for more information and
example code for the pigpio library
IMPLEMENTATION NOTE (Mckenna):
An obvious solution to reading the logfile data is to keep the file open
over the entire course of the program, using log.readlines() to only read
the NEW lines. However, my fear with this solution is if the file somehow
gets corrupted or overwritten from the beginning, in which case the program,
sitting at a seek position 500 lines down, has to wait for 500 telem strings
before parsing another, at which point we may be thousands of feet above
the desired cutdown.
################################################################################
"""
import time
import re
import sys
from nichromeControl import Nichrome
################################ CONSTANTS ####################################
MAX_ALTITUDE = 480 # Set the maximum altitude (in meters) HERE!
HAB_TELEM_FILE = '/home/pi/pits/tracker/telemetry.txt'
HAB_TELEM_BACKUP = '/home/pi/pits/tracker/telemetrydata.txt' # where to dump log data
###############################################################################
def loginfo(msg):
newMsg = time.strftime("%x %X %Z | ") + msg
print newMsg
def process_telemetry_string(telem, nichrome):
    """ Extracts and analyzes the altitude from a raw telemetry string """
telemFields = telem.split(",")
try:
# Check to make sure the string is actually the telemetry data.
# This will have to be changed based on what you name your payload
        if re.match(r"\$\$\w{1,10}", telemFields[0]) != None:
# The 6th field in the telemetry string is the altitude
# (Turn the string altitude value into an integer)
alt = int(telemFields[5])
loginfo("altitude: {:>4} m (target: {} m)".format(alt, MAX_ALTITUDE))
# Make sure this altitude is not larger than the predetermined cut down altitude
if alt >= MAX_ALTITUDE:
nichrome.activate()
return True
# Continue on parsing errors
    except (IndexError, ValueError):
return False
# not done if we're below max altitude
return False
def main():
loginfo("Starting controller...")
nichrome = Nichrome()
""" Reads telemetry lines from a logfile and transfers them to a backup file """
while True:
# continually deactivate nichrome to make sure we don't get any spikes
nichrome.deactivate()
        # Make sure to re-open files because otherwise, if one is deleted,
# we will stop writing to it
# This opens the log file the Pi in the sky saves to
with open(HAB_TELEM_FILE, 'r+') as log:
# This opens a file to move the telemetry data to
with open(HAB_TELEM_BACKUP, 'a') as logout:
# Read what lines we have
# (from the seek position, which we enforce to be 0)
log.seek(0)
telemetry = log.readlines()
# IMMEDIATELY remove the lines we just read
# (I was inclined to delete the lines after everything had
# finished with the idea that if the lines below had an exception,
# we could re-read the data. However, I realized that it is likely
# that something about that data caused the error, so it's best
                # to skip it the next time around. Additionally, clearing them
# below has a chance of overwriting a new line of data that had
# been added to the file in the interim, though this is unlikely)
log.seek(0)
log.truncate()
# transfer lines from log file to logout file
logout.writelines(telemetry)
# process the lines
for line in telemetry:
done = process_telemetry_string(line, nichrome)
# After we lose the balloon, there is no reason for this
# program to continue running, so break out of all loops
if done:
loginfo("Keeping nichrome pulled low after cutdown.")
keepNichromeLow(nichrome)
# delay for a short bit
time.sleep(0.25)
def keepNichromeLow(nichrome):
""" Sleeps forever, periodically forcing nichrome to stay low (deactivated) """
while True:
loginfo("Deactivating nichrome again...")
nichrome.deactivate()
time.sleep(2)
def create_telemetry_file():
""" Creates the telemetry file if it isn't there """
loginfo("Creating telem file if it doesn't exist...")
with open(HAB_TELEM_FILE, "w"):
pass
while True:
# restart on any exception
try:
create_telemetry_file()
main()
break # if we finish gracefully, quit
except SyntaxError as e:
loginfo("SYNTAX ERROR: {}".format(e))
break
except KeyboardInterrupt:
break
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
loginfo("RUNTIME ERROR ({}): {}".format(exc_type, exc_value))
continue
| [
"[email protected]"
]
| |
7d7718ce352aa1898575a3f0c040ed0540bbe9b3 | ea759ab6c37c83c06c0af127fa3169e912ef25d2 | /cnn_cascade_lasagne.py | be272bde0110dcc0da710b2caddd7d8bf9a23e2f | []
| no_license | Soledad89/Cascade-CNN-Face-Detection | 74d1178fc042b91a46ddc5affe9e1d190d813e70 | a75bcb74f763bdba851398c6096dbc058f5c2021 | refs/heads/master | 2021-01-12T06:51:29.560884 | 2016-10-10T21:05:58 | 2016-10-10T21:05:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,677 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 31 20:20:22 2016
@author: Kostya
"""
from lasagne.nonlinearities import softmax, rectify as relu
from lasagne import layers
from lasagne import updates
from lasagne import regularization
from lasagne import objectives
from time import time
from six.moves import cPickle as pickle
from util import Util as util
from sklearn.cross_validation import train_test_split
import theano
import theano.tensor as T
import scipy as sp
import sys
sys.setrecursionlimit(10000)
class Cnn(object):
net = None
subnet = None
nn_name = ''
l_rates = []
max_epochs = 120
batch_size = 256
verbose = 0
eta = None
__train_fn__ = None
    # create classification nets
def __build_12_net__(self):
network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
network = layers.dropout(network, p=0.1)
network = layers.Conv2DLayer(network,num_filters=16,filter_size=(3,3),stride=1,nonlinearity=relu)
network = layers.batch_norm(network)
network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
network = layers.DropoutLayer(network,p=0.3)
network = layers.DenseLayer(network,num_units = 16,nonlinearity = relu)
network = layers.batch_norm(network)
network = layers.DropoutLayer(network,p=0.3)
network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
return network
def __build_24_net__(self):
network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
network = layers.dropout(network, p=0.1)
network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
network = layers.batch_norm(network)
network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
network = layers.DropoutLayer(network,p=0.5)
network = layers.batch_norm(network)
network = layers.DenseLayer(network,num_units = 64,nonlinearity = relu)
network = layers.DropoutLayer(network,p=0.5)
network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
return network
def __build_48_net__(self):
network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)
network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
network = layers.batch_norm(network)
network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
network = layers.batch_norm(network)
network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
network = layers.Conv2DLayer(network,num_filters=64,filter_size=(3,3),stride=1,nonlinearity=relu)
network = layers.batch_norm(network)
network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
network = layers.DenseLayer(network,num_units = 256,nonlinearity = relu)
network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
return network
def __build_12_calib_net__(self):
network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
network = layers.Conv2DLayer(network,num_filters=16,filter_size=(3,3),stride=1,nonlinearity=relu)
network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
network = layers.DenseLayer(network,num_units = 128,nonlinearity = relu)
network = layers.DenseLayer(network,num_units = 45, nonlinearity = softmax)
return network
def __build_24_calib_net__(self):
network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
network = layers.Conv2DLayer(network,num_filters=32,filter_size=(5,5),stride=1,nonlinearity=relu)
network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
network = layers.DenseLayer(network,num_units = 64,nonlinearity = relu)
network = layers.DenseLayer(network,num_units = 45, nonlinearity = softmax)
return network
def __build_48_calib_net__(self):
network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)
network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
network = layers.batch_norm(layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2))
network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
network = layers.batch_norm(layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2))
network = layers.DenseLayer(network,num_units = 256,nonlinearity = relu)
network = layers.DenseLayer(network,num_units = 45, nonlinearity = softmax)
return network
def __build_loss_train__fn__(self):
# create loss function
prediction = layers.get_output(self.net)
loss = objectives.categorical_crossentropy(prediction, self.__target_var__)
loss = loss.mean() + 1e-4 * regularization.regularize_network_params(self.net, regularization.l2)
val_acc = T.mean(T.eq(T.argmax(prediction, axis=1), self.__target_var__),dtype=theano.config.floatX)
# create parameter update expressions
params = layers.get_all_params(self.net, trainable=True)
self.eta = theano.shared(sp.array(sp.float32(0.05), dtype=sp.float32))
update_rule = updates.nesterov_momentum(loss, params, learning_rate=self.eta,
momentum=0.9)
# compile training function that updates parameters and returns training loss
self.__train_fn__ = theano.function([self.__input_var__,self.__target_var__], loss, updates=update_rule)
self.__predict_fn__ = theano.function([self.__input_var__], layers.get_output(self.net,deterministic=True))
self.__val_fn__ = theano.function([self.__input_var__,self.__target_var__], [loss,val_acc])
def __init__(self,nn_name,batch_size=1024,freeze=1,l_rates = sp.float32(0.05)*sp.ones(512,dtype=sp.float32),verbose = 1,subnet= None):
self.nn_name = nn_name
self.subnet = subnet
if subnet != None and freeze:
self.subnet.__freeze__()
self.batch_size = batch_size
self.verbose = verbose
self.l_rates = l_rates
self.__input_var__ = T.tensor4('X'+self.nn_name[:2])
self.__target_var__ = T.ivector('y+'+self.nn_name[:2])
self.max_epochs = self.l_rates.shape[0]
if self.nn_name == '12-net':
self.net = self.__build_12_net__()
elif self.nn_name == '24-net':
self.net = self.__build_24_net__()
elif self.nn_name == '48-net':
self.net = self.__build_48_net__()
elif self.nn_name =='12-calib_net':
self.net = self.__build_12_calib_net__()
elif self.nn_name =='24-calib_net':
self.net = self.__build_24_calib_net__()
elif self.nn_name =='48-calib_net':
self.net = self.__build_48_calib_net__()
self.__build_loss_train__fn__()
def iterate_minibatches(self,X, y, batchsize, shuffle=False):
assert len(X) == len(y)
if shuffle:
indices = sp.arange(len(X))
sp.random.shuffle(indices)
for start_idx in range(0, len(X) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield X[excerpt], y[excerpt]
def __freeze__(self):
for layer in layers.get_all_layers(self.net):
for param in layer.params:
layer.params[param].discard('trainable')
def train_on_hdd(self,rootdir = '12-net/'):
print(self.nn_name,'training start...','data folder',rootdir)
mean_acc = 0
total_time = 0
bpaths = util.get_files(rootdir = rootdir,fexpr = '*.npz')
m = len(bpaths)
r = len(util.load_from_npz(bpaths [-1]))
total_len = m * len(util.load_from_npz(bpaths [0]))
print('data input size is around',total_len)
for epoch in range(self.max_epochs):
self.eta.set_value(self.l_rates[epoch])
t_loss = 0
start = time()
for bpath in bpaths:
batch = util.load_from_npz(bpath)
items,labels = batch[:,0],batch[:,1]
items = sp.array([e.astype(sp.float32) for e in items])
labels = labels.astype(sp.int32)
X_train, X_val, y_train, y_val = train_test_split(items,labels,test_size = 0.25)
t_loss += self.__train_fn__ (X_train,y_train)
val_acc = 0
val_batches = 0
for xval,yval in self.iterate_minibatches(X_val,y_val,16):
err, acc = self.__val_fn__(xval, yval)
val_acc += acc
val_batches += 1
if self.verbose:
dur = time() - start
a0 = 100*(val_acc/val_batches)
mean_acc += a0
total_time += dur
                print("epoch %d out of %d \t loss %g \t accuracy %g \t time %d s \t" % (epoch + 1,self.max_epochs, t_loss / (total_len),a0,dur))
m = (total_time)//60
s = total_time - 60 * m
h = m//60
m = m - 60 * h
mean_acc = mean_acc / self.max_epochs
        print('Training end with total time %d h %d m %d s and mean accuracy over epochs %g' % (h,m,s,mean_acc))
def fit(self,X,y):
X = X.astype(sp.float32)
y = y.astype(sp.int32)
total_time = 0
mean_acc = 0
print(self.nn_name,'training start...')
for epoch in range(self.max_epochs):
self.eta.set_value(self.l_rates[epoch])
t_loss = 0
start = time()
for input_batch, target in self.iterate_minibatches(X,y,self.batch_size):
X_train, X_val, y_train, y_val = train_test_split(input_batch, target,test_size = 0.1)
t_loss += self.__train_fn__ (X_train,y_train)
val_acc = 0
val_batches = 0
for xval,yval in self.iterate_minibatches(X_val,y_val,16):
err, acc = self.__val_fn__(xval, yval)
val_acc += acc
val_batches += 1
if self.verbose:
dur = time() - start
a0 = 100*(val_acc/val_batches)
mean_acc += a0
total_time += dur
                print("epoch %d out of %d \t loss %g \t accuracy %g \t time %d s \t" % (epoch + 1,self.max_epochs, t_loss / (len(X)),100*(val_acc/val_batches),dur))
m = (total_time)//60
s = total_time - 60 * m
h = m//60
m = m - 60 * h
mean_acc = mean_acc / self.max_epochs
        print('Training end with total time %d h %d m %d s and mean accuracy over epochs %g' % (h,m,s,mean_acc))
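    # A minimal usage sketch (hypothetical shapes; the 12-net expects float32
    # inputs of shape (N, 3, 12, 12) and int {0, 1} labels):
    # nn = Cnn('12-net', batch_size=256, verbose=1)
    # nn.fit(X_train, y_train)
    # nn.save_model('12-net.npz')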
def predict(self,X):
proba = self.predict_proba(X=X)
y_pred = sp.argmax(proba,axis=1)
return sp.array(y_pred)
def predict_proba(self,X,X12 = None,X24 = None):
proba = []
N = max(1,self.batch_size)
for x_chunk in [X[i:i + N] for i in range(0, len(X), N)]:
chunk_proba = self.__predict_fn__(x_chunk)
for p in chunk_proba:
proba.append(p)
return sp.array(proba)
def __save_model_old__(self,model_name = nn_name+'.pickle'):
with open(model_name, 'wb') as f:
pickle.dump(self, f, -1)
def __load_model_old__(self,model_name = nn_name+'.pickle'):
with open(model_name, 'rb') as f:
model = pickle.load(f)
f.close()
return model
def save_model(self,model_name = nn_name+'.npz'):
sp.savez(model_name, *layers.get_all_param_values(self.net))
def load_model(self,model_name = nn_name+'.npz'):
print(model_name,'is loaded')
with sp.load(model_name) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
layers.set_all_param_values(self.net, param_values)
return self | [
"[email protected]"
]
| |
7ea5150e0d97ba74d7b4cc0f9c625fa76e762586 | 25bb769e523c104acfde204e1e86a6bd6ba1edd3 | /sdk/message.py | 46b5f5edf983bd42b142c7664607811c252816bc | [
"MIT"
]
| permissive | shekhirin/HarryBotter | 14e45b43ac2091e77c7c9fdb5fe796531c79e6b8 | e1977dbade44840288145f08aef60746ac66982b | refs/heads/master | 2021-05-31T23:03:21.666081 | 2016-07-01T08:57:00 | 2016-07-01T08:57:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from sdk import Attachment
class Message:
def __init__(self, recipient, message, notification_type=None):
if type(message) is str:
message = {'text': message}
elif type(message) is Attachment:
message = {'attachment': message.json}
self.json = {k: (v) for k, v in locals().items() if v is not None}
del self.json['self']
if len(self.json) == 0:
raise ValueError('Both text and attachment are None')
for k, v in self.json.items():
if hasattr(v, 'json'):
self.json[k] = v.json
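# A minimal usage sketch (hypothetical recipient id, illustration only):
# msg = Message(recipient={'id': '12345'}, message='Hello!')
# msg.json  # -> {'recipient': {'id': '12345'}, 'message': {'text': 'Hello!'}}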
| [
"[email protected]"
]
| |
5573f5982530159c643ba9fdd383fc08c091845e | 7a368de76d45b2253c7b498a97ea68e298446d65 | /crowdgezwitscher/blog/views_api.py | b9aafd1b282b3960ddbfc2970331cd970ef36b02 | [
"MIT"
]
| permissive | Strassengezwitscher/Strassengezwitscher | 2c902cb24e70a70b95bfa252ffd7c12a49722bae | afdd433acb35c1a554ba79464b744975de065151 | refs/heads/develop | 2022-12-19T01:44:24.243092 | 2020-02-12T10:35:18 | 2020-02-12T10:35:18 | 56,970,152 | 4 | 1 | MIT | 2022-12-10T17:55:44 | 2016-04-24T12:25:12 | Python | UTF-8 | Python | false | false | 419 | py | from rest_framework import generics
from blog.models import BlogEntry
from blog.serializers import BlogSerializer
class BlogAPIList(generics.ListAPIView):
queryset = BlogEntry.objects.filter(status=BlogEntry.PUBLISHED)
serializer_class = BlogSerializer
class BlogAPIDetail(generics.RetrieveAPIView):
queryset = BlogEntry.objects.filter(status=BlogEntry.PUBLISHED)
serializer_class = BlogSerializer
| [
"[email protected]"
]
| |
bb2ccf947d3c1273b14ce5a92873ebe6d910636b | c9e6e551067f0bb8f59ebd4b3d8594297374125c | /region/migrations/0001_initial.py | 2b218811cfc169b4e06f052b7cf1c1485abdeceb | []
| no_license | andyxmai/bondreach-backend-django | 9565a1b6d8cd8650185221589853efcc2bd18807 | 8db8fc6c8b4cbb2d1f49e469ede1b3de4d04c0b4 | refs/heads/master | 2021-03-24T13:11:36.581046 | 2017-04-12T22:09:12 | 2017-04-12T22:09:12 | 84,764,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-12 23:45
from __future__ import unicode_literals
from django.db import migrations, models
import utils.dates
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Region',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(default=utils.dates.utcnow)),
('name', models.CharField(max_length=200)),
],
),
]
| [
"[email protected]"
]
| |
82580bba4d72b48263cdfe60efcac80622837b76 | 4a132226a1039aca3bb96b6d482806f61210562c | /cars/urls.py | ce1630b34d6e7d18b3f93a667315a9da33f692ed | []
| no_license | PietrzakJustyna/cars_api | e6bc373d676e06088b72625391b3a05be0f65abe | 7c1be387364ae3ab001e2a62384195cd1898e83b | refs/heads/master | 2022-12-03T16:43:47.496027 | 2020-08-20T12:55:54 | 2020-08-20T12:55:54 | 288,796,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | """cars URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from carsapi.views import CarsView, PopularCarsView, Rate
urlpatterns = [
path('admin/', admin.site.urls),
path('cars/', CarsView.as_view(), name='cars'),
path('popular/', PopularCarsView.as_view(), name='popular'),
path('rate/', Rate.as_view(), name='rate')
]
| [
"[email protected]"
]
| |
18c99b97419a151d0e42d5246e38ea853e23bb2e | d6fc3f78bdb938b9eb02ad473b914829bff2fc9b | /Drawing/bar_plot.py | 7bf9e9117af5dddf141ae967f1779a6d64b4545f | []
| no_license | Doctor-Feng/Machine_Learning | ec0e796325dfd4ee4864669b499c0b3f5a12008e | ff8a12d7e270bddbe3d5d362c3b63b25802516db | refs/heads/master | 2020-06-22T16:57:08.260868 | 2019-07-19T10:07:23 | 2019-07-19T10:07:23 | 101,947,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import numpy as np
import matplotlib.pyplot as plt
plt.figure(1)
index = [0.3,0.8]
plt.bar(index,[0.212,0.002],0.25,alpha = 0.8,color = 'b')
plt.ylabel('time(ms)')
plt.title('')
plt.xticks( np.add(index,0.5 * 0.25),('train','test'))
plt.legend()
#plt.savefig('wind_Power_Usage_Diagram.png',dpi = 600)
plt.show()
| [
"[email protected]"
]
| |
c48387129d1b598576966c6cc19fb3e7bd4127ea | 4aee1b5faac38af7429d5a4e518f971b5e966cf6 | /MaximizationBias_Example6.7/MaximizationBias_QLearning_Example6.7.py | dd0b4ca11110dc48c9cac60cc593679f86fc8fd1 | []
| no_license | themagicsketchbook/RL_Sutton-Barto_Solutions | 5d35ea4524f06e7fc0002f9be861651fbb3acf04 | 9e0cc8696357c6f3cdbc4a662e5c6a062a5857ff | refs/heads/master | 2022-01-12T04:13:19.583974 | 2019-07-16T13:18:51 | 2019-07-16T13:18:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,507 | py | import numpy as np
import matplotlib.pyplot as plt
class simulation:
def __init__(self,action_num,method):
self.action_num = action_num
self._method = method
self.ACTIONS = [[0,1],[i for i in range(action_num)]]
if self._method == 'Q':
self.Q_values = [[0.0, 0.0], [0.0 for i in range(action_num)]]
else:
self.Q1_values = [[0.0,0.0],[0.0 for i in range(action_num)]]
self.Q2_values = [[0.0, 0.0], [0.0 for i in range(action_num)]]
def choose_action(self,state):
e = np.random.random()
if e < EPSILON:
action = np.random.choice(self.ACTIONS[state])
else:
if self._method == 'Q':
action = np.random.choice(np.flatnonzero(self.Q_values[state] == np.max(self.Q_values[state])))
else:
action_values = np.array(self.Q1_values[state])+np.array(self.Q2_values[state])
action = np.random.choice(np.flatnonzero(action_values == np.max(action_values)))
return action
def determine_transition(self,cur_state,action):
next_state = None
ended = True
if cur_state == 0:
reward = 0
if action == 0:
next_state = 1
ended = False
if cur_state == 1:
reward = np.random.normal(-0.1, 1)
return next_state,reward,ended
def update_QValues(self,curr_state,action,reward,next_state):
if self._method == 'Q':
if next_state == None:
self.Q_values[curr_state][action] += ALFA * (reward - self.Q_values[curr_state][action])
else:
max_nextQValue = np.max(self.Q_values[next_state])
self.Q_values[curr_state][action] += ALFA * (
reward + GAMMA * max_nextQValue - self.Q_values[curr_state][action])
else:
e = np.random.random()
if e<0.5:
if next_state == None:
self.Q1_values[curr_state][action]+=ALFA*(reward-self.Q1_values[curr_state][action])
else:
max_nextQValue = self.Q2_values[next_state][np.argmax(self.Q1_values[next_state])]
self.Q1_values[curr_state][action] += ALFA * (reward + GAMMA*max_nextQValue- self.Q1_values[curr_state][action])
else:
if next_state == None:
self.Q2_values[curr_state][action]+=ALFA*(reward-self.Q2_values[curr_state][action])
else:
max_nextQValue = self.Q1_values[next_state][np.argmax(self.Q2_values[next_state])]
self.Q2_values[curr_state][action] += ALFA * (reward + GAMMA*max_nextQValue- self.Q2_values[curr_state][action])
def run_simulation(self):
episode_direction = []
for episode in range(EPISODES):
curr_state = 0
while True:
action = self.choose_action(curr_state)
next_state, reward, episode_ended= self.determine_transition(curr_state, action)
self.update_QValues(curr_state,action,reward,next_state)
if episode_ended:
episode_direction.append(1 if curr_state == 1 else 0)
break
curr_state = next_state
return 100*np.divide(np.cumsum(episode_direction),np.arange(1,EPISODES+1))
EPSILON = 0.1
B_ACTION_CHOICE = [1,2,5,10,100]
ALFA = 0.1
GAMMA = 1
EPISODES = 300
RUNS = 10000
Percentage_left_actions = np.zeros((len(B_ACTION_CHOICE),EPISODES))
method = 'DQ' # Use Q if using just Q and use 'DQ' if using Double-Q
for run in range(RUNS):
if run in np.arange(0,RUNS,RUNS/10):
print('Run number = {}'.format(run))
for i,action_num in enumerate(B_ACTION_CHOICE):
Sim = simulation(action_num,method)
Percentage_left_actions[i,:]+=Sim.run_simulation()
Percentage_left_actions/=RUNS
fig = plt.figure(figsize=(8,10))
Actions_Plot = plt.subplot()
for i,action_choice in enumerate(B_ACTION_CHOICE):
Actions_Plot.plot(np.arange(1,EPISODES+1),Percentage_left_actions[i],label = '{}'.format(action_choice))
Actions_Plot.set_xticks([1,100,200,300])
Actions_Plot.set_yticks([0,5,25,50,75,100])
Actions_Plot.set_ylabel('% left actions from A')
Actions_Plot.set_xlabel('Episodes')
Actions_Plot.legend(title = 'Number of actions in B')
plt.show()
| [
"[email protected]"
]
| |
d89c1bbae2ca69a1855e7e0c6509bfeadfa76bc7 | d5dd127b213c9a4b47cf57f8a9b7b9528856842b | /killerbee/killerbee/build/scripts-2.7/zbdump | 4f081e72b003b73ab9113fc227af8a66e557e0bb | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | nofarmazor/Soteria | 9f4a770c6f6d894111e58cbb3aaa41a675d4982f | ef9295cb69c37980a841a83ba07f5769bcc64e9f | refs/heads/master | 2016-09-06T14:44:00.758903 | 2015-04-22T11:47:34 | 2015-04-22T11:47:34 | 31,611,446 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | #!/sw/bin/python2.7
'''
zbdump - a tcpdump-like tool for ZigBee/IEEE 802.15.4 networks
Compatible with Wireshark 1.1.2 and later ([email protected])
The -p flag adds CACE PPI headers to the PCAP ([email protected])
'''
import sys
import signal
import argparse
from killerbee import *
def interrupt(signum, frame):
global packetcount
global kb
global pd, dt
kb.sniffer_off()
kb.close()
if pd:
pd.close()
if dt:
dt.close()
print("{0} packets captured".format(packetcount))
sys.exit(0)
# PcapDumper, only used if -w is specified
pd = None
# DainTreeDumper, only used if -W is specified
dt = None
# Global
packetcount = 0
# Command-line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-i', '--iface', '--dev', action='store', dest='devstring')
#parser.add_argument('-g', '--gps', '--ignore', action='append', dest='ignore')
parser.add_argument('-w', '--pcapfile', action='store')
parser.add_argument('-W', '--dsnafile', action='store')
parser.add_argument('-p', '--ppi', action='store_true')
parser.add_argument('-c', '-f', '--channel', action='store', type=int, default=None)
parser.add_argument('-n', '--count', action='store', type=int, default=-1)
parser.add_argument('-D', action='store_true', dest='showdev')
args = parser.parse_args()
if args.showdev:
show_dev()
sys.exit(0)
if args.channel == None:
print >>sys.stderr, "ERROR: Must specify a channel."
sys.exit(1)
if args.pcapfile is None and args.dsnafile is None:
print >>sys.stderr, "ERROR: Must specify a savefile with -w (libpcap) or -W (Daintree SNA)"
sys.exit(1)
elif args.pcapfile is not None:
pd = PcapDumper(DLT_IEEE802_15_4, args.pcapfile, ppi=args.ppi)
elif args.dsnafile is not None:
dt = DainTreeDumper(args.dsnafile)
kb = KillerBee(device=args.devstring)
signal.signal(signal.SIGINT, interrupt)
if not kb.is_valid_channel(args.channel):
print >>sys.stderr, "ERROR: Must specify a valid IEEE 802.15.4 channel for the selected device."
kb.close()
sys.exit(1)
kb.set_channel(args.channel)
kb.sniffer_on()
print("zbdump: listening on \'{0}\', link-type DLT_IEEE802_15_4, capture size 127 bytes".format(kb.get_dev_info()[0]))
rf_freq_mhz = (args.channel - 10) * 5 + 2400
while args.count != packetcount:
packet = kb.pnext()
# packet[1] is True if CRC is correct, check removed to have promiscous capture regardless of CRC
if packet != None: # and packet[1]:
packetcount+=1
if pd:
pd.pcap_dump(packet['bytes'], ant_dbm=packet['dbm'], freq_mhz=rf_freq_mhz)
if dt:
dt.pwrite(packet['bytes'])
kb.sniffer_off()
kb.close()
if pd:
pd.close()
if dt:
dt.close()
print("{0} packets captured".format(packetcount))
| [
"[email protected]"
]
| ||
a65604c0deab61126203e90bd4b92e397c7b27c7 | 73c9537b3e2dd9c57e581d474b9e2daf7a8fb02a | /petccenv/lib/python3.4/site-packages/django_summernote/__init__.py | c63655d2b511e883b3efacf6fa71393ea01972ab | []
| no_license | pviniciusm/petcc | 8f6ec2966729051f11b482c4c7ed522df3f920ba | 30ccddce6d0e39ccea492ac73b2ddca855c63cee | refs/heads/master | 2021-01-21T13:29:52.835434 | 2016-04-23T18:06:07 | 2016-04-23T18:06:07 | 54,607,007 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | version_info = (0, 8, 2)
__version__ = version = '.'.join(map(str, version_info))
__project__ = PROJECT = 'django-summernote'
__author__ = AUTHOR = "Park Hyunwoo <[email protected]>"
default_app_config = 'django_summernote.apps.DjangoSummernoteConfig'
| [
"[email protected]"
]
| |
12eb00ffdab31b071abf94283d7fc51c9a985f4b | b0a21635d93a2bb7cdaf7083b0081147aa0bfec2 | /binary_search.py | 644c531840ffb6492f5c59c49a61c7328aa5b5c0 | []
| no_license | iyuroch/algo_labs | 990637748c2d79233cad2302a932c96589d29d10 | 2fb19487ee7a39413b2f3d12bb393ef30897294c | refs/heads/master | 2020-04-09T02:36:00.833364 | 2018-12-19T13:25:41 | 2018-12-19T13:25:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
def search_el(arr, left_idx, right_idx, el):
    if left_idx > right_idx:  # empty search range: the element is absent
        return False
    middle_idx = (left_idx + right_idx) // 2  # integer division so the index stays an int
    if arr[middle_idx] == el:
        return middle_idx
    if left_idx == right_idx:
        return False
    elif arr[middle_idx] < el:
        return search_el(arr, middle_idx + 1, right_idx, el)
    else:
        return search_el(arr, left_idx, middle_idx - 1, el)

def binary_search(arr, el):
    left_idx = 0
    right_idx = len(arr) - 1
    return search_el(arr, left_idx, right_idx, el)

if __name__ == "__main__":
    arr = [1, 2, 3, 4, 5, 6, 7, 8]
    for el in [7, 2, 0, 9, 8, 1]:  # values both present in and absent from arr
        idx = binary_search(arr, el)
        if idx is False:
            print(el, "not found")
        else:
            print(idx, arr[idx])
"[email protected]"
]
| |
4113853c20a7674a37b502b73ee6d10f9288b8e6 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/eventgrid/v20200101preview/get_event_subscription.py | 80b0097a55e3921372b7d0b2fecda5f6ee7bbc0b | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 8,537 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetEventSubscriptionResult',
'AwaitableGetEventSubscriptionResult',
'get_event_subscription',
]
@pulumi.output_type
class GetEventSubscriptionResult:
"""
Event Subscription
"""
def __init__(__self__, dead_letter_destination=None, destination=None, event_delivery_schema=None, expiration_time_utc=None, filter=None, id=None, labels=None, name=None, provisioning_state=None, retry_policy=None, topic=None, type=None):
if dead_letter_destination and not isinstance(dead_letter_destination, dict):
raise TypeError("Expected argument 'dead_letter_destination' to be a dict")
pulumi.set(__self__, "dead_letter_destination", dead_letter_destination)
if destination and not isinstance(destination, dict):
raise TypeError("Expected argument 'destination' to be a dict")
pulumi.set(__self__, "destination", destination)
if event_delivery_schema and not isinstance(event_delivery_schema, str):
raise TypeError("Expected argument 'event_delivery_schema' to be a str")
pulumi.set(__self__, "event_delivery_schema", event_delivery_schema)
if expiration_time_utc and not isinstance(expiration_time_utc, str):
raise TypeError("Expected argument 'expiration_time_utc' to be a str")
pulumi.set(__self__, "expiration_time_utc", expiration_time_utc)
if filter and not isinstance(filter, dict):
raise TypeError("Expected argument 'filter' to be a dict")
pulumi.set(__self__, "filter", filter)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if labels and not isinstance(labels, list):
raise TypeError("Expected argument 'labels' to be a list")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if retry_policy and not isinstance(retry_policy, dict):
raise TypeError("Expected argument 'retry_policy' to be a dict")
pulumi.set(__self__, "retry_policy", retry_policy)
if topic and not isinstance(topic, str):
raise TypeError("Expected argument 'topic' to be a str")
pulumi.set(__self__, "topic", topic)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="deadLetterDestination")
def dead_letter_destination(self) -> Optional['outputs.StorageBlobDeadLetterDestinationResponse']:
"""
The DeadLetter destination of the event subscription.
"""
return pulumi.get(self, "dead_letter_destination")
@property
@pulumi.getter
def destination(self) -> Optional[Any]:
"""
Information about the destination where events have to be delivered for the event subscription.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="eventDeliverySchema")
def event_delivery_schema(self) -> Optional[str]:
"""
The event delivery schema for the event subscription.
"""
return pulumi.get(self, "event_delivery_schema")
@property
@pulumi.getter(name="expirationTimeUtc")
def expiration_time_utc(self) -> Optional[str]:
"""
Expiration time of the event subscription.
"""
return pulumi.get(self, "expiration_time_utc")
@property
@pulumi.getter
def filter(self) -> Optional['outputs.EventSubscriptionFilterResponse']:
"""
Information about the filter for the event subscription.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier of the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def labels(self) -> Optional[Sequence[str]]:
"""
List of user defined labels.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the event subscription.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="retryPolicy")
def retry_policy(self) -> Optional['outputs.RetryPolicyResponse']:
"""
The retry policy for events. This can be used to configure maximum number of delivery attempts and time to live for events.
"""
return pulumi.get(self, "retry_policy")
@property
@pulumi.getter
def topic(self) -> str:
"""
Name of the topic of the event subscription.
"""
return pulumi.get(self, "topic")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource
"""
return pulumi.get(self, "type")
class AwaitableGetEventSubscriptionResult(GetEventSubscriptionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventSubscriptionResult(
dead_letter_destination=self.dead_letter_destination,
destination=self.destination,
event_delivery_schema=self.event_delivery_schema,
expiration_time_utc=self.expiration_time_utc,
filter=self.filter,
id=self.id,
labels=self.labels,
name=self.name,
provisioning_state=self.provisioning_state,
retry_policy=self.retry_policy,
topic=self.topic,
type=self.type)
def get_event_subscription(event_subscription_name: Optional[str] = None,
scope: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventSubscriptionResult:
"""
Event Subscription
:param str event_subscription_name: Name of the event subscription
:param str scope: The scope of the event subscription. The scope can be a subscription, or a resource group, or a top level resource belonging to a resource provider namespace, or an EventGrid topic. For example, use '/subscriptions/{subscriptionId}/' for a subscription, '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for a resource group, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}' for a resource, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}' for an EventGrid topic.
"""
__args__ = dict()
__args__['eventSubscriptionName'] = event_subscription_name
__args__['scope'] = scope
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:eventgrid/v20200101preview:getEventSubscription', __args__, opts=opts, typ=GetEventSubscriptionResult).value
return AwaitableGetEventSubscriptionResult(
dead_letter_destination=__ret__.dead_letter_destination,
destination=__ret__.destination,
event_delivery_schema=__ret__.event_delivery_schema,
expiration_time_utc=__ret__.expiration_time_utc,
filter=__ret__.filter,
id=__ret__.id,
labels=__ret__.labels,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
retry_policy=__ret__.retry_policy,
topic=__ret__.topic,
type=__ret__.type)
| [
"[email protected]"
]
| |
e9936e19970a668d639cc6a18072df2cadc2ce7a | e6f5325ab05070f5648c531660fd9a51739c1765 | /app/contribution/views.py | 0ed7f429962cdecaedd63196af66b6760b7da071 | [
"MIT"
]
| permissive | vitsyrovat/conference | 4799237db37e802c6c95ef12c6cb11e1c64c648b | 47315f79ab2117fcd259662c31a47c99c1421051 | refs/heads/master | 2022-07-22T00:51:41.819231 | 2020-05-11T18:20:20 | 2020-05-11T18:20:20 | 257,536,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | from rest_framework.viewsets import ModelViewSet
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
# Create your views here.
from contribution import serializers
from core.models import Contribution
class ContributionViewSet(ModelViewSet):
"""Manage Contributions in the db"""
serializer_class = serializers.ContributionSerializer
queryset = Contribution.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
return self.queryset.filter(user=self.request.user)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
# class CreateContribution(CreateAPIView):
# serializer_class = ContributionSerializer
| [
"[email protected]"
]
| |
b539a324c93a3ce5b5b5feedc5d1287601d63ffd | 0b4957de738dd05f964ea838016b4b811feca970 | /tests/utils/test_utils_shell.py | fdae13b81ae7f8e06716a3e3f09b9ce5f7a76e6a | [
"MIT",
"Apache-2.0"
]
| permissive | bossjones/ultron8 | bdb5db72ba58b80645ae417cdf97287cfadd325d | 09d69c788110becadb9bfaa7b3d2a2046f6b5a1c | refs/heads/master | 2023-01-13T06:52:45.679582 | 2023-01-03T22:25:54 | 2023-01-03T22:25:54 | 187,934,920 | 0 | 0 | Apache-2.0 | 2023-01-03T22:25:56 | 2019-05-22T00:44:03 | Python | UTF-8 | Python | false | false | 1,008 | py | """Test shell utils"""
# pylint: disable=protected-access
import logging
import pytest
from six.moves import zip
from ultron8.utils.shell import quote_unix
logger = logging.getLogger(__name__)
@pytest.mark.utilsonly
@pytest.mark.unittest
class TestShellUtilsTestCase:
def test_quote_unix(self):
arguments = ["foo", "foo bar", "foo1 bar1", '"foo"', '"foo" "bar"', "'foo bar'"]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
""",
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
assert actual_value == expected_value.strip()
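
# Behavior exercised above, in short: bare words pass through unchanged, while values
# containing spaces or quotes come back wrapped in single quotes, e.g.
#   quote_unix("foo bar")  ->  "'foo bar'"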
| [
"[email protected]"
]
| |
2c06844c17a93585bc26348c15831ffd9eb9962a | 5788e3f1c0216a2b5126fa8d17f9a808f7bef457 | /test_community/test_community/urls.py | 8b6b9c7b7b43a340174e2216c69f1a154008d3af | []
| no_license | ChoiJunhee/2020-2_Django | 85074efc1cd8fb5739bcf03e71ad993da45a134e | 11c941ca993162d831c37c96dd6ba9e0e12b3dee | refs/heads/master | 2023-01-10T20:05:55.739760 | 2020-11-07T09:37:41 | 2020-11-07T09:37:41 | 297,317,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | """test_community URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from user.views import home
urlpatterns = [
path('admin/', admin.site.urls),
path('user/', include('user.urls')),
path('', home),
]
| [
"[email protected]"
]
| |
c5a234c511cc301fc2e43c3d0beaec4deaf478e7 | 9bbaf9115d404cd4afe428858310c8fab1155711 | /Main.py | e9bd19b68191f97ea0f694c3290d1912a90fa137 | [
"MIT"
]
| permissive | iamjb17/DSaA2 | d752dadfe6d716a9491351490076dbdbc126fea7 | c3e97ca0746341beaf0f8c5e27c6c82fca0cf173 | refs/heads/master | 2023-02-14T01:48:53.285454 | 2021-01-14T23:14:03 | 2021-01-14T23:14:03 | 324,405,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # Authored By: Jessie Burton #001356971
import RunRoute
import UI
# Created using PyCharm Community Edition 2020.1.3 x64 on a Lenovo Laptop Running Windows 10 on AMD hardware
# only run this code if I am running as the main entry point of the application
if __name__ == '__main__':
# * Driver Main class -- the dominant time complexity is O(n^3), worst case *
class Main:
# dicts are the primary data structure because of their O(1) lookups and their ability to resize to fit the data
# the main algorithm used is a self-adjusting greedy algorithm
# O(n^2) + O(n^3)
delivery_data = RunRoute.run_route()
# O(n)
UI.run_ui(delivery_data)
| [
"[email protected]"
]
| |
dcbdf42869d55f83f734a2914900efafc4a859d0 | 39e66a296f1d83472d02c15c9b8975dcc137c57a | /crawling_indeed.py | de79e280a5aeae28dbd22b063d8e540e996932ee | []
| no_license | nvquang/goodwin | fe538d620e237d4c1ff642bb728b7f61dbab310c | d050ec537d1100ee1670143fc2d8b5c281671e56 | refs/heads/master | 2020-12-30T12:23:41.427173 | 2017-05-21T14:37:35 | 2017-05-21T14:37:35 | 91,432,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,101 | py | import urllib.request
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
def get_text(tag):
if tag:
return tag.text
return None
def crawling_indeed():
links_df = pd.read_csv("indeed_links.csv")
hdr = {'User-Agent': 'Mozilla/5.0'}
columns = ['company_name', 'nb_reviews', 'nb_jobs', 'overall_rating',
'work_life_balance', 'compensation_benefits', 'job_security', 'management', 'culture']
result_pd = pd.DataFrame(columns=columns)
for index, row in links_df.iterrows():
link = row['link']
req = urllib.request.Request(link, headers=hdr)
response = urlopen(req)
soup = BeautifulSoup(response, 'html.parser')
company_name = get_text(soup.select_one(".cmp-company-name"))
nb_reviews = get_text(
soup.select_one("#cmp-menu-container > ul > li:nth-of-type(2) > a > div"))
nb_jobs = get_text(
soup.select_one("#cmp-menu-container > ul > li:nth-of-type(5) > a > div"))
overall_rating = get_text(
soup.select_one(".cmp-average-rating"))
work_life_balance = get_text(
soup.select_one("#cmp-reviews-attributes > dd:nth-of-type(1) > span.cmp-star-rating"))
compensation_benefits = get_text(
soup.select_one("#cmp-reviews-attributes > dd:nth-of-type(2) > span.cmp-star-rating"))
job_security = get_text(
soup.select_one("#cmp-reviews-attributes > dd:nth-of-type(3) > span.cmp-star-rating"))
management = get_text(
soup.select_one("#cmp-reviews-attributes > dd:nth-of-type(4) > span.cmp-star-rating"))
culture = get_text(
soup.select_one("#cmp-reviews-attributes > dd:nth-of-type(5) > span.cmp-star-rating"))
result_pd.loc[index] = [company_name, nb_reviews, nb_jobs, overall_rating,
work_life_balance, compensation_benefits,
job_security, management, culture]
result_pd.to_csv("indeed_crawling_result.csv", index=False)
crawling_indeed()
| [
"[email protected]"
]
| |
7f9fb102af433872f71ba987ff70a370de785e99 | 5e79820bf510e4bd5a5be1de22246cf9e54ecf30 | /logestic_regression/norm.py | cdc913cfa8eb98770b039869b9d5b1e49abd9d34 | []
| no_license | bojunf/machine-learning-project | 08258bb7675a3896e8df32d8ea1099baae2995d4 | f4b12ce76f26408b1707ca2b8116b58042780a8a | refs/heads/master | 2021-08-14T12:28:06.153629 | 2017-11-15T17:54:31 | 2017-11-15T17:54:31 | 110,864,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | import sys
import numpy as np
ftrain = str(sys.argv[1])
ftest = str(sys.argv[2])
fval = str(sys.argv[3]) # input file names
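# usage sketch (assumed from the argv handling above): python norm.py train.csv test.csv val.csv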
traindata = []
with open('{0}'.format(ftrain), 'r') as f: # read training data
nline = 0
for line in f.readlines():
nline = nline + 1
arr = line.replace('\n', '').split(',')
traindata.append(map(int, arr))
traindata = np.array(traindata, dtype=float)  # float dtype so the in-place normalization below is not truncated to ints
mean, std = [], []
nfeat = len(traindata[0])
for i in range(nfeat): # find mean and std for each features of all training data
mean.append(np.mean(traindata[:, i]))
std.append(np.std(traindata[:, i]))
testdata, valdata = [], []
normtrain, normtest, normval = [], [], []
with open('{0}'.format(ftest), 'r') as f: # read test data
nline = 0
for line in f.readlines():
nline = nline + 1
arr = line.replace('\n', '').split(',')
testdata.append(map(int, arr))
with open('{0}'.format(fval), 'r') as f: # read validation data
nline = 0
for line in f.readlines():
nline = nline + 1
arr = line.replace('\n', '').split(',')
valdata.append(map(int, arr))
testdata = np.array(testdata, dtype=float)  # float dtype, as for the training data
valdata = np.array(valdata, dtype=float)
for i in range(nfeat): # normalize data based on mean and std of training data
if (std[i] != 0.0):
traindata[:, i] = (traindata[:, i] - mean[i]) / float(std[i])
testdata[:, i] = (testdata[:, i] - mean[i]) / float(std[i])
valdata[:, i] = (valdata[:, i] - mean[i]) / float(std[i])
np.savetxt('norm_train.txt', traindata)
np.savetxt('norm_test.txt', testdata)
np.savetxt('norm_val.txt', valdata)
np.savetxt('mean.txt', mean)
np.savetxt('std.txt', std) # save normalized data into files
| [
"[email protected]"
]
| |
f179eade30c3bd9c2fd92c1dcafbdf2683622c47 | 635cb7fb75048f9de7b95b48d1f59de68f9b3368 | /R01/sortowanie_obiektów_bez_wbudowanej_obsługi_porównań/example.py | 7641bed7b8787e11f23a4ef78d74ba00e90b1ae8 | []
| no_license | anpadoma/python_receptury3 | 9e889ac503e48eb62160050eecfdc4a64072c184 | c761f2c36707785a8a70bdaccebd7533c76dee21 | refs/heads/master | 2021-01-22T14:38:34.718999 | 2014-01-31T22:09:44 | 2014-01-31T22:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | from operator import attrgetter
class User:
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
# Example
users = [User(23), User(3), User(99)]
print(users)
# Sort by the user_id field
print(sorted(users, key=attrgetter('user_id')))
| [
"[email protected]"
]
| |
36368975cba55c5f7af99945aefefc4387fc4ba2 | 36166fb6929110b695523e1e569836412b557b22 | /dynamic/dispatch.py | a550cab81ae52aae1bc200e4fbedc6a1f99fce0d | []
| no_license | Valt25/vanillaPythonWebApp | e33536454eb9353050e4008b26cb89772178193d | 558a9dbe34a730403304bf1b8ce186553878454e | refs/heads/master | 2020-06-15T08:13:27.598747 | 2019-07-04T13:22:18 | 2019-07-04T13:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | from dynamic.urls import urls
from dynamic.utils import Response
from utils import url_not_found
def get_view_func_from_urls(environ, urls):
for url in urls:
if environ['PATH_INFO'].split('?')[0].strip('/') == url[0] and environ['REQUEST_METHOD'] == url[1]:
return url[2]
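
# `urls` is assumed to be a list of (path, http_method, view_func) tuples, e.g. (hypothetical views):
#   urls = [('posts', 'GET', list_posts), ('posts', 'POST', create_post)]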
def generate_wsgi_response(start_response, response: Response):
    start_response(str(response.status), response.headers)
    return [bytes(str(response.data), 'utf-8')]  # WSGI expects an iterable of bytestrings
def dispatch(environ, start_response):
view_func = get_view_func_from_urls(environ, urls)
if view_func:
response = view_func(environ)
return generate_wsgi_response(start_response, response)
else:
return url_not_found(start_response, environ['REQUEST_URI'])
| [
"[email protected]"
]
| |
2df3e4723de73e9f011ea8c4dbbaf3d9347995df | b83b0cd0ceeaed79afbc8203dfc38336553b324f | /Python/loops/starpattern.py | 82a8545dbc27eb7076f3703cfb3c384bd50f5b43 | []
| no_license | SBartonJr3/ClassBarton | 2576bd3423676698a61185a25835c1ed2fdeb9c6 | 514093ec6e5d8990ba3452b2ff8e2b0c76259ee6 | refs/heads/master | 2020-04-27T18:24:39.005771 | 2019-06-10T17:21:29 | 2019-06-10T17:21:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | #Stephen Barton Jr
#Python Programming, star pattern
#22 APR 2019
def main():
for i in range(1,6):
for j in range(1,i+1):
print("*", end = " ")
print()
main()
| [
"[email protected]"
]
| |
8f98de03e4669f0cea77fa4b917683db4d9be640 | 1f256bf20e68770c1a74f7e41ef6730623db0c74 | /location_management/migrations/0001_initial.py | 4015d233eb0c002e111dfd9acab22eacef6e3268 | [
"MIT"
]
| permissive | davtoh/enterprise-website | 380ea32b730f16b7157e59ca0dc1e86d1f10e4a8 | 00b6c42cd6cb01517c152b9ffce9cfb56744703d | refs/heads/master | 2021-06-05T09:20:27.721789 | 2021-05-05T04:58:04 | 2021-05-05T05:14:05 | 141,315,681 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | # Generated by Django 2.0.6 on 2018-07-05 23:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cities',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('latitude', models.DecimalField(decimal_places=8, max_digits=10)),
('longitude', models.DecimalField(decimal_places=8, max_digits=11)),
],
),
migrations.CreateModel(
name='Countries',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('code', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='States',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('code', models.CharField(max_length=10)),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='location_management.Countries')),
],
),
migrations.AddField(
model_name='cities',
name='country',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='location_management.Countries'),
),
migrations.AddField(
model_name='cities',
name='state',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='location_management.States'),
),
]
| [
"[email protected]"
]
| |
3f7a3592ecb43458823f4a89ef52c6dcfbfef71c | 70d4ef0863906b3ca64f986075cd35b8412b871e | /blueapps/account/sites/default.py | e996ac9936aeb25beb19699d619290f60b693d5c | [
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | selinagyan/bk-sops | 72db0ac33d9c307f51769e4baa181ceb8e1b279e | 39e63e66416f688e6a3641ea8e975d414ece6b04 | refs/heads/master | 2020-05-07T16:44:33.312442 | 2019-04-11T02:09:25 | 2019-04-11T02:09:25 | 180,696,241 | 0 | 0 | null | 2019-04-11T02:07:11 | 2019-04-11T02:07:10 | null | UTF-8 | Python | false | false | 2,955 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
class ConfFixture(object):
"""
Summary of the login module's project variables
"""
#################
# Browser parameters #
#################
# Login module; the options are the modules under the components directory, e.g. qcloud_tlogin
BACKEND_TYPE = None
# User authentication backend, e.g. qcloud_tlogin.backends.QPtloginBackend
USER_BACKEND = None
# Login-required middleware, e.g. qcloud_tlogin.middlewares.LoginRequiredMiddleware
LOGIN_REQUIRED_MIDDLEWARE = None
# User model, e.g. qcloud_tlogin.models.UserProxy
USER_MODEL = None
# Login platform popup URL, e.g. http://xxxx.com/accounts/login_page/
CONSOLE_LOGIN_URL = None
# Login platform URL, e.g. http://login.o.qcloud.com
LOGIN_URL = None
# Embeddable login platform URL (usable in popups and iframes), e.g. http://xxx.com/plain/
LOGIN_PLAIN_URL = None
# Whether an embeddable unified login page is provided
HAS_PLAIN = True
# Whether to add a cross-domain prefix marker when redirecting to the login platform
# http://xxx.com/login/?c_url={CROSS_PREFIX}http%3A//xxx.com%3A8000/
ADD_CROSS_PREFIX = True
CROSS_PREFIX = ''
# Whether to append the APP_CODE when redirecting to the login platform
# http://xxx.com/login/?c_url=http%3A//xxx.com%3A8000/&app_code=xxx
ADD_APP_CODE = True
# http://xxx.com/login/?c_url=http%3A//xxx.com%3A8000/&{APP_KEY}=xxx
APP_KEY = 'app_code'
SETTINGS_APP_KEY = 'APP_CODE'
# Name of the callback parameter used when redirecting to the login platform
# http://xxx.com/login/?{C_URL}=http%3A//xxx.com%3A8000/
C_URL = 'c_url'
# Size of the embedded login platform page; determines the popup size the frontend uses
IFRAME_HEIGHT = 490
IFRAME_WIDTH = 460
###############
# WeChat parameters #
###############
# Login module: weixin
WEIXIN_BACKEND_TYPE = None
# User authentication middleware, e.g. bk_ticket.middlewares.LoginRequiredMiddleware
WEIXIN_MIDDLEWARE = None
# User authentication backend, e.g. bk_ticket.backends.TicketBackend
WEIXIN_BACKEND = None
# User info URL, e.g. http://xxx.com/user/weixin/get_user_info/
WEIXIN_INFO_URL = None
# User OAuth authorization URL, e.g. https://xxx.com/connect/oauth2/authorize
WEIXIN_OAUTH_URL = None
# Application ID on the WeChat side, e.g. 'xxxx'
WEIXIN_APP_ID = None
| [
"[email protected]"
]
| |
1f1e8010afc725d867016c4c3a0daab4b420b78b | 29801a4a09e6c97061e67e21fd7600317d86bd29 | /TIY2_4.py | 22ce625692bbaadfa8eded661600954ebfdcf6d6 | []
| no_license | EthanPassinoFWCS/Chapter2Anthis | 3fe1f0b501a67202686879e7b2ffdad196e02a44 | 1f21cee7b09979d47878dc76c891ca7a667fbedf | refs/heads/main | 2023-01-04T11:21:28.922019 | 2020-11-02T20:12:26 | 2020-11-02T20:12:26 | 309,483,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | name = "John Smith"
print(name.lower())
print(name.upper())
print(name.title())
| [
"[email protected]"
]
| |
a811597869c088ec4c17da0719f6b9a3e9e8a9b8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_46/83.py | 728c1c577aee018ba646a8511a4f62a6e9af6751 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | import psyco
psyco.full()
class memoize:
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args):
if args not in self.memoized:
self.memoized[args] = self.function(*args)
return self.memoized[args]
def clear(self):
self.memoized = {}
def alloc(size, default = 0): return [default] * size
def alloc2(r, c, default = 0): return [alloc(c, default) for _ in range(r)]  # fresh row per index, not r aliases of one row
def isset(a, bit): return ((a >> bit) & 1) > 0
def dig(c): return ord(c) - 48
def abs(x):
if x<0: return -x;
return x
def area(x1, y1, x2, y2, x3, y3):
return abs((x3-x1)*(y2-y1) - (x2-x1)*(y3-y1))/2
def bisection(f, lo, hi):
"""
finds the integer x where f(x)=0.
assumes f is monotounous.
"""
while lo < hi:
mid = (lo+hi)//2
midval = f(mid)
if midval < 0:
lo = mid+1
elif midval > 0:
hi = mid
else:
return mid
return None
def minarg(f, args):
min_val = None
min_arg = None
for a in args:
temp=f(a)
if min_arg==None or temp<min_val:
min_val=temp
min_arg=a
return min_arg, min_val
#mat[i] = lowest row for the row currently at position i
def solve():
c=0
for i in range(N):
#print mat, c
#print "i=", i
if mat[i]>i:
for j in range(i+1, N):
if mat[j]<=i:
#print "replace", i, " with ", j
mat.insert(i, mat[j])
#print mat
del mat[j+1]
#mat[j]=None
c+=j-i
break
return c
from time import time
if __name__ == "__main__":
def getInts(): return map(int, input.readline().rstrip('\n').split(' '))
def getFloats(): return map(float, input.readline().rstrip('\n').split(' '))
def getMatrix(rows): return [getInts() for _ in range(rows)]
input, output = open("d:/gcj/in", "r"), open('d:/gcj/output', 'w')
start_time=time()
for case in range(1, int(input.readline()) + 1):
N, = getInts()
mat=[[int(d) for d in input.readline().rstrip('\n')] for _ in range(N)]
for i in range(N):
j=N-1
while j>0 and mat[i][j]==0:
j-=1
mat[i]=j
s="Case #%d: %d\n" % (case, solve())
print s
output.write(s)
print time()-start_time
| [
"[email protected]"
]
| |
588745154691fddda9c9d4900f105db06f013696 | 2060b4819d71871980f8bcf02129d5b6af0a80fb | /fixture/application.py | 159ed380790f067786d5a98bdc54d6fac6d980b3 | [
"Apache-2.0"
]
| permissive | Algirwitch/python_training | dba44d1722b2fb3e3e2e160891642b5892d5f791 | 65a316a366b89ecf83bed58c706b7052790cf686 | refs/heads/master | 2021-08-23T03:04:56.655254 | 2017-12-02T20:14:03 | 2017-12-02T20:14:03 | 110,426,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from selenium.webdriver.firefox.webdriver import WebDriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.new_cont import NContHelper
class Application:
def __init__(self):
self.wd = WebDriver(capabilities={"marionette": False})
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.new_cont = NContHelper(self)
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get("http://localhost/addressbook/")
def destroy(self):
self.wd.quit()
| [
"[email protected]"
]
| |
fd21e05126b71464ee4fba8bc4e7adcee0ea0ad0 | 42a2439b783b74da9b31f0ff64c8c32fb7a626ba | /qutebrowser/.qutebrowser/config.py | d20ae20167fd2c4cf228a7b284fd749a3e097c53 | []
| no_license | zweifisch/dotfiles | 7a8401faf9adda58eaad59aa396ca36ee8167fbc | 0760f07d7651707d5348580cfc599c3a2d12a934 | refs/heads/master | 2023-06-21T04:30:28.458086 | 2023-06-11T05:34:12 | 2023-06-11T05:36:15 | 7,055,187 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py |
config.bind('t', 'set-cmd-text -s :open -t')
config.bind('O', 'set-cmd-text :open {url:pretty}')
config.bind('h', 'tab-prev')
config.bind('gT', 'tab-prev')
config.bind('l', 'tab-next')
config.bind('gt', 'tab-next')
config.bind('b', 'set-cmd-text -s :tab-select')
config.bind('gi', 'hint inputs')
config.bind('<Ctrl-i>', 'open-editor', mode='insert')
# config.bind('<Ctrl-u>', 'rl-unix-line-discard', mode='insert')
# config.bind('<Ctrl-a>', 'rl-beginning-of-line', mode='insert')
# config.bind('<Ctrl-e>', 'rl-end-of-line', mode='insert')
# config.bind('<Ctrl-w>', 'rl-end-word-rubout', mode='insert')
# c.content.proxy = 'socks://localhost:13659'
# c.content.proxy = 'socks://localhost:1086'
c.content.proxy = 'system'
c.url.searchengines = {
"g": "https://www.google.com/search?q={}",
"d": "https://duckduckgo.com/?q={}",
"b": "https://bing.com/search?q={}",
"DEFAULT": "https://www.google.com/search?q={}",
}
c.tabs.position = 'left'
c.auto_save.session = True
# c.content.headers.user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/99.0'
# c.content.headers.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.0.0 Safari/537.36'
c.content.headers.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
c.tabs.show = 'never'
c.tabs.background = True
c.aliases['np'] = 'set content.proxy none'
c.aliases['p'] = 'set content.proxy "socks://localhost:13659"'
c.aliases['readability'] = 'spawn --userscript readability-js'
c.colors.webpage.darkmode.enabled = True
# c.load_autoconfig(False)
| [
"[email protected]"
]
| |
1bae4554cc3411d5a5d0020cd8da45c46a8036f4 | 27b42507a4a6f122e545db06c9629c7693e5768d | /list even num.py | 0e22377fee378316ddaa2ff7a91b3cc1bfbb1d80 | []
| no_license | pooja89299/list | e4651a24f5ff78c37d2c8f93af35e1fd85be0031 | 99244365f920145ff638392d3183e97ae4ccdd2d | refs/heads/main | 2023-05-07T10:08:21.241095 | 2021-05-20T13:03:46 | 2021-05-20T13:03:46 | 369,207,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | a = [1, 2, 13, 15, 78, 9, 10, 19, 61, 51, 41, 4]
b = []
i = 0
count = 0  # how many even values we find
while i < len(a):
    k = a[i]
    if k % 2 == 0:
        b.append(k)
        count = count + 1
    i = i + 1
print(b)  # the even numbers
print(count)  # how many there were
| [
"[email protected]"
]
| |
0bac786f12b06979a3196f901657b5cb7ec66d0d | e99439a5ce60a1c25e9c00d3f646fe53663a9e2e | /reinforce/train.py | 34a5b091946dc09f7bd0268833db242e65cb3531 | []
| no_license | aarunsrinivas/reinforcement-learning | 15e4bc553db7690509f361d0283c7209ae9ee6c3 | bb4fc3aa04969169ce21a75ae6a887d0afb6e947 | refs/heads/main | 2023-07-14T15:33:50.146129 | 2021-08-23T04:02:33 | 2021-08-23T04:02:33 | 398,105,211 | 0 | 0 | null | 2021-08-23T04:02:34 | 2021-08-20T00:05:03 | null | UTF-8 | Python | false | false | 606 | py | import gym
import numpy as np
from agent import REINFORCEAgent
scores = []
EPISODES = 1000
env = gym.make('CartPole-v0')
agent = REINFORCEAgent(0.95, [4], 16, 16, 2, lr=1e-3)
for episode in range(EPISODES):
score = 0
done = False
state = env.reset()
while not done:
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
agent.store_transition(state, action, reward, next_state, done)
state = next_state
score += reward
agent.learn()
agent.clear_memory()
scores.append(score)
print(f'Episode: {episode}, Score: {score}, Avg Score: {np.mean(scores[-100:])}')
| [
"[email protected]"
]
| |
1a442845be688845257b798f6b9a0bb3d80717e6 | 8c7efb37b53717c228a017e0799eb477959fb8ef | /wmm/scenario/migrations/0116_auto__add_field_tidalenergyparameter_ordering__add_field_pelagicconser.py | 6686340589e48b393339faa86cbcd5a5df0aba84 | []
| no_license | rhodges/washington-marinemap | d3c9b24265b1a0800c7dcf0163d22407328eff57 | e360902bc41b398df816e461b3c864520538a226 | refs/heads/master | 2021-01-23T11:47:50.886681 | 2012-09-24T18:38:33 | 2012-09-24T18:38:33 | 32,354,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,555 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TidalEnergyParameter.ordering'
db.add_column('scenario_tidalenergyparameter', 'ordering', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'PelagicConservationParameter.ordering'
db.add_column('scenario_pelagicconservationparameter', 'ordering', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'OffshoreConservationParameter.ordering'
db.add_column('scenario_offshoreconservationparameter', 'ordering', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'NearshoreConservationParameter.ordering'
db.add_column('scenario_nearshoreconservationparameter', 'ordering', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'WaveEnergyParameter.ordering'
db.add_column('scenario_waveenergyparameter', 'ordering', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'TidalEnergyParameter.ordering'
db.delete_column('scenario_tidalenergyparameter', 'ordering')
# Deleting field 'PelagicConservationParameter.ordering'
db.delete_column('scenario_pelagicconservationparameter', 'ordering')
# Deleting field 'OffshoreConservationParameter.ordering'
db.delete_column('scenario_offshoreconservationparameter', 'ordering')
# Deleting field 'NearshoreConservationParameter.ordering'
db.delete_column('scenario_nearshoreconservationparameter', 'ordering')
# Deleting field 'WaveEnergyParameter.ordering'
db.delete_column('scenario_waveenergyparameter', 'ordering')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'scenario.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'})
},
'scenario.chlorophyl': {
'Meta': {'object_name': 'Chlorophyl'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.conservationobjective': {
'Meta': {'object_name': 'ConservationObjective'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'objective': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Objective']", 'null': 'True', 'blank': 'True'})
},
'scenario.conservationsite': {
'Meta': {'object_name': 'ConservationSite'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scenario_conservationsite_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geometry_final': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '32610', 'null': 'True', 'blank': 'True'}),
'geometry_orig': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '32610', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manipulators': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'scenario_conservationsite_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scenario_conservationsite_related'", 'to': "orm['auth.User']"})
},
'scenario.depthclass': {
'Meta': {'object_name': 'DepthClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.energyobjective': {
'Meta': {'object_name': 'EnergyObjective'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'objective': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Objective']", 'null': 'True', 'blank': 'True'})
},
'scenario.geomorphology': {
'Meta': {'object_name': 'Geomorphology'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.mos': {
'Meta': {'object_name': 'MOS'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scenario_mos_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_chlorophyl_pelagic_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Chlorophyl']", 'null': 'True', 'blank': 'True'}),
'input_depth_class_offshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'MOSOffshoreConservationDepthClass'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['scenario.DepthClass']"}),
'input_dist_astoria_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_astoria_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_astoria_wind_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_hoquium_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_hoquium_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_hoquium_wind_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_port_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_port_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_port_wind_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_shore_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_shore_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_shore_wind_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_ecosystem_nearshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.NearshoreEcosystem']", 'null': 'True', 'blank': 'True'}),
'input_exposure_nearshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.NearshoreExposure']", 'null': 'True', 'blank': 'True'}),
'input_geomorphology_offshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'MOSOffshoreConservationGeomorphology'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['scenario.Geomorphology']"}),
'input_max_depth_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_depth_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_depth_wind_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_tidalmax_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_tidalmean_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_wavesummer_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_wavewinter_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_depth_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_depth_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_depth_wind_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_tidalmax_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_tidalmean_tidal_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_wavesummer_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_wavewinter_wave_energy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_objectives': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Objective']", 'null': 'True', 'blank': 'True'}),
'input_objectives_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.ConservationObjective']", 'null': 'True', 'blank': 'True'}),
'input_objectives_energy': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.EnergyObjective']", 'null': 'True', 'blank': 'True'}),
'input_parameters_nearshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scenario.NearshoreConservationParameter']", 'symmetrical': 'False'}),
'input_parameters_offshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scenario.OffshoreConservationParameter']", 'symmetrical': 'False'}),
'input_parameters_pelagic_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scenario.PelagicConservationParameter']", 'symmetrical': 'False'}),
'input_parameters_tidal_energy': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.TidalEnergyParameter']", 'null': 'True', 'blank': 'True'}),
'input_parameters_wave_energy': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scenario.WaveEnergyParameter']", 'symmetrical': 'False'}),
'input_parameters_wind_energy': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scenario.WindEnergyParameter']", 'symmetrical': 'False'}),
'input_substrate_nearshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.NearshoreSubstrate']", 'null': 'True', 'blank': 'True'}),
'input_substrate_offshore_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'MOSOffshoreConservationSubstrate'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['scenario.Substrate']"}),
'input_substrate_tidal_energy': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.TidalSubstrate']", 'null': 'True', 'blank': 'True'}),
'input_substrate_wave_energy': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'MOSWaveEnergySubstrate'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['scenario.Substrate']"}),
'input_substrate_wind_energy': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'MOSWindEnergySubstrate'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['scenario.Substrate']"}),
'input_upwelling_pelagic_conservation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Upwelling']", 'null': 'True', 'blank': 'True'}),
'input_wind_potential_wind_energy': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.WindPotential']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'overlap_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '32610', 'null': 'True', 'blank': 'True'}),
'scenarios': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Scenario']", 'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'scenario_mos_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'support_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scenario_mos_related'", 'to': "orm['auth.User']"})
},
'scenario.nearshoreconservationparameter': {
'Meta': {'object_name': 'NearshoreConservationParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Parameter']", 'null': 'True', 'blank': 'True'})
},
'scenario.nearshoreconservationparameterarea': {
'Meta': {'object_name': 'NearshoreConservationParameterArea'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'scenario.nearshoreecosystem': {
'Meta': {'object_name': 'NearshoreEcosystem'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.nearshoreexposure': {
'Meta': {'object_name': 'NearshoreExposure'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.nearshoresubstrate': {
'Meta': {'object_name': 'NearshoreSubstrate'},
'color': ('django.db.models.fields.CharField', [], {'default': "'778B1A55'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.objective': {
'Meta': {'object_name': 'Objective'},
'color': ('django.db.models.fields.CharField', [], {'default': "'778B1A55'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'})
},
'scenario.offshoreconservationparameter': {
'Meta': {'object_name': 'OffshoreConservationParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Parameter']", 'null': 'True', 'blank': 'True'})
},
'scenario.offshoreconservationparameterarea': {
'Meta': {'object_name': 'OffshoreConservationParameterArea'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'scenario.parameter': {
'Meta': {'object_name': 'Parameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'})
},
'scenario.pelagicconservationparameter': {
'Meta': {'object_name': 'PelagicConservationParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Parameter']", 'null': 'True', 'blank': 'True'})
},
'scenario.pelagicconservationparameterarea': {
'Meta': {'object_name': 'PelagicConservationParameterArea'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'scenario.scenario': {
'Meta': {'object_name': 'Scenario'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scenario_scenario_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'geometry_final': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '32610', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_chlorophyl': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Chlorophyl']", 'null': 'True', 'blank': 'True'}),
'input_depth_class': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.DepthClass']", 'null': 'True', 'blank': 'True'}),
'input_dist_astoria': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_hoquium': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_port': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_dist_shore': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_geomorphology': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Geomorphology']", 'null': 'True', 'blank': 'True'}),
'input_max_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_tidalmax': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_tidalmean': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_wavesummer': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_max_wavewinter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_tidalmax': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_tidalmean': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_wavesummer': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_min_wavewinter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_nearshore_ecosystem': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.NearshoreEcosystem']", 'null': 'True', 'blank': 'True'}),
'input_nearshore_exposure': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.NearshoreExposure']", 'null': 'True', 'blank': 'True'}),
'input_nearshore_substrate': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.NearshoreSubstrate']", 'null': 'True', 'blank': 'True'}),
'input_objective': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Objective']"}),
'input_parameters': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Parameter']", 'null': 'True', 'blank': 'True'}),
'input_substrate': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Substrate']", 'null': 'True', 'blank': 'True'}),
'input_tidal_substrate': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.TidalSubstrate']", 'null': 'True', 'blank': 'True'}),
'input_upwelling': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.Upwelling']", 'null': 'True', 'blank': 'True'}),
'input_wind_potential': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['scenario.WindPotential']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'output_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'output_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '32610', 'null': 'True', 'blank': 'True'}),
'output_mapcalc': ('django.db.models.fields.CharField', [], {'max_length': '720', 'null': 'True', 'blank': 'True'}),
'output_report': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'scenario_scenario_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scenario_scenario_related'", 'to': "orm['auth.User']"})
},
'scenario.substrate': {
'Meta': {'object_name': 'Substrate'},
'color': ('django.db.models.fields.CharField', [], {'default': "'778B1A55'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.tidalenergyparameter': {
'Meta': {'object_name': 'TidalEnergyParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Parameter']", 'null': 'True', 'blank': 'True'})
},
'scenario.tidalenergyparameterarea': {
'Meta': {'object_name': 'TidalEnergyParameterArea'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'scenario.tidalsubstrate': {
'Meta': {'object_name': 'TidalSubstrate'},
'color': ('django.db.models.fields.CharField', [], {'default': "'778B1A55'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.upwelling': {
'Meta': {'object_name': 'Upwelling'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'scenario.waveenergyparameter': {
'Meta': {'object_name': 'WaveEnergyParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Parameter']", 'null': 'True', 'blank': 'True'})
},
'scenario.waveenergyparameterarea': {
'Meta': {'object_name': 'WaveEnergyParameterArea'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'scenario.windenergyparameter': {
'Meta': {'object_name': 'WindEnergyParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scenario.Parameter']", 'null': 'True', 'blank': 'True'})
},
'scenario.windenergyparameterarea': {
'Meta': {'object_name': 'WindEnergyParameterArea'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'scenario.windenergysite': {
'Meta': {'object_name': 'WindEnergySite'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scenario_windenergysite_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geometry_final': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '32610', 'null': 'True', 'blank': 'True'}),
'geometry_orig': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '32610', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manipulators': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'scenario_windenergysite_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scenario_windenergysite_related'", 'to': "orm['auth.User']"})
},
'scenario.windpotential': {
'Meta': {'object_name': 'WindPotential'},
'density': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'speed': ('django.db.models.fields.CharField', [], {'max_length': '30'})
}
}
complete_apps = ['scenario']
| [
"[email protected]"
]
| |
e482f77c92f6c159224fa7ad371be8af9bfb1b78 | 6bd8c7bfe66d1df1dd9db790e7a27150b7f72d31 | /tagcreator/indirect/rvg_indirect_analog.py | 62455ec5eeee02740ba69a16a10a04dc6f9a3268 | []
| no_license | sebasalvarez13/ww-tag-generation | 595178a6835da41c0ed73eed4852b0e031ca5c7f | 649f6f903b2d144f7e3ee2238a4f118b77edbed7 | refs/heads/main | 2023-07-17T09:14:49.837523 | 2021-08-16T22:38:11 | 2021-08-16T22:38:11 | 353,503,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | #!/usr/bin/env python3
import csv
import os.path
from os import path
from tagcreator.indirect.indirect_analog_features import features
class RvgIndirectAnalog:
def __init__(self, line):
self.line = line
self.tag_start = "GenericRevGate"
self.controls_list = ["CMD", "Faults", "ManAngle", "Status"]
self.setpoints_list = ["MPM", "FltrWgt"]
self.measurements_list = ["AngleSts", "LevelTrans", "ProductAvailable", "Status"]
self.verify_list = ["BedDepth", "Hole", "PA", "PN", "Pos", "PosSp"]
def setpoints(self):
dict_data = []
for setpoint in self.setpoints_list:
dict1 = features()
dict1[":IndirectAnalog"] = "{}{}Sp{}".format(self.tag_start, self.line, setpoint)
dict_data.append(dict1)
return(dict_data)
def measurements(self):
dict_data = self.setpoints()
for measurement in self.measurements_list:
dict1 = features()
dict1[":IndirectAnalog"] = "{}{}{}".format(self.tag_start, self.line, measurement)
dict_data.append(dict1)
return(dict_data)
def verify(self):
dict_data = self.measurements()
for verify in self.verify_list:
dict1 = features()
dict1[":IndirectAnalog"] = "GenericEngRevGate{}Verify{}".format(self.line, verify)
dict_data.append(dict1)
return(dict_data)
def control(self):
dict_data = self.verify()
for control in self.controls_list:
dict1 = features()
dict1[":IndirectAnalog"] = "{}Control{}".format(self.tag_start, control)
dict_data.append(dict1)
return(dict_data)
    def module_exists(self):
        # Caution: this check uses an absolute path, while create_csv() below
        # writes to a relative path; both must resolve to the same file.
        file_path = "/mnt/c/Projects/ww-tag-generation/csv-files/indirect/rvg_indirect_analog.csv"
        return path.exists(file_path)
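    # First run (file absent): write a new CSV with a header and the full tag
    # set, including the shared control tags. Later runs: append only this
    # line's setpoint/measurement/verify tags to the existing file.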
def create_csv(self):
csv_file = "csv-files/indirect/rvg_indirect_analog.csv"
        if not self.module_exists():
dict_data = self.control()
csv_columns = list(dict_data[0].keys())
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError as e:
print(e)
else:
dict_data = self.verify()
csv_columns = list(dict_data[0].keys())
try:
with open(csv_file, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
#writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError as e:
print(e)
if __name__ == "__main__":
wm = RvgIndirectAnalog('A')
wm.create_csv() | [
"[email protected]"
]
| |
a9b78fbdc46c4c090b2ee84da3860c8721ba674b | a60133740a2097ccd90c37b5616e611e06025d1c | /evaluate.py | bb1cb184711eeaef49b0e4e5e6e9c0c36df94c34 | []
| no_license | kubumiro/CNN-Python-Framework | 078c42835554843e8af6c2564904f4c6061e9914 | 4b08b18c244601c444671cc96ed72e3863ae323f | refs/heads/master | 2020-09-22T06:15:26.402529 | 2019-11-30T23:42:51 | 2019-11-30T23:42:51 | 225,083,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py |
def model_predict(model, X, y):
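    # The original stub had no body; this is a minimal hedged sketch assuming a
    # Keras-style model that exposes predict() and evaluate() and was compiled
    # with a single accuracy metric.
    predictions = model.predict(X)
    loss, accuracy = model.evaluate(X, y, verbose=0)
    return predictions, accuracy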
| [
"[email protected]"
]
| |
52389b5b2bff83aa9b999bd20397ad5a96cf1b26 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/601.py | 1c4900414caa5c3d523730cdea08f4e249066ea5 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!/usr/bin/env python3
from math import gcd  # fractions.gcd was deprecated and removed in Python 3.9; math.gcd behaves the same for these positive ints
from math import log
rounds = int(input())
for i in range(rounds):
n, d = input().split('/')
n = int(n)
d = int(d)
g = gcd(n,d)
n = n//g
d = d//g
    if log(d, 2) != round(log(d, 2)):  # d must be a power of two, otherwise no answer exists
        print("Case #{}: impossible".format(i + 1))
        continue
    while n != 1:
n -= 1
g = gcd(n,d)
n = n // g
d = d // g
print("Case #{}: {}".format(i+1,int(log(d,2))))
| [
"[email protected]"
]
| |
6d4472bea179f8bc119c29f82a682a99682eade1 | a7c237fab83686d7ece883f95f32f40d15226280 | /nld_from_csv.py | e28f5c31c100faa33a6e248ce16a7c50e5bde54a | [
"MIT"
]
| permissive | benjaminestes/bq-stat | 697f0dd372a266d81d0fd8aca83265eed0204baa | 688d3b847d85a956958cfce0d0d393ba60fc408c | refs/heads/master | 2022-02-19T17:08:57.224444 | 2019-07-22T21:06:21 | 2019-07-22T21:06:21 | 112,222,412 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,061 | py | #!/usr/bin/env python
# coding=utf-8
"""Given a CSV file from Stat's ranking ranking export, create a
newline-delimited JSON file corresponding with BQ schema."""
from sys import stdin
import csv
import json
def map_row_to_schema(row):
"""Associate a value from a Stat CSV export with the correct
identifier from BQ schema.
When first adding a client to this system we may have historical data
that we want to import. That data comes from Stat's ranking export.
    We need to map values from Stat's data into the schema we've designed
to interface with Data Studio. This function handles that mapping.
Args:
row: A dict extracted from Stat's ranking CSV, that
corresponds with a single observation of a keyword ranking.
Returns:
A dict representing data for a single keyword observation that
complies with the BQ schema of our client tables.
Keys that were missing from Stat's response get None/NULL values.
"""
return {
"timestamp": row["Date"] + " 00:00",
"keyword": row["Keyword"],
"market": row["Market"],
"location": row["Location"],
"device": row["Device"],
"rank": row["Rank"],
"base_rank": row["Rank"],
"url": row["URL"] if row["URL"] else None,
"advertiser_competition": row["Advertiser Competition"],
"gms": row["Global Monthly Searches"],
"rms": row["Regional Monthly Searches"],
"cpc": row["CPC"],
"tags": [tag.strip() for tag in row["Tags"].split("/")] if row["Tags"] else [],
}
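# Example (hypothetical values): a row with URL "" and Tags "brand/shoes"
# maps to {"url": None, "tags": ["brand", "shoes"], ...} under this schema.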
def csv_reader():
"""If called from shell, assume Stat CSV file is fed from stdin.
Returns:
An iterable yielding a dict for each row in the Stat CSV.
"""
return csv.DictReader(stdin, delimiter="\t")
def main():
"""Creat an object corresponding to Stat's CSV export,
and write a JSON object for each observation in Stat's response."""
# Stat's API outputs a single row for each instance of a keyword,
# in the sense you'd take it looking at their GUI. That means only
# a single ranking page is included.
#
# However, this script is for importing historical data which we
# get from a ranking export. The ranking export is a CSV which
# includes a row for each ranking page. It will also include an
# empty row for an observation of no ranking page. We want to make
# sure at most a single observation is included to match what we
# get from the API.
#
# This emits a line for the first instance of a "key". By default
# this will be the best-ranking page. However, Stat could change
# this in the future.
seen = set()
for row in csv_reader():
r = map_row_to_schema(row)
key = (r["timestamp"],
r["keyword"],
r["market"],
r["location"],
r["device"])
if key not in seen:
seen.add(key)
print(json.dumps(r))
if __name__ == "__main__":
main()
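# Usage sketch (hypothetical filenames):
#   python nld_from_csv.py < stat_export.tsv > observations.ndjson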
| [
"[email protected]"
]
| |
bf3ea11b9c446a4883cb22a7a78fb68a8f7dc894 | b9767eeeddd3d3e6f591cc96a24d2fabd4373749 | /helper.py | bd4e5cbe961767a6bb5621eefa87765679b6b355 | []
| no_license | artika-tech/Olympics-Data-Analysis | a578c0ca0878a97607c7ff9cfc33dff43180631c | 4304d1d33404ae25b8a904456bc16beb3d0721ae | refs/heads/main | 2023-08-04T20:05:24.751663 | 2021-09-04T16:10:03 | 2021-09-04T16:10:03 | 403,099,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | import numpy as np
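# Helper queries for the Olympics analysis app; every function below expects
# the preprocessed Olympics dataset as a pandas dataframe (df).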
def fetch_medal_tally(df, year, country):
medal_df = df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'])
flag = 0
if year == 'Overall' and country == 'Overall':
temp_df = medal_df
if year == 'Overall' and country != 'Overall':
flag = 1
temp_df = medal_df[medal_df['region'] == country]
if year != 'Overall' and country == 'Overall':
temp_df = medal_df[medal_df['Year'] == int(year)]
if year != 'Overall' and country != 'Overall':
temp_df = medal_df[(medal_df['Year'] == int(year)) & (medal_df['region'] == country)]
if flag == 1:
x = temp_df.groupby('Year').sum()[['Gold', 'Silver', 'Bronze']].sort_values('Year').reset_index()
else:
x = temp_df.groupby('region').sum()[['Gold', 'Silver', 'Bronze']].sort_values('Gold',
ascending=False).reset_index()
x['total'] = x['Gold'] + x['Silver'] + x['Bronze']
x['Gold'] = x['Gold'].astype('int')
x['Silver'] = x['Silver'].astype('int')
x['Bronze'] = x['Bronze'].astype('int')
x['total'] = x['total'].astype('int')
return x
def medal_tally(df):
medal_tally = df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'])
medal_tally = medal_tally.groupby('region').sum()[['Gold', 'Silver', 'Bronze']].sort_values('Gold',
ascending=False).reset_index()
medal_tally['total'] = medal_tally['Gold'] + medal_tally['Silver'] + medal_tally['Bronze']
medal_tally['Gold'] = medal_tally['Gold'].astype('int')
medal_tally['Silver'] = medal_tally['Silver'].astype('int')
medal_tally['Bronze'] = medal_tally['Bronze'].astype('int')
medal_tally['total'] = medal_tally['total'].astype('int')
return medal_tally
def country_year_list(df):
years = df['Year'].unique().tolist()
years.sort()
years.insert(0, 'Overall')
country = np.unique(df['region'].dropna().values).tolist()
country.sort()
country.insert(0, 'Overall')
return years, country
def data_over_time(df, col):
nations_over_time = df.drop_duplicates(['Year', col])['Year'].value_counts().reset_index().sort_values('index')
nations_over_time.rename(columns={'index': 'Edition', 'Year': col}, inplace=True)
return nations_over_time
def most_successful(df, sport):
temp_df = df.dropna(subset=['Medal'])
if sport != 'Overall':
temp_df = temp_df[temp_df['Sport'] == sport]
x = temp_df['Name'].value_counts().reset_index().head(15).merge(df, left_on='index', right_on='Name', how='left')[
['index', 'Name_x', 'Sport', 'region']].drop_duplicates('index')
x.rename(columns={'index': 'Name', 'Name_x': 'Medals'}, inplace=True)
return x
def yearwise_medal_tally(df, country):
temp_df = df.dropna(subset=['Medal'])
temp_df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'], inplace=True)
new_df = temp_df[temp_df['region'] == country]
final_df = new_df.groupby('Year').count()['Medal'].reset_index()
return final_df
def country_event_heatmap(df, country):
temp_df = df.dropna(subset=['Medal'])
temp_df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'], inplace=True)
new_df = temp_df[temp_df['region'] == country]
pt = new_df.pivot_table(index='Sport', columns='Year', values='Medal', aggfunc='count').fillna(0)
return pt
def most_successful_countrywise(df, country):
temp_df = df.dropna(subset=['Medal'])
temp_df = temp_df[temp_df['region'] == country]
x = temp_df['Name'].value_counts().reset_index().head(10).merge(df, left_on='index', right_on='Name', how='left')[
['index', 'Name_x', 'Sport']].drop_duplicates('index')
x.rename(columns={'index':'Name','Name_x':'Medals'},inplace=True)
return x
def weight_v_height(df, sport):
athlete_df = df.drop_duplicates(subset=['Name','region'])
athlete_df['Medal'].fillna('No Medal',inplace=True)
if sport != 'Overall':
temp_df = athlete_df[athlete_df['Sport']==sport]
return temp_df
else:
return athlete_df
def men_vs_women(df):
athlete_df = df.drop_duplicates(subset=['Name', 'region'])
men = athlete_df[athlete_df['Sex']=='M'].groupby('Year').count()['Name'].reset_index()
women = athlete_df[athlete_df['Sex'] == 'F'].groupby('Year').count()['Name'].reset_index()
final = men.merge(women,on='Year',how='left')
final.rename(columns={'Name_x':'Male','Name_y':'Female'},inplace=True)
final.fillna(0,inplace=True)
return final
| [
"[email protected]"
]
| |
e1d209745842586a2972bad9db2e8556b73f6e9e | aba74338092c4de7cb504419eb7b4a19a71d35d7 | /1.딥러닝과러닝머신/2고급/1-1.텍스트형식&바이너리형식.py | 732cc860293d895d1e64d6d14d321a623c7f159e | []
| no_license | goodlucky1215/artificial-intelligence | 469f6ec931dcd30aae4b9d2782588e2468a3635f | 07c5fd009ca86c6ceb0f5ce9c960aeb1ffcd435a | refs/heads/master | 2022-04-24T22:57:33.094666 | 2020-04-29T13:00:59 | 2020-04-29T13:00:59 | 259,822,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | # fetch the data
import requests
r=requests.get("http://api.aoikujira.com/time/get.php")
# extract the data as text
text = r.text
print(text)
# extract the data as binary
bin = r.content  # note: "bin" shadows the built-in bin()
print(bin)
| [
"[email protected]"
]
| |
c25dfb947af2e44cd0e4dea4e46957f4f9ac03ec | a1d8ea6b40b8f90f9c10378a7db213f99b1ba947 | /src/cython_catkin_example/setup.py | b719bf7badd9ef8a18410bcb7c864b86acac4775 | []
| no_license | vbillys/cython_catkin_example | 5b56a56d1a00b0040d07a24b3a4db10d1dc75186 | ee5a7a43828f3c3fba31002a5ebe275fbc312d83 | refs/heads/master | 2020-12-27T07:11:20.419112 | 2016-07-06T04:43:30 | 2016-07-06T04:43:30 | 53,922,598 | 0 | 0 | null | 2016-03-15T07:07:56 | 2016-03-15T07:07:56 | null | UTF-8 | Python | false | false | 769 | py | from setuptools import setup
from distutils.sysconfig import get_python_lib
import glob
import os
import sys
if os.path.exists('readme.rst'):
print("""The setup.py script should be executed from the build directory. Please see the file 'readme.rst' for further instructions.""")
sys.exit(1)
setup(
name = "cython_catkin_example",
package_dir = {'': 'src'},
data_files = [(get_python_lib(), glob.glob('src/*.so'))
#,('bin', ['bin/cython_catkin_example'])
],
author = 'Marco Esposito',
description = 'Example of Cython and catkin integration',
license = 'Apache',
keywords = 'cmake cython build',
url = 'http://github.com/marcoesposito1988/cython_catkin_example',
zip_safe = False,
)
| [
"[email protected]"
]
| |
677fb51759db8a07210bb76240c9cbab445670b8 | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-string/two_characters.py | 10b3ac19c02ca478f6a224f3f683e11fe2efc679 | []
| no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 1,698 | py | # https://www.hackerrank.com/challenges/two-characters
from collections import Counter
from collections import defaultdict
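# HackerRank "Two Characters": drop every letter that ever repeats
# consecutively, then try the remaining letters pairwise (counts differing by
# at most 1) and return the longest strictly alternating candidate.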
def alternate(s):
if s is None or 0 == len(s):
return 0
consecutiveSet = set()
for i, c in enumerate(s):
if 0 == i:
continue
if s[i - 1] == c:
consecutiveSet.add(c)
#print(consecutiveSet)
def isAlternating(cand):
for i, c in enumerate(cand):
if 0 == i:
continue
if cand[i - 1] == c:
return False
return True
cntDict = Counter([c for c in s if c not in consecutiveSet])
cntCharDict = defaultdict(list)
for c, cnt in cntDict.items():
cntCharDict[cnt].append(c)
sortedCntCharList = sorted(cntCharDict.items(), key=lambda t: t[0], reverse=True)
#print(sortedCntCharList)
for i, (cnt1, charList1) in enumerate(sortedCntCharList):
for j, (cnt2, charList2) in enumerate(sortedCntCharList):
if j < i or 1 < abs(cnt1 - cnt2):
continue
for ch1 in charList1:
for ch2 in charList2:
if ch1 == ch2:
continue
cand = [c for c in s if c == ch1 or c == ch2]
#print(cand)
if isAlternating(cand):
return len(cand)
return 0
data = [('abaacdabd', 4),
('beabeefeab', 5),
('asdcbsdcagfsdbgdfanfghbsfdab', 8),
('asvkugfiugsalddlasguifgukvsa', 0),
]
for s, expected in data:
real = alternate(s)
print('{}, expected {}, real {}, result {}'.format(s, expected, real, expected == real))
| [
"[email protected]"
]
| |
a06db2c071875ff44793b4fa25d314d8e7a501c1 | 0178c69ef9fc5e49cadeaadddb4839eeff3f4a2a | /examples/sac.py | edb4bb7454feec8eb93576ef06326455a559076a | []
| no_license | YangHaha11514/rlkit | 3b17de2b4861e12b8c13c849410b7fab335157df | 8c2ee5d1602423e352724a0b0845c646688f98df | refs/heads/master | 2020-03-14T06:22:53.568011 | 2018-03-11T01:31:38 | 2018-03-11T01:31:38 | 131,482,724 | 1 | 0 | null | 2018-04-29T09:46:53 | 2018-04-29T09:46:53 | null | UTF-8 | Python | false | false | 1,813 | py | """
Run PyTorch Soft Actor Critic on HalfCheetahEnv.
NOTE: You need PyTorch 0.3 or more (to have torch.distributions)
"""
import gym
import numpy as np
import rlkit.torch.pytorch_util as ptu
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.torch.sac.policies import TanhGaussianPolicy
from rlkit.torch.sac.sac import SoftActorCritic
from rlkit.torch.networks import FlattenMlp
def experiment(variant):
env = NormalizedBoxEnv(gym.make('HalfCheetah-v1'))
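    # NormalizedBoxEnv rescales the env's action box to [-1, 1], matching the
    # squashed (tanh) Gaussian policy used below.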
obs_dim = int(np.prod(env.observation_space.shape))
action_dim = int(np.prod(env.action_space.shape))
net_size = variant['net_size']
qf = FlattenMlp(
hidden_sizes=[net_size, net_size],
input_size=obs_dim + action_dim,
output_size=1,
)
vf = FlattenMlp(
hidden_sizes=[net_size, net_size],
input_size=obs_dim,
output_size=1,
)
policy = TanhGaussianPolicy(
hidden_sizes=[net_size, net_size],
obs_dim=obs_dim,
action_dim=action_dim,
)
algorithm = SoftActorCritic(
env=env,
policy=policy,
qf=qf,
vf=vf,
**variant['algo_params']
)
if ptu.gpu_enabled():
algorithm.cuda()
algorithm.train()
if __name__ == "__main__":
# noinspection PyTypeChecker
variant = dict(
algo_params=dict(
num_epochs=1000,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
batch_size=128,
max_path_length=999,
discount=0.99,
soft_target_tau=0.001,
policy_lr=3E-4,
qf_lr=3E-4,
vf_lr=3E-4,
),
net_size=300,
)
setup_logger('name-of-experiment', variant=variant)
experiment(variant)
| [
"[email protected]"
]
| |
bb3da42f0892a5fe72ea88473753bf90060eb5b5 | d6de20f751e49eb5c22846249e34bb83feea2040 | /src/core/scoring/apps/online/apps/search_engines/management/commands/search_yandex.py | 16788bac85610453cefce5f74d0aabba37a54e73 | []
| no_license | daritorius/scoring-example | 48a9d3f50b755a688314a261ab1ecf02c8368ac9 | cb9fec5b710031305521d884a363d5540bcba376 | refs/heads/master | 2022-02-19T04:06:43.703460 | 2014-07-30T15:54:39 | 2014-07-30T15:54:39 | 200,102,774 | 0 | 0 | null | 2022-01-06T22:36:18 | 2019-08-01T18:43:55 | Python | UTF-8 | Python | false | false | 758 | py | # -*- coding: utf-8 -*-
from core.main.base.modules.Singleton import Singleton
from core.scoring.apps.online.apps.search_engines.actions.YandexActions import YandexActions
from django.core.management import BaseCommand
from django.utils.translation import ugettext_lazy as _
class Data(object):
__metaclass__ = Singleton
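    # Python 2 style metaclass hook; on Python 3 this attribute is ignored and
    # the class would need "class Data(object, metaclass=Singleton)" instead.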
def __init__(self):
setattr(self, 'profile_first_name', 'Иван')
setattr(self, 'profile_second_name', 'Иванович')
setattr(self, 'profile_third_name', 'Иванов')
setattr(self, 'profile_birthday', '17-09-1986')
class Command(BaseCommand):
actions = YandexActions()
def handle(self, *args, **options):
data = Data()
self.actions.check_simple_search(data) | [
"[email protected]"
]
| |
1b1d43ac638223550a5a9f28cb4d5f216a837cbf | 1fac53ab13a9a682ecd926857ef565fa779afae4 | /fbseries.py | 44da677508725917468869fb71285e9ed733a195 | []
| no_license | Shamabanu/python-1 | 339123ff4e7667d6331c207cb1c7ca3fc775dc48 | 4c1642679bb0bdd53a1d21e5421e04eb7abda65b | refs/heads/master | 2020-04-13T23:49:27.700807 | 2018-12-29T15:10:26 | 2018-12-29T15:10:26 | 163,516,492 | 1 | 0 | null | 2018-12-29T14:16:28 | 2018-12-29T14:16:28 | null | UTF-8 | Python | false | false | 219 | py | def fibonacci(n):
    if n <= 1:
        return n
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
n = int(input("Enter no of terms:"))
print("Fibonacci sequence:")
for i in range(n):
    print(fibonacci(i))
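# Note: this naive recursion is exponential in n; for larger inputs consider
# memoizing, e.g. decorating fibonacci() with functools.lru_cache.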
| [
"[email protected]"
]
| |
2625c8e76f3d89680e11b486b6d8510b714ec271 | a788067ff3826294e7a27459fbbe3391ca1fe870 | /proj/core/feed/forms.py | b1da6f3f3c6969c7528b9434a4bde8ec06e162ff | []
| no_license | Goobs/taskz | 2e69aff4f8315b772a6b5e32191d9d7a146bae6c | 6b572b1df6817e1093264bc83d6ac79c0096ad80 | refs/heads/master | 2016-09-05T18:10:11.848557 | 2014-03-17T18:40:17 | 2014-03-17T18:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # -*- coding: utf-8 -*-
from proj.core.utils.crispymixin import *
from .models import *
class FeedForm(CrispyModelForm):
content = forms.CharField(widget=forms.Textarea())
class Meta:
model = Feed
fields = ('content',)
def get_layout(self, *args, **kwargs):
self.helper.label_class = 'sr-only'
self.helper.field_class = ''
self.helper.form_method = 'post'
return Layout(
Div(
                Field('content', rows=2, placeholder=u'Напишите, что у вас нового'),  # placeholder: "Write what's new with you"
css_class='col-md-12'
),
            StrictButton(u'<i class="fa fa-share"></i> Отправить', type='submit',  # 'Отправить' = "Send"
                         css_class='btn-primary', name='post', value='1'),
)
| [
"[email protected]"
]
| |
0f433f4c1f603c220ce7a2803bc50c6f90e6d28c | 2d50f351f616d6f5afc72f1f04de243252caeeee | /app.py | 7116f45004cfdd49483da5cb2b459fdbf13ab38b | []
| no_license | cllcall/douban_flask | 2a8a4948ac5bb485736c4ebe96495e6514e19cc1 | 0e49ca1a5f1a244ddf116698f837a7799447338c | refs/heads/master | 2023-05-05T12:57:45.206936 | 2021-05-29T13:43:50 | 2021-05-29T13:43:50 | 371,888,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | from flask import Flask,render_template
import sqlite3
app = Flask(__name__)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/index')
def home():
#return render_template("index.html")
return index()
@app.route('/movie')
def movie():
datalist = []
conn = sqlite3.connect("movie.db")
cur = conn.cursor()
sql = "select * from movie250"
data = cur.execute(sql)
for item in data:
datalist.append(item)
cur.close()
conn.close()
return render_template("movie.html",movies = datalist)
@app.route('/score')
def score():
    score = []  # ratings
    num = []    # number of movies counted for each rating
conn = sqlite3.connect("movie.db")
cur = conn.cursor()
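    # Count movies per rating; score[] and num[] become the two parallel
    # series handed to score.html for charting.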
sql = "select score,count(score) from movie250 group by score"
data = cur.execute(sql)
for item in data:
score.append(str(item[0])+"分")
num.append(item[1])
cur.close()
conn.close()
return render_template("score.html",score = score,num = num)
@app.route('/word')
def word():
return render_template("word.html")
@app.route('/team')
def team():
return render_template("team.html")
if __name__ == '__main__':
app.run()
| [
"[email protected]"
]
| |
399b13357b719cf03b12dbebc9c3cd588315a576 | 5774101105b47d78adb7a57eefdfa21502bbd70c | /Django/csvt-django/csvt05/manage.py | cf37dbd64ee66f2e67594f34529b3a7bff46f5fa | []
| no_license | zhlthunder/python-study | 34d928f0ebbdcd5543ae0f41baaea955c92f5c56 | 0f25dd5105ba46791842d66babbe4c3a64819ee5 | refs/heads/master | 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 | HTML | UTF-8 | Python | false | false | 804 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "csvt05.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
ceb00ada558194b136dbe08db9825882d460d300 | 577341517a9aa9f94afac9c9b1ca5efbf39bbf02 | /initial.py | 2a5e707232a35b2fd7bdce64b724911033252ff2 | []
| no_license | prathyu0398/Freshworks_assignment | 7276dde9aabb9536aa519fd7c861cd133c577f92 | 02cf03ae47ef2ab7d6aa7e7aa72533e46a1da100 | refs/heads/main | 2023-01-29T03:00:13.984100 | 2020-12-01T08:07:18 | 2020-12-01T08:07:18 | 317,468,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,852 | py | import threading
from threading import *
import time
import json
#https://github.com/sriharsha9598/CRD-operations-of-a-file-based-key-value-data-store
f=open("data.json",)
d=json.load(f)
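# d is the in-memory key-value store loaded from data.json; the threading
# imports above are unused in this snippet.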
def create(key, value, timeout=0):
if key in d:
print("error: this key already exists") # error message1
else:
if (key.isalpha()):
            if len(d) < (1024 * 1024 * 1024) and value <= (
                    16 * 1024 * 1024):  # caps: ~1G entries, 16 MB per value ("1020" in the original read like a typo for 1024)
if timeout == 0:
l = [value, timeout]
else:
l = [value, time.time() + timeout]
if len(key) <= 32: # constraints for input key_name capped at 32chars
d[key] = l
else:
print("Error: Memory limit exceeded!! ") # error message2
else:
print(
"Error: Invalind key_name!! key_name must contain only alphabets and no special characters or numbers") # error message3
def read(key):
if key not in d:
print("Error: given key does not exist in database. Please enter a valid key") # error message4
else:
b = d[key]
if b[1] != 0:
if time.time() < b[1]:
stri = str(key) + ":" + str(
b[0])
return stri
else:
print("Error: time-to-live of", key, "has expired") # error message5
else:
stri = str(key) + ":" + str(b[0])
return stri
def delete(key):
if key not in d:
print("Error: Given key does not exist in database. Please enter a valid key") # error message4
else:
b = d[key]
if b[1] != 0:
if time.time() < b[1]: # comparing the current time with expiry time
del d[key]
print("key is successfully deleted")
else:
print("error: time-to-live of", key, "has expired") # error message5
else:
del d[key]
print("key is successfully deleted")
def modify(key, value):
b = d[key]
if b[1] != 0:
if time.time() < b[1]:
if key not in d:
print("error: given key does not exist in database. Please enter a valid key") # error message6
else:
l = []
l.append(value)
l.append(b[1])
d[key] = l
else:
print("error: time-to-live of", key, "has expired") # error message5
else:
if key not in d:
print("error: given key does not exist in database. Please enter a valid key") # error message6
else:
l = []
l.append(value)
l.append(b[1])
d[key] = l
| [
"[email protected]"
]
| |
35f6822467bc9491df6aecb05a27905bfc3f14e3 | 571d560deb0a9e4d96cd1491aefe247b2f0a3ff4 | /Source/Interfaces/GraylogInterface.py | f677f75a94c0a330fe2d833fc9041483bfd14332 | [
"MIT"
]
| permissive | owentl/office365-audit-log-collector | 8ec5e0824682c03288f5e3a4b5e81cac7c91e04d | 35876909259f56cb48e2588f7f65a8224d50ef9d | refs/heads/master | 2022-06-02T22:41:51.058334 | 2022-04-21T09:14:35 | 2022-04-21T09:14:35 | 483,233,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | from . import _Interface
from collections import deque
import logging
import threading
import socket
import json
import time
class GraylogInterface(_Interface.Interface):
def __init__(self, graylog_address=None, graylog_port=None, **kwargs):
super().__init__(**kwargs)
self.gl_address = graylog_address
self.gl_port = graylog_port
def _send_message(self, msg, retries=3, **kwargs):
"""
Send a single message to a graylog input; the socket must be closed after each individual message,
otherwise Graylog will interpret it as a single large message.
:param msg: dict
"""
msg_string = json.dumps(msg)
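        # json.dumps() always returns a non-empty string, so this guard is
        # effectively just a safety net.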
if not msg_string:
return
while True:
try:
sock = self._connect_to_graylog_input()
except OSError as e: # For issue: OSError: [Errno 99] Cannot assign requested address #6
if retries:
logging.error("Error connecting to graylog: {}. Retrying {} more times".format(e, retries))
retries -= 1
time.sleep(30)
else:
logging.error("Error connecting to graylog: {}. Giving up for this message: {}".format(
e, msg_string))
self.unsuccessfully_sent += 1
return
else:
break
try:
sock.sendall(msg_string.encode())
except Exception as e:
self.unsuccessfully_sent += 1
logging.error("Error sending message to graylog: {}.".format(e))
sock.close()
self.successfully_sent += 1
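        # Note: as written, successfully_sent is incremented even when
        # sendall() raised, so a failed send is counted on both tallies.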
def _connect_to_graylog_input(self):
"""
Return a socket connected to the Graylog input.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.gl_address, int(self.gl_port)))
return s
| [
"[email protected]"
]
| |
c1dc9b8de82a537e52ed23b956fe00cfff4c98ee | 2db67c3d6d4ec252f3e76ce6e92f258c4e1fbf73 | /delfin/drivers/dell_emc/unity/unity.py | 7652a09592639cd3844daf83fc8c520d00d832a2 | [
"Apache-2.0"
]
| permissive | jiangyutan/delfin | 2b74ed2473f2e42f8cc1d185a8ac4c0835035bd0 | d63b5f19efabc1c6ef94f0244e9f89c2ecceb7ed | refs/heads/v0.8.0-maint | 2023-05-04T21:18:08.539343 | 2021-03-15T08:00:53 | 2021-03-15T08:00:53 | 286,358,774 | 0 | 0 | Apache-2.0 | 2020-08-10T02:38:37 | 2020-08-10T02:38:36 | null | UTF-8 | Python | false | false | 8,054 | py | # Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.dell_emc.unity import rest_handler, alert_handler, consts
from delfin.drivers.dell_emc.unity.alert_handler import AlertHandler
LOG = log.getLogger(__name__)
class UNITYStorDriver(driver.StorageDriver):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.rest_handler = rest_handler.RestHandler(**kwargs)
self.rest_handler.login()
def reset_connection(self, context, **kwargs):
self.rest_handler.logout()
self.rest_handler.verify = kwargs.get('verify', False)
self.rest_handler.login()
def close_connection(self):
self.rest_handler.logout()
def get_storage(self, context):
system_info = self.rest_handler.get_storage()
capacity = self.rest_handler.get_capacity()
version_info = self.rest_handler.get_soft_version()
disk_info = self.rest_handler.get_disk_info()
status = constants.StorageStatus.OFFLINE
if system_info is not None and capacity is not None:
system_entries = system_info.get('entries')
for system in system_entries:
name = system.get('content').get('name')
model = system.get('content').get('model')
serial_number = system.get('content').get('serialNumber')
health_value = system.get('content').get('health').get('value')
if health_value in consts.HEALTH_OK:
status = constants.StorageStatus.NORMAL
else:
status = constants.StorageStatus.ABNORMAL
break
capacity_info = capacity.get('entries')
for per_capacity in capacity_info:
free = per_capacity.get('content').get('sizeFree')
total = per_capacity.get('content').get('sizeTotal')
used = per_capacity.get('content').get('sizeUsed')
subs = per_capacity.get('content').get('sizeSubscribed')
break
soft_version = version_info.get('entries')
for soft_info in soft_version:
version = soft_info.get('content').get('id')
break
disk_entrier = disk_info.get('entries')
raw = 0
for disk in disk_entrier:
raw = raw + int(disk.get('content').get('rawSize'))
result = {
'name': name,
'vendor': 'DELL EMC',
'model': model,
'status': status,
'serial_number': serial_number,
'firmware_version': version,
'location': '',
'subscribed_capacity': int(subs),
'total_capacity': int(total),
'raw_capacity': int(raw),
'used_capacity': int(used),
'free_capacity': int(free)
}
return result
def list_storage_pools(self, context):
pool_info = self.rest_handler.get_all_pools()
pool_list = []
pool_type = constants.StorageType.BLOCK
if pool_info is not None:
pool_entries = pool_info.get('entries')
for pool in pool_entries:
health_value = pool.get('content').get('health').get('value')
if health_value in consts.HEALTH_OK:
status = constants.StorageStatus.NORMAL
else:
status = constants.StorageStatus.ABNORMAL
p = {
'name': pool.get('content').get('name'),
'storage_id': self.storage_id,
'native_storage_pool_id': str(
pool.get('content').get('id')),
'description': pool.get('content').get('description'),
'status': status,
'storage_type': pool_type,
'total_capacity': int(pool.get('content').
get('sizeTotal')),
'subscribed_capacity': int(pool.get('content').get(
'sizeSubscribed')),
'used_capacity': int(pool.get('content').get('sizeUsed')),
'free_capacity': int(pool.get('content').get('sizeFree'))
}
pool_list.append(p)
return pool_list
def volume_handler(self, volumes, volume_list):
if volumes is not None:
vol_entries = volumes.get('entries')
for volume in vol_entries:
total = volume.get('content').get('sizeTotal')
used = volume.get('content').get('sizeAllocated')
vol_type = constants.VolumeType.THICK
if volume.get('content').get('isThinEnabled') is True:
vol_type = constants.VolumeType.THIN
compressed = True
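                # Note: compression is reported as True unconditionally; the
                # REST payload's compression flag is not consulted here.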
deduplicated = volume.get('content').\
get('isAdvancedDedupEnabled')
health_value = volume.get('content').get('health').get('value')
if health_value in consts.HEALTH_OK:
status = constants.StorageStatus.NORMAL
else:
status = constants.StorageStatus.ABNORMAL
v = {
'name': volume.get('content').get('name'),
'storage_id': self.storage_id,
'description': volume.get('content').get('description'),
'status': status,
'native_volume_id': str(volume.get('content').get('id')),
'native_storage_pool_id':
volume.get('content').get('pool').get('id'),
'wwn': volume.get('content').get('wwn'),
'type': vol_type,
'total_capacity': int(total),
'used_capacity': int(used),
'free_capacity': int(total - used),
'compressed': compressed,
'deduplicated': deduplicated
}
volume_list.append(v)
def list_volumes(self, context):
page_size = 1
volume_list = []
while True:
luns = self.rest_handler.get_all_luns(page_size)
if 'entries' not in luns:
break
if len(luns['entries']) < 1:
break
self.volume_handler(luns, volume_list)
page_size = page_size + 1
return volume_list
def list_alerts(self, context, query_para=None):
page_size = 1
alert_model_list = []
while True:
alert_list = self.rest_handler.get_all_alerts(page_size)
if 'entries' not in alert_list:
break
if len(alert_list['entries']) < 1:
break
alert_handler.AlertHandler() \
.parse_queried_alerts(alert_model_list, alert_list, query_para)
page_size = page_size + 1
return alert_model_list
def add_trap_config(self, context, trap_config):
pass
def remove_trap_config(self, context, trap_config):
pass
@staticmethod
def parse_alert(context, alert):
return AlertHandler.parse_alert(context, alert)
def clear_alert(self, context, alert):
return self.rest_handler.remove_alert(context, alert)
| [
"[email protected]"
]
| |
93856c78a47412b99de857cb1abbf8b25758ad79 | f8bbdfb112618136fc4adccb03ce25fbfc48bff5 | /panel/config/admin/management_data/CustomPages/Member.py | 16842cd9719bcaac1229acc9f6e270cb55f48b24 | []
| no_license | lazypanda10117/CICSA-Ranking-Platform | 160973987b533ede6e0b94af29b5bc85646b2bc0 | d5f6ac64a1f85c3333c71a7d81749b49145b9a16 | refs/heads/master | 2022-12-09T23:21:28.649252 | 2020-04-28T22:53:07 | 2020-04-28T22:53:07 | 133,093,367 | 3 | 2 | null | 2021-09-22T17:51:39 | 2018-05-11T22:14:01 | Python | UTF-8 | Python | false | false | 3,351 | py | from cicsa_ranking.models import Member
from .AbstractCustomClass import AbstractCustomClass
from panel.component.CustomElements import Choices
from misc.CustomFunctions import MiscFunctions, RequestFunctions, LogFunctions
class MemberView(AbstractCustomClass):
def __init__(self, request):
self.base_class = Member
self.validation_table = {
'base_table_invalid': {'_state'},
'base_form_invalid': {'_state', 'id'},
}
super().__init__(request, self.base_class, self.validation_table)
# View Process Functions
def abstractFormProcess(self, action, **kwargs):
try:
post_dict = dict(self.request.POST)
dispatcher = super().populateDispatcher()
if dispatcher.get(action):
member_id = kwargs.pop('id', None)
member = self.useAPI(self.base_class).editSelf(id=member_id)
else:
member = self.base_class()
member.member_name = RequestFunctions.getSingleRequestObj(post_dict, 'member_name')
member.member_school = RequestFunctions.getSingleRequestObj(post_dict, 'member_school')
member.member_email = RequestFunctions.getSingleRequestObj(post_dict, 'member_email')
member.member_status = RequestFunctions.getSingleRequestObj(post_dict, 'member_status')
if not action == 'delete':
member.save()
LogFunctions.generateLog(
self.request, 'admin', LogFunctions.makeLogQuery(self.base_class, action.title(), id=member.id))
if action == 'delete':
member.delete()
except Exception:
print({"Error": "Cannot Process " + action.title() + " Request."})
# View Generating Functions
# Form Generating Functions
def getFieldData(self, **kwargs):
action = kwargs.pop('action')
element_id = kwargs.pop('element_id')
field_data_dispatcher = self.populateDispatcher()
if field_data_dispatcher.get(action):
field_data = MiscFunctions.filterDict(self.useAPI(self.base_class).getSelf(id=element_id).__dict__.items(),
self.validation_table['base_form_invalid'])
return field_data
return None
def getChoiceData(self):
choice_data = dict()
choice_data["member_status"] = Choices().getStatusChoices()
choice_data["member_school"] = Choices().getSchoolChoices()
return choice_data
def getDBMap(self, data):
return None
def getMultiChoiceData(self):
return None
def getSearchElement(self, **kwargs):
return None
# Table Generating Functions
def getTableSpecificHeader(self):
return [field.name for field in self.base_class._meta.get_fields()
if field.name not in self.validation_table['base_table_invalid']]
def getTableRowContent(self, content):
field_data = MiscFunctions.filterDict(self.useAPI(self.base_class).getSelf(id=content.id).__dict__.items(),
self.validation_table['base_table_invalid'])
field_data = self.updateChoiceAsValue(field_data, self.getChoiceData())
field_data = MiscFunctions.grabValueAsList(field_data)
return field_data
| [
"[email protected]"
]
| |
9e93607857282ba92b99032fb6fda11e657ed2d1 | b4a065f13aa86e69193a932647cafff38f2051c0 | /code/private_ip_finder/poc001/private_ip_finder.py | fa21e73f1eb9c7a5ac7813986c36a9a93b0b685f | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
]
| permissive | nicc777/python_poc_stuff | 84808f1258ba90a635be7f9591d7f7ff72405bbf | 81d84897e9b410cecca97546a3c79f27546cb3a7 | refs/heads/master | 2020-07-01T19:26:14.901929 | 2018-12-03T16:20:21 | 2018-12-03T16:20:21 | 74,262,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | #!/usr/bin/env python3
import socket
import traceback
def get_ip()->str:
ip_address = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 443))
ip_address = '{}'.format(s.getsockname()[0])
s.close()
except:
traceback.print_exc()
return ip_address
if __name__ == '__main__':
print(get_ip())
# EOF
| [
"[email protected]"
]
| |
f5bcdcacff09c019f93bdd3bb510a1ef20ca3e79 | de7040427f055a4fd9d1ec9d6c36567265d8c65d | /code/ki_1/python/python/set.py | 925da07384b6b071ee8ee9469e80ead67332e401 | []
| no_license | vuquang142k/Program_Python | d98cb4000a8f91988ca33fc638cce78e93e7caf6 | 303cfd4d72fa8c97709fb26ae54b29cd20a317b4 | refs/heads/main | 2023-06-16T11:50:40.395295 | 2021-07-06T20:34:11 | 2021-07-06T20:34:11 | 344,947,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # A set is a container, though used less often than list or tuple
# Delimited by {}; everything inside the braces is an element of the set
# Elements of a set are separated by commas
# A set never contains more than one copy of the same element
# and a set may only contain hashable objects
set_1={69,96}
print(set_1) #{96,69}
set_2={'HowKteam'}
print(set_2) #{'HowKteam'}
#unhashable type:list
# does not work with a list
# does not work with a set inside a set
# set_2={1,2,{'HowKteam'}} # TypeError: unhashable type (kept commented out so the script runs)
set_2={1,1,1}
print(set_2) #{1}
set_2=set((1,2,3)) #{1,2,3}
set_2=set('HowKteam') #{'t','m','H','o','e','a','K','w'}
print({1,2,3,4}-{2,3}) #{1,4}
print({1,2,3,4} & {4,5}) #{4}
print({1,2,3} | {4,5}) #{1,2,3,4,5}
print({1,2,3}^{3,4}) #{1,2,4}
set1={1,2,3,4}
set1.remove(1) #{2,3,4}
set1.discard(5) #{2,3,4}, like remove but raises no error when the element is absent
set1.add(5) #{2,3,4,5}
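# Hedged aside (not in the original file): set elements must be hashable,
# so frozenset is the standard immutable alternative for a "set inside a set"
set_3 = {1, 2, frozenset({'HowKteam'})}
print(set_3) # {1, 2, frozenset({'HowKteam'})}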
| [
"[email protected]"
]
| |
3c6272b5ed36863e8a7b012c1491944ae1bc0fed | d61f7eda203a336868c010abb8f9a6f45dd51adb | /217. Contains Duplicate.py | bfe1c7f845dd61be31b14fab7c6bc51dc3d70b9b | []
| no_license | Mschikay/leetcode | b91df914afc728c2ae1a13d3994568bb6c1dcffb | 7c5e5fe76cee542f67cd7dd3a389470b02597548 | refs/heads/master | 2020-04-17T12:11:38.810325 | 2019-10-06T02:37:32 | 2019-10-06T02:37:32 | 166,570,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
# s = set()
# for n in nums:
# if n in s:
# return True
# s.add(n)
# return False
return len(set(nums)) != len(nums) | [
"[email protected]"
]
| |
a846d3b7ee66e5286fde9b10540aff373ef6a1e1 | 21583a54a0ff37b70f34ced45abbaedd5ecc5518 | /fusion_map/urls.py | 6a596ee9b5d800666af4bcbf0970ea09e6f12d8e | []
| no_license | lud0/fusion_map | 2094468111ac5019dac43d66dd0d76e05f7a8a60 | 0ffb86305408087bb92b7388d890680f79577852 | refs/heads/master | 2020-03-24T11:18:49.304421 | 2018-07-28T14:09:42 | 2018-07-28T14:09:42 | 142,677,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | """ fusion_map URL Configuration
"""
from django.urls import path
from main import api, views
urlpatterns = [
path('', views.index),
path('api/1/locations/add', api.add_location),
path('api/1/locations/removeall', api.remove_all),
]
| [
"[email protected]"
]
| |
3a8869e9053daa50023d1680085020838c976a27 | db664689ca6dbb8eb04854c4f83c42033e2a41e5 | /K-Means Clustering with word embedded data/RunKMeans.py | 90c9f17ad0df596a39668163a3dca3b2e32c3843 | []
| no_license | KashishNarang94/DataMiningCSE506 | 6202c0b6e652de4cf86ed2f55d3e26f37a6165f4 | 6849c69bfbe2ef3c23fe1f22a99c1a8774c1b5e7 | refs/heads/master | 2020-09-09T18:50:32.969506 | 2020-01-25T19:43:52 | 2020-01-25T19:43:52 | 221,533,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import KMeansImplement
QuestionNo=[2,3,4,5]
flag_normalize=[0,1]
KMeansImplement.Kmeans(flag_normalize[0],QuestionNo[0])
KMeansImplement.Kmeans(flag_normalize[1],QuestionNo[1])
KMeansImplement.Kmeans(flag_normalize[0],QuestionNo[2])
KMeansImplement.Kmeans(flag_normalize[0],QuestionNo[3]) | [
"[email protected]"
]
| |
de3cfdc440c151793be00c74b519e37b26974039 | c543f79b9befcbd18af68f10b795d6ede9e2646b | /manage.py | 5aa54c7665e460e7fb641be5a5f535b4cfd470b1 | []
| no_license | BerkutaRuslan/OceanTT | bb7e5c799321ae993d12dc7f11d45545a56f5787 | 3369082320042f2d6ec8664b5433a509ba1104dd | refs/heads/master | 2023-01-24T06:38:32.783453 | 2020-11-30T15:52:09 | 2020-11-30T15:52:09 | 317,198,945 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oceantt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
a30b1616f644fb660ee165cfd9d32bdf3e1587cf | 7c6a05a163c653eba6509674c31586cd63f15bde | /src/navigation/scripts/nav_stack/Serial_node.py | ae29f521e8085739b5a8a06d178e3079df46f74d | []
| no_license | Team-Anveshak/aurora2018 | 02016e035cede1ba08f7143604e59baca3a7cf36 | 975aea6e498af8829c512108792dad878a27b07b | refs/heads/master | 2021-05-13T22:09:44.795530 | 2018-05-19T03:18:44 | 2018-05-19T03:18:44 | 116,481,108 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | #!/usr/bin/env python
import rospy
import serial
from rover_msgs.msg import enc
from geometry_msgs.msg import Twist
pwm=0
port='/dev/ttyACM0'
try:
ser = serial.Serial(port=port, baudrate=57600, timeout=1)
except serial.serialutil.SerialException:
pass
class Serial(object):
def __init__(self,ser):
self._ser=ser
def callback(self,msg):
pwm=msg.linear.x
self._ser.write(str(pwm)) # send the value, not the literal string 'pwm'
rospy.loginfo(pwm)
def callback(msg):
global pwm # update the module-level pwm so the main loop can send it
rospy.loginfo("ok")
pwm = msg.linear.x
#ser.write('pwm')
def main():
rospy.init_node("Serial_node")
#try:
#ser = serial.Serial(port=port, baudrate=57600, timeout=1)
#except serial.serialutil.SerialException:
#pass
rospy.sleep(3)
pub=rospy.Publisher("Encoder",enc, queue_size=10)
#classcall=Serial(ser)
rospy.Subscriber('/cmd_vel_mux/input/teleop', Twist, callback)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
encvalue=enc()
#encvalue.left=2
#encvalue.right=3
line = ser.readline()
lineparts = line.split(',')
#linesparts = lineparts[0].replace(",","")
if(lineparts[0]=='e'):
#encvalue.left=float(lineparts[1])
#encvalue.right=float(lineparts[2])
try:
encvalue.left=float(lineparts[1])
encvalue.right=float(lineparts[2])
rospy.loginfo(float(lineparts[1]))
except (ValueError,IndexError):
pass
rospy.loginfo('running')
pub.publish(encvalue)
rate.sleep()
rospy.spin()
ser.write(str(pwm))
#rospy.spin
#classcall=Serial(pub,ser)
#rospy.Subscriber('/cmd_vel', Twist, classcall.callback)
#rospy.spin
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
]
| |
05352a15e8fe5729ce8218b174d55903f616d532 | 0f812d8a0a3743a9ff9df414e096a7f9830b0397 | /old/demo/onelinkmanipulator_demo_PID.py | 56c7d8e95f6edfbf0742cc0e0823707431e2d674 | [
"MIT"
]
| permissive | pierrecaillouette/AlexRobotics | 18977eec79875b7fc8c84d11f1c680be93b43fcb | 2223100df3e141d88491dde3d60a4eadd07a5c72 | refs/heads/master | 2021-04-09T03:18:58.858708 | 2019-04-28T15:30:26 | 2019-04-28T15:30:26 | 248,833,850 | 0 | 0 | MIT | 2020-03-20T19:14:52 | 2020-03-20T19:14:52 | null | UTF-8 | Python | false | false | 2,985 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 15:27:04 2016
@author: alex
"""
import numpy as np
###########################
# Load libs
###########################
from AlexRobotics.dynamic import Manipulator
from AlexRobotics.control import linear
from AlexRobotics.control import ComputedTorque
from AlexRobotics.planning import RandomTree
from AlexRobotics.control import DPO
###########################
# Objectives
###########################
x_start = np.array([-3.0, 0.0])
x_goal = np.array([ 0.0, 0.0])
###########################
# Create objects
###########################
Robot = Manipulator.OneLinkManipulator()
PD = linear.PD( kp = 5 , kd = 2 )
PID = linear.PID( kp = 5 , kd = 2 , ki = 4 )
CTC = ComputedTorque.ComputedTorqueController( Robot )
SLD = ComputedTorque.SlidingModeController( Robot )
RRT = RandomTree.RRT( Robot , x_start )
VI = DPO.ValueIteration1DOF( Robot , 'quadratic' )
############################
# Params
############################
tmax = 8 # max motor torque
Robot.u_ub = np.array([ tmax]) # Control Upper Bounds
Robot.u_lb = np.array([-tmax]) # Control Lower Bounds
RRT.x_start = x_start
RRT.discretizeactions( 3 )
RRT.dt = 0.1
RRT.goal_radius = 0.3
RRT.max_nodes = 5000
RRT.max_solution_time = 5
RRT.dyna_plot = True
RRT.dyna_node_no_update = 10
RRT.traj_ctl_kp = 25
RRT.traj_ctl_kd = 10
PID.dt = 0.001
CTC.w0 = 2
SLD.lam = 1
SLD.nab = 0
SLD.D = 5
###########################
# Offline Planning
###########################
#RRT.find_path_to_goal( x_goal )
#RRT.plot_2D_Tree()
###########################
# Offline Optimization
###########################
#VI.first_step()
#VI.load_data( 'data/' + 'R1' + 'quadratic' )
#VI.compute_steps(1)
#
## Plot Value Iteration Results
#ValueIterationAlgo.plot_raw()
#ValueIterationAlgo.plot_J_nice( 2 )
###########################
# Assign controller
###########################
#Robot.ctl = PD.ctl
Robot.ctl = PID.ctl
#Robot.ctl = CTC.ctl
#Robot.ctl = SLD.ctl
#Robot.ctl = RRT.trajectory_controller
#VI.assign_interpol_controller()
###########################
# Simulation
###########################
Robot.plotAnimation( x_start , tf=10, n=10001, solver='euler' )
###########################
# Plots
###########################
Robot.Sim.phase_plane_trajectory()
#Robot.Sim.phase_plane_trajectory( PP_OL = False , PP_CL = True )
Robot.Sim.plot_CL()
###########################
# and more
###########################
#from AlexRobotics.dynamic import CustomManipulator
#BoeingArm = CustomManipulator.BoeingArm()
#BoeingArm.plot3DAnimation( x0 = np.array([0.2,0,0,0,0,0]) )
# Hold script in console
import matplotlib.pyplot as plt
plt.show() | [
"[email protected]"
]
| |
55d16850c20edcac6154e8d98839f22a73dcb294 | 48c3636efbea499243bfdd035e2fa2c54179c842 | /core/migrations/0003_pontosturisticos_atracoes.py | 703c69e274c46247b4493018be00938115942d66 | []
| no_license | agmguerra/pontos_turisticos | 625e8981e3d3d00c81811759070447fefe654f2b | d6e987e08b7636b96cc45a1e052c240c73b5d947 | refs/heads/master | 2020-08-14T14:18:39.406632 | 2019-12-09T18:41:56 | 2019-12-09T18:41:56 | 215,182,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.2.6 on 2019-10-15 01:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('atracoes', '0001_initial'),
('core', '0002_pontosturisticos_aprovado'),
]
operations = [
migrations.AddField(
model_name='pontosturisticos',
name='atracoes',
field=models.ManyToManyField(to='atracoes.Atracao'),
),
]
| [
"[email protected]"
]
| |
09472d9e33fd1ac9ca1eb9f00d82e5bb5a706341 | 7559dfd02bf6ebc15386f6f5035d4a1e66b19ac2 | /fts_base/wizard/__init__.py | a47b82d512e1935d34e8e86f23de1328cb846067 | []
| no_license | Therp/fulltextsearch | 3b1077549df7032c3f11285395eb6464152a5249 | e8ed30c2a71ed5b0d10488590219ea6dbaf61d62 | HEAD | 2016-09-06T04:19:36.057770 | 2014-09-12T13:09:55 | 2014-09-12T13:09:55 | 23,957,766 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | import fts_config
| [
"[email protected]"
]
| |
c4ff477e4d4593ec29941b50cc598115076da953 | e0d27e8f36ddc657e0b18db12e8d5e4894acf6d1 | /__init__.py | 62071625e7c8fef7523b8277fe60a611cdbbc40c | []
| no_license | dilkuwor/empproj | e41dbbd7443f13aaf6681871ca5ff168a3659424 | c14d2157db097bf38ade82989dcb7856e5fbbe71 | refs/heads/master | 2021-01-24T03:57:53.364058 | 2018-03-03T20:10:14 | 2018-03-03T20:10:14 | 122,914,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from flask import Flask
from flask_restful import Api
from timeapi.resources.employee import Employee, EmployeeCollection
from timeapi.resources.project import Project, ProjectCollection
from timeapi.resources.project_employee import ProjectEmployee
from apispec import APISpec
from flask_apispec.extension import FlaskApiSpec
app = Flask(__name__)
api = Api(app)
api.add_resource(Employee, '/employees/<string:employee_id>')
api.add_resource(EmployeeCollection, '/employees')
api.add_resource(ProjectCollection, '/projects')
api.add_resource(Project, '/projects/<string:project_id>')
api.add_resource(ProjectEmployee, '/projects/<string:project_id>/employees')
app.config.update({
'APISPEC_SPEC': APISpec(
title='timeapi',
version='v1',
plugins=['apispec.ext.marshmallow'],
),
'APISPEC_SWAGGER_URL': '/spec/',
})
docs = FlaskApiSpec(app)
docs.register(Employee)
docs.register(EmployeeCollection)
docs.register(ProjectCollection)
docs.register(Project)
docs.register(ProjectEmployee)
'''
if __name__ == '__main__':
app.run(debug=True)
''' | [
"[email protected]"
]
| |
c025936563a10774a9b3acd93602aa92bf3b75f8 | 2b7dccfa789bdeff28f306243f5ee04137e131be | /PythonAndCoding/tweettweet.py | ad177cc437e3640df20a5dee289e4f0e18627984 | []
| no_license | animeshsrivastava246/PythonWork | d3a88a65bbf50da3ffb9912ab18dd0126e502c8e | 3bdee1f06fd7faf92b02c37f3361e7e92011c57b | refs/heads/main | 2023-07-13T12:17:05.083860 | 2021-08-16T11:30:42 | 2021-08-16T11:30:42 | 396,763,690 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | import tweepy,time
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
user=api.me()
def limit_handler(cursor):
try:
while True:
yield cursor.next()
except tweepy.RateLimitError:
time.sleep(300) # seconds -- back off for 5 minutes when rate limited
for follower in limit_handler(tweepy.Cursor(api.followers).items()):
print(follower.name)
#print(user.followers_count)
#print(user.screen_name)
#print(user.name)
#public_tweets = api.home_timeline()
#for tweet in public_tweets:
# print(tweet.text)
# Tweepy.org DOCUMENTATION | [
"[email protected]"
]
| |
27b994c3924007d90e49e4fc1c92fc4eef573a45 | 5bc7733ecb92e5c426ba3ff620ffba8da27292f6 | /postGRE_script.py | 08ac3a51cf487cea29986395757646fbbea6f9ed | []
| no_license | CodeyBank/simple-database-using-postgres | 66ac01bfed27ad8b4047bc081c909c22b66bab53 | 29c0ab25cbef664621d84cc659818dfdc720046b | refs/heads/main | 2023-05-19T16:47:53.768820 | 2021-06-05T18:47:08 | 2021-06-05T18:47:08 | 374,189,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | import psycopg2
def create_table():
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)")
connection.commit()
connection.close()
def insert(item, quantity, price):
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("INSERT INTO store VALUES('%s', '%s', '%s')" %(item, quantity, price))
#cur.execute("INSERT INTO store VALUES(%s, %s, %s)", (item, quantity, price)) #Alternative method to avoid database injections from hackers
connection.commit()
connection.close()
#insert("Coffee cup", 10, 2.5)
# This function deletes a row. pass the row item as an argument
def delete_item(item):
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("DELETE FROM store WHERE item=%s", (item,)) #when there is only one parameter, always end with ','
connection.commit()
connection.close()
def view_db():
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("SELECT * FROM store")
rows = cur.fetchall() # .fetchall() methodReturns the rows of a DB as a list of a tuples
connection.close()
return rows
def update_db(quantity, price, item):
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("UPDATE store SET quantity=%s, price=%s WHERE item=%s", (quantity, price, item))
connection.commit() # UPDATE returns no result set, so commit instead of fetchall()
connection.close()
create_table()
delete_item("Orange")
print(view_db()) | [
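# Hypothetical usage of update_db (not in the original script):
# update_db(20, 3.0, 'Coffee cup') # set quantity=20 and price=3.0 for that row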
"[email protected]"
]
| |
42983223ec1ec685ad95399c0bf213151fcc9c28 | 8a754ae404f73a2fc1c9eb2ed3a796cc1c65b2da | /src/data/.ipynb_checkpoints/make_dataset-checkpoint.py | fca6a0c07c291a8ffb091da8b3eb6f8908511210 | [
"MIT"
]
| permissive | DataEconomistDK/Recession-Predictor | 2d0ec8a8bb4f88b135bf1eb19836928d84b44a04 | 74f453cad275f216c30403ddc6a49d6a22b054d7 | refs/heads/master | 2021-04-23T17:42:23.682241 | 2020-04-23T08:10:37 | 2020-04-23T08:10:37 | 249,952,543 | 1 | 0 | MIT | 2020-03-25T10:48:08 | 2020-03-25T10:48:07 | null | UTF-8 | Python | false | false | 18,886 | py | """
This module gets data from FRED and Yahoo Finance, builds some features,
and saves the data into the respective filepaths.
"""
import re
from io import StringIO
import json
from datetime import datetime, timedelta
import requests as req
import pandas as pd
import RecessionPredictor_paths as path
class YahooData:
"""
Retrieves data from Yahoo Finance.
Original code source: https://stackoverflow.com/questions/44225771/scraping-historical-data-from-yahoo-finance-with-python
"""
timeout = 2
crumb_link = 'https://finance.yahoo.com/quote/{0}/history?p={0}'
crumble_regex = r'CrumbStore":{"crumb":"(.*?)"}'
quote_link = 'https://query1.finance.yahoo.com/v7/finance/download/{quote}?period1={dfrom}&period2={dto}&interval=1mo&events=history&crumb={crumb}'
def __init__(self, symbol, days_back=7):
"""
symbol: ticker symbol for the asset to be pulled.
"""
self.symbol = str(symbol)
self.session = req.Session()
self.dt = timedelta(days=days_back)
def get_crumb(self):
"""
Original code source: https://stackoverflow.com/questions/44225771/scraping-historical-data-from-yahoo-finance-with-python
"""
response = self.session.get(self.crumb_link.format(self.symbol), timeout=self.timeout)
response.raise_for_status()
match = re.search(self.crumble_regex, response.text)
if not match:
raise ValueError('Could not get crumb from Yahoo Finance')
else:
self.crumb = match.group(1)
def get_quote(self):
"""
Original code source: https://stackoverflow.com/questions/44225771/scraping-historical-data-from-yahoo-finance-with-python
"""
if not hasattr(self, 'crumb') or len(self.session.cookies) == 0:
self.get_crumb()
now = datetime.utcnow()
dateto = int(now.timestamp())
datefrom = -630961200
# line in original code: datefrom = int((now - self.dt).timestamp())
url = self.quote_link.format(quote=self.symbol, dfrom=datefrom, dto=dateto, crumb=self.crumb)
response = self.session.get(url)
response.raise_for_status()
return pd.read_csv(StringIO(response.text), parse_dates=['Date'])
class DataSeries:
"""
Contains methods and objects to retrieve data from FRED and Yahoo Finance.
"""
def __init__(self):
self.dates = []
self.values = []
def fred_response(self, params):
"""
Makes requests to the FRED API.
params: dictionary, FRED API parameters.
"""
params = dict(params)
fred_request = req.get(url='https://api.stlouisfed.org/fred/series/observations',
params=params)
fred_json = json.loads(fred_request.text)['observations']
for observation in fred_json:
self.dates.append(str(observation['date']))
self.values.append(float(observation['value']))
def yahoo_response(self, series_id):
"""
Retrieves data from Yahoo Finance, and performs timestamp adjustments.
series_id: ticker symbol for the asset to be pulled.
"""
series_id = str(series_id)
series_dataframe = YahooData(series_id).get_quote()[::-1]
series_dataframe.reset_index(inplace=True)
series_dataframe.drop('index', axis=1, inplace=True)
most_recent_day = datetime.strptime(str(series_dataframe['Date'][0])[:10],
'%Y-%m-%d').day
if most_recent_day != 1:
series_dataframe = series_dataframe[1:]
series_dataframe.reset_index(inplace=True)
series_dataframe.drop('index', axis=1, inplace=True)
self.dates.extend([str(series_dataframe['Date'][index])[:10]
for index in range(0, len(series_dataframe))])
self.values.extend([float(series_dataframe['Adj Close'][index])
for index in range(0, len(series_dataframe))])
class MakeDataset:
"""
The manager class for this module.
"""
def __init__(self):
"""
fred_series_ids: identifiers for FRED data series.
yahoo_series_ids: identifiers for Yahoo Finance data series.
"""
self.fred_series_ids = {'Non-farm_Payrolls': 'PAYEMS',
'Civilian_Unemployment_Rate': 'UNRATE',
'Effective_Fed_Funds': 'FEDFUNDS',
'CPI_All_Items': 'CPIAUCSL',
'10Y_Treasury_Rate': 'GS10',
'5Y_Treasury_Rate': 'GS5',
'3_Month_T-Bill_Rate': 'TB3MS',
'IPI': 'INDPRO'}
self.yahoo_series_ids = {'S&P_500_Index': '^GSPC'}
self.primary_dictionary_output = {}
self.primary_df_output = pd.DataFrame()
self.shortest_series_name = ''
self.shortest_series_length = 1000000
self.secondary_df_output = pd.DataFrame()
def get_fred_data(self):
"""
Cycles through "fred_series"ids" to get data from the FRED API.
"""
import time
now = datetime.now()
month = now.strftime('%m')
year = now.year
most_recent_date = '{}-{}-07'.format(year, month)
print('\nGetting data from FRED API as of {}...'.format(most_recent_date))
for series_name in list(self.fred_series_ids.keys()):
series_data = DataSeries()
series_id = self.fred_series_ids[series_name]
print('\t|--Getting data for {}({}).'.format(series_name, series_id))
params = {'series_id': series_id,
'api_key': path.fred_api_key,
'file_type': 'json',
'sort_order': 'desc',
'realtime_start': most_recent_date,
'realtime_end': most_recent_date}
success = False
while success == False:
try:
series_data.fred_response(params)
except json.JSONDecodeError:
delay = 5
print('\t --CONNECTION ERROR--',
'\n\t Sleeping for {} seconds.'.format(delay))
time.sleep(delay)
else:
success = True
self.primary_dictionary_output[series_name] = series_data
print('Finished getting data from FRED API!')
def get_yahoo_data(self):
"""
Cycles through "yahoo_series"ids" to get data from the Yahoo Finance.
"""
import time
print('\nGetting data from Yahoo Finance...')
for series_name in list(self.yahoo_series_ids.keys()):
series_data = DataSeries()
series_id = self.yahoo_series_ids[series_name]
print('\t|--Getting data for {}({}).'.format(series_name, series_id))
success = False
while success == False:
try:
series_data.yahoo_response(series_id)
except req.HTTPError:
delay = 5
print('\t --CONNECTION ERROR--',
'\n\t Sleeping for {} seconds.'.format(delay))
time.sleep(delay)
else:
success = True
self.primary_dictionary_output[series_name] = series_data
print('Finished getting data from Yahoo Finance!')
def find_shortest_series(self):
"""
Finds the length and name of the shortest series in the primary
dataset.
"""
for series_name in self.primary_dictionary_output.keys():
series_data = self.primary_dictionary_output[series_name]
if len(series_data.dates) < self.shortest_series_length:
self.shortest_series_length = len(series_data.dates)
self.shortest_series_name = series_name
def combine_primary_data(self):
"""
Combines primary data into a single dictionary (such that each series
is the same length and is time-matched to each other) and saves it
as a json object.
"""
print('\nCombining primary dataset...')
now = datetime.now()
current_month = int(now.strftime('%m'))
current_year = now.year
dates = []
for months_ago in range(0, self.shortest_series_length):
if current_month < 10:
dates.append('{}-0{}-01'.format(current_year, current_month))
else:
dates.append('{}-{}-01'.format(current_year, current_month))
if current_month == 1:
current_month = 12
current_year -= 1
else:
current_month -= 1
self.primary_df_output['Dates'] = dates
for series_name in self.primary_dictionary_output.keys():
series_data = self.primary_dictionary_output[series_name]
self.primary_df_output[series_name] = series_data.values[:self.shortest_series_length]
print('Finished combining primary dataset!')
print('\t|--Saving primary dataset to {}'.format(path.data_primary))
self.primary_df_output.to_json(path.data_primary)
self.primary_df_output.to_json(path.data_primary_most_recent)
print('\nPrimary dataset saved to {}'.format(path.data_primary_most_recent))
def get_primary_data(self):
"""
Gets primary data from FRED API and Yahoo Finance.
"""
print('\nGetting primary data from APIs...')
self.get_fred_data()
self.get_yahoo_data()
self.find_shortest_series()
self.combine_primary_data()
def calculate_secondary_data(self):
"""
Builds some features from the primary dataset to create a secondary
dataset.
"""
dates = []
payrolls_3mo = []
payrolls_12mo = []
unemployment_rate = []
unemployment_rate_12mo_chg = []
real_fed_funds = []
real_fed_funds_12mo = []
CPI_3mo = []
CPI_12mo = []
treasury_10Y_12mo = []
treasury_3M_12mo = []
treasury_10Y_3M_spread = []
treasury_10Y_5Y_spread = []
treasury_10Y_3M_spread_12mo = []
sp_500_3mo = []
sp_500_12mo = []
IPI_3mo = []
IPI_12mo = []
for index in range(0, len(self.primary_df_output) - 12):
dates.append(self.primary_df_output['Dates'][index])
payrolls_3mo_pct_chg = (self.primary_df_output['Non-farm_Payrolls'][index]
/ self.primary_df_output['Non-farm_Payrolls'][index + 3]) - 1
payrolls_3mo.append(((1 + payrolls_3mo_pct_chg) ** 4) - 1)
payrolls_12mo.append((self.primary_df_output['Non-farm_Payrolls'][index]
/ self.primary_df_output['Non-farm_Payrolls'][index + 12]) - 1)
unemployment_rate.append(self.primary_df_output['Civilian_Unemployment_Rate'][index])
unemployment_rate_12mo_chg.append((self.primary_df_output['Civilian_Unemployment_Rate'][index])
- self.primary_df_output['Civilian_Unemployment_Rate'][index + 12])
CPI_3mo_pct_chg = (self.primary_df_output['CPI_All_Items'][index]
/ self.primary_df_output['CPI_All_Items'][index + 3]) - 1
CPI_3mo.append(((1 + CPI_3mo_pct_chg) ** 4) - 1)
CPI_12mo_pct_chg = (self.primary_df_output['CPI_All_Items'][index]
/ self.primary_df_output['CPI_All_Items'][index + 12]) - 1
CPI_12mo.append(CPI_12mo_pct_chg)
real_fed_funds.append(self.primary_df_output['Effective_Fed_Funds'][index]
- (CPI_12mo_pct_chg * 100))
real_fed_funds_12mo.append(self.primary_df_output['Effective_Fed_Funds'][index]
- self.primary_df_output['Effective_Fed_Funds'][index + 12])
treasury_10Y_12mo.append(self.primary_df_output['10Y_Treasury_Rate'][index]
- self.primary_df_output['10Y_Treasury_Rate'][index + 12])
treasury_3M_12mo.append(self.primary_df_output['3_Month_T-Bill_Rate'][index]
- self.primary_df_output['3_Month_T-Bill_Rate'][index + 12])
treasury_10Y_3M_spread_today = (self.primary_df_output['10Y_Treasury_Rate'][index]
- self.primary_df_output['3_Month_T-Bill_Rate'][index])
treasury_10Y_3M_spread.append(treasury_10Y_3M_spread_today)
treasury_10Y_3M_spread_12mo_ago = (self.primary_df_output['10Y_Treasury_Rate'][index + 12]
- self.primary_df_output['3_Month_T-Bill_Rate'][index + 12])
treasury_10Y_3M_spread_12mo.append(treasury_10Y_3M_spread_today
- treasury_10Y_3M_spread_12mo_ago)
treasury_10Y_5Y_spread_today = (self.primary_df_output['10Y_Treasury_Rate'][index]
- self.primary_df_output['5Y_Treasury_Rate'][index])
treasury_10Y_5Y_spread.append(treasury_10Y_5Y_spread_today)
sp_500_3mo.append((self.primary_df_output['S&P_500_Index'][index]
/ self.primary_df_output['S&P_500_Index'][index + 3]) - 1)
sp_500_12mo.append((self.primary_df_output['S&P_500_Index'][index]
/ self.primary_df_output['S&P_500_Index'][index +12]) - 1)
IPI_3mo_pct_chg = (self.primary_df_output['IPI'][index]
/ self.primary_df_output['IPI'][index + 3]) - 1
IPI_3mo.append(((1 + IPI_3mo_pct_chg) ** 4) - 1)
IPI_12mo_pct_chg = (self.primary_df_output['IPI'][index]
/ self.primary_df_output['IPI'][index + 12]) - 1
IPI_12mo.append(IPI_12mo_pct_chg)
self.secondary_df_output = pd.DataFrame({
'Dates': dates,
'Payrolls_3mo_pct_chg_annualized': payrolls_3mo,
'Payrolls_12mo_pct_chg': payrolls_12mo,
'Unemployment_Rate': unemployment_rate,
'Unemployment_Rate_12mo_chg': unemployment_rate_12mo_chg,
'Real_Fed_Funds_Rate': real_fed_funds,
'Real_Fed_Funds_Rate_12mo_chg': real_fed_funds_12mo,
'CPI_3mo_pct_chg_annualized': CPI_3mo,
'CPI_12mo_pct_chg': CPI_12mo,
'10Y_Treasury_Rate_12mo_chg': treasury_10Y_12mo,
'3M_Treasury_Rate_12mo_chg': treasury_3M_12mo,
'3M_10Y_Treasury_Spread': treasury_10Y_3M_spread,
'3M_10Y_Treasury_Spread_12mo_chg': treasury_10Y_3M_spread_12mo,
'5Y_10Y_Treasury_Spread': treasury_10Y_5Y_spread,
'S&P_500_3mo_chg': sp_500_3mo,
'S&P_500_12mo_chg': sp_500_12mo,
'IPI_3mo_pct_chg_annualized': IPI_3mo,
'IPI_12mo_pct_chg': IPI_12mo})
def create_secondary_data(self):
"""
Creates and saves the secondary dataset as a json object.
"""
print('\nCreating secondary dataset from "primary_dataset_most_recent.json"')
self.primary_df_output = pd.read_json(path.data_primary_most_recent)
self.primary_df_output.sort_index(inplace=True)
self.calculate_secondary_data()
print('Finished creating secondary dataset!')
print('\t|--Saving secondary dataset to {}'.format(path.data_secondary))
self.secondary_df_output.to_json(path.data_secondary)
self.secondary_df_output.to_json(path.data_secondary_most_recent)
print('\nSecondary dataset saved to {}'.format(path.data_secondary_most_recent))
def get_all_data(self):
"""
Gets data from primary sources (FRED and Yahoo Finance), then performs
preliminary manipulations before saving the data.
"""
self.get_primary_data()
self.create_secondary_data()
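# Minimal usage sketch (not part of the original module); it assumes the file
# paths and FRED API key in RecessionPredictor_paths are configured.
if __name__ == '__main__':
    MakeDataset().get_all_data()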
# FRED citations
#U.S. Bureau of Labor Statistics, All Employees: Total Nonfarm Payrolls [PAYEMS], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/PAYEMS
#U.S. Bureau of Labor Statistics, Civilian Unemployment Rate [UNRATE], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/UNRATE
#Board of Governors of the Federal Reserve System (US), Effective Federal Funds Rate [FEDFUNDS], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/FEDFUNDS
#U.S. Bureau of Labor Statistics, Consumer Price Index for All Urban Consumers: All Items [CPIAUCSL], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/CPIAUCSL
#Board of Governors of the Federal Reserve System (US), 10-Year Treasury Constant Maturity Rate [GS10], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/GS10
#Board of Governors of the Federal Reserve System (US), 5-Year Treasury Constant Maturity Rate [GS5], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/GS5
#Board of Governors of the Federal Reserve System (US), 3-Month Treasury Bill: Secondary Market Rate [TB3MS], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/TB3MS
#Board of Governors of the Federal Reserve System (US), Industrial Production Index [INDPRO], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/INDPRO
#MIT License
#
#Copyright (c) 2019 Terrence Zhang
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE. | [
"[email protected]"
]
| |
346f62a2096c1ecfa719e42210fc519957c983e5 | 1e5812e1839bbb67e2d284a96f3f5fa8360ceeca | /own/visualizer.py | fc7a7f32e4f27023554246dec93d3d62304926a5 | []
| no_license | TSBB15-VT21-group-4/3d-reconstruction | aaf8498abdea6bb2b7b79181073b33bd0636c517 | a36b331bb40fb19e9ae515807e705e76e70aef93 | refs/heads/master | 2023-07-03T16:55:08.330639 | 2021-08-06T09:50:00 | 2021-08-06T09:50:00 | 393,331,214 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,323 | py | #!/usr/bin/env python3
import numpy as np
from matplotlib import pyplot as plt
from external import lab3
import warnings
class Visualizer():
"""
Declares functions used for visualization.
"""
def __init__(self):
pass
def visualize_corresp(self, view_1, view_2, p_index):
"""
Displays the correspondences between the 2 given View objects.
"""
proj_1 = view_1.projections[:, p_index]
proj_2 = view_2.projections[:, p_index]
lab3.show_corresp(view_1.image, view_2.image, proj_1, proj_2, vertical=False)
plt.title(f'Cameras {view_1.id} ({view_1.camera_center[0,0]:.0f}, {view_1.camera_center[1,0]:.0f}, {view_1.camera_center[2,0]:.0f})'
f' and {view_2.id} ({view_2.camera_center[0,0]:.0f}, {view_2.camera_center[1,0]:.0f}, {view_2.camera_center[2,0]:.0f})')
#plt.show()
def visualize_3d_points(self, book_keeper, m, estimate_color=True):
"""
Displays the point cloud and camera poses held by the book keeper.
---
Input:
book_keeper : holds the [3xN] matrix P of 3D points and the list Q of views
m : number of newly added 3D points (drawn in a separate color)
estimate_color : if True, color each point from the source images
"""
if estimate_color:
est_col = self.estimate_color(book_keeper.Q)
z_hat = np.array([0, 0, 1]).reshape(3, 1)
point_cloud = book_keeper.P.T # nx3
n = book_keeper.P.shape[1]
# only 3d points
fig_3d = plt.figure('3d point cloud')
ax_3d = fig_3d.gca(projection='3d')
if estimate_color:
ax_3d.scatter(point_cloud[:, 0], point_cloud[:, 1], point_cloud[:, 2],
color=est_col[book_keeper.index_3d_2d])
else:
ax_3d.scatter(point_cloud[:, 0], point_cloud[:, 1], point_cloud[:, 2],
color=(0, 0, 1))
fig = plt.figure()
ax = fig.gca(projection='3d')
# Existing 3d points
ax.scatter(point_cloud[0:n-m, 0], point_cloud[0:n-m, 1], point_cloud[0:n-m, 2], color=(0, 0, 1))
# Newly added 3d points
ax.scatter(point_cloud[n-m:, 0], point_cloud[n-m:, 1], point_cloud[n-m:, 2], color=(1, 0, 1))
for view_idx in range(len(book_keeper.Q)):
if book_keeper.Q[view_idx].id == 0:
# origin camera pose
ax.scatter(book_keeper.Q[view_idx].camera_center[0], book_keeper.Q[view_idx].camera_center[1], book_keeper.Q[view_idx].camera_center[2], color=(0, 1, 0))
ax.text(book_keeper.Q[view_idx].camera_center[0, 0], book_keeper.Q[view_idx].camera_center[1, 0],
book_keeper.Q[view_idx].camera_center[2, 0], f'C{book_keeper.Q[view_idx].id}')
ax.quiver(0, 0, 0, 0, 0, 1)
elif view_idx == len(book_keeper.Q) - 1:
# newly added camera pose
ax.scatter(book_keeper.Q[view_idx].camera_center[0], book_keeper.Q[view_idx].camera_center[1],
book_keeper.Q[view_idx].camera_center[2], color=(1, 0, 0))
ax.text(book_keeper.Q[view_idx].camera_center[0, 0], book_keeper.Q[view_idx].camera_center[1, 0],
book_keeper.Q[view_idx].camera_center[2, 0], f'C{book_keeper.Q[view_idx].id}')
view_direction = book_keeper.Q[view_idx].rotation_matrix.T @ z_hat
ax.quiver(book_keeper.Q[view_idx].camera_center[0], book_keeper.Q[view_idx].camera_center[1],
book_keeper.Q[view_idx].camera_center[2], view_direction[0], view_direction[1],
view_direction[2])
else:
# already added camera poses
ax.scatter(book_keeper.Q[view_idx].camera_center[0], book_keeper.Q[view_idx].camera_center[1], book_keeper.Q[view_idx].camera_center[2], color=(0, 0, 0))
ax.text(book_keeper.Q[view_idx].camera_center[0, 0], book_keeper.Q[view_idx].camera_center[1, 0],
book_keeper.Q[view_idx].camera_center[2, 0], f'C{book_keeper.Q[view_idx].id}')
view_direction = book_keeper.Q[view_idx].rotation_matrix.T @ z_hat
ax.quiver(book_keeper.Q[view_idx].camera_center[0], book_keeper.Q[view_idx].camera_center[1],
book_keeper.Q[view_idx].camera_center[2], view_direction[0], view_direction[1],
view_direction[2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
var = str(len(book_keeper.Q))
# plt.savefig('images/' + var + '_plot.png', bbox_inches='tight', dpi=250)
# plt.close('all')
plt.show()
def visualize_reprojection(self, view, points_3d):
"""
Visualize reprojection of already triangulated 3d points on top of view.image.
------
Inputs:
view : View
points_3d : 3xN
"""
lab3.imshow(view.image)
reproj = lab3.project(points_3d, view.camera_matrix)
plt.scatter(reproj[0, :], reproj[1, :], color=(1, 0, 0))
# plt.show()
def estimate_color(self, view):
"""
Estimates the color for 3D points in a point cloud.
---
Input
view : [View, View]
List of view objects
---
Output:
estimated_color :
[Nx3] matrix representing the normalized rgb color for every 3D point
(N = number of 3D points)
"""
num_3d_points = view[0].projections.shape[1]
num_cameras = len(view)
estimated_color = np.zeros((num_3d_points, 3))
for point_idx in range(num_3d_points):
color_sum = np.zeros((3))
num_views_visible = 0
for camera_idx in range(num_cameras):
pixel_x = round(view[camera_idx].projections[0][point_idx])
pixel_y = round(view[camera_idx].projections[1][point_idx])
if pixel_x != -1:
color_sum += view[camera_idx].image[pixel_y, pixel_x]
num_views_visible += 1
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning) # suppress the divide RuntimeWarning when a point is visible in no view
estimated_color[point_idx] = color_sum/(255*num_views_visible)
return estimated_color
def visualize_camera_centers(self, R_est, t_est, R_gt, t_gt):
fig = plt.figure()
ax = fig.gca(projection='3d')
gt_cameras = np.zeros([R_gt.shape[0], 3, 1])
est_cameras = np.zeros([R_est.shape[0], 3, 1])
for i in range(R_gt.shape[0]):
gt_cameras[i, ...] = -R_gt[i, ...].T @ t_gt[i, ...]
est_cameras[i, ...] = -R_est[i, ...].T @ t_est[i, ...]
ax.scatter(gt_cameras[:, 0], gt_cameras[:, 1], gt_cameras[:, 2], color=(1, 0, 0))
ax.scatter(est_cameras[:, 0], est_cameras[:, 1], est_cameras[:, 2], color=(0, 0, 1))
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def visualize_interest_points(self, view):
"""
Displays the 2D projections of the given View object on the actual image.
"""
plt.imshow(view.image)
plt.scatter(view.projections[0, :], view.projections[1, :], c='r', label='o')
plt.show()
| [
"[email protected]"
]
| |
43161b15896e4902218ba23e07244705afec3bd9 | b0bb0dcdf8228cbdd02e47a9e2097892f7dd0861 | /bak/download.py | 03f5f93a9a8b197c04727d19e700059556ff1ede | []
| no_license | scmsqhn/zipline_for_u | 562de0d8ed638de431d207e6808db7e19fc168f7 | 369b17fd3142fcfb9ced7ce0b17a3a35a8af37d4 | refs/heads/master | 2021-01-20T06:51:09.012662 | 2017-05-02T14:54:38 | 2017-05-02T14:54:38 | 89,936,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,161 | py | # -*- coding: utf-8 -*-
# @Author: yuqing5
# date: 20151023
import tushare as ts
from sqlalchemy import create_engine
import datetime
import time
import pandas as pd
import os
import cPickle
from pandas import DataFrame
import pandas.io.sql as SQL
import sys
sys.path.append('./utility/')
from tool_decorator import local_memcached
def date2str(date):
return date.strftime("%Y-%m-%d")
class DownLoad(object):
'''
1.下载历史数据
2. 更新每天数据
3. 装载历史数据
'''
def __init__(self):
self.basic = ts.get_stock_basics()
self.engine = create_engine('mysql://root:[email protected]/stock_info?charset=utf8')
self.connection = self.engine.connect()
@staticmethod
def date2str(today=None):
if today == None:
today =datetime.date.today()
return today.strftime("%Y-%m-%d")
def down_history(self, stock, index=False):
'''
Download data from the IPO date up to today; also usable for newly listed stocks
date,open,high,close,low,volume,amount
'''
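# (added note) history is fetched one listing-anniversary year at a time and
# concatenated -- splitting keeps each ts.get_h_data request to a small range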
print '--'*10,"downloading ",stock,'--'*10
date = self.basic.ix[stock]['timeToMarket']
# timeToMarket such as 20100115 is, surprisingly, stored as an integer
start_year = date/10000
today =datetime.date.today()
end_year = int(today.strftime("%Y"))
suffix = "-" + str(date)[4:6] + "-" + str(date)[6:8]
raw_data = None
# handle sub-new stocks that listed this year
if start_year == end_year:
raw_data = ts.get_h_data(stock, index=index) # keyword arg: positionally the value would be taken as start
for year in range(start_year, end_year):
start = str(year) + suffix
right = datetime.datetime.strptime(str(year+1) + suffix, "%Y-%m-%d")-datetime.timedelta(days=1)
# no company lists exactly on the year-boundary day, so this split is safe
end = right.strftime("%Y-%m-%d")
print start, "-----",end
data = ts.get_h_data(stock,start=start,end=end,index=index)
if data is None:
print None
else:
print data.shape
raw_data = pd.concat([raw_data, data], axis=0)
# check whether the final stretch up to today still needs fetching
if (year+1) == end_year and end < today.strftime("%Y-%m-%d"):
this_year_start = str(year+1) + suffix
print this_year_start, "-------",today.strftime("%Y-%m-%d")
data = ts.get_h_data(stock, start=this_year_start, end=today.strftime("%Y-%m-%d"),index=index)
if data is None:
print None
else:
print data.shape
raw_data = pd.concat([raw_data, data], axis=0)
raw_data = raw_data.sort_index(ascending=True)
raw_data.to_sql('day_'+stock, self.engine)
return raw_data
def down_all_day_stick(self):
'''
Download full history for every stock
'''
for stock in self.basic.index:
try:
print stock
self.down_history(stock)
except Exception ,ex:
print Exception, ";",ex
def append_days(self,stock, start, end):
'''
Append data for the given stock within [start, end]
'''
data = ts.get_h_data(stock,start=start,end=end)
data = data.sort_index(ascending=True)
data.to_sql('day_'+stock, self.engine,if_exists='append')
def append_all_days(self, start=None, end=None):
'''
Append the latest data for every stock
'''
if start == None:
start = datetime.datetime.today()
end = start
for stock in self.basic.index: # stock codes are the index of get_stock_basics()
self.append_days(stock, start, end)
def load_data(self, stock):
'''
Load the stored history for a stock from MySQL
'''
search_sql = "select * from {0}".format('day_'+stock)
raw_data = SQL.read_sql(search_sql, self.engine)
return raw_data
def check_is_new_stock(self, stock):
'''
Check whether the stock is newly listed (i.e. its table does not exist yet)
-- it turned out this helper is not needed
'''
check_sql = "show tables like '{0}'".format('day_'+stock)
result = self.connection.execute(check_sql)
if result.first() == None:
return True
else:
return False
# defaults to roughly the last 3 years of data
def down_period(self, stock,start=None,end=None):
raw_data = ts.get_hist_data(stock,start,end)
return raw_data
# new listings such as 603861 are problematic
# wrap the tushare API so the same day's data is not fetched twice
class TS(object):
@staticmethod
@local_memcached
def memchaced_data(funcname, fileprefix):
'''
Usage:
1. funcname -- the tushare function to call
2. fileprefix -- file name prefix under which that function's result is cached
'''
raw_data = funcname()
return raw_data
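# Hedged usage sketch (not in the original file): cache today's stock list
# raw = TS.memchaced_data(ts.get_stock_basics, 'stock_basics')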
if __name__ == '__main__':
# dl = DownLoad()
# dl.down_all_day_stick()
# raw_data = dl.load_data('000001')
# print raw_data
TS() | [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.