Dataset schema (column name, type, and observed range or number of distinct values; ⌀ marks columns that contain nulls):

| Column | Type | Range / Values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
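The example records below are easiest to read programmatically rather than as a flat table. As a minimal sketch (assuming the Hugging Face `datasets` library and a placeholder dataset id, since the actual repository path is not given here), rows with this schema could be streamed and inspected like this:

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the real repository path for this dataset.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

for row in ds.take(3):
    # Each record carries provenance metadata plus the file text in `content`.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])
```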
d091376ea903c1328ac580659f780419ba14131f
|
5f834f8aa0603f4f7adc56fdcd5e227538931f81
|
/diab_logisReg.py
|
2ce02d3f7c7d13896c4c7c0870cb4b25f1af7a59
|
[] |
no_license
|
Kamal-prog-code/HealthCare
|
d9a613bcb315a04b14feead97bb4367034f91606
|
2d2fe464a5d25c1373634663dc1eaf07a9064a30
|
refs/heads/main
| 2023-01-20T22:17:55.157525 | 2020-12-05T20:50:03 | 2020-12-05T20:50:03 | 318,627,358 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,462 |
py
|
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.ml.feature import StandardScaler
from pyspark.ml.classification import LogisticRegression
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.classification import RandomForestClassifier
import pickle
import os
spark = SparkSession.builder.appName('HSP').getOrCreate()
df=spark.read.csv('hdfs://localhost:9000/user/BigDataProj/diab.csv',inferSchema=True,header=True)
from pyspark.sql.functions import col
from sklearn.linear_model import LogisticRegression
new_data = df.select(*(col(c).cast("float").alias(c) for c in df.columns))
from pyspark.sql.functions import col,count,isnan,when
from sklearn.preprocessing import StandardScaler
new_data.select([count(when(col(c).isNull(),c)).alias(c) for c in new_data.columns]).show()
cols=new_data.columns
cols.remove("Outcome")
assembler = VectorAssembler(inputCols=cols,outputCol="features")
data=assembler.transform(new_data)
# data.select("features",'Outcome').show(truncate=False)
train, test = df.randomSplit([0.7, 0.3])
x_col = new_data.columns
x_train = train.toPandas()[x_col[:-1]].values
y_train = train.toPandas()['Outcome'].values
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
cls = LogisticRegression()
cls.fit(x_train,y_train)
save_path = 'prediction/'
completeName = os.path.join(save_path, "dblogR.pkl")
pickle.dump(cls, open(completeName, 'wb'))
|
[
"[email protected]"
] | |
b25f51bd9909f386f89f3058a2323e1d1b8c133f
|
6c2608bc87b522da77c792e20330989de17b3005
|
/Chap-7/ex179.py
|
43c10f227985eb4652d2196644fcc6bc8c504dfe
|
[] |
no_license
|
AleByron/AleByron-The-Python-Workbook-second-edition
|
8a0b408c1bbd90c82e6b837fc898ee10341ca8fa
|
491b2fd394aa04e29a4b2dbe9a615c547e239028
|
refs/heads/main
| 2023-01-13T21:01:17.757669 | 2020-11-11T01:29:28 | 2020-11-11T01:29:28 | 306,487,964 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
def square(n,g):
    if abs(n-g**2)<10**-12:
return g
else:
g = square(n,(g+(n/g))/2)
return g
def main():
n = 32
g = 1
print(square(n,g))
main()
|
[
"[email protected]"
] | |
bfb90c8755e3b83e9062e88376453a3cfeeee7ec
|
9c2edc273db48dcb6d31a937510476b7c0b0cc61
|
/pyopengl_sample/tutorial1.py
|
0fd92b98d815dd26d6457ba6f9ac33791867e7e0
|
[] |
no_license
|
miyamotok0105/python_sample
|
4d397ac8a3a723c0789c4c3e568f3319dd754501
|
77101c981bf4f725acd20c9f4c4891b29fbaea61
|
refs/heads/master
| 2022-12-19T22:53:44.949782 | 2020-05-05T05:09:22 | 2020-05-05T05:09:22 | 81,720,469 | 1 | 0 | null | 2022-11-22T02:22:55 | 2017-02-12T11:15:08 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 391 |
py
|
#!/usr/bin/python
import sys  # needed because glutInit(sys.argv) is called below
from OpenGL.GL import *
from OpenGL.GLUT import *
def draw():
glClearColor(1.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glFlush()
glutSwapBuffers()
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(320, 240)
glutCreateWindow("PyOpenGL 1")
glutDisplayFunc(draw)
glutMainLoop()
|
[
"[email protected]"
] | |
0d918889f8d20d3a4695849eb65eab1ae2ad9c9d
|
edfd1db2b48d4d225bc58be32fbe372a43415112
|
/team-task/airflow2.0/dags/efirmant/lesson3.challenge2.py
|
ea755ca01b0b78617310f0d87c4b0b0748206373
|
[] |
no_license
|
rwidjojo/airflow-training
|
ed83cb9e97ca85ef06de1426f2f41014881a1f22
|
ac82040d8ddc3859df5576eee08d397e824016f1
|
refs/heads/main
| 2023-08-12T21:01:17.672059 | 2021-01-04T09:17:48 | 2021-01-04T09:17:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 915 |
py
|
import logging
from datetime import timedelta
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.postgres_hook import PostgresHook
owner = 'efirmant' # Replace with your short name
default_args = {
'owner': owner,
'depends_on_past': False,
'start_date': days_ago(2),
}
dag = DAG(
f'{owner}.lesson3.challenge2',
default_args=default_args,
description='Read data from postgresql',
schedule_interval=None,
)
def read_data():
db_conn = PostgresHook(postgres_conn_id='efirmant_posgres2')
result = db_conn.get_records('SELECT order_date, count(order_id) from efirmant_orders GROUP BY order_date')
for row in result:
logging.info(row)
read_task = PythonOperator(
task_id="read",
python_callable=read_data,
dag=dag
)
|
[
"[email protected]"
] | |
3b97278167640c790740fbd6e9a435d1e87ce6e0
|
baaa8c9486e02f4232f4926cf4e1a2eeee1199b4
|
/accounts/admin.py
|
2395fb1f93dfca90cba93acc7edf2da53b6c172c
|
[] |
no_license
|
bondarenkoav/helpdesk
|
b2be867605d484c34aaea4d8bea876c633947f14
|
866ea2dc6ee5182d6310d800b301270b38490fd2
|
refs/heads/master
| 2023-01-08T09:44:15.852016 | 2022-12-28T10:53:39 | 2022-12-28T10:53:39 | 93,615,791 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,047 |
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from accounts.models import Profile
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
verbose_name = u'Профиль'
verbose_name_plural = u'Профиль'
fk_name = 'user'
class CustomUserAdmin(UserAdmin):
inlines = (ProfileInline, )
def get_inline_instances(self, request, obj=None):
if not obj:
return list()
return super(CustomUserAdmin, self).get_inline_instances(request, obj)
# class ProfileInline(admin.StackedInline):
# model = Profile
# can_delete = False
# verbose_name = u'Профиль'
# verbose_name_plural = u'Профиль'
# fk_name = 'user'
#
#
# # @admin.register(User)
# class CustomUserAdmin(UserAdmin):
# inlines = (ProfileInline, )
# list_display = ('username', 'last_name', 'first_name', 'is_active',
# 'get_phone', 'get_birthday', 'get_groups', 'get_location')
# list_filter = ('is_active', 'groups')
# search_fields = ('username', 'first_name', 'last_name')
#
# list_select_related = True
#
# def get_groups(self, instance):
# list_groups = ''
# for group in instance.groups.all():
# if list_groups == '':
# list_groups = group.name
# else:
# list_groups = list_groups + ', ' + group.name
# return list_groups
# get_groups.short_description = u'Группы'
#
# def get_location(self, instance):
# return instance.profile.location
# get_location.short_description = u'Город'
#
# def get_birthday(self, instance):
# return instance.profile.birthday
# get_birthday.short_description = u'Дата рождения'
#
# def get_phone(self, instance):
# return instance.profile.phone
# get_phone.short_description = u'Номер'
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
|
[
"[email protected]"
] | |
8e291920bc9258758fe57e54877cada173a13eef
|
63bf6161532eefa72aa3be8b01cde601b08507dc
|
/python-mapping-example/fhir_model_generator/tests/model/slot_tests.py
|
ad3cec096349f05c2c4414e7b0a4ae6fc7aac7a8
|
[
"Apache-2.0"
] |
permissive
|
Healthedata1/mFHIR
|
4ef370b87e03e973918e5683977d32fe262655bc
|
1b4ea441cfa08b661416a3badedf7e90f2809163
|
refs/heads/master
| 2022-12-10T21:07:03.948406 | 2021-06-18T01:58:23 | 2021-06-18T01:58:23 | 129,964,251 | 9 | 5 | null | 2022-12-09T05:23:54 | 2018-04-17T20:57:15 |
HTML
|
UTF-8
|
Python
| false | false | 6,767 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 on 2020-02-10.
# 2020, SMART Health IT.
import os
import io
import unittest
import json
from model import slot
from model.fhirdate import FHIRDate
class SlotTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or \
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'fhir-parser', 'downloads'))
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Slot", js["resourceType"])
return slot.Slot(js)
def testSlot1(self):
inst = self.instantiate_from("slot-example-busy.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot1(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot1(inst2)
def implSlot1(self, inst):
self.assertEqual(inst.comment, "Assessments should be performed before requesting appointments in this slot.")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:15:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:15:00Z")
self.assertEqual(inst.id, "1")
self.assertEqual(inst.identifier[0].system, "http://example.org/identifiers/slots")
self.assertEqual(inst.identifier[0].value, "123132")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertTrue(inst.overbooked)
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:00:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:00:00Z")
self.assertEqual(inst.status, "busy")
self.assertEqual(inst.text.status, "generated")
def testSlot2(self):
inst = self.instantiate_from("slot-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot2(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot2(inst2)
def implSlot2(self, inst):
self.assertEqual(inst.appointmentType.coding[0].code, "WALKIN")
self.assertEqual(inst.appointmentType.coding[0].display, "A previously unscheduled walk-in visit")
self.assertEqual(inst.appointmentType.coding[0].system, "http://terminology.hl7.org/CodeSystem/v2-0276")
self.assertEqual(inst.comment, "Assessments should be performed before requesting appointments in this slot.")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:30:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:30:00Z")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.serviceType[0].coding[0].code, "57")
self.assertEqual(inst.serviceType[0].coding[0].display, "Immunization")
self.assertEqual(inst.specialty[0].coding[0].code, "408480009")
self.assertEqual(inst.specialty[0].coding[0].display, "Clinical immunology")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:15:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:15:00Z")
self.assertEqual(inst.status, "free")
self.assertEqual(inst.text.status, "generated")
def testSlot3(self):
inst = self.instantiate_from("slot-example-unavailable.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot3(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot3(inst2)
def implSlot3(self, inst):
self.assertEqual(inst.comment, "Dr Careful is out of the office")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:45:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:45:00Z")
self.assertEqual(inst.id, "3")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:30:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:30:00Z")
self.assertEqual(inst.status, "busy-unavailable")
self.assertEqual(inst.text.status, "generated")
def testSlot4(self):
inst = self.instantiate_from("slot-example-tentative.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot4(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot4(inst2)
def implSlot4(self, inst):
self.assertEqual(inst.comment, "Dr Careful is out of the office")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T10:00:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T10:00:00Z")
self.assertEqual(inst.id, "2")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:45:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:45:00Z")
self.assertEqual(inst.status, "busy-tentative")
self.assertEqual(inst.text.status, "generated")
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
63cf4e7fc790f00047e1d1b4a59e089134a6a4ce
|
1113e8eec4ccbbcd00c6a9b5466c5239b6f0eb03
|
/cpos/foundation/_callable/core.py
|
d5bcbc5bd5bde2ddfb68fa7a980ecfe3e94c65cb
|
[] |
no_license
|
yizhong120110/CPOS
|
a05858c84e04ce4aa48b3bfb43ee49264ffc5270
|
68ddf3df6d2cd731e6634b09d27aff4c22debd8e
|
refs/heads/master
| 2021-09-01T17:59:53.802095 | 2017-12-28T05:43:06 | 2017-12-28T05:43:06 | 106,247,188 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,495 |
py
|
# -*- coding: utf-8 -*-
import textwrap
import sys
import os
from ..substrate.utils.logger import logger
from ..substrate.interfaces import Clarified
class Callable(object):
def __init__(self, py_code, fragment_name=''):
self.d = {'py_code':py_code,'fragment_name':fragment_name}
def run(self, np,np_local):
"""
        # Register py_code in np
"""
try:
#logger.ods (str(self.d['py_code']) , lv = 'dev' , cat = 'foundation.callable')
exec(self.d['py_code'],np,np_local)
return True
except:
logger.oes("callable error:" , lv = 'error' , cat = 'foundation.callable')
return False
def get_name (self):
return self.d['fragment_name']
class DynamicRuntime(Clarified):
NAME = "DynamicRuntime"
    # np_init_code and upd_np should be deprecated; they are kept only for compatibility with old code.
# Runtime and Callable should be used like test cases showed below.
def __init__ (self,np_init_code='',upd_np={}, np = {}, np_local = None):
self.np = np
self.np_local = np_local
self.np_init_code = np_init_code
self.prepare_environment()
self.np.update(upd_np)
def call(self,callable_object):
return callable_object.run(self.np,self.np_local)
def prepare_environment(self):
ca = Callable(textwrap.dedent(self.np_init_code.replace('\r', '')))
self.call(ca)
return True
def last_result (self):
        # equal to the "_" variable in the py console.
if '_' in (self.np.keys()):
return self.np['_']
return None
def var (self,var_name):
        # equal to the "_" variable in the py console.
if var_name in (self.np.keys()):
return self.np[var_name]
return None
def statement_dynamic_call (statement = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if statement != '':
if not dr.call( Callable( statement ) ):
return None
return dr
def direct_dynamic_call (module_name = '',func_name = '',args = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if dr.var('_') is None:
dr = statement_dynamic_call('_ = None',dr)
if module_name != '':
statement = 'from %s import %s' % (module_name,'*' if func_name == '' else func_name)
dr = statement_dynamic_call(statement,dr)
if func_name != '' and func_name != '*':
statement = '_ = %s(%s) or _'%(func_name, args)
dr = statement_dynamic_call(statement,dr)
if not dr:
return None
return dr
def direct_dynamic_call_pyfile (pyfile='' , root='' ,func_name = '',args = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if dr.var('_') is None:
dr = statement_dynamic_call('_ = None',dr)
if pyfile != '':
root = os.path.abspath(root) + os.sep
pyfile = os.path.abspath(os.path.join(root, pyfile.strip('/\\')))
statement = open(pyfile,mode='rb').read()
dr = statement_dynamic_call(statement,dr)
if func_name != '':
statement = '_ = %s(%s) or _'%(func_name, args)
dr = statement_dynamic_call(statement,dr)
if not dr:
return None
return dr
scall = statement_dynamic_call
dcall = direct_dynamic_call
dcallpy = direct_dynamic_call_pyfile
#######################################################################
#TEST
a = 0
def __test_call ():
global a
a = 100
print ('__test_call')
return 0
def _test1 ():
    # Using globals() here affects the current environment and breaks the normal use of open
#dr = DynamicRuntime(np=globals())
dr = DynamicRuntime(np=locals())
dr = dcall('os',runtime = dr)
if dr:
dr = dcall(func_name = 'times',args = '',runtime = dr)
if dr:
dr = dcall(func_name = 'print',args = '_',runtime = dr)
if dr:
dr = dcall(func_name = 'times',args = '',runtime = dr)
if dr:
dr = dcall(func_name = 'print',args = '_',runtime = dr)
if dr:
dr = scall('print(\' Hello \')',runtime = dr)
if dr:
dr = scall('__test_call()',runtime = dr)
print(a)
def _test2 ():
b = 1
c = 1
dr = DynamicRuntime( np = locals())
scall('b = b + 1',dr)
print(dr)
print(b)
    ## note! we have to obtain the result manually. The 'b = b + 1' call will not touch the 'b' in this scope.
# why? ????
#refer to python doc [exec]:
#Note
#The default locals act as described for function locals() below:
# modifications to the default locals dictionary should not be attempted. Pass an explicit locals dictionary
# if you need to see effects of the code on locals after function exec() returns.
#
print (dr.var('b'))
def _test3 ():
dr = scall('t3 = "this is t3" ')
print(dr.var('t3'))
dr = scall('t4 = t3 + " and t4" ',dr)
print(dr.var('t4'))
def _test4 ():
    # If the next line raises an error, the local environment has been corrupted, which is caused by np=globals()
#print("++++++++++==========",help(open))
dr = dcallpy(os.path.abspath( __file__ ),'_test4_print')
dr = dcallpy(func_name='_test4_print_2' ,args='1111' ,runtime=dr)
dr = dcallpy(func_name='_test4_print_3' ,args='1111,2222' ,runtime=dr)
def _test4_print():
print("===== my name is _test4_print")
def _test4_print_2(aaaa):
print("===== my name is _test4_print_2 %s"%(aaaa))
def _test4_print_3(aaaa,bbbbb):
print("===== my name is _test4_print_3 %s %s"%(aaaa,bbbbb))
def _test5 ():
dr = scall('')
dr.np['aaaaa'] = 'test is aaaaa'
dr = dcall(func_name = 'print',args = 'aaaaa',runtime = dr)
if __name__ == '__main__':
_test1()
print('==========================================================')
_test2()
print('==========================================================')
_test3()
print('==========================================================')
_test4()
print('==========================================================')
_test5()
|
[
"[email protected]"
] | |
37a08b96698b20dd1fea9d7b61d6b4b83fbb7d5e
|
2672a2b664ed12f190b68deb51476b451a524561
|
/portal/config.py
|
e45d5065a743935fa64b17b3a1a2a8ea6266d98c
|
[] |
no_license
|
LCBRU/genvasc_portal_web
|
9a2a27b4a2ba0fb2db402efc96eea8b2ed0a86e6
|
11eb562a5e92fd05fd5a902b7e062a2813e7b3f7
|
refs/heads/master
| 2023-01-09T09:59:07.301366 | 2023-01-07T14:44:07 | 2023-01-07T14:44:07 | 132,786,398 | 0 | 0 | null | 2022-01-11T13:17:30 | 2018-05-09T16:45:40 |
Python
|
UTF-8
|
Python
| false | false | 2,527 |
py
|
import os
from dotenv import load_dotenv
# Load environment variables from '.env' file.
load_dotenv()
class BaseConfig(object):
REMEMBER_COOKIE_NAME = 'GENVASC Remember Me'
REMEMBER_COOKIE_DURATION = 1
MAIL_SERVER = os.environ['MAIL_SERVER']
MAIL_DEBUG = os.environ['MAIL_DEBUG']
SECURITY_EMAIL_SENDER = os.environ['LCBRUIT_EMAIL_ADDRESS']
SECRET_KEY = os.environ['GGPP_FLASK_SECRET_KEY']
DEBUG = os.environ['GGPP_FLASK_DEBUG'] == 'True'
SQLALCHEMY_DATABASE_URI = os.environ['SQLALCHEMY_DATABASE_URI']
SQLALCHEMY_TRACK_MODIFICATIONS = (
os.environ['GGPP_SQLALCHEMY_TRACK_MODIFICATIONS'] == 'True'
)
SQLALCHEMY_ECHO = os.environ['GGPP_SQLALCHEMY_ECHO'] == 'True'
SECURITY_PASSWORD_HASH = os.environ['GGPP_SECURITY_PASSWORD_HASH']
SECURITY_PASSWORD_SALT = os.environ['GGPP_SECURITY_PASSWORD_SALT']
SECURITY_TRACKABLE = os.environ['GGPP_SECURITY_TRACKABLE'] == 'True'
SMTP_SERVER = 'localhost'
APPLICATION_EMAIL_ADDRESS = os.environ['LCBRUIT_EMAIL_ADDRESS']
ERROR_EMAIL_SUBJECT = 'GENVASC Portal Error'
SECURITY_CHANGEABLE = True
SECURITY_RECOVERABLE = True
MAIL_DEFAULT_SENDER = os.environ["LCBRUIT_EMAIL_ADDRESS"]
# Admin user
ADMIN_EMAIL_ADDRESS = os.environ['ADMIN_EMAIL_ADDRESS']
ADMIN_FIRST_NAME = os.environ['ADMIN_FIRST_NAME']
ADMIN_LAST_NAME = os.environ['ADMIN_LAST_NAME']
ADMIN_PASSWORD = os.environ['ADMIN_PASSWORD']
# Celery Settings
broker_url=os.environ["BROKER_URL"]
result_backend=os.environ["CELERY_RESULT_BACKEND"]
CELERY_RATE_LIMIT=os.environ["CELERY_RATE_LIMIT"]
CELERY_REDIRECT_STDOUTS_LEVEL=os.environ["CELERY_REDIRECT_STDOUTS_LEVEL"]
CELERY_DEFAULT_QUEUE=os.environ["CELERY_DEFAULT_QUEUE"]
# Celery Schedules
PRACTICE_ETL_SCHEDULE_MINUTE=os.environ["PRACTICE_ETL_SCHEDULE_MINUTE"]
PRACTICE_ETL_SCHEDULE_HOUR=os.environ["PRACTICE_ETL_SCHEDULE_HOUR"]
# Databases
PRACTICE_DATABASE_URI=os.environ["PRACTICE_DATABASE_URI"]
RECRUIT_DATABASE_URI=os.environ["RECRUIT_DATABASE_URI"]
IMPORT_DATABASE_URI=os.environ["IMPORT_DATABASE_URI"]
class TestConfig(BaseConfig):
"""Configuration for automated testing"""
TESTING = True
SQLALCHEMY_DATABASE_URI="sqlite://"
PRACTICE_DATABASE_URI="sqlite://"
RECRUIT_DATABASE_URI="sqlite://"
WTF_CSRF_ENABLED = False
SMTP_SERVER = None
SQLALCHEMY_ECHO = False
broker_url=os.environ["BROKER_URL"] + '/test'
class TestConfigCRSF(TestConfig):
WTF_CSRF_ENABLED = True
|
[
"[email protected]"
] | |
3dd764efee547895b61b17074bef1e80ee82a562
|
9bb6795a12d6e042b962704dab9ec59d92d54e8f
|
/1_numpy/2_reshape.py
|
b5d60e8241460327f2b7b83d534050593e76005f
|
[] |
no_license
|
kimsoosoo0928/Perfect_Guide
|
c5177037512cb06814f0bbfcb70a22d14c9ec1fb
|
9b615d320957babb1a918fb38282062998a1e5c4
|
refs/heads/main
| 2023-07-18T12:29:03.353274 | 2021-08-29T00:31:28 | 2021-08-29T00:31:28 | 396,668,104 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,093 |
py
|
import numpy as np
array1 = np.arange(10)
print('array1 : \n', array1)
array2 = array1.reshape(2,5)
print('array2 : \n', array2)
array3 = array1.reshape(5,2)
print('array3 : \n', array3)
'''
array1 :
[0 1 2 3 4 5 6 7 8 9]
array2 :
[[0 1 2 3 4]
[5 6 7 8 9]]
array3 :
[[0 1]
[2 3]
[4 5]
[6 7]
[8 9]]
'''
array1 = np.arange(10)
print(array1)
array2 = array1.reshape(-1,5)
print('array2 shape : ', array2.shape)
array3 = array1.reshape(5,-1)
print('array3 shape : ', array3.shape)
'''
[0 1 2 3 4 5 6 7 8 9]
array2 shape : (2, 5)
array3 shape : (5, 2)
'''
array1 = np.arange(8)
array3d = array1.reshape((2,2,2))
print('array3d : \n', array3d.tolist())
array5 = array1.reshape((-1,1))
print('array5 : \n', array5.tolist())
print('array5 shape : \n', array5.shape)
array6 = array1.reshape((-1,1))
print('array6 : \n', array6.tolist())
print('array6 shape : \n', array6.shape)
'''
array3d :
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
array5 :
[[0], [1], [2], [3], [4], [5], [6], [7]]
array5 shape :
(8, 1)
array6 :
[[0], [1], [2], [3], [4], [5], [6], [7]]
array6 shape :
(8, 1)
'''
|
[
"[email protected]"
] | |
7fa50c182bf54b2fbf51441eefa0f324279633e7
|
1431b07074b96c7baa6a43a99717da2a658424af
|
/test/utils/Test_Zip_Folder.py
|
d6ecc3e784eaacfbffe1988284d8bf95e88f557b
|
[
"Apache-2.0"
] |
permissive
|
almeidam/pbx-gs-python-utils
|
054a7334070627bc27f682ed78c2986230d1cfab
|
3f8987dd2d1fc27d1d262385280d7303009f5453
|
refs/heads/master
| 2020-04-30T10:44:46.179729 | 2019-03-20T13:59:01 | 2019-03-20T13:59:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 564 |
py
|
import json
from unittest import TestCase
from utils.Dev import Dev
from utils.Files import Files
from utils.Misc import Misc
from utils.Zip_Folder import Zip_Folder
class Test_Zip_Folder(TestCase):
def test__using_with__no_params(self):
with Zip_Folder() as (zip_file):
assert zip_file is None
def test__using_with_params(self):
target_folder = Files.current_folder()
with Zip_Folder(target_folder) as (zip_file):
assert Files.exists(zip_file) is True
assert Files.exists(zip_file) is False
|
[
"[email protected]"
] | |
69acf6cb42853141e98f121c77a9d61f1f1a30cf
|
2c926b4847a44c7f831d47ed0160751d3248e8f4
|
/venv/lib/python3.8/site-packages/hubspot/automation/actions/models/single_field_dependency.py
|
f18ca6bf64e6504458c415ed11f6e4ab7e527d5a
|
[] |
no_license
|
Women-in-Tech-Society/WITS_Site
|
c42cd2c9abe1b5515b80be82dc876a6c3842e42a
|
5dbf22f5ee5a36358f6f279af4c13d86d31653c5
|
refs/heads/main
| 2023-05-11T02:34:05.531902 | 2021-06-01T01:05:12 | 2021-06-01T01:05:12 | 278,658,100 | 0 | 5 | null | 2022-11-22T18:41:35 | 2020-07-10T14:43:28 |
Python
|
UTF-8
|
Python
| false | false | 6,688 |
py
|
# coding: utf-8
"""
Custom Workflow Actions
Create custom workflow actions # noqa: E501
The version of the OpenAPI document: v4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.automation.actions.configuration import Configuration
class SingleFieldDependency(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"dependency_type": "str",
"dependent_field_names": "list[str]",
"controlling_field_name": "str",
}
attribute_map = {
"dependency_type": "dependencyType",
"dependent_field_names": "dependentFieldNames",
"controlling_field_name": "controllingFieldName",
}
def __init__(
self,
dependency_type="SINGLE_FIELD",
dependent_field_names=None,
controlling_field_name=None,
local_vars_configuration=None,
): # noqa: E501
"""SingleFieldDependency - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._dependency_type = None
self._dependent_field_names = None
self._controlling_field_name = None
self.discriminator = None
self.dependency_type = dependency_type
self.dependent_field_names = dependent_field_names
self.controlling_field_name = controlling_field_name
@property
def dependency_type(self):
"""Gets the dependency_type of this SingleFieldDependency. # noqa: E501
:return: The dependency_type of this SingleFieldDependency. # noqa: E501
:rtype: str
"""
return self._dependency_type
@dependency_type.setter
def dependency_type(self, dependency_type):
"""Sets the dependency_type of this SingleFieldDependency.
:param dependency_type: The dependency_type of this SingleFieldDependency. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and dependency_type is None
): # noqa: E501
raise ValueError(
"Invalid value for `dependency_type`, must not be `None`"
) # noqa: E501
allowed_values = ["SINGLE_FIELD"] # noqa: E501
if (
self.local_vars_configuration.client_side_validation
and dependency_type not in allowed_values
): # noqa: E501
raise ValueError(
"Invalid value for `dependency_type` ({0}), must be one of {1}".format( # noqa: E501
dependency_type, allowed_values
)
)
self._dependency_type = dependency_type
@property
def dependent_field_names(self):
"""Gets the dependent_field_names of this SingleFieldDependency. # noqa: E501
:return: The dependent_field_names of this SingleFieldDependency. # noqa: E501
:rtype: list[str]
"""
return self._dependent_field_names
@dependent_field_names.setter
def dependent_field_names(self, dependent_field_names):
"""Sets the dependent_field_names of this SingleFieldDependency.
:param dependent_field_names: The dependent_field_names of this SingleFieldDependency. # noqa: E501
:type: list[str]
"""
if (
self.local_vars_configuration.client_side_validation
and dependent_field_names is None
): # noqa: E501
raise ValueError(
"Invalid value for `dependent_field_names`, must not be `None`"
) # noqa: E501
self._dependent_field_names = dependent_field_names
@property
def controlling_field_name(self):
"""Gets the controlling_field_name of this SingleFieldDependency. # noqa: E501
:return: The controlling_field_name of this SingleFieldDependency. # noqa: E501
:rtype: str
"""
return self._controlling_field_name
@controlling_field_name.setter
def controlling_field_name(self, controlling_field_name):
"""Sets the controlling_field_name of this SingleFieldDependency.
:param controlling_field_name: The controlling_field_name of this SingleFieldDependency. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and controlling_field_name is None
): # noqa: E501
raise ValueError(
"Invalid value for `controlling_field_name`, must not be `None`"
) # noqa: E501
self._controlling_field_name = controlling_field_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SingleFieldDependency):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SingleFieldDependency):
return True
return self.to_dict() != other.to_dict()
|
[
"[email protected]"
] | |
d75ab97fb9184a24f45a05f01fc83903b2dc748e
|
6f8aec72f983715b1dcc1e067e980a440440423a
|
/bruteguard/patterns/singleton.py
|
a9f7c0f51adf9d2ce958d11132938a6d7c1b1ffb
|
[
"MIT"
] |
permissive
|
dcopm999/django-brute-guard
|
41cef7c1f98b275c0ef2176424c8ef1e75002fdb
|
e4c629d81f1cc732ddae2a43042e92ea423884b8
|
refs/heads/master
| 2023-08-02T06:16:54.219332 | 2021-09-30T05:45:10 | 2021-09-30T05:45:10 | 409,435,237 | 0 | 0 |
MIT
| 2021-09-30T05:45:10 | 2021-09-23T03:32:47 |
Python
|
UTF-8
|
Python
| false | false | 1,076 |
py
|
from typing import Dict
class SingletonMeta(type):
"""
    In Python a Singleton can be implemented in several ways, including a base
    class, a decorator, or a metaclass. We use a metaclass here, since it is
    the best fit for this purpose.
"""
_instances: Dict[type, type] = {}
def __call__(cls, *args, **kwargs):
"""
        This implementation does not account for possible changes to the
        arguments passed to `__init__`.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class Singleton(object):
def __new__(cls):
if not hasattr(cls, "instance"):
cls.instance = super(Singleton, cls).__new__(cls)
return cls.instance
|
[
"[email protected]"
] | |
9bf099e4570aab4e3c827aba4cfa379cb7ad7196
|
a86cb1d0cc2c01ccc5b7d03d25a1b98d4f8b66ca
|
/day_18/crawling_03.py
|
08ce38608a68158041385e8770f169492843e3ce
|
[] |
no_license
|
yongseongCho/python_201911
|
020efd812df909f6d1150c6a15a9a4fa6ee946b6
|
f4696fac81a101d13a95ca0ca602e6478b4d2f58
|
refs/heads/master
| 2020-09-12T12:44:46.364259 | 2019-12-19T13:17:08 | 2019-12-19T13:17:08 | 222,429,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,321 |
py
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup as bs
html = '''
<td class="title">
<div class="tit3">
<a href="/movie/bi/mi/basic.nhn?code=181710"
title="포드 V 페라리">포드 V 페라리</a>
</div>
</td>
'''
soup = bs(html, 'html.parser')
# The find method of a BeautifulSoup object
# is used to search by combining a tag name
# with attribute information.
# - find returns only one result:
# - the first matching tag that is found
# Return the first td tag found
tag = soup.find(name='td')
print(f'tag -> {tag}')
# Return the first a tag found
tag = soup.find(name='a')
print(f'tag -> {tag}')
# Return the first tag
# whose class attribute is 'title'
tag = soup.find(attrs={'class':'title'})
print(f'tag -> {tag}')
# Return the first tag
# whose class attribute is 'tit3'
tag = soup.find(attrs={'class':'tit3'})
print(f'tag -> {tag}')
# Return the first tag
# named td
# whose class attribute is 'tit3'
# - returns None if no such tag exists
tag = soup.find(name='td',
                attrs={'class':'tit3'})
print(f'tag -> {tag}')
|
[
"[email protected]"
] | |
90adc3801f23ed865f8ce3373066f9a2a5ee43e3
|
e2bd8debf59f71e2c7fabea03cc108618944b2b0
|
/el_pagination/paginators.py
|
6da5231fca53a0b0e1e586150ed4c8803e1d1b0e
|
[] |
no_license
|
successar/Quizz
|
874c7c8656c33973d5d4f9563073b0434573a333
|
2244ff13568db92e3ff88156982ec44c83418199
|
refs/heads/master
| 2021-01-21T13:11:45.960397 | 2016-05-11T10:34:48 | 2016-05-11T10:34:48 | 53,747,315 | 1 | 1 | null | 2016-05-07T15:00:41 | 2016-03-12T18:36:34 |
Python
|
UTF-8
|
Python
| false | false | 4,359 |
py
|
"""Customized Django paginators."""
from __future__ import unicode_literals
from math import ceil
from django.core.paginator import (
EmptyPage,
Page,
PageNotAnInteger,
Paginator,
)
class CustomPage(Page):
"""Handle different number of items on the first page."""
def start_index(self):
"""Return the 1-based index of the first item on this page."""
paginator = self.paginator
# Special case, return zero if no items.
if paginator.count == 0:
return 0
elif self.number == 1:
return 1
return (
(self.number - 2) * paginator.per_page + paginator.first_page + 1)
def end_index(self):
"""Return the 1-based index of the last item on this page."""
paginator = self.paginator
# Special case for the last page because there can be orphans.
if self.number == paginator.num_pages:
return paginator.count
return (self.number - 1) * paginator.per_page + paginator.first_page
class BasePaginator(Paginator):
"""A base paginator class subclassed by the other real paginators.
Handle different number of items on the first page.
"""
def __init__(self, object_list, per_page, **kwargs):
if 'first_page' in kwargs:
self.first_page = kwargs.pop('first_page')
else:
self.first_page = per_page
super(BasePaginator, self).__init__(object_list, per_page, **kwargs)
def get_current_per_page(self, number):
return self.first_page if number == 1 else self.per_page
class DefaultPaginator(BasePaginator):
"""The default paginator used by this application."""
def page(self, number):
number = self.validate_number(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + self.get_current_per_page(number)
if top + self.orphans >= self.count:
top = self.count
return CustomPage(self.object_list[bottom:top], number, self)
def _get_num_pages(self):
if self._num_pages is None:
if self.count == 0 and not self.allow_empty_first_page:
self._num_pages = 0
else:
hits = max(0, self.count - self.orphans - self.first_page)
try:
self._num_pages = int(ceil(hits / float(self.per_page))) + 1
except ZeroDivisionError:
self._num_pages = 0 # fallback to a safe value
return self._num_pages
num_pages = property(_get_num_pages)
class LazyPaginator(BasePaginator):
"""Implement lazy pagination."""
def validate_number(self, number):
try:
number = int(number)
except ValueError:
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
number = self.validate_number(number)
current_per_page = self.get_current_per_page(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + current_per_page
# Retrieve more objects to check if there is a next page.
objects = list(self.object_list[bottom:top + self.orphans + 1])
objects_count = len(objects)
if objects_count > (current_per_page + self.orphans):
# If another page is found, increase the total number of pages.
self._num_pages = number + 1
# In any case, return only objects for this page.
objects = objects[:current_per_page]
elif (number != 1) and (objects_count <= self.orphans):
raise EmptyPage('That page contains no results')
else:
# This is the last page.
self._num_pages = number
return CustomPage(objects, number, self)
def _get_count(self):
raise NotImplementedError
count = property(_get_count)
def _get_num_pages(self):
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
raise NotImplementedError
page_range = property(_get_page_range)
|
[
"[email protected]"
] | |
2a739b751d27912b4ec246d9d6c54a4b4576bb53
|
441ee516fa509a66eb6a6132ed0fbafeae1a06ae
|
/uploadf/models.py
|
ecd3c53d7d6062ba60639a19f3c1636e76875306
|
[] |
no_license
|
Shirhussain/FileUpload
|
3237627020ec322d4097e757b64f9f0c64feb4e7
|
19d2e848d7d05fd46838f9140c0a5658bbca281a
|
refs/heads/master
| 2022-08-26T13:26:23.859084 | 2020-05-27T22:02:36 | 2020-05-27T22:02:36 | 264,777,859 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 418 |
py
|
from django.db import models
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.CharField(max_length=50)
pdf = models.FileField(upload_to="mag/")
cover = models.ImageField(upload_to="mag/cover/", null=True, blank=True)
def __str__(self):
return self.title
def delete(self,*args, **kwargs):
self.pdf.delete()
self.cover.delete()
super().delete(*args, **kwargs)
|
[
"[email protected]"
] | |
fbf9ee05f41aa879f6e8efe0d638a2ad5f92c86f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/53/usersdata/94/21421/submittedfiles/matriz2.py
|
0d65cb52f0ea2e9536be7e87278cf3364bd3fd2d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,374 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def soma_diagonal_principal(a):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,i]
return soma
def soma_diagonal_secundaria(a):
    soma=0
    for i in range(0,a.shape[0],1):
        soma=soma+a[i,a.shape[0]-i-1]
    return soma
def soma_linha(a):
    c=[]
    for i in range (0,a.shape[0],1):
        soma=0
        for j in range (0,a.shape[1],1):
            soma=soma+a[i,j]
        c.append(soma)
    return c
def soma_coluna(a):
    d=[]
    for j in range(0,a.shape[1],1):
        soma=0
        for i in range(0,a.shape[0],1):
            soma=soma+a[i,j]
        d.append(soma)
    return d
def magico(a):
sdp=soma_diagonal_principal(a)
sds=soma_diagonal_secundaria(a)
sl=soma_linha(a)
sc=soma_coluna(a)
cont=0
for i in range(0,len(sl),1):
if sdp==sds==sl[i]==sc[i]:
cont=cont+1
if cont==len(sl):
return True
else:
return False
linhas=int(input('digite a quantidade de linhas'))
colunas=int(input('digite a quantidade de colunas'))
a=np.zeros((linhas,colunas))
for i in range (0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=float(input('digite um elemento:'))
if magico(a):
print ('S')
else:
print ('N')
|
[
"[email protected]"
] | |
5cbebc094716ebcd2abe250c57520dee3117a1d0
|
d7d25574246fd8585396a02ebd2ca8450e49b082
|
/leetcode-py/leetcode1041.py
|
44b0f6df2298740f5cbbebe712ae04d38cac1548
|
[] |
no_license
|
cicihou/LearningProject
|
b6b1de2300e574835f253935d0c0ae693b194020
|
3a5649357e0f21cbbc5e238351300cd706d533b3
|
refs/heads/master
| 2022-12-04T06:18:14.856766 | 2022-11-29T08:54:16 | 2022-11-29T08:54:16 | 141,606,471 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,835 |
py
|
class Solution:
def isRobotBounded(self, instructions: str) -> bool:
        '''
        A variant of LC 657.
        This problem is fairly tricky and is really closer to a math problem:
        it is enough to check whether the robot is facing north at the end to know whether it comes back to the origin.
        Why instructions *= 4 is the minimum needed is explained here:
        https://leetcode.com/problems/robot-bounded-in-circle/discuss/850437/Python-O(n)-solution-explained
        https://leetcode.com/problems/robot-bounded-in-circle/discuss/290915/Python-Concise-%2B-Explanation
        note: the direc bookkeeping is worth remembering; my instinct was to use two separate arrays for left
        and right turns, but left and right only differ in their period.
        '''
direc = [(0, 1), (-1, 0), (0, -1), (1, 0)]
d = 0
start = (0, 0)
instructions *= 4
for ch in instructions:
if ch == 'G':
nx, ny = direc[d]
start = start[0] + nx, start[1] + ny
else:
if ch == 'L':
d = (d + 1) % 4
if ch == 'R':
d = (d + 3) % 4
return start == (0, 0)
        '''
        An even more tricky mathematical solution: check whether the robot faces north;
        if it does not, it is guaranteed to come back.
        https://leetcode.com/problems/robot-bounded-in-circle/discuss/291221/Python-O(N)-time-O(1)-space-beats-100-detailed-explanations
        '''
direc = [(0, 1), (-1, 0), (0, -1), (1, 0)]
d = 0
start = (0, 0)
for ch in instructions:
if ch == 'L':
d = (d+1) % 4
elif ch == 'R':
d = (d+3) % 4
else:
nx, ny = direc[d]
start = start[0] + nx, start[1] + ny
return start == (0, 0) or d != 0
|
[
"[email protected]"
] | |
c8fe58376e632a3abf6fabe21b845ea9bfca8392
|
493d5df9420ef94d9c5e82acb2d163e2a8c639b7
|
/memo_app/forms.py
|
f9a73ac5260ef3004567845ec9abe38b54032eea
|
[] |
no_license
|
reina0207/django
|
0e3d6422c137be52978526128112ebf319e0f462
|
c42744935043efdcc4b9f3f14641105d082d691a
|
refs/heads/master
| 2023-08-13T14:10:59.979651 | 2021-10-17T01:48:48 | 2021-10-17T01:48:48 | 417,983,393 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 216 |
py
|
from django import forms
from .models import Memo
class PostForm(forms.ModelForm):
class Meta:
model = Memo
fields = ['content']
widgets = {
'content':forms.Textarea
}
|
[
"[email protected]"
] | |
cfbbe9fd87346ac41ce6d9352492c08480e4ec86
|
3cdbe5f5810a035ae168f8ff01c39f58c571e428
|
/golf/migrations/0047_auto_20171013_0759.py
|
3a2a55b2b3c56503895e31b0b55d1c431628e5d5
|
[
"MIT"
] |
permissive
|
kenrumer/scorekeeper
|
ebd6467e2ecde3da96bb08ef77a56f967cbde00e
|
c7f22676e84dfdf6ca3361c6ff56719f68fce31f
|
refs/heads/master
| 2021-01-22T05:01:11.370869 | 2018-01-12T07:13:20 | 2018-01-12T07:13:20 | 102,276,714 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 710 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-13 07:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('golf', '0046_auto_20171013_0021'),
]
operations = [
migrations.RemoveField(
model_name='round',
name='tournament',
),
migrations.AddField(
model_name='round',
name='tournament_date',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='golf.TournamentDate', verbose_name='Tournament and Date'),
),
]
|
[
"[email protected]"
] | |
d5c5779cb06cd034955a358e57ccad53113de7b0
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories/141822/AXA_Telematics-master/Features/modules_janto/featureFun.py
|
6077221b9a1eba16c0182067048fda6cda0a3b49
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,821 |
py
|
# -*- coding: utf-8 -*-
"""
(c) 2015
@author: Janto Oellrich
email: [email protected]
CONTENT:
Contains FEATURE EXTRACTION funtions for
the AXA telematics competition.
FUNCTION LIST:
features: creates feature vector for one trip
driverFrame: creates feature matrix containing features
of all trips of one driver
createFeatmat: create feature matrix for all drivers
"""
from load import *
from modules import *
from paths import *
def features(trip,plotting=False):
"""
Extracts features of a trip dataframe.
OUTPUT:
np.array including features
list of angles between points in deg
"""
# 1. duration
duration = len(trip)
# 2. speed: euclidean distance between adjacent points
speed = np.sum(np.diff(trip,axis=0)**2,axis=1)**0.5
### 2.1. smooth GPS data (by convolution) ####
smooth_speed = movingaverage(speed,10)
#smooth_speed[np.where(smooth_speed>65)[0]] = smooth_speed[np.where(smooth_speed>65)[0]-1]
# head changes
head = np.diff(trip,axis=0)
head_x,head_y = head[:,0],head[:,1]
head_quantiles_x = ss.mstats.mquantiles(head_x,np.linspace(0.02,0.99,10))
head_quantiles_y = ss.mstats.mquantiles(head_y,np.linspace(0.02,0.99,10))
# compute speed statistics
mean_speed = smooth_speed.mean()
max_speed = max(smooth_speed)
std_speed = speed.std()
# 3. acceleration
smooth_accel = np.diff(smooth_speed)
# 3.1 get all negative acceleration values
accel_s = np.array(smooth_accel)
neg_accel = accel_s[accel_s<0]
pos_accel = accel_s[accel_s>0]
# 3.3 average breaking strength
mean_breaking = neg_accel.mean()
mean_acceleration = pos_accel.mean()
# summary statistics
std_breaking = neg_accel.std()
std_acceleration = pos_accel.std()
# 4. total distance traveled
total_dist = np.sum(smooth_speed,axis=0)
# 5. relative standzeit (last 5% are discarded due standing)
last = round(len(trip)*0.05)
eps = 1 # threshold for determining standing
# relative standzeit
speed_red = np.array(speed)[:last]
standzeit = len(speed_red[speed_red<0+eps])/float(duration)
#### DRIVING STYLE REALTED FEATURES ####
# 1. acceleration from stop
# 1.1 get end of stops: where is speed near zero
end_stops = stops(smooth_speed)
n_stops = len(end_stops) # how many stops
# 1.2 how does the driver accelerate from stop?
end_stops = end_stops.astype(int)[:-1,1]
# following interval
interval = 7 # 7 seconds following end of stop
# only those which dont exceed indices of trip
end_stops = end_stops[end_stops+interval<len(smooth_speed)-1]
n_stops = len(end_stops)
if n_stops>1:
anfahren = np.zeros(shape=(1,n_stops)) # initialize array
for i in range(n_stops):
# slope at acceleration
start = end_stops[i]
anfahren[0,i] = np.diff([smooth_speed[start],smooth_speed[start+interval]])
else:
anfahren = np.array([0])
# compute statistics
mean_anfahren = anfahren.mean()
max_anfahren = anfahren.max()
std_anfahren = anfahren.std()
# end cell
last_cell = rounddown(normalize(trip[-2:,:]),30)[-1]
# determine trip is a back-home trip
if last_cell[0]==0 and last_cell[1]==0:
hometrip=1
else:
hometrip=0
# speed quantiles
speed_quantiles = ss.mstats.mquantiles(smooth_speed,np.linspace(0.02,0.99,25))
# acceleration quantiles
accel_quantiles = ss.mstats.mquantiles(smooth_accel,np.linspace(0.02,0.99,25))
################# PLOTS #################
if plotting:
figure()
x = range(1,len(trip)) # x values for plotting
#plot(x,total_dist,label='velocity') #speed
hold('on')
#plot(x,accel,color='red',alpha=0.6,label='acceleration') #acceleration
grid('on')
xlabel('time')
# plot smoothed speed data
plot(smooth_speed,color='k',label='Spline Interpol')
# plot smoothed accelerationd data
plot(smooth_accel,'red',label='Acceleration')
legend(loc='best')
#legend()
######################################
return np.concatenate((speed_quantiles,accel_quantiles,head_quantiles_x,head_quantiles_y,np.array([duration,total_dist,standzeit,std_speed,std_breaking,std_acceleration,std_anfahren,mean_anfahren,max_anfahren,n_stops,hometrip])))
def driverFrame(driver,n_features=10):
# initialize dataframe
trips = np.zeros(shape=(200,n_features))
# load all trips at once
all_trips = loadDriver(driver)
counter = 0
for trip in all_trips:
trips[counter,:] = features(trip,False)
counter += 1
return trips
def createFeatmat():
"""
Computes the features of all trips and stores them in a matrix.
"""
driverFolder = DATA
# driver IDs
drivers = sorted([int(folderName) for folderName in os.listdir(driverFolder)])
print 'Creating feature matrix...'
n_feat = 81
for i,driver in enumerate(drivers):
if i == 0:
featmat = driverFrame(driver,n_feat)
else:
featmat = np.vstack((featmat,driverFrame(driver,n_feat)))
print '\t\t{0} trips, {1} features'.format(featmat.shape[0],featmat.shape[1])
# write to file
    np.save(os.path.join(FEATURES,'featurematrix1.npy'), featmat)
return featmat
|
[
"[email protected]"
] | |
7b1d36e6759d21e129f1ccb505e5824290d24a31
|
02d8a8b44dc9f8f3c63c2f62f24ceaee7d94fd12
|
/apps/profile/views.py
|
ef24c418f7057f9700edd7b07e9c9801961c3ee3
|
[] |
no_license
|
bladas/transfer
|
0970a4290e2e92e56853a64211ab3e79c479c0aa
|
54c61b7bf340af4f48c7c7162805697b0417f4d7
|
refs/heads/master
| 2023-01-04T12:45:36.784275 | 2019-12-08T17:34:36 | 2019-12-08T17:34:36 | 224,606,015 | 0 | 0 | null | 2022-11-22T04:52:01 | 2019-11-28T08:31:59 |
Python
|
UTF-8
|
Python
| false | false | 2,751 |
py
|
from django.shortcuts import render, redirect
from django.views.generic import ListView, FormView
from apps.orders.models import *
from django.core import mail
from django.template.loader import render_to_string
conection = mail.get_connection()
conection.open()
class ProfileView(ListView):
template_name = 'profile.html'
model = Order
# paginate_by = 5
def get_queryset(self):
pass
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['OrderFalse'] = Order.objects.filter(end=False).order_by('-id')
context['OrderTrue'] = Order.objects.filter(end=True).order_by('-id')
return context
def post(self, request, *args, **kwargs):
print(self.request.POST)
if self.request.method == "POST":
order_id = self.request.POST.get('order_id')
objects = Order.objects.get(pk=order_id)
print(objects.email)
email = objects.email
message = render_to_string('message/positive_message.html', {})
message2 = render_to_string('message/negative_message.html', {})
if self.request.POST.get('materialExampleRadios') == '1':
# Order.objects.update(end=True)
Order.objects.filter(pk=order_id).update(flag = 'Одобрено', end=True)
with mail.get_connection() as connection:
msg = mail.EmailMessage(
'Заказ трансфера по испанни', message,
'[email protected]', [email],
connection=connection,
)
msg.content_subtype = "html"
msg.send()
print("Отправлено одобрение")
return redirect('/')
elif self.request.POST.get('materialExampleRadios') == '2':
# Order.objects.update()
# Order.objects.update(flag = 'Отклонено')
Order.objects.filter(pk=order_id).update(flag = 'Отклонено',end=True)
with mail.get_connection() as connection:
msg = mail.EmailMessage(
'Заказ трансфера по испанни', message2,
'[email protected]', [email],
connection=connection,
)
msg.content_subtype = "html"
msg.send()
print("Отправлено отказ")
return redirect('/')
|
[
"[email protected]"
] | |
ae3dc2f7bf203c611851ed1cdfa6151cfb952a15
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/c748512c4c45e257bc625ccf036e18c86d69f1c8-<main>-fix.py
|
759edf1deb6fc8e4fa82cf7d84981a26157ee87e
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,832 |
py
|
def main():
'Main program body.'
api_key = get_api_key()
parser = argparse.ArgumentParser(description='Start a new Shippable run.')
parser.add_argument('project', metavar='account/project', help='Shippable account/project')
target = parser.add_mutually_exclusive_group()
target.add_argument('--branch', help='branch name')
target.add_argument('--run', metavar='ID', help='Shippable run ID')
parser.add_argument('--key', metavar='KEY', default=api_key, required=(not api_key), help='Shippable API key')
parser.add_argument('--env', nargs=2, metavar=('KEY', 'VALUE'), action='append', help='environment variable to pass')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
headers = dict(Authorization=('apiToken %s' % args.key))
data = dict(projectFullNames=args.project)
url = 'https://api.shippable.com/projects'
response = requests.get(url, data, headers=headers)
if (response.status_code != 200):
raise Exception(response.content)
result = response.json()
if (len(result) != 1):
raise Exception(('Received %d items instead of 1 looking for %s in:\n%s' % (len(result), args.project, json.dumps(result, indent=4, sort_keys=True))))
project_id = response.json()[0]['id']
data = dict(globalEnv=dict(((kp[0], kp[1]) for kp in (args.env or []))))
if args.branch:
data['branchName'] = args.branch
elif args.run:
data['runId'] = args.run
url = ('https://api.shippable.com/projects/%s/newBuild' % project_id)
response = requests.post(url, json=data, headers=headers)
if (response.status_code != 200):
raise Exception(('HTTP %s: %s\n%s' % (response.status_code, response.reason, response.content)))
print(json.dumps(response.json(), indent=4, sort_keys=True))
|
[
"[email protected]"
] | |
4598001c5648f08752ef2002d4ba2a58a4b810b4
|
94e06376dc265c7bf1a2e51acb9714d02b21503a
|
/python打卡/day9_数字.py
|
0303d4bab06f6795e5925726a39bafdecf382745
|
[] |
no_license
|
zhangquanliang/python
|
4b2db32bed4e4746c8c49c309563f456dc41c6be
|
f45ef96e385b1cd6c5dfb53bf81042d953a9ec46
|
refs/heads/master
| 2021-04-26T23:30:12.217397 | 2019-03-20T06:18:14 | 2019-03-20T06:18:14 | 124,005,916 | 11 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 640 |
py
|
# -*- coding: utf-8 -*-
# 1. Sum the values inside nested integer lists
def nested_sum(t):
a = 0
for x in t:
for y in x:
a += y
print(a)
# 2. Take a list of numbers and return the cumulative sums
def cumsum(t):
list = []
a = 0
for x in t:
a += x
list.append(a)
print(list)
# 3. Take a list and return a new list holding every value except the first and the last
def middle(t):
t.pop(0)
t.pop()
print(t)
# t = [1, 2, 3, 4, 1212, 121]
# middle(t)
# 4. Fibonacci sequence
a, b = 0, 1
for i in range(1, 13):
print('第%s个月:%s对兔子' % (i, b))
a, b = b, a + b
|
[
"[email protected]"
] | |
68107dada2e7dd7dc4eabd477e86ea95d7540946
|
e719bcfde03c0be2c84a7f1e13d12b80fa00ea84
|
/session2/draw_2.py
|
0780ebd4934bca50609387bc0e008130c608d56c
|
[] |
no_license
|
minhduc9699/phamMinhDuc-D4E17
|
53b80e53ff175f0357fb01c9876aa12b343ca060
|
363b61745a206f33c5cfa3532a5abd920fcf4ad1
|
refs/heads/master
| 2023-01-22T08:25:14.210959 | 2020-12-05T04:53:38 | 2020-12-05T04:53:38 | 308,668,542 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 154 |
py
|
from turtle import *
speed(-1)
for edge in range(3, 11):
print(edge)
for i in range(edge):
forward(100)
left(360/edge)
mainloop()
|
[
"[email protected]"
] | |
b53a79653da1f30b4346d7cee4b0c1ab43348665
|
74167e4c1061b454d1ab1c2140a1fc2f4540ee2e
|
/accounts/models.py
|
fc5a140def7581969baf9c6413966fd5a150517c
|
[] |
no_license
|
Pagante/ProjectCart
|
f72a1a611445f66c1320c0c21e1832d3ecf67a2a
|
4f065a02a8235c6744768328af5c1e103321ed44
|
refs/heads/main
| 2023-06-05T23:53:10.316222 | 2021-06-27T21:47:28 | 2021-06-27T21:47:28 | 380,840,906 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,861 |
py
|
from django.db import models
from django.contrib.auth.models import BaseUserManager,AbstractBaseUser
# Create your models here.
class MyAccountManager(BaseUserManager):
def create_user(self, first_name, last_name, username, email, password=None):
        if not email:
raise ValueError('User must have an email')
if not username:
raise ValueError('User must have a username')
user = self.model(
email = self.normalize_email(email),
username = username,
first_name = first_name,
last_name = last_name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, first_name, last_name, username, email, password):
user = self.create_user (
email = self.normalize_email(email),
username = username,
password= password,
first_name= first_name,
last_name= last_name,
)
user.is_admin = True
user.is_active = True
user.is_staff = True
user.is_superadmin = True
user.save(using= self._db)
return user
class Account(AbstractBaseUser):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
username = models.CharField(max_length=100, unique=True)
    email = models.CharField(max_length=100, unique=True)
phone_number = models.CharField(max_length=50)
# required Field
date_joined = models.DateTimeField(auto_now_add=True)
last_login = models.DateTimeField(auto_now_add=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
is_superadmin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
objects = MyAccountManager()
def fullName(self):
return f"{self.first_name} {self.last_name}"
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return self.is_admin
def has_module_perms(self, obj_module):
return True
class UserProfile(models.Model):
user = models.OneToOneField(Account, on_delete=models.CASCADE)
address_line_1 = models.CharField(blank=True, max_length=100)
address_line_2 = models.CharField(blank=True, max_length=100)
profile_picture = models.ImageField(upload_to='userprofile', blank=True)
city = models.CharField(blank=True, max_length=50)
state = models.CharField(blank=True, max_length=50)
country = models.CharField(blank=True, max_length=50)
def __str__(self):
return self.user.first_name
def fullAddress(self):
return f'{self.address_line_1} {self.address_line_2}'
|
[
"[email protected]"
] | |
cd46193f2107a70f24bf853229b251e11f09edd3
|
5989e503a733e8b29f4c502008446a75c2b43ff8
|
/src/aids/migrations/0080_auto_20191104_1028.py
|
e61367fa67ca3885c3616e240f880421e5dac253
|
[] |
no_license
|
samuelpath/aides-territoires
|
399a6a7b0607ef5a8d2b327247446b239f5b1a42
|
5793bd49d7157a34e08c29e56a46e1e3ead0651f
|
refs/heads/master
| 2022-12-20T14:35:18.671563 | 2020-08-21T08:00:33 | 2020-08-21T08:00:33 | 288,424,578 | 0 | 0 | null | 2020-08-18T10:27:17 | 2020-08-18T10:27:16 | null |
UTF-8
|
Python
| false | false | 389 |
py
|
# Generated by Django 2.2.5 on 2019-11-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('aids', '0079_remove_aid_subvention_rate'),
]
operations = [
migrations.RenameField(
model_name='aid',
old_name='subvention_rate_range',
new_name='subvention_rate',
),
]
|
[
"[email protected]"
] | |
1e838cff1c3206fca261549ede085035a1794d7c
|
b301f5d799fb973f12ff457c94a3fb54f5c6fd6b
|
/pages/views.py
|
56e3848a96f39428eb0488fb6874d562f048fe72
|
[] |
no_license
|
MahmudulHassan5809/DjangoHousingSellingProject
|
ca3a8b9e3d83dd87532b33295e56e50ba7e9576d
|
82d02e04fe2a0cd510f160ad4159f40f4e5779d3
|
refs/heads/master
| 2020-04-09T08:01:41.092034 | 2018-12-03T11:24:13 | 2018-12-03T11:24:13 | 160,179,800 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 852 |
py
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
from listings.choices import price_choices , bedroom_choices , state_choices
from listings.models import Listing
from realtors.models import Realtor
# Create your views here.
def index(request):
    # return HttpResponse('Hello')
    listings = Listing.objects.order_by('-list_date').filter(is_published=True)
return render(request , 'pages/index.html',{'listings' : listings ,
'state_choices' : state_choices,
'bedroom_choices' : bedroom_choices,
'price_choices' : price_choices,
})
def about(request):
realtors = Realtor.objects.order_by('-hire_date')
mvp_realtors = Realtor.objects.all().filter(is_mvp=True)
context = {
'realtors' : realtors,
'mvp_realtors' : mvp_realtors
}
return render(request , 'pages/about.html',context)
|
[
"[email protected]"
] | |
fb58a27373295fd23e4e441d7160e90f57d8c58a
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/local_rulestacks_create_or_update_minimum_set_gen.py
|
eb4b8b0451f4477d85ec725f4dfa1603454d7a23
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 |
MIT
| 2023-09-08T08:38:48 | 2019-11-18T07:09:24 |
Python
|
UTF-8
|
Python
| false | false | 1,765 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworksngfw import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworksngfw
# USAGE
python local_rulestacks_create_or_update_minimum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = PaloAltoNetworksNgfwMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="2bf4a339-294d-4c25-b0b2-ef649e9f5c27",
)
response = client.local_rulestacks.begin_create_or_update(
resource_group_name="rgopenapi",
local_rulestack_name="lrs1",
resource={"location": "eastus", "properties": {}},
).result()
print(response)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_CreateOrUpdate_MinimumSet_Gen.json
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
a94e1bb1dc306e6a03ea0107933cb542bdaea003
|
50671b3e4e8ed7e9702c9941bb71fdbf92dffbe6
|
/src/cogs/events.py
|
0b28700d34057dbeef8be93aeb3c40ea8a08314b
|
[] |
no_license
|
pikoUsername/Emulator
|
3dd67d0d3934c2ec9283b9b52edebec31c654326
|
96e6563c7cbcea051e4e41a377d917a2a9f5528a
|
refs/heads/main
| 2023-02-27T22:42:43.154987 | 2021-02-09T14:51:56 | 2021-02-09T14:51:56 | 321,045,486 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,199 |
py
|
import os
import sys
from discord.ext import commands
from discord.ext.commands import errors
import discord
from loguru import logger
from ..models import Guild
from ..utils.notify import notify_all_owners
class DiscordEvents(commands.Cog, name="Events"):
__slots__ = "bot",
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
if os.environ.get("notify_admins"):
await notify_all_owners(self.bot, text="BOT STARTED")
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
g = await Guild.query.where(guild.id == Guild.guild_id).gino.first()
g.add_guild(guild)
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild):
await self.bot.fm.delete_all_guild_files(guild.id)
g = await Guild.get_guild(guild.id)
await g.delete()
logger.info("leaved and deleted thats guild folder")
@commands.Cog.listener()
async def on_command_error(self, ctx, err):
if isinstance(err, errors.MissingRequiredArgument) or isinstance(err, errors.BadArgument):
helper = str(ctx.invoked_subcommand) if ctx.invoked_subcommand else str(ctx.command)
await ctx.send_help(helper)
elif isinstance(err, errors.CommandInvokeError):
logger.exception(f"{err}, {sys.exc_info()}")
if "2000 or fewer" in str(err) and len(ctx.message.clean_content) > 1900:
return await ctx.send(
"You attempted to make the command display more than 2,000 characters...\n"
"Both error and command will be ignored."
)
await ctx.send(embed=discord.Embed(
title="Error on processing Command",
description=f"```{err}```",
), delete_after=30)
elif isinstance(err, errors.MissingPermissions):
await ctx.send(embed=discord.Embed(
title=f"Fail {self.bot.X_EMOJI}",
description="Permission ERROR"))
elif isinstance(err, errors.CheckFailure):
await ctx.send(embed=discord.Embed(
title=f"Fail {self.bot.X_EMOJI}",
description="You cant made this"))
elif isinstance(err, errors.MaxConcurrencyReached):
await ctx.send(
"You've reached max capacity of command usage at once, please finish the previous one...",
delete_after=30)
elif isinstance(err, errors.CommandOnCooldown):
await ctx.send(
f"This command is on cool down... try again in {err.retry_after:.2f} seconds.",
delete_after=30)
elif isinstance(err, errors.CommandNotFound):
pass
elif isinstance(err, errors.NoPrivateMessage):
await ctx.send(
embed=discord.Embed(title="Private message Not work",
description="Bot work only in guild channels"))
else:
logger.exception(err)
await self.bot.send_error(ctx, err)
def setup(bot):
bot.add_cog(DiscordEvents(bot))
|
[
"[email protected]"
] | |
6fc2e9842a862e151818555c40bd68c1fe986ae7
|
aa5d98396184ab9dc479075b37a3664c385de027
|
/tests/selenium/breadcrumb_test.py
|
8ae1041e04352b0b8d70180fdda1d4cfface3872
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uk-gov-mirror/ONSdigital.sbr-ui
|
c6a66cd6982e9e98a991eadbb8cef0f1fb6ba2bf
|
48bbfdc59e393dd4d2d008b8414ac96d2e2be44f
|
refs/heads/master
| 2021-10-12T00:02:34.160448 | 2018-10-17T14:59:04 | 2018-10-17T14:59:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,910 |
py
|
import unittest
from selenium import webdriver
from tests.helper_methods import create_selenium_config
from tests.constants import BASE_URL, SEARCH_URL
from tests.constants import ENTERPRISE, LOCAL_UNIT, REPORTING_UNIT, LEGAL_UNIT, COMPANY_HOUSE, VALUE_ADDED_TAX, PAY_AS_YOU_EARN
from tests.constants import BREADCRUMB_SEARCH_ID, BREADCRUMB_SELECTED_ID, BREADCRUMB_ENT_ID, BREADCRUMB_LEU_ID
from tests.constants import SEARCH_BUTTON_ID, PERIOD_INPUT_ID, UNIT_TYPE_INPUT_ID
from tests.constants import USERNAME_INPUT_ID, PASSWORD_INPUT_ID, SEARCH_INPUT_ID, LOGIN_BUTTON_ID, LOGOUT_BUTTON_ID
from tests.constants import ERN, UBRN, RURN, LURN, VATREF, PAYEREF, CRN, PERIOD
from tests.constants import ADMIN_USERNAME, ADMIN_PASSWORD
class BreadcrumbTest(unittest.TestCase):
"""
The breadcrumb is present on each unit page and allows navigation up the unit hierarchy.
TODO: test for when a breadcrumb link returns 404/500
"""
def setUp(self):
self.options = create_selenium_config()
self.driver = webdriver.Firefox(firefox_options=self.options)
self.driver.get(BASE_URL)
self.driver.find_element_by_id(USERNAME_INPUT_ID).send_keys(ADMIN_USERNAME)
self.driver.find_element_by_id(PASSWORD_INPUT_ID).send_keys(ADMIN_PASSWORD)
self.driver.find_element_by_id(LOGIN_BUTTON_ID).click()
def tearDown(self):
self.driver.find_element_by_id(LOGOUT_BUTTON_ID).click()
self.driver.quit()
def search_by_unit_id_type_period(self, unit_id, unit_type, period):
self.driver.find_element_by_id(SEARCH_INPUT_ID).send_keys(unit_id)
self.driver.find_element_by_id(UNIT_TYPE_INPUT_ID).send_keys(unit_type)
self.driver.find_element_by_id(PERIOD_INPUT_ID).send_keys(period)
self.driver.find_element_by_id(SEARCH_BUTTON_ID).click()
def assert_breadcrumb_item_text_and_url(self, breadcrumb_id, unit_id, unit_type, period):
breadcrumb_item = self.driver.find_element_by_id(breadcrumb_id)
self.assertEqual(breadcrumb_item.text, f'{unit_type} - {unit_id}')
target_url = f'{SEARCH_URL}/periods/{period}/types/{unit_type}/units/{unit_id}'
self.assertEqual(breadcrumb_item.get_attribute('href'), target_url)
def assert_current_breadcrumb_item_text(self, expected_text):
current_item_text = self.driver.find_element_by_id(BREADCRUMB_SELECTED_ID).text
self.assertEqual(current_item_text, expected_text)
def assert_breadcrumb_search_href(self):
href = self.driver.find_element_by_id(BREADCRUMB_SEARCH_ID).get_attribute('href')
self.assertEqual(href, SEARCH_URL)
def test_ent_breadcrumb(self):
self.search_by_unit_id_type_period(ERN, ENTERPRISE, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{ENTERPRISE}/units/{ERN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'ENT - {ERN}')
def test_lou_breadcrumb(self):
self.search_by_unit_id_type_period(LURN, LOCAL_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{LOCAL_UNIT}/units/{LURN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'LOU - {LURN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_reu_breadcrumb(self):
self.search_by_unit_id_type_period(RURN, REPORTING_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{REPORTING_UNIT}/units/{RURN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'REU - {RURN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_leu_breadcrumb(self):
self.search_by_unit_id_type_period(UBRN, LEGAL_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{LEGAL_UNIT}/units/{UBRN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'LEU - {UBRN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_ch_breadcrumb(self):
self.search_by_unit_id_type_period(CRN, COMPANY_HOUSE, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{COMPANY_HOUSE}/units/{CRN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'CRN - {CRN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
def test_vat_breadcrumb(self):
self.search_by_unit_id_type_period(VATREF, VALUE_ADDED_TAX, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{VALUE_ADDED_TAX}/units/{VATREF}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'VAT - {VATREF}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
def test_paye_breadcrumb(self):
self.search_by_unit_id_type_period(PAYEREF, PAY_AS_YOU_EARN, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{PAY_AS_YOU_EARN}/units/{PAYEREF}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'PAYE - {PAYEREF}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
40fec2d844ff14fbb903f58d6e96f8e46ad3fe8c
|
f83934dd60d4961848c0a86f6d7fbe07b79a1d63
|
/glumpy/graphics/collections/__init__.py
|
c497f742a32eb93341c0ab317f56e2fc37a848d5
|
[] |
no_license
|
brianholland/glumpy
|
2a31e2f5fd039d1debb30dd010ad36c458f329cf
|
a691082385e02db9b1d461847b9e36d8534630fa
|
refs/heads/master
| 2020-12-25T21:43:58.743259 | 2015-11-30T11:04:46 | 2015-11-30T11:04:46 | 46,670,630 | 0 | 0 | null | 2015-11-30T11:04:46 | 2015-11-22T17:10:24 |
Python
|
UTF-8
|
Python
| false | false | 951 |
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
from . collection import Collection
from . base_collection import BaseCollection
from . path_collection import PathCollection
from . point_collection import PointCollection
from . glyph_collection import GlyphCollection
from . marker_collection import MarkerCollection
from . polygon_collection import PolygonCollection
from . segment_collection import SegmentCollection
from . triangle_collection import TriangleCollection
from . raw_path_collection import RawPathCollection
from . raw_triangle_collection import RawTriangleCollection
from . agg_path_collection import AggPathCollection
from . agg_fast_path_collection import AggFastPathCollection
|
[
"[email protected]"
] | |
c71de68d5d8e1ed94307b087f795dddfc08ddc00
|
7b8b03b7818a1fea58f174ff8c18b43578a6233f
|
/tests/core/test_models.py
|
b6c868d65b928963cc11299b613fc8c6b8eeec36
|
[] |
no_license
|
defance/coins_ph
|
400e4316a2d9a63752b21190ca7f1b0543b85343
|
2f0d3038f5dcca4c0f8711a1b095c6078799eb0b
|
refs/heads/master
| 2020-04-30T19:15:58.398453 | 2019-03-21T22:30:16 | 2019-03-21T22:30:16 | 177,033,466 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
from django.test import TestCase
from tests.factories import AccountFactory
class TestTransactionAccount(TestCase):
def test_update_balance(self):
account = AccountFactory(balance=10)
account.update_balance(42)
account.refresh_from_db()
        self.assertEqual(account.balance, 52)
|
[
"[email protected]"
] | |
f800d6b3ca316df0db0ffe4717caaddae33260f8
|
3ea684487ef727fb2f8d16a030769f32a4f4003a
|
/datahq/apps/receiver/bootstrap.py
|
90c3fa6dc99fc38cd04840c76b173a531f02f9b5
|
[] |
no_license
|
adewinter/data-hq
|
5781e6669e0625ea9ae7cf94ec77c528485c2951
|
ca03656c835f8caa5156326500c05bb83ab931ca
|
refs/heads/master
| 2021-01-18T12:48:26.584454 | 2010-08-19T13:15:03 | 2010-08-19T13:15:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 280 |
py
|
import os
from django.conf import settings
# make our directories if they're not there
for dir in [settings.RECEIVER_SUBMISSION_PATH,
settings.RECEIVER_ATTACHMENT_PATH,
settings.RECEIVER_EXPORT_PATH]:
if not os.path.isdir(dir):
os.mkdir(dir)
|
[
"[email protected]"
] | |
19cc849f50ba984019a615ec3532eb04f622db66
|
3efee0cf2bd9e0c34bfdd94ab24a15cb88c04509
|
/PWEM_examples/kxky_bandstructure_benchmark_plotting_with_fdfd.py
|
20872b8c10843e1edc1184e3d3cbe5d7ee1b70bd
|
[
"MIT"
] |
permissive
|
luwl85/Rigorous-Coupled-Wave-Analysis
|
bf5016ec70525f5e7bf59dfa93a03902afdfac12
|
a28fdf90b5b5fc0fedacc8bb44a0a0c2f2a02143
|
refs/heads/master
| 2023-04-25T20:46:45.397976 | 2021-05-20T22:17:54 | 2021-05-20T22:17:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 800 |
py
|
import sys
import os
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import plotly
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from mpl_toolkits.mplot3d import Axes3D
matlab_data = os.path.join('kxky_photonic_circle_bandstructure.mat')
mat = scipy.io.loadmat(matlab_data)
print(mat.keys())
wvlen_scan = np.squeeze(mat['wvlen_scan'])
omega_scan = 1/wvlen_scan
ky_spectra = np.squeeze(mat['ky_spectra'])
print(ky_spectra.shape)
ky_scan = np.linspace(-np.pi, np.pi, 400)
X, Y = np.meshgrid(omega_scan, ky_scan)
print(X.shape)
# first dimension is ky... second dimension is kx...
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, np.real(ky_spectra[:, :, 0]), marker='.')
plt.show()
|
[
"[email protected]"
] | |
6821205dff8d4bf5af67bd99c4b092e8d390a3c3
|
5c533e2cf1f2fa87e55253cdbfc6cc63fb2d1982
|
/python/pymonad/monad_parse.py
|
a37f2195d1412f89bfddadf9d4bb469858d0db09
|
[] |
no_license
|
philzook58/python
|
940c24088968f0d5c655e2344dfa084deaefe7c6
|
6d43db5165c9bcb17e8348a650710c5f603e6a96
|
refs/heads/master
| 2020-05-25T15:42:55.428149 | 2018-05-14T03:33:29 | 2018-05-14T03:33:29 | 69,040,196 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
# parser is of form string -> [(symbol, therestofstring), (other possibility), (other possibility), ...]
# parsebind needs to return a new parser that feeds every possible parse into the
# parser produced from its parsed symbol, concatenating all resulting parses
def parsebind(parser, parserproducer):
    def combinedparser(string):
        possibleparses = parser(string)
        results = []
        for (symb, restofstring) in possibleparses:
            results.extend(parserproducer(symb)(restofstring))
        return results
    return combinedparser
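
# A minimal usage sketch (the `item` parser below is hypothetical, not part of the
# original file): item consumes one character, and parsebind chains two of them.
def item(string):
    return [(string[0], string[1:])] if string else []

two = parsebind(item, lambda a: parsebind(item, lambda b: lambda s: [((a, b), s)]))
# two("abc") -> [(('a', 'b'), 'c')]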
|
[
"[email protected]"
] | |
94a1445b5d73052a0e9fbc2caed1e94ae674a0da
|
4f2b9848ee1cf41017b424c7367a240f93625e86
|
/doc/tutorial/config.py
|
cdb60e8b8cc1f8f18664a9d5edb55b488c038574
|
[
"Apache-2.0"
] |
permissive
|
martin-dostal-eli/python-icat
|
f5cc0e497376d7264db1af2bb9ad588e29a9bd7b
|
8c882a3095f2dd7276a7c0edba44dc9b3ef4eedd
|
refs/heads/master
| 2023-08-18T02:12:30.267009 | 2021-07-20T11:24:25 | 2021-07-20T11:24:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
#! /usr/bin/python
from __future__ import print_function
import icat
import icat.config
config = icat.config.Config(needlogin=False, ids=False)
client, conf = config.getconfig()
print("Connect to %s\nICAT version %s" % (conf.url, client.apiversion))
|
[
"[email protected]"
] | |
e797642929d74abf07f38be4d559a60c4edc39c4
|
a7b07e14f58008e4c9567a9ae67429cedf00e1dc
|
/lib/jnpr/healthbot/swagger/models/rule_schema_variable.py
|
630dceaecb69d32efa58bd7ea4450d2121bdd4cb
|
[
"Apache-2.0"
] |
permissive
|
dmontagner/healthbot-py-client
|
3750d8375bc4fa7bedcdbc6f85f17fb812c19ea9
|
0952e0a9e7ed63c9fe84879f40407c3327735252
|
refs/heads/master
| 2020-08-03T12:16:38.428848 | 2019-09-30T01:57:24 | 2019-09-30T01:57:24 | 211,750,200 | 0 | 0 |
Apache-2.0
| 2019-09-30T01:17:48 | 2019-09-30T01:17:47 | null |
UTF-8
|
Python
| false | false | 6,447 |
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaVariable(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'name': 'str',
'type': 'str',
'value': 'str'
}
attribute_map = {
'description': 'description',
'name': 'name',
'type': 'type',
'value': 'value'
}
def __init__(self, description=None, name=None, type=None, value=None): # noqa: E501
"""RuleSchemaVariable - a model defined in Swagger""" # noqa: E501
self._description = None
self._name = None
self._type = None
self._value = None
self.discriminator = None
if description is not None:
self.description = description
self.name = name
self.type = type
if value is not None:
self.value = value
@property
def description(self):
"""Gets the description of this RuleSchemaVariable. # noqa: E501
Description about the variable # noqa: E501
:return: The description of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this RuleSchemaVariable.
Description about the variable # noqa: E501
:param description: The description of this RuleSchemaVariable. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this RuleSchemaVariable. # noqa: E501
Variable name used in the playbook. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:return: The name of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RuleSchemaVariable.
Variable name used in the playbook. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:param name: The name of this RuleSchemaVariable. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 64:
raise ValueError("Invalid value for `name`, length must be less than or equal to `64`") # noqa: E501
if name is not None and not re.search('^[a-zA-Z][a-zA-Z0-9_-]*$', name): # noqa: E501
raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/^[a-zA-Z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._name = name
@property
def type(self):
"""Gets the type of this RuleSchemaVariable. # noqa: E501
Type of value supported. This information will be used by UI to display options available for the values # noqa: E501
:return: The type of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this RuleSchemaVariable.
Type of value supported. This information will be used by UI to display options available for the values # noqa: E501
:param type: The type of this RuleSchemaVariable. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["int", "float", "string", "boolean", "device-group", "device", "sensor-argument"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def value(self):
"""Gets the value of this RuleSchemaVariable. # noqa: E501
Default value for the variable # noqa: E501
:return: The value of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this RuleSchemaVariable.
Default value for the variable # noqa: E501
:param value: The value of this RuleSchemaVariable. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaVariable):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
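

# A minimal usage sketch (not part of the generated module): the field values below
# are made-up examples, chosen to satisfy the name pattern and allowed types above.
if __name__ == '__main__':
    example = RuleSchemaVariable(
        name="interface-name",
        type="string",
        value="ge-0/0/0",
        description="Interface to monitor",
    )
    print(example.to_dict())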
|
[
"[email protected]"
] | |
913a809b21dce8f948f0e742c823d688bef2cbc7
|
6032f996f989d521dbdee23ce6c1fbd778d8e964
|
/qanta/wikipedia/categories.py
|
b5dedd32b88990ad251a82da1ae4cf7fe424ea37
|
[
"MIT"
] |
permissive
|
npow/qb
|
9af1c07afd10f6aad9dbcbdd9209c6fde0e4347f
|
044e623d2cbda96209fa1fdedffefa2208c98755
|
refs/heads/master
| 2020-05-26T15:41:13.864334 | 2019-05-26T16:47:07 | 2019-05-26T16:47:07 | 188,290,907 | 0 | 0 | null | 2019-05-23T19:02:23 | 2019-05-23T19:02:23 | null |
UTF-8
|
Python
| false | false | 1,525 |
py
|
"""
Process Wikipedia category links
"""
import json
import re
import csv
import click
import tqdm
@click.group()
def categorylinks_cli():
pass
@categorylinks_cli.command()
@click.argument('categories_csv')
@click.argument('out_jsonl')
def clean(categories_csv, out_jsonl):
with open(categories_csv) as in_f, open(out_jsonl, 'w') as out_f:
for line in csv.reader(in_f):
if len(line) == 2:
if re.match(r'[a-zA-Z0-9\-\_\s]+', line[1]):
out_f.write(json.dumps({
'id': int(line[0]),
'cat': line[1]
}))
out_f.write('\n')
@categorylinks_cli.command()
@click.argument('category_csv')
@click.argument('out_json')
def disambiguate(category_csv, out_json):
disambiguation_pages = set()
blacklist = {
'Articles_with_links_needing_disambiguation_from_April_2018',
'All_articles_with_links_needing_disambiguation'
}
with open(category_csv) as f:
reader = csv.reader(f)
for r in tqdm.tqdm(reader, mininterval=1):
page_id, category = r[0], r[1]
l_category = category.lower()
if ((category not in blacklist) and
('disambiguation' in l_category) and
('articles_with_links_needing_disambiguation' not in l_category)):
disambiguation_pages.add(int(page_id))
with open(out_json, 'w') as f:
json.dump(list(disambiguation_pages), f)
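

if __name__ == '__main__':
    # hypothetical direct entry point (not in the original file); click dispatches the
    # sub-commands defined above, e.g.:
    #   python categories.py clean categorylinks.csv categories.jsonl
    categorylinks_cli()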
|
[
"[email protected]"
] | |
7a2fcbba659bb83f947490fc946a7ff3ba4665d2
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/ARMmbed/htrun/mbed_host_tests/host_tests_plugins/host_test_plugins.py
|
1c965fab88a3dc757f8bce97bec9d4293718641b
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null |
UTF-8
|
Python
| false | false | 7,762 |
py
|
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <[email protected]>
"""
import os
import sys
import platform
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
class HostTestPluginBase:
""" Base class for all plugins used with host tests
"""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, Copymethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
required_parameters = [] # Parameters required for 'kwargs' in plugin APIs: e.g. self.execute()
stable = False # Determine if plugin is stable and can be used
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return False
def execute(self, capability, *args, **kwargs):
"""! Executes capability by name
@param capability Capability name
@param args Additional arguments
@param kwargs Additional arguments
@details Each capability e.g. may directly just call some command line program or execute building pythonic function
@return Capability call return value
"""
return False
def is_os_supported(self, os_name=None):
"""!
@return Returns true if plugin works (supportes) under certain OS
@os_name String describing OS.
See self.mbed_os_support() and self.mbed_os_info()
@details In some cases a plugin will not work under particular OS
mainly because command / software used to implement plugin
functionality is not available e.g. on MacOS or Linux.
"""
return True
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
"""! Function prints error in console and exits always with False
@param text Text to print
"""
print "Plugin error: %s::%s: %s"% (self.name, self.type, text)
return False
def print_plugin_info(self, text, NL=True):
"""! Function prints notification in console and exits always with True
@param text Text to print
@param NL Newline will be added behind text if this flag is True
"""
if NL:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text)
else:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text),
return True
def print_plugin_char(self, char):
""" Function prints char on stdout
"""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25):
"""! Waits until destination_disk is ready and can be accessed by e.g. copy commands
@return True if mount point was ready in given time, False otherwise
@param destination_disk Mount point (disk) which will be checked for readiness
@param init_delay - Initial delay time before first access check
@param loop_delay - polling delay for access check
"""
result = False
# Let's wait for 30 * loop_delay + init_delay max
if not access(destination_disk, F_OK):
self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
sleep(init_delay)
for i in range(30):
if access(destination_disk, F_OK):
result = True
break
sleep(loop_delay)
self.print_plugin_char('.')
return result
def check_parameters(self, capability, *args, **kwargs):
"""! This function should be ran each time we call execute() to check if none of the required parameters is missing
@return Returns True if all parameters are passed to plugin, else return False
@param capability Capability name
@param args Additional parameters
@param kwargs Additional parameters
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters):
self.print_plugin_error("execute parameter(s) '%s' missing!"% (', '.join(missing_parameters)))
return False
return True
def run_command(self, cmd, shell=True):
"""! Runs command from command line.
@param cmd Command to execute
@param shell True if shell command should be executed (eg. ls, ps)
@details Function prints 'cmd' return code if execution failed
@return True if command successfully executed
"""
result = True
try:
ret = call(cmd, shell=shell)
if ret:
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
return False
        except Exception as e:
            result = False
            # 'ret' is unset when call() itself raises, so report only the command and error
            self.print_plugin_error("Command: %s"% cmd)
            self.print_plugin_error(str(e))
return result
def mbed_os_info(self):
"""! Returns information about host OS
@return Returns tuple with information about OS and host platform
"""
result = (os.name,
platform.system(),
platform.release(),
platform.version(),
sys.platform)
return result
def mbed_os_support(self):
"""! Function used to determine host OS
@return Returns None if host OS is unknown, else string with name
@details This function should be ported for new OS support
"""
result = None
os_info = self.mbed_os_info()
if (os_info[0] == 'nt' and os_info[1] == 'Windows'):
result = 'Windows7'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux' and ('Ubuntu' in os_info[3])):
result = 'Ubuntu'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux'):
result = 'LinuxGeneric'
elif (os_info[0] == 'posix' and os_info[1] == 'Darwin'):
result = 'Darwin'
return result
|
[
"[email protected]"
] | |
5957be3eebf4bcc847582b8b20f6771924155403
|
4c9580b2e09e2b000e27a1c9021b12cf2747f56a
|
/chapter05/chapter05_example01/chapter05_example01/settings.py
|
6bdae198873c43cd8667c6b9aac8266fb69c6642
|
[] |
no_license
|
jzplyy/xiaoyue_mall
|
69072c0657a6878a4cf799b8c8218cc7d88c8d12
|
4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc
|
refs/heads/master
| 2023-06-26T02:48:03.103635 | 2021-07-22T15:51:07 | 2021-07-22T15:51:07 | 388,514,311 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,368 |
py
|
"""
Django settings for chapter05_example01 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'chapter05_example01\\apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gj(^a0w1e_)p4_+%9y4q3i#7yz_423=^ze4+9-wpj!8sci=esy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'goods'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chapter05_example01.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chapter05_example01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
53f85d5e77b251fd803da0cc317dc6dac3e3fd02
|
b74e9be747c1a99fc5d67ca096157f512baf02ca
|
/tools/harness-automation/cases/reed_5_2_4.py
|
151fa67c0fb6cee9b74e8e983fa7bb67a1aea761
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
amccool/openthread
|
468838cebc083d234192926aacb0e3efc0a83463
|
1e9d3c1dbfd66aa48c4cbb1dda0b41c9f05fefc7
|
refs/heads/master
| 2021-01-16T23:03:46.503666 | 2016-09-06T03:21:05 | 2016-09-06T03:21:05 | 67,469,844 | 0 | 0 |
BSD-3-Clause
| 2019-11-01T20:11:16 | 2016-09-06T03:23:32 |
C++
|
UTF-8
|
Python
| false | false | 1,846 |
py
|
#!/usr/bin/env python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class REED_5_2_4(HarnessCase):
suite = 16
case = '5 2 4'
golden_devices_required = 17
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
ef8495ed987b371d3c9c09347e179d7fee0cfd92
|
6320fef2ea7376c2b35f97f1a5af004e90f09098
|
/1-2주차 실습(복습)/venv/Lib/site-packages/pygame/tests/test_utils/__init__.py
|
fd3ec69cb674929081e3d41837df1458fa33d018
|
[] |
no_license
|
Dplo1514/ploaistudy
|
7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9
|
e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c
|
refs/heads/master
| 2023-09-03T00:45:55.601651 | 2021-10-24T12:19:38 | 2021-10-24T12:19:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,442 |
py
|
#################################### IMPORTS ###################################
is_pygame_pkg = __name__.startswith("pygame.tests.")
import tempfile, sys, pygame, time, os
################################################################################
# Python 3.x compatibility
try:
xrange_ = xrange
except NameError:
xrange_ = range
try:
raw_input_ = raw_input
except NameError:
raw_input_ = input
if sys.version_info[0] == 3:
def tostring(row):
"""Convert row of bytes to string. Expects `row` to be an
``array``.
"""
return row.tobytes()
else:
def tostring(row):
"""Convert row of bytes to string. Expects `row` to be an
``array``.
"""
return row.tostring()
import unittest
if not hasattr(unittest.TestCase, "subTest"):
import contextlib
@contextlib.contextmanager
def subTest(self, msg=None, **params):
yield
return
unittest.TestCase.subTest = subTest
def geterror():
return sys.exc_info()[1]
class AssertRaisesRegexMixin(object):
"""Provides a way to prevent DeprecationWarnings in python >= 3.2.
For this mixin to override correctly it needs to be before the
unittest.TestCase in the multiple inheritance hierarchy.
e.g. class TestClass(AssertRaisesRegexMixin, unittest.TestCase)
This class/mixin and its usage can be removed when pygame no longer
supports python < 3.2.
"""
def assertRaisesRegex(self, *args, **kwargs):
try:
return super(AssertRaisesRegexMixin, self).assertRaisesRegex(
*args, **kwargs
)
except AttributeError:
try:
return super(AssertRaisesRegexMixin, self).assertRaisesRegexp(
*args, **kwargs
)
except AttributeError:
self.skipTest("No assertRaisesRegex/assertRaisesRegexp method")
################################################################################
this_dir = os.path.dirname(os.path.abspath(__file__))
trunk_dir = os.path.split(os.path.split(this_dir)[0])[0]
if is_pygame_pkg:
test_module = "tests"
else:
test_module = "test"
def trunk_relative_path(relative):
return os.path.normpath(os.path.join(trunk_dir, relative))
def fixture_path(path):
return trunk_relative_path(os.path.join(test_module, "fixtures", path))
def example_path(path):
return trunk_relative_path(os.path.join("examples", path))
sys.path.insert(0, trunk_relative_path("."))
################################## TEMP FILES ##################################
def get_tmp_dir():
return tempfile.mkdtemp()
################################################################################
def question(q):
return raw_input_("\n%s (y/n): " % q.rstrip(" ")).lower().strip() == "y"
def prompt(p):
return raw_input_("\n%s (press enter to continue): " % p.rstrip(" "))
#################################### HELPERS ###################################
def rgba_between(value, minimum=0, maximum=255):
if value < minimum:
return minimum
elif value > maximum:
return maximum
else:
return value
def combinations(seqs):
"""
Recipe 496807 from ActiveState Python CookBook
Non recursive technique for getting all possible combinations of a sequence
of sequences.
"""
r = [[]]
for x in seqs:
r = [i + [y] for y in x for i in r]
return r
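# Worked example (traced from the loop above): combinations([[1, 2], [3]]) -> [[1, 3], [2, 3]]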
def gradient(width, height):
"""
Yields a pt and corresponding RGBA tuple, for every (width, height) combo.
Useful for generating gradients.
Actual gradient may be changed, no tests rely on specific values.
Used in transform.rotate lossless tests to generate a fixture.
"""
for l in xrange_(width):
for t in xrange_(height):
yield (l, t), tuple(map(rgba_between, (l, t, l, l + t)))
def rect_area_pts(rect):
for l in xrange_(rect.left, rect.right):
for t in xrange_(rect.top, rect.bottom):
yield l, t
def rect_perimeter_pts(rect):
"""
Returns pts ((L, T) tuples) encompassing the perimeter of a rect.
The order is clockwise:
topleft to topright
topright to bottomright
bottomright to bottomleft
bottomleft to topleft
Duplicate pts are not returned
"""
clock_wise_from_top_left = (
[(l, rect.top) for l in xrange_(rect.left, rect.right)],
[(rect.right - 1, t) for t in xrange_(rect.top + 1, rect.bottom)],
[(l, rect.bottom - 1) for l in xrange_(rect.right - 2, rect.left - 1, -1)],
[(rect.left, t) for t in xrange_(rect.bottom - 2, rect.top, -1)],
)
for line in clock_wise_from_top_left:
for pt in line:
yield pt
def rect_outer_bounds(rect):
"""
Returns topleft outerbound if possible and then the other pts, that are
"exclusive" bounds of the rect
?------O
|RECT| ?|0)uterbound
|----|
O O
"""
return ([(rect.left - 1, rect.top)] if rect.left else []) + [
rect.topright,
rect.bottomleft,
rect.bottomright,
]
def import_submodule(module):
m = __import__(module)
for n in module.split(".")[1:]:
m = getattr(m, n)
return m
class SurfaceSubclass(pygame.Surface):
"""A subclassed Surface to test inheritance."""
def __init__(self, *args, **kwargs):
super(SurfaceSubclass, self).__init__(*args, **kwargs)
self.test_attribute = True
def test():
"""
Lightweight test for helpers
"""
r = pygame.Rect(0, 0, 10, 10)
assert rect_outer_bounds(r) == [(10, 0), (0, 10), (10, 10)] # tr # bl # br
assert len(list(rect_area_pts(r))) == 100
r = pygame.Rect(0, 0, 3, 3)
assert list(rect_perimeter_pts(r)) == [
(0, 0),
(1, 0),
(2, 0), # tl -> tr
(2, 1),
(2, 2), # tr -> br
(1, 2),
(0, 2), # br -> bl
(0, 1), # bl -> tl
]
print("Tests: OK")
################################################################################
|
[
"[email protected]"
] | |
665dcc52eba524df257caff6e50e0b2f063ee789
|
ae65873c3584cef7139066b224daad04410af6d2
|
/Top10Words.py
|
b6f4ffe80cc71c2040700a1f7c86913682066030
|
[] |
no_license
|
rajatkashyap/Python
|
2240c7472d07803c460c7a55d570e20694b694f9
|
f74c85c65b0e209a5f7ab25b653d42835222faaf
|
refs/heads/master
| 2022-06-25T19:20:52.847498 | 2022-06-08T14:40:45 | 2022-06-08T14:40:45 | 145,714,257 | 0 | 0 | null | 2022-04-25T00:18:37 | 2018-08-22T13:39:14 |
Python
|
UTF-8
|
Python
| false | false | 558 |
py
|
f=open('UHC.txt')
dict={}
words=f.read().split()
for word in words:
w=word.lower()
dict[w]=dict.get(w,0)+1
#print dict
str_tups=[]
for k,v in dict.items():
str_tups.append((v,k))
#print str_tups
str_tups.sort(reverse=True)
print str_tups[:10]
keys=dict.keys()
values=dict.values()
#print keys
#print values
values.sort(reverse=True)
for i in range(10):
for key in keys:
if dict[key]==values[i]:
print key,values[i]
'''
for i in range(10):
for d in dict:
if d[keys[i]]==values[i]:
print d '''
|
[
"[email protected]"
] | |
718c1a3aa265318be8f270943122a2fef285e6e9
|
59d48214613a195573b5a0a1f10b32c889172155
|
/alexa/reciPullLambda/ask_sdk_model/canfulfill/can_fulfill_intent_request.py
|
61ffc9fb00f47a05ab691639b45bca434c75fe2e
|
[
"MIT"
] |
permissive
|
ReciPull/recipull.github.io
|
60861ebb7a6d77d39907c6332e346194ce4ad107
|
e6b800af02658bb7948297c4ddc1b7af6d978839
|
refs/heads/master
| 2023-01-08T19:03:11.864298 | 2019-06-13T05:07:39 | 2019-06-13T05:07:39 | 180,684,629 | 1 | 0 |
MIT
| 2022-12-09T22:33:18 | 2019-04-11T00:33:03 |
Python
|
UTF-8
|
Python
| false | false | 6,414 |
py
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.dialog_state import DialogState
from ask_sdk_model.intent import Intent
class CanFulfillIntentRequest(Request):
"""
An object that represents a request made to skill to query whether the skill can understand and fulfill the intent request with detected slots, before actually asking the skill to take action. Skill should be aware this is not to actually take action, skill should handle this request without causing side-effect, skill should not modify some state outside its scope or has an observable interaction with its calling functions or the outside world besides returning a value, such as playing sound,turning on/off lights, committing a transaction or a charge.
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param dialog_state:
:type dialog_state: (optional) ask_sdk_model.dialog_state.DialogState
:param intent:
:type intent: (optional) ask_sdk_model.intent.Intent
"""
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'locale': 'str',
'dialog_state': 'ask_sdk_model.dialog_state.DialogState',
'intent': 'ask_sdk_model.intent.Intent'
} # type: Dict
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'locale': 'locale',
'dialog_state': 'dialogState',
'intent': 'intent'
} # type: Dict
def __init__(self, request_id=None, timestamp=None, locale=None, dialog_state=None, intent=None):
# type: (Optional[str], Optional[datetime], Optional[str], Optional[DialogState], Optional[Intent]) -> None
"""An object that represents a request made to skill to query whether the skill can understand and fulfill the intent request with detected slots, before actually asking the skill to take action. Skill should be aware this is not to actually take action, skill should handle this request without causing side-effect, skill should not modify some state outside its scope or has an observable interaction with its calling functions or the outside world besides returning a value, such as playing sound,turning on/off lights, committing a transaction or a charge.
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param dialog_state:
:type dialog_state: (optional) ask_sdk_model.dialog_state.DialogState
:param intent:
:type intent: (optional) ask_sdk_model.intent.Intent
"""
self.__discriminator_value = "CanFulfillIntentRequest" # type: str
self.object_type = self.__discriminator_value
super(CanFulfillIntentRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
self.dialog_state = dialog_state
self.intent = intent
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, CanFulfillIntentRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
4a5a3b8daa86ac399ae0a0cc3604254a77635bbf
|
00cb405170a6a9572bef0ec8f373813eada08c03
|
/Agario/Window.py
|
bb5038b0e49d302d2ded8589eaacfcc9884a849c
|
[] |
no_license
|
MarcPartensky/Python-Games
|
c0ad2857be5832d6029642bb0a96bc8e403a12e3
|
ebfcaaf4a028eddb36bbc99184eb3f7a86eb24ed
|
refs/heads/master
| 2022-09-03T00:04:16.402288 | 2022-08-12T17:10:22 | 2022-08-12T17:10:22 | 166,606,022 | 2 | 1 | null | 2021-03-07T16:20:15 | 2019-01-19T23:56:04 |
Python
|
UTF-8
|
Python
| false | false | 1,621 |
py
|
import pygame
from pygame.locals import *
class Window:
made=0
def __init__(self,game=None,size=None,font="monospace",set=True):
Window.made+=1
self.number=Window.made
self.title=game.name
self.font=font
self.open=True
pygame.init()
self.setSize(size)
self.font = pygame.font.SysFont(self.font, 65)
self.screen=pygame.display.set_mode(self.size)
pygame.display.set_caption(self.title)
def setSize(self,size=None):
if size is None:
info = pygame.display.Info()
self.size=(info.current_w/2,info.current_h/2)
else:
self.size=size
def pop_up(self,message):
pass
def scale(self,picture,size):
return pygame.transform.scale(picture,size)
def check(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.open=False
def select(self):
while self.open:
self.check()
for event in pygame.event.get():
if event.type == MOUSEBUTTONDOWN and event.button == 1:
return (event.pos[0],event.pos[1])
def point(self):
for event in pygame.event.get():
return (event.pos[0],event.pos[1])
def flip(self):
pygame.display.flip()
    def drawBackground(self,background):
        # a tuple is treated as a fill colour, anything else as a picture surface
        if type(background) is tuple:
            self.screen.fill(background)
        else:
            self.screen.blit(background,(0,0))
def drawPicture(self,picture,position):
self.screen.blit(picture, position)
    def display(self,page):
pass
|
[
"[email protected]"
] | |
037c0297e6528cdbf68ecb8b3295c9ce74f0598e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/143/usersdata/126/62651/submittedfiles/av2_p3_m2.py
|
7d22e7aa5f8b510c2f6ca3d97182ffc3fc5c67bd
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 653 |
py
|
# -*- coding: utf-8 -*-
def listadegraus(a):
b=[]
for i in range(0,len(a)-1,1):
        if a[i]>a[i+1]:
            cont=0
            for j in range(a[i],a[i+1],-1):
                cont=cont+1
            b.insert(0,cont)
        elif a[i]<a[i+1]:
            cont=0
            for j in range(a[i],a[i+1],1):
                cont=cont+1
            b.insert(0,cont)
        elif a[i]==a[i+1]:
            cont=0
            b.insert(0,cont)
return(b)
n=int(input('digite a quantidade de termos da lista:'))
a=[]
for i in range(0,n,1):
m=int(input('digite um valor:'))
a.append(m)
print(listadegraus(a))
|
[
"[email protected]"
] | |
df82e709433df0b153edd7d9aea14060851ad2cf
|
c31c8095ce4d4e9686e3e7ad6b004342e49671fa
|
/forum/classes/archives/CLASS_Lieu.py
|
c5b8db114583e2f045264fd8b45f2735706e116e
|
[] |
no_license
|
Lionalisk/arrakambre
|
7bcc96dea2ca2a471572bfb1646256f1382ce25b
|
2caece9be5eebf21ddfa87a6c821c32b5d5019a2
|
refs/heads/master
| 2020-12-07T19:31:24.471090 | 2020-01-09T10:14:29 | 2020-01-09T10:14:29 | 232,782,172 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,184 |
py
|
from django.db import models
from forum.models import Maison
from forum.classes.CLASS_Perso import *
print('BBBB')
class Lieu(models.Model):
nom = models.CharField(max_length=100, unique=True)
description = models.TextField(default='')
image = models.CharField(max_length=40, default = 'lieu_none.jpg')
maison = models.ForeignKey(Maison, verbose_name="Maison", null=True, on_delete=models.SET_NULL, blank=True)
passages = models.ManyToManyField('self', blank=True)
lieu_parent = models.ForeignKey('self', verbose_name="Lieu", null=True, on_delete=models.DO_NOTHING, blank=True)
dissimulation = models.SmallIntegerField(default=0)
defense_garde = models.SmallIntegerField(default=0)
defense_assault = models.SmallIntegerField(default=0)
defense_intrusion = models.SmallIntegerField(default=0)
perso_autorise = models.ManyToManyField('Perso', blank=True, related_name = 'persos_autorises') # liste des personnes autorisees par le maitre des lieux a entrer
secret = models.BooleanField(default=False)
proprietaire = models.ForeignKey('Perso', null=True, on_delete=models.SET_NULL, blank=True, related_name = 'proprietaire')
#action =
def __str__(self):
return self.nom
|
[
"[email protected]"
] | |
da6fa81c852b746e1fded343f4e04a7e146e335e
|
39b8aa964883b2bde4349e0c9c38e3233c310548
|
/src/Power of Four.py
|
96d2db9a48b59d6376e2dbcb8be1027d9d34085f
|
[] |
no_license
|
orifake/leetcode-python
|
053b82491e0b8d6197dd12d92eec5883211285db
|
8e375ebebe0a0285efefc33ed61afb22f41d0c75
|
refs/heads/master
| 2023-03-09T14:32:17.833456 | 2021-02-26T16:09:31 | 2021-02-26T16:09:31 | 264,466,829 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
import math
class Solution(object):
def isPowerOfFour(self, num):
"""
:type num: int
:rtype: bool
"""
return num > 0 and (num & (num - 1)) == 0 and \
((num & 0b01010101010101010101010101010101) == num)
class Solution2:
def isPowerOfFour(self, num: int) -> bool:
if num <= 0:
return False
return (math.log10(num) / math.log10(4)) % 1 == 0
t = Solution()
print(t.isPowerOfFour(4))
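# Quick sanity checks: a power of four has a single set bit sitting in an even
# position, which is exactly what the 0b0101... mask above tests.
assert t.isPowerOfFour(16)
assert not t.isPowerOfFour(8)   # a power of two, but its bit is in an odd position
assert not t.isPowerOfFour(0)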
|
[
"[email protected]"
] | |
17eb256179da0f291fdd0e5d21d32169501672e1
|
e21ed71610f9d1004dfa21206300c0e9f3887e89
|
/modulo_2/Codewars/dev-junior/find_even_array.py
|
beb4a2bad5d9a8b39ec87d16249da6a0ba36113a
|
[] |
no_license
|
hpfn/wttd-2017-exerc
|
c0c79ee0cb3b5b331932787d280deee679357bc1
|
b1bf1394d2e2adc29257b7c4273af21b8509335f
|
refs/heads/master
| 2020-12-30T11:29:13.218980 | 2017-10-03T19:04:03 | 2017-10-03T19:04:03 | 91,572,803 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 176 |
py
|
# coding=utf-8
def find_even_index(arr):
tam_arr = len(arr)
for x in range(tam_arr):
if sum(arr[:x]) == sum(arr[x+1:]):
return x
return -1
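# A couple of illustrative checks: index 3 balances [1, 2, 3, 4, 3, 2, 1] because
# 1+2+3 == 3+2+1, and no index balances [1, 2, 3].
if __name__ == '__main__':
    assert find_even_index([1, 2, 3, 4, 3, 2, 1]) == 3
    assert find_even_index([1, 2, 3]) == -1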
|
[
"[email protected]"
] | |
be24fff7640880924ac1b8352d63c9ce128039bd
|
49beeee0d9aff3b776545cb553ef1bf15dd9f190
|
/example/example/views.py
|
6c06b12a01b8dad493049a74201b5a5b9af1ada9
|
[
"MIT"
] |
permissive
|
bluedisk/django-korean-fields
|
238364cf4f766db824adec832aaa2d83619cded1
|
b655e23d9a73e61cb217e34719ee6a2509f8f475
|
refs/heads/master
| 2020-03-19T09:55:10.974426 | 2018-11-10T15:02:02 | 2018-11-10T15:02:02 | 136,327,803 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 595 |
py
|
# -*- coding: utf-8 -*-
from django.forms import forms, CharField
from django.http import HttpResponse
from django.shortcuts import render
from korean.fields import JuminFormField
class TestForm(forms.Form):
jumin1 = JuminFormField()
jumin2 = JuminFormField()
def demo(request):
if request.method == 'POST':
form = TestForm(request.POST)
if form.is_valid():
            return HttpResponse('success : ' + form.cleaned_data['jumin1'])  # the form defines jumin1/jumin2, not 'jumin'
else:
form = TestForm(initial={'jumin1': '010203-4567890'})
return render(request, 'demo.html', {'form': form})
|
[
"[email protected]"
] | |
d47c3724879680967f10765f503c820e7982fb3f
|
714d4d2796e9b5771a1850a62c9ef818239f5e77
|
/components/metrics/DEPS
|
2f4d413d44817a460d2dc1304dd4027f1f530765
|
[
"BSD-3-Clause"
] |
permissive
|
CapOM/ChromiumGStreamerBackend
|
6c772341f815d62d4b3c4802df3920ffa815d52a
|
1dde005bd5d807839b5d45271e9f2699df5c54c9
|
refs/heads/master
| 2020-12-28T19:34:06.165451 | 2015-10-21T15:42:34 | 2015-10-23T11:00:45 | 45,056,006 | 2 | 0 | null | 2015-10-27T16:58:16 | 2015-10-27T16:58:16 | null |
UTF-8
|
Python
| false | false | 243 |
# This component is shared with the Chrome OS build, so it's important to limit
# dependencies to a minimal set.
include_rules = [
"-components",
"+components/compression",
"+components/metrics",
"+components/variations",
"-net",
]
|
[
"[email protected]"
] | ||
d3b6e9f0e660a6ab3559ab5e2029a46b8e10bf27
|
255efb54075eb8cc2412bf1d5c936a97a003337e
|
/xt/environment/__init__.py
|
69338935f833cbdd1def7455667f8075e68b8eed
|
[
"MIT"
] |
permissive
|
jinqiuzhao/xingtian
|
914a4d48c62fd8b3d4ddd0479e9bab54bbe5cba7
|
95953dc6109c96e68dcdeb9755b3679ff51742d4
|
refs/heads/master
| 2023-06-06T06:20:28.815549 | 2021-07-02T10:00:42 | 2021-07-02T10:00:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build environment module.
Do encapsulation for different simulations.
Unify the single and multi-agents.
"""
from __future__ import division, print_function
from xt.framework import Registers
def env_builder(env_name, env_info, **kwargs):
"""
Build the interface func for creating environment.
:param env_name:the name of environment
:param env_info: the config info of environment
:return:environment instance
"""
return Registers.env[env_name](env_info, **kwargs)
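# A rough usage sketch; the environment name and config keys below are illustrative
# assumptions and must match an entry actually registered in Registers.env:
#
#     env = env_builder("gym_env", {"name": "CartPole-v0"})
#     state = env.reset()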
|
[
"[email protected]"
] | |
d98e426c5ffa96200e49a63c91cbb1ac43216323
|
220e3fe31f00df908dc8d00c507400425f924cc3
|
/examples/multi_system/act6/unload.py
|
bf0fcc574b45c2f7fcf2d21c030c21e4aa89ff1f
|
[
"MIT"
] |
permissive
|
danielmitterdorfer/Thespian
|
3ed700d9fc6da35becfe801d3ab3bb68c86bddbc
|
f59439df8a6147b90ec31b44924d6a1b620f09d9
|
refs/heads/master
| 2021-01-19T05:06:33.005708 | 2017-07-31T04:44:03 | 2017-07-31T04:44:03 | 65,544,862 | 0 | 0 | null | 2016-08-12T10:22:29 | 2016-08-12T10:22:29 | null |
UTF-8
|
Python
| false | false | 238 |
py
|
from thespian.actors import ActorSystem, Actor, ValidateSource, ValidatedSource
import sys
portnum = int(sys.argv[1])
srchash = sys.argv[2]
asys = ActorSystem('multiprocTCPBase', {'Admin Port': portnum})
asys.unloadActorSource(srchash)
|
[
"[email protected]"
] | |
46e48392571cf7b50609181560a7a5b5cfd54d72
|
1d665f40197ba89f756e862c0e62a889c42cddfb
|
/commission/migrations/0007_auto_20150407_2034.py
|
2b1be1c3a9965aa2314ab05057b9179433f0c7eb
|
[
"MIT"
] |
permissive
|
Ourinternet/website
|
8d9f9ddfe7d17fb0bb11b978cf3a7cd34af456ed
|
648203c0d0620da2d11b3b0e398ee218b5bef5df
|
refs/heads/master
| 2021-01-21T21:49:06.834576 | 2016-03-16T20:43:58 | 2016-03-16T20:43:58 | 15,683,988 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('commission', '0006_auto_20150407_1825'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='link',
field=models.CharField(max_length=1024, null=True, blank=True),
),
]
|
[
"[email protected]"
] | |
044985b9b265586f2b071cc1296c5845a039b17d
|
56b7e5ed6941fc4b83148e00bd51421dc3ac993a
|
/Indeed/Expire Map.py
|
2b1778212c66da456e0bb6bd3e0defd2bbc1db77
|
[] |
no_license
|
samir-0711/Leetcode-Python
|
f960e15015a3f2fd88f723d7f9237945a7133553
|
d75876ae96bcd85c67bbfbf91bbc0f0bc773e97c
|
refs/heads/master
| 2022-12-18T05:27:48.224001 | 2020-09-30T21:03:42 | 2020-09-30T21:03:42 | 300,061,318 | 0 | 0 | null | 2020-09-30T20:59:42 | 2020-09-30T20:59:42 | null |
UTF-8
|
Python
| false | false | 722 |
py
|
import time
class Data:
def __init__(self, value, duration):
self.value = value
self.duration = duration
self.startTime = int(round(time.time()))
class ExpireMap:
def __init__(self):
self.map = {}
def get(self, key):
        data = self.map.get(key)  # avoid KeyError on a missing key
        if data is None:
            return None
        currTime = int(round(time.time()))
        if currTime - data.startTime <= data.duration:
            return data.value
        else:
            # expired: remove the entry from the map (not just the local name)
            del self.map[key]
            return None
def set(self, key, value, duration):
data = Data(value, duration)
self.map[key] = data
test1 = ExpireMap()
test1.set(1, 5, 3)
time.sleep(2)
print test1.get(1)
time.sleep(2)
print test1.get(1)
|
[
"[email protected]"
] | |
6d9a899cc5415e40329693b80d3cc1bbf9759db2
|
a257bf65a2a1ba2c6841dd25c89d98c5672e4e57
|
/BackEnd/Semana22/DjangoRestFramework/DjangoRestFramework/wsgi.py
|
424593130b609b9f268eda5e5d98d2c974645dad
|
[] |
no_license
|
jorgegarba/CodiGo9
|
190cb67e3c7f9cbad271baf62657bda7ca03ec42
|
3b85c36a3ed8d2d5ee1d0fb6e8ca18599621fe47
|
refs/heads/master
| 2023-01-22T22:31:00.244982 | 2020-03-31T17:59:37 | 2020-03-31T17:59:37 | 211,982,487 | 6 | 5 | null | 2023-01-05T05:23:27 | 2019-10-01T00:21:25 |
JavaScript
|
UTF-8
|
Python
| false | false | 415 |
py
|
"""
WSGI config for DjangoRestFramework project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRestFramework.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
0bebf2b16ff727c6ad6f1d7aca0f42970ec1dc48
|
bed559d18b0a9604e6d18879e1f3837d228d1440
|
/rx/backpressure/pausable.py
|
631ce64e952fd6f555f3e9866c6f605c96299a8e
|
[
"Apache-2.0"
] |
permissive
|
jesonjn/RxPY
|
a80b7a8f0a3a8a6ddcb7f3ed678d2f8411cad84e
|
9dfb62979f2c54b93bbb8c0ee5fa18cfae4d73d0
|
refs/heads/master
| 2020-12-29T00:25:17.866220 | 2014-11-15T10:24:05 | 2014-11-15T10:24:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,932 |
py
|
from six import add_metaclass
from rx import Observable
from rx.internal import ExtensionMethod
from rx.disposables import CompositeDisposable, Disposable
from rx.subjects import Subject
class PausableObservable(Observable):
def __init__(self, source, subject=None):
self.source = source
self.subject = subject or Subject()
self.is_paused = True
super(PausableObservable, self).__init__(self.subscribe)
def subscribe(self, observer):
conn = self.source.publish()
subscription = conn.subscribe(observer)
connection = [Disposable.empty()]
def on_next(b):
if b:
connection[0] = conn.connect()
else:
connection[0].dispose()
connection[0] = Disposable.empty()
pausable = self.subject.distinct_until_changed().subscribe(on_next)
return CompositeDisposable(subscription, connection[0], pausable)
def pause(self):
if self.is_paused:
return
self.is_paused = True
self.subject.on_next(False)
def resume(self):
if not self.is_paused:
return
self.is_paused = False
self.subject.on_next(True)
@add_metaclass(ExtensionMethod)
class ObservablePausable(Observable):
"""Uses a meta class to extend Observable with the methods in this class"""
def pausable(self, pauser):
"""Pauses the underlying observable sequence based upon the observable
sequence which yields True/False.
Example:
pauser = rx.Subject()
source = rx.Observable.interval(100).pausable(pauser)
Keyword parameters:
pauser -- {Observable} The observable sequence used to pause the
underlying sequence.
Returns the observable {Observable} sequence which is paused based upon
the pauser."""
return PausableObservable(self, pauser)
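# A rough usage sketch expanding the docstring example above; the interval length
# and callback are illustrative only:
#
#     from rx.subjects import Subject
#     pauser = Subject()
#     source = Observable.interval(1000).pausable(pauser)
#     subscription = source.subscribe(lambda x: print(x))
#     pauser.on_next(True)    # resume the underlying sequence
#     pauser.on_next(False)   # pause it again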
|
[
"[email protected]"
] | |
b891a21e50fd7f9a52706f2b802ad343cca4ea72
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/compute_management_client_enums.py
|
94796a92c7936618c37a51b7bf0ec2a9b37639ee
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 |
MIT
| 2019-10-25T15:56:00 | 2014-06-27T19:40:56 |
Python
|
UTF-8
|
Python
| false | false | 1,085 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class StorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
class OperatingSystemTypes(str, Enum):
windows = "Windows"
linux = "Linux"
class DiskCreateOption(str, Enum):
empty = "Empty"
attach = "Attach"
from_image = "FromImage"
import_enum = "Import"
copy = "Copy"
restore = "Restore"
class SnapshotStorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
standard_zrs = "Standard_ZRS"
class AccessLevel(str, Enum):
none = "None"
read = "Read"
|
[
"[email protected]"
] | |
cddab9580d9af9da3a18d635c9717ed2acc1f201
|
4bc2d855558ccb962991f997e9779919031687dd
|
/capstone/causalmodel/migrations/0001_initial.py
|
d9fe267a7a9b8e4c5697913127b312847c7b2554
|
[] |
no_license
|
jmblontoc/Likha-Capstone
|
80081e44b7ad6457eb776432e623c6db8b7a17e2
|
e1c32911b58cd1419c8e1a554ac32210456d201d
|
refs/heads/master
| 2022-12-10T03:26:32.946638 | 2018-12-09T04:33:10 | 2018-12-09T04:33:10 | 134,726,142 | 0 | 1 | null | 2022-11-25T23:52:42 | 2018-05-24T14:21:36 |
Python
|
UTF-8
|
Python
| false | false | 1,187 |
py
|
# Generated by Django 2.0.5 on 2018-06-27 15:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metric', models.CharField(max_length=255)),
('value', models.DecimalField(decimal_places=2, max_digits=10)),
('threshold', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='RootCause',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.AddField(
model_name='datamap',
name='root_cause',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='causalmodel.RootCause'),
),
]
|
[
"[email protected]"
] | |
770781cf8434a6484eb3418aafba1bd504f0315d
|
1a819b4d69a7c455199b638b1609d3284ecbf255
|
/alttprbot_srl/racebot.py
|
c760ffc28d30de0301fd73fb1bf3fb04a1d6a28b
|
[] |
no_license
|
Maxor14/sahasrahbot
|
5167355a23a4e9d91171b583fe8065acd0ab99a6
|
9183933869f87743d94867cf52c463179d0b687a
|
refs/heads/master
| 2021-05-22T21:30:54.015013 | 2020-04-01T01:01:47 | 2020-04-01T01:01:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,951 |
py
|
import asyncio
import math
import re
import ircmessage
from alttprbot.database import spoiler_races, srl_races
from alttprbot.tournament import league
from alttprbot.util.srl import srl_race_id
from alttprbot_srl import alt_hunter, discord_integration
from config import Config as c
starting = re.compile(
"\\x034\\x02The race will begin in 10 seconds!\\x03\\x02")
go = re.compile("\\x034\\x02GO!\\x03\\x02")
newroom = re.compile(
"Race initiated for (.*)\. Join\\x034 (#srl-[a-z0-9]{5}) \\x03to participate\.")
runnerdone = re.compile(
"(.*) (has forfeited from the race\.|has finished in .* place with a time of [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.)")
racedone = re.compile(
"^Status: Complete \| Game: .*$"
)
srl_game_whitelist = [
'The Legend of Zelda: A Link to the Past Hacks',
'A Link to the Past & Super Metroid Combo Randomizer'
]
async def topic_change_handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
if target.startswith('#srl-') and racedone.search(message):
await asyncio.sleep(5)
await league.process_league_race_finish(target, client)
async def handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
srl_id = srl_race_id(target)
if target == '#speedrunslive':
result = newroom.search(message)
if result and result.group(1) in srl_game_whitelist:
if not c.DEBUG:
await asyncio.sleep(1)
await client.join(result.group(2))
await asyncio.sleep(60)
await client.message(result.group(2), "Hi! I'm SahasrahBot, your friendly robotic elder and ALTTPR/SMZ3 seed roller. To see what I can do, visit https://sahasrahbot.synack.live")
else:
print(f'would have joined {result.group(2)}')
if target.startswith('#srl-'):
if starting.match(message) or message == 'test starting':
race = await srl_races.get_srl_race_by_id(srl_id)
if race:
if not client.in_channel(target):
await client.join(target)
await client.message(target, f".setgoal {race['goal']}")
if race['message'] is not None:
await asyncio.sleep(15)
await client.message(target, race['message'])
await srl_races.delete_srl_race(srl_id)
if go.match(message) or message == 'test go':
# spoilers
race = await spoiler_races.get_spoiler_race_by_id(srl_id)
if race:
await client.message(target, 'Sending spoiler log...')
await client.message(target, '---------------')
await client.message(target, f"This race\'s spoiler log: {race['spoiler_url']}")
await client.message(target, '---------------')
await client.message(target, 'GLHF! :mudora:')
await countdown_timer(
ircbot=client,
duration_in_seconds=race['studytime'],
srl_channel=target,
beginmessage=True,
)
await spoiler_races.delete_spoiler_race(srl_id)
await discord_integration.discord_race_start(srl_id)
await alt_hunter.check_race(srl_id)
if message == 'test complete':
await topic_change_handler(target, source, message, client)
result = runnerdone.search(message)
if result:
await discord_integration.discord_race_finish(result.group(1), srl_id)
async def countdown_timer(ircbot, duration_in_seconds, srl_channel, beginmessage=False):
loop = asyncio.get_running_loop()
reminders = [1800, 1500, 1200, 900, 600, 300,
120, 60, 30, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
start_time = loop.time()
end_time = loop.time() + duration_in_seconds
while True:
# print(datetime.datetime.now())
timeleft = math.ceil(start_time - loop.time() + duration_in_seconds)
# print(timeleft)
if timeleft in reminders:
minutes = math.floor(timeleft/60)
seconds = math.ceil(timeleft % 60)
if minutes == 0 and seconds > 10:
msg = f'{seconds} second(s) remain!'
elif minutes == 0 and seconds <= 10:
msg = ircmessage.style(
f"{seconds} second(s) remain!", fg='green', bold=True)
else:
msg = f'{minutes} minute(s), {seconds} seconds remain!'
await ircbot.message(srl_channel, msg)
reminders.remove(timeleft)
if loop.time() >= end_time:
if beginmessage:
await ircbot.message(srl_channel, ircmessage.style('Log study has finished. Begin racing!', fg='red', bold=True))
break
await asyncio.sleep(.5)
|
[
"[email protected]"
] | |
07c821b253d8b2176af47cd42bb65e0f706db38a
|
3109e3a7f2f2dccc5a806695f0adbe0fed879112
|
/ecommerce/Loma/migrations/0022_auto_20190204_1200.py
|
4724c3c1c3f80c03fa75c1a13fc32a1f6bb13401
|
[] |
no_license
|
Maheshwari2604/ecommercee
|
9ebbf18b4fbf933a0d9641009f7f17ce836de587
|
4411e7e10eccda907711200d2c0d873db3d7f803
|
refs/heads/master
| 2020-04-20T18:03:49.575124 | 2019-02-12T16:02:05 | 2019-02-12T16:02:05 | 169,007,411 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 466 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-04 06:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Loma', '0021_auto_20190203_1829'),
]
operations = [
migrations.AlterField(
model_name='promocode_model',
name='promocode_name',
field=models.CharField(max_length=11),
),
]
|
[
"[email protected]"
] | |
8c1b2c443b10f64ad81dbb48b78341c22ec527dc
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/discount_info_v3.py
|
3eeec1c5d49a77c443407f9193187e6c6e93816a
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,663 |
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DiscountInfoV3:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'discount_id': 'str',
'discount_value': 'str',
'discount_type': 'int',
'orders': 'list[OrderV3]'
}
attribute_map = {
'discount_id': 'discount_id',
'discount_value': 'discount_value',
'discount_type': 'discount_type',
'orders': 'orders'
}
def __init__(self, discount_id=None, discount_value=None, discount_type=None, orders=None):
"""DiscountInfoV3 - a model defined in huaweicloud sdk"""
self._discount_id = None
self._discount_value = None
self._discount_type = None
self._orders = None
self.discriminator = None
self.discount_id = discount_id
self.discount_value = discount_value
self.discount_type = discount_type
self.orders = orders
@property
def discount_id(self):
"""Gets the discount_id of this DiscountInfoV3.
        ID of the discount available for the order. Pass this value when paying the order to apply the discount.
:return: The discount_id of this DiscountInfoV3.
:rtype: str
"""
return self._discount_id
@discount_id.setter
def discount_id(self, discount_id):
"""Sets the discount_id of this DiscountInfoV3.
        ID of the discount available for the order. Pass this value when paying the order to apply the discount.
:param discount_id: The discount_id of this DiscountInfoV3.
:type: str
"""
self._discount_id = discount_id
@property
def discount_value(self):
"""Gets the discount_value of this DiscountInfoV3.
        Discount rate or fixed reduction amount; empty when the discount mode is a flat price.
:return: The discount_value of this DiscountInfoV3.
:rtype: str
"""
return self._discount_value
@discount_value.setter
def discount_value(self, discount_value):
"""Sets the discount_value of this DiscountInfoV3.
        Discount rate or fixed reduction amount; empty when the discount mode is a flat price.
:param discount_value: The discount_value of this DiscountInfoV3.
:type: str
"""
self._discount_value = discount_value
@property
def discount_type(self):
"""Gets the discount_type of this DiscountInfoV3.
        Discount type: 0: promotional discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price-adjustment discount.
:return: The discount_type of this DiscountInfoV3.
:rtype: int
"""
return self._discount_type
@discount_type.setter
def discount_type(self, discount_type):
"""Sets the discount_type of this DiscountInfoV3.
        Discount type: 0: promotional discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price-adjustment discount.
:param discount_type: The discount_type of this DiscountInfoV3.
:type: int
"""
self._discount_type = discount_type
@property
def orders(self):
"""Gets the orders of this DiscountInfoV3.
        List of orders the discount can be applied to. See Table 3 for details.
:return: The orders of this DiscountInfoV3.
:rtype: list[OrderV3]
"""
return self._orders
@orders.setter
def orders(self, orders):
"""Sets the orders of this DiscountInfoV3.
        List of orders the discount can be applied to. See Table 3 for details.
:param orders: The orders of this DiscountInfoV3.
:type: list[OrderV3]
"""
self._orders = orders
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscountInfoV3):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
9c68ae44c857794289d718b86b9cf28781944546
|
d49f38323dc30a3cb4a581b451f7db7eec220324
|
/app.py
|
c50f59488d7cad0a63272dce103f97c62cf594dd
|
[] |
no_license
|
bbiyongel/NaverAPI-telegram
|
0e67259ed2faa86860014f0a5ff1ee0528175b67
|
bfcffdb03c6c2cb2387aee461490c520542227bf
|
refs/heads/master
| 2022-01-15T19:50:28.409431 | 2019-07-12T09:00:15 | 2019-07-12T09:00:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,740 |
py
|
from pprint import pprint
from flask import Flask, request
import requests
from decouple import config
import random
app = Flask(__name__)
token = config('TELEGRAM_TOKEN')
base_url = f"https://api.telegram.org/bot{token}"
naver_client_id = config('NAVER_CLIENT_ID')
naver_client_secret = config('NAVER_CLIENT_SECRET')
@app.route(f'/{token}', methods=['POST']) #
def telegram():
response = request.get_json()
chat_id = response.get('message').get('chat').get('id')
    # If a photo file was sent,
if response.get('message').get('photo'):
        # grab the photo's file id
file_id = response.get('message').get('photo')[-1].get('file_id')
        # fetch the file path from the Telegram server.
file_response = requests.get(
f'{base_url}/getFile?file_id={file_id}').json()
        # build the download URL from the file path.
file_path = file_response.get('result').get('file_path')
file_url = f'https://api.telegram.org/file/bot{token}/{file_path}'
# print(file_url)
response = requests.get(file_url, stream=True)
image = response.raw.read()
        # 2. set the Naver API URL
naver_url = 'https://openapi.naver.com/v1/vision/celebrity'
        # 3. send the POST request
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
response = requests.post(naver_url, headers=headers, files={'image': image}).json()
if response.get('faces'):
best = response.get('faces')[0].get('celebrity')
if best.get('confidence') > 0.2:
text = f"{best.get('confidence')*100}%만큼 {best.get('value')}를 닮으셨네요"
else:
text = "연예인을 닮지 않음..."
else:
text = "사람 아닌듯"
# print(text)
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
    # If a text message was sent
elif response.get('message').get('text'):
        # store the user's message in text and the chat info in chat_id
text = response.get('message').get('text')
chat_id = response.get('message').get('chat').get('id')
if '/번역 ' == text[0:4]:
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
data = {
'source': 'ko',
'target': 'en',
'text': text[4:]
}
# data = {
# 'source': 'en',
# 'target': 'ko',
# 'text': 'War never again! Never again war!'
# }
            naver_url = 'https://openapi.naver.com/v1/papago/n2mt'  # translation endpoint; this branch never set naver_url
            response = requests.post(naver_url, headers=headers, data=data).json()
text = response.get('message').get('result').get('translatedText')
        # if the message is a greeting, reply with a custom greeting
elif '안녕' in text or 'hi' in text:
text = '간디'
elif '로또' in text:
text = sorted(random.sample(range(1,46), 6))
        # build the final url and send the message
if text=='호우':
text = '장마임'
if text=='패드립':
text = '패드립 머신 가동'
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
    return 'OK', 200  # 200: response status code
if __name__ == '__main__':
import os
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
|
[
"[email protected]"
] | |
9ca3d949f4eba7c4f5c4434c364d62be9b136a99
|
aa4024b6a846d2f6032a9b79a89d2e29b67d0e49
|
/UMLRT2Kiltera_MM/graph_MT_post__Model.py
|
3f264f3c35aea6264d6efa85f991b713f54237a9
|
[
"MIT"
] |
permissive
|
levilucio/SyVOLT
|
41311743d23fdb0b569300df464709c4954b8300
|
0f88827a653f2e9d3bb7b839a5253e74d48379dc
|
refs/heads/master
| 2023-08-11T22:14:01.998341 | 2023-07-21T13:33:36 | 2023-07-21T13:33:36 | 36,246,850 | 3 | 2 |
MIT
| 2023-07-21T13:33:39 | 2015-05-25T18:15:26 |
Python
|
UTF-8
|
Python
| false | false | 2,610 |
py
|
"""
__graph_MT_post__Model.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
___________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__Model(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([110.0, 41.0, 110.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__Model_S', width = '0', justify= 'left', stipple='' )
self.gf66 = GraphicalForm(drawing, h, 'gf66', fontObject=font)
self.graphForms.append(self.gf66)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__Model
|
[
"levi"
] |
levi
|
e6a2a28a5d17ffa3424d45048710a8687df2c863
|
9256eeff108787245a1d9a8e27f80c04377ba10f
|
/src/datasets/mnist.py
|
49071693a70659a10514560cc67cff58309b79cf
|
[
"MIT"
] |
permissive
|
martinhavlicek/meta-inference-public
|
99a22daef937921deb9f677f68aa1c954e456e55
|
3cad0b84acd407f3d790f3d75d3045f62bdbf250
|
refs/heads/master
| 2022-04-12T14:15:42.514426 | 2020-03-31T21:39:50 | 2020-03-31T21:39:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,748 |
py
|
import math
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
# ----- ROTATED MNIST -----
ROTATIONS = np.arange(-180, 180, 20)
DEFAULT_ROTATIONS = ROTATIONS[0::2]
UNSEEN_ROTATIONS = ROTATIONS[1::2]
DEFAULT_ROTATIONS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_ROTATIONS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_ROTATIONS_DISJOINT = ROTATIONS[:len(ROTATIONS) // 2 + 1]
UNSEEN_ROTATIONS_DISJOINT = ROTATIONS[len(ROTATIONS) // 2 + 1:]
ALL_ROTATIONS = ROTATIONS
DEFAULT_ROTATIONS_DICT = {
'standard': DEFAULT_ROTATIONS,
'sparse': DEFAULT_ROTATIONS_SPARSE,
'disjoint': DEFAULT_ROTATIONS_DISJOINT
}
UNSEEN_ROTATIONS_DICT = {
'standard': UNSEEN_ROTATIONS,
'sparse': UNSEEN_ROTATIONS_SPARSE,
'disjoint': UNSEEN_ROTATIONS_DISJOINT
}
def load_many_rotated_mnist(data_dir, image_size=32, train=True,
rotations=DEFAULT_ROTATIONS):
"""
Load 10 different MNIST datasets where the image in each dataset
has a particular rotation.
"""
return [
load_rotated_mnist( data_dir, image_size=image_size,
train=train, rotation=rotation)
for rotation in rotations
]
def load_rotated_mnist(data_dir, image_size=32, train=True, rotation=0):
"""
Load a MNIST dataset where each image has a rotation.
"""
rotate_image = rotate_transform(rotation)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
rotate_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def rotate_transform(angle):
def f(img):
return transforms.functional.rotate(img, angle)
return f
# ----- SCALED MNIST -----
SCALES = np.arange(0.5, 2.0, 0.1)
DEFAULT_SCALES = SCALES[0::2]
UNSEEN_SCALES = SCALES[1::2]
DEFAULT_SCALES_SPARSE = np.array([0.6, 1.0 ,1.4, 1.8])
UNSEEN_SCALES_SPARSE = np.array([0.5, 0.7, 0.8, 0.9, 1.1, 1.2, 1.3, 1.5, 1.6, 1.7, 1.9])
DEFAULT_SCALES_DISJOINT = SCALES[:len(SCALES) // 2 + 1]
UNSEEN_SCALES_DISJOINT = SCALES[len(SCALES) // 2 + 1:]
ALL_SCALES = SCALES
DEFAULT_SCALES_DICT = {
'standard': DEFAULT_SCALES,
'sparse': DEFAULT_SCALES_SPARSE,
'disjoint': DEFAULT_SCALES_DISJOINT
}
UNSEEN_SCALES_DICT = {
'standard': UNSEEN_SCALES,
'sparse': UNSEEN_SCALES_SPARSE,
'disjoint': UNSEEN_SCALES_DISJOINT
}
def load_many_scaled_mnist( data_dir, image_size=32, train=True,
scales=DEFAULT_SCALES):
"""
Load 10 different MNIST datasets where the image in each dataset
has a particular scale.
"""
return [
load_scaled_mnist( data_dir, image_size=image_size,
train=train, scale=scale)
for scale in scales
]
def load_scaled_mnist(data_dir, image_size=32, train=True, scale=1):
"""
Load a MNIST dataset where each image has is scaled by a scale.
"""
scale_image = scale_transform(scale)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
scale_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def scale_transform(scale):
def f(img):
size = img.size
i, j, h, w = get_crop_params(img, scale, ratio=1)
return transforms.functional.resized_crop(
img, i, j, h, w, size, Image.BILINEAR)
return f
def get_crop_params(img, scale, ratio=1):
w = img.size[0] * scale
h = img.size[1] * scale
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
# ----- SHEARED MNIST -----
SHEARS = np.arange(-180, 180, 20)
DEFAULT_SHEARS = SHEARS[0::2]
UNSEEN_SHEARS = SHEARS[1::2]
DEFAULT_SHEARS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_SHEARS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_SHEARS_DISJOINT = SHEARS[:len(SHEARS) // 2 + 1]
UNSEEN_SHEARS_DISJOINT = SHEARS[len(SHEARS) // 2 + 1:]
ALL_SHEARS = SHEARS
DEFAULT_SHEARS_DICT = {
'standard': DEFAULT_SHEARS,
'sparse': DEFAULT_SHEARS_SPARSE,
'disjoint': DEFAULT_SHEARS_DISJOINT
}
UNSEEN_SHEARS_DICT = {
'standard': UNSEEN_SHEARS,
'sparse': UNSEEN_SHEARS_SPARSE,
'disjoint': UNSEEN_SHEARS_DISJOINT
}
def load_many_sheared_mnist(data_dir, image_size=32, train=True,
shears=DEFAULT_SHEARS):
"""
Load 10 different MNIST datasets where the image in each dataset
has a particular shear.
"""
return [
load_sheared_mnist( data_dir, image_size=image_size,
train=train, shear=shear)
for shear in shears
]
def load_sheared_mnist(data_dir, image_size=32, train=True, shear=0):
"""
Load a MNIST dataset where each image has a rotation.
"""
shear_image = shear_transform(shear)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
shear_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def shear_transform(shear):
def f(img):
return transforms.functional.affine(img, 0, (0, 0), 1, shear)
return f
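# A minimal usage sketch, assuming torch/torchvision are installed and "./data" is
# writable: build one dataset per training rotation and batch the first one.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dsets = load_many_rotated_mnist("./data", image_size=32, train=True,
                                    rotations=DEFAULT_ROTATIONS)
    loader = DataLoader(dsets[0], batch_size=64, shuffle=True)
    images, labels = next(iter(loader))
    print(images.shape, labels.shape)  # e.g. torch.Size([64, 1, 32, 32]) / torch.Size([64])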
|
[
"[email protected]"
] | |
ac81e7a4a5a4e1eec99fc4dd938031a42d326728
|
1064db5dfd154c4bc600e0e03841b0f73f0eefbc
|
/home/migrations/0008_auto_20200529_0800.py
|
55f78b9f74855b21f14e8caf061dee753c0981a6
|
[] |
no_license
|
crowdbotics-apps/web-29-dev-5196
|
3303921a0e5c8794e8e67f55c9841f3ec7610c16
|
7beda8f7d57ce9b9858a46f7e3940d6eed4b5725
|
refs/heads/master
| 2023-05-26T23:00:23.271209 | 2020-05-29T12:47:07 | 2020-05-29T12:47:07 | 267,768,914 | 0 | 0 | null | 2021-06-13T04:08:30 | 2020-05-29T04:59:18 |
Python
|
UTF-8
|
Python
| false | false | 342 |
py
|
# Generated by Django 2.2.12 on 2020-05-29 08:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("home", "0007_customtext_kjhkh"),
]
operations = [
migrations.RenameField(
model_name="customtext", old_name="kjhkh", new_name="ghfnhgfgjh",
),
]
|
[
"[email protected]"
] | |
b5c1fff82ac0901d1ae985cd1826ca4b47c6f5af
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/Bisong19Building/I_PartVIII/C_Chapter47/index.py
|
cce9e2225cec24eabc5302e3a2817b1a5b9cd72f
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,830 |
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_Overviewof.index import Overviewof as A_Overviewof
from .B_Createa.index import Createa as B_Createa
from .C_BuildContainers.index import BuildContainers as C_BuildContainers
from .D_Compilethe.index import Compilethe as D_Compilethe
from .E_Uploadand.index import Uploadand as E_Uploadand
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CHAPTER 47
#
#
#
# Deploying
# an End-to-End Machine
# Learning Solution
# on Kubeflow Pipelines
# A Kubeflow pipeline component is an implementation of a pipeline task. A component
# is a step in the workflow. Each task takes one or more artifacts as input and may produce
# one or more artifacts as output.
# Each component usually includes two parts:
#
# • Client code: The code that talks to endpoints to submit jobs, for
# example, code to connect with the Google Cloud Machine Learning
# Engine.
#
# • Runtime code: The code that does the actual job and usually runs in
# the cluster, for example, the code that prepares the model for training
# on Cloud MLE.
# A component consists of an interface (inputs/outputs), the implementation
# (a Docker container image and command-line arguments), and metadata (name,
# description).
#
#
#
#
# 687
# © Ekaba Bisong 2019
# E. Bisong, Building Machine Learning and Deep Learning Models on Google Cloud Platform,
# https://doi.org/10.1007/978-1-4842-4470-8_47
#
# Chapter 47 Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines
#
#
# Overview of a Simple End-to-End Solution Pipeline
# In this simple example, we will implement a deep neural regressor network to predict the
# closing prices of Bitcoin crypto-currency. The machine learning code itself is pretty basic
# as it is not the focus of this article. The goal here is to orchestrate a machine learning
# engineering solution using microservice architectures on Kubernetes with Kubeflow
# Pipelines. The code for this chapter is in the book code repository. Clone the repository
# from the GCP Cloud Shell.
# The pipeline consists of the following components:
#
# 1. Move raw data hosted on GitHub to a storage bucket.
#
# 2. Transform the dataset using Google Dataflow.
#
# 3. Carry out hyper-parameter training on Cloud Machine
# Learning Engine.
#
# 4. Train the model with the optimized hyper-parameters.
#
# 5. Deploy the model for serving on Cloud MLE.
#
#
#
# Create a Container Image for Each Component
# First, we’ll package the client and runtime code into a Docker image. This image
# also contains the secure service account key to authenticate against GCP. For example,
# the component to transform the dataset using Dataflow has the following files built into
# its image:
# • __ Dockerfile: Dockerfile to build the Docker image.
#
# • __ build.sh: Script to initiate the container build and upload to
# Google Container Registry.
#
# • __ dataflow_transform.py: Code to run the beam pipeline on
# Cloud Dataflow.
#
# • __ service_account.json: Secure key to authenticate container
# on GCP.
#
# • __ local_test.sh: Script to run the image pipeline component
# locally.
#
#
# 688
#
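#
# A minimal sketch of how one such step could be declared with the Kubeflow
# Pipelines SDK; the image name and arguments are illustrative assumptions:
#
#     import kfp.dsl as dsl
#
#     @dsl.pipeline(name='crypto-pipeline', description='BTC close-price model')
#     def pipeline(project_id: str):
#         transform = dsl.ContainerOp(
#             name='transform-dataset',
#             image='gcr.io/<project>/dataflow-transform:latest',
#             arguments=['--project', project_id],
#         )
#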
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
self.add(mbk("# Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Chapter47(HierNode):
def __init__(self):
super().__init__("Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines")
self.add(Content())
self.add(A_Overviewof())
self.add(B_Createa())
self.add(C_BuildContainers())
self.add(D_Compilethe())
self.add(E_Uploadand())
# eof
|
[
"[email protected]"
] | |
de3acc9720419a15a1a42835f76a34d6293154c3
|
16c77266859989d156fe3f4d0ce3a37a1898ad38
|
/dacc/xls/write.py
|
1fad19e6f9792761fed509ba748792ebd263a457
|
[
"MIT"
] |
permissive
|
SRHerzog/ut
|
92620e66be2ea9707d9cd3cf390179326ed2eefe
|
894bd5607eb76676aaea7a37ed8a91b5fb5e805e
|
refs/heads/master
| 2021-06-30T19:15:46.131299 | 2017-09-15T20:47:35 | 2017-09-15T20:47:35 | 103,696,926 | 0 | 0 | null | 2017-09-15T20:08:10 | 2017-09-15T20:08:10 | null |
UTF-8
|
Python
| false | false | 3,367 |
py
|
__author__ = 'thor'
import os
import pandas as pd
from pandas import ExcelWriter
from openpyxl import load_workbook
from openpyxl.reader.excel import InvalidFileException
try:
from xlwings import Workbook, Sheet
except ImportError as e:
print(e)
def multiple_dfs_to_multiple_sheets(df_list, xls_filepath, sheet_list=None, **kwargs):
"""
Writes multiple dataframes in different excel sheets.
Input:
* xls_filepath: The excel file to write into
* And then there's several choices:
* df_list (a list of dataframes) and sheet_list (a list of corresponding names)
* df_list = a list of {sheet_name: dataframe}
* df_list = a list of (sheet_name, dataframe) tuples, when the order of the sheets matters)
--> If no sheet names are given, the function either gives the name of the dataframe (if any), or
simply iterates over sheet numbers...
"""
if sheet_list is None:
if isinstance(df_list, dict):
# df_list, sheet_list = zip(df_list.values(), df_list.keys())
df_list, sheet_list = df_list.values(), df_list.keys()
elif isinstance(df_list[0], tuple):
sheet_list = map(lambda x: x[0], df_list)
df_list = map(lambda x: x[1], df_list)
else:
sheet_list = []
for i, df in enumerate(df_list):
name = df.name
if not name:
name = "sheet {}".format(i)
sheet_list.append(name)
writer = ExcelWriter(xls_filepath)
for df, sheet_name in zip(df_list, sheet_list):
df.to_excel(writer, sheet_name, **kwargs)
writer.save()
def df_to_excel_without_overwriting_it(df, xls_filepath, sheet_name, **kwargs):
"""
write df to an excel sheet without overwriting the whole excel file if it exists
(may need to create the excel with some data in it already for this to work)
"""
try:
book = load_workbook(xls_filepath)
writer = pd.ExcelWriter(xls_filepath, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
try:
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
except TypeError:
df = _replace_non_numeric_non_strings_with_strings(df)
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
writer.save()
except InvalidFileException:
try:
df.to_excel(excel_writer=xls_filepath, sheet_name=sheet_name, **kwargs)
except TypeError:
df = _replace_non_numeric_non_strings_with_strings(df)
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
def clear_sheet_contents_without_changing_formatting(xls_filepath, sheet_name):
    if os.path.exists(xls_filepath):  # else do nothing
with Workbook(fullname=xls_filepath, app_visible=False) as wkb:
Sheet(sheet=sheet_name, wkb=wkb).clear_contents()
def _replace_non_numeric_non_strings_with_strings(df):
index_names = df.index.names
df = df.reset_index(drop=False, inplace=False)
for c in df.columns:
if df[c].dtype.name == 'object':
if not isinstance(df[c].iloc[0], basestring):
df[c] = df[c].apply(str)
df = df.set_index(index_names)
return df
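# A minimal usage sketch; the frames and file name are illustrative:
if __name__ == '__main__':
    df_a = pd.DataFrame({'x': [1, 2]})
    df_b = pd.DataFrame({'y': [3, 4]})
    multiple_dfs_to_multiple_sheets([('first', df_a), ('second', df_b)],
                                    'example.xlsx', index=False)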
|
[
"[email protected]"
] | |
1109161a39f73fe01e4a6f4099ad4dad4a0939bc
|
abdb582b9ab76eaf6df1fdb5843c24fa6fa1ede0
|
/flendz_test/urls.py
|
80bc3d35b33735c54f511c2ea63a1065e235799b
|
[] |
no_license
|
jabykuniyil/flendz
|
1375341ee97986842d962702e0f1ac7f6d48cae7
|
ef952f9e14320b9c512b4047c6726ab9ff776120
|
refs/heads/main
| 2023-05-27T20:12:36.774259 | 2021-06-05T04:38:47 | 2021-06-05T04:38:47 | 372,798,247 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 170 |
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('test_app.url')),
]
|
[
"[email protected]"
] | |
ca2951f89c8fcf239e756f26b15ef01148feb032
|
3b50605ffe45c412ee33de1ad0cadce2c5a25ca2
|
/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py
|
7e252e048b64c9b158fabe21b818fbccaf71a26c
|
[
"Apache-2.0"
] |
permissive
|
Superjomn/Paddle
|
f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1
|
7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188
|
refs/heads/develop
| 2023-02-04T20:27:54.244843 | 2023-01-26T15:31:14 | 2023-01-26T15:31:14 | 66,896,049 | 4 | 1 |
Apache-2.0
| 2023-04-14T02:29:52 | 2016-08-30T01:45:54 |
C++
|
UTF-8
|
Python
| false | false | 3,680 |
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
from utils import extra_cc_args, paddle_includes
import paddle
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
file = '{}\\multi_out_jit\\multi_out_jit.pyd'.format(get_build_directory())
if os.name == 'nt' and os.path.isfile(file):
cmd = 'del {}'.format(file)
run_cmd(cmd, True)
# Compile and load custom op Just-In-Time.
multi_out_module = load(
name='multi_out_jit',
sources=['multi_out_test_op.cc'],
extra_include_paths=paddle_includes, # add for Coverage CI
extra_cxx_cflags=extra_cc_args, # test for cflags
verbose=True,
)
class TestMultiOutputDtypes(unittest.TestCase):
def setUp(self):
self.custom_op = multi_out_module.multi_out
self.dtypes = ['float32', 'float64']
self.devices = ['cpu']
def run_static(self, device, dtype):
paddle.set_device(device)
x_data = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
with paddle.static.scope_guard(paddle.static.Scope()):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data(name='X', shape=[None, 8], dtype=dtype)
outs = self.custom_op(x)
exe = paddle.static.Executor()
exe.run(paddle.static.default_startup_program())
res = exe.run(
paddle.static.default_main_program(),
feed={'X': x_data},
fetch_list=outs,
)
return res
def check_multi_outputs(self, outs, is_dynamic=False):
out, zero_float64, one_int32 = outs
if is_dynamic:
zero_float64 = zero_float64.numpy()
one_int32 = one_int32.numpy()
# Fake_float64
self.assertTrue('float64' in str(zero_float64.dtype))
np.testing.assert_array_equal(
zero_float64, np.zeros([4, 8]).astype('float64')
)
# ZFake_int32
self.assertTrue('int32' in str(one_int32.dtype))
np.testing.assert_array_equal(
one_int32, np.ones([4, 8]).astype('int32')
)
def test_static(self):
paddle.enable_static()
for device in self.devices:
for dtype in self.dtypes:
res = self.run_static(device, dtype)
self.check_multi_outputs(res)
paddle.disable_static()
def test_dynamic(self):
for device in self.devices:
for dtype in self.dtypes:
paddle.set_device(device)
x_data = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
x = paddle.to_tensor(x_data)
outs = self.custom_op(x)
self.assertTrue(len(outs) == 3)
self.check_multi_outputs(outs, True)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
665a296262fe97164ada5fc3e0db919390d90e00
|
e45d2faad9389886a82ff5176853b1ff6e37caae
|
/simplecv/017_face_detect.py
|
e93e398dd543658092ca32de34f80eb4096d57e8
|
[] |
no_license
|
allenmo/python_study
|
6320aa4cd80fe46ccf73076015c67bdcb6338d30
|
7aff5d810ca6e791d62235d57c072a8dc14457ca
|
refs/heads/master
| 2021-03-24T12:00:33.079530 | 2016-11-22T23:35:58 | 2016-11-22T23:35:58 | 55,770,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 678 |
py
|
from SimpleCV import *
cam = Camera()
disp = Display()
size = cam.getImage().size()
segment = HaarCascade("face.xml")
while disp.isNotDone():
img = cam.getImage()
autoface = img.findHaarFeatures(segment)
lenFace = len(autoface)
if ( lenFace > 0 ):
for i in range(0,lenFace):
face = autoface[i]
x = face.x
y = face.y
width = face.width()
height = face.height()
img.dl().centeredRectangle((x,y),(width,height),Color.LIME)
img.applyLayers()
img.drawText("Num of Face: " + str(lenFace), x = size[0]-150, y = size[1]-30, color = Color.LIME, fontsize = 24)
img.show()
|
[
"[email protected]"
] | |
f90334a1939d9b22c35a1f046ae87e4ce66693cb
|
ac305c6739541e84857e297f8eb1b19417978548
|
/module_128.py
|
b9ba541614d3ccd041e0fe0728a597cc18a34050
|
[] |
no_license
|
imhardikj/git_test
|
d6608d6c02e0bc454f9dd31ffbbc5704a7046a61
|
43f0de2e9ac09ecd4fdfee27879fd8ae354a0685
|
refs/heads/master
| 2020-03-27T21:56:46.394739 | 2018-09-03T11:27:58 | 2018-09-03T11:27:58 | 147,189,474 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,214 |
py
|
"""A set of classes used to represent electric cars."""
from module_121 import Car
class Battery():
"""A simple attempt to model a battery for an electric car."""
def __init__(self, battery_size=70):
"""Initialize the batteery's attributes."""
self.battery_size = battery_size
def describe_battery(self):
"""Print a statement describing the battery size."""
print("This car has a " + str(self.battery_size) + "-kWh battery.")
def get_range(self):
"""Print a statement about the range this battery provides."""
if self.battery_size == 70:
range = 240
elif self.battery_size == 85:
range = 270
message = "This car can go approximately " + str(range)
message += " miles on a full charge."
print(message)
class ElectricCar(Car):
"""Models aspects of a car, specific to electric vehicles."""
def __init__(self, make, model, year):
"""
Initialize attributes of the parent class.
Then initialize attributes specific to an electric car.
"""
super().__init__(make, model, year)
self.battery = Battery()
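# A minimal usage sketch, assuming module_121's Car accepts (make, model, year)
# as in the book's earlier examples:
if __name__ == '__main__':
    my_tesla = ElectricCar('tesla', 'model s', 2016)
    my_tesla.battery.describe_battery()   # "This car has a 70-kWh battery."
    my_tesla.battery.get_range()          # range for the default 70-kWh pack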
|
[
"[email protected]"
] | |
3c840954bad45d6884f9cadc51628038511b55ba
|
d6475dda9db9ea6e447db2b4d75d2ebdf454e9d8
|
/polls/models.py
|
fefdac850f120944eee69c1278d883e9925f2e2d
|
[] |
no_license
|
yoophi/django_polls
|
3d92b01f239ed6933b7593408b788f7adf2e6c31
|
f94c0ff6307cbdd2d3c65a6b5131a515b6fe67af
|
refs/heads/master
| 2021-01-10T00:57:18.706884 | 2016-03-24T14:50:38 | 2016-03-24T14:50:38 | 54,241,666 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 971 |
py
|
from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
@python_2_unicode_compatible
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
[
"[email protected]"
] | |
c9585d56b0fe94af3866093cae1b512d95ca70cb
|
fe3265b72e691c6df8ecd936c25b6d48ac33b59a
|
/tests/components/fritz/test_button.py
|
36af1c27f5e0bcf2f1852749964ed9cdf872c95c
|
[
"Apache-2.0"
] |
permissive
|
bdraco/home-assistant
|
dcaf76c0967783a08eec30ce704e5e9603a2f0ca
|
bfa315be51371a1b63e04342a0b275a57ae148bd
|
refs/heads/dev
| 2023-08-16T10:39:15.479821 | 2023-02-21T22:38:50 | 2023-02-21T22:38:50 | 218,684,806 | 13 | 7 |
Apache-2.0
| 2023-02-21T23:40:57 | 2019-10-31T04:33:09 |
Python
|
UTF-8
|
Python
| false | false | 2,402 |
py
|
"""Tests for Fritz!Tools button platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.components.fritz.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .const import MOCK_USER_DATA
from tests.common import MockConfigEntry
async def test_button_setup(hass: HomeAssistant, fc_class_mock, fh_class_mock) -> None:
"""Test setup of Fritz!Tools buttons."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
buttons = hass.states.async_all(BUTTON_DOMAIN)
assert len(buttons) == 4
for button in buttons:
assert button.state == STATE_UNKNOWN
@pytest.mark.parametrize(
("entity_id", "wrapper_method"),
[
("button.mock_title_firmware_update", "async_trigger_firmware_update"),
("button.mock_title_reboot", "async_trigger_reboot"),
("button.mock_title_reconnect", "async_trigger_reconnect"),
("button.mock_title_cleanup", "async_trigger_cleanup"),
],
)
async def test_buttons(
hass: HomeAssistant,
entity_id: str,
wrapper_method: str,
fc_class_mock,
fh_class_mock,
) -> None:
"""Test Fritz!Tools buttons."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
button = hass.states.get(entity_id)
assert button
assert button.state == STATE_UNKNOWN
with patch(
f"homeassistant.components.fritz.common.AvmWrapper.{wrapper_method}"
) as mock_press_action:
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
mock_press_action.assert_called_once()
button = hass.states.get(entity_id)
assert button.state != STATE_UNKNOWN
|
[
"[email protected]"
] | |
827c7b9b76801ff6a9ebbc2f8342fe133931ca45
|
f17de2f1a2804033a7b7fc74a0d09f964fe1d876
|
/hungerExpress/food/migrations/0003_auto_20180331_1736.py
|
a285d1dd32068594eea223b405926bad96304f74
|
[] |
no_license
|
udwivedi394/djangoProjects
|
60d6eb275ce75dab3884f1a9c68e01226625c4e2
|
22075b7f850d796afe5a0c06411eb5ff762357b7
|
refs/heads/master
| 2021-09-10T21:54:44.363710 | 2018-04-03T01:58:27 | 2018-04-03T01:58:27 | 126,106,563 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
# Generated by Django 2.0.3 on 2018-03-31 12:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('food', '0002_auto_20180331_1725'),
]
operations = [
migrations.AlterField(
model_name='restaurant',
name='contact_no',
field=models.CharField(max_length=20),
),
]
|
[
"[email protected]"
] | |
ff01db056009a80fa1000e2954fbb76c769b6e7e
|
a3d2620bbf25002c7b182600c2e40f8f06555e91
|
/exc/exc/wsgi.py
|
8d7d6db299d15b0077bd2774bf300955b5612354
|
[] |
no_license
|
alejo8591/backend-lab
|
782736a82933f705f825a1194369bfe13e86c0ec
|
4a02a9552083a7c877e91b0f8b81e37a8650cf54
|
refs/heads/master
| 2016-09-03T03:53:43.878240 | 2015-11-26T06:35:38 | 2015-11-26T06:35:38 | 3,911,349 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 381 |
py
|
"""
WSGI config for exc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "exc.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"[email protected]"
] | |
968290c1917596dac408fca7d1a91f4c18315524
|
3024cafafbfc75193105af7f225d3b12eb2aea46
|
/DjangoProjects/project24/iplapp/models.py
|
b6932bc062b857864ce7ec33dc7f0cac6088b6d7
|
[] |
no_license
|
jaishankarg24/Django-Rest-Framework
|
33266f6825d51abb8a512426baedf59f2ee957c8
|
809ee9208ffbef4202a8f4058a84f5322793af52
|
refs/heads/master
| 2023-03-02T20:56:38.051060 | 2021-02-12T05:37:48 | 2021-02-12T05:37:48 | 338,233,009 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
from django.db import models
# Create your models here.
class IplTable(models.Model):
    name = models.CharField(max_length=50)
    age = models.IntegerField()
    country = models.CharField(max_length=50)
|
[
"[email protected]"
] | |
5c5ff093f8e4848fe2435494f5abccda014f4507
|
84a1f9d626828b6ecaee4ef037081f4d8750a990
|
/编程/9月/9.12/习题答案.py
|
df9a5234a978fced165131300f75ac2e75628528
|
[] |
no_license
|
dujiaojingyu/Personal-programming-exercises
|
5a8f001efa038a0cb3b6d0aa10e06ad2f933fe04
|
72a432c22b52cae3749e2c18cc4244bd5e831f64
|
refs/heads/master
| 2020-03-25T17:36:40.734446 | 2018-10-01T01:47:36 | 2018-10-01T01:47:36 | 143,986,099 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,921 |
py
|
#coding=utf-8
import linecache
import time
now = time.time() # script start time
# Preliminary preparation: organize the data
data_keys = ('bid', 'uid', 'username', 'v_class', 'content', 'img', 'created_at', 'source', 'rt_num', 'cm_num', 'rt_uid', 'rt_username', 'rt_v_class', 'rt_content', 'rt_img', 'src_rt_num', 'src_cm_num', 'gender', 'rt_bid', 'location', 'rt_mid', 'mid', 'lat', 'lon', 'lbs_type', 'lbs_title', 'poiid', 'links', 'hashtags', 'ats', 'rt_links', 'rt_hashtags', 'rt_ats', 'v_url', 'rt_v_url')
keys = {data_keys[k]:k for k in xrange(0,len(data_keys))}
f = linecache.getlines('t.txt')
lines = [x[1:-1].split('","') for x in f] # split each row into its fields
#1 Output the total number of users
users = set([line[keys['username']] for line in lines])
user_total = len(set(users))
assert type(user_total) == int
#2 A list with every user's name
users = list(users)
assert type(users) == list
#3 How many tweets were published in November 2012
lines_from_2012_11 = filter(lambda line:line[keys['created_at']].startswith('2012-11'),lines)
lines_total_from_2012_11 = len(lines_from_2012_11)
assert type(lines_total_from_2012_11) == int
#
# #4 Which days of data does this file contain?
#
# users_by_date = [line[keys['created_at']].split(' ')[0] for line in lines]
#
# lines_by_created = list(set(users_by_date))
#
# lines_by_created.sort()
#
# assert type(lines_by_created) == list
#
#
# #5 In which hour of the day were the most tweets published?
# # todo: using the time module for the conversion would be best; the example below only illustrates string splitting
#
# hours = [int(line[keys['created_at']][11:13]) for line in lines]
#
# total_by_hour = [(h,hours.count(h)) for h in xrange(0,24) ]
#
# total_by_hour.sort(key=lambda k:k[1],reverse=True)
#
# max_hour = total_by_hour[0][0]
#
# assert type(max_hour) == int
#
#
# #6 For each day in the file, output the user who posted the most tweets
#
# dateline_by_user = {k:dict() for k in lines_by_created}
#
# for line in lines:
# dateline = line[keys['created_at']].split(' ')[0]
# username = line[keys['username']]
# if dateline_by_user[dateline].has_key(username):
# dateline_by_user[dateline][username] += 1
# else:
# dateline_by_user[dateline][username] = 1
#
# for k,v in dateline_by_user.items():
# us = v.items()
# us.sort(key=lambda k:k[1],reverse=True)
# dateline_by_user[k] = {us[0][0]:us[0][1]}
#
# assert type(dateline_by_user) == dict
#
#
# #7 Output, in chronological order, the hourly tweet frequency on 2012-11-03
#
# lines_from_2012_11_03 = filter(lambda line:line[keys['created_at']].startswith('2012-11-03'),lines)
#
# hourlines_from_2012_11_03 = {str(i):0 for i in xrange(0,24)}
#
# for line in lines_from_2012_11_03:
# hour = line[keys['created_at']][11:13]
# hourlines_from_2012_11_03[str(int(hour))] += 1
#
# hour_timeline_from_2012_11_03 = [(k,v) for k,v in hourlines_from_2012_11_03.items()]
# hour_timeline_from_2012_11_03.sort(key=lambda k:int(k[0]))
#
# assert type(hour_timeline_from_2012_11_03) == list
#
#
# #8 Count each tweet source in the file and how many times it appears
#
# source = set([k[keys['source']] for k in lines])
# source_dict = {s:0 for s in source}
# for line in lines:
# source_name = line[keys['source']]
# source_dict[source_name] += 1
# source_list = [(k,v) for k,v in source_dict.items()]
# source_list.sort(key=lambda k:k[1],reverse=True)
# assert type(source_list) == list
#
#
# #9 Count how many retweeted URLs start with "https://twitter.com/umiushi_no_uta"
#
# umi_total = 0
# for line in lines:
# if line[keys['rt_v_url']].startswith('https://twitter.com/umiushi_no_uta'):
# umi_total += 1
# assert type(umi_total) == int
#
#
# #10 How many tweets did the user with UID 573638104 post
#
# tweets_total_from_573638104 = 0
# for line in lines:
# if line[keys['uid']] == '573638104' :
# tweets_total_from_573638104 += 1
# assert type(tweets_total_from_573638104) == int
#
#
# #11 Define a function that accepts any number of user uid arguments (returning "null" if none exist) and returns the uid of the user who posted the most tweets.
#
# def get_user_by_max_tweets(*uids):
#
# '''
# @deprecated: arguments may be strings or numbers
# '''
#
# if len(uids) > 0:
# uids = filter(lambda u:type(u) == int or u.isdigit(),uids)
# uids = map(str,uids)
# if len(uids) > 0:
# uids_dict = {x:0 for x in uids}
# for line in lines:
# uid = line[keys['uid']]
# if uid in uids:
# uids_dict[uid] += 1
# uids_and_tweets_total = [(x,y) for x,y in uids_dict.items()]
# uids_and_tweets_total.sort(key=lambda k:k[1],reverse=True)
# return uids_and_tweets_total[0][0]
# return "null"
#
#
# assert get_user_by_max_tweets() == 'null'
# assert get_user_by_max_tweets('ab','cds') == 'null'
# assert get_user_by_max_tweets('ab','cds','123b') == 'null'
# assert get_user_by_max_tweets('12342','cd') == '12342'
# assert get_user_by_max_tweets('28803555',28803555) == '28803555'
# assert get_user_by_max_tweets('28803555',28803555,'96165754') == '28803555'
#
#
# #12 Which user posted the tweet with the longest content
#
# lines_by_content_length = [(line[keys['username']],len(line[keys['content']])) for line in lines]
# lines_by_content_length.sort(key=lambda k:k[1],reverse=True)
# user_by_max_content = lines_by_content_length[0][0]
# # todo: what if several users tie for the maximum?
# assert type(user_by_max_content) == str
#
#
# #13 Which user retweeted the most URLs
#
# lines_by_rt = [(line[keys['uid']],int(line[keys['rt_num']])) for line in lines if line[keys['rt_num']] != '']
# lines_by_rt.sort(key=lambda k:k[1],reverse=True)
# user_by_max_rt = lines_by_rt[0][0]
# assert type(user_by_max_rt) == str
#
#
# #14 Who posted the most tweets during the 11 o'clock hour.
#
# lines_on_hour11 = filter(lambda line:line[keys['created_at']].startswith('11',11,13),lines)
# lines_by_uid_on_hour11 = {k[keys['uid']]:0 for k in lines_on_hour11}
# for line in lines_on_hour11:
# uid = line[keys['uid']]
# lines_by_uid_on_hour11[uid] += 1
# d = [(k,v) for k,v in lines_by_uid_on_hour11.items()]
# d.sort(key=lambda k:k[1],reverse=True)
# uid_by_max_tweets_on_hour11 = d[0][0]
# # todo: what if several users tie for the maximum?
# assert type(uid_by_max_tweets_on_hour11) == str
#
#
# #15 Which user has the most source tweet URLs. (Requirement: output the user's uid as a string.)
#
# uid_by_v_url = {k[keys['uid']]:0 for k in lines}
# for line in lines:
# uid = line[keys['uid']]
# if lines[keys['v_url']] != '':
# uid_by_v_url[uid] += 1
# uid_sort_by_v_url = [(k,v) for k,v in uid_by_v_url.items()]
# uid_sort_by_v_url.sort(key=lambda k:k[1],reverse=True)
# uid_by_max_v_url = uid_sort_by_v_url[0][0]
# # todo: what if several users tie for the maximum?
# assert type(uid_by_max_v_url) == str
#
# print 'Elapsed time: %s' % (time.time() - now) # total running time
|
[
"[email protected]"
] | |
95aa037242063b122b3bd33f7bb1314f54c46850
|
11ad104b0309a2bffd7537d05e2ab3eaf4aed0ca
|
/tests/helpers/test_storage_remove.py
|
9a447771ea630816f159fba84f8ff655f447eb56
|
[
"Apache-2.0"
] |
permissive
|
koying/home-assistant
|
15e5d01a45fd4373b3d286e1b2ca5aba1311786d
|
9fc92ab04e0d1933cc23e89b4095714aee725f8b
|
refs/heads/dev
| 2023-06-24T01:15:12.150720 | 2020-11-01T12:27:33 | 2020-11-01T12:27:33 | 189,232,923 | 2 | 1 |
Apache-2.0
| 2023-01-13T06:04:15 | 2019-05-29T13:39:02 |
Python
|
UTF-8
|
Python
| false | false | 1,252 |
py
|
"""Tests for the storage helper with minimal mocking."""
import asyncio
from datetime import timedelta
import os
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.async_mock import patch
from tests.common import async_fire_time_changed, async_test_home_assistant
async def test_removing_while_delay_in_progress(tmpdir):
"""Test removing while delay in progress."""
loop = asyncio.get_event_loop()
hass = await async_test_home_assistant(loop)
test_dir = await hass.async_add_executor_job(tmpdir.mkdir, "storage")
with patch.object(storage, "STORAGE_DIR", test_dir):
real_store = storage.Store(hass, 1, "remove_me")
await real_store.async_save({"delay": "no"})
assert await hass.async_add_executor_job(os.path.exists, real_store.path)
real_store.async_delay_save(lambda: {"delay": "yes"}, 1)
await real_store.async_remove()
assert not await hass.async_add_executor_job(os.path.exists, real_store.path)
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert not await hass.async_add_executor_job(os.path.exists, real_store.path)
await hass.async_stop()
|
[
"[email protected]"
] | |
34906a49299704ce8c70279a90752f8f06fab619
|
7c8bd2e26fdabf1555e0150272ecf035f6c21bbd
|
/ps프로젝트/BS/숫자카드2.py
|
8734a278232da1fa846614d424d7f3945e467c48
|
[] |
no_license
|
hyeokjinson/algorithm
|
44090c2895763a0c53d48ff4084a96bdfc77f953
|
46c04e0f583d4c6ec4f51a24f19a373b173b3d5c
|
refs/heads/master
| 2021-07-21T10:18:43.918149 | 2021-03-27T12:27:56 | 2021-03-27T12:27:56 | 245,392,582 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
from collections import Counter
def check(v):
lt=0
rt=n-1
cnt=0
while lt<=rt:
mid=(lt+rt)//2
if arr[mid]==v:
return 1
elif arr[mid]>v:
rt=mid-1
else:
lt=mid+1
return 0
if __name__ == '__main__':
n=int(input())
arr=list(map(int,input().split()))
arr.sort()
m=int(input())
arr1=list(map(int,input().split()))
c=Counter(arr)
res=[]
for i in range(m):
if check(arr1[i]):
res.append(c[arr1[i]])
else:
res.append(0)
for x in res:
print(x,end=' ')
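# A small worked example (hypothetical input, not part of the original submission):
# with n=4, cards [1 2 2 3] and m=2 queries [2 5], the sorted cards are [1, 2, 2, 3];
# check(2) finds 2 via binary search, so Counter reports 2 copies, while check(5) fails,
# so the program prints: 2 0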
|
[
"[email protected]"
] | |
9d7d2d581d50ca04cf1b4329b5b87bf803707862
|
c2e6b6119a1d03bc293572d568d21a6b76762a1f
|
/ex.py
|
30c1077d8fe6fac7ee1c285147c7a62bef2ee59a
|
[] |
no_license
|
kafura-kafiri/Fesss
|
24a92e5185881066b0d2f61d1649ab0e43a0f479
|
7b660723237dfbdbd3ba9772a9d2a9c771807bb7
|
refs/heads/master
| 2021-05-03T17:17:54.799918 | 2018-02-06T16:06:40 | 2018-02-06T16:06:40 | 120,443,736 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,280 |
py
|
# LSTM for international airline passengers problem with regression framing
import numpy
from pandas import read_csv
import datetime
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset
def parse(x):
return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
dataframe = read_csv('fesss.csv', parse_dates=['Date'], engine='python', date_parser=parse)
dataset = dataframe.values
start = dataset[0, 0]
for i in range(len(dataset)):
_start = dataset[i, 0]
dataset[i, 0] = (dataset[i, 0] - start).total_seconds()
start = _start
dataset = dataset.astype('float32')
# normalize the dataset
delta_scaler = MinMaxScaler(feature_range=(0, 1))
delay_scaler = MinMaxScaler(feature_range=(0, 1))
# print(dataset)
def scale(scaler, dataset, i):
data = dataset[:, i]
data = data.reshape(data.shape[0], 1)
data = scaler.fit_transform(data)
dataset[:, i] = data.reshape(data.shape[0])
return dataset
dataset = scale(delta_scaler, dataset, 0)
dataset = scale(delay_scaler, dataset, 1)
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset) - look_back):
l = [dataset[i + 1][0]]
l.extend(dataset[i:(i + look_back), 1])
l.append(dataset[i + 1][2])
dataX.append(l)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
look_back = 1
dataX, dataY = create_dataset(dataset, look_back)
# reshape input to be [samples, time steps, features]
dataX = numpy.reshape(dataX, (dataX.shape[0], 1, dataX.shape[1]))
print(dataset)
print(dataX)
print(dataY)
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back + 2)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(dataX, dataY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(dataX)
from math import sqrt
rmse = sqrt(mean_squared_error(dataY, trainPredict))
print('RMSE: %.3f' % rmse)
|
[
"[email protected]"
] | |
8ef2d2abe68d0b5499e760395b40896a467518c4
|
2e9193625039cbd93a76a1ac1115e84599c6afcd
|
/HashTable/hashtableImp.py
|
1f19d4d3fcdd4ca486866e38beb7dbb1a273fa65
|
[] |
no_license
|
hieudx149/DatastructAndAlgorithms
|
d54b79c3375dfb17989160a1d2dc74505061eae5
|
a5f147b2f644f2a273c50756c9d297fa8b6bcd08
|
refs/heads/master
| 2023-06-16T13:38:32.039274 | 2021-07-13T10:35:54 | 2021-07-13T10:35:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,581 |
py
|
class hash_table:
def __init__(self, size):
self.size = size
self.data = [None]*self.size
def __str__(self): # As in the array implementation, this method is used to print the attributes of the class object in a dictionary format
return str(self.__dict__)
def _hash(self, key):
hash = 0
for i in range(len(key)):
hash = (hash + ord(key[i])*i) % self.size
return hash
def set(self, key, value):
address = self._hash(key)
if not self.data[address]:
self.data[address] = [[key, value]]
else:
self.data[address].append([key, value])
print(self.data)
def get(self, key):
address = self._hash(key)
bucket = self.data[address]
if bucket:
for i in range(len(bucket)):
if bucket[i][0] == key:
return bucket[i][1]
return None
def keys(self):
list_key = []
for i in range(self.size):
if self.data[i]:
for j in range(len(self.data[i])):
list_key.append(self.data[i][j][0])
return list_key
def values(self):
list_value = []
for i in range(self.size):
if self.data[i]:
for j in range(len(self.data[i])):
list_value.append(self.data[i][j][1])
return list_value
new_hash = hash_table(5)
new_hash.set('duong', 100)
new_hash.set('xuan', 200)
new_hash.set('hieu', 300)
print(new_hash.keys())
print(new_hash.values())
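# A quick hedged check of lookups on the table built above (same keys as the set() calls):
print(new_hash.get('duong'))    # -> 100, found by scanning the bucket at _hash('duong')
print(new_hash.get('missing'))  # -> None, since no entry with that key exists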
|
[
"[email protected]"
] | |
5cbcaaa43ef258823c6c27044d41b401cda0c79d
|
6b301b0b0d5fea69e6ab6d3fcfd0a9741143a9b7
|
/config/jupyter/.ipython/profile_default/startup/00-setup-spark.py
|
0219daccbe9e74cbcbd99ab8d59a1f0b6a772a72
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
frankiegu/pipeline
|
c7a166e80ccc6a351c32fb1918a41268f2380140
|
3526f58cc9b4d824a23300cd60c647a753902774
|
refs/heads/master
| 2021-01-11T06:09:36.914324 | 2016-10-03T05:33:41 | 2016-10-03T05:33:41 | 69,836,618 | 1 | 0 | null | 2016-10-03T02:56:09 | 2016-10-03T02:56:09 | null |
UTF-8
|
Python
| false | false | 489 |
py
|
import glob
import os
import sys
# Setup SPARK_HOME
spark_home = os.getenv('SPARK_HOME', None)
if not spark_home:
raise ValueError('SPARK_HOME environment variable is not set')
# System sys.path
sys.path.insert(0, os.path.join(spark_home, 'python'))
for lib in glob.glob(os.path.join(spark_home, 'python/lib/py4j-*-src.zip')):
sys.path.insert(0, lib)
os.environ['PYSPARK_SUBMIT_ARGS']='--master %s %s pyspark-shell' % (os.getenv('SPARK_MASTER'), os.getenv('SPARK_SUBMIT_ARGS'))
|
[
"[email protected]"
] | |
bc4d8fdf44a8f6da59b0a8ead9eefac7907e6a29
|
b3455474da0bc27c913ff88908be0d0bddba352d
|
/5.AI/1.Machine Learning/196_mushroom_train2.py
|
0919272787d2e7922608902f2ded949c86259dab
|
[] |
no_license
|
rntva/JumpToPython
|
7286bc94e40b553fa7b9fbca7934f2e35f63b54e
|
090f0ed5bf28ae7832e5edde11936b71b4fb324b
|
refs/heads/master
| 2021-05-01T02:33:44.528975 | 2018-07-18T08:24:07 | 2018-07-18T08:24:07 | 121,182,629 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,230 |
py
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
# Read the data
mr = pd.read_csv("mushroom.csv", header=None)
# Expand the categorical variables inside the data
label = []
data = []
attr_list = []
for row_index, row in mr.iterrows() :
    label.append(row.iloc[0])  # first column is the class label (.iloc replaces the removed .ix accessor)
    exdata = []
    for col, v in enumerate(row.iloc[1:]) :
if row_index == 0 :
attr = {"dic" : {}, "cnt" : 0}
attr_list.append(attr)
else :
attr = attr_list[col]
        # Represent each mushroom feature symbol as a one-hot array
d = [0,0,0,0,0,0,0,0,0,0,0,0]
if v in attr["dic"] : idx = attr["dic"][v]
else :
idx = attr["cnt"]
attr["dic"][v] = idx
attr["cnt"] += 1
d[idx] = 1
exdata += d
data.append(exdata)
# Split into training and test data
data_train, data_test, label_train, label_test = train_test_split(data, label)
# Train the classifier
clf = RandomForestClassifier()
clf.fit(data_train, label_train)
# Predict
pre = clf.predict(data_test)
# Evaluate the result
ac_score = metrics.accuracy_score(label_test, pre)
print("Accuracy:", ac_score)
|
[
"[email protected]"
] | |
fedb6ed76a5d7115dd820e753d6a9561b86a1f9e
|
36e27ca74b734994fb2e5cd4e328e7b82202d8cd
|
/nodarb/migrations/0007_nodarb_tips_rada.py
|
23417ec23dc96ae31da304e4df5cc8abde817eeb
|
[] |
no_license
|
svabis/vf
|
5e9513f3a767a9561e2fb8bd3e37bb3c03d113dd
|
d83a4afd177e4f7007a9ce824ae5ed36f18654fc
|
refs/heads/master
| 2020-05-21T21:19:59.952463 | 2018-06-04T11:11:50 | 2018-06-04T11:11:50 | 84,647,341 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 404 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nodarb', '0006_auto_20170311_1644'),
]
operations = [
migrations.AddField(
model_name='nodarb_tips',
name='rada',
field=models.BooleanField(default=True),
),
]
|
[
"[email protected]"
] | |
a891b7dbf6d6642a5556df699356d8e6d45ea81e
|
9eef031728a6cdcd681cad9ba6b0709269383905
|
/examples/test/test_analyzer.py
|
bd467f4878203aa3e45a31a9040cd5ead57b0c12
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
kbeckmann/liteeth
|
906b6f30b5d3be28f2bfac91704c7d5ddf26e85e
|
54acf9fd76c226d7760294ffde86418e52e0951b
|
refs/heads/master
| 2022-12-24T17:02:42.834415 | 2020-08-24T20:14:35 | 2020-08-24T20:14:35 | 300,029,015 | 0 | 0 |
NOASSERTION
| 2020-09-30T19:03:51 | 2020-09-30T19:03:50 | null |
UTF-8
|
Python
| false | false | 570 |
py
|
#!/usr/bin/env python3
#
# This file is part of LiteEth.
#
# Copyright (c) 2015-2018 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
from litex import RemoteClient
wb = RemoteClient()
wb.open()
# # #
from litescope.software.driver.analyzer import LiteScopeAnalyzerDriver
analyzer = LiteScopeAnalyzerDriver(wb.regs, "analyzer", debug=True)
analyzer.configure_trigger(cond={})
analyzer.configure_subsampler(1)
analyzer.run(offset=128, length=256)
analyzer.wait_done()
analyzer.upload()
analyzer.save("dump.vcd")
# # #
wb.close()
|
[
"[email protected]"
] | |
ffb723bce5647ba3b185cf4e227e25b2ff78a4d7
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/frdyon001/question2.py
|
26dadb99dfec05c266eb818b46161070e84fcf6d
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,854 |
py
|
# Student Number: FRDYON001
# Name: Yonela Ford
# 30 Second Rule Expert
# Date: 08 March 2014
def rule():
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
ans=input("Did anyone see you? (yes/no)\n")
if (ans=="yes"):
ans=input("Was it a boss/lover/parent? (yes/no)\n")
if (ans=="yes"):
ans=input("Was it expensive? (yes/no)\n")
if (ans=="yes"):
ans=input("Can you cut off the part that touched the floor? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Your call.")
elif (ans=="no"):
ans=input("Is it chocolate? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Don't eat it.")
elif (ans=="no"):
print("Decision: Eat it.")
elif (ans=="no"):
ans=input("Was it sticky? (yes/no)\n")
if (ans=="yes"):
ans=input("Is it a raw steak? (yes/no)\n")
if (ans=="yes"):
ans=input("Are you a puma? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Don't eat it.")
elif (ans=="no"):
ans=input("Did the cat lick it? (yes/no)\n")
if (ans=="yes"):
ans=input("Is your cat healthy? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Your call.")
elif (ans=="no"):
print( "Decision: Eat it.")
elif (ans=="no"):
ans=input("Is it an Emausaurus? (yes/no)\n")
if (ans=="yes"):
ans=input("Are you a Megalosaurus? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Don't eat it.")
elif (ans=="no"):
ans=input("Did the cat lick it? (yes/no)\n")
if (ans=="yes"):
ans=input("Is your cat healthy? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Your call.")
elif (ans=="no"):
print("Decision: Eat it.")
rule()
|
[
"[email protected]"
] | |
371ee8cb4b4f7e37636a6fbfe01b1f1ba8180744
|
f8b5aafac15f408a48fabf853a918015c927e6fe
|
/bk_tomo/venv/venv27/bin/openstack
|
ef4239b2369d1cd6ac9e4daa1bf696a84ace7ec5
|
[] |
no_license
|
to30/tmp
|
bda1ac0ca3fc61e96c2a1c491367b698d7e97937
|
ec809683970af6787728c2c41f161f416155982a
|
refs/heads/master
| 2021-01-01T04:25:52.040770 | 2016-05-13T16:34:59 | 2016-05-13T16:34:59 | 58,756,087 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 243 |
#!/home/tomo/venv/venv27/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from openstackclient.shell import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
f59f062c20bb92420bb4ec172e9e3f763356ef80
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/quiz04_20200628163202.py
|
43cf11c468754194ccbd6ea39f998db2cd2226d8
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 826 |
py
|
# Your school is hosting a Python coding contest.
# To boost attendance, it was decided to run a comment event.
# Among the commenters, a random draw gives 1 person a chicken coupon and 3 people coffee coupons.
# Write the drawing program.
# Condition 1: for simplicity, assume 20 people commented and their IDs are 1~20
# Condition 2: draw at random regardless of comment content, with no duplicates
# Condition 3: use shuffle and sample from the random module
# (sample output)
# -- Winner announcement --
# Chicken winner: 1
# Coffee winners: [2, 3, 4]
# -- Congratulations. --
# (usage example)
from random import *
# lst = [1, 2, 3, 4, 5]
# print(lst)
# shuffle(lst)
# print(lst)
# print(sample(lst, 1))
users = list(range(1, 21))    # commenter IDs 1 to 20 (condition 1); random(20) was a TypeError
shuffle(users)                # shuffle first, as condition 3 asks
winners = sample(users, 4)    # 4 distinct winners, so no duplicates (condition 2)
print(" -- Winner announcement --")
print("Chicken winner: {0}".format(winners[0]))
print("Coffee winners: {0}".format(winners[1:]))
print(" -- Congratulations. --")
|
[
"[email protected]"
] | |
4453fb58e33a80b6a1510a8e4e5c633e06b4cdc2
|
e36985669a2b068dfb3e43b7f5870dc114bb158b
|
/python_code/dataExtraction.py
|
7722d25b7d06ff6e71446c9ef08cf4b970e527d8
|
[] |
no_license
|
assassint2017/Data-extraction-UI
|
b3f0f43dc48e12c0da158bdb4a7c2c9dd5d92ab5
|
d7e1b97100ad97b334f03b0fbf09c2a506339b1c
|
refs/heads/master
| 2020-04-11T06:18:50.417214 | 2018-12-21T12:38:47 | 2018-12-21T12:38:47 | 161,577,841 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,732 |
py
|
"""
Data extraction code
"""
import datetime
import pandas as pd
from numpy import nan
#------------------------- Settings section -------------------------------
# Path of the source csv file; the path must not contain Chinese characters
# csvDir = 'C:\\Users\\14595\\Desktop\\2018HB example .csv.gz'
# Path of the extracted csv file; the path must not contain Chinese characters
# extDir = 'C:\\Users\\14595\\Desktop\\ext.csv'
# Path of the per-sentinel-site case-count summary file; the path must not contain Chinese characters
# summaryDir = 'C:\\Users\\14595\\Desktop\\summary.csv'
# Set the time range
# start = pd.Timestamp(datetime.date(year=2018, month=1, day=1))
# end = pd.Timestamp(datetime.date(year=2018, month=5, day=30))
# Set the selected regions
# locs = [42010200, 42050300, 42050600]
#---------------------------------------------------------------------
def dataExtraction(csvDir, extDir, summaryDir, start, end, locs):
    # Read the csv file
csv = pd.read_csv(csvDir, compression='gzip', encoding='gbk')
    # Parse the diagnosis date column into datetime format
csv['诊断时间'] = pd.to_datetime(csv['诊断时间'], format='%Y/%m/%d')
    # Filter according to the given conditions
    if start is None and end is None:  # only region codes were given
csv = csv[csv['报告单位地区编码'].isin(locs)]
    elif locs is None:  # only a diagnosis-time range was given
csv = csv[(csv['诊断时间'] >= start) & (csv['诊断时间'] <= end)]
    else:  # both conditions were given
csv = csv[(csv['诊断时间'] >= start) & (csv['诊断时间'] <= end) & (csv['报告单位地区编码'].isin(locs))]
    # Save the extracted data to a csv file
csv.to_csv(extDir, index=0, encoding='gbk')
    def removeSpace(item):
        """
        Strip spaces accidentally typed during data entry
        """
        return item.strip()
    csv['录卡用户所属单位'] = csv['录卡用户所属单位'].apply(removeSpace)  # assign back, otherwise the stripped values are discarded
temp = pd.value_counts(csv['录卡用户所属单位'])
codes = []
for hospital in list(temp.index):
index = csv[csv['录卡用户所属单位'] == hospital].index.tolist()[0]
codes.append(csv['报告单位地区编码'][index])
summary = pd.DataFrame()
summary['报告单位地区编码'] = codes
summary['报告单位'] = list(temp.index)
summary['病例数'] = temp.values
summary.sort_values(by=['报告单位地区编码'], inplace=True)
summary.reset_index(drop=True, inplace=True)
nanlist = []
for i in range(1, len(summary['报告单位地区编码'])):
if summary.loc[i, '报告单位地区编码'] == summary.loc[i - 1, '报告单位地区编码']:
nanlist.append(i)
for i in nanlist:
summary.loc[i, '报告单位地区编码'] = nan
summary.to_csv(summaryDir, index=False, encoding='gbk')
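# A hedged usage sketch, wiring the function to the illustrative settings commented out at the
# top of this file (the paths, dates and region codes are those example values, not real data):
if __name__ == '__main__':
    dataExtraction('C:\\Users\\14595\\Desktop\\2018HB example .csv.gz',
                   'C:\\Users\\14595\\Desktop\\ext.csv',
                   'C:\\Users\\14595\\Desktop\\summary.csv',
                   pd.Timestamp(datetime.date(year=2018, month=1, day=1)),
                   pd.Timestamp(datetime.date(year=2018, month=5, day=30)),
                   [42010200, 42050300, 42050600])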
|
[
"[email protected]"
] | |
99bb440e3d91a657af83b6b5699a5675b2c46f7c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03136/s297842517.py
|
a2a6230496234027046d6691748a5f445af9dd64
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 163 |
py
|
n = int(input())
a = list(map(int, input().split()))
b = [0]*n
b = sorted(a)
c = 0
for i in range(n-1):
c += b[i]
if c>b[n-1]:
print("Yes")
else:
print("No")
|
[
"[email protected]"
] | |
d5c4cac739d6c9ad1a641938dda9973c912c84c5
|
e944d288093c9234c3a6a2a76ffe4e3c9b236cf1
|
/annotation_utils/coco/structs/__init__.py
|
66e257d06be64002c0bce0580e1d58fd6c768ce7
|
[
"MIT"
] |
permissive
|
darwinharianto/annotation_utils
|
598b043345790580e99f34f159b9612b9b1bcd52
|
1cbdadaa28ff945e705dd7b806dda395e32ab23c
|
refs/heads/master
| 2022-04-27T01:20:10.738778 | 2020-04-27T09:23:57 | 2020-04-27T09:23:57 | 255,525,300 | 0 | 0 |
MIT
| 2020-04-27T09:23:59 | 2020-04-14T06:10:57 |
Python
|
UTF-8
|
Python
| false | false | 245 |
py
|
from .objects import COCO_Info, COCO_License, COCO_Image, \
COCO_Annotation, COCO_Category
from .handlers import COCO_License_Handler, COCO_Image_Handler, \
COCO_Annotation_Handler, COCO_Category_Handler
from .dataset import COCO_Dataset
|
[
"[email protected]"
] | |
a1f02577c0adfa04d1396283c0f946dca6808285
|
77ee1f677ab2ececb821a11be128b76bcf0e8d6f
|
/electrum_mona/gui/qt/lightning_dialog.py
|
1d709aed9935b2c01bce4e473c6c8bdd4f25e9d9
|
[
"MIT"
] |
permissive
|
zcore-dev/electrum-mona
|
c74e6142a0f34721be70dba68d524ae9ce03179c
|
2beb0c9c7794e8b03d1725bae41ee8b792c57275
|
refs/heads/master
| 2020-08-22T15:32:55.604727 | 2019-10-21T22:56:29 | 2019-10-21T22:56:29 | 216,427,159 | 0 | 0 |
MIT
| 2019-10-20T21:03:48 | 2019-10-20T21:03:48 | null |
UTF-8
|
Python
| false | false | 3,658 |
py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QDialog, QWidget, QLabel, QVBoxLayout, QCheckBox,
QGridLayout, QPushButton, QLineEdit, QTabWidget)
from electrum_mona.i18n import _
from .util import HelpLabel, MyTreeView, Buttons
class LightningDialog(QDialog):
def __init__(self, gui_object):
QDialog.__init__(self)
self.gui_object = gui_object
self.config = gui_object.config
self.network = gui_object.daemon.network
self.setWindowTitle(_('Lightning Network'))
self.setMinimumSize(600, 20)
vbox = QVBoxLayout(self)
self.num_peers = QLabel('')
vbox.addWidget(self.num_peers)
self.num_nodes = QLabel('')
vbox.addWidget(self.num_nodes)
self.num_channels = QLabel('')
vbox.addWidget(self.num_channels)
self.status = QLabel('')
vbox.addWidget(self.status)
vbox.addStretch(1)
b = QPushButton(_('Close'))
b.clicked.connect(self.close)
vbox.addLayout(Buttons(b))
self.network.register_callback(self.on_channel_db, ['channel_db'])
self.network.register_callback(self.set_num_peers, ['gossip_peers'])
self.network.register_callback(self.set_unknown_channels, ['unknown_channels'])
self.network.channel_db.update_counts() # trigger callback
self.set_num_peers('', self.network.lngossip.num_peers())
self.set_unknown_channels('', len(self.network.lngossip.unknown_ids))
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.num_nodes.setText(_(f'{num_nodes} nodes'))
self.num_channels.setText(_(f'{num_channels} channels'))
def set_num_peers(self, event, num_peers):
self.num_peers.setText(_(f'Connected to {num_peers} peers'))
def set_unknown_channels(self, event, unknown):
self.status.setText(_(f'Requesting {unknown} channels...') if unknown else '')
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def closeEvent(self, event):
self.gui_object.lightning_dialog = None
event.accept()
|
[
"[email protected]"
] | |
fc9b0c269aecdb44c4736fe6b9da03555f7de8e3
|
31622dd16963b459ac6eec71fcf54e4d243ac773
|
/edu_sharing_client/models/license.py
|
073b8ab7d8a99b38a95e9902e2a8e4a23e2cd02e
|
[] |
no_license
|
torsten-simon/oeh-search-etl
|
95e6e92698a97c98ef9d5b02076edcf993736d6f
|
eacdadcd8af169cb54629db0d2d46a5616f854a6
|
refs/heads/master
| 2023-04-16T05:08:41.194239 | 2020-11-16T09:51:59 | 2020-11-16T09:51:59 | 318,169,232 | 0 | 0 | null | 2023-04-03T23:04:46 | 2020-12-03T11:20:44 | null |
UTF-8
|
Python
| false | false | 3,484 |
py
|
# coding: utf-8
"""
edu-sharing Repository REST API
The public restful API of the edu-sharing repository. # noqa: E501
OpenAPI spec version: 1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class License(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'icon': 'str',
'url': 'str'
}
attribute_map = {
'icon': 'icon',
'url': 'url'
}
def __init__(self, icon=None, url=None): # noqa: E501
"""License - a model defined in Swagger""" # noqa: E501
self._icon = None
self._url = None
self.discriminator = None
if icon is not None:
self.icon = icon
if url is not None:
self.url = url
@property
def icon(self):
"""Gets the icon of this License. # noqa: E501
:return: The icon of this License. # noqa: E501
:rtype: str
"""
return self._icon
@icon.setter
def icon(self, icon):
"""Sets the icon of this License.
:param icon: The icon of this License. # noqa: E501
:type: str
"""
self._icon = icon
@property
def url(self):
"""Gets the url of this License. # noqa: E501
:return: The url of this License. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this License.
:param url: The url of this License. # noqa: E501
:type: str
"""
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(License, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, License):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
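# A minimal hedged usage sketch (illustrative values only, not part of the generated model):
if __name__ == "__main__":
    example = License(icon="https://example.org/cc-by.svg", url="https://example.org/license")
    print(example.to_dict())  # {'icon': 'https://example.org/cc-by.svg', 'url': 'https://example.org/license'}
    print(example == License(icon=example.icon, url=example.url))  # True: __eq__ compares the instances' __dict__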
|
[
"[email protected]"
] | |
d2edaeec8fdcd119849df0305b0cb817b3235ebe
|
8d9318a33afc2c3b5ca8ac99fce0d8544478c94a
|
/Books/Casandra DB/opscenter-5.1.0/lib/py/orbited/proxy.py
|
4c0e80c1f97cce4bb513bffb9be5583f06edd599
|
[] |
no_license
|
tushar239/git-large-repo
|
e30aa7b1894454bf00546312a3fb595f6dad0ed6
|
9ee51112596e5fc3a7ab2ea97a86ec6adc677162
|
refs/heads/master
| 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:ba1b92cda51cc0fbe137994a7b857befa45aa64d45009e4fb34ed1df78d0f3fc
size 5501
|
[
"[email protected]"
] | |
8f55ee77bb2e6f0f501c6aae41fe353d5946e7ed
|
48f092fd8191b0218df8605dc7125e526764e59e
|
/NestedLoops/venv/Scripts/pip-script.py
|
1f860a6a2d99a98a14ef6f35a31d2812b31131f3
|
[] |
no_license
|
LalityaSawant/Python-Projects
|
2edb430c094fe3d6b4e706cc61f885aa07e24dff
|
b142708256e26867f09b3063f5f3fffa305ec496
|
refs/heads/master
| 2020-05-01T03:00:26.012301 | 2019-03-23T22:09:33 | 2019-03-23T22:09:33 | 177,235,109 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 443 |
py
|
#!C:\Users\lsawant\Documents\Learning\Python\PycharmProjects\NestedLoops\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"[email protected]"
] | |
10cefb112ffc8a72f2ddcd285ff5b6f871ecf497
|
41523dd4871e8ed1043d2b3ddf73417fcbdde209
|
/day16/map函数.py
|
7a19700236dcf557aafb01afb59951babcaa5d8d
|
[] |
no_license
|
WayneChen1994/Python1805
|
2aa1c611f8902b8373b8c9a4e06354c25f8826d6
|
a168cd3b7749afc326ec4326db413378fd3677d5
|
refs/heads/master
| 2020-03-30T23:19:00.773288 | 2018-11-02T10:47:40 | 2018-11-02T10:47:40 | 151,697,105 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 943 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:Wayne
'''
What the map function does:
it applies the given function, in turn, to every object in the sequence,
and returns the results as an iterator
'''
'''
Requirement: convert the list ["1", "2", "3", "4", "5"]
into [1, 2, 3, 4, 5]; write it as a function.
'''
def func(alist):
return [int(x) for x in alist]
list1 = ["1", "2", "3", "4", "5"]
print(list1)
print(func(list1))
res = map(int, list1)
print(list(res))
'''
map(func,lsd)
argument 1: the function to apply (it must take exactly one argument)
argument 2: the sequence to apply it to
'''
'''
Using map, build the sequence [1, 4, 9, ..., n^2]; implement the requirement in one line, with n read from the console.
'''
def func2(n):
return list(map(lambda x:x**2, range(1, n+1)))
num = int(input("Please enter the value of n: "))
print(func2(num))
print(list(map(lambda n:n*n, range(1, int(input("Please enter an integer: "))+1))))
|
[
"[email protected]"
] | |
38acb8c211006d953999bf2dfc3090c9f9313ea5
|
ee27325f6a3e6a2d1f5e004aa60f5974ad864ae9
|
/contrib/python/plotly/py3/plotly/validators/contourcarpet/__init__.py
|
09c50961c6d1e808ad2e54b12da590314f6b6cc2
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
alvinahmadov/catboost
|
f32d2b16be9db7439e429c88feb5676de842fc89
|
a6e0caa4779b31199f535cf43b09879d7c653abe
|
refs/heads/master
| 2023-06-12T19:29:52.028508 | 2023-05-11T18:33:03 | 2023-05-11T18:33:03 | 202,584,937 | 0 | 0 |
Apache-2.0
| 2019-08-15T17:35:23 | 2019-08-15T17:35:23 | null |
UTF-8
|
Python
| false | false | 4,621 |
py
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._zsrc import ZsrcValidator
from ._zmin import ZminValidator
from ._zmid import ZmidValidator
from ._zmax import ZmaxValidator
from ._zauto import ZautoValidator
from ._z import ZValidator
from ._yaxis import YaxisValidator
from ._xaxis import XaxisValidator
from ._visible import VisibleValidator
from ._uirevision import UirevisionValidator
from ._uid import UidValidator
from ._transpose import TransposeValidator
from ._textsrc import TextsrcValidator
from ._text import TextValidator
from ._stream import StreamValidator
from ._showscale import ShowscaleValidator
from ._showlegend import ShowlegendValidator
from ._reversescale import ReversescaleValidator
from ._opacity import OpacityValidator
from ._ncontours import NcontoursValidator
from ._name import NameValidator
from ._metasrc import MetasrcValidator
from ._meta import MetaValidator
from ._line import LineValidator
from ._legendwidth import LegendwidthValidator
from ._legendrank import LegendrankValidator
from ._legendgrouptitle import LegendgrouptitleValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hovertextsrc import HovertextsrcValidator
from ._hovertext import HovertextValidator
from ._fillcolor import FillcolorValidator
from ._db import DbValidator
from ._da import DaValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._contours import ContoursValidator
from ._colorscale import ColorscaleValidator
from ._colorbar import ColorbarValidator
from ._coloraxis import ColoraxisValidator
from ._carpet import CarpetValidator
from ._btype import BtypeValidator
from ._bsrc import BsrcValidator
from ._b0 import B0Validator
from ._b import BValidator
from ._autocontour import AutocontourValidator
from ._autocolorscale import AutocolorscaleValidator
from ._atype import AtypeValidator
from ._asrc import AsrcValidator
from ._a0 import A0Validator
from ._a import AValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._zsrc.ZsrcValidator",
"._zmin.ZminValidator",
"._zmid.ZmidValidator",
"._zmax.ZmaxValidator",
"._zauto.ZautoValidator",
"._z.ZValidator",
"._yaxis.YaxisValidator",
"._xaxis.XaxisValidator",
"._visible.VisibleValidator",
"._uirevision.UirevisionValidator",
"._uid.UidValidator",
"._transpose.TransposeValidator",
"._textsrc.TextsrcValidator",
"._text.TextValidator",
"._stream.StreamValidator",
"._showscale.ShowscaleValidator",
"._showlegend.ShowlegendValidator",
"._reversescale.ReversescaleValidator",
"._opacity.OpacityValidator",
"._ncontours.NcontoursValidator",
"._name.NameValidator",
"._metasrc.MetasrcValidator",
"._meta.MetaValidator",
"._line.LineValidator",
"._legendwidth.LegendwidthValidator",
"._legendrank.LegendrankValidator",
"._legendgrouptitle.LegendgrouptitleValidator",
"._legendgroup.LegendgroupValidator",
"._idssrc.IdssrcValidator",
"._ids.IdsValidator",
"._hovertextsrc.HovertextsrcValidator",
"._hovertext.HovertextValidator",
"._fillcolor.FillcolorValidator",
"._db.DbValidator",
"._da.DaValidator",
"._customdatasrc.CustomdatasrcValidator",
"._customdata.CustomdataValidator",
"._contours.ContoursValidator",
"._colorscale.ColorscaleValidator",
"._colorbar.ColorbarValidator",
"._coloraxis.ColoraxisValidator",
"._carpet.CarpetValidator",
"._btype.BtypeValidator",
"._bsrc.BsrcValidator",
"._b0.B0Validator",
"._b.BValidator",
"._autocontour.AutocontourValidator",
"._autocolorscale.AutocolorscaleValidator",
"._atype.AtypeValidator",
"._asrc.AsrcValidator",
"._a0.A0Validator",
"._a.AValidator",
],
)
|
[
"[email protected]"
] | |
c484b176ad74bbf3c3d2c6945058b3f6fa039104
|
1978a9455159b7c2f3286e0ad602652bc5277ffa
|
/exercises/05_basic_scripts/task_5_2b.py
|
942e752a8c38f07e0e2a188e036ef30e8781ecff
|
[] |
no_license
|
fortredux/py_net_eng
|
338fd7a80debbeda55b5915dbfba4f5577279ef0
|
61cf0b2a355d519c58bc9f2b59d7e5d224922890
|
refs/heads/master
| 2020-12-03T17:32:53.598813 | 2020-04-08T20:55:45 | 2020-04-08T20:55:45 | 231,409,656 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,047 |
py
|
# -*- coding: utf-8 -*-
'''
Task 5.2b
Modify the script from task 5.2a so that
the network/mask are not requested from the user
but are passed to the script as arguments.
Restriction: all tasks must be solved using only the topics covered so far.
'''
from sys import argv
ip = str(argv[1])
mask = int(argv[2])
host_net_lst = ip.split('.')
host_net_lst[3] = '0'
mask_32 = int('1' * mask)
mask_32 = '{:<032}'.format(mask_32)
template = '''
Network:
{0:<8} {1:<8} {2:<8} {3:<8}
{0:08b} {1:08b} {2:08b} {3:08b}
Mask:
/{4:}
{5:<8} {6:<8} {7:<8} {8:<8}
{9:8} {10:8} {11:8} {12:8}
'''
print(template.format(int(host_net_lst[0]), int(host_net_lst[1]), int(host_net_lst[2]), int(host_net_lst[3]), mask,
int(mask_32[0:8], 2), int(mask_32[8:16], 2), int(mask_32[16:24], 2), int(mask_32[24:32], 2),
mask_32[0:8], mask_32[8:16], mask_32[16:24], mask_32[24:32]))
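# A hedged usage sketch (example values only): running
#     python task_5_2b.py 192.168.10.1 24
# prints the network 192.168.10.0 as decimal and binary octets, followed by the /24 mask
# as 255 255 255 0 and its binary form 11111111 11111111 11111111 00000000.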
|
[
"[email protected]"
] | |
fe91480c51ec9d9e11d8cbf4c07c3dbad667f8a4
|
f2f21c643d1f5459253989e7cdba85c064cca8ce
|
/adding_bootstarp/adding_bootstarp/wsgi.py
|
b02fcd063eb36aa3dc1d03dc3104e13e690ebccf
|
[] |
no_license
|
NiteshTyagi/django_tutorial
|
342decea7532f1efb200b9f45e4123c581aad43f
|
3353f0d2907a00f43e1faee2b97abd9af66ca08f
|
refs/heads/master
| 2022-03-05T19:46:50.642154 | 2022-03-01T04:53:14 | 2022-03-01T04:53:14 | 205,629,609 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
"""
WSGI config for adding_bootstarp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'adding_bootstarp.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] |