blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3 to 616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0 to 112 items) | license_type (2 classes) | repo_name (string, 5 to 115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (188 classes) | content (string, 3 to 10.2M chars) | authors (list, 1 item) | author_id (string, 1 to 132 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f89c2bef07b20a5b88a18fc7237a8db40c9581f0 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1482494_0/Python/Decs/b.py | 0f263000841a7dcbb8b18269a13a7f97da46664b | []
| no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | tests = int(raw_input())
for test in xrange(1, tests + 1):
    n = int(raw_input())
    needed = [map(int, raw_input().split()) for i in xrange(n)]
    finishes = 0
    total = 0
    completed = [0] * n
    changed = True
    while changed:
        changed = False
        for level in xrange(n):
            if completed[level] < 2 and total >= needed[level][1]:
                finishes += 1
                total += 2 - completed[level]
                completed[level] = 2
                changed = True
                break
        if changed:
            continue
        one_star = [(-needed[i][1], i) for i in xrange(n) if completed[i] == 0 and total >= needed[i][0]]
        one_star.sort()
        if len(one_star) >= 1:
            finishes += 1
            total += 1
            completed[one_star[0][1]] = 1
            changed = True
    if total != 2 * n:
        print 'Case #%d: Too Bad' % test
    else:
        print 'Case #%d: %d' % (test, finishes) | [
"[email protected]"
]
| |
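The `b.py` row above is a greedy Code Jam solution: it repeatedly finishes any level that can already be completed with two stars, and otherwise plays, for one star, the not-yet-played level with the highest two-star requirement. For example, with the (assumed) input lines `1`, `2`, `0 1`, `0 2` (one case, two levels whose two-star thresholds are 1 and 2), it plays level 2 for one star first and prints `Case #1: 3`.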
aa500d76845c40a3d72f123894af915ff7dbc08a | e59fe240f0359aa32c59b5e9f581db0bfdb315b8 | /galaxy-dist/lib/galaxy/util/memdump.py | 25558ca4d47da9d18a9d786248dcd19f8de0f6c8 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | subway/Galaxy-Distribution | dc269a0258471597d483687a0f1dd9e10bd47448 | d16d6f9b6a8b7f41a218c06539863c8ce4d5a73c | refs/heads/master | 2021-06-30T06:26:55.237251 | 2015-07-04T23:55:51 | 2015-07-04T23:55:51 | 15,899,275 | 1 | 2 | null | 2020-10-07T06:17:26 | 2014-01-14T10:47:28 | Groff | UTF-8 | Python | false | false | 1,541 | py |
# Attempt to load guppy module, and only define Memdump class
# if available
try:
    import pkg_resources
    pkg_resources.require( "guppy" )
except:
    import sys
    print >> sys.stderr, "No guppy module, Memdump not available"
    Memdump = None
else:
    import os, sys, signal, time, guppy
    class Memdump( object ):
        def __init__( self, signum=signal.SIGUSR1, fname="memdump.log" ):
            self.fname = fname
            signal.signal( signum, self.dump )
            self.heapy = guppy.hpy()
            self.heap = None
        def dump( self, signum, stack ):
            file = open( self.fname, "a" )
            print >> file, "Memdump for pid %d at %s" % ( os.getpid(), time.asctime() )
            print >> file
            try:
                self.heap = self.heapy.heap()
                print >> file, "heap():"
                print >> file, self.heap
                print >> file, "\nbyrcs:"
                print >> file, self.heap.byrcs
                print >> file, "\nbyrcs[0].byid:"
                print >> file, self.heap.byrcs[0].byid
                print >> file, "\nget_rp():"
                print >> file, self.heap.get_rp()
                self.heapy.setref()
            except AssertionError:
                pass
            print >> file, "\nEnd dump\n"
            file.close()
        def setref( self ):
            self.heapy.setref()
        def get( self, update=False ):
            if update:
                self.heap = self.heapy.heap()
            return self.heap
| [
"[email protected]"
]
| |
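A minimal usage sketch for the `Memdump` helper above; the import path is assumed from the row's file path, and guppy must be installed:

```python
# Illustrative only: the module path and log file name are assumptions.
import os
import signal

from galaxy.util.memdump import Memdump

md = Memdump(signum=signal.SIGUSR1, fname="memdump.log")  # installs the signal handler
os.kill(os.getpid(), signal.SIGUSR1)                      # appends a heap report to memdump.log
heap = md.get(update=True)                                # or inspect the guppy heap object directly
```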
cfd2a5beceab1fefa9ef31478b2edfd1df1dc1bd | c15e8eb4911bb3422324c6cbfe4ed35b3da33934 | /kiosk_api/views/process_payments.py | c039a4982b344f96b2cb81ca7fa2391a461fedbd | []
| no_license | adam1978828/webapp1 | 6d839fbd0974e2f6861ae5a88f0170529d9edd3a | a27cb847ea7698872b64f9c58e43ebf5aad5590d | refs/heads/master | 2020-05-29T14:41:21.240267 | 2016-06-01T20:11:01 | 2016-06-01T20:11:01 | 60,207,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,940 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from sqlalchemy import or_
from libs.utils.list_functions import partition_by
from coupons.helpers.processor import CouponProcessor
from Model import *
__author__ = 'D.Ivanets, D.Kalpakchi'
# TODO: properly save transaction result after each transaction!
# => ...
@csrf_exempt
def process_payments(request):
request.current_time = datetime.datetime.utcnow()
grace_change_status(request)
request.db_session.commit()
server_error_change_status(request)
request.db_session.commit()
process_over_rented(request)
request.db_session.commit()
process_grace(request)
request.db_session.commit()
process_over_reserved(request)
request.db_session.commit()
process_offline_rents(request)
request.db_session.commit()
process_closed_rents(request)
request.db_session.commit()
process_purchases(request)
request.db_session.commit()
process_manually_reverted_purchases(request)
request.db_session.commit()
return JsonResponse({})
def process_offline_rents(request):
"""This view looks for offline rentals and pre_authorizes required amount
from customer card.
"""
# 321, "NA EJECTED RENT"
deals_offline = request.db_session.query(Deal) \
.filter(Deal.deal_status_id == 321)\
.all()
for deal in deals_offline:
deal.ps_preauth()
request.db_session.commit()
def grace_change_status(request):
"""
Change status of deals if grace period expired
"""
# 301, "G EJECTED RENT"
# 302, "G EJECTED SALE"
deals_grace = request.db_session.query(Deal) \
.filter(Deal.deal_status_id.in_((301, 302))).all()
status_map = {
301: 311, # 311, "EJECTED RENT"
302: 312, # 312, "EJECTED SALE"
}
for deal in deals_grace:
grace_period = deal.kiosk_start.settings.grace_period
grace_period = datetime.timedelta(minutes=int(grace_period))
deal_started = deal.dt_start
last_sync = deal.kiosk_start.dt_sync
now = request.current_time
if now - deal_started > grace_period:
if last_sync - deal_started > grace_period:
deal.deal_status_id = status_map[int(deal.deal_status_id)]
request.db_session.commit()
request.db_session.commit()
def server_error_change_status(request):
"""
Changes NEW deals to Error after 30 minutes of server fail.
"""
filter_date = request.current_time - datetime.timedelta(minutes=30)
# 101, "NEW RENT"
# 102, "NEW SALE"
deals_error = request.db_session.query(Deal) \
.filter(Deal.deal_status_id.in_((101, 102))) \
.filter(Deal.dt_start < filter_date).all()
for deal in deals_error:
deal.deal_status_id = 450 # 450, "SERVER ERROR"
request.db_session.commit()
request.db_session.commit()
def process_closed_rents(request):
"""
Process all rental deals where disk was already returned
"""
deals_rent = request.db_session.query(Deal) \
.filter(Deal.deal_status_id.in_((511, 521, 531, 701))) \
.filter(or_(Deal.dt_next_retry.is_(None),
Deal.dt_next_retry <= request.current_time)) \
.all()
status_map = {
511: 601,
521: 621,
531: 641,
701: 621,
}
for deal in deals_rent:
if deal.deal_status_id != 701:
deal.total_days = deal.count_rental_period()
if not deal.force_total_amount:
deal.total_amount = deal.calculate_amount()
deal.payment_system.process_amount_for_deal(deal)
request.db_session.commit()
if deal.is_fully_charged():
deal.deal_status_id = status_map[deal.deal_status_id]
deal.dt_next_retry = None
else:
delta = deal.kiosk_start.retry_delta()
deal.dt_next_retry = request.current_time + delta
request.db_session.commit()
def process_purchases(request):
"""
Process all purchases with grace period expired.
@params
"""
# 312, "EJECTED SALE" 602,"CLOSED SALE"
# 322, "NA EJECTED SALE" 602,"CLOSED SALE"
# 702, "M CHANGED SALE" 622,"M CLOSED SALE"
# 712, "CONVERTED SALE" 632,"CONV CLS SALE"
deals_sale = request.db_session.query(Deal) \
.filter(Deal.deal_status_id.in_((312, 322, 702, 712))) \
.filter(or_(Deal.dt_next_retry.is_(None),
Deal.dt_next_retry <= request.current_time)) \
.all()
status_map = {312: 602,
322: 602,
702: 622,
712: 632}
for deal in deals_sale:
if not deal.force_total_amount:
deal.total_amount = deal.calculate_amount()
deal.payment_system.process_amount_for_deal(deal)
request.db_session.commit()
if deal.is_fully_charged():
deal.deal_status_id = status_map[deal.deal_status_id]
deal.dt_next_retry = None
else:
delta = deal.kiosk_start.retry_delta()
deal.dt_next_retry = request.current_time + delta
request.db_session.commit()
def process_manually_reverted_purchases(request):
"""
Process all purchases that was manually marked as 'need revert'.
All that deals of type sale with status 522.
@params
"""
deals_sale = request.db_session.query(Deal) \
.filter(Deal.deal_status_id == 522)\
.filter(or_(Deal.dt_next_retry.is_(None),
Deal.dt_next_retry <= request.current_time)) \
.all()
for deal in deals_sale:
deal.total_amount = 0
deal.payment_system.process_amount_for_deal(deal)
request.db_session.commit()
# deal.deal_status_id = 622
if deal.is_fully_charged():
deal.deal_status_id = 622
deal.dt_next_retry = None
else:
delta = deal.kiosk_start.retry_delta()
deal.dt_next_retry = request.current_time + delta
def process_over_rented(request):
# EJECTED RENT: 311
rents = request.db_session.query(Deal) \
.filter(Deal.deal_status_id == 311) \
.filter(Deal.dt_rent_expire.isnot(None)) \
.filter(Deal.dt_rent_expire < request.current_time) \
.all()
for deal in rents:
deal.deal_type_id = 2
deal.dt_end = deal.dt_rent_expire
deal.total_amount = float(deal.tariff_value.sale) * (1 + 0.01 * float(deal.kiosk_start.settings.sale_tax_rate))
deal.deal_status_id = 712
deal.disk.state_id = 4
request.db_session.add_all([deal, deal.disk])
request.db_session.commit()
request.db_session.commit()
def process_over_reserved(request):
# TODO: change it to capture
# PREAUTH RESERVED: 241
rents = request.db_session.query(Deal) \
.filter(Deal.deal_status_id == 241) \
.filter(Deal.dt_reservation_expire.isnot(None)) \
.filter(Deal.dt_reservation_expire < request.current_time) \
.all()
# Can be done with OVER window fn.
# E.g.: s.query(func.min(Deal.id).over(partition_by='secret_code'))\
# .filter(Deal.secret_code.isnot(None)).all()
# But it depends on db, so if we change db, that code won't work. That's why:
rents = partition_by(rents, 'secret_code')
for group in rents:
if group[0].coupon:
cp = CouponProcessor(group[0].coupon)
cp.discount(group)
for deal in group:
deal.dt_end = deal.dt_reservation_expire
deal.deal_status_id = 531
deal.disk.state_id = 0
request.db_session.add_all([deal, deal.disk])
request.db_session.commit()
request.db_session.commit()
request.db_session.commit()
def process_grace(request):
# 501: G RETURNED RENT:
# 502: G RETURNED SALE:
# 420: CANNOT EJECT:
# 440: NOT PICKED:
# 460: KAPP DOWN:
deals_grace = request.db_session.query(Deal) \
.filter(Deal.deal_status_id.in_((501, 502, 420, 440, 460)))\
.filter(or_(Deal.dt_next_retry.is_(None),
Deal.dt_next_retry <= request.current_time)) \
.all()
for deal in deals_grace:
deal.total_amount = 0
deal.payment_system.process_amount_for_deal(deal)
request.db_session.commit()
if deal.is_fully_charged():
status_map = {
501: 611,
502: 612,
420: 620,
440: 640,
460: 660,
}
deal.deal_status_id = status_map[deal.deal_status_id]
else:
delta = deal.kiosk_start.retry_delta()
deal.dt_next_retry = request.current_time + delta
request.db_session.commit()
| [
"[email protected]"
]
| |
1a7633f93b0824f775592709fc99a1d901708513 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Python-3-Video-Tutorial/Exercise Files/Exercise Files/02 Quick Start/function.py | 730ac95decd51740673e575dccced4204c40da21 | []
| no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 344 | py | #!/usr/bin/python3
def isprime(n):
    if n == 1:
        print("1 is special")
        return False
    for x in range(2, n):
        if n % x == 0:
            print("{} equals {} x {}".format(n, x, n // x))
            return False
    else:
        print(n, "is a prime number")
        return True

for n in range(1, 20):
    isprime(n)
| [
"[email protected]"
]
| |
3089cbd516c8333275c4d4d1c89c91ddcf6f27d1 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/octal/88c13780af7b419e8a9f1b81c23ca0df.py | 815521fd47687557af3468c72421c673042ff834 | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 866 | py | def Base(base_, name_):
    all_digits = "0123456789" + "".join(chr(i) for i in xrange(ord('a'), ord('z')+1))
    if base_ > len(all_digits):
        raise ValueError("Cannot create a numbering base {}: not enough digits".format(base_))
    class Base(object):
        digits = all_digits[:base_]
        base = base_
        name = name_
        def __init__(self, s):
            self.num = s
            acc = 0
            b = self.base
            for sd in self.num:
                try:
                    d = self.digits.index(sd)
                    acc *= b
                    acc += d
                except ValueError:
                    raise ValueError("Invalid {} digit: {}".format(self.name, sd))
            self.value = acc
        def to_decimal(self):
            return self.value
    return Base

class Octal(Base(8, 'octal')):
    pass
| [
"[email protected]"
]
| |
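The `Base`/`Octal` helpers above turn digit strings into integers; for instance (Python 2, matching the snippet's `xrange`):

```python
print Octal('17').to_decimal()    # 15
print Octal('755').to_decimal()   # 493
Octal('18')                       # raises ValueError: Invalid octal digit: 8
```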
88c5eb693664e13eaf85f607287f78a60cfd3cef | 915bfb55c32999a39807b5364c6fa48d0d0b0bb0 | /OMS/saltstack/scripts/copy_anything.py | 53d97b77fa03a43466f000f89d9e2b974a0d6055 | []
| no_license | rysinal/pythonnote | fd761d67fcf41fc009a5724ecd666db63cfef62a | 90245323b1d6fcfdec89c1abefbc34ef6fa0946d | refs/heads/master | 2021-12-23T11:39:29.580329 | 2017-11-13T08:31:07 | 2017-11-13T08:31:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | #!/usr/bin/python
import shutil
import errno
# import os
def do_copy(src, dst):
    try:
        # if os.path.exists(dst):
        #     shutil.copyfile(src, dst)
        # else:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno == errno.ENOTDIR:
            shutil.copy(src, dst)
        else:
            raise
| [
"[email protected]"
]
| |
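`do_copy` above copies a directory tree and falls back to a single-file copy when the source is not a directory; with hypothetical paths:

```python
do_copy("notes", "backup/notes")        # directory: copied recursively via shutil.copytree
do_copy("todo.txt", "backup/todo.txt")  # not a directory: ENOTDIR, falls back to shutil.copy
```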
1d807c3ac02c9f70b4c9b2e471a6204a41b1ed38 | f7a20374403b55189cc5db6e8fa34d0ba290387c | /modules/everyday_report/report_mp.py | 9b3f45580f58891e6f10da07b88711f3cea0d088 | []
| no_license | dark-ice/upink_modules | 1a7b5a165cc5e05396c62cf33c261b907c23e33c | c497bf87a39796f1df3877542359b1927bec3a76 | refs/heads/master | 2021-05-01T04:40:16.436666 | 2014-04-12T15:09:31 | 2014-04-12T15:09:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,658 | py | # coding=utf-8
__author__ = 'andrey'
from openerp import tools
from openerp.osv import fields
from openerp.osv.orm import Model
class ReportMP(Model):
_name = 'day.report.mp'
_description = u'Ежедневные отчеты - МП'
_auto = False
_order = 'date'
_columns = {
'date_start': fields.date('c', select=True),
'date_end': fields.date('по', select=True),
'date': fields.date('Дата'),
'week_number': fields.integer('Номер недели', group_operator="avg"),
'ppc_plan': fields.integer('PPC план'),
'ppc_fact': fields.integer('PPC факт'),
'ppc_cash': fields.float('PPC $'),
'web_plan': fields.integer('web план'),
'web_fact': fields.integer('web факт'),
'web_cash': fields.float('web $'),
'smm_plan': fields.integer('smm план'),
'smm_fact': fields.integer('smm факт'),
'smm_cash': fields.float('smm $'),
'seo_plan': fields.integer('seo план'),
'seo_fact': fields.integer('seo факт'),
'seo_cash': fields.float('seo $'),
'call_plan': fields.integer('КЦ план'),
'call_fact': fields.integer('КЦ факт'),
'call_cash': fields.float('КЦ $'),
'video_plan': fields.integer('video план'),
'video_fact': fields.integer('video факт'),
'video_cash': fields.float('video $'),
'mp_plan': fields.integer('МП план'),
'mp_fact': fields.integer('МП факт'),
'mp_cash': fields.float('МП $'),
'moscow_plan': fields.integer('Москва план'),
'moscow_fact': fields.integer('Москва факт'),
'moscow_cash': fields.float('Москва $'),
'total_fact': fields.integer('Зашедшие брифы'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'day_report_mp')
cr.execute("""
create or replace view day_report_mp as (
SELECT
row_number()
OVER () AS id,
to_char(r.date, 'YYYY-MM-DD') date_end,
to_char(r.date, 'YYYY-MM-DD') date_start,
extract(WEEK FROM r.date) week_number,
r.date date,
max(total_fact) total_fact,
max(CASE WHEN r.direction = 'PPC' THEN r.plan ELSE 0 END) ppc_plan,
max(ppc_fact) ppc_fact,
max(ppc_cash) ppc_cash,
max(CASE WHEN r.direction = 'SMM' THEN r.plan ELSE 0 END) smm_plan,
max(smm_fact) smm_fact,
max(smm_cash) smm_cash,
max(CASE WHEN r.direction = 'SEO' THEN r.plan ELSE 0 END) seo_plan,
max(seo_fact) seo_fact,
max(seo_cash) seo_cash,
max(CASE WHEN r.direction = 'CALL' THEN r.plan ELSE 0 END) call_plan,
max(call_fact) call_fact,
max(call_cash) call_cash,
max(CASE WHEN r.direction = 'SITE' THEN r.plan ELSE 0 END) web_plan,
max(web_fact) web_fact,
max(web_cash) web_cash,
max(CASE WHEN r.direction = 'VIDEO' THEN r.plan ELSE 0 END) video_plan,
max(video_fact) video_fact,
max(video_cash) video_cash,
max(CASE WHEN r.direction = 'MP' THEN r.plan ELSE 0 END) mp_plan,
max(mp_fact) mp_fact,
max(mp_cash) mp_cash,
max(CASE WHEN r.direction = 'MOSCOW' THEN r.plan ELSE 0 END) moscow_plan,
max(moscow_fact) moscow_fact,
max(moscow_cash) moscow_cash
FROM
day_report_brief_plan r
LEFT JOIN (
SELECT
h.cr_date::DATE date,
sum(CASE WHEN bss.direction IN ('PPC', 'SEO', 'SMM', 'CALL', 'SITE', 'VIDEO', 'MP', 'MOSCOW') IS NOT NULL THEN 1 ELSE 0 END) total_fact,
sum(CASE WHEN bss.direction = 'PPC' THEN 1 ELSE 0 END) ppc_fact,
sum(CASE WHEN bss.direction = 'PPC' THEN b.sum_mediaplan ELSE 0 END) ppc_cash,
sum(CASE WHEN bss.direction = 'SMM' THEN 1 ELSE 0 END) smm_fact,
sum(CASE WHEN bss.direction = 'SMM' THEN b.sum_mediaplan ELSE 0 END) smm_cash,
sum(CASE WHEN bss.direction = 'SEO' THEN 1 ELSE 0 END) seo_fact,
sum(CASE WHEN bss.direction = 'SEO' THEN b.sum_mediaplan ELSE 0 END) seo_cash,
sum(CASE WHEN bss.direction = 'CALL' THEN 1 ELSE 0 END) call_fact,
sum(CASE WHEN bss.direction = 'CALL' THEN b.sum_mediaplan ELSE 0 END) call_cash,
sum(CASE WHEN bss.direction = 'SITE' THEN 1 ELSE 0 END) web_fact,
sum(CASE WHEN bss.direction = 'SITE' THEN b.sum_mediaplan ELSE 0 END) web_cash,
sum(CASE WHEN bss.direction = 'VIDEO' THEN 1 ELSE 0 END) video_fact,
sum(CASE WHEN bss.direction = 'VIDEO' THEN b.sum_mediaplan ELSE 0 END) video_cash,
sum(CASE WHEN bss.direction = 'MP' THEN 1 ELSE 0 END) mp_fact,
sum(CASE WHEN bss.direction = 'MP' THEN b.sum_mediaplan ELSE 0 END) mp_cash,
sum(CASE WHEN bss.direction = 'MOSCOW' THEN 1 ELSE 0 END) moscow_fact,
sum(CASE WHEN bss.direction = 'MOSCOW' THEN b.sum_mediaplan ELSE 0 END) moscow_cash
FROM brief_history h
LEFT JOIN brief_main b
ON (h.brief_id = b.id)
LEFT JOIN brief_services_stage bss
ON (bss.id = b.services_ids)
WHERE h.state_id = 'media_approval'
GROUP BY h.cr_date::DATE
) b on (b.date=r.date)
GROUP BY r.date
)""")
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
for item in domain:
if item[0] == 'date_start':
item[0] = 'date'
item[1] = '>='
if item[0] == 'date_end':
item[0] = 'date'
item[1] = '<='
item[2] = "{date} 23:59:59".format(date=item[2],)
return super(ReportMP, self).read_group(cr, uid, domain, fields, groupby, offset, limit, context, orderby)
ReportMP() | [
"[email protected]"
]
| |
6088e141228743b67ea1602b028a24c111010e3a | e96461c5711974aee2401aad3206131b84e7b665 | /library/piglow.py | f4539f48cab58c387be9fc2b9a33bc3b879a7e34 | []
| no_license | sbelyea/piglow | 0a06507ef4859711a47027b09e58f22b7e42c5eb | d8599be3998521a3d211e38ac61043f717d74d40 | refs/heads/master | 2020-12-11T04:00:40.815366 | 2015-05-12T09:45:32 | 2015-05-12T09:45:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,444 | py | import sn3218
import atexit
import time
sn3218.enable()
sn3218.enable_leds(0b111111111111111111)
clear_on_exit = True
auto_update = False
_legs = [
# r o y g b w
[ 6, 7, 8, 5, 4, 9 ],
[ 17, 16, 15, 13, 11, 10 ],
[ 0, 1, 2, 3, 14, 12 ]
]
_values = [0] * 18
colours = {
"red" : 0,
"orange" : 1,
"yellow" : 2,
"green" : 3,
"blue" : 4,
"white" : 5
}
def white(v): ring(5,v)
def blue(v): ring(4,v)
def green(v): ring(3,v)
def yellow(v): ring(2,v)
def orange(v): ring(1,v)
def red(v): ring(0,v)
def arm1(v): arm(0,v)
def arm2(v): arm(1,v)
def arm3(v): arm(2,v)
def led1(v): set(0,v)
def led2(v): set(1,v)
def led3(v): set(2,v)
def led4(v): set(3,v)
def led5(v): set(4,v)
def led6(v): set(5,v)
def led7(v): set(6,v)
def led8(v): set(7,v)
def led9(v): set(8,v)
def led10(v): set(9,v)
def led11(v): set(10,v)
def led12(v): set(11,v)
def led13(v): set(12,v)
def led14(v): set(13,v)
def led15(v): set(14,v)
def led16(v): set(15,v)
def led17(v): set(16,v)
def led18(v): set(17,v)
def arm(x,y): leg(x - 1,y)
def spoke(x,y): leg(x - 1,y)
def show():
'''
Output the contents of the values list to PiGlow.
'''
sn3218.output(_values)
def get():
return _values
def set(leds, value):
'''
Set one or more LEDs with one or more values
Args:
* leds - A single index, or list of indexes of the LEDs to set
* values - A single value, or list of values to set
'''
global _values
if isinstance(leds, list):
for led in leds:
if isinstance(value, list):
_values[leds[led] % 18] = (value[led] % 256)
else:
_values[led % 18] = (value % 256)
elif isinstance(leds, int):
leds = leds % 18
if isinstance(value, list):
_values[leds:leds + len(value)] = map(lambda v: v % 256, value)
if len(_values) > 18:
wrap = _values[18:]
_values = _values[:18]
set(0, wrap)
else:
_values[leds] = (value % 256)
else:
raise ValueError("Invalid LED(s)")
if auto_update:
show()
def ring(ring, value):
'''
Set the brightness of a specific ring
'''
ring = ring % 7
set([_legs[0][ring], _legs[1][ring], _legs[2][ring]], value)
def leg_bar(leg, percentage):
# 1530 = 6 * 255
amount = int(1530.0 * percentage)
for led in reversed(_legs[leg]):
set(led,255 if amount > 255 else amount)
amount = 0 if amount < 255 else amount - 255
def leg(leg, intensity):
set(_legs[leg % 3], intensity)
def led(led, intensity):
'''Compatibility function for old PiGlow library
Accepts LED between 1 and 18.
Calls set(led - 1, intesity)
Args:
* led - LED number from 1 to 18
* intensity - brightness from 0 to 255
'''
set(led - 1, intensity)
def single(leg, ring, intensity):
'''Sets a single LED by its leg/ring
Args:
* leg - leg index of LED
* ring - ring index of LED
* intensity - brightness from 0 to 255
'''
set(_legs[leg % 3][ring % 7], intensity)
def tween(duration, end, start = None):
'''Tweens to a particular set of intensities.
Also accepts an optional starting point, otherwise
the current state of the LED is used.
Args:
* duration - duration in seconds
* end - list of 18 values to tween to
* start - list of 18 values to start from
'''
if not len(end) == 18:
raise ValueError("Requires list of 18 values")
fps = 1.0/60
steps = int(duration / fps)
if start is None:
start = _values
for x in range(steps):
new = []
for y in range(18):
s = start[y]
e = end[y]
c = float(e - s)
b = s + ((c/float(steps)) * (x+1))
new.append(int(b))
set(0, new)
show()
time.sleep(fps)
def colour(colour, intensity):
if not isinstance(colour, int):
if colour in colours:
ring(colours[colour], intensity)
return True
else:
raise ValueError("Invalid Colour")
return False
ring(colour-1, intensity)
return True
def all(value):
set(0, [value]*18)
def clear():
set(0, [0]*18)
def off():
all(0)
show()
def _exit():
if clear_on_exit:
off()
atexit.register(_exit)
| [
"[email protected]"
]
| |
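The `piglow.py` module above exposes one function per LED, ring and arm plus a shared `show()`; a small driving sketch (illustrative; it needs a PiGlow board and the `sn3218` driver):

```python
import piglow

piglow.auto_update = False   # batch the writes
piglow.red(64)               # brightness 0-255 for the red ring
piglow.arm(1, 128)           # light the first arm
piglow.show()                # push the values out over I2C
piglow.off()
```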
39fcde1335024ee652d7a7165f81f5e842678cd7 | ef3d4130f28c7c589c646b15d19010cf426fc0f6 | /doc/Programs/Regression/p1_arianna.py | a198ded283fcfff48233d99ca3bbe3735e696c90 | [
"CC0-1.0"
]
| permissive | CompPhysics/MachineLearning | 54e1123a96060d824307f270415646494783cff5 | 00a2bd1a7efde5fbfd9b9d6d6c365dcd82fe8baf | refs/heads/master | 2023-09-06T02:34:28.559589 | 2023-09-05T13:16:27 | 2023-09-05T13:16:27 | 103,986,991 | 154 | 136 | CC0-1.0 | 2022-11-18T11:01:02 | 2017-09-18T20:11:45 | null | UTF-8 | Python | false | false | 15,632 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 15:53:35 2019
@author: Ary
"""
import numpy as np
import pandas as pd
import sklearn.linear_model as skl
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import math
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
np.random.seed(2204)
## part a
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
def Design_Matrix_X(x, y, n):
N = len(x)
l = int((n+1)*(n+2)/2)
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = x**(i-k) * y**k
return X
n_x=1000
m=5
x = np.random.uniform(0, 1, n_x)
y = np.random.uniform(0, 1, n_x)
z = FrankeFunction(x, y)
#print(x)
n = int(len(x))
z_1 = z +0.01*np.random.randn(n)
X= Design_Matrix_X(x,y,n=m)
DesignMatrix = pd.DataFrame(X)
#print(DesignMatrix)
a = np.linalg.matrix_rank(X) #we check it is not a singular matrix
#print(a)
beta = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(z_1)
ztilde = X @ beta
#print(beta)
beta1 = skl.LinearRegression().fit(X,z_1) #function .fit fits linear models
ztilde1 = beta1.predict(X)
#print(ztilde)
#print('--')
#print(ztilde1)
var_beta_OLS = 1*np.linalg.inv(X.T.dot(X))
var = pd.DataFrame(var_beta_OLS)
#print(var)
var_diag=np.diag(var_beta_OLS)
#print(var_diag)
l1_OLS = beta - 1.96*np.sqrt(var_diag)/(X.shape[0])
l2_OLS = beta + 1.96*np.sqrt(var_diag)/(X.shape[0])
#print(l1_OLS)
#print(l2_OLS)
def MSE (ydata, ymodel):
n = np.size(ymodel)
y = (ydata - ymodel).T@(ydata - ymodel)
y = y/n
return y
def R2 (ydata, ymodel):
return 1-((ydata-ymodel).T@(ydata-ymodel))/((ydata-np.mean(ydata)).T@(ydata-np.mean(ydata)))
print(MSE(z_1,ztilde))
print(R2(z_1,ztilde))
print("Mean squared error: %.2f" % mean_squared_error(z_1, ztilde))
print('Variance score: %.2f' % r2_score(z_1, ztilde))
## part b
def train_test_splitdata(x_,y_,z_,i):
x_learn=np.delete(x_,i)
y_learn=np.delete(y_,i)
z_learn=np.delete(z_,i)
x_test=np.take(x_,i)
y_test=np.take(y_,i)
z_test=np.take(z_,i)
return x_learn,y_learn,z_learn,x_test,y_test,z_test
def k_fold(k,x,y,z,m,model):
n=len(x)
j=np.arange(n)
np.random.shuffle(j)
n_k=int(n/k)
MSE_K_t = 0
R2_K_t = 0
Variance_t=0
Bias_t=0
betas = np.zeros((k,int((m+1)*(m+2)/2)))
z_pred = np.zeros((200,k))
z_test1 = np.zeros((200,k))
z_train1 = np.zeros((800,k))
z_pred_train = np.zeros((800,k))
for i in range(k):
x_l,y_l,z_l,x_test,y_test,z_test=train_test_splitdata(x,y,z,j[i*n_k:(i+1)*n_k])
z_test1[:,i]=z_test
z_train1[:,i]=z_l
X = Design_Matrix_X(x_l,y_l,m)
X_test= Design_Matrix_X(x_test,y_test,m)
#print(pd.DataFrame(X))
#print(pd.DataFrame(X_test))
beta1= model.fit(X,z_l)
beta = beta1.coef_
print(beta[0])
betas[i] = beta
ztilde1 = beta1.predict(X_test)
ztilde_l = beta1.predict(X)
#print(ztilde1)
z_pred[:,i] = ztilde1
z_pred_train[:,i] = ztilde_l
# MSE_K_t+=MSE(z_test,ztilde1)
R2_K_t+=R2(z_test,ztilde1)
# Bias_t+=bias(z_test,ztilde1)
# Variance_t+=variance(ztilde1)
# check if the values computed with our function and using the methods in lines 161-163 are the same
#error_t = MSE_K_t/k
#bias_t = Bias_t/k
#variance_t = Variance_t/k
R2_t = R2_K_t/k
#print(error_t)
#print(bias_t)
#print(variance_t)
error_test = np.mean(np.mean((z_test1 - z_pred)**2 , axis=1, keepdims=True))
bias___ = np.mean( (z_test1 - np.mean(z_pred, axis=1, keepdims=True))**2 )
variance___ = np.mean( (z_pred - np.mean(z_pred, axis=1, keepdims=True))**2 )
error_train = np.mean(np.mean((z_train1 - z_pred_train)**2 , axis=1, keepdims=True))
return (error_test, bias___,variance___ , error_train, R2_t, np.std(betas, axis = 0), np.mean(betas, axis = 0))
def variance(y_tilde):
return np.sum((y_tilde - np.mean(y_tilde))**2)/np.size(y_tilde)
def bias(y, y_tilde):
return np.sum((y - np.mean(y_tilde))**2)/np.size(y_tilde)
a=k_fold(5,x,y,z_1,5,LinearRegression(fit_intercept=False))
error_test = a[0]
bias___ = a[1]
variance___ = a[2]
error_train = a[3]
print('{} = {} + {}= {}'.format(error_test, bias___, variance___, bias___+variance___))
print('BBB')
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
kfold = model_selection.KFold(n_splits=5, shuffle=True)
X= Design_Matrix_X(x,y,n=5)
k=5
z_pred = []
z_test1 = []
z_train1 = []
z_pred_train = []
for train_index, test_index in kfold.split(X):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
z_train, z_test = z[train_index], z[test_index]
z_test1.append(z_test)
z_train1.append(z_train)
print(X_train.shape, X_test.shape)
model = LinearRegression(fit_intercept=False)
model.fit(X_train,z_train)
z_pred.append(model.predict(X_test))
z_pred_train.append(model.predict(X_train))
bias = np.mean( (z_test - np.mean(z_pred))**2 )
variance = np.mean( (z_pred - np.mean(z_pred))**2 )
mse = model_selection.cross_val_score(model, X, z_1, cv=kfold, scoring='neg_mean_squared_error')
r2 = model_selection.cross_val_score(model, X, z_1, cv=kfold, scoring='r2')
print(bias)
print(variance)
print(np.absolute(mse.mean()))
print(r2.mean())
# part c
maxdegree = 20
def fold_degree(maxdegree,x,y,z,k):
error__t = np.zeros(maxdegree)
bias__t = np.zeros(maxdegree)
variance__t = np.zeros(maxdegree)
polydegree = np.zeros(maxdegree)
var_score__t = np.zeros(maxdegree)
error__l = np.zeros(maxdegree)
for degree in range(maxdegree):
#z_pred = np.empty((2000, k))
degree_fold = k_fold(k, x, y, z, degree, LinearRegression())
error_t = degree_fold[0]
bias_t = degree_fold[1]
variance_t = degree_fold[2]
var_score_t = degree_fold[4]
error_l = degree_fold[3]
polydegree[degree] = degree
error__t[degree] = error_t
bias__t[degree] = bias_t
variance__t[degree] = variance_t
var_score__t[degree] = var_score_t
error__l[degree] = error_l
print(degree)
print(error_t)
print(variance_t)
return (polydegree, error__t, bias__t, variance__t, var_score__t, error__l)
b = fold_degree(maxdegree, x, y, z, 5)
#print(b[1])
#print(b[2], b[3])
#print(b[1]+b[3])
plt.plot(b[0], (b[1]), label='Error')
plt.plot(b[0], (b[2]), label='bias')
plt.plot(b[0], (b[3]), label='Variance')
plt.legend()
plt.show()
plt.plot(b[0], (b[1]), label='Error test')
plt.plot(b[0], (b[5]), label='Error learning')
plt.legend()
plt.show()
from sklearn.utils import resample
n_boostraps = 100
error_test = np.zeros(maxdegree)
bias___ = np.zeros(maxdegree)
variance___ = np.zeros(maxdegree)
polydegree = np.zeros(maxdegree)
error_train = np.zeros(maxdegree)
x_train, x_test, y_train, y_test, z_train, z_test = train_test_split(x, y, z, test_size=0.2, shuffle=True)
z_test1 = np.zeros((200,100))
z_train1 = np.zeros((800,100))
for i in range(100):
z_test1[:,i]=z_test
for degree in range(maxdegree):
model = LinearRegression(fit_intercept=False)
z_pred = np.empty((z_test.shape[0],n_boostraps))
z_pred_train = np.empty((z_train.shape[0],n_boostraps))
for i in range(n_boostraps):
x_, y_, z_ = resample(x_train, y_train, z_train)
z_train1[:,i] = z_
X_train = Design_Matrix_X(x_,y_,degree)
X_test= Design_Matrix_X(x_test,y_test,degree)
z_pred[:, i] = model.fit(X_train, z_).predict(X_test).ravel()
z_pred_train[:, i] = model.fit(X_train, z_).predict(X_train).ravel()
polydegree[degree] = degree
error_test[degree] = np.mean(np.mean((z_test1 - z_pred)**2 , axis=1, keepdims=True))
bias___[degree] = np.mean( (z_test1 - np.mean(z_pred, axis=1, keepdims=True))**2 )
variance___[degree] = np.mean( np.var(z_pred, axis=1, keepdims=True))
error_train[degree] = np.mean(np.mean((z_train1 - z_pred_train)**2 , axis=1, keepdims=True))
#print(degree)
#print(error_test)
#print(bias___)
#print(variance___)
#print(bias___+variance___)
plt.plot(polydegree, error_test, label='Error')
plt.plot(polydegree, bias___, label='bias')
plt.plot(polydegree, variance___, label='Variance')
plt.legend()
plt.show()
plt.plot(polydegree, error_test, label='Error test')
plt.plot(polydegree, error_train, label='error training')
plt.legend()
plt.show()
#part d
lamdas = [0.001, 0.01, 0.1, 1]
for lamda in lamdas:
beta_r = np.linalg.inv(X.T.dot(X)+lamda*np.identity(21)).dot(X.T).dot(z_1)
zridge = X @ beta_r
print("Beta parameters")
print(beta_r)
#print(zridge)
clf_ridge = skl.Ridge(alpha=lamda).fit(X, z_1)
zridge1 = clf_ridge.predict(X)
#print(zridge1)
M = np.linalg.inv(X.T.dot(X)+lamda*np.identity(21))
var_beta_ridge = M.dot(X.T).dot(X).dot(M.T)
var_b_ridge = np.diag(var_beta_ridge)
print("Variance of betas")
print(var_b_ridge)
l1_Ridge = beta_r - 1.96*np.sqrt(var_b_ridge)/(X.shape[0])
l2_Ridge = beta_r + 1.96*np.sqrt(var_b_ridge)/(X.shape[0])
#print(l1_Ridge)
#print(l2_Ridge)
print(MSE(z_1,zridge))
print(R2(z_1,zridge))
c = k_fold(5,x,y,z,5,skl.Ridge(alpha=lamda))
#print(c[0])
#print(c[1])
#print(c[2])
#print(c[3])
def fold_degree_r(x,y,z,k,lamdas):
error = np.zeros(len(lamdas))
bias = np.zeros(len(lamdas))
variance = np.zeros(len(lamdas))
polylamda = np.zeros(len(lamdas))
for lamda in lamdas:
lamda_fold = k_fold(k, x, y, z, 5, skl.Ridge(alpha=lamda))
error_ = lamda_fold[0]
bias_ = lamda_fold[2]
#print(bias_)
variance_ = lamda_fold[3]
# print('AAA')
#print(lamdas.index(lamda))
polylamda[lamdas.index(lamda)] = lamda
error[lamdas.index(lamda)] = error_
bias[lamdas.index(lamda)] = bias_
variance[lamdas.index(lamda)] = variance_
return (polylamda, error, bias, variance)
d = fold_degree_r(x, y, z, 5, lamdas)
#print(b[2])
plt.plot(d[0], d[1], label='Error')
plt.plot(d[0], d[2], label='bias')
plt.plot(d[0], d[3], label='Variance')
plt.legend()
plt.show()
n_boostraps = 100
error_test = np.zeros(len(lamdas))
bias___ = np.zeros(len(lamdas))
variance___ = np.zeros(len(lamdas))
polylamda = np.zeros(len(lamdas))
error_train = np.zeros(len(lamdas))
x_train, x_test, y_train, y_test, z_train, z_test = train_test_split(x, y, z, test_size=0.2, shuffle=True)
z_test1 = np.zeros((200,100))
z_train1 = np.zeros((800,100))
for i in range(100):
z_test1[:,i]=z_test
for lamda in lamdas:
model = skl.Ridge(alpha=lamda)
z_pred = np.empty((z_test.shape[0],n_boostraps))
z_pred_train = np.empty((z_train.shape[0],n_boostraps))
for i in range(n_boostraps):
x_, y_, z_ = resample(x_train, y_train, z_train)
z_train1[:,i] = z_
X_train = Design_Matrix_X(x_,y_,5)
X_test= Design_Matrix_X(x_test,y_test,5)
z_pred[:, i] = model.fit(X_train, z_).predict(X_test).ravel()
z_pred_train[:, i] = model.fit(X_train, z_).predict(X_train).ravel()
polylamda[lamdas.index(lamda)] = lamda
error_test[lamdas.index(lamda)] = np.mean(np.mean((z_test1 - z_pred)**2 , axis=1, keepdims=True))
bias___[lamdas.index(lamda)] = np.mean( (z_test1 - np.mean(z_pred, axis=1, keepdims=True))**2 )
variance___[lamdas.index(lamda)] = np.mean( np.var(z_pred, axis=1, keepdims=True))
error_train[lamdas.index(lamda)] = np.mean(np.mean((z_train1 - z_pred_train)**2 , axis=1, keepdims=True))
print(lamda)
print(error_test)
print(bias___)
print(variance___)
print(bias___+variance___)
plt.plot(lamdas, error_test, label='Error')
plt.plot(lamdas, bias___, label='bias')
plt.plot(lamdas, variance___, label='Variance')
plt.legend()
plt.show()
plt.plot(lamdas, error_test, label='Error test')
plt.plot(lamdas, error_train, label='error training')
plt.legend()
plt.show()
# part e)
lamda=0.01
model_lasso = skl.Lasso(alpha=lamda).fit(X, z_1)
betas = model_lasso.coef_
zlasso = model_lasso.predict(X)
print(MSE(z_1,zlasso))
print(R2(z_1,zlasso))
e = k_fold(5,x,y,z,5,skl.Lasso(alpha=lamda))
print(e[0])
lamdas = [0.001, 0.01, 0.1, 1]
def fold_degree_r(x,y,z,k):
lamdas = [0.001, 0.01, 0.1, 1]
error = np.zeros(len(lamdas))
bias = np.zeros(len(lamdas))
variance = np.zeros(len(lamdas))
polylamda = np.zeros(len(lamdas))
for lamda in lamdas:
lamda_fold = k_fold(k, x, y, z, 5, skl.Lasso(alpha=lamda))
error_ = lamda_fold[0]
bias_ = lamda_fold[2]
#print(bias_)
variance_ = lamda_fold[3]
# print('AAA')
#print(lamdas.index(lamda))
polylamda[lamdas.index(lamda)] = lamda
error[lamdas.index(lamda)] = error_
bias[lamdas.index(lamda)] = bias_
variance[lamdas.index(lamda)] = variance_
return (polylamda, error, bias, variance)
f = fold_degree_r(x, y, z, 5)
print(f[1], f[2])
plt.plot(f[0], f[1], label='Error')
plt.plot(f[0], f[2], label='bias')
plt.plot(f[0], f[3], label='Variance')
plt.legend()
plt.show()
n_boostraps = 100
error_test = np.zeros(len(lamdas))
bias___ = np.zeros(len(lamdas))
variance___ = np.zeros(len(lamdas))
polylamda = np.zeros(len(lamdas))
error_train = np.zeros(len(lamdas))
x_train, x_test, y_train, y_test, z_train, z_test = train_test_split(x, y, z, test_size=0.2, shuffle=True)
z_test1 = np.zeros((200,100))
z_train1 = np.zeros((800,100))
for i in range(100):
z_test1[:,i]=z_test
for lamda in lamdas:
model = skl.Lasso(alpha=lamda)
z_pred = np.empty((z_test.shape[0],n_boostraps))
z_pred_train = np.empty((z_train.shape[0],n_boostraps))
for i in range(n_boostraps):
x_, y_, z_ = resample(x_train, y_train, z_train)
z_train1[:,i] = z_
X_train = Design_Matrix_X(x_,y_,5)
X_test= Design_Matrix_X(x_test,y_test,5)
z_pred[:, i] = model.fit(X_train, z_).predict(X_test).ravel()
z_pred_train[:, i] = model.fit(X_train, z_).predict(X_train).ravel()
polylamda[lamdas.index(lamda)] = lamda
error_test[lamdas.index(lamda)] = np.mean(np.mean((z_test1 - z_pred)**2 , axis=1, keepdims=True))
bias___[lamdas.index(lamda)] = np.mean( (z_test1 - np.mean(z_pred, axis=1, keepdims=True))**2 )
variance___[lamdas.index(lamda)] = np.mean( np.var(z_pred, axis=1, keepdims=True))
error_train[lamdas.index(lamda)] = np.mean(np.mean((z_train1 - z_pred_train)**2 , axis=1, keepdims=True))
print(lamda)
print(error_test)
print(bias___)
print(variance___)
print(bias___+variance___)
plt.plot(error_test, label='Error')
plt.semilogx(lamdas, error_test)
print(lamdas)
print(error_test)
plt.xlabel('lamdas')
plt.plot(lamdas, bias___, label='bias')
plt.plot(lamdas, variance___, label='Variance')
plt.legend()
plt.show()
plt.plot(lamdas, error_test, label='Error test')
plt.plot(lamdas, error_train, label='error training')
plt.legend()
plt.show() | [
"[email protected]"
]
| |
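In the regression script above, `Design_Matrix_X(x, y, n)` builds the 2-D polynomial design matrix with `(n+1)(n+2)/2` columns ordered `1, x, y, x^2, xy, y^2, ...`; a quick sanity check (assuming the helper above is in scope):

```python
import numpy as np

x = np.array([0.1, 0.5])
y = np.array([0.2, 0.4])
X = Design_Matrix_X(x, y, n=2)          # degree 2 -> 6 columns
assert X.shape == (2, 6)
np.testing.assert_allclose(X[0], [1, 0.1, 0.2, 0.01, 0.02, 0.04])
```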
1ab2dafa56e225f40ea46f42f12efa3c77ff3108 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/51/usersdata/120/20281/submittedfiles/listas.py | 396d08af8cee48625cc848246acf170897a490a0 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math


def degrau(lista):
    maior = 0
    for i in range(0, len(lista)-1, 1):
        degrau = math.fabs(lista[i]-lista[i+1])
        if degrau > maior:
            maior = degrau
    return maior


a = []
n = input('insira o numero de termos da lista:')
for i in range(0, n, 1):
    a.append(input('digite um elemento de a:'))
print degrau(a)
| [
"[email protected]"
]
| |
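After the fixes, `degrau` returns the largest absolute difference between consecutive elements; for example `degrau([1, 4, 2, 9])` evaluates to `7.0`.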
d28272660bc611a21f54875f44785e95cb7bd842 | d27af9d58b91b8cd998ac0eb87d980d304ff0670 | /Beginner-Contest/ABC003/ABC003_B.py | c3ac6ff2bfbccf826db9fcbc15c453d1c64bd7f8 | []
| no_license | mongesan/Atcoder-m0_ngesan-py | 29dd79daab149003ffc8b6b6bad5fa2e7daa9646 | 6654af034d4ff4cece1be04c2c8b756976d99a4b | refs/heads/master | 2023-08-20T19:50:04.547025 | 2021-10-27T12:24:51 | 2021-10-27T12:24:51 | 258,486,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9 | py | #ABC003_B | [
"[email protected]"
]
| |
10b048dcdfba609dde36b77f50d00d1d7bdb14c4 | ba7134468cb18014fe2e3e1513382fa52aafd4eb | /03_Python_network_programming/003_HTTP_Web服务器/004_Web静态服务器_多线程threading.Thread_多任务高并发.py | e24ff1c71c9bed28feb84b9f632b5cd39fd8d1ff | []
| no_license | FelixZFB/Python_advanced_learning | 4e44616b390e1c6e7da37229c7ad48c069cee71b | a71a6d733ed2134a79f02a6488807862b23438b8 | refs/heads/master | 2021-06-27T11:15:07.754719 | 2020-11-20T02:41:25 | 2020-11-20T02:41:25 | 183,116,714 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,039 | py | # -*- coding:utf-8 -*-
# Create a web server that returns the requested page after each client request.
# The code below already prefixes requests with the html folder path,
# so once one page is open, the links inside it can be opened as well.
# The program extracts the file name from the request, looks for the matching
# file in the html folder, and then the browser displays it.
import socket
import re
import threading


def service_client(new_socket):
    "Serve one client: send the requested data back to this client"
    # 1. Receive the request sent by the browser, i.e. the HTTP request
    request_data = new_socket.recv(1024).decode("utf-8")
    # Split the request message into a list of lines
    request_header_lines = request_data.splitlines()
    # Print the request message, one header line at a time
    for line in request_header_lines:
        print(line)
    # Extract the name of the requested page, i.e. the part after the /
    # First take the first line of the request header
    request_line = request_header_lines[0]
    # The first request line extracted above is: GET /index.html HTTP/1.1
    # Match any characters other than /, repeated, which effectively skips over GET,
    # then match the first / and every non-space character after it, i.e. up to the
    # end of .html where a space follows.
    # The part starting at the / is captured as a group, and that group is /index.html
    # group(0) is the whole match: GET /index.html
    # group(1) is the first group: /index.html
    get_file_name = re.match("[^/]+(/[^ ]*)", request_line).group(1)
    # Prepend the folder path; all pages live in the html folder
    get_file_name = "./html" + get_file_name  # ./html/index.html
    print("file name is ===>%s" % get_file_name)
    print('*' * 50)
    # 2. Return HTTP-formatted data to the browser
    # The requested page may not exist, so wrap the open() in a try statement
    try:
        f = open(get_file_name, 'rb')
    except:
        response_header = "HTTP/1.1 404 not found\r\n"
        response_header += "\r\n"
        response_body = "====sorry ,file not found====".encode("utf-8")
    else:
        # 2.1 Build the response header; header lines are terminated with \r\n
        response_header = "HTTP/1.1 200 OK\r\n"  # 200 means the resource was found
        response_header += "\r\n"  # an empty line separates the header from the body
        # Build the content (body):
        # return a locally prepared front-end HTML page
        response_body = f.read()
        f.close()
    finally:
        # 2.2 Assemble the response and send it; the body is no longer a plain string,
        # so it cannot simply be concatenated -- send header and body separately
        # response = response_header + response_body
        # Send the header first
        new_socket.send(response_header.encode("utf-8"))
        # Then send the body
        new_socket.send(response_body)
    # 3. Close the client socket
    new_socket.close()


def main():
    "Main entry point of the program: overall control"
    # 1. Create a TCP socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Release the port as soon as the server closes (right after its 4-way teardown),
    # so the next run of the program can bind port 7788 immediately
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # 2. Bind the server to the local IP address and port
    server_socket.bind(("", 7788))
    # 3. Turn it into a listening socket
    server_socket.listen(128)
    # Loop so the server keeps running and can accept new client requests;
    # the browser can keep requesting the server by refreshing
    while True:
        # 4. Wait for a new client connection; returns a new per-client socket
        new_socket, client_addr = server_socket.accept()
        # 5. Serve this client in a new thread; every new request creates another
        #    thread (note: the trailing comma in args must not be omitted)
        new_process = threading.Thread(target=service_client, args=(new_socket, ))
        new_process.start()
        # Note: threads do not copy new_socket, they share this variable, so do not close it here


if __name__ == "__main__":
    main()

# Run the program, open a browser and visit: http://127.0.0.1:7788/index.html
# Result in the browser:
# an HTML page is displayed.
# If you visit an arbitrary URL such as http://127.0.0.1:7788/index555.html,
# QQ Browser reports "unable to display this page, error code HTTP ERROR 404"
# and Firefox shows no content at all.
# The printed request header:
# GET /index.html HTTP/1.1
# Host: 127.0.0.1:7788
# Connection: keep-alive
# Upgrade-Insecure-Requests: 1
# User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3719.400 QQBrowser/10.5.3715.400
# Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
# Accept-Encoding: gzip, deflate, br
# Accept-Language: zh-CN,zh;q=0.9
#
# file name is ===>./html/index.html
# ************************************************** | [
"[email protected]"
]
| |
b601ab4cd9d6945c2924065d897b8602b0755205 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/103_Tableaux/fusion.py | 580e9aa10bfa32f0c6630f48ce9e9bebfdb0224a | []
| no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | # fusion.py
# merging of sorted lists
# Programmer efficacement, chap. 4
# My implementation, written without looking at the book's!
# 2022-05-25 PV
def fusion(l1: list[int], l2: list[int]) -> list[int]:
    f = []
    len1 = len(l1)
    len2 = len(l2)
    i1 = i2 = 0
    while i1 < len1 and i2 < len2:
        if l1[i1] <= l2[i2]:
            f.append(l1[i1])
            i1 += 1
        else:
            f.append(l2[i2])
            i2 += 1
    f.extend(l1[i1:])
    f.extend(l2[i2:])
    return f


# For verification
def is_sorted(l: list[int]) -> bool:
    return all(l[i-1] <= l[i] for i in range(1, len(l)))
# assert(is_sorted([1,2,2,3]))
# assert(not is_sorted([4,1,2]))
# assert(is_sorted([0]))
# assert(is_sorted([]))
l1 = list(i*5 for i in range(15))
l2 = list(i*7 for i in range(12))
print(l1)
print(l2)
f = fusion(l1, l2)
print(f)
assert(len(f) == len(l1)+len(l2))
assert(all(x in f for x in l1))
assert(all(x in f for x in l2))
assert(is_sorted(f))
| [
"[email protected]"
]
| |
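As a one-line check of the merge above: `fusion([1, 3, 5], [2, 4])` returns `[1, 2, 3, 4, 5]`, and `is_sorted` confirms it.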
39c9cae21ab1ee0cf7cdede5d4282ed5af383a1c | f28bf07217731a8d97fa3d7029df11b2a0506650 | /maddpg/buffer.py | 78c8abeac3863a7af9c51688bc94145d155c2fbf | [
"MIT"
]
| permissive | postBG/deep-reinforcement-learning | 7465f7698d91363c8bacd791467f1dbb44bee9a9 | 5df5662b091c4c3f00beba1aa6f9ce8a52001c93 | refs/heads/master | 2022-12-18T20:06:08.245460 | 2021-09-05T09:26:17 | 2021-09-05T09:26:17 | 169,988,821 | 2 | 0 | MIT | 2022-12-08T01:44:34 | 2019-02-10T14:48:16 | Jupyter Notebook | UTF-8 | Python | false | false | 666 | py | from collections import deque
import random
from utilities import transpose_list
class ReplayBuffer:
    def __init__(self, size):
        self.size = size
        self.deque = deque(maxlen=self.size)

    def push(self, transition):
        """push into the buffer"""
        input_to_buffer = transpose_list(transition)
        for item in input_to_buffer:
            self.deque.append(item)

    def sample(self, batchsize):
        """sample from the buffer"""
        samples = random.sample(self.deque, batchsize)
        # transpose list of list
        return transpose_list(samples)

    def __len__(self):
        return len(self.deque)
| [
"[email protected]"
]
| |
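A usage sketch for the `ReplayBuffer` above (illustrative; `transpose_list` comes from the repository's own `utilities` module, and the shape of `transition` is an assumption):

```python
buffer = ReplayBuffer(int(1e5))
buffer.push(transition)      # transition: a tuple of per-field lists, one entry per parallel env
batch = buffer.sample(128)   # re-transposed into per-field lists of 128 samples
print(len(buffer))
```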
71f85e3a685ada74364b5df598a424a483de3dc9 | d7e160a2512b9d70b18adbffde4c6d9a61521a12 | /DFS/타겟 넘버.py | 4b22f473b59b4c2309755e984a9b52cd37ed5ce7 | []
| no_license | EoJin-Kim/CodingTest | 14b6cf7a3bb45954c065efdf9d1e05143cb321a3 | 975c753ee572f605f4d9a12a3dc54ab0d437dade | refs/heads/master | 2023-06-19T16:06:50.625143 | 2021-07-14T13:10:17 | 2021-07-14T13:10:17 | 356,877,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from itertools import permutations,product
'''
def solution(numbers, target):
    answer = 0
    length = len(numbers)
    expression = [(0,1) for i in range(length)]
    for exp in product(*expression):
        result=0
        for i in range(length):
            if exp[i]==0:
                result+=-numbers[i]
            else:
                result+=numbers[i]
        if result==target:
            answer+=1
    return answer
'''
answer=0
def solution(numbers, target):
    global answer
    dfs(0,numbers,0,target)
    return answer

def dfs(idx,numbers,temp,target):
    global answer
    length = len(numbers)
    if idx == length and temp==target:
        answer+=1
        return
    if idx==length:
        return
    dfs(idx + 1, numbers, temp - numbers[idx],target)
    dfs(idx + 1, numbers, temp + numbers[idx],target)

print(solution([1, 1, 1, 1, 1],3)) | [
"[email protected]"
]
| |
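For reference, the driver call at the end prints `5`: with `[1, 1, 1, 1, 1]` and target 3 there are exactly five sign assignments (four `+`, one `-`) whose sum is 3.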
5e414e62692567069edb6c5a647221bd64902bba | aea8fea216234fd48269e4a1830b345c52d85de2 | /fhir/resources/devicemetric.py | ed3ffc3f990d216dd34ff39c1317ddd8efaf505f | [
"BSD-3-Clause"
]
| permissive | mmabey/fhir.resources | 67fce95c6b35bfdc3cbbc8036e02c962a6a7340c | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | refs/heads/master | 2023-04-12T15:50:30.104992 | 2020-04-11T17:21:36 | 2020-04-11T17:21:36 | 269,712,884 | 0 | 0 | NOASSERTION | 2020-06-05T17:03:04 | 2020-06-05T17:03:04 | null | UTF-8 | Python | false | false | 6,904 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DeviceMetric
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import sys
from . import backboneelement, domainresource
class DeviceMetric(domainresource.DomainResource):
""" Measurement, calculation or setting capability of a medical device.
Describes a measurement, calculation or setting capability of a medical
device.
"""
resource_type = "DeviceMetric"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.calibration = None
""" Describes the calibrations that have been performed or that are
required to be performed.
List of `DeviceMetricCalibration` items (represented as `dict` in JSON). """
self.category = None
""" measurement | setting | calculation | unspecified.
Type `str`. """
self.color = None
""" black | red | green | yellow | blue | magenta | cyan | white.
Type `str`. """
self.identifier = None
""" Instance identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.measurementPeriod = None
""" Describes the measurement repetition time.
Type `Timing` (represented as `dict` in JSON). """
self.operationalStatus = None
""" on | off | standby | entered-in-error.
Type `str`. """
self.parent = None
""" Describes the link to the parent Device.
Type `FHIRReference` referencing `['Device']` (represented as `dict` in JSON). """
self.source = None
""" Describes the link to the source Device.
Type `FHIRReference` referencing `['Device']` (represented as `dict` in JSON). """
self.type = None
""" Identity of metric, for example Heart Rate or PEEP Setting.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.unit = None
""" Unit of Measure for the Metric.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(DeviceMetric, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DeviceMetric, self).elementProperties()
js.extend(
[
(
"calibration",
"calibration",
DeviceMetricCalibration,
"DeviceMetricCalibration",
True,
None,
False,
),
("category", "category", str, "code", False, None, True),
("color", "color", str, "code", False, None, False),
(
"identifier",
"identifier",
identifier.Identifier,
"Identifier",
True,
None,
False,
),
(
"measurementPeriod",
"measurementPeriod",
timing.Timing,
"Timing",
False,
None,
False,
),
(
"operationalStatus",
"operationalStatus",
str,
"code",
False,
None,
False,
),
(
"parent",
"parent",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"source",
"source",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
(
"unit",
"unit",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class DeviceMetricCalibration(backboneelement.BackboneElement):
""" Describes the calibrations that have been performed or that are required to
be performed.
"""
resource_type = "DeviceMetricCalibration"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.state = None
""" not-calibrated | calibration-required | calibrated | unspecified.
Type `str`. """
self.time = None
""" Describes the time last calibration has been performed.
Type `FHIRDate` (represented as `str` in JSON). """
self.type = None
""" unspecified | offset | gain | two-point.
Type `str`. """
super(DeviceMetricCalibration, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DeviceMetricCalibration, self).elementProperties()
js.extend(
[
("state", "state", str, "code", False, None, False),
("time", "time", fhirdate.FHIRDate, "instant", False, None, False),
("type", "type", str, "code", False, None, False),
]
)
return js
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + ".fhirdate"]
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + ".fhirreference"]
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + ".identifier"]
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + ".timing"]
| [
"[email protected]"
]
| |
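A minimal construction sketch for the generated `DeviceMetric` class above, filling only the two required elements, `category` and `type` (import path assumed from the repository layout):

```python
from fhir.resources.devicemetric import DeviceMetric  # assumed path

dm = DeviceMetric({"category": "measurement", "type": {"text": "Heart Rate"}})
print(dm.category, dm.type.text)  # measurement Heart Rate
```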
c94938e0933471f0917f589c92279f67677f80c3 | 4492b8daf969f839f7803d1af1d3f80858dddd98 | /docs/source/webobapp.py | 28a0fe44c2dea7a8cea687116296c4bdba372815 | []
| no_license | aodag/my-pyramid-katas | 59af3b25dfdf68bcd7434cdcb4258c08c43b1012 | 99cf99ffb646c428cfb3c9d6eec7e593d865d576 | refs/heads/master | 2020-04-06T03:31:59.912229 | 2016-06-12T13:14:07 | 2016-06-12T13:14:07 | 34,158,089 | 0 | 1 | null | 2016-06-12T13:14:09 | 2015-04-18T08:14:55 | null | UTF-8 | Python | false | false | 241 | py | from webob import Request, Response
def application(environ, start_response):
    request = Request(environ)
    response = Response(request=request)
    response.text = "Hello, world!"
    return response(environ, start_response)
| [
"[email protected]"
]
| |
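The WSGI app above can be exercised without a server through WebOb's request factory:

```python
from webob import Request

resp = Request.blank("/").get_response(application)
print(resp.status, resp.text)  # 200 OK Hello, world!
```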
7bad46d3469e4d662073157681c56a6dcbe87753 | a5c33ac6d89b0445ff011d24943d441681fa4af3 | /tests/test_gino_sanic.py | 7f1f80f0573cb99af36c6eabaffd70e15dc209ec | [
"BSD-3-Clause"
]
| permissive | python-gino/gino-sanic | b34490c879d02b163da468d8567bedea70a3afa7 | 7bc7e98989f1936a17f38ec352a3a7dc7d217753 | refs/heads/master | 2021-08-24T13:53:16.353243 | 2020-04-18T17:43:26 | 2020-04-18T17:43:26 | 229,087,530 | 5 | 7 | NOASSERTION | 2021-06-02T06:17:14 | 2019-12-19T15:47:24 | Python | UTF-8 | Python | false | false | 4,512 | py | import asyncio
import os
import ssl
import gino
import pytest
import sanic
from gino.ext.sanic import Gino
from sanic.response import text, json
DB_ARGS = dict(
host=os.getenv("DB_HOST", "localhost"),
port=os.getenv("DB_PORT", 5432),
user=os.getenv("DB_USER", "postgres"),
password=os.getenv("DB_PASS", ""),
database=os.getenv("DB_NAME", "postgres"),
)
PG_URL = "postgresql://{user}:{password}@{host}:{port}/{database}".format(**DB_ARGS)
_MAX_INACTIVE_CONNECTION_LIFETIME = 59.0
def teardown_module():
# sanic server will close the loop during shutdown
asyncio.set_event_loop(asyncio.new_event_loop())
# noinspection PyShadowingNames
async def _app(config):
app = sanic.Sanic()
app.config.update(config)
app.config.update(
{
"DB_KWARGS": dict(
max_inactive_connection_lifetime=_MAX_INACTIVE_CONNECTION_LIFETIME,
),
}
)
db = Gino(app)
class User(db.Model):
__tablename__ = "gino_users"
id = db.Column(db.BigInteger(), primary_key=True)
nickname = db.Column(db.Unicode(), default="noname")
@app.route("/")
async def root(request):
conn = await request["connection"].get_raw_connection()
# noinspection PyProtectedMember
assert conn._holder._max_inactive_time == _MAX_INACTIVE_CONNECTION_LIFETIME
return text("Hello, world!")
@app.route("/users/<uid:int>")
async def get_user(request, uid):
method = request.args.get("method")
q = User.query.where(User.id == uid)
if method == "1":
return json((await q.gino.first_or_404()).to_dict())
elif method == "2":
return json((await request["connection"].first_or_404(q)).to_dict())
elif method == "3":
return json((await db.bind.first_or_404(q)).to_dict())
elif method == "4":
return json((await db.first_or_404(q)).to_dict())
else:
return json((await User.get_or_404(uid)).to_dict())
@app.route("/users", methods=["POST"])
async def add_user(request):
u = await User.create(nickname=request.form.get("name"))
await u.query.gino.first_or_404()
await db.first_or_404(u.query)
await db.bind.first_or_404(u.query)
await request["connection"].first_or_404(u.query)
return json(u.to_dict())
e = await gino.create_engine(PG_URL)
try:
try:
await db.gino.create_all(e)
yield app
finally:
await db.gino.drop_all(e)
finally:
await e.close()
@pytest.fixture
def ssl_ctx():
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
return ctx
@pytest.fixture
async def app():
async for a in _app(
{
"DB_HOST": DB_ARGS["host"],
"DB_PORT": DB_ARGS["port"],
"DB_USER": DB_ARGS["user"],
"DB_PASSWORD": DB_ARGS["password"],
"DB_DATABASE": DB_ARGS["database"],
}
):
yield a
@pytest.fixture
async def app_ssl(ssl_ctx):
async for a in _app(
{
"DB_HOST": DB_ARGS["host"],
"DB_PORT": DB_ARGS["port"],
"DB_USER": DB_ARGS["user"],
"DB_PASSWORD": DB_ARGS["password"],
"DB_DATABASE": DB_ARGS["database"],
"DB_SSL": ssl_ctx,
}
):
yield a
@pytest.fixture
async def app_dsn():
async for a in _app({"DB_DSN": PG_URL}):
yield a
def _test_index_returns_200(app):
request, response = app.test_client.get("/")
assert response.status == 200
assert response.text == "Hello, world!"
def test_index_returns_200(app):
_test_index_returns_200(app)
def test_index_returns_200_dsn(app_dsn):
_test_index_returns_200(app_dsn)
def _test(app):
for method in "01234":
request, response = app.test_client.get("/users/1?method=" + method)
assert response.status == 404
request, response = app.test_client.post("/users", data=dict(name="fantix"))
assert response.status == 200
assert response.json == dict(id=1, nickname="fantix")
for method in "01234":
request, response = app.test_client.get("/users/1?method=" + method)
assert response.status == 200
assert response.json == dict(id=1, nickname="fantix")
def test(app):
_test(app)
def test_ssl(app_ssl):
_test(app_ssl)
def test_dsn(app_dsn):
_test(app_dsn)
| [
"[email protected]"
]
| |
cf097de52c3b6aa3b00f61889614ee3666b50615 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/ml_engine/versions/__init__.py | c540906a54c9770851a877bd986b742229cbf2d1 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 1,229 | py | # -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml-engine versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Versions(base.Group):
"""Cloud ML Engine Versions commands.
A version is an implementation of a model, represented as a serialized
TensorFlow graph with trained parameters.
When you communicate with Cloud ML Engine services, you use the combination
of the model, version, and current project to identify a specific model
implementation that is deployed in the cloud.
"""
| [
"[email protected]"
]
| |
2982561042ff6a5c182705c130d0c7657f1b9216 | 1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2 | /stubs/google/appengine/api/taskqueue/taskqueue_stub.pyi | 7fd103268159098ac1c4bf312540530a305e7979 | [
"MIT"
]
| permissive | the-blue-alliance/the-blue-alliance | 3dc210a9611ce9b240907ffd420f78040318dcdc | 6d42f3cdb2f785d192f2871419e58aaae3445029 | refs/heads/py3 | 2023-08-22T21:02:36.398100 | 2023-08-22T19:14:01 | 2023-08-22T19:14:01 | 888,427 | 344 | 263 | MIT | 2023-09-14T18:35:20 | 2010-09-04T20:34:11 | HTML | UTF-8 | Python | false | false | 5,278 | pyi | from google.appengine.api import api_base_pb2 as api_base_pb2, apiproxy_stub as apiproxy_stub, apiproxy_stub_map as apiproxy_stub_map, queueinfo as queueinfo, request_info as request_info
from google.appengine.api.taskqueue import taskqueue as taskqueue
from google.appengine.runtime import apiproxy_errors as apiproxy_errors
from google.appengine.tools import queue_xml_parser as queue_xml_parser
from typing import Any
DEFAULT_RATE: str
DEFAULT_RATE_FLOAT: float
DEFAULT_BUCKET_SIZE: int
MAX_ETA: Any
MAX_PULL_TASK_SIZE_BYTES: Any
MAX_PUSH_TASK_SIZE_BYTES: Any
MAX_TASK_SIZE = MAX_PUSH_TASK_SIZE_BYTES
MAX_REQUEST_SIZE: Any
BUILT_IN_HEADERS: Any
DEFAULT_QUEUE_NAME: str
INF: float
QUEUE_MODE: Any
AUTOMATIC_QUEUES: Any
TIME_STR_FMT: str
def QueryTasksResponseToDict(queue_name, task_response, now, task_add_request_pb: Any | None = ...): ...
def ConvertGetQueuesResponseToQueuesDicts(response): ...
def ConvertTaskDictToTaskObject(task): ...
class _Group:
gettime: Any
def __init__(self, queue_yaml_parser: Any | None = ..., app_id: Any | None = ..., _all_queues_valid: bool = ..., _update_newest_eta: Any | None = ..., _testing_validate_state: bool = ..., gettime=...): ...
def GetQueuesAsDicts(self): ...
def HasQueue(self, queue_name): ...
def GetQueue(self, queue_name): ...
def GetQueues(self): ...
def GetNextPushTask(self): ...
def BulkAdd_Rpc(self, request, response) -> None: ...
def UpdateQueue_Rpc(self, request, response) -> None: ...
def FetchQueues_Rpc(self, request, response) -> None: ...
def FetchQueueStats_Rpc(self, request, response) -> None: ...
def QueryTasks_Rpc(self, request, response) -> None: ...
def FetchTask_Rpc(self, request, response) -> None: ...
def Delete_Rpc(self, request, response) -> None: ...
def DeleteQueue_Rpc(self, request, response) -> None: ...
def PauseQueue_Rpc(self, request, response) -> None: ...
def PurgeQueue_Rpc(self, request, response) -> None: ...
def QueryAndOwnTasks_Rpc(self, request, response) -> None: ...
def ModifyTaskLease_Rpc(self, request, response) -> None: ...
class Retry:
def __init__(self, task, queue) -> None: ...
def CanRetry(self, retry_count, age_usec): ...
def CalculateBackoffUsec(self, retry_count): ...
class _Queue:
queue_name: Any
bucket_refill_per_second: Any
bucket_capacity: Any
user_specified_rate: Any
retry_parameters: Any
max_concurrent_requests: Any
paused: Any
queue_mode: Any
acl: Any
target: Any
gettime: Any
task_name_archive: Any
task_add_request_pbs: Any
def __init__(self, queue_name, bucket_refill_per_second=..., bucket_capacity=..., user_specified_rate=..., retry_parameters: Any | None = ..., max_concurrent_requests: Any | None = ..., paused: bool = ..., queue_mode=..., acl: Any | None = ..., _testing_validate_state: Any | None = ..., target: Any | None = ..., gettime=...): ...
def VerifyIndexes(self) -> None: ...
def UpdateQueue_Rpc(self, request, response) -> None: ...
def FetchQueues_Rpc(self, request, response) -> None: ...
def QueryTasks_Rpc(self, request, response) -> None: ...
def FetchTask_Rpc(self, request, response) -> None: ...
def Delete_Rpc(self, request, response) -> None: ...
def QueryAndOwnTasks_Rpc(self, request, response) -> None: ...
def ModifyTaskLease_Rpc(self, request, response) -> None: ...
def IncRetryCount(self, task_name) -> None: ...
def GetTasksAsDicts(self): ...
def GetTaskAsDict(self, task_name): ...
def PurgeQueue(self) -> None: ...
def RunTaskNow(self, task) -> None: ...
def PostponeTask(self, task, new_eta_usec) -> None: ...
def Lookup(self, maximum, name: Any | None = ..., eta: Any | None = ...): ...
def Count(self): ...
def OldestTask(self): ...
def Oldest(self): ...
def Add(self, request, now) -> None: ...
def Delete(self, name): ...
def Populate(self, num_tasks): ...
class _TaskExecutor:
def __init__(self, default_host, request_data) -> None: ...
def ExecuteTask(self, task, queue): ...
class _BackgroundTaskScheduler:
task_executor: Any
default_retry_seconds: Any
def __init__(self, group, task_executor, retry_seconds, **kwargs) -> None: ...
def UpdateNextEventTime(self, next_event_time) -> None: ...
def Shutdown(self) -> None: ...
def MainLoop(self) -> None: ...
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
THREADSAFE: bool
gettime: Any
def __init__(self, service_name: str = ..., root_path: Any | None = ..., queue_config_path: Any | None = ..., auto_task_running: bool = ..., task_retry_seconds: int = ..., _all_queues_valid: bool = ..., default_http_server: str = ..., _testing_validate_state: bool = ..., request_data: Any | None = ..., gettime=...): ...
def EnableAutoTaskRunning(self) -> None: ...
def StartBackgroundExecution(self) -> None: ...
def Shutdown(self) -> None: ...
def GetQueues(self): ...
def GetTasks(self, queue_name): ...
def DeleteTask(self, queue_name, task_name) -> None: ...
def FlushQueue(self, queue_name) -> None: ...
def Clear(self): ...
def get_filtered_tasks(self, url: Any | None = ..., name: Any | None = ..., queue_names: Any | None = ...): ...
| [
"[email protected]"
]
| |
c161cc50cda53afee625d9ee9fece3ab6a44a0f4 | bc550f6966e30de27987bc803b2447bf02a2e44b | /task/Task.py | 3686337943d2de5eaa7959a023885159c81b507d | []
| no_license | v-komarov/psv3 | afe2a50a5498ee66f4146802ecbbb62bef5a9173 | deca97a9fac0865163f7c2d4fd5110caccb00a80 | refs/heads/master | 2021-01-18T10:52:35.444429 | 2016-06-06T09:19:27 | 2016-06-06T09:19:27 | 59,651,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,602 | py | #coding:utf-8
""" Изменение заявки """
import wx
import DBTools
import wx.lib.masked as masked
from WList import ListTaskWorker
from WList import ChWorker
from MList import ChMate
from MList import ListTaskMate
from task.RunSQL import GetListNameTask
from task.RunSQL import GetTask
from task.RunSQL import GetListStatus
from task.RunSQL import GetListFIO
from task.RunSQL import EditTask as TaskEdit
from task.RunSQL import AddTaskWorker
from task.RunSQL import DelTaskWorker
from task.RunSQL import GetListTaskWorker
from task.RunSQL import AddTaskMate
from task.RunSQL import DelTaskMate
from tools.Messages import NotAccess
from tools.Messages import ErrorData
from tools.Messages import SaveDone
class EditTask(wx.Dialog):
def __init__(
self, parent, ID, title, size=wx.DefaultSize, pos=wx.DefaultPosition,
style=wx.DEFAULT_DIALOG_STYLE, kod_rec='NONE'):
self.kod_rec = kod_rec
self.fio = ''
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, ID, title, pos, size, style)
self.PostCreate(pre)
tID = wx.NewId()
tID2 = wx.NewId()
sizer = wx.BoxSizer(wx.VERTICAL)
        #### ---- UI row with the creation date and time of this request ---
box = wx.BoxSizer(wx.HORIZONTAL)
label0 = wx.StaticText(self, -1, "Дата и время создания заявки: ")
self.field_00 = wx.TextCtrl(self, -1, "", size=(150,-1), style=wx.TE_READONLY)
box.Add(label0, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(self.field_00, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
        #### ---- UI row with the closing date and time of this request ---
box = wx.BoxSizer(wx.HORIZONTAL)
label000 = wx.StaticText(self, -1, "Дата и время закрытия заявки: ")
self.field_000 = wx.TextCtrl(self, -1, "", size=(150,-1), style=wx.TE_READONLY)
box.Add(label000, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(self.field_000, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
        #### ---- UI row with the request date and time ---
box = wx.BoxSizer(wx.HORIZONTAL)
label1 = wx.StaticText(self, -1, "Дата")
self.field_0 = wx.DatePickerCtrl(self, -1, size=(120,-1), style=wx.DP_DROPDOWN|wx.DP_SHOWCENTURY)
label2 = wx.StaticText(self, -1, "Время")
self.mytime = masked.TimeCtrl(self, -1, name="", fmt24hr=True, display_seconds = False)
label3 = wx.StaticText(self, -1, "Статус")
self.field_1 = wx.ComboBox(self, -1, "", size=(150,-1), choices=GetListStatus(), style=wx.CB_READONLY)
label4 = wx.StaticText(self, -1, "Тип")
self.field_2 = wx.ComboBox(self, -1, "", size=(150,-1), choices=['РЕМОНТ','МОНТАЖ'], style=wx.CB_READONLY)
box.Add(label1, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(self.field_0, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(label2, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(self.mytime, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(label3, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(self.field_1, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(label4, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(self.field_2, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
        #### ---- UI row with the request text ---
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Заявка")
self.field_3 = wx.ComboBox(self, -1, "", size=(500,-1), choices=GetListNameTask())
box.Add(label, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_3, 0, wx.ALIGN_LEFT|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_LEFT|wx.ALL, 5)
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTRE, 5)
        #### --- Request address ---
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Улица")
label2 = wx.StaticText(self, -1, "Дом")
label3 = wx.StaticText(self, -1, "Квартира")
label4 = wx.StaticText(self, -1, "Подъезд")
label5 = wx.StaticText(self, -1, "Телефон")
self.field_4 = wx.TextCtrl(self, -1, "", size=(150,-1), style=wx.TE_READONLY)
self.field_5 = wx.TextCtrl(self, -1, "", size=(50,-1), style=wx.TE_READONLY)
self.field_6 = wx.TextCtrl(self, -1, "", size=(50,-1), style=wx.TE_READONLY)
self.field_7 = wx.TextCtrl(self, -1, "", size=(50,-1))
self.field_8 = wx.TextCtrl(self, -1, "", size=(150,-1))
box.Add(label, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_4, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(label2, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_5, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(label3, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_6, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(label4, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_7, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(label5, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_8, 0, wx.ALIGN_LEFT|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        #### --- Number of workers, man-hours ----
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Планируемые чел./часы")
label2 = wx.StaticText(self, -1, "Количество исполнителей")
label3 = wx.StaticText(self, -1, "Фактическое чел./часы")
self.field_9 = wx.TextCtrl(self, -1, "", size=(50,-1))
self.field_10 = wx.TextCtrl(self, -1, "", size=(50,-1))
self.field_11 = wx.TextCtrl(self, -1, "", size=(50,-1))
box.Add(label, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_9, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(label2, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_10, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(label3, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_11, 0, wx.ALIGN_LEFT|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        #### --- Notes ----
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Примечание")
self.field_12 = wx.TextCtrl(self, -1, "", size=(600,100), style=wx.TE_MULTILINE)
box.Add(label, 0, wx.ALIGN_LEFT|wx.ALL, 5)
box.Add(self.field_12, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        #### ---- Control buttons ----
btnsizer = wx.BoxSizer(wx.VERTICAL)
btn = wx.Button(self, wx.ID_SAVE)
btn2 = wx.Button(self, wx.ID_CLOSE)
btnsizer.Add(btn, 0, wx.ALIGN_CENTRE|wx.ALL|wx.GROW, 5)
btnsizer.Add(btn2, 0, wx.ALIGN_CENTRE|wx.ALL|wx.GROW, 5)
box.Add(btnsizer, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_LEFT|wx.ALL, 5)
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTRE, 5)
        #### --- Assigned workers ----
box = wx.BoxSizer(wx.HORIZONTAL)
box_i = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, "Исполнители")
self.ctrl0 = ListTaskWorker(self,tID,style=wx.LC_REPORT|wx.LC_SORT_ASCENDING)
self.ctrl0.Populate(self.kod_rec)
self.ctrl0.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
box_i.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box_i.Add(self.ctrl0, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn5 = wx.Button(self, 101, "Добавить")
btn6 = wx.Button(self, 102, "Удалить")
btnsizer.Add(btn5, 0, wx.ALIGN_CENTRE|wx.ALL|wx.GROW, 5)
btnsizer.Add(btn6, 0, wx.ALIGN_CENTRE|wx.ALL|wx.GROW, 5)
box_i.Add(btnsizer, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(box_i, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box_ii = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, "Материалы")
self.ctrl1 = ListTaskMate(self,tID,style=wx.LC_REPORT|wx.LC_SORT_ASCENDING)
self.ctrl1.Populate(self.kod_rec)
self.ctrl1.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
box_ii.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box_ii.Add(self.ctrl1, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn7 = wx.Button(self, 201, "Добавить")
btn8 = wx.Button(self, 202, "Удалить")
btnsizer.Add(btn7, 0, wx.ALIGN_CENTRE|wx.ALL|wx.GROW, 5)
btnsizer.Add(btn8, 0, wx.ALIGN_CENTRE|wx.ALL|wx.GROW, 5)
box_ii.Add(btnsizer, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
box.Add(box_ii, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
btnsizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
sizer.Fit(self)
self.ShowValue()
self.Bind(wx.EVT_BUTTON, self.Save, btn)
self.Bind(wx.EVT_BUTTON, self.Close, btn2)
self.Bind(wx.EVT_BUTTON, self.AddWorker, btn5)
self.Bind(wx.EVT_BUTTON, self.DelWorker, btn6)
self.Bind(wx.EVT_BUTTON, self.AddMate, btn7)
self.Bind(wx.EVT_BUTTON, self.DelMate, btn8)
self.Bind(wx.EVT_LISTBOX, self.EvtListBox, self.field_12)
self.ctrl0.Bind(wx.EVT_LIST_ITEM_SELECTED, self.ReadItem, self.ctrl0)
self.ctrl1.Bind(wx.EVT_LIST_ITEM_SELECTED, self.ReadItem2, self.ctrl0)
    #### --- Remember the currently selected row ---
def ReadItem(self,event):
self.ctrl0.currentItem = event.m_itemIndex
    #### --- Remember the currently selected row ---
def ReadItem2(self,event):
self.ctrl1.currentItem = event.m_itemIndex
    #### --- List box event handler ---
def EvtListBox(self,evt):
self.fio = evt.GetString()
    #### ---- Close the form ---
def Close(self,evt):
self.Destroy()
    #### ---- Save the data ---
def Save(self,evt):
date0 = str(self.field_0.GetValue().GetYear())+'-' + str(self.field_0.GetValue().GetMonth()+1) +'-'+ str(self.field_0.GetValue().GetDay()) + ' ' + self.mytime.GetValue()
if self.field_2.GetValue().encode("utf-8") == 'НЕТ':
kod_type = 0
elif self.field_2.GetValue().encode("utf-8") == 'РЕМОНТ':
kod_type = 1
elif self.field_2.GetValue().encode("utf-8") == 'МОНТАЖ':
kod_type = 2
result = TaskEdit(self.kod_rec, date0, self.field_1.GetValue(), kod_type, self.field_3.GetValue(), self.field_7.GetValue(), self.field_8.GetValue(), self.field_9.GetValue(), self.field_10.GetValue(), self.field_11.GetValue(), self.field_12.GetValue())
if result == 'ERRORDATA':
ErrorData(self)
elif result == 'NOTACCESS':
NotAccess(self)
self.ShowValue()
elif result == 'OK':
SaveDone(self)
self.ShowValue()
    #### ---- Add a worker ----
def AddWorker(self,evt):
dlg = ChWorker(self,-1,'Выбор исполнителя',size=(400,250),style=wx.DEFAULT_DIALOG_STYLE)
if dlg.ShowModal() == wx.ID_OK:
row_id = dlg.ctrl0.kod_record[dlg.ctrl0.currentItem]
result = AddTaskWorker(self.kod_rec,row_id)
if result == 'ERRORDATA':
ErrorData(self)
elif result == 'NOTACCESS':
NotAccess(self)
elif result == 'OK':
self.ctrl0.Populate(self.kod_rec)
self.ctrl0.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
dlg.Destroy()
    #### ---- Remove a worker ----
def DelWorker(self,evt):
dlg = wx.MessageDialog(self,"Удалить исполнителя?","Удаление",style=wx.YES_NO)
if dlg.ShowModal() == wx.ID_YES:
row_id = self.ctrl0.kod_record[self.ctrl0.currentItem]
result = DelTaskWorker(self.kod_rec,row_id)
if result == 'OK':
self.ctrl0.Populate(self.kod_rec)
self.ctrl0.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
elif result == 'NOTACCESS':
NotAccess(self)
elif result == 'ERRORDATA':
ErrorData(self)
dlg.Destroy()
    #### ---- Add a material ----
def AddMate(self,evt):
dlg = ChMate(self,-1,'Выбор материала',size=(400,250),style=wx.DEFAULT_DIALOG_STYLE)
if dlg.ShowModal() == wx.ID_OK:
row_id = dlg.ctrl0.kod_record[dlg.ctrl0.currentItem]
result = AddTaskMate(row_id,row_id.split('#')[1],row_id.split('#')[0],dlg.field_1.GetValue(),self.kod_rec)
if result == 'ERRORDATA':
ErrorData(self)
elif result == 'NOTACCESS':
NotAccess(self)
elif result == 'OK':
self.ctrl1.Populate(self.kod_rec)
self.ctrl1.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
dlg.Destroy()
    #### ---- Remove a material ----
def DelMate(self,evt):
dlg = wx.MessageDialog(self,"Удалить материал?","Удаление",style=wx.YES_NO)
if dlg.ShowModal() == wx.ID_YES:
row_id = self.ctrl1.kod_record[self.ctrl1.currentItem]
result = DelTaskMate(row_id)
if result == 'OK':
self.ctrl1.Populate(self.kod_rec)
self.ctrl1.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
elif result == 'NOTACCESS':
NotAccess(self)
elif result == 'ERRORDATA':
ErrorData(self)
dlg.Destroy()
    #### ---- Load data into the form ----
def ShowValue(self):
r = GetTask(self.kod_rec)
        ## --- Date and time the request was created ---
self.field_00.SetValue(r[19])
        ## --- Date and time the request was closed ---
if r[21] == 1:
self.field_000.SetValue(r[22])
self.SetTitle("Заявка закрыта")
        ## --- Request has been deleted ---
if r[20] != '':
self.SetTitle("Заявка удалена!")
d0 = wx.DateTime()
d0.SetYear(r[4])
d0.SetMonth(r[5]-1)
d0.SetDay(r[6])
self.mytime.SetValue(r[3])
self.field_0.SetValue(d0)
self.field_1.SetValue(r[7])
if r[8] == 0:
self.field_2.SetValue('НЕТ')
elif r[8] == 1:
self.field_2.SetValue('РЕМОНТ')
elif r[8] == 2:
self.field_2.SetValue('МОНТАЖ')
self.field_3.SetValue(r[9])
self.field_4.SetValue(r[10])
self.field_5.SetValue(r[11])
self.field_6.SetValue(r[12])
self.field_7.SetValue(r[13])
self.field_8.SetValue(r[14])
self.field_9.SetValue(str(r[15]))
self.field_10.SetValue(str(r[16]))
self.field_11.SetValue(str(r[17]))
self.field_12.SetValue(r[18])
| [
"[email protected]"
]
| |
916a1b892d9ee34ba3b09ce362ccd49f91b02fb8 | c10ef416832b3e99e58fb93c85f414d94bbdbc2e | /py3canvas/tests/peer_reviews.py | 3f74127f0550f90e37aece9a9ce32d0dd96a9fde | [
"MIT"
]
| permissive | tylerclair/py3canvas | 83bab26d1624a11acffaeb0392c6a9a38f995f16 | 7485d458606b65200f0ffa5bbe597a9d0bee189f | refs/heads/master | 2021-10-26T03:27:48.418437 | 2021-10-23T15:07:26 | 2021-10-23T15:07:26 | 92,841,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | """PeerReviews API Tests for Version 1.0.
This is a testing template for the generated PeerReviewsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.peer_reviews import PeerReviewsAPI
from py3canvas.apis.peer_reviews import Peerreview
class TestPeerReviewsAPI(unittest.TestCase):
"""Tests for the PeerReviewsAPI."""
def setUp(self):
self.client = PeerReviewsAPI(secrets.instance_address, secrets.access_token)
def test_get_all_peer_reviews_courses_peer_reviews(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_courses_peer_reviews method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
r = self.client.get_all_peer_reviews_courses_peer_reviews(
assignment_id, course_id, include=None
)
def test_get_all_peer_reviews_sections_peer_reviews(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_sections_peer_reviews method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
r = self.client.get_all_peer_reviews_sections_peer_reviews(
assignment_id, section_id, include=None
)
def test_get_all_peer_reviews_courses_submissions(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_courses_submissions method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
r = self.client.get_all_peer_reviews_courses_submissions(
assignment_id, course_id, submission_id, include=None
)
def test_get_all_peer_reviews_sections_submissions(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_sections_submissions method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
r = self.client.get_all_peer_reviews_sections_submissions(
assignment_id, section_id, submission_id, include=None
)
def test_create_peer_review_courses(self):
"""Integration test for the PeerReviewsAPI.create_peer_review_courses method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_create_peer_review_sections(self):
"""Integration test for the PeerReviewsAPI.create_peer_review_sections method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_delete_peer_review_courses(self):
"""Integration test for the PeerReviewsAPI.delete_peer_review_courses method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
user_id = None # Change me!!
r = self.client.delete_peer_review_courses(
assignment_id, course_id, submission_id, user_id
)
def test_delete_peer_review_sections(self):
"""Integration test for the PeerReviewsAPI.delete_peer_review_sections method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
user_id = None # Change me!!
r = self.client.delete_peer_review_sections(
assignment_id, section_id, submission_id, user_id
)
| [
"[email protected]"
]
| |
84c9536f5e1f78dd08f4468969601dbc75a7253a | 68e2b4177c7bf74460f5bd58671d406aa2f4ff85 | /tools/solver_diagnostics.py | 4e3f19c100fcebcd167b7ba5e4872447f4361b19 | [
"MIT"
]
| permissive | mjgpinheiro/pynosh | 8eaefcb1cad16e7707cf02c9c36cfe9cd5ce98eb | 331454b29246e6c009878589aad2dccb9fda6c30 | refs/heads/master | 2021-09-21T19:18:34.340558 | 2018-08-30T15:25:30 | 2018-08-30T15:25:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,156 | py | # -*- coding: utf-8 -*-
#
from scipy import rand, zeros, log10, argsort, inf
from numpy import ones, array, abs, kron, eye, random
from scipy.sparse import csr_matrix, isspmatrix_bsr, isspmatrix_csr
from pyamg.aggregation import smoothed_aggregation_solver
from pyamg.util.linalg import _approximate_eigenvalues, ishermitian
from pyamg.util.utils import print_table
def solver_diagnostics(
A,
fname="solver_diagnostic",
definiteness=None,
symmetry=None,
strength_list=None,
aggregate_list=None,
smooth_list=None,
Bimprove_list=None,
max_levels_list=None,
cycle_list=None,
krylov_list=None,
prepostsmoother_list=None,
B_list=None,
coarse_size_list=None,
):
"""Try many different different parameter combinations for
smoothed_aggregation_solver(...). The goal is to find appropriate SA
parameter settings for the arbitrary matrix problem A x = 0 using a
random initial guess.
Every combination of the input parameter lists is used to construct and
test an SA solver. Thus, be wary of the total number of solvers possible!
For example for an SPD CSR matrix, the default parameter lists generate 60
different smoothed aggregation solvers.
Symmetry and definiteness are automatically detected, but it is safest to
manually set these parameters through the ``definiteness' and ``symmetry'
parameters.
Parameters
----------
A : {csr_matrix, bsr_matrix}
Sparse NxN matrix in CSR or BSR format
fname : {string}
File name where the diagnostic results are dumped
Default: solver_diagnostic.txt
definiteness : {string}
'positive' denotes positive definiteness
'indefinite' denotes indefiniteness
Default: detected with a few iterations of Arnoldi iteration
symmetry : {string}
'hermitian' or 'nonsymmetric', denoting the symmetry of the matrix
Default: detected by testing if A induces an inner-product
strength_list : {list}
List of various parameter choices for the strength argument sent to
smoothed_aggregation_solver(...)
Default: [('symmetric', {'theta' : 0.0}),
('evolution', {'k':2, 'proj_type':'l2', 'epsilon':2.0}),
('evolution', {'k':2, 'proj_type':'l2', 'epsilon':4.0})]
aggregate_list : {list}
List of various parameter choices for the aggregate argument sent to
smoothed_aggregation_solver(...)
Default: ['standard']
smooth_list : {list}
List of various parameter choices for the smooth argument sent to
smoothed_aggregation_solver(...)
Default depends on the symmetry and definiteness parameters:
if definiteness == 'positive' and (symmetry=='hermitian' or symmetry=='symmetric'):
['jacobi', ('jacobi', {'filter' : True, 'weighting' : 'local'}),
('energy',{'krylov':'cg','maxiter':2, 'degree':1, 'weighting':'local'}),
('energy',{'krylov':'cg','maxiter':3, 'degree':2, 'weighting':'local'}),
('energy',{'krylov':'cg','maxiter':4, 'degree':3, 'weighting':'local'})]
if definiteness == 'indefinite' or symmetry=='nonsymmetric':
[('energy',{'krylov':'gmres','maxiter':2,'degree':1,'weighting':'local'}),
('energy',{'krylov':'gmres','maxiter':3,'degree':2,'weighting':'local'}),
('energy',{'krylov':'gmres','maxiter':3,'degree':3,'weighting':'local'})]
Bimprove_list : {list}
List of various parameter choices for the Bimprove argument sent to
smoothed_aggregation_solver(...)
Default: ['default', None]
max_levels_list : {list}
List of various parameter choices for the max_levels argument sent to
smoothed_aggregation_solver(...)
Default: [25]
cycle_list : {list}
List of various parameter choices for the cycle argument sent to
smoothed_aggregation_solver.solve()
Default: ['V', 'W']
krylov_list : {list}
List of various parameter choices for the krylov argument sent to
smoothed_aggregation_solver.solve(). Basic form is (string, dict),
where the string is a Krylov descriptor, e.g., 'cg' or 'gmres', and
dict is a dictionary of parameters like tol and maxiter. The dictionary
dict may be empty.
Default depends on the symmetry and definiteness parameters:
if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
[('gmres', {'tol':1e-8, 'maxiter':300})]
else:
[('cg', {'tol':1e-8, 'maxiter':300})]
prepostsmoother_list : {list}
List of various parameter choices for the presmoother and postsmoother
arguments sent to smoothed_aggregation_solver(...). Basic form is
[ (presmoother_descriptor, postsmoother_descriptor), ...].
Default depends on the symmetry parameter:
if symmetry == 'nonsymmetric' or definiteness == 'indefinite':
[ (('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2}),
('gauss_seidel_nr', {'sweep':'symmetric', 'iterations':2})) ]
else:
[ (('block_gauss_seidel',{'sweep':'symmetric','iterations':1}),
('block_gauss_seidel',{'sweep':'symmetric','iterations':1})) ]
B_list : {list}
List of various B parameter choices for the B and BH arguments sent to
smoothed_aggregation_solver(...). Basic form is [ (B, BH, string), ...].
B is a vector of left near null-space modes used to generate
prolongation, BH is a vector of right near null-space modes used to
generate restriction, and string is a python command(s) that can generate
your particular B and BH choice. B and BH must have a row-size equal
to the dimensionality of A. string is only used in the automatically
generated test script.
Default depends on whether A is BSR:
if A is CSR:
B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones')]
if A is BSR:
bsize = A.blocksize[0]
B_list = [(ones((A.shape[0],1)), ones((A.shape[0],1)), 'B, BH are all ones'),
(kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)),
kron(ones((A.shape[0]/bsize,1)), numpy.eye(bsize)),
'B = kron(ones((A.shape[0]/A.blocksize[0],1), dtype=A.dtype),
eye(A.blocksize[0])); BH = B.copy()')]
coarse_size_list : {list}
List of various tuples containing pairs of the (max_coarse, coarse_solver)
parameters sent to smoothed_aggregation_solver(...).
Default: [ (300, 'pinv') ]
Notes
-----
Only smoothed_aggregation_solver(...) is used. The Ruge-Stuben solver
framework is not used.
60 total solvers are generated by the defaults for CSR SPD matrices. For
BSR SPD matrices, 120 total solvers are generated by the defaults. A
somewhat smaller number of total solvers is generated if the matrix is
indefinite or nonsymmetric. Every combination of the parameter lists is
attempted.
Generally, there are two types of parameter lists passed to this function.
Type 1 includes: cycle_list, strength_list, aggregate_list, smooth_list,
krylov_list, Bimprove_list, max_levels_list
-------------------------------------------
Here, you pass in a list of different parameters, e.g.,
cycle_list=['V','W'].
Type 2 includes: B_list, coarse_size_list, prepostsmoother_list
-------------------------------------------
This is similar to Type 1, only these represent lists of
pairs of parameters, e.g.,
coarse_size_list=[ (300, 'pinv'), (5000, 'splu')],
where coarse size_list is of the form
[ (max_coarse, coarse_solver), ...].
For detailed info on each of these parameter lists, see above.
Returns
-------
Two files are written:
(1) fname + '.py'
Use the function defined here to generate and run the best
smoothed aggregation method found. The only argument taken
is a BSR/CSR matrix.
(2) fname + '.txt'
This file outputs the solver profile for each method
tried in a sorted table listing the best solver first.
The detailed solver descriptions then follow the table.
See Also
--------
smoothed_aggregation_solver
Examples
--------
>>> from pyamg import gallery
>>> from solver_diagnostics import *
>>> A = gallery.poisson( (50,50), format='csr')
>>> solver_diagnostics(A, fname='isotropic_diffusion_diagnostics.txt', cycle_list=['V'])
"""
##
# Preprocess A
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
print(
"Implicit conversion of A to CSR in" "pyamg.smoothed_aggregation_solver"
)
except:
raise TypeError(
"Argument A must have type csr_matrix or "
"bsr_matrix, or be convertible to csr_matrix"
)
#
A = A.asfptype()
#
if A.shape[0] != A.shape[1]:
raise ValueError("expected square matrix")
print(
(
"\nSearching for optimal smoothed aggregation method for "
"(%d,%d) matrix" % A.shape
)
)
print(" ...")
##
# Detect symmetry
if symmetry is None:
if ishermitian(A, fast_check=True):
symmetry = "hermitian"
else:
symmetry = "nonsymmetric"
##
print(" Detected a " + symmetry + " matrix")
else:
print(" User specified a " + symmetry + " matrix")
##
# Detect definiteness
if definiteness is None:
[EVect, Lambda, H, V, breakdown_flag] = _approximate_eigenvalues(A, 1e-6, 40)
if Lambda.min() < 0.0:
definiteness = "indefinite"
print(" Detected indefiniteness")
else:
definiteness = "positive"
print(" Detected positive definiteness")
else:
print(" User specified definiteness as " + definiteness)
##
# Default B are (1) a vector of all ones, and
# (2) if A is BSR, the constant for each variable
if B_list is None:
B_list = [
(
ones((A.shape[0], 1), dtype=A.dtype),
ones((A.shape[0], 1), dtype=A.dtype),
"B = ones((A.shape[0],1), dtype=A.dtype); BH = B.copy()",
)
]
if isspmatrix_bsr(A) and A.blocksize[0] > 1:
bsize = A.blocksize[0]
B_list.append(
(
kron(ones((A.shape[0] / bsize, 1), dtype=A.dtype), eye(bsize)),
kron(ones((A.shape[0] / bsize, 1), dtype=A.dtype), eye(bsize)),
"B = kron(ones((A.shape[0]/A.blocksize[0],1), dtype=A.dtype), eye(A.blocksize[0])); BH = B.copy()",
)
)
##
# Default is to try V- and W-cycles
if cycle_list is None:
cycle_list = ["V", "W"]
##
# Default strength of connection values
if strength_list is None:
strength_list = [
("symmetric", {"theta": 0.0}),
("evolution", {"k": 2, "proj_type": "l2", "epsilon": 2.0}),
("evolution", {"k": 2, "proj_type": "l2", "epsilon": 4.0}),
]
##
# Default aggregation strategies
if aggregate_list is None:
aggregate_list = ["standard"]
##
# Default prolongation smoothers
if smooth_list is None:
if definiteness == "positive" and (
symmetry == "hermitian" or symmetry == "symmetric"
):
smooth_list = [
"jacobi",
("jacobi", {"filter": True, "weighting": "local"}),
(
"energy",
{"krylov": "cg", "maxiter": 2, "degree": 1, "weighting": "local"},
),
(
"energy",
{"krylov": "cg", "maxiter": 3, "degree": 2, "weighting": "local"},
),
(
"energy",
{"krylov": "cg", "maxiter": 4, "degree": 3, "weighting": "local"},
),
]
elif definiteness == "indefinite" or symmetry == "nonsymmetric":
smooth_list = [
(
"energy",
{
"krylov": "gmres",
"maxiter": 2,
"degree": 1,
"weighting": "local",
},
),
(
"energy",
{
"krylov": "gmres",
"maxiter": 3,
"degree": 2,
"weighting": "local",
},
),
(
"energy",
{
"krylov": "gmres",
"maxiter": 4,
"degree": 3,
"weighting": "local",
},
),
]
else:
raise ValueError("invalid string for definiteness and/or symmetry")
##
# Default pre- and postsmoothers
if prepostsmoother_list is None:
if symmetry == "nonsymmetric" or definiteness == "indefinite":
prepostsmoother_list = [
(
("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 2}),
("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 2}),
)
]
else:
prepostsmoother_list = [
(
("block_gauss_seidel", {"sweep": "symmetric", "iterations": 1}),
("block_gauss_seidel", {"sweep": "symmetric", "iterations": 1}),
)
]
##
# Default Krylov wrapper
if krylov_list is None:
if symmetry == "nonsymmetric" or definiteness == "indefinite":
krylov_list = [("gmres", {"tol": 1e-8, "maxiter": 300})]
else:
krylov_list = [("cg", {"tol": 1e-8, "maxiter": 300})]
##
# Default Bimprove
if Bimprove_list is None:
Bimprove_list = ["default", None]
##
# Default basic solver parameters
if max_levels_list is None:
max_levels_list = [25]
if coarse_size_list is None:
coarse_size_list = [(300, "pinv")]
##
# Setup for ensuing numerical tests
# The results array will hold in each row, three values:
# iterations, operator complexity, and work per digit of accuracy
num_test = (
len(cycle_list)
* len(strength_list)
* len(aggregate_list)
* len(smooth_list)
* len(krylov_list)
* len(Bimprove_list)
* len(max_levels_list)
* len(B_list)
* len(coarse_size_list)
* len(prepostsmoother_list)
)
results = zeros((num_test, 3))
solver_descriptors = []
solver_args = []
##
# Zero RHS and random initial guess
random.seed(0)
b = zeros((A.shape[0], 1), dtype=A.dtype)
if A.dtype == complex:
x0 = rand(A.shape[0], 1) + 1.0j * rand(A.shape[0], 1)
else:
x0 = rand(A.shape[0], 1)
##
# Begin loops over parameter choices
print(" ...")
counter = -1
for cycle in cycle_list:
for krylov in krylov_list:
for max_levels in max_levels_list:
for max_coarse, coarse_solver in coarse_size_list:
for presmoother, postsmoother in prepostsmoother_list:
for B_index in range(len(B_list)):
for strength in strength_list:
for aggregate in aggregate_list:
for smooth in smooth_list:
for Bimprove in Bimprove_list:
counter += 1
print(
" Test %d out of %d"
% (counter + 1, num_test)
)
##
# Grab B vectors
B, BH, Bdescriptor = B_list[B_index]
##
# Store this solver setup
if "tol" in krylov[1]:
tol = krylov[1]["tol"]
else:
tol = 1e-6
if "maxiter" in krylov[1]:
maxiter = krylov[1]["maxiter"]
else:
maxiter = 300
##
descriptor = (
" Solve phase arguments:" + "\n"
" cycle = " + str(cycle) + "\n"
" krylov accel = "
+ str(krylov[0])
+ "\n"
" tol = " + str(tol) + "\n"
" maxiter = " + str(maxiter) + "\n"
" Setup phase arguments:" + "\n"
" max_levels = "
+ str(max_levels)
+ "\n"
" max_coarse = "
+ str(max_coarse)
+ "\n"
" coarse_solver = "
+ str(coarse_solver)
+ "\n"
" presmoother = "
+ str(presmoother)
+ "\n"
" postsmoother = "
+ str(postsmoother)
+ "\n"
" " + Bdescriptor + "\n"
" strength = " + str(strength) + "\n"
" aggregate = "
+ str(aggregate)
+ "\n"
" smooth = " + str(smooth) + "\n"
" Bimprove = " + str(Bimprove)
)
solver_descriptors.append(descriptor)
solver_args.append(
{
"cycle": cycle,
"accel": str(krylov[0]),
"tol": tol,
"maxiter": maxiter,
"max_levels": max_levels,
"max_coarse": max_coarse,
"coarse_solver": coarse_solver,
"B_index": B_index,
"presmoother": presmoother,
"postsmoother": postsmoother,
"strength": strength,
"aggregate": aggregate,
"smooth": smooth,
"Bimprove": Bimprove,
}
)
##
# Construct solver
try:
sa = smoothed_aggregation_solver(
A,
B=B,
BH=BH,
strength=strength,
smooth=smooth,
Bimprove=Bimprove,
aggregate=aggregate,
presmoother=presmoother,
max_levels=max_levels,
postsmoother=postsmoother,
max_coarse=max_coarse,
coarse_solver=coarse_solver,
)
##
# Solve system
residuals = []
x = sa.solve(
b,
x0=x0,
accel=krylov[0],
cycle=cycle,
tol=tol,
maxiter=maxiter,
residuals=residuals,
)
##
# Store results: iters, operator complexity, and
# work per digit-of-accuracy
results[counter, 0] = len(residuals)
results[
counter, 1
] = sa.operator_complexity()
resid_rate = (
residuals[-1] / residuals[0]
) ** (1.0 / (len(residuals) - 1.))
results[
counter, 2
] = sa.cycle_complexity() / abs(
log10(resid_rate)
)
except:
descriptor_indented = (
" "
+ descriptor.replace(
"\n", "\n "
)
)
print(" --> Failed this test")
print(" --> Solver descriptor is...")
print(descriptor_indented)
results[counter, :] = inf
##
# Sort results and solver_descriptors according to work-per-doa
indys = argsort(results[:, 2])
results = results[indys, :]
solver_descriptors = list(array(solver_descriptors)[indys])
solver_args = list(array(solver_args)[indys])
##
# Create table from results and print to file
table = [["solver #", "iters", "op complexity", "work per DOA"]]
for i in range(results.shape[0]):
if (results[i, :] == inf).all() == True:
# in this case the test failed...
table.append(["%d" % (i + 1), "err", "err", "err"])
else:
table.append(
[
"%d" % (i + 1),
"%d" % results[i, 0],
"%1.1f" % results[i, 1],
"%1.1f" % results[i, 2],
]
)
#
fptr = open(fname + ".txt", "w")
fptr.write(
"****************************************************************\n"
+ "* Begin Solver Diagnostic Results *\n"
+ "* *\n"
+ "* ''solver #'' refers to below solver descriptors *\n"
+ "* *\n"
+ "* ''iters'' refers to iterations taken *\n"
+ "* *\n"
+ "* ''op complexity'' refers to operator complexity *\n"
+ "* *\n"
+ "* ''work per DOA'' refers to work per digit of *\n"
+ "* accuracy to solve the algebraic system, i.e. it *\n"
+ "* measures the overall efficiency of the solver *\n"
+ "****************************************************************\n\n"
)
fptr.write(print_table(table))
##
# Now print each solver descriptor to file
fptr.write(
"\n****************************************************************\n"
+ "* Begin Solver Descriptors *\n"
+ "****************************************************************\n\n"
)
for i in range(len(solver_descriptors)):
fptr.write("Solver Descriptor %d\n" % (i + 1))
fptr.write(solver_descriptors[i])
fptr.write(" \n \n")
fptr.close()
##
# Now write a function definition file that generates the 'best' solver
fptr = open(fname + ".py", "w")
# Helper function for file writing
def to_string(a):
if type(a) == type((1,)):
return str(a)
elif type(a) == type("s"):
return "'%s'" % a
else:
return str(a)
#
fptr.write(
"#######################################################################\n"
)
fptr.write(
"# Function definition automatically generated by solver_diagnostics.py\n"
)
fptr.write("#\n")
fptr.write("# Use the function defined here to generate and run the best\n")
fptr.write("# smoothed aggregation method found by solver_diagnostics(...).\n")
fptr.write("# The only argument taken is a CSR/BSR matrix.\n")
fptr.write("#\n")
fptr.write("# To run: >>> # User must load/generate CSR/BSR matrix A\n")
fptr.write("# >>> from " + fname + " import " + fname + "\n")
fptr.write("# >>> " + fname + "(A)" + "\n")
fptr.write(
"#######################################################################\n\n"
)
fptr.write("from pyamg import smoothed_aggregation_solver\n")
fptr.write("from pyamg.util.linalg import norm\n")
fptr.write("from numpy import ones, array, arange, zeros, abs, random\n")
fptr.write("from scipy import rand, ravel, log10, kron, eye\n")
fptr.write("from scipy.io import loadmat\n")
fptr.write("from scipy.sparse import isspmatrix_bsr, isspmatrix_csr\n")
fptr.write("import pylab\n\n")
fptr.write("def " + fname + "(A):\n")
fptr.write(" ##\n # Generate B\n")
fptr.write(" " + B_list[B_index][2] + "\n\n")
fptr.write(" ##\n # Random initial guess, zero right-hand side\n")
fptr.write(" random.seed(0)\n")
fptr.write(" b = zeros((A.shape[0],1))\n")
fptr.write(" x0 = rand(A.shape[0],1)\n\n")
fptr.write(" ##\n # Create solver\n")
fptr.write(
" ml = smoothed_aggregation_solver(A, B=B, BH=BH,\n"
+ " strength=%s,\n" % to_string(solver_args[0]["strength"])
+ " smooth=%s,\n" % to_string(solver_args[0]["smooth"])
+ " Bimprove=%s,\n" % to_string(solver_args[0]["Bimprove"])
+ " aggregate=%s,\n" % to_string(solver_args[0]["aggregate"])
+ " presmoother=%s,\n" % to_string(solver_args[0]["presmoother"])
+ " postsmoother=%s,\n" % to_string(solver_args[0]["postsmoother"])
+ " max_levels=%s,\n" % to_string(solver_args[0]["max_levels"])
+ " max_coarse=%s,\n" % to_string(solver_args[0]["max_coarse"])
+ " coarse_solver=%s)\n\n" % to_string(solver_args[0]["coarse_solver"])
)
fptr.write(" ##\n # Solve system\n")
fptr.write(" res = []\n")
fptr.write(
" x = ml.solve(b, x0=x0, tol=%s, residuals=res, accel=%s, maxiter=%s, cycle=%s)\n"
% (
to_string(solver_args[0]["tol"]),
to_string(solver_args[0]["accel"]),
to_string(solver_args[0]["maxiter"]),
to_string(solver_args[0]["cycle"]),
)
)
fptr.write(" res_rate = (res[-1]/res[0])**(1.0/(len(res)-1.))\n")
fptr.write(" normr0 = norm(ravel(b) - ravel(A*x0))\n")
fptr.write(" print " "\n")
fptr.write(" print ml\n")
fptr.write(" print 'System size: ' + str(A.shape)\n")
fptr.write(" print 'Avg. Resid Reduction: %1.2f'%res_rate\n")
fptr.write(" print 'Iterations: %d'%len(res)\n")
fptr.write(
" print 'Operator Complexity: %1.2f'%ml.operator_complexity()\n"
)
fptr.write(
" print 'Work per DOA: %1.2f'%(ml.cycle_complexity()/abs(log10(res_rate)))\n"
)
fptr.write(
" print 'Relative residual norm: %1.2e'%(norm(ravel(b) - ravel(A*x))/normr0)\n\n"
)
fptr.write(" ##\n # Plot residual history\n")
fptr.write(" pylab.semilogy(array(res)/normr0)\n")
fptr.write(" pylab.title('Residual Histories')\n")
fptr.write(" pylab.xlabel('Iteration')\n")
fptr.write(" pylab.ylabel('Relative Residual Norm')\n")
fptr.write(" pylab.show()\n\n")
# Close file pointer
fptr.close()
print(" ...")
print(" --> Diagnostic Results located in " + fname + ".txt")
print(" ...")
print(
" --> See automatically generated function definition\n"
+ " ./"
+ fname
+ ".py.\n\n"
+ " Use the function defined here to generate and run the best\n"
+ " smoothed aggregation method found. The only argument taken\n"
+ " is a CSR/BSR matrix.\n\n"
+ " To run: >>> # User must load/generate CSR/BSR matrix A\n"
+ " >>> from "
+ fname
+ " import "
+ fname
+ "\n"
+ " >>> "
+ fname
+ "(A)"
)
| [
"[email protected]"
]
| |
018066b50c689152a24b32c9026365c72603e07e | 398d6a7fb50a2485ef17319028da73a94b166ef8 | /01Factory/1-8.py | d11ca0024613770a0e91a31efae0333d0a306ce1 | []
| no_license | Yuanoung/TemplateOfDesign | 3e7675de97d90f354b32de6863ad8c4b7e2b338a | ea1635ec42d12b1c869db20a31292e063e5d200e | refs/heads/master | 2021-04-30T14:28:08.961904 | 2018-02-12T09:52:36 | 2018-02-12T09:52:36 | 121,216,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | class Operation(object):
@staticmethod
def getResult(numberA, op, numberB):
if op == "+":
return numberA + numberB
elif op == "-":
return numberA - numberB
elif op == "*":
return numberA * numberB
else:
return numberA / numberB | [
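
# Minimal usage sketch (added for illustration; not part of the original file).
# The static method dispatches on the operator string:
if __name__ == "__main__":
    print(Operation.getResult(6, "+", 2))  # 8
    print(Operation.getResult(6, "*", 2))  # 12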
"[email protected]"
]
| |
da4403af4ed8be3e36cf4bb7252b4e9888b00a01 | 93c02201c60da7f9f231f0a87ffe8b32729ce229 | /Array/kth-smallest-element.py | a00353263dc369f7777f5c84e850f8a1c7eefbd2 | []
| no_license | sudo-hemant/curated_questions_dsa | 54aebe021f58f757e519f508f9269798127c7495 | 8f1097274bfd1d2c6f25def6c16982bbf6d7461c | refs/heads/master | 2023-03-14T07:53:00.652525 | 2021-02-27T07:16:22 | 2021-02-27T07:16:22 | 339,599,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py |
# NOTE: heapq is a min-heap, so heappop always returns the smallest stored value.
# To find the k-th smallest element we keep the k smallest values seen so far in a
# max-heap of size k, implemented by pushing the values negated; the root (after
# flipping the sign back) is the k-th smallest once all elements are processed.
import heapq
def kthSmallest(arr, l, r, k):
'''
arr : given array
l : starting index of the array i.e 0
r : ending index of the array i.e size-1
k : find kth smallest element and return using this function
'''
heap = []
for num in arr:
if len(heap) < k:
heapq.heappush(heap, -1 * num)
else:
            # largest of the k smallest elements kept so far
            curr_max = -1 * heapq.heappop(heap)
            heapq.heappush(heap, -1 * min(curr_max, num))
return -1 * heapq.heappop(heap)
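
# Usage sketch (added for illustration; not part of the original file):
if __name__ == "__main__":
    arr = [7, 10, 4, 3, 20, 15]
    # sorted(arr) == [3, 4, 7, 10, 15, 20], so the 3rd smallest is 7
    print(kthSmallest(arr, 0, len(arr) - 1, 3))  # 7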
| [
"[email protected]"
]
| |
10d8de2da1b5848136bd6aa8f35b12ce3cfa57a3 | a39f8f5b66314b1084f3a88d8d7a1a791292ea2c | /examples/comboBox1.py | e32adc5343234eac78948d372b4ffc33fa5dc491 | []
| no_license | zwlyn/pyqt5try | 6d62e28795390c3049ddb6cbb6b761002149e47a | 21931c26f0d87b2aa3ba80eef5f0fcd46d54bcdd | refs/heads/master | 2020-08-01T02:57:17.683792 | 2019-10-09T11:10:11 | 2019-10-09T11:10:11 | 210,837,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # -*- coding: utf_8 -*-
import sys
from PyQt5.QtWidgets import (QWidget, QLabel,
QComboBox, QApplication)
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.lbl = QLabel("Ubuntu", self)
combo = QComboBox(self)
combo.addItem("Ubuntu")
combo.addItem("Mandriva")
combo.addItem("Fedora")
combo.addItem("Arch")
combo.addItem("Gentoo")
combo.move(50, 50 )
self.lbl.move(50, 150)
combo.activated[str].connect(self.onActivated)
self.setGeometry(300, 300, 300, 200)
self.setWindowTitle('QComboBox')
self.show()
def onActivated(self, text):
self.lbl.setText(text)
self.lbl.adjustSize()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| [
"[email protected]"
]
| |
42ecacc92a7a98c20a2a14c5f509bf7fb1ac7325 | 060c340a1f0d24fbf7a3aae573f59ebe2f8a6bbf | /dynamics.py | 9a83bedf9d84c999329aba22c4ccc2d70c7cbfba | [
"MIT"
]
| permissive | pj1138/MayaToolbox | 0052f070de12b34820c4ef107a587b602afe94de | 8eabd96eeedcb4242dba4ec15617bad3b81496d7 | refs/heads/master | 2021-01-13T11:57:57.638751 | 2017-09-24T18:11:27 | 2017-09-24T18:11:27 | 29,816,700 | 0 | 0 | null | 2015-01-25T14:29:02 | 2015-01-25T14:29:01 | null | UTF-8 | Python | false | false | 1,067 | py | # DYNAMICS
import pymel.core as py
import maya.cmds as mc
import maya.mel as mel
from math import *
from xml.dom.minidom import *
from random import uniform as rnd
import os
import re
#~~
from mayatoolbox import *
from animation import *
def quickDyn(spread=5, num=10, joints=False, bake=False):
target = []
g = py.gravity()
for i in range(0,num):
c = py.polyCube()
target.append(c)
x = rnd(-spread,spread)
y = rnd(-spread,spread) + 10
z = rnd(-spread,spread)
py.move(x,y,z)
py.rotate(x,y,z)
s(target)
py.rigidBody()
for i in range(0,len(target)):
py.connectDynamic(target[i],f=g)
if(joints==False and bake==True):
bakeAnimation(target)
if(joints==True):
target2 = []
for i in range(0,len(target)):
s(target[i])
jnt = py.joint()
target2.append(jnt)
if(bake==True):
bakeAnimation(target2)
for i in range(0,len(target2)):
unparent(target2[i])
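
# Usage sketch (illustrative; not part of the original file). Run inside a Maya
# session with this toolbox on the path; it scatters ten cubes, turns them into
# rigid bodies under gravity, adds a joint per cube and bakes the simulation:
#
#   quickDyn(spread=8, num=10, joints=True, bake=True)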
| [
"[email protected]"
]
| |
8924478933f7a7524dcaab4f3c7882e74a847575 | 01fdd206c8c825b30870bdd3f6e75f0aa113b849 | /test/record/parser/test_response_whois_nic_tr_status_registered.py | 58c44cf2045188ef955e413f45503f6c8c2d0294 | [
"MIT"
]
| permissive | huyphan/pyyawhois | 0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5 | 77fb2f73a9c67989f1d41d98f37037406a69d136 | refs/heads/master | 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,978 | py |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.tr/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicTrStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.tr/status_registered.txt"
host = "whois.nic.tr"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_domain(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 4)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns1.google.com")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns2.google.com")
eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[2].name, "ns3.google.com")
eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[3].name, "ns4.google.com")
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(len(self.record.admin_contacts), 1)
eq_(self.record.admin_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.admin_contacts[0].type, yawhois.record.Contact.TYPE_ADMINISTRATIVE)
eq_(self.record.admin_contacts[0].id, "mi154-metu")
eq_(self.record.admin_contacts[0].name, None)
eq_(self.record.admin_contacts[0].organization, "MarkMonitor, Inc")
eq_(self.record.admin_contacts[0].address, "Hidden upon user request")
eq_(self.record.admin_contacts[0].city, None)
eq_(self.record.admin_contacts[0].zip, None)
eq_(self.record.admin_contacts[0].state, None)
eq_(self.record.admin_contacts[0].country, None)
eq_(self.record.admin_contacts[0].country_code, None)
eq_(self.record.admin_contacts[0].phone, "Hidden upon user request")
eq_(self.record.admin_contacts[0].fax, "Hidden upon user request")
eq_(self.record.admin_contacts[0].email, None)
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
eq_(self.record.created_on.__class__.__name__, 'datetime')
eq_(self.record.created_on, time_parse('2001-08-23 00:00:00 UTC'))
def test_registrar(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.registrar)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(len(self.record.registrant_contacts), 1)
eq_(self.record.registrant_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.registrant_contacts[0].type, yawhois.record.Contact.TYPE_REGISTRANT)
eq_(self.record.registrant_contacts[0].id, None)
eq_(self.record.registrant_contacts[0].name, "Google Inc.")
eq_(self.record.registrant_contacts[0].organization, None)
eq_(self.record.registrant_contacts[0].address, "1600 Amphitheatre Parkway\nMountain View CA")
eq_(self.record.registrant_contacts[0].city, None)
eq_(self.record.registrant_contacts[0].zip, None)
eq_(self.record.registrant_contacts[0].state, None)
eq_(self.record.registrant_contacts[0].country, "United States of America")
eq_(self.record.registrant_contacts[0].country_code, None)
eq_(self.record.registrant_contacts[0].phone, "+ 1-650-2530000-")
eq_(self.record.registrant_contacts[0].fax, "+ 1-650-2530001-")
eq_(self.record.registrant_contacts[0].email, "[email protected]")
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(len(self.record.technical_contacts), 1)
eq_(self.record.technical_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.technical_contacts[0].type, yawhois.record.Contact.TYPE_TECHNICAL)
eq_(self.record.technical_contacts[0].id, "btl1-metu")
eq_(self.record.technical_contacts[0].name, None)
eq_(self.record.technical_contacts[0].organization, "BERÝL TEKNOLOJÝ LTD. ÞTÝ.")
eq_(self.record.technical_contacts[0].address, "Ceyhun Atuf Kansu Cad. Bayraktar Ýþ Merkezi\nNo:114 G-4 Balgat\nAnkara,06520\nTürkiye")
eq_(self.record.technical_contacts[0].city, None)
eq_(self.record.technical_contacts[0].zip, None)
eq_(self.record.technical_contacts[0].state, None)
eq_(self.record.technical_contacts[0].country, None)
eq_(self.record.technical_contacts[0].country_code, None)
eq_(self.record.technical_contacts[0].phone, "+ 90-312-4733035-")
eq_(self.record.technical_contacts[0].fax, "+ 90-312-4733039-")
eq_(self.record.technical_contacts[0].email, None)
def test_updated_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)
def test_domain_id(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2014-08-22 00:00:00 UTC'))
def test_disclaimer(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.disclaimer)
| [
"[email protected]"
]
| |
5bc5b5cfffe723fe4f784cb6707c7b054ae384ae | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-imageenhan/aliyunsdkimageenhan/request/v20190930/RecolorImageRequest.py | ff730d30e823cf7d8caf5085eb77484efb0abc51 | [
"Apache-2.0"
]
| permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,317 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimageenhan.endpoint import endpoint_data
class RecolorImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imageenhan', '2019-09-30', 'RecolorImage','imageenhan')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Mode(self): # String
return self.get_body_params().get('Mode')
def set_Mode(self, Mode): # String
self.add_body_params('Mode', Mode)
def get_ColorCount(self): # Integer
return self.get_body_params().get('ColorCount')
def set_ColorCount(self, ColorCount): # Integer
self.add_body_params('ColorCount', ColorCount)
def get_ColorTemplates(self): # RepeatList
return self.get_body_params().get('ColorTemplate')
def set_ColorTemplates(self, ColorTemplate): # RepeatList
for depth1 in range(len(ColorTemplate)):
if ColorTemplate[depth1].get('Color') is not None:
self.add_body_params('ColorTemplate.' + str(depth1 + 1) + '.Color', ColorTemplate[depth1].get('Color'))
def get_Url(self): # String
return self.get_body_params().get('Url')
def set_Url(self, Url): # String
self.add_body_params('Url', Url)
def get_RefUrl(self): # String
return self.get_body_params().get('RefUrl')
def set_RefUrl(self, RefUrl): # String
self.add_body_params('RefUrl', RefUrl)
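# --- Editor's usage sketch (hedged; not part of the generated SDK file) ------
# A minimal example of how a request class like this is normally driven through
# aliyunsdkcore's AcsClient. The credentials, region and parameter values below
# are placeholders/assumptions, not values taken from this repository.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
    request = RecolorImageRequest()
    request.set_Url('https://example.com/input.jpg')  # hypothetical source image
    request.set_ColorCount(3)
    # do_action_with_exception sends the request and raises on API errors
    print(client.do_action_with_exception(request))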
| [
"[email protected]"
]
| |
76fbb1b839f66900f0ee35447d47bdd4a00613bb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03785/s325179868.py | b238757b012a1f39080269d6ad3146db49516bb8 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import sys
def input(): return sys.stdin.readline().strip()
def resolve():
n,c,k=map(int, input().split())
l=[int(input()) for i in range(n)]
l.sort()
saisyo=l[0]
ninzu=1
ans=0
for j in range(1,n):
x=l[j]-saisyo
if x<=k and ninzu<c:
ninzu+=1
else:
ans+=1
saisyo=l[j]
ninzu=1
print(ans+1)
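# Editor's note (hedged): the loop above is a greedy grouping -- after sorting,
# keep adding people to the current group while they are within k of the group's
# first member and the group has fewer than c members, otherwise start a new one.
# Worked example with n=5, c=2, k=3 and sorted values [1, 2, 4, 7, 8]:
#   {1, 2} (2-1 <= 3, group then full), {4, 7} (7-4 <= 3), {8} (8-4 > 3),
# so ans+1 == 3 is printed.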
resolve() | [
"[email protected]"
]
| |
a308321b6894a652f10d50f0e278a3312baaee0a | 9fe1c431568746622ae9533d4694097e576f960c | /prophyle/increment_version.py | 0c14ee5978e3dcb21724d129abcae06d142371b3 | [
"MIT"
]
| permissive | prophyle/prophyle | 819cc062714838cd543d99e65ec5075b5a54400c | aa1ed8eebe74d8557090422255f42ee18aaef839 | refs/heads/master | 2023-08-07T18:47:15.649605 | 2023-07-26T12:47:36 | 2023-07-26T12:47:36 | 49,076,502 | 13 | 3 | MIT | 2023-09-01T18:12:12 | 2016-01-05T16:03:04 | Python | UTF-8 | Python | false | false | 435 | py | #! /usr/bin/env python3
import os
import sys
vfn = os.path.join(os.path.dirname(sys.argv[0]), "version.py")
exec(open(vfn).read())
numbers = VERSION.split(".")
numbers[-1] = str(int(numbers[-1]) + 1)
version = ".".join(numbers)
with open(vfn, "w") as f:
f.write('try:\n')
f.write(' from __commit import *\n')
f.write('except ImportError:\n')
f.write(' pass\n')
f.write('VERSION = "{}"'.format(version))
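# Editor's note (hedged): given a sibling version.py containing e.g. VERSION = "0.3.1",
# one run of this script rewrites it with the last dot-separated component bumped
# (VERSION = "0.3.2"), preceded by the try/except __commit import block written above.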
| [
"[email protected]"
]
| |
1ca42c506eb72546a2daffbb5711eb10f61b711d | 1869655b39f57d0240bd08ed19df5f789264ee8e | /domino_puzzle_test.py | e9bca8ef6795f00cd1c2de90f0a3656297614c35 | [
"MIT"
]
| permissive | binoliMhatre/moonside | d75a8bb0f1fd91ea2eb0fe36de199d9f8ee4fd74 | b046c2281e5c4b02ce535b0d523b94e166ebafa3 | refs/heads/master | 2021-01-21T15:43:32.853940 | 2016-03-11T03:16:47 | 2016-03-11T03:16:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,537 | py | import unittest
from domino_puzzle import Domino, Cell, Board, BoardError, BoardGraph,\
CaptureBoardGraph
class DummyRandom(object):
def __init__(self,
randints=None,
choiceDominoes=None,
otherChoices=None):
self.randints = randints or {} # {(min, max): [i, j, k]}
self.choiceDominoes = choiceDominoes
self.otherChoices = otherChoices # {[choices]: [selection]}
def randint(self, a, b):
results = self.randints.get((a, b), None)
return results.pop(0) if results else 0
def choice(self, seq):
if type(seq[0]) is Domino:
return self.choiceDominoes.pop(0)
selections = self.otherChoices[seq]
return selections.pop(0)
class CellTest(unittest.TestCase):
def testRepr(self):
cell = Cell(4)
s = repr(cell)
self.assertEqual("Cell(4)", s)
def testPips(self):
cell = Cell(5)
pips = cell.pips
self.assertEqual(5, pips)
def testFindNeighbours(self):
board = Board.create("""\
x 3|2
1|0 x
""")
cell = board[1][0]
expected_neighbours = set([board[1][1]])
neighbours = set(cell.findNeighbours())
self.assertEqual(expected_neighbours, neighbours)
class BoardTest(unittest.TestCase):
def testRepr(self):
board = Board(4, 3)
s = repr(board)
self.assertEqual("Board(4, 3)", s)
def testAddCell(self):
board = Board(4, 3)
board.add(Cell(4), 1, 2)
cell = board[1][2]
self.assertEqual(4, cell.pips)
def testAddDomino(self):
board = Board(4, 3)
board.add(Domino(5, 6), 1, 2)
pips = board[1][2].pips
self.assertEqual(5, pips)
def testDisplay(self):
board = Board(4, 3)
board.add(Domino(5, 6), 1, 2)
expected_display = """\
x 5|6 x
x x x x
x x x x
"""
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testDisplayCropped(self):
board = Board.create("""\
3 x x x
-
2 0|2 x
x x x x
""")
expected_display = """\
3 x x
-
2 0|2
"""
self.assertMultiLineEqual(expected_display,
board.display(cropped=True))
def testDisplayCroppingBounds(self):
board = Board.create("""\
3 x x x
-
2 0|2 x
x x x x
""")
expected_display = """\
3 x x
-
2 0|2
"""
bounds = ['garbage', 'to', 'be', 'removed']
expected_bounds = [0, 1, 2, 2]
display = board.display(cropped=True, cropping_bounds=bounds)
self.assertMultiLineEqual(expected_display, display)
self.assertEqual(expected_bounds, bounds)
def testRotate(self):
board = Board(4, 3)
domino1 = Domino(5, 6)
board.add(domino1, 1, 2)
domino1.rotate(-90)
expected_display = """\
x 5 x x
-
x 6 x x
x x x x
"""
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testMoveRight(self):
board = Board(4, 3)
domino1 = Domino(5, 6)
board.add(domino1, 1, 2)
domino1.move(1, 0)
expected_display = """\
x x 5|6
x x x x
x x x x
"""
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testMoveLeft(self):
board = Board(4, 3)
domino1 = Domino(5, 6)
board.add(domino1, 1, 2)
domino1.move(-1, 0)
expected_display = """\
5|6 x x
x x x x
x x x x
"""
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testGetDirection(self):
dx, dy = Domino.get_direction('l')
self.assertEqual((-1, 0), (dx, dy))
def testRotateWithoutBoard(self):
domino1 = Domino(5, 6)
domino1.rotate(90)
self.assertEqual(90, domino1.degrees)
def testRemove(self):
board = Board(3, 4)
domino1 = Domino(1, 5)
board.add(domino1, 0, 0)
board.remove(domino1)
self.assertEqual([], board.dominoes)
def testRemoveAndRotate(self):
board = Board(3, 4)
domino1 = Domino(1, 5)
board.add(domino1, 0, 0)
board.remove(domino1)
domino1.rotate(270)
self.assertEqual(270, domino1.degrees)
def testRotateAndAdd(self):
board = Board(4, 3)
domino1 = Domino(5, 6)
domino1.rotate(-90)
board.add(domino1, 1, 2)
expected_display = """\
x 5 x x
-
x 6 x x
x x x x
"""
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testOccupied(self):
board = Board(4, 3)
board.add(Domino(2, 3), 1, 0)
with self.assertRaisesRegexp(BoardError, 'Position 1, 0 is occupied.'):
board.add(Domino(1, 2), 0, 0)
def testOffBoard(self):
board = Board(4, 3)
with self.assertRaisesRegexp(BoardError,
'Position 4, 0 is off the board.'):
board.add(Domino(1, 2), 3, 0)
def testBadMove(self):
start_state = """\
0|2 x
0|1 x
"""
board = Board.create(start_state)
domino1 = board[0][0].domino
with self.assertRaises(BoardError):
domino1.move(-1, 0)
self.assertMultiLineEqual(start_state, board.display())
def testFill(self):
dummy_random = DummyRandom(randints={(0, 4): [1, 1]}, # directions
choiceDominoes=[Domino(0, 0),
Domino(0, 1)])
board = Board(2, 2, max_pips=6)
expected_display = """\
0 1
- -
0 0
"""
board.fill(dummy_random)
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testFillWithRandomDomino(self):
dummy_random = DummyRandom(randints={(0, 4): [1, 1]}, # directions
choiceDominoes=[Domino(0, 5),
Domino(0, 2)])
board = Board(2, 2, max_pips=6)
expected_display = """\
5 2
- -
0 0
"""
board.fill(dummy_random)
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testFillWithFlip(self):
dummy_random = DummyRandom(randints={(0, 4): [1, 1], # directions
(0, 1): [1, 0]}, # flips
choiceDominoes=[Domino(0, 0),
Domino(0, 1)])
board = Board(2, 2, max_pips=6)
expected_display = """\
0 0
- -
0 1
"""
board.fill(dummy_random)
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testFillWithMoreRotation(self):
dummy_random = DummyRandom(randints={(0, 4): [1, 1, 1]}, # directions
choiceDominoes=[Domino(0, 0),
Domino(0, 1),
Domino(0, 2)])
board = Board(2, 3, max_pips=6)
expected_display = """\
0|2
0 1
- -
0 0
"""
board.fill(dummy_random)
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testFillWithBacktrack(self):
""" Force a backtrack.
This scenario will get to the following grid and then be forced to
backtrack.
x 3 4 x
- -
0 0 0 2
- -
0 0|1 0
"""
dummy_random = DummyRandom(
randints={(0, 4): [1, 0, 1, 1]}, # directions
choiceDominoes=[Domino(0, 0),
Domino(0, 1),
Domino(0, 2),
Domino(0, 3),
Domino(0, 4),
Domino(0, 5),
Domino(0, 4),
Domino(0, 5)])
board = Board(4, 3, max_pips=6)
expected_display = """\
0|4 0|5
0 0|3 2
- -
0 0|1 0
"""
board.fill(dummy_random)
display = board.display()
self.assertMultiLineEqual(expected_display, display)
def testExtraDominoes(self):
state = """\
0|0 x
1|1 x
"""
max_pips = 2
expected_extra_dominoes = [Domino(0, 1),
Domino(0, 2),
Domino(1, 2),
Domino(2, 2)]
board = Board.create(state, max_pips=max_pips)
self.assertEqual(expected_extra_dominoes, board.extra_dominoes)
def testFlip(self):
board = Board(3, 2, max_pips=6)
domino1 = Domino(1, 5)
expected_display = """\
x x x
5|1 x
"""
board.add(domino1, 0, 0)
domino1.flip()
self.assertMultiLineEqual(expected_display, board.display())
def testCreate(self):
state = """\
0|2 x
0|1 x
"""
board = Board.create(state)
display = board.display()
self.assertMultiLineEqual(state, display)
def testCreateRightEdge(self):
state = """\
x 0|2
0|1 x
"""
board = Board.create(state)
self.assertMultiLineEqual(state, board.display())
def testCreateVertical(self):
state = """\
1 0|2
-
0 x x
"""
board = Board.create(state)
self.assertMultiLineEqual(state, board.display())
def testCreateWithOtherMarkers(self):
state = """\
1 0?2
*
0 x x
"""
expected_display = """\
1 0|2
-
0 x x
"""
board = Board.create(state)
self.assertMultiLineEqual(expected_display, board.display())
def testCreateWithBorder(self):
state = """\
3 x x
-
2 0|2
"""
board = Board.create(state, border=1)
expected_display = """\
x x x x x
x 3 x x x
-
x 2 0|2 x
x x x x x
"""
self.assertMultiLineEqual(expected_display, board.display())
def testIsConnected(self):
state = """\
1 0|2 x x
-
0 0|4 0|3
"""
board = Board.create(state)
self.assertTrue(board.isConnected())
def testIsNotConnected(self):
state = """\
1 0|2 x x
-
0 x x 0|3
"""
board = Board.create(state)
self.assertFalse(board.isConnected())
def testHasNoLoner(self):
state = """\
1 0 x 1|3
- -
0 2 x 0|3
"""
board = Board.create(state)
self.assertFalse(board.hasLoner())
def testHasLoner(self):
state = """\
1 0 x 1|2
- -
0 2 x 0|3
"""
board = Board.create(state)
self.assertTrue(board.hasLoner())
def testEqual(self):
state = """\
0|4 0|5
0 0|3 2
- -
0 0|1 0
"""
board1 = Board.create(state)
board2 = Board.create(state)
eq_result = (board1 == board2)
neq_result = (board1 != board2)
self.assertTrue(eq_result)
self.assertFalse(neq_result)
def testEqualWithGap(self):
state = """\
0|4 0|5
0 x x 2
- -
0 0|1 0
"""
board1 = Board.create(state)
board2 = Board.create(state)
eq_result = (board1 == board2)
neq_result = (board1 != board2)
self.assertTrue(eq_result)
self.assertFalse(neq_result)
def testDifferentPips(self):
state1 = """\
0|4 0|5
0 0|3 2
- -
0 0|1 0
"""
state2 = """\
6|4 0|5
0 0|3 2
- -
0 0|1 0
"""
board1 = Board.create(state1)
board2 = Board.create(state2)
eq_result = (board1 == board2)
neq_result = (board1 != board2)
self.assertFalse(eq_result)
self.assertTrue(neq_result)
def testDifferentAlignment(self):
state1 = """\
0|4 0|5
0 0|3 2
- -
0 0|1 0
"""
state2 = """\
0|4 0|5
0 0 3 2
- - - -
0 0 1 0
"""
board1 = Board.create(state1)
board2 = Board.create(state2)
self.assertNotEqual(board1, board2)
class DominoTest(unittest.TestCase):
def testRepr(self):
domino1 = Domino(5, 3)
s = repr(domino1)
self.assertEqual("Domino(5, 3)", s)
def testInit(self):
domino1 = Domino(5, 3)
pips = domino1.head.pips
self.assertEqual(5, pips)
def testCreate(self):
expected_dominoes = [Domino(0, 0),
Domino(0, 1),
Domino(0, 2),
Domino(1, 1),
Domino(1, 2),
Domino(2, 2)]
dominoes = Domino.create(2)
self.assertEqual(expected_dominoes, dominoes)
def testEqual(self):
domino1 = Domino(5, 3)
domino2 = Domino(5, 3)
eq_result = domino1 == domino2
neq_result = domino1 != domino2
self.assertTrue(eq_result)
self.assertFalse(neq_result)
def testDifferentPips(self):
domino1 = Domino(5, 3)
domino2 = Domino(5, 4)
domino3 = Domino(6, 3)
eq_result = domino1 == domino2
neq_result = domino1 != domino2
self.assertFalse(eq_result)
self.assertTrue(neq_result)
self.assertNotEqual(domino1, domino3)
def testEqualFlipped(self):
domino1 = Domino(5, 3)
domino2 = Domino(3, 5)
eq_result = domino1 == domino2
neq_result = domino1 != domino2
self.assertTrue(eq_result)
self.assertFalse(neq_result)
def testRotateFullCircle(self):
domino1 = Domino(1, 5)
domino1.rotate(180)
domino1.rotate(180)
self.assertEqual(0, domino1.degrees)
def testRotateNegative(self):
domino1 = Domino(1, 5)
domino1.rotate(-90)
self.assertEqual(270, domino1.degrees)
def testFindNeighbours(self):
state = """\
1 0|2 x x
-
0 0|4 0|3
"""
board = Board.create(state)
domino1 = board[1][1].domino
expected_neighbours = set([board[0][1].domino, board[1][0].domino])
neighbours = domino1.findNeighbours()
self.assertEqual(expected_neighbours, neighbours)
def testIsMatch(self):
domino1 = Domino(0, 1)
self.assertFalse(domino1.isMatch(Domino(2, 2)))
self.assertTrue(domino1.isMatch(Domino(0, 2)))
self.assertTrue(domino1.isMatch(Domino(2, 1)))
self.assertTrue(domino1.isMatch(Domino(2, 0)))
self.assertTrue(domino1.isMatch(Domino(1, 2)))
def testName(self):
domino = Domino(1, 2)
name = domino.get_name()
self.assertEqual("12", name)
def testDescribeMove(self):
domino1 = Domino(1, 2)
dx, dy = 1, 0
expected_move = '12r'
move = domino1.describe_move(dx, dy)
self.assertEqual(expected_move, move)
def testDescribeMoveReversed(self):
domino1 = Domino(1, 2)
domino1.rotate(180)
dx, dy = 1, 0
expected_move = '21r'
move = domino1.describe_move(dx, dy)
self.assertEqual(expected_move, move)
def testDescribeMoveUpReversed(self):
domino1 = Domino(1, 2)
domino1.rotate(90)
dx, dy = 0, 1
expected_move = '21u'
move = domino1.describe_move(dx, dy)
self.assertEqual(expected_move, move)
class BoardGraphTest(unittest.TestCase):
def testWalkRight(self):
board = Board.create("""\
0|2 x
0|1 x
""")
graph = BoardGraph()
expected_states = set("""\
0|2
0|1
---
0|2 x
x 0|1
---
x 0|2
0|1 x
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
def testWalkLeft(self):
board = Board.create("""\
x 0|2
0|1 x
""")
graph = BoardGraph()
expected_states = set("""\
0|2
0|1
---
0|2 x
x 0|1
---
x 0|2
0|1 x
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
def testWalkDown(self):
board = Board.create("""\
x 3 x x x
-
x 2 0|2 x
x 0|1 x x
""")
graph = BoardGraph()
expected_states = set("""\
3 x x
-
2 0|2
0|1 x
---
3 x x
-
2 0|2
x 0|1
---
3 x x x
-
2 0|2 x
x x 0|1
---
3 0|2
-
2 0|1
---
3 0|2 x
-
2 x 0|1
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
def ignoreWalkLast(self):
""" Switching to NetworkX broke this. Not really used, so ignore for now.
"""
board = Board.create("""\
3 x x
-
2 0|2
0|1 x
""")
graph = BoardGraph()
expected_last = """\
3 0|2 x
-
2 x 0|1
"""
graph.walk(board)
self.assertMultiLineEqual(expected_last, graph.last)
def testWalkNoSplit(self):
board = Board.create("""\
x 3|2 3|1 x
""")
graph = BoardGraph()
expected_states = set("""\
3|2 3|1
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
def testWalkNoLoner(self):
board = Board.create("""\
x 3 5 x
- -
x 2 4 x
x 3|5 x
""")
graph = BoardGraph()
expected_states = set("""\
3 5
- -
2 4
3|5
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
class CaptureBoardGraphTest(unittest.TestCase):
def testCaptureRight(self):
board = Board.create("""\
0|2 x
1|0 x
""")
graph = CaptureBoardGraph()
expected_states = set("""\
0|2
1|0
---
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
def testSomeUncaptured(self):
board = Board.create("""\
4|4 3
-
1|5 4
""")
graph = CaptureBoardGraph()
expected_states = set("""\
4|4 3
-
1|5 4
---
1|5
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
def testMoveWithoutCapture(self):
board = Board.create("""\
4|3
1|2
""")
graph = CaptureBoardGraph()
expected_states = set("""\
4|3
1|2
---
x 4|3
1|2 x
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
def testMoveLeftUpdatesOffset(self):
start_state = """\
4|3
1|2
"""
board = Board.create(start_state, border=1)
graph = CaptureBoardGraph()
expected_state = """\
x 4|3
1|2 x
"""
graph.walk(board)
offset = [1, 1] # position of bottom left corner (within border)
expected_offset = [1, 0] # equivalent position after move and cropping
state = graph.move(board[1][1].domino, -1, 0, offset)
self.assertEqual(expected_state, state)
self.assertEqual(expected_offset, offset)
def testSolution(self):
graph = CaptureBoardGraph()
expected_solution = ['34u', '24r']
board = Board.create("""\
6|2 3
-
2|4 4
""")
graph.walk(board)
solution = graph.get_solution()
self.assertEqual(expected_solution, solution)
def testDisconnectedBeforeCapture(self):
""" Board must be connected after move and after capture.
Here, move 62L is disconnected after the move, but connected after
the capture removes most of the dominoes. Test that the move is still
not allowed.
"""
board = Board.create("""\
x x x x 5
-
x x 6|2 3
6|6 2|4 x
""")
graph = CaptureBoardGraph()
expected_states = set("""\
x x x x 5
-
x x 6|2 3
6|6 2|4 x
""".split('---\n'))
states = graph.walk(board)
self.assertEqual(expected_states, states)
| [
"[email protected]"
]
| |
909cd365350879c5c496c6bb6d5c9e72bffcfcbd | dbd387349cdbe73200f3a3ab1023abd8a885ad93 | /oauth/models.py | 786d8b3271d8b0689e94fe0f8368ffe54198c956 | []
| no_license | AS207960/oauth | 5cb81eaea656f52c39ab2b0f9619bcbc0941661d | 3304d2deac926e6de35fe7f0af71bd78e70423e8 | refs/heads/master | 2023-07-24T01:38:43.116526 | 2023-07-15T22:11:24 | 2023-07-15T22:11:24 | 330,265,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,960 | py | from django.db import models
from django.urls import reverse
from django.conf import settings
from django.contrib.auth import get_user_model
import uuid
import django_keycloak_auth.clients
import as207960_utils.models
def sync_resource_to_keycloak(self, display_name, resource_type, scopes, urn, view_name, super_save, args, kwargs):
uma_client = django_keycloak_auth.clients.get_uma_client()
token = django_keycloak_auth.clients.get_access_token()
created = False
if not self.pk:
created = True
super_save(*args, **kwargs)
create_kwargs = {
"name": f"{resource_type}_{self.id}",
"displayName": f"{display_name}: {str(self)}",
"ownerManagedAccess": True,
"scopes": scopes,
"type": urn,
"uri": reverse(view_name, args=(self.id,)) if view_name else None,
}
if created or not self.resource_id:
if self.user:
create_kwargs['owner'] = self.user.username
d = uma_client.resource_set_create(
token,
**create_kwargs
)
self.resource_id = d['_id']
super_save(*args, **kwargs)
else:
uma_client.resource_set_update(
token,
id=self.resource_id,
**create_kwargs
)
def delete_resource(resource_id):
uma_client = django_keycloak_auth.clients.get_uma_client()
token = django_keycloak_auth.clients.get_access_token()
uma_client.resource_set_delete(token, resource_id)
def get_object_ids(access_token, resource_type, action):
scope_name = f"{action}-{resource_type}"
permissions = django_keycloak_auth.clients.get_authz_client().get_permissions(access_token)
permissions = permissions.get("permissions", [])
permissions = filter(
lambda p: scope_name in p.get('scopes', []) and p.get('rsname', "").startswith(f"{resource_type}_"),
permissions
)
object_ids = list(map(lambda p: p['rsname'][len(f"{resource_type}_"):], permissions))
return object_ids
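# Editor's worked example (hedged): with resource_type="oauth-client" and action="view",
# a permissions payload shaped roughly like
#   {"permissions": [{"rsname": "oauth-client_42", "scopes": ["view-oauth-client"]},
#                    {"rsname": "billing_7", "scopes": ["view-billing"]}]}
# makes get_object_ids() return ["42"]: only entries whose rsname starts with
# "oauth-client_" and whose scopes include "view-oauth-client" pass the filter.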
def eval_permission(token, resource, scope, submit_request=False):
resource = str(resource)
permissions = django_keycloak_auth.clients.get_authz_client().get_permissions(
token=token,
resource_scopes_tuples=[(resource, scope)],
submit_request=submit_request
)
for permission in permissions.get('permissions', []):
        for granted_scope in permission.get('scopes', []):
            if permission.get('rsid') == resource and granted_scope == scope:
return True
return False
def get_resource_owner(resource_id):
uma_client = django_keycloak_auth.clients.get_uma_client()
token = django_keycloak_auth.clients.get_access_token()
resource = uma_client.resource_set_read(token, resource_id)
owner = resource.get("owner", {}).get("id")
user = get_user_model().objects.filter(username=owner).first()
return user
class OAuthClient(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
realm = models.CharField(max_length=255)
client_id = models.CharField(max_length=255)
resource_id = models.UUIDField(null=True)
def __init__(self, *args, user=None, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
class Meta:
verbose_name = "OAuth Client"
verbose_name_plural = "OAuth Clients"
def __str__(self):
return self.client_id
@classmethod
def get_object_list(cls, access_token: str, action='view'):
return cls.objects.filter(pk__in=get_object_ids(access_token, 'oauth-client', action))
@classmethod
def has_class_scope(cls, access_token: str, action='view'):
scope_name = f"{action}-oauth-client"
return django_keycloak_auth.clients.get_authz_client() \
.eval_permission(access_token, f"oauth-client", scope_name)
def has_scope(self, access_token: str, action='view'):
scope_name = f"{action}-oauth-client"
return eval_permission(access_token, self.resource_id, scope_name)
def save(self, *args, **kwargs):
sync_resource_to_keycloak(
self,
display_name="OAuth Client", resource_type="oauth-client", scopes=[
'view-oauth-client',
'edit-oauth-client',
'delete-oauth-client',
],
urn="urn:as207960:domains:oauth_client", super_save=super().save, view_name='view_client',
args=args, kwargs=kwargs
)
def delete(self, *args, **kwargs):
        super().delete(*args, **kwargs)
delete_resource(self.resource_id)
class PersonalAccessToken(models.Model):
id = as207960_utils.models.TypedUUIDField("oauth_pat", primary_key=True)
revoked = models.BooleanField(blank=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
def __str__(self):
return self.name
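# --- Editor's usage sketch (hedged; names are illustrative only) --------------
# Flow implied by the models above: saving an OAuthClient registers a UMA
# resource set in Keycloak, has_scope() then checks "<action>-oauth-client" for a
# caller's token against that resource, and delete() removes both the row and the
# Keycloak resource.
#
#   client = OAuthClient(realm="master", client_id="my-app", user=some_user)
#   client.save()                    # creates/updates the Keycloak resource set
#   client.has_scope(access_token)   # defaults to the "view-oauth-client" scope
#   client.delete()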
| [
"[email protected]"
]
| |
affd3c9683fcd45c1d12c534f88df28b264321b8 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/__init___parts/UriFormat.py | 87f1c147e2e568c5bda247aa1f44e7aca4eb4f3f | [
"MIT"
]
| permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | class UriFormat(Enum,IComparable,IFormattable,IConvertible):
"""
Controls how URI information is escaped.
enum UriFormat,values: SafeUnescaped (3),Unescaped (2),UriEscaped (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
SafeUnescaped=None
Unescaped=None
UriEscaped=None
value__=None
| [
"[email protected]"
]
| |
dbdaecc9c92364b03753ac08fe2fbbe65a16f506 | 8205128a5a3564a826b4ae432f082a9ed4134316 | /algo/binary-search/_0081_SearchInRotatedSortedArray2.py | 25bb149045c8c612cc771884a7e6fafa3dfd3833 | []
| no_license | ianlai/Note-Python | 507094bda9fefa0fe2f45a629076c679178a2c74 | ca95110b77152258573b6f1d43e39a316cdcb459 | refs/heads/master | 2023-03-19T03:26:35.532874 | 2021-03-10T16:10:51 | 2021-03-10T16:10:51 | 68,308,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | class Solution:
def search(self, nums: List[int], target: int) -> int:
# Edge cases
if nums is None or len(nums) == 0:
return False
# mset = set(nums)
# nums = list(mset)
start, end = 0, len(nums)-1
if nums[start] == target:
return True
if nums[end] == target:
return True
# Preprocess (remove redundants in two ends)
while start < end and nums[start] == nums[end]:
start += 1
# Binary search loop
while start + 1 < end:
mid = (start + end) // 2
#print(start, mid, end)
if target == nums[mid]:
return True
if nums[mid] >= nums[start]:
if nums[start] <= target <= nums[mid]:
end = mid
else:
start = mid
if nums[mid] <= nums[end]:
if nums[mid] <= target <= nums[end]:
start = mid
else:
end = mid
# Binary search check
if nums[start] == target:
return True
if nums[end] == target:
return True
return False | [
"[email protected]"
]
| |
3514a6464e59eb60826de67c90f064f388efa269 | c960c1bf7fd094c031a77a3545bcc797b845edcd | /backend/home/migrations/0002_load_initial_data.py | 82ffbac62de87f64c4e07131f4f9260d1101b9ae | []
| no_license | crowdbotics-apps/chatter-22176 | e6e67efb11219b9ad5b0a680e2fe75263335e961 | a9a30ab3887dc3a766fb99398c3c114ecbd8be99 | refs/heads/master | 2023-01-02T06:00:27.746009 | 2020-11-01T03:15:59 | 2020-11-01T03:15:59 | 309,014,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Chatter"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">Chatter</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "chatter-22176.botics.co"
site_params = {
"name": "Chatter",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
]
| |
92660bcaac41f821096bf113adae2d40568e80c3 | 0d2f636592dc12458254d793f342857298c26f12 | /7-15.py | 112be3b6c1452fb33aa38df5d6f4edefdae54996 | []
| no_license | chenpc1214/test | c6b545dbe13e672f11c58464405e024394fc755b | 8610320686c499be2f5fa36ba9f11935aa6d657b | refs/heads/master | 2022-12-13T22:44:41.256315 | 2020-09-08T16:25:49 | 2020-09-08T16:25:49 | 255,796,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | buyers = [["Jamase",1030],["Curry",893],
["Durant",2050],["Jordan",990],
["David",2110],["Kevin",15000],
["Mary",10050],["Tom",8800],]
infinite = list()
VIP = list()
Gold = list()
while buyers:
fall_out_buyer = buyers.pop()
if fall_out_buyer[1] >= 10000:
infinite.append(fall_out_buyer)
elif 1000 <= fall_out_buyer[1] <= 10000:
VIP.append(fall_out_buyer)
else:
Gold.append(fall_out_buyer)
print("infinite_buyers data:", infinite)
print("VIP_buyers data:", VIP)
print("Gold_buyers data:", Gold)
| [
"[email protected]"
]
| |
4eed67d321cba99d1ee8fd552901b941afb1d2ef | 417f6a92e6179e9da3bc65ae5f56bb274cb47152 | /vindauga/types/vindauga_object.py | b88066e8a0be97c059dd2c3e90d72d7c53085592 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | gabbpuy/vindauga | 6504540f79afa0abed7677103ae50c848a1d18b8 | 04c4def7c1bea135b1b97fdc18f8f45ccd63c40a | refs/heads/master | 2023-06-07T23:01:14.918105 | 2023-06-05T02:07:48 | 2023-06-05T02:07:48 | 194,688,774 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # -*- coding: utf-8 -*-
import gettext
import logging
logger = logging.getLogger(__name__)
gettext.install('vindauga')
class VindaugaObject:
_registry = {}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
try:
VindaugaObject._registry[cls.name] = cls
except AttributeError:
logger.info('A class has no name: %s', cls)
def destroy(self, o):
if o:
o.shutdown()
del o
def shutdown(self):
pass
| [
"[email protected]"
]
| |
66df3c0242eaf51dfc0f6b3c9c7880f5fb7e500e | bf6e7fba9eca62f40ba1a9532994c14e751fdfeb | /test/unit/test_internal_casing.py | b673b60e26e3cd3d9eca90225fd686fc2745b06f | [
"Apache-2.0"
]
| permissive | CyberGRX/py2neo | 11f1a765d2b629c7b6c3e86cb24e842638b3eec9 | e6a50a80f769f21d8024733c4bf83e899443d672 | refs/heads/v4.2-grx | 2023-04-20T06:46:39.158143 | 2023-04-05T22:04:27 | 2023-04-05T22:04:27 | 171,706,053 | 0 | 0 | Apache-2.0 | 2019-02-20T17:42:36 | 2019-02-20T16:13:12 | Python | UTF-8 | Python | false | false | 1,796 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2019, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo.internal.text import Words
def test_breakdown_of_string_with_spaces():
x = Words("hello world")
assert x.words == ("hello", "world")
def test_breakdown_of_string_with_underscores():
x = Words("hello_world")
assert x.words == ("hello", "world")
def test_breakdown_of_string_with_hyphens():
x = Words("hello-world")
assert x.words == ("hello", "world")
def test_breakdown_of_single_word_upper_case_string():
x = Words("HELLO")
assert x.words == ("HELLO",)
def test_breakdown_tuple():
x = Words(("hello", "world"))
assert x.words == ("hello", "world")
def test_upper():
x = Words("Hello world")
assert x.upper() == "HELLO WORLD"
def test_lower():
x = Words("Hello world")
assert x.lower() == "hello world"
def test_title():
x = Words("Hello WORLD")
assert x.title() == "Hello WORLD"
def test_snake():
x = Words("Hello world")
assert x.snake() == "hello_world"
def test_camel():
x = Words("Hello world")
assert x.camel() == "helloWorld"
def test_camel_with_upper_first():
x = Words("Hello world")
assert x.camel(upper_first=True) == "HelloWorld"
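# --- Editor's sketch (hedged) --------------------------------------------------
# A minimal re-implementation consistent with the assertions above. It is NOT the
# real py2neo.internal.text.Words class -- only an illustration of the behaviour
# these tests document (splitting on spaces/underscores/hyphens, case helpers).
import re


class _WordsSketch(object):

    def __init__(self, value):
        if isinstance(value, tuple):
            self.words = value
        else:
            self.words = tuple(re.split(r"[ _-]+", value))

    def upper(self):
        return " ".join(w.upper() for w in self.words)

    def lower(self):
        return " ".join(w.lower() for w in self.words)

    def title(self):
        # Only the first letter of each word is forced upper; the rest is kept.
        return " ".join(w[:1].upper() + w[1:] for w in self.words)

    def snake(self):
        return "_".join(w.lower() for w in self.words)

    def camel(self, upper_first=False):
        first = self.words[0].title() if upper_first else self.words[0].lower()
        return first + "".join(w.title() for w in self.words[1:])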
| [
"[email protected]"
]
| |
1a01b7e712420553340256f4f54e33c95365a01b | 474285a15bf21ac3638249397fe8045b150b1aa5 | /usage/bdrc/download_ocr_output.py | 7338e331e57b439d6491dd68e15773e2a7aec735 | []
| no_license | noncapture1/img2opf | 641f40d2db22823deae99ea83789d346940cb82d | af1f2bbcdfed39032fc44292d8105009120fd066 | refs/heads/master | 2023-04-17T14:12:00.180754 | 2021-04-29T04:25:27 | 2021-04-29T04:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import argparse
import logging
import sys
from pathlib import Path
from typing import Mapping
from bdrc_ocr import (
BATCH_PREFIX,
IMAGES,
OUTPUT,
SERVICE,
get_s3_bits,
get_s3_image_list,
get_s3_prefix_path,
get_volume_infos,
get_work_local_id,
ocr_output_bucket,
)
logging.basicConfig(
filename=f"{__file__}.log",
format="%(asctime)s, %(levelname)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
level=logging.INFO,
)
def get_json_fn(fn):
return f"{fn.split('.')[0]}.json.gz"
def get_s3_key(s3prefix, fn):
return s3prefix + "/" + fn
def save_file(bits, fn, imagegroup_output_dir):
imagegroup_output_dir.mkdir(exist_ok=True, parents=True)
output_fn = imagegroup_output_dir / fn
output_fn.write_bytes(bits.getvalue())
def download_ocr_result_for_vol(
volume_prefix_url, work_local_id, imagegroup, output_base_dir, s3_ocr_paths
):
imagegroup_s3prefix = s3_ocr_paths[OUTPUT]
for imageinfo in get_s3_image_list(volume_prefix_url):
imagegroup_output_dir = output_base_dir / work_local_id / imagegroup
ocr_result_fn = get_json_fn(imageinfo["filename"])
if (imagegroup_output_dir / ocr_result_fn).is_file():
continue
s3_key = get_s3_key(imagegroup_s3prefix, ocr_result_fn)
filebits = get_s3_bits(s3_key, ocr_output_bucket)
if filebits:
save_file(filebits, ocr_result_fn, imagegroup_output_dir)
def process(args):
work_local_id, work = get_work_local_id(args.work)
for vol_info in get_volume_infos(work):
imagegroup = vol_info["imagegroup"]
if imagegroup > args.end:
break
if imagegroup < args.start:
continue
if imagegroup in args.skip:
continue
print(f"[INFO] Processing {vol_info['imagegroup']} ....")
s3_ocr_paths = get_s3_prefix_path(
work_local_id=work_local_id,
imagegroup=vol_info["imagegroup"],
service=SERVICE,
batch_prefix=BATCH_PREFIX,
data_types=[IMAGES, OUTPUT],
)
download_ocr_result_for_vol(
volume_prefix_url=vol_info["volume_prefix_url"],
work_local_id=work_local_id,
imagegroup=vol_info["imagegroup"],
output_base_dir=Path(args.output_dir),
s3_ocr_paths=s3_ocr_paths,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("work")
    parser.add_argument(
        "--output_dir", "-o", default="./archive/output", help="output base directory"
)
parser.add_argument("--start", "-s", default=chr(0), help="start imagegroup")
parser.add_argument(
"--end", "-e", default=chr(sys.maxunicode), help="end imagegroup"
)
    parser.add_argument(
        "--skip", "-sk", default="", help="imagegroups to be skipped (comma separated)"
)
args = parser.parse_args()
process(args)
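    # Editor's note (hedged): a typical invocation implied by the argparse setup
    # above (the work id and imagegroup names are placeholders, not real data):
    #   python download_ocr_output.py W12345 -o ./archive/output -s I0001 -e I0020 -sk I0003
    # --start/--end bound the imagegroup range lexicographically and --skip lists
    # imagegroups to leave out (comma separated), as checked in process().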
| [
"[email protected]"
]
| |
613c451c771753b53f7b622d95595e42af3924d2 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/agfoodplatform/v20200512preview/get_farm_beats_model.py | 69a86e25db7b166badd191892f7224c7ea92a20c | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,205 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetFarmBeatsModelResult',
'AwaitableGetFarmBeatsModelResult',
'get_farm_beats_model',
'get_farm_beats_model_output',
]
@pulumi.output_type
class GetFarmBeatsModelResult:
"""
FarmBeats ARM Resource.
"""
def __init__(__self__, id=None, instance_uri=None, location=None, name=None, provisioning_state=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if instance_uri and not isinstance(instance_uri, str):
raise TypeError("Expected argument 'instance_uri' to be a str")
pulumi.set(__self__, "instance_uri", instance_uri)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceUri")
def instance_uri(self) -> str:
"""
Uri of the FarmBeats instance.
"""
return pulumi.get(self, "instance_uri")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
FarmBeats instance provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetFarmBeatsModelResult(GetFarmBeatsModelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFarmBeatsModelResult(
id=self.id,
instance_uri=self.instance_uri,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_farm_beats_model(farm_beats_resource_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFarmBeatsModelResult:
"""
FarmBeats ARM Resource.
:param str farm_beats_resource_name: FarmBeats resource name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['farmBeatsResourceName'] = farm_beats_resource_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:agfoodplatform/v20200512preview:getFarmBeatsModel', __args__, opts=opts, typ=GetFarmBeatsModelResult).value
return AwaitableGetFarmBeatsModelResult(
id=__ret__.id,
instance_uri=__ret__.instance_uri,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_farm_beats_model)
def get_farm_beats_model_output(farm_beats_resource_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFarmBeatsModelResult]:
"""
FarmBeats ARM Resource.
:param str farm_beats_resource_name: FarmBeats resource name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
| [
"[email protected]"
]
| |
7aa3c98c1a1bde5aa1b86eb6281f3263d0d886e7 | 6f67606189b27ab3dfd20d9fa4b5dab00beb4302 | /MetamorphicTests/all_mutants/sales_forecasting_file/202.py | 92e5351ace082148914d990a9829630f4c2666b3 | [
"Apache-2.0"
]
| permissive | fjgao/Sales-forecasting-with-RNNs | b8d468946d5df8d694178ef0664717c62bf156b8 | 22b4639ecbb48381af53326ace94a3538201b586 | refs/heads/master | 2022-02-09T11:29:04.815900 | 2019-02-08T08:51:23 | 2019-02-08T08:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,615 | py | def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('mutpy', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis') | [
"[email protected]"
]
| |
b3e8729afa74fa57bf0fd12250cb52c0b8caf17f | aeb2f0bb7b01f87a1b6c65b88b216bed47025fe5 | /experiment/model056_2.py | d6f2c7c470fb71e74ff55e8b640e598b04925467 | []
| no_license | kurupical/riiid | 7e68239cd50243fbb734bf433d60ebd7469cb180 | 7bab580ce03d03873748a6afc91092c11871465f | refs/heads/master | 2023-03-30T04:15:54.109815 | 2021-04-04T01:20:33 | 2021-04-04T01:20:33 | 302,828,112 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,996 | py | import numpy as np
import pandas as pd
import gc
import random
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from datetime import datetime as dt
import os
import glob
import pickle
import json
from feature_engineering.feature_factory_for_transformer import FeatureFactoryForTransformer
from feature_engineering.feature_factory import \
FeatureFactoryManager, \
DurationPreviousContent, \
ElapsedTimeBinningEncoder
from experiment.common import get_logger
import time
from transformers import AdamW, get_linear_schedule_with_warmup
torch.manual_seed(0)
np.random.seed(0)
is_debug = False
is_make_feature_factory = False
load_pickle = True
epochs = 8
device = torch.device("cuda")
wait_time = 0
class SAKTDataset(Dataset):
def __init__(self, group, n_skill, n_part=8, max_seq=100, is_test=False, predict_mode=False):
super(SAKTDataset, self).__init__()
self.max_seq = max_seq
self.n_skill = n_skill
self.samples = group
self.is_test = is_test
self.n_part = n_part
self.predict_mode = predict_mode
self.user_ids = []
for user_id in group.keys():
q = group[user_id][("content_id", "content_type_id")]
if not is_test:
self.user_ids.append([user_id, -1])
else:
is_val = group[user_id]["is_val"]
for i in range(len(q)):
if is_val[i]:
self.user_ids.append([user_id, i+1])
def __len__(self):
return len(self.user_ids)
def __getitem__(self, index):
user_id = self.user_ids[index][0]
end = self.user_ids[index][1]
q_ = self.samples[user_id][("content_id", "content_type_id")]
ua_ = self.samples[user_id]["user_answer"]
part_ = self.samples[user_id]["part"]
elapsed_time_ = self.samples[user_id]["prior_question_elapsed_time_bin300"]
duration_previous_content_ = self.samples[user_id]["duration_previous_content_bin300"]
qa_ = self.samples[user_id]["answered_correctly"]
if not self.is_test:
seq_len = len(q_)
else:
start = np.max([0, end - self.max_seq])
q_ = q_[start:end]
part_ = part_[start:end]
qa_ = qa_[start:end]
ua_ = ua_[start:end]
elapsed_time_ = elapsed_time_[start:end]
duration_previous_content_ = duration_previous_content_[start:end]
seq_len = len(q_)
q = np.zeros(self.max_seq, dtype=int)
part = np.zeros(self.max_seq, dtype=int)
qa = np.zeros(self.max_seq, dtype=int)
ua = np.zeros(self.max_seq, dtype=int)
elapsed_time = np.zeros(self.max_seq, dtype=int)
duration_previous_content = np.zeros(self.max_seq, dtype=int)
if seq_len >= self.max_seq:
q[:] = q_[-self.max_seq:]
part[:] = part_[-self.max_seq:]
qa[:] = qa_[-self.max_seq:]
ua[:] = ua_[-self.max_seq:]
elapsed_time[:] = elapsed_time_[-self.max_seq:]
duration_previous_content[:] = duration_previous_content_[-self.max_seq:]
else:
q[-seq_len:] = q_
part[-seq_len:] = part_
qa[-seq_len:] = qa_
ua[-seq_len:] = ua_
elapsed_time[-seq_len:] = elapsed_time_
duration_previous_content[-seq_len:] = duration_previous_content_
target_id = q[1:]
part = part[1:]
elapsed_time = elapsed_time[1:]
duration_previous_content = duration_previous_content[:-1]
label = qa[1:]
x = q[:-1].copy()
x += (qa[:-1]-1) * self.n_skill
x[x < 0] = 0
return {
"x": x,
"target_id": target_id,
"part": part,
"elapsed_time": elapsed_time,
"duration_previous_content": duration_previous_content,
"label": label
}
class FFN(nn.Module):
def __init__(self, state_size=200):
super(FFN, self).__init__()
self.state_size = state_size
self.lr1 = nn.Linear(state_size, state_size)
self.relu = nn.ReLU()
self.lr2 = nn.Linear(state_size, state_size)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = self.lr1(x)
x = self.relu(x)
x = self.lr2(x)
return self.dropout(x)
def future_mask(seq_length):
future_mask = np.triu(np.ones((seq_length, seq_length)), k=1).astype('bool')
return torch.from_numpy(future_mask)
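# Editor's worked example (hedged): future_mask(4) returns the boolean tensor
#   [[False,  True,  True,  True],
#    [False, False,  True,  True],
#    [False, False, False,  True],
#    [False, False, False, False]]
# np.triu(..., k=1) marks everything strictly above the diagonal, so when this is
# used as an attention mask each position may only attend to itself and earlier
# positions.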
class SAKTModel(nn.Module):
def __init__(self, n_skill, max_seq=100, embed_dim=128, num_heads=8, dropout=0.2):
super(SAKTModel, self).__init__()
self.n_skill = n_skill
self.embed_dim = embed_dim
self.embedding = nn.Embedding(4 * n_skill + 1, embed_dim)
self.pos_embedding_enc = nn.Embedding(max_seq - 1, embed_dim)
self.pos_embedding_dec = nn.Embedding(max_seq - 1, embed_dim)
self.e_embedding = nn.Embedding(n_skill + 1, embed_dim)
self.part_embedding = nn.Embedding(8, embed_dim)
self.elapsed_time_embedding = nn.Embedding(302, embed_dim)
self.duration_previous_content_embedding = nn.Embedding(302, embed_dim)
encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=num_heads)
self.transformer_enc = nn.TransformerEncoder(encoder_layer=encoder_layer, num_layers=1)
decoder_layer = nn.TransformerDecoderLayer(d_model=embed_dim, nhead=num_heads)
self.transformer_dec = nn.TransformerDecoder(decoder_layer=decoder_layer, num_layers=1)
self.dropout = nn.Dropout(0.2)
self.layer_normal = nn.LayerNorm(embed_dim)
self.ffn = FFN(embed_dim)
self.pred = nn.Linear(embed_dim, 1)
def forward(self, x, question_ids, parts, elapsed_time, duration_previous_content):
device = x.device
att_mask = future_mask(x.size(1)).to(device)
e = self.e_embedding(question_ids)
p = self.part_embedding(parts)
pos_id_enc = torch.arange(x.size(1)).unsqueeze(0).to(device)
pos_e = self.pos_embedding_enc(pos_id_enc)
e = e + pos_e + p
e = e.permute(1, 0, 2)
att_enc = self.transformer_enc(e, mask=att_mask)
# decoder
x = self.embedding(x)
pos_id_dec = torch.arange(x.size(1)).unsqueeze(0).to(device)
pos_x = self.pos_embedding_dec(pos_id_dec)
el_time_emb = self.elapsed_time_embedding(elapsed_time)
dur_emb = self.duration_previous_content_embedding(duration_previous_content)
x = x + pos_x + el_time_emb + dur_emb
x = x.permute(1, 0, 2) # x: [bs, s_len, embed] => [s_len, bs, embed]
att_dec = self.transformer_dec(tgt=x,
memory=att_enc,
tgt_mask=att_mask,
memory_mask=att_mask)
att_dec = att_dec.permute(1, 0, 2) # att_output: [s_len, bs, embed] => [bs, s_len, embed]
x = self.layer_normal(att_dec)
x = self.ffn(x) + att_dec
x = self.pred(x)
return x.squeeze(-1)
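# One pass over the training iterator. Training accuracy/AUC are computed on the
# last position of each sequence only, and a quick validation AUC is estimated on
# at most ~100 batches of the validation iterator.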
def train_epoch(model, train_iterator, val_iterator, optim, criterion, scheduler, device="cuda"):
model.train()
train_loss = []
num_corrects = 0
num_total = 0
labels = []
outs = []
tbar = tqdm(train_iterator)
for item in tbar:
x = item["x"].to(device).long()
target_id = item["target_id"].to(device).long()
part = item["part"].to(device).long()
label = item["label"].to(device).float()
elapsed_time = item["elapsed_time"].to(device).long()
duration_previous_content = item["duration_previous_content"].to(device).long()
optim.zero_grad()
output = model(x, target_id, part, elapsed_time, duration_previous_content)
target_idx = (label.view(-1) >= 0).nonzero()
loss = criterion(output.view(-1)[target_idx], label.view(-1)[target_idx])
loss.backward()
optim.step()
scheduler.step()
train_loss.append(loss.item())
output = output[:, -1]
label = label[:, -1]
target_idx = (label.view(-1) >= 0).nonzero()
pred = (torch.sigmoid(output) >= 0.5).long()
num_corrects += (pred.view(-1)[target_idx] == label.view(-1)[target_idx]).sum().item()
num_total += len(label)
labels.extend(label.view(-1)[target_idx].data.cpu().numpy())
outs.extend(output.view(-1)[target_idx].data.cpu().numpy())
tbar.set_description('loss - {:.4f}'.format(loss))
acc = num_corrects / num_total
auc = roc_auc_score(labels, outs)
loss = np.mean(train_loss)
preds = []
labels = []
model.eval()
i = 0
for item in tqdm(val_iterator):
x = item["x"].to(device).long()
target_id = item["target_id"].to(device).long()
part = item["part"].to(device).long()
label = item["label"].to(device).float()
elapsed_time = item["elapsed_time"].to(device).long()
duration_previous_content = item["duration_previous_content"].to(device).long()
output = model(x, target_id, part, elapsed_time, duration_previous_content)
preds.extend(torch.nn.Sigmoid()(output[:, -1]).view(-1).data.cpu().numpy().tolist())
labels.extend(label[:, -1].view(-1).data.cpu().numpy())
i += 1
if i > 100:
break
auc_val = roc_auc_score(labels, preds)
return loss, acc, auc, auc_val
def main(params: dict,
output_dir: str):
import mlflow
print("start params={}".format(params))
logger = get_logger()
df = pd.read_pickle("../input/riiid-test-answer-prediction/train_merged.pickle")
# df = pd.read_pickle("../input/riiid-test-answer-prediction/split10/train_0.pickle").sort_values(["user_id", "timestamp"]).reset_index(drop=True)
if is_debug:
df = df.head(30000)
column_config = {
("content_id", "content_type_id"): {"type": "category"},
"user_answer": {"type": "category"},
"part": {"type": "category"},
"prior_question_elapsed_time_bin300": {"type": "category"},
"duration_previous_content_bin300": {"type": "category"}
}
if not load_pickle or is_debug:
feature_factory_dict = {"user_id": {}}
feature_factory_dict["user_id"]["DurationPreviousContent"] = DurationPreviousContent()
feature_factory_dict["user_id"]["ElapsedTimeBinningEncoder"] = ElapsedTimeBinningEncoder()
feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
logger=logger,
split_num=1,
model_id="all",
load_feature=not is_debug,
save_feature=not is_debug)
print("all_predict")
df = feature_factory_manager.all_predict(df)
df = df[["user_id", "content_id", "content_type_id", "part", "user_answer", "answered_correctly", "prior_question_elapsed_time_bin300", "duration_previous_content_bin300"]]
print(df.head(10))
print("data preprocess")
train_idx = []
val_idx = []
np.random.seed(0)
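    # Train/validation split: roughly 1% of users go entirely to validation; for
    # every other user the most recent ~5% of interactions are held out.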
for _, w_df in df[df["content_type_id"] == 0].groupby("user_id"):
if np.random.random() < 0.01:
# all val
val_idx.extend(w_df.index.tolist())
else:
train_num = int(len(w_df) * 0.95)
train_idx.extend(w_df[:train_num].index.tolist())
val_idx.extend(w_df[train_num:].index.tolist())
ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
dict_path="../feature_engineering/",
sequence_length=params["max_seq"],
logger=logger)
ff_for_transformer.make_dict(df=pd.DataFrame())
n_skill = len(ff_for_transformer.embbed_dict[("content_id", "content_type_id")])
if not load_pickle or is_debug:
df["is_val"] = 0
df["is_val"].loc[val_idx] = 1
w_df = df[df["is_val"] == 0]
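        # Split each training user's history into chunks of at most max_seq rows
        # (aligned to the most recent interactions) and treat every chunk as a
        # separate pseudo-user when building training sequences.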
w_df["group"] = (w_df.groupby("user_id")["user_id"].transform("count") - w_df.groupby("user_id").cumcount()) // params["max_seq"]
w_df["user_id"] = w_df["user_id"].astype(str) + "_" + w_df["group"].astype(str)
group = ff_for_transformer.all_predict(w_df)
dataset_train = SAKTDataset(group,
n_skill=n_skill,
max_seq=params["max_seq"])
del w_df
gc.collect()
ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
dict_path="../feature_engineering/",
sequence_length=params["max_seq"],
logger=logger)
if not load_pickle or is_debug:
group = ff_for_transformer.all_predict(df[df["content_type_id"] == 0])
dataset_val = SAKTDataset(group,
is_test=True,
n_skill=n_skill,
max_seq=params["max_seq"])
os.makedirs("../input/feature_engineering/model051", exist_ok=True)
if not is_debug and not load_pickle:
with open(f"../input/feature_engineering/model051/train.pickle", "wb") as f:
pickle.dump(dataset_train, f)
with open(f"../input/feature_engineering/model051/val.pickle", "wb") as f:
pickle.dump(dataset_val, f)
if not is_debug and load_pickle:
with open(f"../input/feature_engineering/model051/train.pickle", "rb") as f:
dataset_train = pickle.load(f)
with open(f"../input/feature_engineering/model051/val.pickle", "rb") as f:
dataset_val = pickle.load(f)
print("loaded!")
dataloader_train = DataLoader(dataset_train, batch_size=params["batch_size"], shuffle=True, num_workers=1)
dataloader_val = DataLoader(dataset_val, batch_size=params["batch_size"], shuffle=False, num_workers=1)
model = SAKTModel(n_skill, embed_dim=params["embed_dim"], max_seq=params["max_seq"], dropout=dropout)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=params["lr"],
weight_decay=0.01,
)
num_train_optimization_steps = int(len(dataloader_train) * epochs)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=params["num_warmup_steps"],
num_training_steps=num_train_optimization_steps)
criterion = nn.BCEWithLogitsLoss()
model.to(device)
criterion.to(device)
for epoch in range(epochs):
loss, acc, auc, auc_val = train_epoch(model, dataloader_train, dataloader_val, optimizer, criterion, scheduler, device)
print("epoch - {} train_loss - {:.3f} auc - {:.4f} auc-val: {:.4f}".format(epoch, loss, auc, auc_val))
preds = []
labels = []
for item in tqdm(dataloader_val):
x = item["x"].to(device).long()
target_id = item["target_id"].to(device).long()
part = item["part"].to(device).long()
label = item["label"].to(device).float()
elapsed_time = item["elapsed_time"].to(device).long()
duration_previous_content = item["duration_previous_content"].to(device).long()
output = model(x, target_id, part, elapsed_time, duration_previous_content)
preds.extend(torch.nn.Sigmoid()(output[:, -1]).view(-1).data.cpu().numpy().tolist())
labels.extend(label[:, -1].view(-1).data.cpu().numpy().tolist())
auc_transformer = roc_auc_score(labels, preds)
print("single transformer: {:.4f}".format(auc_transformer))
df_oof = pd.DataFrame()
# df_oof["row_id"] = df.loc[val_idx].index
print(len(dataloader_val))
print(len(preds))
df_oof["predict"] = preds
df_oof["target"] = labels
df_oof.to_csv(f"{output_dir}/transformers1.csv", index=False)
"""
df_oof2 = pd.read_csv("../output/ex_237/20201213110353/oof_train_0_lgbm.csv")
df_oof2.columns = ["row_id", "predict_lgbm", "target"]
df_oof2 = pd.merge(df_oof, df_oof2, how="inner")
auc_lgbm = roc_auc_score(df_oof2["target"].values, df_oof2["predict_lgbm"].values)
print("lgbm: {:.4f}".format(auc_lgbm))
print("ensemble")
max_auc = 0
max_nn_ratio = 0
for r in np.arange(0, 1.05, 0.05):
auc = roc_auc_score(df_oof2["target"].values, df_oof2["predict_lgbm"].values*(1-r) + df_oof2["predict"].values*r)
print("[nn_ratio: {:.2f}] AUC: {:.4f}".format(r, auc))
if max_auc < auc:
max_auc = auc
max_nn_ratio = r
print(len(df_oof2))
"""
if not is_debug:
mlflow.start_run(experiment_id=10,
run_name=os.path.basename(__file__))
for key, value in params.items():
mlflow.log_param(key, value)
mlflow.log_metric("auc_val", auc_transformer)
mlflow.end_run()
torch.save(model.state_dict(), f"{output_dir}/transformers.pth")
del model
with open(f"{output_dir}/transformer_param.json", "w") as f:
json.dump(params, f)
if is_make_feature_factory:
# feature factory
feature_factory_dict = {"user_id": {}}
feature_factory_dict["user_id"]["DurationPreviousContent"] = DurationPreviousContent(is_partial_fit=True)
feature_factory_dict["user_id"]["ElapsedTimeBinningEncoder"] = ElapsedTimeBinningEncoder()
feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
logger=logger,
split_num=1,
model_id="all",
load_feature=not is_debug,
save_feature=not is_debug)
ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
dict_path="../feature_engineering/",
sequence_length=params["max_seq"],
logger=logger)
df = pd.read_pickle("../input/riiid-test-answer-prediction/train_merged.pickle")
if is_debug:
df = df.head(10000)
df = df.sort_values(["user_id", "timestamp"]).reset_index(drop=True)
feature_factory_manager.fit(df)
df = feature_factory_manager.all_predict(df)
for dicts in feature_factory_manager.feature_factory_dict.values():
for factory in dicts.values():
factory.logger = None
feature_factory_manager.logger = None
with open(f"{output_dir}/feature_factory_manager.pickle", "wb") as f:
pickle.dump(feature_factory_manager, f)
ff_for_transformer.fit(df)
ff_for_transformer.logger = None
with open(f"{output_dir}/feature_factory_manager_for_transformer.pickle", "wb") as f:
pickle.dump(ff_for_transformer, f)
if __name__ == "__main__":
if not is_debug:
for _ in tqdm(range(wait_time)):
time.sleep(1)
output_dir = f"../output/{os.path.basename(__file__).replace('.py', '')}/{dt.now().strftime('%Y%m%d%H%M%S')}/"
os.makedirs(output_dir, exist_ok=True)
for lr in [1e-3]:
for dropout in [0.1]:
if is_debug:
batch_size = 8
else:
batch_size = 1024
params = {"embed_dim": 256,
"max_seq": 100,
"batch_size": batch_size,
"num_warmup_steps": 1000,
"lr": lr,
"dropout": dropout}
main(params, output_dir=output_dir) | [
"[email protected]"
]
| |
e9f8d7fa5afb1585c089bd9b3b7619c57d787d28 | 291f0aa9a40eeca26fb08106c952b9347db7dba7 | /apps/views/main.py | 1d6a6abd023956c00c22858049b0c5c03ba4415f | [
"Apache-2.0"
]
| permissive | gaohj/nzflask_bbs | fad10b93f8f495a94d5d6db6f5c60d85c1c85518 | 36a94c380b78241ed5d1e07edab9618c3e8d477b | refs/heads/master | 2022-12-12T21:43:17.417294 | 2020-03-20T10:28:22 | 2020-03-20T10:28:22 | 239,702,874 | 0 | 2 | Apache-2.0 | 2022-12-08T03:50:07 | 2020-02-11T07:34:01 | JavaScript | UTF-8 | Python | false | false | 1,235 | py | from flask import Blueprint,render_template,flash,redirect,url_for,request
from apps.forms import PostsForm
from flask_login import current_user
from apps.models import Posts
from apps.extensions import db
# Instantiate the blueprint object
main = Blueprint('main',__name__)
@main.route('/',methods=['GET','POST'])
def index():
form = PostsForm()
if form.validate_on_submit():
        # Check whether the user is logged in
if current_user.is_authenticated:
            # Get the currently logged-in user object
u = current_user._get_current_object()
p = Posts(content=form.content.data,user=u)
db.session.add(p)
db.session.commit()
return redirect(url_for('main.index'))
else:
            flash('Please log in first')
return redirect(url_for('users.login'))
    # Fetch all top-level posts (class-based view, GET method)
# posts = Posts.query.filter_by(rid=0).all()
    page = request.args.get('page',1,type=int) # the page number submitted by the client
pagination =Posts.query.filter_by(rid=0).order_by(Posts.timestamp.desc()).paginate(page,per_page=6,error_out=False)
posts = pagination.items
return render_template('main/index.html',form=form,posts=posts,pagination=pagination)
| [
"[email protected]"
]
| |
61e2092b0af89c531bc53eaf2804e05cce81e5ac | be9046ba8823cc5fbb6064db33b195481985cd0e | /hindsight1/management/commands/import_info.py | 3015ebe26bc6a091b3104d89038397aff8642223 | [
"MIT"
]
| permissive | jcaguirre89/mysite | 9f692c6f2cd8dc427aba6d9dd3e4e9dc2a349fb2 | 3b118747d7129d7a249ea2ad7b6644e46e9816f1 | refs/heads/master | 2022-04-02T22:53:39.107234 | 2018-12-10T03:16:50 | 2018-12-10T03:16:50 | 115,764,839 | 0 | 0 | MIT | 2020-02-11T23:49:34 | 2017-12-30T01:04:00 | Python | UTF-8 | Python | false | false | 638 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 18 15:44:31 2017
@author: crist
"""
from hindsight1.models import Sp100
from django.core.management.base import BaseCommand
import os
#flip directory in production
directory = 'C:\\Users\\crist\\mysite\\hindsight1\\static\\hindsight1'
#directory = '/home/cristobal/mysite/hindsight1/static/hindsight1'
filename = 'sp100_info.csv'
fileDir=os.path.join(directory,filename)
class Command(BaseCommand):
def handle(self, *args, **kwargs):
# Since the CSV headers match the model fields,
# you only need to provide the file's path
Sp100.objects.from_csv(fileDir) | [
"[email protected]"
]
| |
41d8ef2dda318ee3978e8bc8f7e4e6dfeef99995 | a52dbc76680dbe4938c4ae81290f2f36f2eae68d | /workflowrepository/urls.py | a41d5bf8d73d619cd0770f7c38858d147acc1f32 | []
| no_license | RodriGuerra98/psi | 4a9969e71a98b1aaf9b0a74d1cbd1d8ced19b425 | b90293c001e65465e6880fe4aaccf0d2d03262b0 | refs/heads/master | 2022-12-01T01:25:50.234245 | 2019-05-30T09:54:51 | 2019-05-30T09:54:51 | 154,207,653 | 0 | 0 | null | 2022-11-22T02:37:05 | 2018-10-22T19:55:28 | JavaScript | UTF-8 | Python | false | false | 1,289 | py | """workflowrepository URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url ,include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from data import views
import find
import upload
from django.conf.urls import handler404
from workflowrepository.views import mi_error_404
handler404 = mi_error_404
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('find.urls')),
url(r'^' ,include('upload.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
]
| |
ff3d80ceee2e30e015c033c3b4658ab7e99e95cc | a9386fd8a14e66c27b5059f562dc239f2c4b0ff7 | /shared/aspace_agent_mapping/agent_parsers/Famname.py | a14f1904186ea9393cba3b3a0afbb7705a741163 | []
| no_license | bentley-historical-library/vandura | 20f93e2f9cf2370e40537f863da9f2f19db329a0 | 0fefc0bf92c2487987a9c23e70187718c3b949f0 | refs/heads/master | 2021-01-17T00:54:08.023435 | 2016-11-04T20:00:04 | 2016-11-04T20:00:04 | 37,206,505 | 0 | 18 | null | 2016-11-04T20:00:05 | 2015-06-10T15:45:33 | Python | UTF-8 | Python | false | false | 358 | py | import json
from vandura.shared.aspace_agent_mapping.agent_parsers.create_famname_json import parse_famname
class Famname:
def __init__(self, string, auth_id="", auth_source=""):
self.data_dict = parse_famname(string, auth_id, auth_source)
def get_aspace_json(self):
return json.dumps({"publish": True, "names": [self.data_dict]})
| [
"[email protected]"
]
| |
b9030902f019e9dce354e517634e289a787183b0 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano3138.py | a30090de7d71ac0ade926757914e98e21dbc05e7 | []
| no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120001/2025C846-D327-844D-98F5-80D6FFADDBE6.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest3138.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
]
| |
511e8fdfa552d15099fcd6f6e830d64f5431b14c | f4d9f4f267743ced708c891193d584615afebeda | /ch05/ch05.py | 57b40afa49903bc6642d19146b7167a8e48e6081 | [
"MIT"
]
| permissive | rongpenl/python-machine-learning-book-3rd-edition | 44766a4293d9d41d9e9e1d0df4823a46b0c8ccf7 | 18b0fe75f7f74bcc59e2258120c6338e2c3cd113 | refs/heads/master | 2023-03-27T03:20:12.986966 | 2021-03-31T04:59:43 | 2021-03-31T04:59:43 | 301,573,449 | 1 | 0 | MIT | 2021-03-31T04:59:44 | 2020-10-06T00:31:04 | null | UTF-8 | Python | false | false | 26,659 | py | # coding: utf-8
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from scipy.spatial.distance import pdist, squareform
from scipy.linalg import eigh
from distutils.version import LooseVersion as Version
from scipy import __version__ as scipy_version
# `exp` is imported conditionally further below, depending on the installed SciPy version.
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA
# *Python Machine Learning 3rd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2019
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-3rd-edition/blob/master/LICENSE.txt)
# # Python Machine Learning - Code Examples
# # Chapter 5 - Compressing Data via Dimensionality Reduction
# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
# *The use of `watermark` is optional. You can install this Jupyter extension via*
#
# conda install watermark -c conda-forge
#
# or
#
# pip install watermark
#
# *For more information, please see: https://github.com/rasbt/watermark.*
# ### Overview
# - [Unsupervised dimensionality reduction via principal component analysis 128](#Unsupervised-dimensionality-reduction-via-principal-component-analysis-128)
# - [The main steps behind principal component analysis](#The-main-steps-behind-principal-component-analysis)
# - [Extracting the principal components step-by-step](#Extracting-the-principal-components-step-by-step)
# - [Total and explained variance](#Total-and-explained-variance)
# - [Feature transformation](#Feature-transformation)
# - [Principal component analysis in scikit-learn](#Principal-component-analysis-in-scikit-learn)
# - [Supervised data compression via linear discriminant analysis](#Supervised-data-compression-via-linear-discriminant-analysis)
# - [Principal component analysis versus linear discriminant analysis](#Principal-component-analysis-versus-linear-discriminant-analysis)
# - [The inner workings of linear discriminant analysis](#The-inner-workings-of-linear-discriminant-analysis)
# - [Computing the scatter matrices](#Computing-the-scatter-matrices)
# - [Selecting linear discriminants for the new feature subspace](#Selecting-linear-discriminants-for-the-new-feature-subspace)
# - [Projecting examples onto the new feature space](#Projecting-examples-onto-the-new-feature-space)
# - [LDA via scikit-learn](#LDA-via-scikit-learn)
# - [Using kernel principal component analysis for nonlinear mappings](#Using-kernel-principal-component-analysis-for-nonlinear-mappings)
# - [Kernel functions and the kernel trick](#Kernel-functions-and-the-kernel-trick)
# - [Implementing a kernel principal component analysis in Python](#Implementing-a-kernel-principal-component-analysis-in-Python)
# - [Example 1 – separating half-moon shapes](#Example-1:-Separating-half-moon-shapes)
# - [Example 2 – separating concentric circles](#Example-2:-Separating-concentric-circles)
# - [Projecting new data points](#Projecting-new-data-points)
# - [Kernel principal component analysis in scikit-learn](#Kernel-principal-component-analysis-in-scikit-learn)
# - [Summary](#Summary)
# # Unsupervised dimensionality reduction via principal component analysis
# ## The main steps behind principal component analysis
# ## Extracting the principal components step-by-step
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
# if the Wine dataset is temporarily unavailable from the
# UCI machine learning repository, un-comment the following line
# of code to load the dataset from a local path:
# df_wine = pd.read_csv('wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
df_wine.head()
# Splitting the data into 70% training and 30% test subsets.
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
stratify=y,
random_state=0)
# Standardizing the data.
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
# ---
#
# **Note**
#
# Accidentally, I wrote `X_test_std = sc.fit_transform(X_test)` instead of `X_test_std = sc.transform(X_test)`. In this case, it wouldn't make a big difference since the mean and standard deviation of the test set should be (quite) similar to the training set. However, as you may remember from Chapter 3, the correct way is to re-use parameters from the training set if we are doing any kind of transformation -- the test set should basically stand for "new, unseen" data.
#
# My initial typo reflects a common mistake is that some people are *not* re-using these parameters from the model training/building and standardize the new data "from scratch." Here's simple example to explain why this is a problem.
#
# Let's assume we have a simple training set consisting of 3 examples with 1 feature (let's call this feature "length"):
#
# - train_1: 10 cm -> class_2
# - train_2: 20 cm -> class_2
# - train_3: 30 cm -> class_1
#
# mean: 20, std.: 8.2
#
# After standardization, the transformed feature values are
#
# - train_std_1: -1.21 -> class_2
# - train_std_2: 0 -> class_2
# - train_std_3: 1.21 -> class_1
#
# Next, let's assume our model has learned to classify examples with a standardized length value < 0.6 as class_2 (class_1 otherwise). So far so good. Now, let's say we have 3 unlabeled data points that we want to classify:
#
# - new_4: 5 cm -> class ?
# - new_5: 6 cm -> class ?
# - new_6: 7 cm -> class ?
#
# If we look at the unstandardized "length" values in our training dataset, it is intuitive to say that all of these examples likely belong to class_2. However, if we standardize them by re-computing the standard deviation and mean from the new data, we would get similar values as before in the training set, and the classifier would assign class_2 only to examples new_4 and new_5, while (probably incorrectly) assigning class_1 to new_6.
#
# - new_std_4: -1.21 -> class 2
# - new_std_5: 0 -> class 2
# - new_std_6: 1.21 -> class 1
#
# However, if we use the parameters from our "training set standardization," we'd get the values:
#
# - new_4: -1.84 -> class 2
# - new_5: -1.71 -> class 2
# - new_6: -1.59 -> class 2
#
# The values 5 cm, 6 cm, and 7 cm are much lower than anything we have seen in the training set previously. Thus, it only makes sense that the standardized features of the "new examples" are much lower than every standardized feature in the training set.
#
# ---
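# The short sketch below is an illustrative addition (it is not part of the original
# notebook): it reuses the scaler fitted on the hypothetical training "length" values
# instead of re-fitting it on the new data.
length_train = np.array([[10.], [20.], [30.]])
length_new = np.array([[5.], [6.], [7.]])
sc_len = StandardScaler().fit(length_train)
print(sc_len.transform(length_new).ravel())                # approx. [-1.84 -1.71 -1.59]
print(StandardScaler().fit_transform(length_new).ravel())  # approx. [-1.22  0.    1.22] (re-fit: misleading)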
# Eigendecomposition of the covariance matrix.
cov_mat = np.cov(X_train_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
print('\nEigenvalues \n%s' % eigen_vals)
# **Note**:
#
# Above, I used the [`numpy.linalg.eig`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html) function to decompose the symmetric covariance matrix into its eigenvalues and eigenvectors.
# <pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)</pre>
# This is not really a "mistake," but probably suboptimal. It would be better to use [`numpy.linalg.eigh`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html) in such cases, which has been designed for [Hermitian matrices](https://en.wikipedia.org/wiki/Hermitian_matrix). The latter always returns real eigenvalues; whereas the numerically less stable `np.linalg.eig` can decompose nonsymmetric square matrices, you may find that it returns complex eigenvalues in certain cases. (S.R.)
#
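# As a sketch of the alternative mentioned above (not in the original code):
# `np.linalg.eigh` is the numerically preferable routine for the symmetric
# covariance matrix; note that it returns the eigenvalues in ascending order.
eigen_vals_h, eigen_vecs_h = np.linalg.eigh(cov_mat)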
# ## Total and explained variance
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
plt.bar(range(1, 14), var_exp, alpha=0.5, align='center',
label='Individual explained variance')
plt.step(range(1, 14), cum_var_exp, where='mid',
label='Cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal component index')
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('images/05_02.png', dpi=300)
plt.show()
# ## Feature transformation
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs.sort(key=lambda k: k[0], reverse=True)
w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
eigen_pairs[1][1][:, np.newaxis]))
print('Matrix W:\n', w)
# **Note**
# Depending on which version of NumPy and LAPACK you are using, you may obtain the Matrix W with its signs flipped. Please note that this is not an issue: If $v$ is an eigenvector of a matrix $\Sigma$, we have
#
# $$\Sigma v = \lambda v,$$
#
# where $\lambda$ is our eigenvalue,
#
#
# then $-v$ is also an eigenvector that has the same eigenvalue, since
# $$\Sigma \cdot (-v) = -\Sigma v = -\lambda v = \lambda \cdot (-v).$$
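# Quick illustrative check of the note above (an addition, not from the original
# notebook): projecting onto -w simply flips the sign of every projected coordinate.
print(np.allclose(X_train_std.dot(-w), -X_train_std.dot(w)))  # True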
X_train_std[0].dot(w)
X_train_pca = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train == l, 0],
X_train_pca[y_train == l, 1],
c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_03.png', dpi=300)
plt.show()
# ## Principal component analysis in scikit-learn
# **NOTE**
#
# The following four code cells have been added in addition to the content of the book, to illustrate how to replicate the results from our own PCA implementation in scikit-learn:
pca = PCA()
X_train_pca = pca.fit_transform(X_train_std)
pca.explained_variance_ratio_
plt.bar(range(1, 14), pca.explained_variance_ratio_, alpha=0.5, align='center')
plt.step(range(1, 14), np.cumsum(pca.explained_variance_ratio_), where='mid')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.show()
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot examples by class
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.6,
color=cmap(idx),
edgecolor='black',
marker=markers[idx],
label=cl)
# Training logistic regression classifier using the first 2 principal components.
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
lr = LogisticRegression(multi_class='ovr', random_state=1, solver='lbfgs')
lr = lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_04.png', dpi=300)
plt.show()
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_05.png', dpi=300)
plt.show()
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
pca.explained_variance_ratio_
# # Supervised data compression via linear discriminant analysis
# ## Principal component analysis versus linear discriminant analysis
# ## The inner workings of linear discriminant analysis
# ## Computing the scatter matrices
# Calculate the mean vectors for each class:
np.set_printoptions(precision=4)
mean_vecs = []
for label in range(1, 4):
mean_vecs.append(np.mean(X_train_std[y_train == label], axis=0))
print('MV %s: %s\n' % (label, mean_vecs[label - 1]))
# Compute the within-class scatter matrix:
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.zeros((d, d)) # scatter matrix for each class
for row in X_train_std[y_train == label]:
row, mv = row.reshape(d, 1), mv.reshape(d, 1) # make column vectors
class_scatter += (row - mv).dot((row - mv).T)
S_W += class_scatter # sum class scatter matrices
print('Within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1]))
# Better: covariance matrix since classes are not equally distributed:
print('Class label distribution: %s'
% np.bincount(y_train)[1:])
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.cov(X_train_std[y_train == label].T)
S_W += class_scatter
print('Scaled within-class scatter matrix: %sx%s' % (S_W.shape[0],
S_W.shape[1]))
# Compute the between-class scatter matrix:
mean_overall = np.mean(X_train_std, axis=0)
d = 13 # number of features
S_B = np.zeros((d, d))
for i, mean_vec in enumerate(mean_vecs):
n = X_train_std[y_train == i + 1, :].shape[0]
mean_vec = mean_vec.reshape(d, 1) # make column vector
mean_overall = mean_overall.reshape(d, 1) # make column vector
S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T)
print('Between-class scatter matrix: %sx%s' % (S_B.shape[0], S_B.shape[1]))
# ## Selecting linear discriminants for the new feature subspace
# Solve the generalized eigenvalue problem for the matrix $S_W^{-1}S_B$:
eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
# **Note**:
#
# Above, I used the [`numpy.linalg.eig`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html) function to decompose the symmetric covariance matrix into its eigenvalues and eigenvectors.
# <pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)</pre>
# This is not really a "mistake," but probably suboptimal. It would be better to use [`numpy.linalg.eigh`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html) in such cases, which has been designed for [Hermitian matrices](https://en.wikipedia.org/wiki/Hermitian_matrix). The latter always returns real eigenvalues; whereas the numerically less stable `np.linalg.eig` can decompose nonsymmetric square matrices, you may find that it returns complex eigenvalues in certain cases. (S.R.)
#
# Sort eigenvectors in descending order of the eigenvalues:
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in descending order:\n')
for eigen_val in eigen_pairs:
print(eigen_val[0])
tot = sum(eigen_vals.real)
discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)]
cum_discr = np.cumsum(discr)
plt.bar(range(1, 14), discr, alpha=0.5, align='center',
label='Individual "discriminability"')
plt.step(range(1, 14), cum_discr, where='mid',
label='Cumulative "discriminability"')
plt.ylabel('"Discriminability" ratio')
plt.xlabel('Linear discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('images/05_07.png', dpi=300)
plt.show()
w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real,
eigen_pairs[1][1][:, np.newaxis].real))
print('Matrix W:\n', w)
# ## Projecting examples onto the new feature space
X_train_lda = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_lda[y_train == l, 0],
X_train_lda[y_train == l, 1] * (-1),
c=c, label=l, marker=m)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower right')
plt.tight_layout()
# plt.savefig('images/05_08.png', dpi=300)
plt.show()
# ## LDA via scikit-learn
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
lr = LogisticRegression(multi_class='ovr', random_state=1, solver='lbfgs')
lr = lr.fit(X_train_lda, y_train)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_09.png', dpi=300)
plt.show()
X_test_lda = lda.transform(X_test_std)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_10.png', dpi=300)
plt.show()
# # Using kernel principal component analysis for nonlinear mappings
# ## Implementing a kernel principal component analysis in Python
if scipy_version >= Version('1.4.1'):
    from numpy import exp
else:
    from scipy import exp
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_examples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
X_pc: {NumPy ndarray}, shape = [n_examples, k_features]
Projected dataset
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# scipy.linalg.eigh returns them in ascending order
eigvals, eigvecs = eigh(K)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]
# Collect the top k eigenvectors (projected examples)
X_pc = np.column_stack([eigvecs[:, i]
for i in range(n_components)])
return X_pc
# ### Example 1: Separating half-moon shapes
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
plt.tight_layout()
# plt.savefig('images/05_12.png', dpi=300)
plt.show()
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_13.png', dpi=300)
plt.show()
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y==0, 0], np.zeros((50, 1))+0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((50, 1))-0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_14.png', dpi=300)
plt.show()
# ### Example 2: Separating concentric circles
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
plt.tight_layout()
# plt.savefig('images/05_15.png', dpi=300)
plt.show()
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_16.png', dpi=300)
plt.show()
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_17.png', dpi=300)
plt.show()
# ## Projecting new data points
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_examples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
alphas: {NumPy ndarray}, shape = [n_examples, k_features]
Projected dataset
lambdas: list
Eigenvalues
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# scipy.linalg.eigh returns them in ascending order
eigvals, eigvecs = eigh(K)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]
# Collect the top k eigenvectors (projected examples)
alphas = np.column_stack([eigvecs[:, i]
for i in range(n_components)])
# Collect the corresponding eigenvalues
lambdas = [eigvals[i] for i in range(n_components)]
return alphas, lambdas
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1)
x_new = X[25]
x_new
x_proj = alphas[25] # original projection
x_proj
def project_x(x_new, X, gamma, alphas, lambdas):
pair_dist = np.array([np.sum((x_new - row)**2) for row in X])
k = np.exp(-gamma * pair_dist)
return k.dot(alphas / lambdas)
# projection of the "new" datapoint
x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas)
x_reproj
plt.scatter(alphas[y == 0, 0], np.zeros((50)),
color='red', marker='^', alpha=0.5)
plt.scatter(alphas[y == 1, 0], np.zeros((50)),
color='blue', marker='o', alpha=0.5)
plt.scatter(x_proj, 0, color='black',
label='Original projection of point X[25]', marker='^', s=100)
plt.scatter(x_reproj, 0, color='green',
label='Remapped point X[25]', marker='x', s=500)
plt.yticks([], [])
plt.legend(scatterpoints=1)
plt.tight_layout()
# plt.savefig('images/05_18.png', dpi=300)
plt.show()
# ## Kernel principal component analysis in scikit-learn
X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.scatter(X_skernpca[y == 0, 0], X_skernpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y == 1, 0], X_skernpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.tight_layout()
# plt.savefig('images/05_19.png', dpi=300)
plt.show()
# # Summary
# ...
# ---
#
# Readers may ignore the next cell.
| [
"[email protected]"
]
| |
94d8ad77d72ccf80b5d64fa32c4ea8bcb9553559 | 5ea1216c24b62c6beab3c6d9d2e2e06a9c58c796 | /总题库/105.ConstructBinaryTreefromPreorderandInorderTraversal.py | 1bdc7e3cc68a120486922f2d5225c61b60ba3b78 | []
| no_license | xiami2019/LeetCode | 596de2f093d52b58cf80421f67de03757578cd5f | 8d09a56672553ecee4af731796980b2c61c52df2 | refs/heads/master | 2021-01-02T15:15:13.984178 | 2020-07-08T01:20:15 | 2020-07-08T01:20:15 | 239,675,873 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
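    # preorder[0] is always the root of the current subtree; find its position in
    # inorder[inStart..inEnd] to split the remaining values into the left and right
    # subtrees, consuming preorder from the front while recursing.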
def buildTreeHelper(self, preorder: List[int], inorder: List[int], inStart: int, inEnd: int) -> TreeNode:
if inEnd - inStart < 0:
return None
node = TreeNode(preorder[0])
index = 0
while index <= inEnd and inorder[index] != preorder[0]:
index += 1
preorder.pop(0)
node.left = self.buildTreeHelper(preorder, inorder, inStart, index - 1)
node.right = self.buildTreeHelper(preorder, inorder, index + 1, inEnd)
return node
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
return self.buildTreeHelper(preorder, inorder, 0, len(inorder) - 1) | [
"[email protected]"
]
| |
c6bcd3805327404ac9bc97517bc70adb46ae2a6b | 52855d750ccd5f2a89e960a2cd03365a3daf4959 | /ABC/ABC46_B.py | 8301ae439c95268a2db1f06d809ef3d503da3f4d | []
| no_license | takuwaaan/Atcoder_Study | b15d4f3d15d48abb06895d5938bf8ab53fb73c08 | 6fd772c09c7816d147abdc50669ec2bbc1bc4a57 | refs/heads/master | 2021-03-10T18:56:04.416805 | 2020-03-30T22:36:49 | 2020-03-30T22:36:49 | 246,477,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | n,k = map(int,input().split())
s = k
while(n>1):
s *= (k-1)
n-=1
print(s) | [
"[email protected]"
]
| |
c6879728d54c08d070de5533473c59cb546bae77 | 4266e9b1c59ddef83eede23e0fcbd6e09e0fa5cb | /vs/gyp/test/mac/gyptest-type-envvars.py | b75e094636a0f57ec51dcaf30ca221e66a904e25 | [
"BSD-3-Clause"
]
| permissive | barrystudy/study | b3ba6ed652d1a0bcf8c2e88a2a693fa5f6bf2115 | 96f6bb98966d3633b47aaf8e533cd36af253989f | refs/heads/master | 2020-12-24T14:53:06.219236 | 2017-10-23T02:22:28 | 2017-10-23T02:22:28 | 41,944,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that MACH_O_TYPE etc are set correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='type_envvars')
test.build('test.gyp', test.ALL, chdir='type_envvars')
# The actual test is done by postbuild scripts during |test.build()|.
test.pass_test()
| [
"[email protected]"
]
| |
01eecb6722bbf993c6ea5ee09043fc12c6f5c5aa | 5afcc3b02b7f4fe14e90f33b0a42bfc51b278e19 | /matlab_ext/measurement/mc-assistant/projects/py_hw_models/test_sensor_channals.py | f1d57724af0694d74b3bcfb6ffb9bfafd0ed9a71 | [
"MIT",
"Apache-2.0"
]
| permissive | zaqwes8811/micro-apps | c9e51fa7931c9d5625e1517bad7b1593104a50c0 | bb1643562751dda70ae4f8bd632a171f1de05df5 | refs/heads/master | 2023-04-27T21:04:09.457192 | 2023-04-25T08:41:23 | 2023-04-25T08:41:23 | 11,820,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | #-*- coding: utf-8 -*-
import random
import unittest
# App
import models.sensors.one_sensor as sensor_channal
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.seq = range(10)
def test_shuffle(self):
# make sure the shuffled sequence does not lose any elements
random.shuffle(self.seq)
self.seq.sort()
self.assertEqual(self.seq, range(10))
# should raise an exception for an immutable sequence
self.assertRaises(TypeError, random.shuffle, (1,2,3))
def test_choice(self):
element = random.choice(self.seq)
self.assertTrue(element in self.seq)
def test_sample(self):
with self.assertRaises(ValueError):
random.sample(self.seq, 20)
for element in random.sample(self.seq, 5):
self.assertTrue(element in self.seq)
if __name__ == '__main__':
#unittest.main()
pass
name = 'I' # Current
cfg = sensor_channal.get_sensor_cfg_new(
name, sensor_channal.kSensorCfgMap)
print cfg
| [
"[email protected]"
]
| |
d817e562bc6c67af216ebf7ecd5e1a2ab53daf12 | f9b3b867abfbfb01ab57cb249a2e0fcb372e435b | /examples/hcaptcha_request_proxy.py | f271ac3797a79202e129afa48a77d6d1ebc8801c | [
"MIT"
]
| permissive | ad-m/python-anticaptcha | af615cfd7549d48829adb441b837eed1373fe782 | 076922ee646483328c580c6623f7cb49a2ea4493 | refs/heads/master | 2022-05-17T17:18:51.638547 | 2022-03-28T03:44:41 | 2022-03-28T03:44:41 | 95,789,669 | 241 | 66 | MIT | 2022-03-28T03:38:57 | 2017-06-29T15:05:36 | Python | UTF-8 | Python | false | false | 1,564 | py | from six.moves.urllib import parse
import re
import requests
from os import environ
from python_anticaptcha import AnticaptchaClient, HCaptchaTask
api_key = environ["KEY"]
proxy_url = environ["PROXY_URL"]  # e.g. socks5://user:[email protected]:8888/
site_key_pattern = 'data-sitekey="(.+?)"'
url = "http://hcaptcha.jawne.info.pl/"
client = AnticaptchaClient(api_key)
session = requests.Session()
EXPECTED_RESULT = "Your request have submitted successfully."
UA = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
)
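# Overall flow: fetch the form page, extract the hCaptcha sitekey, solve it through
# Anti-Captcha using the configured proxy, then submit the returned token with the form.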
def parse_url(url):
parsed = parse.urlparse(url)
return dict(
proxy_type=parsed.scheme,
proxy_address=parsed.hostname,
proxy_port=parsed.port,
proxy_login=parsed.username,
proxy_password=parsed.password,
)
def get_form_html():
return session.get(url).text
def get_token(form_html):
site_key = re.search(site_key_pattern, form_html).group(1)
proxy = parse_url(proxy_url)
task = HCaptchaTask(
website_url=url,
website_key=site_key,
user_agent=UA,
cookies="test=test",
**proxy
)
job = client.createTask(task)
job.join()
return job.get_solution_response()
def form_submit(token):
return requests.post(url, data={"g-recaptcha-response": token}).text
def process():
html = get_form_html()
token = get_token(html)
return form_submit(token)
if __name__ == "__main__":
assert EXPECTED_RESULT in process()
| [
"[email protected]"
]
| |
c3ab1e1ec0477a77f93cd10d4c0a7d9ae3f4be26 | 43acaf9718b0a62594ed8e42b6c01099acd2d075 | /apps/lista/migrations/0030_auto_20200402_1450.py | 8d2059b1ff3c88db1f6e081b9f5468bc816c8f87 | []
| no_license | JmSubelza/Demo | 2f357889975c183b4a0f627330a80e535823faea | affceeadb87f1f14fb4e481851a1ac107e512f48 | refs/heads/master | 2023-05-14T18:16:38.153963 | 2020-04-28T16:15:27 | 2020-04-28T16:15:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-04-02 19:50
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('lista', '0029_auto_20200331_1704'),
]
operations = [
migrations.AlterModelOptions(
name='listado',
options={'ordering': ['-periodo'], 'verbose_name': 'listado', 'verbose_name_plural': 'listados'},
),
migrations.AlterField(
model_name='listado',
name='fecha',
field=models.DateField(default=datetime.datetime(2020, 4, 2, 19, 50, 36, 334173, tzinfo=utc)),
),
migrations.AlterModelTable(
name='listado',
table='listado',
),
]
| [
"[email protected]"
]
| |
beb7d60cedbda9144e4ee017272c8d5542552808 | ec8414291c40bbdef2b43d4360ad2e046109056a | /datashape/promote.py | 20ba0884261dec8c8e714d97c0957577faadf5e5 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | llllllllll/datashape | bd1b2ad09b01bae014af438c754d21ee87bf72c5 | 891e397b3facceede5f277ef578cccdd2319fd6f | refs/heads/master | 2020-12-26T03:56:35.854808 | 2015-08-14T15:29:49 | 2015-08-14T15:29:49 | 37,879,722 | 0 | 0 | null | 2015-06-22T20:46:14 | 2015-06-22T20:46:14 | null | UTF-8 | Python | false | false | 1,561 | py | from __future__ import absolute_import
import numpy as np
import datashape
__all__ = ['promote', 'optionify']
def promote(lhs, rhs):
"""Promote two scalar dshapes to a possibly larger, but compatible type.
Examples
--------
>>> from datashape import int32, int64, Option
>>> x = Option(int32)
>>> y = int64
>>> promote(x, y)
?int64
>>> promote(int64, int64)
ctype("int64")
Notes
----
This uses ``numpy.result_type`` for type promotion logic. See the numpy
documentation at
http://docs.scipy.org/doc/numpy/reference/generated/numpy.result_type.html
"""
if lhs == rhs:
return lhs
else:
left, right = getattr(lhs, 'ty', lhs), getattr(rhs, 'ty', rhs)
dtype = np.result_type(datashape.to_numpy_dtype(left),
datashape.to_numpy_dtype(right))
return optionify(lhs, rhs, datashape.CType.from_numpy_dtype(dtype))
def optionify(lhs, rhs, dshape):
"""Check whether a binary operation's dshape came from
:class:`~datashape.coretypes.Option` typed operands and construct an
:class:`~datashape.coretypes.Option` type accordingly.
Examples
--------
>>> from datashape import int32, int64, Option
>>> x = Option(int32)
>>> x
?int32
>>> y = int64
>>> y
ctype("int64")
>>> optionify(x, y, int64)
?int64
"""
if hasattr(dshape.measure, 'ty'):
return dshape
if hasattr(lhs, 'ty') or hasattr(rhs, 'ty'):
return datashape.Option(dshape)
return dshape
| [
"[email protected]"
]
| |
3b314af16cc710d4bbcdf1d55df86397678882d4 | 99e65ad1427b8997a6d433f233bcc60ef2b9bc92 | /tests/contrib/test_dropbox.py | 5693d59696509d92837fedf07c9f98dcca8036e8 | [
"MIT"
]
| permissive | gregorynicholas/flask-dance | 29ea359ab98a661cf0920328132700399d32c7fb | eb3f947340a372cd596cb743353b7e3ed5682e76 | refs/heads/master | 2020-12-29T00:41:40.345560 | 2015-05-13T03:25:37 | 2015-05-13T03:25:37 | 36,555,631 | 1 | 1 | null | 2015-05-30T11:37:05 | 2015-05-30T11:37:05 | null | UTF-8 | Python | false | false | 3,767 | py | from __future__ import unicode_literals
import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.dropbox import make_dropbox_blueprint, dropbox
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.backend import MemoryBackend
def test_blueprint_factory():
dropbox_bp = make_dropbox_blueprint(
app_key="foo",
app_secret="bar",
)
assert isinstance(dropbox_bp, OAuth2ConsumerBlueprint)
assert dropbox_bp.session.base_url == "https://api.dropbox.com/1/"
assert dropbox_bp.session.client_id == "foo"
assert dropbox_bp.client_secret == "bar"
assert dropbox_bp.authorization_url == "https://www.dropbox.com/1/oauth2/authorize"
assert dropbox_bp.token_url == "https://api.dropbox.com/1/oauth2/token"
def test_load_from_config():
app = Flask(__name__)
app.secret_key = "anything"
app.config["DROPBOX_OAUTH_APP_KEY"] = "foo"
app.config["DROPBOX_OAUTH_APP_SECRET"] = "bar"
dropbox_bp = make_dropbox_blueprint()
app.register_blueprint(dropbox_bp)
resp = app.test_client().get("/dropbox")
url = resp.headers["Location"]
client_id = URLObject(url).query.dict.get("client_id")
assert client_id == "foo"
@responses.activate
def test_context_local():
responses.add(responses.GET, "https://dropbox.com")
    # set up two apps with two different sets of auth tokens
app1 = Flask(__name__)
dropbox_bp1 = make_dropbox_blueprint(
"foo1", "bar1", redirect_to="url1",
backend=MemoryBackend({"access_token": "app1"}),
)
app1.register_blueprint(dropbox_bp1)
app2 = Flask(__name__)
dropbox_bp2 = make_dropbox_blueprint(
"foo2", "bar2", redirect_to="url2",
backend=MemoryBackend({"access_token": "app2"}),
)
app2.register_blueprint(dropbox_bp2)
# outside of a request context, referencing functions on the `dropbox` object
# will raise an exception
with pytest.raises(RuntimeError):
dropbox.get("https://dropbox.com")
# inside of a request context, `dropbox` should be a proxy to the correct
# blueprint session
with app1.test_request_context("/"):
app1.preprocess_request()
dropbox.get("https://dropbox.com")
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer app1"
with app2.test_request_context("/"):
app2.preprocess_request()
dropbox.get("https://dropbox.com")
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer app2"
def test_force_reapprove():
app = Flask(__name__)
app.secret_key = "forced"
dropbox_bp = make_dropbox_blueprint("foo", "bar", force_reapprove=True)
app.register_blueprint(dropbox_bp)
with app.test_client() as client:
resp = client.get(
"/dropbox",
base_url="https://a.b.c",
follow_redirects=False,
)
# check that there is a `force_reapprove=true` query param in the redirect URL
assert resp.status_code == 302
location = URLObject(resp.headers["Location"])
assert location.query_dict["force_reapprove"] == "true"
def test_disable_signup():
app = Flask(__name__)
app.secret_key = "apple-app-store"
dropbox_bp = make_dropbox_blueprint(
"foo", "bar", disable_signup=True,
)
app.register_blueprint(dropbox_bp)
with app.test_client() as client:
resp = client.get(
"/dropbox",
base_url="https://a.b.c",
follow_redirects=False,
)
assert resp.status_code == 302
location = URLObject(resp.headers["Location"])
assert location.query_dict["disable_signup"] == "true"
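# Illustrative sketch (not part of the test module): wiring the Dropbox
# blueprint into an application the way these tests do; the secret key and
# OAuth credentials below are hypothetical placeholders.
#
#   app = Flask(__name__)
#   app.secret_key = "supersekrit"
#   dropbox_bp = make_dropbox_blueprint(app_key="my-key", app_secret="my-secret")
#   app.register_blueprint(dropbox_bp, url_prefix="/login")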
| [
"[email protected]"
]
| |
12bef43bcb86da9e884cb5399a3449a2dfefee0a | 57e3e8e7beb7029297c34449f73e4f76b7a08643 | /third_party/nucleus/io/genomics_reader.py | e2e661ceb2fa477e87108a191960cd8a6fb03ee5 | [
"BSL-1.0",
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | gunjanbaid/deepvariant | d477b44f87961183b2ffa8e2bece617c4e210e3b | c2167e7c90f016905f309f118eb3897935ee7c5f | refs/heads/r0.7 | 2021-12-01T23:10:28.492723 | 2018-12-12T18:45:43 | 2018-12-12T18:47:06 | 168,020,350 | 3 | 1 | BSD-3-Clause | 2019-09-04T17:21:44 | 2019-01-28T19:13:04 | Python | UTF-8 | Python | false | false | 8,751 | py | # Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Classes that provide the interface for reading genomics data.
`GenomicsReader` defines the core API supported by readers, and is subclassed
directly or indirectly (via `DispatchingGenomicsReader`) for all concrete
implementations.
`TFRecordReader` is an implementation of the `GenomicsReader` API for reading
`TFRecord` files. This is usable for all data types when encoding data in
protocol buffers.
`DispatchingGenomicsReader` is an abstract class defined for convenience on top
of `GenomicsReader` that supports reading from either the native file format or
from `TFRecord` files of the corresponding protocol buffer used to encode data
of that file type. The input format assumed is dependent upon the filename of
the input data.
Concrete implementations for individual file types (e.g. BED, SAM, VCF, etc.)
reside in type-specific modules in this package. The instantiation of readers
may have reader-specific requirements documented there. General examples of the
`iterate()` and `query()` functionality are shown below.
```python
# Equivalent ways to iterate through all elements in a reader.
# 1. Using the reader itself as an iterable object.
kwargs = ... # Reader-specific keyword arguments.
with GenomicsReaderSubClass(output_path, **kwargs) as reader:
for proto in reader:
do_something(reader.header, proto)
# 2. Calling the iterate() method of the reader explicitly.
with GenomicsReaderSubClass(output_path, **kwargs) as reader:
for proto in reader.iterate():
do_something(reader.header, proto)
# Querying for all elements within a specific region of the genome.
from third_party.nucleus.protos import range_pb2
region = range_pb2.Range(reference_name='chr1', start=10, end=20)
with GenomicsReaderSubClass(output_path, **kwargs) as reader:
for proto in reader.query(region):
do_something(reader.header, proto)
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from absl import logging
import six
from tensorflow.python.lib.io import python_io
class GenomicsReader(six.Iterator):
"""Abstract base class for reading genomics data.
In addition to the abstractmethods defined below, subclasses should
also set a `header` member variable in their objects.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def iterate(self):
"""Returns an iterator for going through all the file's records."""
@abc.abstractmethod
def query(self, region):
"""Returns an iterator for going through the records in the region.
Args:
region: A nucleus.genomics.v1.Range.
Returns:
An iterator containing all and only records within the specified region.
"""
def __enter__(self):
"""Enter a `with` block."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Exit a `with` block. Typically, this will close the file."""
def __init__(self):
"""Initializer."""
# Some readers can only support one iterator at a time, so don't
# create one now. Rather, create it when needed in next().
self.iterator = None
def __iter__(self):
"""Allows users to use the object itself as an iterator."""
return self.iterate()
def __next__(self):
"""Allows users to use the object itself as an iterator."""
if self.iterator is None:
self.iterator = self.iterate()
return six.next(self.iterator)
class TFRecordReader(GenomicsReader):
"""A GenomicsReader that reads protocol buffers from a TFRecord file.
Example usage:
reader = TFRecordReader('/tmp/my_file.tfrecords.gz',
proto=tensorflow.Example)
for example in reader:
process(example)
Note that TFRecord files do not have headers, and do not need
to be wrapped in a "with" block.
"""
def __init__(self, input_path, proto, tf_options=None):
"""Initializes the TFRecordReader.
Args:
input_path: The filename of the file to read.
proto: The protocol buffer type the TFRecord file is expected to
contain. For example, variants_pb2.Variant or reads_pb2.Read.
tf_options: A python_io.TFRecordOptions object. If not set,
__init__ will create one with the compression type based on
whether input_path ends in '.gz' or not.
"""
super(TFRecordReader, self).__init__()
self.input_path = input_path
self.proto = proto
self.header = None
if not tf_options:
compressed = input_path.endswith('.gz')
tf_options = python_io.TFRecordOptions(
python_io.TFRecordCompressionType.GZIP if compressed else
python_io.TFRecordCompressionType.NONE)
self.tf_options = tf_options
def iterate(self):
"""Returns an iterator for going through all the file's records."""
# redacted
for buf in python_io.tf_record_iterator(self.input_path, self.tf_options):
yield self.proto.FromString(buf)
def query(self, region):
"""Returns an iterator for going through the records in the region.
NOTE: This function is not currently implemented by TFRecordReader as the
TFRecord format does not provide a general mechanism for fast random access
to elements in genome order.
"""
raise NotImplementedError('Can not query TFRecord file')
def __exit__(self, exit_type, exit_value, exit_traceback):
# tf_record_iterator closes the file when out of records.
pass
class DispatchingGenomicsReader(GenomicsReader):
"""A GenomicsReader that dispatches based on the file extension.
If '.tfrecord' is present in the filename, a TFRecordReader is used.
  Otherwise, a native reader is used.
Subclasses of DispatchingGenomicsReader must define the following methods:
* _native_reader()
* _record_proto()
"""
def __init__(self, input_path, **kwargs):
super(DispatchingGenomicsReader, self).__init__()
if '.tfrecord' in input_path:
self._reader = TFRecordReader(input_path, proto=self._record_proto(),
tf_options=kwargs.get('tf_options', None))
else:
# Remove tf_options, if present, from the arguments we pass to the
# native reader.
kwargs.pop('tf_options', None)
self._reader = self._native_reader(input_path, **kwargs)
logging.info('Reading %s with %s',
input_path, self._reader.__class__.__name__)
self.header = getattr(self._reader, 'header', None)
self._post_init_hook()
@abc.abstractmethod
def _native_reader(self, input_path, **kwargs):
"""Returns a GenomicsReader for reading the records `natively`.
Args:
input_path: The path to the native file to read.
**kwargs: Zero or more keyword arguments.
Returns:
A GenomicsReader.
"""
@abc.abstractmethod
def _record_proto(self):
"""Returns the protocol buffer type used by this reader."""
def iterate(self):
return self._reader.iterate()
def query(self, region):
return self._reader.query(region)
def __exit__(self, exit_type, exit_value, exit_traceback):
self._reader.__exit__(exit_type, exit_value, exit_traceback)
def _post_init_hook(self):
"""Hook for subclasses to run code at the end of __init__."""
| [
"[email protected]"
]
| |
b0236f7b445c282954c5e14cd077d6706480f485 | c81d7dfef424b088bf2509a1baf406a80384ea5a | /venv/Lib/site-packages/twilio/rest/chat/v2/credential.py | e4678bd5de9eb5063b8a8661ef0187dffd60d614 | []
| no_license | Goutham2591/OMK_PART2 | 111210d78fc4845481ed55c852b8f2f938918f4a | cb54fb21ebf472bffc6ee4f634bf1e68303e113d | refs/heads/master | 2022-12-10T01:43:08.213010 | 2018-04-05T02:09:41 | 2018-04-05T02:09:41 | 124,828,094 | 0 | 1 | null | 2022-12-07T23:43:03 | 2018-03-12T03:20:14 | Python | UTF-8 | Python | false | false | 15,346 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CredentialList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the CredentialList
:param Version version: Version that contains the resource
:returns: twilio.rest.chat.v2.credential.CredentialList
:rtype: twilio.rest.chat.v2.credential.CredentialList
"""
super(CredentialList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Credentials'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams CredentialInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v2.credential.CredentialInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'])
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists CredentialInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v2.credential.CredentialInstance]
"""
return list(self.stream(limit=limit, page_size=page_size))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CredentialInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CredentialPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of CredentialInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return CredentialPage(self._version, response, self._solution)
def create(self, type, friendly_name=values.unset, certificate=values.unset,
private_key=values.unset, sandbox=values.unset, api_key=values.unset,
secret=values.unset):
"""
Create a new CredentialInstance
:param CredentialInstance.PushService type: The type
:param unicode friendly_name: The friendly_name
:param unicode certificate: The certificate
:param unicode private_key: The private_key
:param bool sandbox: The sandbox
:param unicode api_key: The api_key
:param unicode secret: The secret
:returns: Newly created CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
data = values.of({
'Type': type,
'FriendlyName': friendly_name,
'Certificate': certificate,
'PrivateKey': private_key,
'Sandbox': sandbox,
'ApiKey': api_key,
'Secret': secret,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return CredentialInstance(self._version, payload)
def get(self, sid):
"""
Constructs a CredentialContext
:param sid: The sid
:returns: twilio.rest.chat.v2.credential.CredentialContext
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
return CredentialContext(self._version, sid=sid)
def __call__(self, sid):
"""
Constructs a CredentialContext
:param sid: The sid
:returns: twilio.rest.chat.v2.credential.CredentialContext
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
return CredentialContext(self._version, sid=sid)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Chat.V2.CredentialList>'
class CredentialPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the CredentialPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.chat.v2.credential.CredentialPage
:rtype: twilio.rest.chat.v2.credential.CredentialPage
"""
super(CredentialPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CredentialInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.credential.CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
return CredentialInstance(self._version, payload)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Chat.V2.CredentialPage>'
class CredentialContext(InstanceContext):
""" """
def __init__(self, version, sid):
"""
Initialize the CredentialContext
:param Version version: Version that contains the resource
:param sid: The sid
:returns: twilio.rest.chat.v2.credential.CredentialContext
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
super(CredentialContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid}
self._uri = '/Credentials/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a CredentialInstance
:returns: Fetched CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CredentialInstance(self._version, payload, sid=self._solution['sid'])
def update(self, friendly_name=values.unset, certificate=values.unset,
private_key=values.unset, sandbox=values.unset, api_key=values.unset,
secret=values.unset):
"""
Update the CredentialInstance
:param unicode friendly_name: The friendly_name
:param unicode certificate: The certificate
:param unicode private_key: The private_key
:param bool sandbox: The sandbox
:param unicode api_key: The api_key
:param unicode secret: The secret
:returns: Updated CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'Certificate': certificate,
'PrivateKey': private_key,
'Sandbox': sandbox,
'ApiKey': api_key,
'Secret': secret,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return CredentialInstance(self._version, payload, sid=self._solution['sid'])
def delete(self):
"""
Deletes the CredentialInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Chat.V2.CredentialContext {}>'.format(context)
class CredentialInstance(InstanceResource):
""" """
class PushService(object):
GCM = "gcm"
APN = "apn"
FCM = "fcm"
def __init__(self, version, payload, sid=None):
"""
Initialize the CredentialInstance
:returns: twilio.rest.chat.v2.credential.CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
super(CredentialInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'account_sid': payload['account_sid'],
'friendly_name': payload['friendly_name'],
'type': payload['type'],
'sandbox': payload['sandbox'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'url': payload['url'],
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid']}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CredentialContext for this CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
if self._context is None:
self._context = CredentialContext(self._version, sid=self._solution['sid'])
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def type(self):
"""
:returns: The type
:rtype: CredentialInstance.PushService
"""
return self._properties['type']
@property
def sandbox(self):
"""
:returns: The sandbox
:rtype: unicode
"""
return self._properties['sandbox']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a CredentialInstance
:returns: Fetched CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
return self._proxy.fetch()
def update(self, friendly_name=values.unset, certificate=values.unset,
private_key=values.unset, sandbox=values.unset, api_key=values.unset,
secret=values.unset):
"""
Update the CredentialInstance
:param unicode friendly_name: The friendly_name
:param unicode certificate: The certificate
:param unicode private_key: The private_key
:param bool sandbox: The sandbox
:param unicode api_key: The api_key
:param unicode secret: The secret
:returns: Updated CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
certificate=certificate,
private_key=private_key,
sandbox=sandbox,
api_key=api_key,
secret=secret,
)
def delete(self):
"""
Deletes the CredentialInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Chat.V2.CredentialInstance {}>'.format(context)
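# Illustrative usage sketch (not generated code): listing credentials through
# the Chat v2 REST client; the account SID and auth token below are
# hypothetical placeholders.
#
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#   for credential in client.chat.v2.credentials.list(limit=20):
#       print(credential.sid, credential.type)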
| [
"[email protected]"
]
| |
2b3eb61d8eb886e3d08f7c5dec4b7027976b0bfb | bda0ecadb1fdede0a8935b35d5bb6eaa745d092a | /src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_cassandra_clusters_operations.py | a0e0f51b58da94c859d01cc51253568275b472bd | [
"MIT",
"LicenseRef-scancode-generic-cla"
]
| permissive | jongio/azure-cli-extensions | 059c785e01c6dc5ec89bf7f4d9d2c42ed8cebea3 | c2f1e4be94c65451a1d060291ae661a597531fd2 | refs/heads/master | 2021-11-23T23:49:26.690924 | 2021-10-23T14:09:49 | 2021-10-23T14:09:49 | 224,203,634 | 0 | 0 | MIT | 2020-06-16T16:26:32 | 2019-11-26T13:52:09 | null | UTF-8 | Python | false | false | 50,424 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class CassandraClustersOperations(object):
"""CassandraClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListClusters"]
"""List all managed Cassandra clusters in this subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListClusters or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.ListClusters]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListClusters"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListClusters', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/cassandraClusters'} # type: ignore
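    # Illustrative usage sketch (not generated code), assuming the management
    # client exposes this operation group as ``cassandra_clusters``:
    #
    #   for cluster in client.cassandra_clusters.list_by_subscription():
    #       print(cluster.name)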
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListClusters"]
"""List all managed Cassandra clusters in this resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListClusters or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.ListClusters]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListClusters"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListClusters', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters'} # type: ignore
def get(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ClusterResource"
"""Get the properties of a managed Cassandra cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClusterResource, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ClusterResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ClusterResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a managed Cassandra cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}'} # type: ignore
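    # Illustrative LRO usage sketch (not generated code), assuming the same
    # hypothetical ``client.cassandra_clusters`` operation group as above:
    #
    #   poller = client.cassandra_clusters.begin_delete(resource_group_name, cluster_name)
    #   poller.result()  # blocks until the delete operation completes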
def _create_update_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
body, # type: "_models.ClusterResource"
**kwargs # type: Any
):
# type: (...) -> "_models.ClusterResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'ClusterResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ClusterResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ClusterResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}'} # type: ignore
def begin_create_update(
self,
resource_group_name, # type: str
cluster_name, # type: str
body, # type: "_models.ClusterResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ClusterResource"]
"""Create or update a managed Cassandra cluster. When updating, you must specify all writable
properties. To update only some properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:param body: The properties specifying the desired state of the managed Cassandra cluster.
:type body: ~azure.mgmt.cosmosdb.models.ClusterResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ClusterResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ClusterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ClusterResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
body, # type: "_models.ClusterResource"
**kwargs # type: Any
):
# type: (...) -> "_models.ClusterResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'ClusterResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ClusterResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ClusterResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
cluster_name, # type: str
body, # type: "_models.ClusterResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ClusterResource"]
"""Updates some of the properties of a managed Cassandra cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:param body: Parameters to provide for specifying the managed Cassandra cluster.
:type body: ~azure.mgmt.cosmosdb.models.ClusterResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ClusterResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ClusterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ClusterResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}'} # type: ignore
def _request_repair_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
body, # type: "_models.RepairPostBody"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._request_repair_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'RepairPostBody')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_request_repair_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/repair'} # type: ignore
def begin_request_repair(
self,
resource_group_name, # type: str
cluster_name, # type: str
body, # type: "_models.RepairPostBody"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Request that repair begin on this cluster as soon as possible.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:param body: Specification of what keyspaces and tables to run repair on.
:type body: ~azure.mgmt.cosmosdb.models.RepairPostBody
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._request_repair_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_request_repair.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/repair'} # type: ignore
def _fetch_node_status_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ClusterNodeStatus"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ClusterNodeStatus"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
accept = "application/json"
# Construct URL
url = self._fetch_node_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterNodeStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_fetch_node_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/fetchNodeStatus'} # type: ignore
def begin_fetch_node_status(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ClusterNodeStatus"]
"""Request the status of all nodes in the cluster (as returned by 'nodetool status').
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ClusterNodeStatus or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ClusterNodeStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterNodeStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._fetch_node_status_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ClusterNodeStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_fetch_node_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/fetchNodeStatus'} # type: ignore
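    # Illustrative usage sketch (not part of the generated code): assuming this
    # operations class is exposed on a CosmosDBManagementClient instance as
    # `client.cassandra_clusters`, a caller would typically run
    #
    #     poller = client.cassandra_clusters.begin_fetch_node_status(
    #         resource_group_name="my-rg", cluster_name="my-cluster")
    #     node_status = poller.result()  # blocks until the long-running operation finishes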
def list_backups(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListBackups"]
"""List the backups of this cluster that are available to restore.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListBackups or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.ListBackups]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBackups"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_backups.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListBackups', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_backups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/backups'} # type: ignore
def get_backup(
self,
resource_group_name, # type: str
cluster_name, # type: str
backup_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BackupResource"
"""Get the properties of an individual backup of this cluster that is available to restore.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name.
:type cluster_name: str
:param backup_id: Id of a restorable backup of a Cassandra cluster.
:type backup_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupResource, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.BackupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01-preview"
accept = "application/json"
# Construct URL
url = self.get_backup.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=100, min_length=1, pattern=r'^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$'),
'backupId': self._serialize.url("backup_id", backup_id, 'str', max_length=15, min_length=1, pattern=r'^[0-9]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_backup.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/backups/{backupId}'} # type: ignore
| [
"[email protected]"
]
| |
47ab0810b795e184979408bc0e50bdf7fa92bd5c | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/urllib3-1.22/setup.py | 35e02aabe194d0fa87fe0847dc389aeb4b9afb9a | [
"Apache-2.0",
"MIT"
]
| permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 2,693 | py | #!/usr/bin/env python
from setuptools import setup
import os
import re
import codecs
base_path = os.path.dirname(__file__)
# Get the version (borrowed from SQLAlchemy)
with open(os.path.join(base_path, 'urllib3', '__init__.py')) as fp:
VERSION = re.compile(r".*__version__ = '(.*?)'",
re.S).match(fp.read()).group(1)
with codecs.open('README.rst', encoding='utf-8') as fp:
readme = fp.read()
with codecs.open('CHANGES.rst', encoding='utf-8') as fp:
changes = fp.read()
version = VERSION
setup(name='urllib3',
version=version,
description="HTTP library with thread-safe connection pooling, file post, and more.",
long_description=u'\n\n'.join([readme, changes]),
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
],
keywords='urllib httplib threadsafe filepost http https ssl pooling',
author='Andrey Petrov',
author_email='[email protected]',
url='https://urllib3.readthedocs.io/',
license='MIT',
packages=['urllib3',
'urllib3.packages', 'urllib3.packages.ssl_match_hostname',
'urllib3.packages.backports', 'urllib3.contrib',
'urllib3.contrib._securetransport', 'urllib3.util',
],
requires=[],
tests_require=[
# These are a less-specific subset of dev-requirements.txt, for the
# convenience of distro package maintainers.
'pytest',
'nose',
'mock',
'tornado',
],
test_suite='test',
extras_require={
'secure': [
'pyOpenSSL>=0.14',
'cryptography>=1.3.4',
'idna>=2.0.0',
'certifi',
"ipaddress",
],
'socks': [
'PySocks>=1.5.6,<2.0,!=1.5.7',
]
},
)
| [
"[email protected]"
]
| |
64f963a102feff0ad493f232bb9d0efc6d262257 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/leetenki_YOLOv2/YOLOv2-master/yolov2.py | 1a79bcae8f93a445e44d9f865d432feaf9cdbd67 | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 18,676 | py | import numpy as np
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.links as L
import chainer.functions as F
from lib.utils import *
from lib.functions import *
class YOLOv2(Chain):
"""
YOLOv2
    - It takes a (416, 416, 3)-sized image as input
"""
def __init__(self, n_classes, n_boxes):
super(YOLOv2, self).__init__(
##### common layers for both pretrained layers and yolov2 #####
conv1 = L.Convolution2D(3, 32, ksize=3, stride=1, pad=1, nobias=True),
bn1 = L.BatchNormalization(32, use_beta=False, eps=2e-5),
bias1 = L.Bias(shape=(32,)),
conv2 = L.Convolution2D(32, 64, ksize=3, stride=1, pad=1, nobias=True),
bn2 = L.BatchNormalization(64, use_beta=False, eps=2e-5),
bias2 = L.Bias(shape=(64,)),
conv3 = L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True),
bn3 = L.BatchNormalization(128, use_beta=False, eps=2e-5),
bias3 = L.Bias(shape=(128,)),
conv4 = L.Convolution2D(128, 64, ksize=1, stride=1, pad=0, nobias=True),
bn4 = L.BatchNormalization(64, use_beta=False, eps=2e-5),
bias4 = L.Bias(shape=(64,)),
conv5 = L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True),
bn5 = L.BatchNormalization(128, use_beta=False, eps=2e-5),
bias5 = L.Bias(shape=(128,)),
conv6 = L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True),
bn6 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias6 = L.Bias(shape=(256,)),
conv7 = L.Convolution2D(256, 128, ksize=1, stride=1, pad=0, nobias=True),
bn7 = L.BatchNormalization(128, use_beta=False, eps=2e-5),
bias7 = L.Bias(shape=(128,)),
conv8 = L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True),
bn8 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias8 = L.Bias(shape=(256,)),
conv9 = L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
bn9 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias9 = L.Bias(shape=(512,)),
conv10 = L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True),
bn10 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias10 = L.Bias(shape=(256,)),
conv11 = L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
bn11 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias11 = L.Bias(shape=(512,)),
conv12 = L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True),
bn12 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias12 = L.Bias(shape=(256,)),
conv13 = L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
bn13 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias13 = L.Bias(shape=(512,)),
conv14 = L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn14 = L.BatchNormalization(1024, use_beta=False, eps=2e-5),
bias14 = L.Bias(shape=(1024,)),
conv15 = L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True),
bn15 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias15 = L.Bias(shape=(512,)),
conv16 = L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn16 = L.BatchNormalization(1024, use_beta=False, eps=2e-5),
bias16 = L.Bias(shape=(1024,)),
conv17 = L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True),
bn17 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias17 = L.Bias(shape=(512,)),
conv18 = L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn18 = L.BatchNormalization(1024, use_beta=False, eps=2e-5),
bias18 = L.Bias(shape=(1024,)),
###### new layer
conv19 = L.Convolution2D(1024, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn19 = L.BatchNormalization(1024, use_beta=False),
bias19 = L.Bias(shape=(1024,)),
conv20 = L.Convolution2D(1024, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn20 = L.BatchNormalization(1024, use_beta=False),
bias20 = L.Bias(shape=(1024,)),
conv21 = L.Convolution2D(3072, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn21 = L.BatchNormalization(1024, use_beta=False),
bias21 = L.Bias(shape=(1024,)),
conv22 = L.Convolution2D(1024, n_boxes * (5 + n_classes), ksize=1, stride=1, pad=0, nobias=True),
bias22 = L.Bias(shape=(n_boxes * (5 + n_classes),)),
)
self.train = False
self.finetune = False
self.n_boxes = n_boxes
self.n_classes = n_classes
def __call__(self, x):
##### common layer
h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), test=not self.train, finetune=self.finetune)), slope=0.1)
        high_resolution_feature = reorg(h) # shrink the high-resolution feature map with reorg and keep it for the later concat
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), test=not self.train, finetune=self.finetune)), slope=0.1)
###### new layer
h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), test=not self.train, finetune=self.finetune)), slope=0.1)
        h = F.concat((high_resolution_feature, h), axis=1) # output concatenation
h = F.leaky_relu(self.bias21(self.bn21(self.conv21(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = self.bias22(self.conv22(h))
return h
class YOLOv2Predictor(Chain):
def __init__(self, predictor):
super(YOLOv2Predictor, self).__init__(predictor=predictor)
self.anchors = [[5.375, 5.03125], [5.40625, 4.6875], [2.96875, 2.53125], [2.59375, 2.78125], [1.9375, 3.25]]
self.thresh = 0.6
self.seen = 0
self.unstable_seen = 5000
def __call__(self, input_x, t):
output = self.predictor(input_x)
batch_size, _, grid_h, grid_w = output.shape
self.seen += batch_size
x, y, w, h, conf, prob = F.split_axis(F.reshape(output, (batch_size, self.predictor.n_boxes, self.predictor.n_classes+5, grid_h, grid_w)), (1, 2, 3, 4, 5), axis=2)
        x = F.sigmoid(x) # activation for x
        y = F.sigmoid(y) # activation for y
        conf = F.sigmoid(conf) # activation for conf
        prob = F.transpose(prob, (0, 2, 1, 3, 4))
        prob = F.softmax(prob) # activation for probability
        # prepare the training targets
        tw = np.zeros(w.shape, dtype=np.float32) # train w and h toward 0 (e^w and e^h approach 1 -> the responsible bbox keeps a scale factor of 1)
        th = np.zeros(h.shape, dtype=np.float32)
        tx = np.tile(0.5, x.shape).astype(np.float32) # train the activated x and y toward 0.5
        ty = np.tile(0.5, y.shape).astype(np.float32)
        if self.seen < self.unstable_seen: # learning scale for boxes without an object center defaults to 0.1
box_learning_scale = np.tile(0.1, x.shape).astype(np.float32)
else:
box_learning_scale = np.tile(0, x.shape).astype(np.float32)
        tconf = np.zeros(conf.shape, dtype=np.float32) # confidence truth is 0 by default; anchors whose IOU exceeds thresh are not trained, except that the best box of a grid cell containing an object is pushed toward the true IOU
        conf_learning_scale = np.tile(0.1, conf.shape).astype(np.float32)
        tprob = prob.data.copy() # train nothing except the best anchor (squared error against itself = 0)
        # compute the IOU between every bbox and the truth boxes (done per batch)
x_shift = Variable(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape[1:]))
y_shift = Variable(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape[1:]))
w_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 0], (self.predictor.n_boxes, 1, 1, 1)), w.shape[1:]))
h_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 1], (self.predictor.n_boxes, 1, 1, 1)), h.shape[1:]))
x_shift.to_gpu(), y_shift.to_gpu(), w_anchor.to_gpu(), h_anchor.to_gpu()
best_ious = []
for batch in range(batch_size):
n_truth_boxes = len(t[batch])
box_x = (x[batch] + x_shift) / grid_w
box_y = (y[batch] + y_shift) / grid_h
box_w = F.exp(w[batch]) * w_anchor / grid_w
box_h = F.exp(h[batch]) * h_anchor / grid_h
ious = []
for truth_index in range(n_truth_boxes):
truth_box_x = Variable(np.broadcast_to(np.array(t[batch][truth_index]["x"], dtype=np.float32), box_x.shape))
truth_box_y = Variable(np.broadcast_to(np.array(t[batch][truth_index]["y"], dtype=np.float32), box_y.shape))
truth_box_w = Variable(np.broadcast_to(np.array(t[batch][truth_index]["w"], dtype=np.float32), box_w.shape))
truth_box_h = Variable(np.broadcast_to(np.array(t[batch][truth_index]["h"], dtype=np.float32), box_h.shape))
truth_box_x.to_gpu(), truth_box_y.to_gpu(), truth_box_w.to_gpu(), truth_box_h.to_gpu()
ious.append(multi_box_iou(Box(box_x, box_y, box_w, box_h), Box(truth_box_x, truth_box_y, truth_box_w, truth_box_h)).data.get())
ious = np.array(ious)
best_ious.append(np.max(ious, axis=0))
best_ious = np.array(best_ious)
        # for anchors whose IOU is above the threshold, do not push conf down to 0 (grids around the truth keep their conf as-is).
tconf[best_ious > self.thresh] = conf.data.get()[best_ious > self.thresh]
conf_learning_scale[best_ious > self.thresh] = 0
        # only for anchor boxes that contain an object, individually correct x, y, w, h, conf and prob
abs_anchors = self.anchors / np.array([grid_w, grid_h])
for batch in range(batch_size):
for truth_box in t[batch]:
truth_w = int(float(truth_box["x"]) * grid_w)
truth_h = int(float(truth_box["y"]) * grid_h)
truth_n = 0
best_iou = 0.0
for anchor_index, abs_anchor in enumerate(abs_anchors):
iou = box_iou(Box(0, 0, float(truth_box["w"]), float(truth_box["h"])), Box(0, 0, abs_anchor[0], abs_anchor[1]))
if best_iou < iou:
best_iou = iou
truth_n = anchor_index
                # for the anchor responsible for the object, push the center toward the true coordinates instead of 0.5, push the anchor scale toward the true scale instead of 1, and set the learning scale to 1.
box_learning_scale[batch, truth_n, :, truth_h, truth_w] = 1.0
tx[batch, truth_n, :, truth_h, truth_w] = float(truth_box["x"]) * grid_w - truth_w
ty[batch, truth_n, :, truth_h, truth_w] = float(truth_box["y"]) * grid_h - truth_h
tw[batch, truth_n, :, truth_h, truth_w] = np.log(float(truth_box["w"]) / abs_anchors[truth_n][0])
th[batch, truth_n, :, truth_h, truth_w] = np.log(float(truth_box["h"]) / abs_anchors[truth_n][1])
tprob[batch, :, truth_n, truth_h, truth_w] = 0
tprob[batch, int(truth_box["label"]), truth_n, truth_h, truth_w] = 1
                # observed IOU
full_truth_box = Box(float(truth_box["x"]), float(truth_box["y"]), float(truth_box["w"]), float(truth_box["h"]))
predicted_box = Box(
(x[batch][truth_n][0][truth_h][truth_w].data.get() + truth_w) / grid_w,
(y[batch][truth_n][0][truth_h][truth_w].data.get() + truth_h) / grid_h,
np.exp(w[batch][truth_n][0][truth_h][truth_w].data.get()) * abs_anchors[truth_n][0],
np.exp(h[batch][truth_n][0][truth_h][truth_w].data.get()) * abs_anchors[truth_n][1]
)
predicted_iou = box_iou(full_truth_box, predicted_box)
tconf[batch, truth_n, :, truth_h, truth_w] = predicted_iou
conf_learning_scale[batch, truth_n, :, truth_h, truth_w] = 10.0
# debug prints
maps = F.transpose(prob[batch], (2, 3, 1, 0)).data
print("best confidences and best conditional probability and predicted class of each grid:")
for i in range(grid_h):
for j in range(grid_w):
print("%2d" % (int(conf[batch, :, :, i, j].data.max() * 100)), end=" ")
print(" ", end="")
for j in range(grid_w):
print("%2d" % (maps[i][j][int(maps[i][j].max(axis=1).argmax())].argmax()), end=" ")
print(" ", end="")
for j in range(grid_w):
print("%2d" % (maps[i][j][int(maps[i][j].max(axis=1).argmax())].max()*100), end=" ")
print()
print("best default iou: %.2f predicted iou: %.2f confidence: %.2f class: %s" % (best_iou, predicted_iou, conf[batch][truth_n][0][truth_h][truth_w].data, t[batch][0]["label"]))
print("-------------------------------")
print("seen = %d" % self.seen)
        # loss computation
tx, ty, tw, th, tconf, tprob = Variable(tx), Variable(ty), Variable(tw), Variable(th), Variable(tconf), Variable(tprob)
box_learning_scale, conf_learning_scale = Variable(box_learning_scale), Variable(conf_learning_scale)
tx.to_gpu(), ty.to_gpu(), tw.to_gpu(), th.to_gpu(), tconf.to_gpu(), tprob.to_gpu()
box_learning_scale.to_gpu()
conf_learning_scale.to_gpu()
x_loss = F.sum((tx - x) ** 2 * box_learning_scale) / 2
y_loss = F.sum((ty - y) ** 2 * box_learning_scale) / 2
w_loss = F.sum((tw - w) ** 2 * box_learning_scale) / 2
h_loss = F.sum((th - h) ** 2 * box_learning_scale) / 2
c_loss = F.sum((tconf - conf) ** 2 * conf_learning_scale) / 2
p_loss = F.sum((tprob - prob) ** 2) / 2
print("x_loss: %f y_loss: %f w_loss: %f h_loss: %f c_loss: %f p_loss: %f" %
(F.sum(x_loss).data, F.sum(y_loss).data, F.sum(w_loss).data, F.sum(h_loss).data, F.sum(c_loss).data, F.sum(p_loss).data)
)
loss = x_loss + y_loss + w_loss + h_loss + c_loss + p_loss
return loss
def init_anchor(self, anchors):
self.anchors = anchors
def predict(self, input_x):
output = self.predictor(input_x)
batch_size, input_channel, input_h, input_w = input_x.shape
batch_size, _, grid_h, grid_w = output.shape
x, y, w, h, conf, prob = F.split_axis(F.reshape(output, (batch_size, self.predictor.n_boxes, self.predictor.n_classes+5, grid_h, grid_w)), (1, 2, 3, 4, 5), axis=2)
        x = F.sigmoid(x) # activation for x
        y = F.sigmoid(y) # activation for y
        conf = F.sigmoid(conf) # activation for conf
        prob = F.transpose(prob, (0, 2, 1, 3, 4))
        prob = F.softmax(prob) # activation for probability
        prob = F.transpose(prob, (0, 2, 1, 3, 4))
        # convert x, y, w, h to absolute coordinates
x_shift = Variable(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape))
y_shift = Variable(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape))
w_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 0], (self.predictor.n_boxes, 1, 1, 1)), w.shape))
h_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 1], (self.predictor.n_boxes, 1, 1, 1)), h.shape))
#x_shift.to_gpu(), y_shift.to_gpu(), w_anchor.to_gpu(), h_anchor.to_gpu()
box_x = (x + x_shift) / grid_w
box_y = (y + y_shift) / grid_h
box_w = F.exp(w) * w_anchor / grid_w
box_h = F.exp(h) * h_anchor / grid_h
return box_x, box_y, box_w, box_h, conf, prob
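# Illustrative usage sketch (assumes trained weights and the chainer v1-style API used
# above; the 416x416 input size follows the YOLOv2 class docstring):
#
#     model = YOLOv2(n_classes=20, n_boxes=5)
#     predictor = YOLOv2Predictor(model)
#     x = Variable(np.zeros((1, 3, 416, 416), dtype=np.float32))
#     box_x, box_y, box_w, box_h, conf, prob = predictor.predict(x)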
| [
"[email protected]"
]
| |
799aa43fab6f27917a139268f410d8f5eff2f188 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/services/types/campaign_draft_service.py | d35741458258a92a1854440473c3973b5687e633 | [
"Apache-2.0"
]
| permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 9,560 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v9.resources.types import (
campaign_draft as gagr_campaign_draft,
)
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.services",
marshal="google.ads.googleads.v9",
manifest={
"GetCampaignDraftRequest",
"MutateCampaignDraftsRequest",
"PromoteCampaignDraftRequest",
"CampaignDraftOperation",
"MutateCampaignDraftsResponse",
"MutateCampaignDraftResult",
"ListCampaignDraftAsyncErrorsRequest",
"ListCampaignDraftAsyncErrorsResponse",
},
)
class GetCampaignDraftRequest(proto.Message):
r"""Request message for
[CampaignDraftService.GetCampaignDraft][google.ads.googleads.v9.services.CampaignDraftService.GetCampaignDraft].
Attributes:
resource_name (str):
Required. The resource name of the campaign
draft to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
class MutateCampaignDraftsRequest(proto.Message):
r"""Request message for
[CampaignDraftService.MutateCampaignDrafts][google.ads.googleads.v9.services.CampaignDraftService.MutateCampaignDrafts].
Attributes:
customer_id (str):
Required. The ID of the customer whose
campaign drafts are being modified.
operations (Sequence[google.ads.googleads.v9.services.types.CampaignDraftOperation]):
Required. The list of operations to perform
on individual campaign drafts.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v9.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="CampaignDraftOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class PromoteCampaignDraftRequest(proto.Message):
r"""Request message for
[CampaignDraftService.PromoteCampaignDraft][google.ads.googleads.v9.services.CampaignDraftService.PromoteCampaignDraft].
Attributes:
campaign_draft (str):
Required. The resource name of the campaign
draft to promote.
validate_only (bool):
If true, the request is validated but no Long
Running Operation is created. Only errors are
returned.
"""
campaign_draft = proto.Field(proto.STRING, number=1,)
validate_only = proto.Field(proto.BOOL, number=2,)
class CampaignDraftOperation(proto.Message):
r"""A single operation (create, update, remove) on a campaign
draft.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v9.resources.types.CampaignDraft):
Create operation: No resource name is
expected for the new campaign draft.
This field is a member of `oneof`_ ``operation``.
update (google.ads.googleads.v9.resources.types.CampaignDraft):
Update operation: The campaign draft is
expected to have a valid resource name.
This field is a member of `oneof`_ ``operation``.
remove (str):
Remove operation: The campaign draft is expected to have a
valid resource name, in this format:
``customers/{customer_id}/campaignDrafts/{base_campaign_id}~{draft_id}``
This field is a member of `oneof`_ ``operation``.
"""
update_mask = proto.Field(
proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=gagr_campaign_draft.CampaignDraft,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof="operation",
message=gagr_campaign_draft.CampaignDraft,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation",)
class MutateCampaignDraftsResponse(proto.Message):
r"""Response message for campaign draft mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v9.services.types.MutateCampaignDraftResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateCampaignDraftResult",
)
class MutateCampaignDraftResult(proto.Message):
r"""The result for the campaign draft mutate.
Attributes:
resource_name (str):
Returned for successful operations.
campaign_draft (google.ads.googleads.v9.resources.types.CampaignDraft):
The mutated campaign draft with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1,)
campaign_draft = proto.Field(
proto.MESSAGE, number=2, message=gagr_campaign_draft.CampaignDraft,
)
class ListCampaignDraftAsyncErrorsRequest(proto.Message):
r"""Request message for
[CampaignDraftService.ListCampaignDraftAsyncErrors][google.ads.googleads.v9.services.CampaignDraftService.ListCampaignDraftAsyncErrors].
Attributes:
resource_name (str):
Required. The name of the campaign draft from
which to retrieve the async errors.
page_token (str):
Token of the page to retrieve. If not specified, the first
page of results will be returned. Use the value obtained
from ``next_page_token`` in the previous response in order
to request the next page of results.
page_size (int):
Number of elements to retrieve in a single
page. When a page request is too large, the
server may decide to further limit the number of
returned resources.
"""
resource_name = proto.Field(proto.STRING, number=1,)
page_token = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
class ListCampaignDraftAsyncErrorsResponse(proto.Message):
r"""Response message for
[CampaignDraftService.ListCampaignDraftAsyncErrors][google.ads.googleads.v9.services.CampaignDraftService.ListCampaignDraftAsyncErrors].
Attributes:
errors (Sequence[google.rpc.status_pb2.Status]):
Details of the errors when performing the
asynchronous operation.
next_page_token (str):
Pagination token used to retrieve the next page of results.
Pass the content of this string as the ``page_token``
attribute of the next request. ``next_page_token`` is not
returned for the last page.
"""
@property
def raw_page(self):
return self
errors = proto.RepeatedField(
proto.MESSAGE, number=1, message=status_pb2.Status,
)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
]
| |
7f97e535826d343972a73c7e1377a0d2fcd7d1e0 | ce7da62e2d6e7820fd66031299702b08bf1260fd | /15-Threading/print_time.py | c8143269ef6cb4acee35a93cd26224c9677a4b4f | []
| no_license | philipz/fasttrack-python | f0644aacf62d0130fa4631f4973fa6d64c46989c | cb4cafebbd04e9b66353fec4919f9cd0e161e84e | refs/heads/master | 2021-01-17T08:50:38.702044 | 2014-07-16T14:26:14 | 2014-07-16T14:26:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import time
import threading
def print_time():
while True:
print time.ctime()
time.sleep(1)
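# Run the printer as a daemon thread: setDaemon(True) lets the process exit as soon as
# the main thread's 10-second sleep below finishes, instead of printing forever.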
t = threading.Thread(target=print_time)
t.setDaemon(True)
t.start()
time.sleep(10)
| [
"[email protected]"
]
| |
df2a22532f04f775815daf02c36c2768d748d1d9 | 3c59b7bde01cfbc1fbd170883393e8ebf7a0a92f | /백준/1074번 Z.py | 0b07500773bb2212f4d32f574c05561e7542de28 | []
| no_license | gf234/python_problem_solving | 93ae00d940091131d8f8b06e478e385e4c2a4503 | 4c95751f5a687215c14bf61c37e6dc2e7e752342 | refs/heads/main | 2023-05-10T07:28:12.351006 | 2021-06-14T04:59:33 | 2021-06-14T04:59:33 | 314,479,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | n, r, c = map(int, input().split())
answer = 0
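# Z-order visit count: split the 2^n x 2^n grid into four 2^(n-1) x 2^(n-1) quadrants,
# visited upper-left, upper-right, lower-left, lower-right. Each quadrant visited before
# the one containing (r, c) contributes 4^(n-1) cells (held in `sum`); then descend into
# the quadrant that contains (r, c) and repeat with n - 1.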
while n:
mid = 2**(n-1)
sum = 4**(n-1)
if r < mid:
if c < mid:
pass
else:
c -= mid
answer += sum
else:
if c < mid:
r -= mid
answer += sum*2
else:
r -= mid
c -= mid
answer += sum*3
n -= 1
print(answer)
| [
"[email protected]"
]
| |
dc41a632cd458f4e263f559d9c3c0c90d16474d1 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L54/54-23_MD_NVT_rerun/set_1ns_equi_1.py | d5950a09373a0929e4576ed73deb39bcd3249847 | []
| no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L54/MD_NVT_rerun/ti_one-step/54_23/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../54-23_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
]
| |
338ce5bfb6eb581344b48a4f6403e87e9de8d671 | a74cabbe1b11fc8ef575ea86f2543cd95db78ec9 | /python_program/q1296_Divide_Array_in_Sets_of_K_Consecutive_Numbers.py | e29aed5088a3550289b744c6e3c78f9e8e068699 | []
| no_license | tszandy/leetcode | 87e3ccf291b2879637d2d8238935a455b401a78a | f1f4361541dcffbb291285663c8820d7ffb37d2f | refs/heads/master | 2023-04-06T15:34:04.847875 | 2023-03-26T12:22:42 | 2023-03-26T12:22:42 | 204,069,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from typing import List
from collections import Counter,defaultdict,deque
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count,zip_longest
import queue
class Solution:
def isPossibleDivide(self, nums: List[int], k: int) -> bool:
nums.sort()
n = len(nums)
if n%k!=0:
return False
while nums:
first_num = nums[0]
for i in range(k):
                index = bisect_left(nums, first_num+i)
                # guard the lookup: first_num+i may be absent, in which case
                # bisect_left can return a position one past the end of the list
                if index == len(nums) or nums[index] != first_num+i:
                    return False
nums.pop(index)
return True
sol = Solution()
# input
nums = [1,2,3,3,4,4,5,6]
k = 4
# output
output = sol.isPossibleDivide(nums,k)
# answer
answer = True
print(output, answer, answer == output)
# input
nums = [3,2,1,2,3,4,3,4,5,9,10,11]
k = 3
# output
output = sol.isPossibleDivide(nums,k)
# answer
answer = True
print(output, answer, answer == output)
# input
nums = [3,3,2,2,1,1]
k = 3
# output
output = sol.isPossibleDivide(nums,k)
# answer
answer = True
print(output, answer, answer == output)
# input
nums = [1,2,3,4]
k = 3
# output
output = sol.isPossibleDivide(nums,k)
# answer
answer = False
print(output, answer, answer == output)
# input
nums = [1,1,2,2,3,3]
k = 2
# output
output = sol.isPossibleDivide(nums,k)
# answer
answer = False
print(output, answer, answer == output)
| [
"[email protected]"
]
| |
2370bf29a494f7f47ab8a1880ffe74984620fb45 | 2ca9e61829dd28113abb971d7db1c46cec64f10c | /app.py | 9244898c8240111091aa08a35e372220a5d2c367 | []
| no_license | Fordalex/task_manager | 90b8591591ea49be16dd32805de21cd8a939ccea | 4f9ba9057ddb2b1fdd52ce5d664796dd07529ced | refs/heads/master | 2023-05-10T05:49:20.194423 | 2020-01-14T11:05:38 | 2020-01-14T11:05:38 | 232,828,008 | 0 | 0 | null | 2023-05-01T21:19:27 | 2020-01-09T14:29:17 | HTML | UTF-8 | Python | false | false | 3,117 | py | import os
from flask import Flask, render_template, redirect, request, url_for
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
app = Flask(__name__)
app.config["MONGO_DBNAME"] = 'task_manager'
app.config["MONGO_URI"] = 'mongodb+srv://root:[email protected]/task_manager?retryWrites=true&w=majority'
mongo = PyMongo(app)
@app.route('/')
@app.route('/get_tasks')
def get_tasks():
return render_template("tasks.html", tasks=mongo.db.tasks.find())
@app.route('/add_task')
def add_task():
return render_template('addtask.html', categories=mongo.db.categories.find())
@app.route('/insert_task', methods=['POST'])
def insert_task():
tasks = mongo.db.tasks
tasks.insert_one(request.form.to_dict())
return redirect(url_for('get_tasks'))
@app.route('/edit_task/<task_id>')
def edit_task(task_id):
the_task = mongo.db.tasks.find_one({"_id": ObjectId(task_id)})
all_categories = mongo.db.categories.find()
return render_template('edittask.html', task=the_task,
categories=all_categories)
@app.route('/update_task/<task_id>', methods=["POST"])
def update_task(task_id):
tasks = mongo.db.tasks
tasks.update({'_id': ObjectId(task_id)},
{
'task_name': request.form.get('task_name'),
'category_name': request.form.get('category_name'),
'task_description': request.form.get('task_description'),
'due_date': request.form.get('due_date'),
'is_urgent': request.form.get('is_urgent')
})
return redirect(url_for('get_tasks'))
@app.route('/delete_task/<task_id>')
def delete_task(task_id):
mongo.db.tasks.remove({'_id': ObjectId(task_id)})
return redirect(url_for('get_tasks'))
@app.route('/get_categories')
def get_categories():
return render_template('categories.html',
categories=mongo.db.categories.find())
@app.route('/delete_category/<category_id>')
def delete_category(category_id):
mongo.db.categories.remove({'_id': ObjectId(category_id)})
return redirect(url_for('get_categories'))
@app.route('/edit_category/<category_id>')
def edit_category(category_id):
return render_template('editcategory.html',
category=mongo.db.categories.find_one(
{'_id': ObjectId(category_id)}))
@app.route('/update_category/<category_id>', methods=['POST'])
def update_category(category_id):
mongo.db.categories.update(
{'_id': ObjectId(category_id)},
{'category_name': request.form.get('category_name')})
return redirect(url_for('get_categories'))
@app.route('/insert_category', methods=['POST'])
def insert_category():
category_doc = {'category_name': request.form.get('category_name')}
mongo.db.categories.insert_one(category_doc)
return redirect(url_for('get_categories'))
@app.route('/add_category')
def add_category():
return render_template('addcategory.html')
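# app.run() reads its host/port from the IP and PORT environment variables (useful on
# cloud IDEs); when they are unset, Flask falls back to its defaults (127.0.0.1:5000).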
if __name__ == '__main__':
app.run(host=os.environ.get('IP'),
port=os.environ.get('PORT'),
debug=True)
| [
"[email protected]"
]
| |
48a6fe8f5a0e1c8b1711c0c50824fa37bf0d24f2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/336.py | 8db68177c284762ccb22195d0da91c770b0ce592 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,801 | py | #Name: Robin Park
#Username: robinp
#Google Code Jam Round 1B 2017
import random
import math
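# Approach in solve() below: only the R, Y and B counts are used (O, G and V are
# ignored). Subtract the common minimum from all three (Euclid-style reduction, as
# noted inline), then emit the remaining one- or two-colour repeating patterns;
# isValid() is a circular-adjacency checker kept as a helper.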
def isValid(arr):
length = len(arr)
for k in range(length):
if arr[k%length] == arr[(k+1)%length]:
return False
return True
def solve(N, R, O, Y, G, B, V):
if R > N/2 or Y > N/2 or B > N/2:
return "IMPOSSIBLE"
if R == 0:
if Y == B:
return "YB"*int(N/2)
if Y == 0:
if R == B:
return "RB"*int(N/2)
if B == 0:
if R == Y:
return "YR"*int(N/2)
if R == Y and Y == B:
return "RYB"*int(N/3)
min_color = min(R, Y, B) # recur over R, Y, B a la euclidean algorithm style
R = R - min_color
Y = Y - min_color
B = B - min_color
new_N = R + Y + B
#if R >= new_N/2 or Y >= new_N/2 or B >= new_N/2:
# return "IMPOSSIBLE"
if R == Y and R == 0:
if B <= min_color:
return "BRBY"*B + "BRY"*(min_color-B)
if B == Y and Y == 0:
if R <= min_color:
return "RYRB"*R + "RYB"*(min_color-R)
if R == B and R == 0:
if Y <= min_color:
return "YRYB"*Y + "YRB"*(min_color-Y)
if R == 0:
if Y > B:
if Y - B <= min_color:
return "RYBY"*(Y-B) + "RBY"*(min_color-Y+B) + "BY"*B
else:
return "IMPOSSIBLE"
else:
if B - Y <= min_color:
return "RBYB"*(B-Y) + "RYB"*(min_color-B+Y) + "YB"*Y
else:
return "IMPOSSIBLE"
if Y == 0:
if B > R:
if B - R <= min_color:
return "YBRB"*(B-R) + "YRB"*(min_color-B+R) + "RB"*R
else:
return "IMPOSSIBLE"
else:
if R - B <= min_color:
return "YRBR"*(R-B) + "YBR"*(min_color-R+B) + "BR"*B
else:
return "IMPOSSIBLE"
if B == 0:
if R > Y:
if R - Y <= min_color:
return "BRYR"*(R-Y) + "BYR"*(min_color-R+Y) + "YR"*Y
else:
return "IMPOSSIBLE"
else:
if Y - R <= min_color:
return "BYRY"*(Y-R) + "BRY"*(min_color-Y+R) + "RY"*R
else:
return "IMPOSSIBLE"
if __name__ == '__main__':
with open('unicorn.in', 'r') as file, open('unicorn.out', 'w') as w:
T = int(file.readline().strip())
for t in range(T):
N, R, O, Y, G, B, V = map(int, file.readline().strip().split())
w.write('Case #' + str(t+1) + ': ')
w.write(solve(N, R, O, Y, G, B, V))
w.write('\n')
print("done")
| [
"[email protected]"
]
| |
53f6236b4cdfc792e33b41c54cf098a380b42ad8 | cdc95cd7cbce8d9e904bb769b981f8b87d86ca7e | /Geeks for geeks/Dynamic Programming/Subset Sum Problem.py | 91572f0834aad557afef3571b28d5cc6dd656c0a | []
| no_license | amit-kr-debug/CP | c99ba608edf943b807d9cb707a9f10820ef1d6d6 | 1423a558904c4497c505c34ec38345ee979a036b | refs/heads/master | 2023-05-10T15:51:35.905745 | 2021-06-13T15:59:15 | 2021-06-13T15:59:15 | 291,243,005 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | """
Given an array of non-negative integers and a value sum, determine if there is a subset of the given set with sum equal to given sum.
Examples:
Input : arr[] = {4, 1, 10, 12, 5, 2},
sum = 9
Output : TRUE
{4, 5} is a subset with sum 9.
Input : arr[] = {1, 8, 2, 5},
sum = 4
Output : FALSE
There exists no subset with sum 4.
"""
# User function Template for Python3
def subsetSum(arr, N, S) :
dp = [[False for x in range(S + 1)] for y in range(N + 1)]
for i in range(N + 1) :
dp[i][0] = True
for i in range(1, S + 1) :
dp[0][i] = False
for i in range(1, N + 1) :
for j in range(1, S + 1) :
if arr[i - 1] <= j :
dp[i][j] = dp[i][j - arr[i - 1]] or dp[i - 1][j]
else :
dp[i][j] = dp[i - 1][j]
if dp[N][S] :
return 1
return 0
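# Quick sanity check of the template above, using the examples from the problem
# statement (illustrative driver, not part of the original template):
if __name__ == "__main__":
    print(subsetSum([4, 1, 10, 12, 5, 2], 6, 9))  # 1 -> subset {4, 5} sums to 9
    print(subsetSum([1, 8, 2, 5], 4, 4))          # 0 -> no subset sums to 4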
| [
"[email protected]"
]
| |
c6c8f87d0f8a443a9de6ef96207e645fd2b836e0 | 527fd39d3a1555800c2c32025fdd15fd86ba6672 | /Decorators/decorator.py | a11513c818b71b811384630675baface6261b694 | []
| no_license | rohanwarange/Python-Tutorials | cfd39551f7ff62bd032946976ba3820474e42405 | 53d8fb226f94d027ae7999f9678697206d37d83a | refs/heads/master | 2023-06-18T10:45:36.884324 | 2021-07-07T17:44:22 | 2021-07-07T17:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | def decorator_func(any_function):
def wrapper_function(*args,**kwargs):
print("this is awasom function")
return any_function(*args,**kwargs)
return wrapper_function
# this is awasom function
@decorator_func
def func(a):
print(f"This is function with argument{a}")
# def func():
# print(f"This is Function")
@decorator_func
def add(a,b):
return a+b
print(add(2,3)) | [
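# func() above is wrapped the same way, so calling it runs the wrapper first:
func(10)  # -> "this is awesome function" then "This is function with argument 10"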
"[email protected]"
]
| |
44c089e6367d19a106156ab03e0795e412f567ef | c1f205b4cc86456ad9d8c4b05c2c7a2a09818ec5 | /10_preprocessing.py | 272dfa9b82bd04c39cab4cf3a7a7f4a972d66de0 | []
| no_license | GINK03/deep-recommnder | d6c7c41188224c721f31b72333167cba16c11a4e | 3039c03755b73a04adde6ef84ff2c7da6987dddb | refs/heads/master | 2020-04-22T14:38:32.307010 | 2019-02-05T02:13:19 | 2019-02-05T02:13:19 | 170,450,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | import glob
from io import StringIO
import pandas as pd
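# Each file under download/training_set/ appears to follow the Netflix-Prize-style
# per-movie layout: the first line is the movie id followed by ':', and every other
# line is 'userId,score,date'. get_movie() turns one such file into a DataFrame of
# userId/score/movieId (the date column is dropped).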
def get_movie(fn):
lines = open(fn).readlines()
movie = lines.pop(0).strip()
csv = ''.join(lines)
csv = StringIO(csv)
df = pd.read_csv(csv, header=None, sep=',')
df.columns = ['userId', 'score', 'date']
df['movieId'] = movie.replace(':', '')
df = df.drop(['date'], axis=1)
#print(df.head())
return df
dfs = []
files = glob.glob('./download/training_set/*.txt')
for index, fn in enumerate(files):
print(index, len(files), fn)
df = get_movie(fn)
dfs.append(df)
from pathlib import Path
df = pd.concat(dfs, axis=0)
Path('works/dataset').mkdir(exist_ok=True, parents=True)
df.to_csv('works/dataset/preprocess.csv', index=None)
| [
"[email protected]"
]
| |
5d61a7e605d2e57dbf982682dbb8931f9342f0fd | 9b6632b532c1ece623c8c0bd81fc1fac88ee423c | /gluster/peer_op.py | 0939576d9edbe310fb6b3f1e8847fc094ab297c6 | []
| no_license | sun7shines/GlusterFS | 8542bc213d97e001952606881e0e3c42941901f9 | 1e1b3da72fe030307bb45b4c42260477fc826902 | refs/heads/master | 2021-01-20T13:48:42.785399 | 2015-09-08T07:11:30 | 2015-09-08T07:11:30 | 42,085,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | # -*- coding: utf-8 -*-
import operation.gluster.peer_db
import operation.gluster.peer_cmd
import operation.gluster.volume_clr
import operation.gluster.volume_ifo
import system.network.dns_service_op
import vmd_utils
import support.uuid_op
import os
def create_peer(param):
    # Nothing here returns an error yet; new events can be added if error returns are introduced later.
    # Check whether the host already carries gluster cluster information.
gluster_ip = param.get('gluster_ip')
operation.gluster.volume_clr.clear_peer_cfgs()
flag,sysid = operation.gluster.volume_ifo.getsysid()
if flag:
operation.gluster.peer_db.insert_peer(sysid,gluster_ip)
target_ip = param.get('target_ip')
if target_ip and target_ip != 'None':
(flag, psh) = vmd_utils.get_rpcConnection(target_ip)
if not flag:
return False,psh
flag,msg = psh.do_probe_peer(gluster_ip)
if not flag:
return False,msg
cmd = "echo '%s' > /var/lib/glusterd/glfs_ip" % (gluster_ip)
os.system(cmd)
return True,''
def delete_peer(param):
    # Check whether any bricks on this host are still in use.
dcuuid = operation.gluster.peer_db.get_host_dcuuid()
gluster_ip = operation.gluster.peer_db.get_host_gluster_ip()
if not gluster_ip:
gluster_ip = system.network.dns_service_op.get_localhost_ip()
is_vcuuid,vcuuid,vc_ip=support.uuid_op.get_vc_uuid()
if is_vcuuid and vcuuid!="127.0.0.1":
_,target_ip = operation.gluster.peer_db.get_available_peer_target_ip(dcuuid,gluster_ip, vcuuid,vc_ip)
if target_ip and target_ip != 'None':
operation.gluster.peer_cmd.detach_peer(target_ip,gluster_ip)
operation.gluster.peer_db.clear_peer()
operation.gluster.volume_clr.clear_peer_cfgs()
return True,''
| [
"[email protected]"
]
| |
369eeecbf665af61fc172953556fb50eddc111be | 69142ca100a022b73d96f467fb6944b17a2cbc2b | /myapp/myapp/__init__.py | 9d573a723f9bb8c7bbee476b7bbbaa8b72f30258 | []
| no_license | toscawidgets/tw2.core-docs-turbogears | 40a3b0189a68b2b30ce216a46c3e990454bfe25f | 535a715c22ac389207ef0c5d8ef6821ab505ebb2 | refs/heads/master | 2021-01-02T22:39:39.704822 | 2013-08-26T21:18:59 | 2013-08-26T21:18:59 | 1,913,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | # -*- coding: utf-8 -*-
"""The myapp package"""
| [
"[email protected]"
]
| |
84e58d3da90fb71cd6644555c0120c70d49027a7 | e0a51ac08f13f4d3d89ccd770225a9ca0cecb80a | /seucorretor/seucorretor/settings/localtests.py | f9f76f542e0e0c472e4eea530a8fba85bc505816 | []
| no_license | MarcosDihl/corretaza-buscador | 8bbc94a81f7414a3cbc4a1b7ce7b841431209b1c | a3579059839f32c585dda05775fa525fdd34121e | refs/heads/master | 2022-04-04T03:36:47.360708 | 2018-01-31T03:05:13 | 2018-01-31T03:05:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | """
Make the tests run faster on local machines
IMPORTANT: Avoid using these settings on staging and CI environments
"""
from .base import *
ADMINS = (
('', ''),
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(BASE_DIR, 'dbtest.sqlite3'),
}
}
ALLOWED_HOSTS = ['localhost', '127.0.0.1', ]
| [
"[email protected]"
]
| |
e5f484cd73d29d1f7bd4fc93cb9787b6732685cd | f4a1c3157b2544cf0240579607acb116de0818bd | /lookups/v1/phone_number/fetch-payfone-tcpa-compliance/fetch-payfone-tcpa-compliance.6.x.py | 2c83bf73afdc86fe9c84bbbc668dc323c1508bfb | []
| no_license | raybanain/sample-code | 31568cc3bcfd87ca33937740d7c264ab40f23b04 | 8778483e064ba571523fa5cc32e677fe5844e7a5 | refs/heads/master | 2020-03-22T05:02:54.452336 | 2018-06-28T18:23:04 | 2018-06-28T18:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
phone_number = client.lookups.phone_numbers('+16502530000') \
.fetch(add_ons='payfone_tcpa_compliance', add_ons_data={
'payfone_tcpa_compliance.right_party_contacted_date': '20160101'
})
print(phone_number.caller_name)
| [
"[email protected]"
]
| |
edcdc247341adbfd4332e2863e2faae3274b9082 | 8353888e4970dda70b2f4dbd7944e948ff024b94 | /games/migrations/0012_leaderboard.py | b6a42f52663f11dd7ff44ee2b5b66ad4ef2ec530 | [
"MIT"
]
| permissive | munisisazade/diplom_isi | fa420f8f7d960c65dc193d50b5989a69e2d43491 | 767531ef3a4b090d1bc0963e687b5215d6f92f53 | refs/heads/master | 2022-12-08T11:02:16.618962 | 2018-05-01T15:46:04 | 2018-05-01T15:46:04 | 131,720,529 | 1 | 1 | MIT | 2022-12-08T00:44:33 | 2018-05-01T14:09:30 | CSS | UTF-8 | Python | false | false | 734 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-16 13:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0011_auto_20170810_1759'),
]
operations = [
migrations.CreateModel(
name='LeaderBoard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True)),
('games', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.GameTime')),
],
),
]
| [
"[email protected]"
]
| |
963de42e51d86c314d88dc61b8a532258341648c | 2377f54ac0450a8ed3f09ca54bda972b8ff654f6 | /src/Monotonic_Manual.py | e2b7915e192f2a49860969db8e4d9ca6fe4cdeec | []
| no_license | jeffalstott/technologytimeseries_forecasting | 2d45bfe88d3c1206372130e6ab4412d4098f6109 | cc3c11a781d18ef45ba140d4eadf09005ed5bc86 | refs/heads/master | 2021-09-18T06:22:40.038626 | 2018-01-15T20:05:46 | 2018-01-15T20:05:46 | 65,255,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,129 | py |
# coding: utf-8
# In[ ]:
from pystan import StanModel
n_jobs = 4
import pandas as pd
import seaborn as sns
sns.set_color_codes()
import pickle
get_ipython().magic('pylab inline')
models = pickle.load(open('model.pkl', 'rb'))
# In[ ]:
def test_model_inference(model_name, Y=None, predictors=None, generated_data='data_latent', models=models,
generator_iter=50, inference_iter=1000):
if Y is None:
Y = pd.DataFrame(rand(100,5))
if predictors is None:
stan_data = models[model_name]['stan_data_creator'](Y, run_inference=False)
else:
stan_data = models[model_name]['stan_data_creator'](Y, predictors,run_inference=False)
stan_data = {**stan_data,
**models[model_name]['parameter_priors']}
generated_example = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=generator_iter)
sample = 20
generated_parameters = {}
for parameter in models[model_name]['model_parameters']:
generated_parameters[parameter] = generated_example[parameter][sample]
generated_data = pd.DataFrame(generated_example[generated_data][sample])
if predictors is None:
stan_data = models[model_name]['stan_data_creator'](generated_data, run_inference=True)
else:
stan_data = models[model_name]['stan_data_creator'](generated_data, predictors,run_inference=True)
stan_data = {**stan_data,
**models[model_name]['parameter_priors']}
model_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=inference_iter)
true_parameters_inferred_scores = {}
true_parameters_inferred_score_within_95CI = 0
n_parameters = 0
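    # For each parameter, locate the true (generating) value within the
    # posterior samples via its percentile, and count it as recovered when it
    # falls inside the central 95% interval (percentile between 2.5 and 97.5).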
from scipy.stats import percentileofscore
for parameter in models[model_name]['model_parameters']:
parameter_samples = model_fit[parameter]
if parameter_samples.ndim>2:
parameter_samples = parameter_samples.reshape(parameter_samples.shape[0],
prod(parameter_samples.shape[1:]))
true_parameters_inferred_scores[parameter] = array(list(map(percentileofscore,
parameter_samples.T,
generated_parameters[parameter].ravel())))
true_parameters_inferred_score_within_95CI += sum((true_parameters_inferred_scores[parameter]>2.5) &
(true_parameters_inferred_scores[parameter]<97.5)
)
n_parameters += true_parameters_inferred_scores[parameter].size
return true_parameters_inferred_score_within_95CI/n_parameters#, true_parameters_inferred_score_within_95CI
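# Example usage (rough check; the 'Y~AR' model is defined further below):
#   coverage_fraction = test_model_inference('Y~AR')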
from pystan.misc import _summary, _array_to_table
def _print_stanfit(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2):
if fit.mode == 1:
return "Stan model '{}' is of mode 'test_grad';\n" "sampling is not conducted.".format(fit.model_name)
elif fit.mode == 2:
return "Stan model '{}' does not contain samples.".format(fit.model_name)
if pars is None:
pars = fit.sim['pars_oi']
fnames = fit.sim['fnames_oi']
n_kept = [s - w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
header = ""#Inference for Stan model: {}.\n".format(fit.model_name)
header += "{} chains, each with iter={}; warmup={}; thin={}; \n"
header = header.format(fit.sim['chains'], fit.sim['iter'], fit.sim['warmup'],
fit.sim['thin'], sum(n_kept))
header += "post-warmup draws per chain={}, total post-warmup draws={}.\n\n"
header = header.format(n_kept[0], sum(n_kept))
footer = "\n\nSamples were drawn using {} at {}.\n"# "For each parameter, n_eff is a crude measure of effective sample size,\n"\
# "and Rhat is the potential scale reduction factor on split chains (at \n"\
# "convergence, Rhat=1)."
sampler = fit.sim['samples'][0]['args']['sampler_t']
date = fit.date.strftime('%c') # %c is locale's representation
footer = footer.format(sampler, date)
s = _summary(fit, pars, probs)
body = _array_to_table(s['summary'], s['summary_rownames'],
s['summary_colnames'], digits_summary)
return header + body + footer
def plot_time_series_inference(model_fit, var='data_latent', x=None,
ax=None, ind=0, **kwargs):
from scipy.stats import scoreatpercentile
ci_thresholds = [2.5, 25, 75, 97.5]
if len(model_fit[var].shape)<3:
data = model_fit[var]
else:
data = model_fit[var][:,:,ind]
CIs = scoreatpercentile(data, ci_thresholds, axis=0)
CIs = pd.DataFrame(data=CIs.T, columns=ci_thresholds)
if ax is None:
ax=gca()
if x is None:
x = arange(data.shape[1])
ax.fill_between(x, CIs[2.5], CIs[97.5],alpha=.5, **kwargs)
ax.fill_between(x, CIs[25], CIs[75], **kwargs)
# In[3]:
data_directory = '../data/'
empirical_data = pd.read_csv(data_directory+'time_series.csv',index_col=0)
empirical_data = empirical_data.reindex(arange(empirical_data.index[0],empirical_data.index[-1]+1))
metadata = pd.read_csv(data_directory+'time_series_metadata.csv')
target_tech_names = metadata.loc[(metadata['Source']=='Farmer_Lafond')*(metadata['Type']=='Price'), 'Name']
empirical_time_series = log(empirical_data[target_tech_names])
# valid_time_series = sum(~empirical_time_series.loc[1976:].isnull())>3
# valid_domains = metadata.set_index('Name').loc[valid_time_series.index[valid_time_series]]['Domain'].unique()
# print("Number of valid domains: %i"%valid_domains.size)
# In[56]:
model_name = 'Y~ARMA'
models[model_name] = {}
models[model_name]['code'] = """
data {
int T; // number of time steps
int K; // Number of time series
int P; // Number of predictors
int L; // Number of lags for ARMA element
matrix[T, K] Y; // data to model
matrix[T, P] predictors[K]; // predictors
int first_observation[K]; // index of first observation in each time series
int last_observation[K]; // index of last observation in each time series
int n_missing_observations_before_first_and_last; // number of missing observations before and after the end of the time series
int n_missing_updates_between_first_and_last; // number of missing updates (steps between each observation) with the time series
int run_inference;
// priors
real mu_prior_location;
real mu_prior_scale;
real sigma_prior_location;
real sigma_prior_scale;
real phi_prior_location;
real phi_prior_scale;
real theta_prior_location;
real theta_prior_scale;
//real beta_prior_location;
//real beta_prior_scale;
}
parameters {
vector[K] mu;
vector[K] sigma;
matrix[K,L] phi;
//matrix[K,L] theta;
//matrix[K,P] beta;
vector[n_missing_observations_before_first_and_last] free_latent_parameters;
vector[n_missing_updates_between_first_and_last] restricted_latent_parameters;
}
transformed parameters {
matrix[T,K] Y_latent;
// Fill the latent data before and after the observed data with completely unrestricted parameters
{
int free_param_counter;
free_param_counter = 1;
for (k in 1:K){
if (first_observation[k]>1){
Y_latent[1:first_observation[k]-1, k] =
free_latent_parameters[free_param_counter:free_param_counter+first_observation[k]-1];
free_param_counter = free_param_counter + first_observation[k]-1;
}
if (last_observation[k]<T){
Y_latent[last_observation[k]+1:T, k] =
free_latent_parameters[free_param_counter:free_param_counter+T-last_observation[k]];
free_param_counter = free_param_counter + T-last_observation[k];
}
}
}
// Fill the latent data within the observed data with either data values or restricted parameters
{
int restricted_param_counter;
int gap_width;
real previous_value;
int previous_value_index;
restricted_param_counter = 1;
for (k in 1:K){
previous_value = Y[first_observation[k],k];
Y_latent[first_observation[k],k] = Y[first_observation[k],k];
previous_value_index = first_observation[k];
for (t in first_observation[k]+1:last_observation[k]){
if (Y[t,k]>-999){
gap_width = t-previous_value_index-1;
if (gap_width>0){
// These are the unobserved UPDATES between observed time steps.
            // I.e. If Y_3 and Y_1 are observed, but Y_2 is not, these are (Y_3 - Y_2) and (Y_2-Y_1)
// We will say that these updates have to sum up to the observed difference between Y_3 and Y_1.
// The unobserved time steps then have values that are the cumulative sum of these updates.
Y_latent[previous_value_index+1:t, k] =
cumulative_sum(
restricted_latent_parameters[restricted_param_counter:(restricted_param_counter+gap_width+1)]
/ sum(restricted_latent_parameters[restricted_param_counter:restricted_param_counter+gap_width+1])
* (Y[t,k] - previous_value)
) + previous_value;
// Don't need to include the last update in this sum, since we can explicitly grab the level
            // that we get to from the data itself.
//data_latent[previous_value_index+1:t-1, k] =
//cumsum(restricted_latent_parameters[restricted_param_counter:restricted_param_counter+gap_width])
//+ previous_value;
}
Y_latent[t,k] = Y[t,k];
previous_value = Y[t,k];
previous_value_index = t;
}
}
}
}
}
model {
matrix[T,K] err;
matrix[T,K] nu;
mu ~ normal(mu_prior_location, mu_prior_scale);
sigma ~ cauchy(sigma_prior_location, sigma_prior_scale);
//for (i in 1:rows(beta)){
// beta[i] ~ normal(beta_prior_location, beta_prior_scale);
//}
phi[:,1] ~ normal(1, phi_prior_scale); //prior is centered around random walk
if (L>1){
for (i in 2:L){
phi[:,i] ~ normal(phi_prior_location, phi_prior_scale);
}
}
//for (i in 1:rows(theta)){
// theta[i] ~ normal(theta_prior_location, theta_prior_scale);
//}
for (k in 1:K){
err[:,k] ~ normal(0, sigma[k]);
}
if(run_inference==1){
for (k in 1:K) {
for (t in (L+1):T){
nu[t,k] = mu[k] + phi[k]*Y_latent[t-L:t-1, k];// + theta[k]*err[t-L:t-1, k]; //+ exp(beta[k]*predictors[k][t])
err[t,k] = Y_latent[t,k] - nu[t,k];
}
nu[1,k] = mu[k] + phi[k,1]*mu[k]; //+ exp(beta[k]*predictors[k][1])
err[1,k] = Y_latent[1,k] - nu[1,k];
if (L>1){
for (t in 2:L){
nu[t,k] = mu[k] + phi[k,1:t-1]*Y_latent[1:t-1, k];// + theta[k, 1:t-1]*err[1:t-1, k]; //+ exp(beta[k]*predictors[k][t])
err[t,k] = Y_latent[t,k] - nu[t,k];
}
}
}
}
}
"""
models[model_name]['stan_model'] = StanModel(model_code=models[model_name]['code'])
models[model_name]['parameter_priors'] = {
'mu_prior_location': 0,
'mu_prior_scale': 1,
'sigma_prior_location': 0,
'sigma_prior_scale': 1,
'phi_prior_location': 0,
'phi_prior_scale': 1,
'theta_prior_location': 0,
'theta_prior_scale': 1,
# 'beta_prior_location': 0,
# 'beta_prior_scale': 1,
}
models[model_name]['model_parameters'] = unique([i.split('_prior')[0] for i in models[model_name]['parameter_priors'].keys()])
def stan_data_creator(Y, predictors=None, L=3, run_inference=True):
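    """Pack Y into the input dictionary expected by the Stan program.
    Missing observations are encoded as -999; the Stan code above treats any
    value greater than -999 as observed.
    """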
Y = Y.copy()
T = Y.shape[0]
K = Y.shape[1]
Y.index = range(T)
Y.columns = range(K)
first_observation = Y.apply(lambda x: x.first_valid_index())
last_observation = Y.apply(lambda x: x.last_valid_index())
n_missing_observations_before_first_and_last = sum(first_observation)+sum((T-1)-last_observation)
n_missing_updates_between_first_and_last = sum([Y.loc[first_observation[k]:last_observation[k], k].diff().isnull()[1:].sum() for k in range(K)])
if predictors is None:
predictors = ones((K,T,0))
stan_data = {'Y':Y.fillna(-999),
'T': T,
'K': K,
'L': L,
'first_observation': first_observation.astype('int')+1,
'last_observation': last_observation.astype('int')+1,
'n_missing_observations_before_first_and_last': n_missing_observations_before_first_and_last,
'n_missing_updates_between_first_and_last': n_missing_updates_between_first_and_last,
'P': predictors.shape[-1],
'predictors': predictors,
'run_inference': int(run_inference),
}
return stan_data
models[model_name]['stan_data_creator'] = stan_data_creator
# In[63]:
model_name = 'Y~AR'
models[model_name] = {}
models[model_name]['code'] = """
data {
int T; // number of time steps
int K; // Number of time series
int P; // Number of lags for AR element
matrix[T, K] Y; // data to model
int first_observation[K]; // index of first observation in each time series
int last_observation[K]; // index of last observation in each time series
int n_missing_observations_before_first_and_last; // number of missing observations before and after the end of the time series
    int n_missing_updates_between_first_and_last; // number of missing updates (steps between each observation) within the time series
int run_inference;
// priors
real mu_prior_location;
real mu_prior_scale;
real sigma_prior_location;
real sigma_prior_scale;
real phi_prior_location;
real phi_prior_scale;
//real theta_prior_location;
//real theta_prior_scale;
}
parameters {
vector[K] mu;
vector[K] sigma;
matrix[K,P] phi;
vector[n_missing_observations_before_first_and_last] free_latent_parameters;
vector[n_missing_updates_between_first_and_last] restricted_latent_parameters;
}
transformed parameters {
matrix[T,K] Y_latent;
// Fill the latent data before and after the observed data with completely unrestricted parameters
{
int free_param_counter;
free_param_counter = 1;
for (k in 1:K){
if (first_observation[k]>1){
Y_latent[1:first_observation[k]-1, k] =
free_latent_parameters[free_param_counter:free_param_counter+first_observation[k]-1];
free_param_counter = free_param_counter + first_observation[k]-1;
}
if (last_observation[k]<T){
Y_latent[last_observation[k]+1:T, k] =
free_latent_parameters[free_param_counter:free_param_counter+T-last_observation[k]];
free_param_counter = free_param_counter + T-last_observation[k];
}
}
}
// Fill the latent data within the observed data with either data values or restricted parameters
{
int restricted_param_counter;
int gap_width;
real previous_value;
int previous_value_index;
restricted_param_counter = 1;
for (k in 1:K){
previous_value = Y[first_observation[k],k];
Y_latent[first_observation[k],k] = Y[first_observation[k],k];
previous_value_index = first_observation[k];
for (t in first_observation[k]+1:last_observation[k]){
if (Y[t,k]>-999){
gap_width = t-previous_value_index-1;
if (gap_width>0){
// These are the unobserved UPDATES between observed time steps.
            // I.e. If Y_3 and Y_1 are observed, but Y_2 is not, these are (Y_3 - Y_2) and (Y_2-Y_1)
// We will say that these updates have to sum up to the observed difference between Y_3 and Y_1.
// The unobserved time steps then have values that are the cumulative sum of these updates.
Y_latent[previous_value_index+1:t, k] =
cumulative_sum(
restricted_latent_parameters[restricted_param_counter:(restricted_param_counter+gap_width+1)]
/ sum(restricted_latent_parameters[restricted_param_counter:restricted_param_counter+gap_width+1])
* (Y[t,k] - previous_value)
) + previous_value;
// Don't need to include the last update in this sum, since we can explicitly grab the level
            // that we get to from the data itself.
//data_latent[previous_value_index+1:t-1, k] =
//cumsum(restricted_latent_parameters[restricted_param_counter:restricted_param_counter+gap_width])
//+ previous_value;
}
Y_latent[t,k] = Y[t,k];
previous_value = Y[t,k];
previous_value_index = t;
}
}
}
}
}
model {
matrix[T,K] err;
matrix[T,K] nu;
mu ~ normal(mu_prior_location, mu_prior_scale);
sigma ~ cauchy(sigma_prior_location, sigma_prior_scale);
if (P>0){
phi[:,1] ~ normal(1, phi_prior_scale); //prior is centered around random walk
}
if (P>1){
for (p in 2:P){
phi[:,p] ~ normal(phi_prior_location, phi_prior_scale);
}
}
for (k in 1:K) {
nu[:,k] = rep_vector(mu[k], T);
if (P>0){
for (t in P+1:T){
nu[t,k] = nu[t,k] + phi[k]*Y_latent[t-P:t-1,k];
}
}
}
err = Y_latent - nu;
for (k in 1:K){
err[P+1:T,k] ~ normal(0, sigma[k]);
}
}
"""
models[model_name]['stan_model'] = StanModel(model_code=models[model_name]['code'])
models[model_name]['parameter_priors'] = {
'mu_prior_location': 0,
'mu_prior_scale': 1,
'sigma_prior_location': 0,
'sigma_prior_scale': 1,
'phi_prior_location': 0,
'phi_prior_scale': 1,
# 'theta_prior_location': 0,
# 'theta_prior_scale': 1,
# 'beta_prior_location': 0,
# 'beta_prior_scale': 1,
}
models[model_name]['model_parameters'] = unique([i.split('_prior')[0] for i in models[model_name]['parameter_priors'].keys()])
def stan_data_creator(Y, predictors=None, p=1, run_inference=True):
Y = Y.copy()
T = Y.shape[0]
K = Y.shape[1]
Y.index = range(T)
Y.columns = range(K)
first_observation = Y.apply(lambda x: x.first_valid_index())
last_observation = Y.apply(lambda x: x.last_valid_index())
n_missing_observations_before_first_and_last = sum(first_observation)+sum((T-1)-last_observation)
n_missing_updates_between_first_and_last = sum([Y.loc[first_observation[k]:last_observation[k], k].diff().isnull()[1:].sum() for k in range(K)])
stan_data = {'Y':Y.fillna(-999),
'T': T,
'K': K,
'P': p,
'first_observation': first_observation.astype('int')+1,
'last_observation': last_observation.astype('int')+1,
'n_missing_observations_before_first_and_last': n_missing_observations_before_first_and_last,
'n_missing_updates_between_first_and_last': n_missing_updates_between_first_and_last,
'run_inference': int(run_inference),
}
return stan_data
models[model_name]['stan_data_creator'] = stan_data_creator
# In[70]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~AR'\nY = pd.DataFrame(rand(100,3))\n# Y.iloc[0] = nan\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=0), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nmodel_fit")
# In[66]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~AR'\nY = pd.DataFrame(cumsum(rand(100,3)*3, axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=1), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nmodel_fit")
# In[40]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~AR'\nY = pd.DataFrame(cumsum(cumsum(rand(100,3)*3, axis=0), axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=2), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)")
# In[37]:
model_name = 'Y~ARMA'
models[model_name] = {}
models[model_name]['code'] = """
data {
int T; // number of time steps
int K; // Number of time series
int<lower=0,upper=T-1> P; // Number of lags for AR element
int<lower=0,upper=T-1> Q; // Number of lags for MA element
matrix[T, K] Y; // data to model
int first_observation[K]; // index of first observation in each time series
int last_observation[K]; // index of last observation in each time series
int n_missing_observations_before_first_and_last; // number of missing observations before and after the end of the time series
    int n_missing_updates_between_first_and_last; // number of missing updates (steps between each observation) within the time series
int run_inference;
// priors
real mu_prior_location;
real mu_prior_scale;
real sigma_prior_location;
real sigma_prior_scale;
real phi_prior_location;
real phi_prior_scale;
real theta_prior_location;
real theta_prior_scale;
}
parameters {
vector[K] mu;
vector[K] sigma;
matrix[K,P] phi;
matrix<lower = -1, upper = 1>[K,Q] theta;
vector[n_missing_observations_before_first_and_last] free_latent_parameters;
vector[n_missing_updates_between_first_and_last] restricted_latent_parameters;
}
transformed parameters {
matrix[T,K] Y_latent;
// Fill the latent data before and after the observed data with completely unrestricted parameters
{
int free_param_counter;
free_param_counter = 1;
for (k in 1:K){
if (first_observation[k]>1){
Y_latent[1:first_observation[k]-1, k] =
free_latent_parameters[free_param_counter:free_param_counter+first_observation[k]-1];
free_param_counter = free_param_counter + first_observation[k]-1;
}
if (last_observation[k]<T){
Y_latent[last_observation[k]+1:T, k] =
free_latent_parameters[free_param_counter:free_param_counter+T-last_observation[k]];
free_param_counter = free_param_counter + T-last_observation[k];
}
}
}
// Fill the latent data within the observed data with either data values or restricted parameters
{
int restricted_param_counter;
int gap_width;
real previous_value;
int previous_value_index;
restricted_param_counter = 1;
for (k in 1:K){
previous_value = Y[first_observation[k],k];
Y_latent[first_observation[k],k] = Y[first_observation[k],k];
previous_value_index = first_observation[k];
for (t in first_observation[k]+1:last_observation[k]){
if (Y[t,k]>-999){
gap_width = t-previous_value_index-1;
if (gap_width>0){
// These are the unobserved UPDATES between observed time steps.
            // I.e. If Y_3 and Y_1 are observed, but Y_2 is not, these are (Y_3 - Y_2) and (Y_2-Y_1)
// We will say that these updates have to sum up to the observed difference between Y_3 and Y_1.
// The unobserved time steps then have values that are the cumulative sum of these updates.
Y_latent[previous_value_index+1:t, k] =
cumulative_sum(
restricted_latent_parameters[restricted_param_counter:(restricted_param_counter+gap_width+1)]
/ sum(restricted_latent_parameters[restricted_param_counter:restricted_param_counter+gap_width+1])
* (Y[t,k] - previous_value)
) + previous_value;
// Don't need to include the last update in this sum, since we can explicitly grab the level
            // that we get to from the data itself.
//data_latent[previous_value_index+1:t-1, k] =
//cumsum(restricted_latent_parameters[restricted_param_counter:restricted_param_counter+gap_width])
//+ previous_value;
}
Y_latent[t,k] = Y[t,k];
previous_value = Y[t,k];
previous_value_index = t;
}
}
}
}
}
model {
matrix[T,K] err;
matrix[T,K] nu;
mu ~ normal(mu_prior_location, mu_prior_scale);
sigma ~ cauchy(sigma_prior_location, sigma_prior_scale);
if (P>0){
phi[:,1] ~ normal(1, phi_prior_scale); //prior is centered around random walk
}
if (P>1){
for (p in 2:P){
phi[:,p] ~ normal(phi_prior_location, phi_prior_scale);
}
}
for (k in 1:K) {
nu[:,k] = rep_vector(mu[k], T);
if (P>0){
for (t in P+1:T){
nu[t,k] = nu[t,k] + phi[k]*Y_latent[t-P:t-1,k];
}
}
if (Q==0){
err[:,k] = Y_latent[:,k] - nu[:,k];
}
else{
//Need to sort out initial cases here.
nu[1,k] = mu[k] + phi[k,1]*mu[k];
err[1,k] = Y_latent[1,k] - nu[1,k];
if (Q>1){
for (t in 2:Q){
nu[t,k] = nu[t,k] + phi[k,1:t-1]*Y_latent[1:t-1, k] + theta[k,1:t-1]*err[1:t-1, k];
err[t,k] = Y_latent[t,k] - nu[t,k];
}
}
for (t in Q+1:T){
nu[t,k] = nu[t,k] + theta[k]*err[t-Q:t-1,k]; // Damn. This adding thetas effect on top of phis effect won't work. They have to be calculated together. Or does it? It depends on whether the phis are working on lagged Y_latent or lagged nu. They're working on lagged Y_latent, so we should be safe, right?
err[t,k] = Y_latent[t,k] - nu[t,k];
}
}
}
for (k in 1:K){
err[max(P+1,Q+1):T,k] ~ normal(0, sigma[k]);
}
}
"""
models[model_name]['stan_model'] = StanModel(model_code=models[model_name]['code'])
models[model_name]['parameter_priors'] = {
'mu_prior_location': 0,
'mu_prior_scale': 1,
'sigma_prior_location': 0,
'sigma_prior_scale': 1,
'phi_prior_location': 0,
'phi_prior_scale': 1,
'theta_prior_location': 0,
'theta_prior_scale': 1,
# 'beta_prior_location': 0,
# 'beta_prior_scale': 1,
}
models[model_name]['model_parameters'] = unique([i.split('_prior')[0] for i in models[model_name]['parameter_priors'].keys()])
def stan_data_creator(Y, predictors=None, p=1, q=1, run_inference=True):
Y = Y.copy()
T = Y.shape[0]
K = Y.shape[1]
Y.index = range(T)
Y.columns = range(K)
first_observation = Y.apply(lambda x: x.first_valid_index())
last_observation = Y.apply(lambda x: x.last_valid_index())
n_missing_observations_before_first_and_last = sum(first_observation)+sum((T-1)-last_observation)
n_missing_updates_between_first_and_last = sum([Y.loc[first_observation[k]:last_observation[k], k].diff().isnull()[1:].sum() for k in range(K)])
stan_data = {'Y':Y.fillna(-999),
'T': T,
'K': K,
'P': p,
'Q': q,
'first_observation': first_observation.astype('int')+1,
'last_observation': last_observation.astype('int')+1,
'n_missing_observations_before_first_and_last': n_missing_observations_before_first_and_last,
'n_missing_updates_between_first_and_last': n_missing_updates_between_first_and_last,
'run_inference': int(run_inference),
}
return stan_data
models[model_name]['stan_data_creator'] = stan_data_creator
# In[ ]:
# Scratch Stan fragment (not valid Python), kept commented out:
#     nu[1,k] = mu[k] + phi[k,1]*mu[k];
#     err[1,k] = Y_latent[1,k] - nu[1,k];
#     if (P>1){
#         for (t in 2:P){
#             nu[t,k] = mu[k] + dot_product(phi[k,1:t-1],Y_latent[1:t-1, k]);
#             err[t,k] = Y_latent[t,k] - nu[t,k];
#         }
#     }
#     for (t in (P+1):T){
#         y[2:(N - 1)] ~ normal(alpha + beta * y[1:(N - 1)], sigma);
#         nu[t,k] = mu[k] + dot_product(phi[k],Y_latent[t-P:t-1, k]);
#         err[t,k] = Y_latent[t,k] - nu[t,k];
#     }
# }
# In[38]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(rand(100,3)*3)\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=0,q=0), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[39]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(cumsum(randn(100,3)*3, axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=1,q=0), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[40]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(cumsum(randn(100,3)*3, axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=1,q=1), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[41]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(randn(100,3)*3)\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=0,q=1), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[45]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(cumsum(randn(100,3)*3, axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=1,q=3), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[48]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(cumsum(randn(100,3)*3, axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=2,q=1), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[49]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(cumsum(cumsum(randn(100,3)*3, axis=0),axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=2,q=0), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[50]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nY = pd.DataFrame(cumsum(cumsum(randn(100,3)*3, axis=0),axis=0))\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=2,q=1), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[99]:
m = """
data {
int<lower=0> K;
int<lower=0> N;
real y[N];
}
parameters {
real alpha;
real beta[K];
real<lower=0> sigma;
} model {
alpha ~ normal(0,1);
beta ~ normal(0,1);
sigma ~ normal(0,1);
for (n in (K+1):N) {
real mu;
mu = alpha;
for (k in 1:K)
mu = mu + beta[k] * y[n-k];
y[n] ~ normal(mu, sigma);
}
}
"""
model = StanModel(model_code=m)
# In[100]:
# Y = pd.DataFrame(cumsum(cumsum(randn(1000,3), axis=0),axis=0))
# Y = pd.DataFrame(randn(1000))
# Y.iloc[2:] += Y.iloc[:-2] + Y.iloc[1:-1]
# Y = pd.DataFrame(cumsum(randn(1000,3), axis=0))
n = 1000
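# Simulate an AR(2) process by hand: Y_t = Y_{t-1} + 0.5 * Y_{t-2} + noise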
Y = zeros(n)
Y[0] = randn()
Y[1] = randn()+.5*Y[0]
for i in range(2,n):
Y[i] = randn()+Y[i-1]+.5*Y[i-2]
model_fit = model.sampling(data={'K': 2,
'N': n,
'y': Y}, n_jobs=n_jobs,iter=500)
print(model_fit)
# In[98]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\n# Y = pd.DataFrame(cumsum(cumsum(randn(100,3)*3, axis=0),axis=0))\nY = pd.DataFrame(Y)\nstan_data = {**models[model_name]['stan_data_creator'](Y,p=2,q=0), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[30]:
model_fit.plot(['mu', 'phi', 'theta'])
# In[20]:
model_fit.plot(['mu', 'phi'])
# In[ ]:
get_ipython().run_cell_magic('time', '', "\nmodel_name = 'Y~ARMA'\nstan_data = {**models[model_name]['stan_data_creator'](pd.DataFrame(rand(100,1)),p=0, q=0), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)\nprint(model_fit)")
# In[43]:
get_ipython().run_cell_magic('time', '', "Y = empirical_time_series.loc[1960:1970]\nany_data = Y.isnull().all(axis=0)\nY = Y[any_data[~any_data].index].iloc[:,[0,1,2,3,]]\n\nmodel_name = 'Y~ARMA'\nstan_data = {**models[model_name]['stan_data_creator'](Y,L=1), **models[model_name]['parameter_priors']} \n\nmodel_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs,iter=500)")
# In[ ]:
from scipy.stats import gaussian_kde
def predict_with_model(model_name,
time_series,
predictors,
training_years,
horizons,
time_series_from_each_time_period,
technology_forecast_models_log_pd,
# technology_forecast_models_parameters,
technology_forecast_models_95CI,
# technology_forecast_models_Y_sim,
technology_forecast_models_fit,
target_tech_names,
model_code=None,
model_parameters=None,
parameter_priors=None,
print_output=True):
if model_code is None:
model_code = models[model_name]['code']
if model_parameters is None:
model_parameters = models[model_name]['model_parameters']
if parameter_priors is None:
parameter_priors = models[model_name]['parameter_priors']
technology_forecast_models_log_pd[model_name] = pd.Panel(items=target_tech_names,
major_axis=horizons,
minor_axis=training_years)
technology_forecast_models_95CI[model_name] = pd.Panel(items=target_tech_names,
major_axis=horizons,
minor_axis=training_years)
# technology_forecast_models_parameters[model_name] = pd.Panel(items=target_tech_names,
# major_axis=model_parameters,
# minor_axis=training_years)
# technology_forecast_models_Y_sim[model_name] = {}
technology_forecast_models_fit[model_name] = {}
for training_year in training_years:
print(training_year)
forecast_start_ind = int(training_year-first_year)
time_series_from_time_period = time_series_from_each_time_period[training_year]
n_time_series_from_time_period = len(time_series_from_time_period)
if predictors is not None:
stan_data = stan_data_from_Y(time_series.loc[:training_year,
time_series_from_time_period],
forecast_to_observation=time_series.shape[0],
predictors=predictors[time_series_from_time_period])
else:
stan_data = stan_data_from_Y(time_series.loc[:training_year,
time_series_from_time_period],
forecast_to_observation=time_series.shape[0])
stan_data = {**stan_data, **parameter_priors}
###
model_fit = models[model_name]['stan_model'].sampling(data=stan_data, n_jobs=n_jobs)
Y_sim = model_fit['Y_sim']
# technology_forecast_models_Y_sim[model_name][training_year] = Y_sim
if print_output:
print(_print_stanfit(model_fit, model_parameters))
technology_forecast_models_fit[model_name] = model_fit
# for parameter in model_parameters:
# technology_forecast_models_parameters[model_name]
# p = model_fit[parameter].mean(axis=0)
# if type(p)==numpy.ndarray:
# for i in range(len(p)):
# technology_forecast_models_parameters[model_name].ix[time_series_from_time_period,
# parameter+'_%i'%i,
# training_year] = p[i]
# else:
# technology_forecast_models_parameters[model_name].ix[time_series_from_time_period,
# parameter,
# training_year] = p
for horizon in horizons:
if horizon=='all':
forecast_stop_ind = time_series.shape[0]
else:
forecast_stop_ind = horizon+forecast_start_ind
times, techs = where(time_series[time_series_from_time_period].notnull())
techs_to_forecast = techs[(forecast_start_ind<times)*(times<forecast_stop_ind)]
times_to_forecast = times[(forecast_start_ind<times)*(times<forecast_stop_ind)]
lpd = list(map(lambda x,y: x.logpdf(y)[0],
map(gaussian_kde, Y_sim[:,times_to_forecast,techs_to_forecast].T),
time_series[time_series_from_time_period].values[times_to_forecast, techs_to_forecast]))
lpd = array(lpd)
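            # Observations far outside the KDE support yield -inf log-density;
            # replace those with the log of the smallest positive double so the
            # per-technology sums below stay finite.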
lpd[lpd==-inf] = log(finfo('d').tiny)
            lpd = pd.Series(lpd).groupby(techs_to_forecast).sum()
lpd = lpd.reindex(arange(len(time_series_from_time_period)))
lpd.index = time_series_from_time_period
technology_forecast_models_log_pd[model_name].ix[time_series_from_time_period,
horizon,training_year] = lpd
CI95 = portion_of_forecast_within_CI(model_fit, 'Y_sim',
time_series[time_series_from_time_period].values,
forecast_start_ind,
forecast_stop_ind)
technology_forecast_models_95CI[model_name].ix[time_series_from_time_period,
horizon,training_year] = CI95
# In[133]:
print(_print_stanfit(model_fit, pars=['mu', 'sigma']))
| [
"[email protected]"
]
| |
f05da529322bfe042d838fae947143e6c3dae144 | 4f04ce5667f895889cfe54ed5f0dec6f5e7d4e4e | /bert_brain/data_sets/choice_of_plausible_alternatives.py | 3dcaca9a9b92be43edf07b18daef78b2337f1dc4 | []
| no_license | danrsc/bert_brain | e172859b7ab93b0a05ed7c5b936778fae134eabb | eca204f163018270ac6b6687c2f3b6b5b158a89c | refs/heads/master | 2022-11-28T14:32:45.420452 | 2020-08-03T00:14:42 | 2020-08-03T00:14:42 | 167,277,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,219 | py | import os
import json
from dataclasses import dataclass
import numpy as np
from .input_features import RawData, KindData, ResponseKind, FieldSpec
from .corpus_base import CorpusBase, CorpusExampleUnifier, path_attribute_field
__all__ = ['ChoiceOfPlausibleAlternatives']
@dataclass(frozen=True)
class ChoiceOfPlausibleAlternatives(CorpusBase):
path: str = path_attribute_field('choice_of_plausible_alternatives_path')
@staticmethod
def _read_examples(path, example_manager: CorpusExampleUnifier, labels):
examples = list()
with open(path, 'rt') as f:
for line in f:
fields = json.loads(line.strip('\n'))
premise = fields['premise'].split()
multipart_id = len(example_manager)
choices = list()
while True:
choice_name = 'choice{}'.format(len(choices) + 1)
if choice_name not in fields:
break
choices.append(fields[choice_name].split())
question_expansions = {
'cause': 'What was the cause of this?',
'effect': 'What happened as a result?'}
if fields['question'] not in question_expansions:
                    raise ValueError('Unknown question type: {}'.format(fields['question']))
question = question_expansions[fields['question']].split()
label = fields['label'] if 'label' in fields else 1
for index_choice, choice in enumerate(choices):
data_ids = -1 * np.ones(len(premise) + len(question) + len(choice), dtype=np.int64)
# doesn't matter which word we attach the label to since we specify below that is_sequence=False
data_ids[0] = len(labels)
choice_label = 1 if label == index_choice else 0
examples.append(example_manager.add_example(
example_key=None,
words=premise + question + choice,
sentence_ids=[0] * len(premise) + [1] * len(question) + [2] * len(choice),
data_key='copa',
data_ids=data_ids,
start=0,
stop=len(premise),
start_sequence_2=len(premise),
stop_sequence_2=len(premise) + len(question),
start_sequence_3=len(premise) + len(question),
stop_sequence_3=len(premise) + len(question) + len(choice),
multipart_id=multipart_id))
labels.append(choice_label)
return examples
@classmethod
def response_key(cls) -> str:
return 'copa'
@classmethod
def num_classes(cls) -> int:
return 2
def _load(self, example_manager: CorpusExampleUnifier, use_meta_train: bool):
labels = list()
train = ChoiceOfPlausibleAlternatives._read_examples(
os.path.join(self.path, 'train.jsonl'), example_manager, labels)
meta_train = None
if use_meta_train:
from sklearn.model_selection import train_test_split
idx_train, idx_meta_train = train_test_split(np.arange(len(train)), test_size=0.2)
meta_train = [train[i] for i in idx_meta_train]
train = [train[i] for i in idx_train]
validation = ChoiceOfPlausibleAlternatives._read_examples(
os.path.join(self.path, 'val.jsonl'), example_manager, labels)
test = ChoiceOfPlausibleAlternatives._read_examples(
os.path.join(self.path, 'test.jsonl'), example_manager, labels)
labels = np.array(labels, dtype=np.float64)
labels.setflags(write=False)
return RawData(
input_examples=train,
validation_input_examples=validation,
test_input_examples=test,
meta_train_input_examples=meta_train,
response_data={type(self).response_key(): KindData(ResponseKind.generic, labels)},
is_pre_split=True,
field_specs={type(self).response_key(): FieldSpec(is_sequence=False)})
| [
"[email protected]"
]
| |
96a0b1057d72d29888d23095ba8c569604278730 | 27be1bab83751703d94a5d2387cc316fcad5192e | /bin/thresholder.py | 9199f1cc0653afc4e891dba5b7188dbf489ce885 | []
| no_license | golamrabbii/rapido-env | b57ebad1f04dcaef60943f097bb976391428eb94 | 8c0f533a49f76e293af96822459f2cdc42c87def | refs/heads/main | 2023-05-22T01:06:26.180692 | 2021-06-07T12:09:25 | 2021-06-07T12:09:25 | 374,652,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | #!/home/rapido-live/rapido-env35/bin/python3.5
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
def __init__(self, master, im, value=128):
Frame.__init__(self, master)
self.image = im
self.value = value
self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
self.backdrop = ImageTk.PhotoImage(im)
self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
self.canvas.pack()
scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
resolution=1, command=self.update_scale, length=256)
scale.set(value)
scale.bind("<ButtonRelease-1>", self.redraw)
scale.pack()
# uncomment the following line for instant feedback (might
# be too slow on some platforms)
# self.redraw()
def update_scale(self, value):
self.value = eval(value)
self.redraw()
def redraw(self, event=None):
# create overlay (note the explicit conversion to mode "1")
im = self.image.point(lambda v, t=self.value: v >= t, "1")
self.overlay = ImageTk.BitmapImage(im, foreground="green")
# update canvas
self.canvas.delete("overlay")
self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
tags="overlay")
# --------------------------------------------------------------------
# main
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
| [
"[email protected]"
]
| |
e462ebb803471d1bf3b942ca4c3a191aa1d00f36 | 910590eef6ef4dbccd73f5a3c665e4e06ebd58a3 | /sklearn_porter/classifier/BernoulliNB/__init__.py | 7c61f2d7115ac5da83585a7cfac9a4815b6c2230 | [
"MIT"
]
| permissive | prashanthgedde/sklearn-porter | 9a6226dd443fd76171d275a84712bae7fe58339e | 70f2fc7e9e924b803c896035840c4c28c5c4007f | refs/heads/master | 2021-01-23T04:59:45.676275 | 2017-03-23T23:56:08 | 2017-03-23T23:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,295 | py | # -*- coding: utf-8 -*-
import numpy as np
from ...Template import Template
class BernoulliNB(Template):
"""
See also
--------
...
"""
SUPPORTED_METHODS = ['predict']
# @formatter:off
TEMPLATES = {
'java': {
'type': '{0}',
'arr': '{{{0}}}',
'arr[]': '{type}[] {name} = {{{values}}};',
'arr[][]': '{type}[][] {name} = {{{values}}};',
'indent': ' ',
},
}
# @formatter:on
def __init__(self, model, target_language='java', target_method='predict', **kwargs):
super(BernoulliNB, self).__init__(model, target_language=target_language, target_method=target_method, **kwargs)
self.model = model
# self.n_features = len(model.sigma_[0])
self.n_classes = len(model.classes_)
self.n_features = len(model.feature_log_prob_[0])
# jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
# jll += self.class_log_prior_ + neg_prob.sum(axis=1)
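        # The arrays built below (priors, negProbs, delProbs) are the three
        # pieces of that joint log-likelihood, so the ported predict method can
        # combine them with the input features instead of recomputing the logs.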
# Create class prior probabilities:
priors = [self.temp('type').format(repr(p)) for p in
model.class_log_prior_]
priors = ', '.join(priors)
self.priors = self.temp('arr[]').format(type='double', name='priors',
values=priors)
# Create probabilities:
# probs = []
# for prob in model.feature_log_prob_:
# tmp = [self.temp('type').format(repr(p)) for p in prob]
# tmp = self.temp('arr').format(', '.join(tmp))
# probs.append(tmp)
# probs = ', '.join(probs)
# self.pos_probs = self.temp('arr[][]').format(type='double',
# name='posProbs',
# values=probs)
# Create negative probabilities:
neg_prob = np.log(1 - np.exp(model.feature_log_prob_))
probs = []
for prob in neg_prob:
tmp = [self.temp('type').format(repr(p)) for p in prob]
tmp = self.temp('arr').format(', '.join(tmp))
probs.append(tmp)
probs = ', '.join(probs)
self.neg_probs = self.temp('arr[][]').format(type='double',
name='negProbs',
values=probs)
delta_probs = (model.feature_log_prob_ - neg_prob).T
probs = []
for prob in delta_probs:
tmp = [self.temp('type').format(repr(p)) for p in prob]
tmp = self.temp('arr').format(', '.join(tmp))
probs.append(tmp)
probs = ', '.join(probs)
self.del_probs = self.temp('arr[][]').format(type='double',
name='delProbs',
values=probs)
def export(self, class_name, method_name):
"""
Port a trained model to the syntax of a chosen programming language.
Parameters
----------
:param model : GaussianNB
An instance of a trained GaussianNB classifier.
"""
self.class_name = class_name
self.method_name = method_name
if self.target_method == 'predict':
return self.predict()
def predict(self):
"""
Port the predict method.
Returns
-------
:return: out : string
The ported predict method.
"""
return self.create_class(self.create_method())
def create_method(self):
"""
Build the model method or function.
Returns
-------
:return out : string
The built method as string.
"""
n_indents = 1 if self.target_language in ['java'] else 0
return self.temp('method.predict', n_indents=n_indents,
skipping=True).format(**self.__dict__)
def create_class(self, method):
"""
Build the model class.
Returns
-------
:return out : string
The built class as string.
"""
self.__dict__.update(dict(method=method))
return self.temp('class').format(**self.__dict__)
| [
"[email protected]"
]
| |
198860c44630bb080bcbf2da9c6818be18e5abfc | e755453c853ae400d94f562ad215b59166b63782 | /tests/trees_tests/strategies.py | bb194a8823fc239f882e0ca9573791bea619c9fe | [
"MIT"
]
| permissive | lycantropos/dendroid | 0cb3e276dd9c476b82b0b7a17c25c2e05616a993 | fd11c74a395eb791caf803c848805569869080f6 | refs/heads/master | 2023-04-07T11:07:55.550796 | 2023-03-27T00:46:03 | 2023-03-27T00:46:03 | 215,369,321 | 0 | 1 | MIT | 2020-09-24T05:02:02 | 2019-10-15T18:29:36 | Python | UTF-8 | Python | false | false | 4,990 | py | from functools import partial
from operator import attrgetter
from typing import (Callable,
List,
Tuple)
from hypothesis import strategies
from dendroid import (avl,
binary,
red_black,
splay)
from dendroid.hints import (Item,
Key)
from tests.strategies import (non_empty_values_lists_with_orders,
single_values_with_orders,
to_values_lists_with_orders,
two_or_more_values_with_orders,
values_lists_with_orders,
values_with_orders_strategies)
from tests.utils import (Node,
Strategy,
Tree,
ValuesListWithOrder,
ValuesListsWithOrder,
compose,
has_size_two_or_more)
factories = (strategies.sampled_from([binary.map_, avl.map_, red_black.map_,
splay.map_])
.map(partial(compose, attrgetter('tree'))))
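# Each sampled factory is wrapped so that calling it builds a map and returns
# that map's underlying tree, letting the strategies below construct trees for
# every backend (binary, avl, red_black, splay) in a uniform way.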
def values_list_with_order_to_items_list(values_list_with_order
: ValuesListWithOrder) -> List[Item]:
values_list, order = values_list_with_order
return ([(value, value) for value in values_list]
if order is None
else [(order(value), value) for value in values_list])
items_lists = (values_lists_with_orders
.map(values_list_with_order_to_items_list))
non_empty_items_lists = (non_empty_values_lists_with_orders
.map(values_list_with_order_to_items_list))
single_items = (single_values_with_orders
.map(values_list_with_order_to_items_list))
two_or_more_items = (two_or_more_values_with_orders
.map(values_list_with_order_to_items_list))
def to_tree(factory: Callable[..., Tree], items: List[Item]) -> Tree:
return factory(*items)
empty_trees = strategies.builds(to_tree, factories,
strategies.builds(list))
trees = strategies.builds(to_tree, factories, items_lists)
non_empty_trees = strategies.builds(to_tree, factories,
non_empty_items_lists)
trees_with_two_or_more_nodes = (strategies.builds(to_tree, factories,
two_or_more_items)
.filter(has_size_two_or_more))
def to_tree_with_key(factory: Callable[..., Tree],
items: List[Item]) -> Tuple[Tree, Key]:
*rest_items, (key, _) = items
return factory(*rest_items), key
empty_trees_with_keys = strategies.builds(to_tree_with_key,
factories, single_items)
trees_with_keys = strategies.builds(to_tree_with_key, factories,
non_empty_items_lists)
def to_non_empty_trees_with_their_keys(tree: Tree
) -> Strategy[Tuple[Tree, Key]]:
return strategies.tuples(strategies.just(tree),
strategies.sampled_from(tree.keys))
non_empty_trees_with_their_keys = (
non_empty_trees.flatmap(to_non_empty_trees_with_their_keys))
def to_non_empty_trees_with_their_nodes(tree: Tree
) -> Strategy[Tuple[Tree, Node]]:
return strategies.tuples(strategies.just(tree),
strategies.sampled_from(list(tree)))
non_empty_trees_with_their_nodes = (
non_empty_trees.flatmap(to_non_empty_trees_with_their_nodes))
def values_lists_with_order_to_items_lists(values_lists_with_order
: ValuesListsWithOrder
) -> Tuple[List[Item], ...]:
*values_lists, order = values_lists_with_order
return (tuple([(value, value) for value in values_list]
for values_list in values_lists)
if order is None
else tuple([(order(value), value) for value in values_list]
for values_list in values_lists))
def to_trees_tuple(factory: Callable[..., Tree],
items_lists: List[List[Item]]
) -> Tuple[Tree, ...]:
return tuple(factory(*items_list) for items_list in items_lists)
trees_pairs = strategies.builds(
to_trees_tuple,
factories,
(values_with_orders_strategies
.flatmap(partial(to_values_lists_with_orders,
sizes=[(0, None)] * 2))
.map(values_lists_with_order_to_items_lists)))
trees_triplets = strategies.builds(
to_trees_tuple,
factories,
(values_with_orders_strategies
.flatmap(partial(to_values_lists_with_orders,
sizes=[(0, None)] * 3))
.map(values_lists_with_order_to_items_lists)))
| [
"[email protected]"
]
| |
3909bfd514fec87c30a31354c2ca587966716117 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_23518.py | 096795b280f800dbed9b6a50ed385dffc35215e3 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | # Python multiprocessing within mpi
mpirun -np 1 --bind-to none junk.py
| [
"[email protected]"
]
| |
9711398bdfc3dc34609e4442b4e41256f5e89cec | 2f114ea4068b47949532955ef52d54478cb322fa | /venv/Lib/site-packages/sqlalchemy/dialects/mysql/__init__.py | 067d00386e1363160144b7d7f2d3abe719673497 | []
| no_license | tgkaiching/tgcb | b8f7fcd6761d3a772df13964527c89412ffa8045 | d0dec634191eb26fb5fa875e5ab608981b79f7f7 | refs/heads/master | 2022-10-09T17:25:50.604485 | 2018-08-16T00:55:56 | 2018-08-16T00:55:56 | 139,693,981 | 1 | 2 | null | 2022-10-02T04:37:09 | 2018-07-04T08:40:35 | Python | UTF-8 | Python | false | false | 1,185 | py | # mysql/__init__.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
| [
"[email protected]"
]
| |
9eed196e985af87474e283f2699e0539d16aef11 | c6414efe635bc5ea4680252f66dd24e2ce3bd087 | /test/test_eigen.py | a4653627b63d47f5e8e345cfcc9b7ce168516102 | []
| no_license | nicolasfauchereau/spectrum | 7e180dc625c8a9c486df5399246593acb7b69ca2 | de3fea857f2d8e883258b6999ec1a43a230602db | refs/heads/master | 2021-01-19T13:45:47.786187 | 2017-03-08T13:07:00 | 2017-03-08T13:07:00 | 88,107,023 | 2 | 1 | null | 2017-04-13T00:01:52 | 2017-04-13T00:01:52 | null | UTF-8 | Python | false | false | 560 | py | from spectrum import *
import numpy
#from spectrum import MINEIGVAL
from nose.tools import assert_almost_equal
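# MINEIGVAL estimates the minimum eigenvalue and the corresponding eigenvector
# of the Hermitian Toeplitz matrix defined by its first row (T0, T), iterating
# until the given tolerance is reached.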
def test_mineigval():
tol = 1e-10
T0=3
T = numpy.array([-2+.5j, .7-1j],dtype=complex)
eigval, eigvec = MINEIGVAL(T0 , T, tol)
print('Eigenvalue=',eigval)
print('Eigenvector=',eigvec)
assert_almost_equal(eigval, .488694078106)
expected_eigvec = numpy.array([ 0.13790622 -1.74155903e-02j , 0.21272177 -4.65701963e-18j, 0.13790622 +1.74155903e-02j])
    numpy.testing.assert_array_almost_equal(eigvec, expected_eigvec)
| [
"[email protected]"
]
| |
dfd5fe1cc7aa6d241684d6759ef0894b6ec15e4f | 87ced16167203723557f75dc005c3aaae7e3f404 | /online-judges/leetcode/making-a-large-island.py | 3207a4b14ad2c990a0669813a191359769dcac11 | []
| no_license | joao-conde/competitive-programming | 87e0c46f06bc017eea2701b9be860ee614c0e159 | 0d2d7375f0603142febab69707496d3b5b985054 | refs/heads/master | 2023-08-07T01:47:19.864827 | 2023-07-25T11:43:39 | 2023-07-25T11:43:39 | 132,962,490 | 6 | 3 | null | 2020-04-20T23:15:25 | 2018-05-10T22:55:01 | C++ | UTF-8 | Python | false | false | 1,802 | py | # https://leetcode.com/problems/making-a-large-island/
from collections import defaultdict
class Solution:
def find_island(self, grid, si, sj, seen):
points = set()
if (si, sj) in seen:
return points
if si < 0 or sj < 0:
return points
if si >= len(grid) or sj >= len(grid):
return points
if grid[si][sj] == 0:
return points
seen.add((si, sj))
points.add((si, sj))
points.update(self.find_island(grid, si + 1, sj, seen))
points.update(self.find_island(grid, si - 1, sj, seen))
points.update(self.find_island(grid, si, sj + 1, seen))
points.update(self.find_island(grid, si, sj - 1, seen))
return points
def largestIsland(self, grid: list[list[int]]) -> int:
largest = 0
seen = set()
islands = defaultdict(lambda: set())
for i in range(len(grid)):
for j in range(len(grid)):
island = self.find_island(grid, i, j, seen)
largest = max(largest, len(island))
for si, sj in island:
islands[(si, sj)] = island
for i in range(len(grid)):
for j in range(len(grid)):
if grid[i][j] == 1:
continue
flipped = set().union(
islands[(i + 1, j)],
islands[(i - 1, j)],
islands[(i, j + 1)],
islands[(i, j - 1)],
)
largest = max(largest, len(flipped) + 1)
return largest
# Tests
solver = Solution()
assert solver.largestIsland([[1, 0], [0, 1]]) == 3
assert solver.largestIsland([[1, 1], [1, 0]]) == 4
assert solver.largestIsland([[1, 1], [1, 1]]) == 4
| [
"[email protected]"
]
| |
b1bf4084761434d71fa9e9b667adbfd207cd9ffd | 51108a50ffb48ad154f587c230045bb783f22240 | /bfgame/attacks/melee.py | d8a34595ca3b6e66cc6ad08e789a13ab8b479c5a | [
"MIT"
]
| permissive | ChrisLR/BasicDungeonRL | c90bd0866c457557cccbad24e14689d5d6db7b00 | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | refs/heads/master | 2021-06-15T13:56:53.888646 | 2019-08-05T16:33:57 | 2019-08-05T16:33:57 | 104,269,987 | 3 | 0 | MIT | 2019-08-05T16:28:23 | 2017-09-20T21:35:19 | Python | UTF-8 | Python | false | false | 1,899 | py | import inspect
from bflib import dice
from core.attacks.base import Attack
class MeleeAttack(Attack):
base_attack = None
needs_weapon = False
@classmethod
def make_melee_hit_roll(cls, attacker, defender, sneak_attack=False):
target_ac = defender.combat.armor_class
if target_ac is None:
target_ac = 0
modifier = 0
modifier += attacker.combat.attack_bonus
modifier += attacker.stats.strength_modifier if attacker.stats else 0
# TODO If attacker is behind defender, +2 to hit roll
# TODO If attacker invisible, +4
# TODO If defender invisible, -4
# TODO If defender is pinned, +
if sneak_attack:
modifier += 4
if not defender.health.conscious:
modifier += 8
roll = dice.D20.manual_roll_total(1)
if roll == 1:
return False
if roll == 20:
# TODO Some defenders CANNOT be hit, it should still fail.
return True
roll += modifier
if roll >= target_ac:
# TODO Some defenders CANNOT be hit, it should still fail.
return True
else:
return False
@classmethod
def make_melee_damage_roll(cls, attacker, damage_dice, other_modifier=0, sneak_attack=False):
total_damage = 0
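        # damage_dice can be either a dice class (roll one die of that type)
        # or an already-built dice instance (roll it as given).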
if inspect.isclass(damage_dice):
total_damage += damage_dice.manual_roll_total(1)
else:
total_damage += damage_dice.roll_total()
total_damage += attacker.stats.strength_modifier if attacker.stats else 0
total_damage += other_modifier
if total_damage <= 0:
if sneak_attack:
return 2
else:
return 1
else:
if sneak_attack:
return total_damage * 2
else:
return total_damage
| [
"[email protected]"
]
| |
c81c476ebf8843aad428297899a3c6fe89568a1a | 1ebf64e6526b050bb770ac401ba21d4d44bca495 | /Testing/demo files/veQ/marl.py | b62f5c8b8cf79ccf3d1a660a0ae806ec6b4e4875 | []
| no_license | sihaanssr/BE-major-MARL | 1b928a44cc38a9319d512b0a89d767ece9747fd0 | 8427c7ffbabd0bae085bf1cf4259210b619d6f20 | refs/heads/main | 2023-05-05T02:49:25.179793 | 2021-05-28T13:45:12 | 2021-05-28T13:45:12 | 368,431,309 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | '''
Created on 7/10/2016
@author: CarolinaHiguera
'''
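# The exec(compile(open(...))) calls below are the Python 3 equivalent of
# execfile(): each helper script runs in this module's namespace so its
# globals become available here.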
import var
exec(compile(open("./var.py", "rb").read(), "./var.py", 'exec'))
# import arrivalRateGen
# exec(compile(open("./arrivalRateGen.py", "rb").read(), "./arrivalRateGen.py", 'exec'))
# import fun
# exec(compile(open("./fun.py", "rb").read(), "./fun.py", 'exec'))
# import train2_RL
# exec(compile(open("./train2_RL.py", "rb").read(), "./train2_RL.py", 'exec'))
import test2_RL
exec(compile(open("./test2_RL.py", "rb").read(), "./test2_RL.py", 'exec'))
global baselineMean, baselineMedian, baselineMin
#=========== DISCRETIZE SPACE STATE FOR EACH AGENT
#arrivalRateGen.createPolyFlow()
#fun.learnDiscretization(var.totalDaysObs)
#fun.writeDataClusters()
#fun.plotClusterHistograms()
#=========== TRAINING PROCESS
#print('---------- Training --------------')
#train2_RL.train()
#=========== TESTING PROCESS
print('---------- Testing ---------------')
test2_RL.test()
print('----------- END -----------')
| [
"[email protected]"
]
| |
850456b7d377656d9b88254f9b4014dc903358f5 | 6e4e6b64c035881f1cff39db616b0a80e1568c51 | /fes2016QualA/q3.py | 3f46237051c316ebde9c9bd3a46679ec69f95363 | []
| no_license | Lischero/Atcoder | f7471a85ee553e3ae791e3e5670468aea1fa53cc | f674d6a20a56eebdafa6d50d5d2d0f4030e5eace | refs/heads/master | 2020-05-21T16:23:36.095929 | 2018-10-18T04:27:55 | 2018-10-18T04:27:55 | 60,671,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding:utf-8 -*-
s = list(input())
k = int(input())
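# Greedy: for each character, spend operations to wrap it around to 'a' when
# the remaining budget k covers the 26 - (c - 'a') steps needed; any leftover
# operations are dumped on the last character modulo 26.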
for tmp in range(len(s)):
if k == 0:
break
a = 26-(ord(s[tmp]) - ord('a'))
if s[tmp] != 'a' and k >= a:
k -= a
s[tmp] = 'a'
else:
pass
if k > 0:
s[len(s)-1] = chr((ord(s[len(s)-1])+k%26))
print(''.join(s))
| [
"[email protected]"
]
| |
257bc81119c88a14a85b2ca30ba61dca1c79409b | 8c1b8ef19c55de8da1341995053769b03af433a4 | /code/processing/growth_curves_plate_reader/20200708_r1_O1_T_beta-thujaplicin/growth_plate_reader_analysis.py | fa5c3b78a45ecd67ec2238eb1e7b5b006c20f759 | [
"MIT",
"CC-BY-4.0",
"CC-BY-ND-4.0",
"CC-BY-ND-3.0"
]
| permissive | mrazomej/evo_mwc | badb78238a129cc0c863af3ca424691a188cb87b | b69c800c5518d906cd2c65334c6feffdbab5acf1 | refs/heads/master | 2023-07-20T04:13:53.025102 | 2021-05-19T01:57:59 | 2021-05-19T01:57:59 | 185,700,015 | 0 | 1 | MIT | 2023-07-06T21:42:28 | 2019-05-09T00:49:59 | Jupyter Notebook | UTF-8 | Python | false | false | 7,762 | py | # -*- coding: utf-8 -*-
# %%
import numpy as np
import pandas as pd
import string
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import evo_mwc.viz
import evo_mwc.fitderiv
import seaborn as sns
import statsmodels.api as sm
import git
# Import libraries necessary for Bayesian analysis
import cmdstanpy
import arviz as az
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define directory where stan file exists
standir = f"{homedir}/evo_mwc/stan_code/"
matplotlib.use('Agg')
evo_mwc.viz.pboc_style_mpl()
# Find date
workdir = os.getcwd().split('/')[-1]
DATE = int(workdir.split('_')[0])
RUN_NO = int(workdir.split('_')[1][-1])
# Define parameters to group strains by
GROUP = ['strain', 'neg_selection']
# Define if you only want to plot existing results
REPLOT = False
# %%
# Load the data.
data = pd.read_csv(f'output/{DATE}_r{RUN_NO}_growth_plate.csv')
# Generate a dictionary of the mean blank at each time point.
blank_vals = {t: val['OD600'].mean() for t, val in
data[data['strain'] == 'blank'].groupby(['time_min'])}
# Add mean blank values for each time point to the dataframe,
# as well as background subtracted OD values.
for k, v in blank_vals.items():
data.loc[data['time_min'] == k, 'blank_val'] = v
data['OD_sub'] = data['OD600'] - data['blank_val']
# %%
# Compute growth rate for individual well data
# Group data by well and strain
# NOTE: The strain grouping is to remove blanks from analysis
data_group = data.groupby(['well', 'strain'])
# List groups
groups = [group for group, data in data_group]
# Initialize data frame to save derivatives
df_gp = pd.DataFrame([])
# Check if the analysis should be done
if (not REPLOT):
print("Compiling Stan program")
sm = cmdstanpy.CmdStanModel(
stan_file=f"{standir}/gp_growth_rate_prior_deriv.stan"
)
# Loop through groups
for group, df in data_group:
# Check if the group is not a blank
if group[1] == 'blank':
continue
print(group)
# Build input as required by the Gaussian process function.
# Define time points were data was measured
t = df["time_min"].values
# Define number of time points
N = len(t)
# Define OD measurements
y = df["OD600"].values
# Define where PPC samples will be taken
t_predict = t
# Define number of points in PPC
N_predict = len(t_predict)
# Pack parameters in dictionary
data = {
"N" : N, # number of time points
"t": t, # time points where data was evaluated
"y": y, # data's optical density
"N_predict": N_predict, # number of datum in PPC
"t_predict": t_predict, # time points where PPC is evaluated
"alpha_param": [0, 1], # parameters for alpha prior
"sigma_param": [0, 1], # parameters for sigma prior
"rho_param": [1000, 1000], # parameters for rho prior
}
print(f"Sampling GP for well {group[0]}")
samples = sm.sample(
data=data,
chains=6,
iter_sampling=400,
show_progress=False,
)
print("Done!")
samples = az.from_cmdstanpy(posterior=samples)
# Extract GP OD data, stacking together chains and draws as a single
# dimension
data_ppc = samples.posterior["y_predict"].stack(
{"sample": ("chain", "draw")}
).transpose("sample", "y_predict_dim_0")
# Append inferred OD columns
df = df.assign(
gp_OD600 = np.median(data_ppc.squeeze().values, axis=0),
gp_OD600_std = np.std(data_ppc.squeeze().values, axis=0),
)
# Extract GP derivative data, stacking together chains and draws as a
# single dimension
data_ppc = samples.posterior["dy_predict"].stack(
{"sample": ("chain", "draw")}
).transpose("sample", "dy_predict_dim_0")
# Append inferred derivative columns
df = df.assign(
gp_growth_rate = np.median(data_ppc.squeeze().values, axis=0),
gp_growth_rate_std = np.std(data_ppc.squeeze().values, axis=0),
)
# Extract GP doubling time data, stacking together chains and draws as a
# single dimension
data_ppc = samples.posterior["doubling_time"].stack(
{"sample": ("chain", "draw")}
).transpose("sample", "doubling_time_dim_0")
# Append inferred derivative columns
df = df.assign(
gp_doubling_time = np.median(data_ppc.squeeze().values, axis=0),
gp_doubling_time_std = np.std(data_ppc.squeeze().values, axis=0),
)
# Append dataframe
df_gp = pd.concat([df_gp, df], ignore_index=True)
# Export result
df_gp.to_csv(f'output/{DATE}_r{RUN_NO}_gp_per_well.csv',
index=False)
# Read derivatives
df_gp = pd.read_csv(f'output/{DATE}_r{RUN_NO}_gp_per_well.csv')
# group derivatives
df_gp_group = df_gp.groupby(['well', 'strain'])
# Print growth curve and its derivative for each group
# Initialize multi-page PDF
with PdfPages('output/growth_rate_per_well.pdf') as pdf:
# Loop through groups
for group in groups:
# check that there are no blanks
if group[1] == 'blank':
continue
# Initialize figure
fig, ax = plt.subplots(2, 1, figsize=(4, 4), sharex=True)
# Extract curve data
growth_data = data_group.get_group(group)
rate_data = df_gp_group.get_group(group)
# Plot plate reade data
ax[0].plot(growth_data.time_min, growth_data.OD600, lw=0,
marker='.')
# Plot growth rate with credible region
ax[1].plot(rate_data.time_min, rate_data.gp_growth_rate)
ax[1].fill_between(rate_data.time_min,
rate_data.gp_growth_rate +
rate_data.gp_growth_rate_std,
rate_data.gp_growth_rate -
rate_data.gp_growth_rate_std,
alpha=0.5)
# Label plot
ax[0].set_title(str(group))
ax[0].set_ylabel(r'OD$_{600}$')
ax[1].set_ylabel(r'growth rate (min$^{-1}$)')
ax[1].set_xlabel('time (min)')
plt.tight_layout()
pdf.savefig()
plt.close()
# Make summary figure of growth rates.
# find number of rows and columns from layout
layout = pd.read_excel(f'./{DATE}_plate_layout.xlsx', sheet_name='well',
header=None).values
layout_shape = layout.shape
# Initlaize plot
fig, ax = plt.subplots(
layout_shape[0],
layout_shape[1],
figsize=(8, 4),
sharex=True,
sharey=True
)
# Loop through each well
for group, df in df_gp_group:
# Find corresponding row and column of plot
r, c = [int(x) for x in np.where(layout == group[0])]
# Set plot axis
# Plot growth rate
ax[r][c].plot(df.sort_values('time_min').time_min,
df.sort_values('time_min').gp_growth_rate)
# Set ylim for plot
ax[0][0].set_ylim([
df.gp_growth_rate.min() - 0.001,
df.gp_growth_rate.max() + 0.001
])
# Remove axis from all plots
ax = ax.ravel() # ravel list of axis
# Loop through axis
for a in ax:
a.get_xaxis().set_visible(False)
a.get_yaxis().set_visible(False)
fig.suptitle(f'{DATE}_r{RUN_NO} whole plate growth rates', y=0.95)
plt.savefig(f'output/growth_rate_summary.png',
bbox_inches='tight')
| [
"[email protected]"
]
| |
85bd3c963b228d902b75d1155a4d7c4abe708fdd | 586383ed657389cc67ca6c822b3ebd7e91e4d5a9 | /app_page_cap_img/models.py | 38ed270aeb636e415f69df0ba512aa59a72cbf83 | []
| no_license | idelfrides/app_capturepage_django | d510e824ca57e598ec7c8bcc2e9e7c7fa04099f6 | 6ad6d87e76deb6075195ee2117c0974a6b480b5f | refs/heads/master | 2022-06-14T17:44:15.945803 | 2022-06-07T20:30:18 | 2022-06-07T20:30:18 | 225,614,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,651 | py | from distutils.command.config import config
from django.db import models
from django.conf import settings
from .managers import Manager
POSITION_CHOICES = (
('E', 'Esquerda'),
('D', 'Direita'),
('C', 'Centralizado'),
)
TYPE_MIDEA_CHOICES = (
('I', 'Imagem'),
('V', 'Vídeo')
)
class PageCapImage(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
default=1,
on_delete=models.CASCADE
)
material = models.CharField(
max_length=100,
default='E-book vendas online'
)
headline = models.TextField(
default='Coloque sua Headline aqui.'
)
copy_descricao = models.TextField(
default='Sua Copy descrição aqui.'
)
image = models.ImageField(
upload_to='images/',
null=True,
blank=True
)
update = models.DateTimeField(
auto_now=True,
auto_now_add=False
)
timestamp = models.DateTimeField(
auto_now=False,
auto_now_add=True
)
def __str__(self):
return self.material
class Meta:
verbose_name_plural = 'Material'
class Configuracao(models.Model):
tipo_media = models.CharField(
choices=TYPE_MIDEA_CHOICES,
default='Imagem',
max_length=20
)
media_position = models.CharField(
choices=POSITION_CHOICES,
default='Esquerda',
max_length=20
)
update = models.DateTimeField(
auto_now=True,
auto_now_add=False
)
timestamp = models.DateTimeField(
auto_now=False,
auto_now_add=True
)
def __str__(self):
config_ = "Configurações"
return config_
class Meta:
verbose_name_plural = 'Configuracoes'
class Media(models.Model):
imagem = models.ImageField(upload_to='images/')
video = models.FileField(
upload_to='videos/',
null=True,
blank=True
)
arquivo_pdf = models.FileField(upload_to='files/')
update = models.DateTimeField(auto_now=True, auto_now_add=False)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
def __str__(self):
# man = Manager()
# c = man.set_count(1)
nome = "Media" # + str(self.count)
return nome
class Meta:
verbose_name_plural = 'Medias'
# def get_absolute_url(self):
# return "app_name/%s/" %(self.id)
class LeadsEmail(models.Model):
email = models.EmailField(
default='[email protected]'
)
timestamp = models.DateTimeField(auto_now=True)
def __str__(self):
return self.email | [
"[email protected]"
]
| |
e4569c644f81db0fc0225544d6c4b3d580442a12 | e5329001263e67a4d3c13d57bb91f2502280e206 | /InvTL/lm_py/py/apigen/source/html.py | a63ac7682d1d66ccb1f8647f5feb7f48d5f1d7fc | [
"MIT"
]
| permissive | yanhongliu/DARLAB | d9432db6e005a39e33501d7ffffe6e648b95b3fc | f739318c9620b44ef03d155f791c7ed4111d80fa | refs/heads/master | 2021-05-27T19:58:58.458846 | 2014-02-04T12:09:26 | 2014-02-04T12:09:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,751 | py |
""" html - generating ad-hoc html out of source browser
"""
import py
from py.xml import html, raw
from compiler import ast
import time
from py.__.apigen.source.color import Tokenizer, PythonSchema
from py.__.apigen.source.browser import parse_path
class CompilationException(Exception):
""" raised when something goes wrong while importing a module """
class HtmlEnchanter(object):
def __init__(self, mod):
self.mod = mod
self.create_caches()
def create_caches(self):
mod = self.mod
linecache = {}
for item in mod.get_children():
linecache[item.firstlineno] = item
self.linecache = linecache
def enchant_row(self, num, row):
        # add some information to the row, like functions defined in that
# line, etc.
try:
item = self.linecache[num]
            # XXX: this should not be an assertion but rather a check; we want
            # to know if stuff is working
pos = row.find(item.name)
assert pos != -1
end = len(item.name) + pos
chunk = html.a(row[pos:end], href="#" + item.listnames(),
name=item.listnames())
return [row[:pos], chunk, row[end:]]
except KeyError:
return [row] # no more info
def prepare_line(text, tokenizer, encoding):
""" adds html formatting to text items (list)
only processes items if they're of a string type (or unicode)
"""
ret = []
for item in text:
if type(item) in [str, unicode]:
tokens = tokenizer.tokenize(item)
for t in tokens:
if not isinstance(t.data, unicode):
data = unicode(t.data, encoding)
else:
data = t.data
if t.type in ['keyword', 'alt_keyword', 'number',
'string', 'comment']:
ret.append(html.span(data, class_=t.type))
else:
ret.append(data)
else:
ret.append(item)
return ret
def prepare_module(path, tokenizer, encoding):
path = py.path.local(path)
try:
mod = parse_path(path)
except:
# XXX don't try to catch SystemExit: it's actually raised by one
# of the modules in the py lib on import :(
exc, e, tb = py.std.sys.exc_info()
del tb
raise CompilationException('while compiling %s: %s - %s' % (
path, e.__class__.__name__, e))
lines = [unicode(l, encoding) for l in path.readlines()]
enchanter = HtmlEnchanter(mod)
ret = []
for i, line in enumerate(lines):
text = enchanter.enchant_row(i + 1, line)
if text == ['']:
            text = [raw(' ')]
else:
text = prepare_line(text, tokenizer, encoding)
ret.append(text)
return ret
class HTMLDocument(object):
def __init__(self, encoding, tokenizer=None):
self.encoding = encoding
self.html = root = html.html()
self.head = head = self.create_head()
root.append(head)
self.body = body = self.create_body()
root.append(body)
self.table, self.tbody = table, tbody = self.create_table()
body.append(table)
if tokenizer is None:
tokenizer = Tokenizer(PythonSchema)
self.tokenizer = tokenizer
def create_head(self):
return html.head(
html.title('source view'),
html.style("""
body, td {
background-color: #FFF;
color: black;
font-family: monospace, Monaco;
}
table, tr {
margin: 0px;
padding: 0px;
border-width: 0px;
}
a {
color: blue;
font-weight: bold;
text-decoration: none;
}
a:hover {
color: #005;
}
.lineno {
text-align: right;
color: #555;
width: 3em;
padding-right: 1em;
border: 0px solid black;
border-right-width: 1px;
}
.code {
padding-left: 1em;
white-space: pre;
}
.comment {
color: purple;
}
.string {
color: #777;
}
.keyword {
color: blue;
}
.alt_keyword {
color: green;
}
""", type='text/css'),
)
def create_body(self):
return html.body()
def create_table(self):
table = html.table(cellpadding='0', cellspacing='0')
tbody = html.tbody()
table.append(tbody)
return table, tbody
def add_row(self, lineno, text):
if text == ['']:
            text = [raw(' ')]
else:
text = prepare_line(text, self.tokenizer, self.encoding)
self.tbody.append(html.tr(html.td(str(lineno), class_='lineno'),
html.td(class_='code', *text)))
def __unicode__(self):
# XXX don't like to use indent=0 here, but else py.xml's indentation
# messes up the html inside the table cells (which displays formatting)
return self.html.unicode(indent=0)
def create_html(mod):
# out is some kind of stream
#*[html.tr(html.td(i.name)) for i in mod.get_children()]
lines = mod.path.open().readlines()
enchanter = HtmlEnchanter(mod)
enc = get_module_encoding(mod.path)
doc = HTMLDocument(enc)
for i, row in enumerate(lines):
row = enchanter.enchant_row(i + 1, row)
doc.add_row(i + 1, row)
return unicode(doc)
style = html.style("""
body, p, td {
background-color: #FFF;
color: black;
font-family: monospace, Monaco;
}
td.type {
width: 2em;
}
td.name {
width: 30em;
}
td.mtime {
width: 13em;
}
td.size {
        text-align: right;
}
""")
def create_dir_html(path, href_prefix=''):
h = html.html(
html.head(
html.title('directory listing of %s' % (path,)),
style,
),
)
body = html.body(
html.h1('directory listing of %s' % (path,)),
)
h.append(body)
table = html.table()
body.append(table)
tbody = html.tbody()
table.append(tbody)
items = list(path.listdir())
items.sort(key=lambda p: p.basename)
items.sort(key=lambda p: not p.check(dir=True))
for fpath in items:
tr = html.tr()
tbody.append(tr)
td1 = html.td(fpath.check(dir=True) and 'D' or 'F', class_='type')
tr.append(td1)
href = fpath.basename
if href_prefix:
href = '%s%s' % (href_prefix, href)
if fpath.check(dir=True):
href += '/'
td2 = html.td(html.a(fpath.basename, href=href), class_='name')
tr.append(td2)
td3 = html.td(time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(fpath.mtime())), class_='mtime')
tr.append(td3)
if fpath.check(dir=True):
size = ''
unit = ''
else:
size = fpath.size()
unit = 'B'
for u in ['kB', 'MB', 'GB', 'TB']:
if size > 1024:
size = round(size / 1024.0, 2)
unit = u
td4 = html.td('%s %s' % (size, unit), class_='size')
tr.append(td4)
return unicode(h)
def create_unknown_html(path):
h = html.html(
html.head(
html.title('Can not display page'),
style,
),
html.body(
html.p('The data URL (%s) does not contain Python code.' % (path,))
),
)
return h.unicode()
_reg_enc = py.std.re.compile(r'coding[:=]\s*([-\w.]+)')
def get_module_encoding(path):
if hasattr(path, 'strpath'):
path = path.strpath
if path[-1] in ['c', 'o']:
path = path[:-1]
fpath = py.path.local(path)
fp = fpath.open()
lines = []
try:
# encoding is only allowed in the first two lines
for i in range(2):
lines.append(fp.readline())
finally:
fp.close()
match = _reg_enc.search('\n'.join(lines))
if match:
return match.group(1)
return 'ISO-8859-1'
| [
"[email protected]"
]
| |
07bb340293a0865e794ea8be4102ebb9ec0411d2 | b1e785280635716d50d68d628d0d76b20dc4c386 | /game_tracker/wsgi.py | 3cf1c1ee788b8014eb824d61ad71b6c4b652404d | []
| no_license | CoreyWilson319/game_tracker | 17f684c59a466bcbc47a3940a434bd1cbba78c3b | e1f8962159f87d603bb0d928633876509ce76bdd | refs/heads/main | 2023-02-21T13:27:44.377667 | 2021-01-27T14:17:04 | 2021-01-27T14:17:04 | 331,335,068 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for game_tracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'game_tracker.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
f3b2527f458a0a0722b5b65fafc85ecc6248a55a | f54070cd3048a3645cb25f301592a904d387a1c9 | /python_prgrams/testpython/func5.py | a2728d09a5097957477c0ba6bffc8d4ef0ec27dd | []
| no_license | mak705/Python_interview | 02bded60417f1e6e2d81e1f6cde6961d95da2a8e | aff2d6018fd539dbcde9e3a6b3f8a69167ffca0d | refs/heads/master | 2020-03-22T21:03:34.018919 | 2019-11-15T08:51:34 | 2019-11-15T08:51:34 | 140,653,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | def even(x):
if x % 2 == 0:
print "Yes"
else:
print "No"
| [
"[email protected]"
]
| |
71fe7e8212514b76527330fe88222c93e5297c78 | 157d2a2f4031c58e5504bcbac5348ff53883facc | /rDj48/enroll/forms.py | 9b2ec9d69b4dcf64f0fc4ebc028105648ccddbd1 | []
| no_license | optirg-39/Django_gekSh | d78b635fd3ee88addd084b68ec35c6284adfb55c | 1129a6df35c110dfeeeaaf1a76b2ebc192a5f1ce | refs/heads/master | 2023-04-15T13:09:03.067099 | 2021-04-26T12:15:35 | 2021-04-26T12:15:35 | 352,018,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django import forms
from .models import User
from django.forms import ModelForm
class UserForm(forms.ModelForm):
class Meta:
model=User
fields=['name','email','password']
| [
"[email protected]"
]
| |
750ef2857f71cdbfb166b0d44ab0fb803c25890c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_2/232.py | 75a478a83dc3509f3ffb15597d23d5c54bbb573b | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,768 | py | #!/usr/bin/env python
"train timetable"
import sys
class Event(dict):
"Event"
LOC_A, LOC_B = "A", "B"
TYP_DEP, TYP_ARR = '1departure', '0arrival'
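    # The numeric prefixes make arrivals sort before departures at equal times
    # (see Event.cmp below), so a turned-around train is counted as available
    # for a departure scheduled at the same minute.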
def __init__(self, time, orig, type, turnaround):
super(Event, self).__init__()
self['time'] = self.parse_time(time)
if type == Event.TYP_ARR:
self['time'] += turnaround
self['orig'] = orig
self['dest'] = self.other_location(orig)
self['type'] = type
def other_location(self, loc):
if loc == Event.LOC_A:
return Event.LOC_B
return Event.LOC_A
def parse_time(self, time):
hours, mins = time.strip().split(':')
hours, mins = int(hours), int(mins)
return hours * 60 + mins
@staticmethod
def cmp(ev_a, ev_b):
if ev_a['time'] == ev_b['time']:
return cmp(ev_a['type'], ev_b['type'])
return cmp(ev_a['time'], ev_b['time'])
def read_input(finp):
N = int(finp.readline())
for n in xrange(N):
T = int(finp.readline())
NA, NB = finp.readline().strip().split()
NA, NB = int(NA), int(NB)
events = []
for na in xrange(NA):
departure, arrival = finp.readline().strip().split()
events.append(Event(departure, Event.LOC_A, Event.TYP_DEP, T))
events.append(Event(arrival, Event.LOC_A, Event.TYP_ARR, T))
for nb in xrange(NB):
departure, arrival = finp.readline().strip().split()
events.append(Event(departure, Event.LOC_B, Event.TYP_DEP, T))
events.append(Event(arrival, Event.LOC_B, Event.TYP_ARR, T))
if False: print n, na, nb
events.sort(cmp=Event.cmp)
#from com.moveki import progbase
#progbase.yaml_dump('-', events)
needed_in = {
Event.LOC_A : 0,
Event.LOC_B : 0,
}
max_needed_in = {
Event.LOC_A : 0,
Event.LOC_B : 0,
}
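        # Sweep the events in time order: every departure needs one more train
        # at its origin, and every arrival (already shifted by the turnaround
        # time) frees a train at its destination; record the peak need per station.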
for e in events:
if e['type'] == Event.TYP_ARR:
needed_in[e['dest']] -= 1
elif e['type'] == Event.TYP_DEP:
needed_in[e['orig']] += 1
if needed_in[e['orig']] > max_needed_in[e['orig']]:
max_needed_in[e['orig']] = needed_in[e['orig']]
#print "-------------"
#progbase.yaml_dump('-', e)
#progbase.yaml_dump('-', needed_in)
else:
raise RuntimeError("oops")
max_needed_in['ncase'] = n + 1
print "Case #%(ncase)d: %(A)d %(B)d" % (max_needed_in)
#progbase.yaml_dump('-', max_needed_in)
def main():
read_input(sys.stdin)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
d2f0fb47039e9ea4a28cea4462aa8c961e1c6681 | 2bc7659be83178c43b1592efbe1d79c62fc4fa36 | /Python/1156 홀수 짝수 구별.py | 01b65d2728a07c3e51224d626ee60cbb6a70d8f1 | []
| no_license | KIMSUBIN17/Code-Up-Algorithm | ede6f443fcf640ecf58282c582da43e124ca44af | 831180c28d234366a1d3cf118bd2a615dc404f00 | refs/heads/master | 2023-07-22T21:42:06.990542 | 2021-09-05T08:36:32 | 2021-09-05T08:36:32 | 286,932,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | n = int(input())
if n % 2 == 0:
print('even')
else :
print('odd')
| [
"[email protected]"
]
| |
14d1fcc9d5916920ca2b2c816f8c4fd4d335dcf8 | ed44c039862c6bde4c790c29f49d4e1012ae04ff | /sep11/venv/bin/rst2xml.py | e63dbdf9839a7860856a699d0d912f53ddf6e6f3 | []
| no_license | ravijaya/sep13 | 983bc2fc62a03c607478400dbdf9f91acc028b5d | fca95700ec9e3b56fc99621396d72ae411b3be92 | refs/heads/master | 2022-09-19T05:04:29.422670 | 2019-09-13T13:17:21 | 2019-09-13T13:17:21 | 208,267,991 | 0 | 0 | null | 2022-09-13T23:02:52 | 2019-09-13T13:15:15 | Python | UTF-8 | Python | false | false | 646 | py | #!/home/ravijaya/Trainings/Python-Devops/sep11/venv/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| [
"[email protected]"
]
| |
65d8dad340c685fb2a9eb0d09bd3e8560bf36bc5 | fd02e8924ba325f2a62bbf97e460740a65559c74 | /PythonStart/Blackhorse/HM_Class/384封装案例-需求分析01.py | b7dc1e8c2a808237cebcf1689430d8d72663d433 | []
| no_license | ShiJingChao/Python- | 51ee62f7f39e0d570bdd853794c028020ca2dbc2 | 26bc75c1981a1ffe1b554068c3d78455392cc7b2 | refs/heads/master | 2020-07-08T00:05:16.532383 | 2019-10-14T15:19:49 | 2019-10-14T15:19:49 | 203,512,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | # CLASS 384——385-386面向对象封装案例
# 1. Encapsulation is one of the defining features of object-oriented programming
# 2. The first step of OOP -- encapsulate attributes and methods inside an abstract class
# 3. The outside world uses the class to create objects, then has the objects call methods
# 4. The details of an object's methods are encapsulated inside the class
# An object's attribute can itself be an object created by another class
# 01 Soldier assault (士兵突击) example
class Gun:
def __init__(self, model):
        # 1. the gun's model
self.model = model
        # 2. number of bullets
self.bullet_count = 0
def add_bullet(self, count):
self.bullet_count += count
    def shoot(self):
        # an empty gun cannot fire -- report it and bail out
        if self.bullet_count <= 0:
            print("%s没有子弹,请加子弹" % self.model)
            return
        self.bullet_count -= 1
        print("%s哒哒哒,剩余子弹%d" % (self.model, self.bullet_count))
class Soldier():
def __init__(self, name):
self.name = name
self.gun = None
# 1. create a gun object
ak47 = Gun("AK47")
ak47.add_bullet(50)
ak47.shoot()
tuoni = Soldier("托尼")
tuoni.gun = ak47
print(tuoni.gun)
# 386 -- creating the initializer method
# Develop the Soldier class
# Assume every new recruit starts without a gun
# Defining an attribute without an initial value
# When defining an attribute, if you do not yet know the initial value, set it to None
# The None keyword means "nothing at all"
# It can represent an empty object with no methods or attributes; it is a special constant
# None can be assigned to any variable
# Requirements for the fire method
# 1. check whether the soldier has a gun; without a gun there is no way to charge
# 2. shout a battle cry
# 3. load bullets
# 4. shoot
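# --- Illustrative sketch (not part of the original lesson code) ---
# One way the fire() requirements above could be implemented; the subclass name,
# the method name `fire`, and the messages are assumptions added for illustration.
class SoldierWithFire(Soldier):
    def fire(self):
        # 1. a soldier without a gun cannot charge
        if self.gun is None:
            print("%s has no gun and cannot charge" % self.name)
            return
        # 2. shout a battle cry
        print("Charge!!! [%s]" % self.name)
        # 3. load bullets into the gun
        self.gun.add_bullet(50)
        # 4. shoot
        self.gun.shoot()
# quick demo reusing the ak47 object created above
xusanduo = SoldierWithFire("许三多")
xusanduo.gun = ak47
xusanduo.fire()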
| [
"[email protected]"
]
| |
ae52c62a6fcd0663134cd4a812fc27643c009470 | 2a120a15321101c09fbc2016f28f49662a64a692 | /Codes/AdS/First try/pyeq2/ExtendedVersionHandlers/ExtendedVersionHandler_Offset.py | 1a22f5514e1488978c538a26039acc3d71bd853e | [
"BSD-2-Clause"
]
| permissive | afarahi/QFT | ba7abd1f3520faa31a521d1db4ce313e684e478e | d8e676b8e80033b69d7df99f5ed2897273a8055f | refs/heads/master | 2021-01-16T18:21:11.210549 | 2012-09-16T11:54:05 | 2012-09-16T11:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | # pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2012 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: [email protected]
# web: http://zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
# Version info: $Id: ExtendedVersionHandler_Offset.py 21 2012-03-10 19:48:51Z [email protected] $
import pyeq2
import IExtendedVersionHandler
class ExtendedVersionHandler_Offset(IExtendedVersionHandler.IExtendedVersionHandler):
def AssembleDisplayHTML(self, inModel):
return inModel._HTML + " + Offset"
def AssembleDisplayName(self, inModel):
return inModel._baseName + " With Offset"
def AssembleSourceCodeName(self, inModel):
return inModel.__class__.__name__ + "_Offset"
def AssembleCoefficientDesignators(self, inModel):
return inModel._coefficientDesignators + ['Offset']
# overridden from abstract parent class
def AppendAdditionalCoefficientBounds(self, inModel):
if inModel.upperCoefficientBounds != []:
inModel.upperCoefficientBounds.append(None)
if inModel.lowerCoefficientBounds != []:
inModel.lowerCoefficientBounds.append(None)
def AssembleOutputSourceCodeCPP(self, inModel):
return inModel.SpecificCodeCPP() + "\ttemp += Offset;\n"
# overridden from abstract parent class
def GetAdditionalDataCacheFunctions(self, inModel, inDataCacheFunctions):
return inDataCacheFunctions
def GetAdditionalModelPredictions(self, inBaseModelCalculation, inCoeffs, inDataCacheDictionary, inModel):
return self.ConvertInfAndNanToLargeNumber(inBaseModelCalculation + inCoeffs[len(inCoeffs)-1])
# overridden from abstract parent class
def CanLinearSolverBeUsedForSSQABS(self, inModelFlag):
return False
| [
"[email protected]"
]
|