# === mcvine/instruments :: CNCS/CNCS/nxs/raw.py ===
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2008-2015 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
"""
This module helps create a "raw" CNCS nexus file.
"""
def write(events, tofbinsize, path):
""" write neutron events into a CNCS nexus file
    events is a numpy array of "event" records.
An event record has three fields:
* pixelID
* tofChannelNo
* p
tofbinsize * tofChannelNo is the tof for the bin
path is the output path
"""
# implementation details
# -1. h5py is used for handling the file.
# 0. make a new file by first copying a template file to a new file, and then adding new data
    # 1. events are split into banks and saved. for a bank, all events are in bank{i}_events
# 2. any bank must have at least one event. if there are no events, we must assign fake ones
import shutil, sys
shutil.copyfile(nxs_template, path)
import time; time.sleep(0.5) # bad bad
import h5py
f = h5py.File(path, 'a')
entry = f['entry']
# XXX: hack
etz_attrs = {
'units': np.string_('second'),
'offset': np.string_('2012-08-23T11:23:53.833508666-04:00'),
'offset_seconds': 714583433,
'offset_nanoseconds': 833508666,
}
for bank in range(nbanks):
# print bank
sys.stdout.write('.')
# bank events
pixelidstart = bank * pixelsperbank
pixelidend = pixelidstart + pixelsperbank
bevts = events[(events['pixelID']<pixelidend) * (events['pixelID']>=pixelidstart)]
if not bevts.size:
# fake events. mantid cannot handle empty events
bevts = events[0:1].copy()
evt = bevts[0]
evt['pixelID'] = pixelidstart
evt['tofChannelNo'] = 0
evt['p'] = 0
# bank events directory
be = entry['bank%s_events' % (bank+bank_id_offset)]
be['event_id'] = bevts['pixelID'] + pixel_id_offset
be['event_time_offset'] = np.array(bevts['tofChannelNo'], dtype='float32') * tofbinsize
be['event_time_offset'].attrs['units'] = np.string_('microsecond')
be['event_weight'] = np.array(bevts['p'], dtype='float32')
be['event_index'] = np.array([0, len(bevts)], dtype='uint64')
be['event_time_zero'] = np.array([0, 1./60], dtype='float64')
etz = be['event_time_zero']
# hack
etz_attrs['target'] = np.string_('/entry/instrument/bank%s/event_time_zero' % (bank+bank_id_offset))
for k,v in etz_attrs.items(): etz.attrs[k] = v
# XXX: should this be a float and the sum of all weights?
# XXX: michael reuter said this is not really used
be['total_counts'][0] = len(bevts)
# bank directory
b = entry['bank%s' % (bank+bank_id_offset)]
# XXX: should this be float array?
# XXX: michael reuter said this is not really used
# compute histogram
# h, edges = np.histogram(bevts['pixelID'], pixelsperbank, range=(pixelidstart-0.5, pixelidend-0.5)) # weights = ?
# h.shape = 8, 128
# b['data_x_y'][:] = np.array(h, dtype='uint32')
continue
# XXX: should it be a float?
# entry['total_counts'][0] = len(events)
#
f.close()
#
sys.stdout.write('\n')
return
bank_id_offset = 1
pixelsperbank = 8 * 128
pixel_id_offset = (bank_id_offset-1)*pixelsperbank
nbanks = 50
npixels = nbanks * pixelsperbank
import os
from mcvine import resources as res
nxs_template = os.path.join(
res.instrument('CNCS'), 'nxs',
'cncs-raw-events-template.nxs',
)
import numpy as np
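# Added usage sketch (not part of the original module): a hypothetical call to
# write(); the structured dtype below is an assumption based on the field
# names in write()'s docstring.
def _example_write():
    events = np.zeros(10, dtype=[('pixelID', np.int64),
                                 ('tofChannelNo', np.int64),
                                 ('p', np.float64)])
    events['pixelID'] = np.arange(10)
    events['p'] = 1.0
    write(events, tofbinsize=0.1, path='cncs-raw-events.nxs')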
# End of file

# === hongsungheejin/Algo-Study :: mah/Programmers/메뉴 리뉴얼.py ===
from itertools import combinations
def solution(orders, course):
candi = {}
course = set(course)
for order in orders:
order = sorted(order)
for i in range(2, len(order)+1):
for combi in combinations(order, i):
combi = "".join(combi)
if combi in candi:
candi[combi] += 1
else:
candi[combi] = 1
answer = []
candis = {k:v for k, v in sorted(candi.items(), key=lambda x: (len(x[0]), x[1])) if v>=2}
for c in course:
tmp = {}
max_v = 0
for k, v in sorted(candis.items(), key=lambda x:x[0]):
if len(k) == c:
max_v = max(max_v, v)
if v in tmp: tmp[v].append(k)
else: tmp[v] = [k]
if max_v in tmp:
answer.extend(tmp[max_v])
    return sorted(answer)
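
# Added sanity check: the sample input is assumed from the Programmers
# "menu renewal" problem statement (not part of the original file).
if __name__ == "__main__":
    orders = ["ABCFG", "AC", "CDE", "ACDE", "BCFG", "ACDEH"]
    print(solution(orders, [2, 3, 4]))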
"[email protected]"
] | |
f607cc5e2526bcc268de801f40a60c5f8d777c39 | 558ad954a7b150ce95a30e5b1b4d277ed8286d46 | /0x04-python-more_data_structures/8-simple_delete.py | 48e0c39dd411cfe4884cd6a191de83073610e039 | [] | no_license | Indifestus/holbertonschool-higher_level_programming | 9cf41f53d164a6612ea982c28468d2a330212920 | aaaa08577888828016557826f85a98893d8e9cca | refs/heads/master | 2023-03-15T19:06:48.626734 | 2018-01-15T02:27:29 | 2018-01-15T02:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | #!/usr/bin/python3
def simple_delete(my_dict, key=""):
if my_dict is not None:
my_dict.pop(key, None)
return my_dict

# === pplonski/posthog :: posthog/api/person.py (MIT) ===
from posthog.models import Event, Team, Person, PersonDistinctId
from rest_framework import serializers, viewsets, response, request
from rest_framework.decorators import action
from django.db.models import Q, Prefetch, QuerySet, Subquery, OuterRef
from .event import EventSerializer
from typing import Union
from .base import CursorPagination
class PersonSerializer(serializers.HyperlinkedModelSerializer):
last_event = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
class Meta:
model = Person
fields = ['id', 'name', 'distinct_ids', 'properties', 'last_event', 'created_at']
def get_last_event(self, person: Person) -> Union[dict, None]:
if not self.context['request'].GET.get('include_last_event'):
return None
last_event = Event.objects.filter(team_id=person.team_id, distinct_id__in=person.distinct_ids).order_by('-timestamp').first()
if last_event:
return {'timestamp': last_event.timestamp}
else:
return None
def get_name(self, person: Person) -> str:
if person.properties.get('email'):
return person.properties['email']
if len(person.distinct_ids) > 0:
return person.distinct_ids[-1]
return person.pk
class PersonViewSet(viewsets.ModelViewSet):
queryset = Person.objects.all()
serializer_class = PersonSerializer
pagination_class = CursorPagination
def _filter_request(self, request: request.Request, queryset: QuerySet) -> QuerySet:
if request.GET.get('id'):
people = request.GET['id'].split(',')
queryset = queryset.filter(id__in=people)
if request.GET.get('search'):
parts = request.GET['search'].split(' ')
contains = []
for part in parts:
if ':' in part:
queryset = queryset.filter(properties__has_key=part.split(':')[1])
else:
contains.append(part)
queryset = queryset.filter(properties__icontains=' '.join(contains))
queryset = queryset.prefetch_related(Prefetch('persondistinctid_set', to_attr='distinct_ids_cache'))
return queryset
def get_queryset(self):
queryset = super().get_queryset()
team = self.request.user.team_set.get()
queryset = queryset.filter(team=team)
queryset = self._filter_request(self.request, queryset)
return queryset.order_by('-id')
@action(methods=['GET'], detail=False)
def by_distinct_id(self, request):
person = self.get_queryset().get(persondistinctid__distinct_id=str(request.GET['distinct_id']))
        return response.Response(PersonSerializer(person, context={'request': request}).data)

# === birlrobotics/bnpy :: bnpy/callbacks/CBCalcHeldoutMetricsTopicModel.py (BSD-3-Clause) ===
'''
CBCalcHeldoutMetricsTopicModel.py
Learning alg callback extension for fitting topic models on heldout data.
When applied, will perform heldout inference at every parameter-save checkpoint.
Usage
--------
Add the following keyword arg to any call to bnpy.run
--customFuncPath CBCalcHeldoutMetricsTopicModel.py
Example
-------
$ python -m bnpy.Run BarsK10V900 FiniteTopicModel Mult VB \
--K 10 --nLap 50 \
--saveEvery 10 \
--customFuncPath CBCalcHeldoutMetricsTopicModel
Notes
--------
Uses the custom-function interface for learning algorithms.
This interface means that the functions onAlgorithmComplete and onBatchComplete
defined here will be called at appropriate time in *every* learning algorithm.
See LearnAlg.py's eval_custom_function for details.
'''
from __future__ import print_function
import os
import numpy as np
import scipy.io
import InferHeldoutTopics
import HeldoutMetricsLogger
SavedLapSet = set()
def onAlgorithmComplete(**kwargs):
''' Runs at completion of the learning algorithm.
Keyword Args
        --------
All workspace variables passed along from learning alg.
'''
if kwargs['lapFrac'] not in SavedLapSet:
runHeldoutCallback(**kwargs)
def onBatchComplete(**kwargs):
''' Runs viterbi whenever a parameter-saving checkpoint is reached.
Keyword Args
--------
All workspace variables passed along from learning alg.
'''
global SavedLapSet
if kwargs['isInitial']:
SavedLapSet = set()
HeldoutMetricsLogger.configure(
**kwargs['learnAlg'].BNPYRunKwArgs['OutputPrefs'])
if not kwargs['learnAlg'].isSaveParamsCheckpoint(kwargs['lapFrac'],
kwargs['iterid']):
return
if kwargs['lapFrac'] in SavedLapSet:
return
SavedLapSet.add(kwargs['lapFrac'])
runHeldoutCallback(**kwargs)
def runHeldoutCallback(**kwargs):
''' Run heldout metrics evaluation on test dataset.
Kwargs will contain all workspace vars passed from the learning alg.
Keyword Args
------------
hmodel : current HModel object
Data : current Data object
representing *entire* dataset (not just one chunk)
Returns
-------
None. MAP state sequences are saved to a MAT file.
Output
-------
MATfile format: Lap0020.000MAPStateSeqs.mat
'''
taskpath = kwargs['learnAlg'].savedir
for splitName in ['validation', 'test']:
elapsedTime = kwargs['learnAlg'].get_elapsed_time()
InferHeldoutTopics.evalTopicModelOnTestDataFromTaskpath(
dataSplitName=splitName,
taskpath=taskpath,
elapsedTime=elapsedTime,
queryLap=kwargs['lapFrac'],
printFunc=HeldoutMetricsLogger.pprint,
**kwargs)

# === tboudreaux/SummerSTScICode :: sdBs/AllRun/pg_1318+062/sdB_pg_1318+062_lc.py ===
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[200.185083,5.983667], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1318+062/sdB_pg_1318+062_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()

# === AK-1121/code_extraction :: python/python_24610.py ===
# Why int() argument must be a string or a number, not 'list'?
PForm
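# Added illustration (assuming the question above concerns passing a list
# where int() expects a scalar or string):
#   int([1, 2, 3])     # raises TypeError: int() argument must be a string ... not 'list'
#   int([1, 2, 3][0])  # -> 1: convert a single element instead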

# === facaisdu/RaspRobot :: rocon/build/rocon_app_platform/rocon_app_manager/catkin_generated/pkg.develspace.context.pc.py ===
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rocon_app_manager"
PROJECT_SPACE_DIR = "/home/sclab_robot/turtlebot_ws/rocon/devel"
PROJECT_VERSION = "0.8.0"

# === nantongzyg/xtp_test :: Autocase_Result/KCB_YCHF/KCB_YCHF_MM/SHOffer/YCHF_KCBYCHF_SHBP_153.py ===
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_SHBP_153(xtp_test_case):
def setUp(self):
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_SHBP_153')
#clear_data_and_restart_all()
#Api.trade.Logout()
#Api.trade.Login()
pass
#
def test_YCHF_KCBYCHF_SHBP_153(self):
        title = 'Restart Shanghai offer process (SH A-share best-five-level convert-to-limit: multi-tick fills, cumulative filled amount >= commission, and commission below the minimum)'
        # Define the expected values for the current test case
        # Expected status: initial, unfilled, partially filled, fully filled, partial-cancel reported, partially cancelled, reported pending cancel, cancelled, rejected, cancel-rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': queryOrderErrorMsg(0),
'是否生成报单': '是',
'是否是撤废': '否',
# '是否是新股申购': '',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status, side (B=buy, S=sell), expected status, Api
stkparm = QueryStkPriceQty('688011', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'报单测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
print(stkparm['错误原因'])
self.assertEqual(rs['报单测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':5,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 300,
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('Execution result: ' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        ## Restore available funds
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
#oms_restart()
self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
unittest.main()

# === jrr1984/defects_analysis :: defects_thresholding.py ===
from skimage.filters import threshold_yen, threshold_isodata
from skimage import io,measure,img_as_float,morphology
from skimage.measure import regionprops_table
from skimage.color import label2rgb
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from matplotlib_scalebar.scalebar import ScaleBar
import pandas as pd
import glob
import time
start_time = time.time()
pixels_to_microns = 0.586
proplist = ['equivalent_diameter','area']
path = "C:/Users/juanr/Documents/mediciones_ZEISS/TILING/NIR/norm/*.tif"
data= []
holes_data = []
i=0
for file in glob.glob(path):
img = io.imread(file)
img = img_as_float(img)
thresh = threshold_yen(img)
binary = img <= thresh
binary_var = img <= (thresh - 0.1*thresh)
masked_binary = ndimage.binary_fill_holes(binary)
masked_binary_var = ndimage.binary_fill_holes(binary_var)
hols = masked_binary.astype(int) - binary
hols_var = masked_binary_var.astype(int) - binary_var
lab = measure.label(hols,connectivity=2)
lab_var = measure.label(hols_var, connectivity=2)
cleaned_holes = morphology.remove_small_objects(lab, connectivity=2)
cleaned_holes_var = morphology.remove_small_objects(lab_var, connectivity=2)
label_image = measure.label(masked_binary,connectivity=2)
label_image_var = measure.label(masked_binary_var, connectivity=2)
label_final = morphology.remove_small_objects(label_image, min_size=15)
label_final_var = morphology.remove_small_objects(label_image_var, min_size=15)
if label_final.any()!=0 and label_final_var.any() !=0:
props = regionprops_table(label_final, intensity_image=img, properties=proplist)
props_var = regionprops_table(label_final_var, intensity_image=img, properties=proplist)
props_df = pd.DataFrame(props)
props_df_var = pd.DataFrame(props_var)
props_df['error_diameter'] = abs(round((props_df['equivalent_diameter'] - props_df_var['equivalent_diameter'])*pixels_to_microns))
props_df['error_area'] = abs(round((props_df['area'] - props_df_var['area']) * pixels_to_microns ** 2))
props_df['img'] = i
data.append(props_df)
print('defects_df')
print(props_df)
print('error')
print(props_df['error_diameter'])
if cleaned_holes.any()!= 0 and cleaned_holes_var.any() != 0:
props_holes = regionprops_table(cleaned_holes, intensity_image=img, properties=proplist)
props_holes_var = regionprops_table(cleaned_holes_var, intensity_image=img, properties=proplist)
holes_df = pd.DataFrame(props_holes)
holes_df_var = pd.DataFrame(props_holes_var)
holes_df['error_diameter'] = abs(round((holes_df['equivalent_diameter'] - holes_df_var['equivalent_diameter'])*pixels_to_microns))
holes_df['error_area'] = abs(round((holes_df['area'] - holes_df_var['area']) * pixels_to_microns**2))
holes_df['img'] = i
holes_data.append(holes_df)
print('holes_df')
print(holes_df)
print('error holes')
print(holes_df['error_diameter'])
print(file, i)
i += 1
df = pd.concat(data)
df['equivalent_diameter'] = round(df['equivalent_diameter'] * pixels_to_microns)
df['area'] = round(df['area'] * pixels_to_microns **2)
df.to_pickle("C:/Users/juanr/Documents/data_mediciones/defects/defectsNIR_df.pkl")
holes_df = pd.concat(holes_data)
holes_df['equivalent_diameter'] = round(holes_df['equivalent_diameter'] * pixels_to_microns)
holes_df['area'] = round(holes_df['area'] * pixels_to_microns **2)
holes_df.to_pickle("C:/Users/juanr/Documents/data_mediciones/defects/defectsholesNIR_df.pkl")
print("--- %s minutes ---" % ((time.time() - start_time)/60)) | [
"[email protected]"
] | |
bb53fe452117f99a8d8f7b1e33f47e1ab79db0c2 | 77b16dcd465b497c22cf3c096fa5c7d887d9b0c2 | /Cron_Philip/Assignments/flaskolympics/olympics3/server.py | 3c8cc483f0488a3e80700542e08036210ca2f614 | [
"MIT"
] | permissive | curest0x1021/Python-Django-Web | a7cf8a45e0b924ce23791c18f6a6fb3732c36322 | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | refs/heads/master | 2020-04-26T17:14:20.277967 | 2016-10-18T21:54:39 | 2016-10-18T21:54:39 | 173,706,702 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from flask import Flask, render_template, session
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
@app.route('/')
def myfirstfunction():
if not 'title' in session:
session['title'] = 'hello world'
return render_template('index.html', name="Mike")
if __name__ == '__main__':
app.run(debug = True)

# === yephper/django :: django/db/backends/postgresql/operations.py (BSD-3-Clause, Python-2.0) ===
from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def unification_cast_sql(self, output_field):
internal_type = output_field.get_internal_type()
if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
# PostgreSQL will resolve a union as type 'text' if input types are
# 'unknown'.
# http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html
# These fields cannot be implicitly cast back in the default
# PostgreSQL configuration so we need to explicitly cast them.
# We must also remove components of the type within brackets:
# varchar(255) -> varchar.
return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
return '%s'
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
return field_name, params
def datetime_cast_date_sql(self, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = '(%s)::date' % field_name
return sql, params
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = self.date_extract_sql(lookup_type, field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
return sql, params
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_ids(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, return the
list of newly created IDs.
"""
return [item[0] for item in cursor.fetchall()]
def lookup_cast(self, lookup_type, internal_type=None):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
if internal_type in ('IPAddressField', 'GenericIPAddressField'):
lookup = "HOST(%s)"
else:
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
return lookup
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def prepare_sql_script(self, sql):
return [sql]
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
# us to truncate tables referenced by a foreign key in any other
# table.
tables_sql = ', '.join(
style.SQL_FIELD(self.quote_name(table)) for table in tables)
if allow_cascade:
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
style.SQL_KEYWORD('CASCADE'),
)]
else:
sql = ['%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
)]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
sql = []
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table)),
)
)
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.remote_field.through:
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))
)
)
return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode('utf-8')
return None
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def adapt_datefield_value(self, value):
return value
def adapt_datetimefield_value(self, value):
return value
def adapt_timefield_value(self, value):
return value
def adapt_ipaddressfield_value(self, value):
if value:
return Inet(value)
return None
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "age(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super(DatabaseOperations, self).subtract_temporals(internal_type, lhs, rhs)
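
# Added note (illustrative, derived from the methods above): the fragments
# returned here are composed into SQL by the query compiler; for example,
# date_trunc_sql('month', '"created"') yields DATE_TRUNC('month', "created"),
# and quote_name('total') yields "total".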

# === maydewd/stoq-plugins-public :: carver/pe/setup.py (Apache-2.0) ===
from setuptools import setup, find_packages
setup(
name="pe",
version="0.10",
author="Jeff Ito, Marcus LaFerrera (@mlaferrera)",
url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
license="Apache License 2.0",
description="Carve portable executable files from a data stream",
packages=find_packages(),
include_package_data=True,
)

# === lukecampbell/python-gsw :: gsw/version.py (MIT) ===
#!/usr/bin/env python
version = '3.0.1a1'

# === gabriellaec/desoft-analise-exercicios :: backup/user_172/ch88_2020_05_06_12_07_01_120079.py ===
class Retangulo:
    def __init__(self, coord1, coord2):
        # coord1 and coord2 are assumed to be Ponto instances exposing x and y
        self.coord1 = coord1
        self.coord2 = coord2
    def calcula_perimetro(self):
        base = self.coord2.x - self.coord1.x
        altura = self.coord2.y - self.coord1.y
        return 2*base + 2*altura
    def calcula_area(self):
        base = self.coord2.x - self.coord1.x
        altura = self.coord2.y - self.coord1.y
        return base*altura
"[email protected]"
] | |
998e74d73408d3c5bf3bf99ce5df17a7a52ee3f8 | 0a40a0d63c8fce17f4a686e69073a4b18657b160 | /test/functional/rpc_bip38.py | b70349a25ed83fb3fc00d631b1bc8dcd9eb3f3e4 | [
"MIT"
] | permissive | MotoAcidic/Cerebellum | 23f1b8bd4f2170c1ed930eafb3f2dfff07df1c24 | 6aec42007c5b59069048b27db5a8ea1a31ae4085 | refs/heads/main | 2023-05-13T06:31:23.481786 | 2021-06-09T15:28:28 | 2021-06-09T15:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The CEREBELLUM developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for BIP38 encrypting and decrypting addresses."""
from test_framework.test_framework import CerebellumTestFramework
from test_framework.util import assert_equal
class Bip38Test(CerebellumTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
password = 'test'
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
self.log.info('encrypt address %s' % (address))
bip38key = self.nodes[0].bip38encrypt(address, password)['Encrypted Key']
self.log.info('decrypt bip38 key %s' % (bip38key))
assert_equal(self.nodes[1].bip38decrypt(bip38key, password)['Address'], address)
if __name__ == '__main__':
Bip38Test().main()

# === pradyumnakr/EIP-3.0 :: Assignment 4/load_utils.py (Apache-2.0) ===
# Assumed imports (the original file omitted them; imread may come from
# imageio on newer setups where scipy.misc.imread is unavailable):
import os
import numpy as np
from scipy.misc import imread
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
# First load wnids
with open(os.path.join(path, 'wnids.txt'), 'r') as f:
wnids = [x.strip() for x in f]
# Map wnids to integer labels
wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}
# Use words.txt to get names for each class
with open(os.path.join(path, 'words.txt'), 'r') as f:
wnid_to_words = dict(line.split('\t') for line in f)
for wnid, words in wnid_to_words.items():
wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
class_names = [wnid_to_words[wnid] for wnid in wnids]
# Next load training data.
X_train = []
y_train = []
for i, wnid in enumerate(wnids):
if (i + 1) % 20 == 0:
print(f'loading training data for synset {(i + 1)}/{len(wnids)}')
# To figure out the filenames we need to open the boxes file
boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
with open(boxes_file, 'r') as f:
filenames = [x.split('\t')[0] for x in f]
num_images = len(filenames)
X_train_block = np.zeros((num_images, 64, 64, 3), dtype=dtype)
y_train_block = wnid_to_label[wnid] * np.ones(num_images, dtype=np.int64)
for j, img_file in enumerate(filenames):
img_file = os.path.join(path, 'train', wnid, 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
## grayscale file
img.shape = (64, 64, 1)
X_train_block[j] = img.transpose(1, 0, 2)
X_train.append(X_train_block)
y_train.append(y_train_block)
# We need to concatenate all training data
X_train = np.concatenate(X_train, axis=0)
y_train = np.concatenate(y_train, axis=0)
# Next load validation data
with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
img_files = []
val_wnids = []
for line in f:
img_file, wnid = line.split('\t')[:2]
img_files.append(img_file)
val_wnids.append(wnid)
num_val = len(img_files)
y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
X_val = np.zeros((num_val, 64, 64, 3), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'val', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_val[i] = img.transpose(1, 0, 2)
# Next load test images
# Students won't have test labels, so we need to iterate over files in the
# images directory.
img_files = os.listdir(os.path.join(path, 'test', 'images'))
X_test = np.zeros((len(img_files), 64, 64, 3), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'test', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_test[i] = img.transpose(1, 0, 2)
y_test = None
y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
if os.path.isfile(y_test_file):
with open(y_test_file, 'r') as f:
img_file_to_wnid = {}
for line in f:
line = line.split('\t')
img_file_to_wnid[line[0]] = line[1]
y_test = [wnid_to_label[img_file_to_wnid[img_file]] for img_file in img_files]
y_test = np.array(y_test)
mean_image = X_train.mean(axis=0)
if subtract_mean:
X_train -= mean_image[None]
X_val -= mean_image[None]
X_test -= mean_image[None]
return {
'class_names': class_names,
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val,
'X_test': X_test,
'y_test': y_test,
'class_names': class_names,
'mean_image': mean_image,
}
data = load_tiny_imagenet('/content/tiny-imagenet-200/', dtype=np.float32, subtract_mean=True)

# === openGDA/gda-diamond :: configurations/i21-config/scripts/functions/sample_vessel_vacuum_control.py ===
'''
define function to control the sample vessel vacuum valves for sample changes
Created on 18 Jul 2023
@author: fy65
'''
import installation
from gda.device.scannable import ScannableMotionBase
from gda.epics import CAClient
# control PV = BL21I-EA-SMPL-01:SEQ:CTRL
# state PV = BL21I-EA-SMPL-01:SEQ:CTRL:STATE_RBV
class SampleVesselValvesControl(ScannableMotionBase):
def __init__(self, name, pv):
self.setName(name)
self.setInputNames([name])
self.setOutputFormat(["%d"])
self.control = CAClient(pv)
self.state = CAClient(pv + ":STATE_RBV")
self.control.configure()
self.state.configure()
self.val = 0
def getPosition(self):
if installation.isLive():
return int(self.control.get()) #0 - Close, 1 - Open
if installation.isDummy():
return self.val
def asynchronousMoveTo(self, val):
if installation.isLive():
self.control.caput(int(val))
if installation.isDummy():
self.val = val
if val == 1:
print("Open sample vessel valves")
if val == 0:
print("Close sample vessel valves")
def isBusy(self):
if installation.isLive():
return int(self.state.caget()) != 2 #2 - Ready, 1 - Opening, 0 - Closing
if installation.isDummy():
return False
sample_vessel_valves = SampleVesselValvesControl("sample_vessel_valves", "BL21I-EA-SMPL-01:SEQ:CTRL")
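
# Added usage sketch (hypothetical GDA console commands; not in the original):
#   pos sample_vessel_valves 1   # open the sample vessel valves
#   pos sample_vessel_valves 0   # close the sample vessel valves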

# === mrpal39/ev_code :: awssam/fullfeblog/blog/migrations/0002_auto_20201208_1414.py ===
# Generated by Django 3.1.4 on 2020-12-08 14:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ('-publish',)},
),
migrations.RenameField(
model_name='post',
old_name='content',
new_name='body',
),
migrations.RenameField(
model_name='post',
old_name='date_posted',
new_name='publish',
),
migrations.AddField(
model_name='post',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(default=django.utils.timezone.now, max_length=250, unique_for_date='publish'),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='status',
field=models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10),
),
migrations.AddField(
model_name='post',
name='updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(max_length=250),
),
]

# === musyoku/NLP :: attentive-reader-2/sgu.py ===
import numpy
import chainer
from chainer import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import softplus
from chainer.functions.activation import tanh
from chainer.functions.math import clip
from chainer import link
from chainer.links.connection import linear
from chainer import variable
def hard_sigmoid(x):
return clip.clip(x * 0.2 + 0.5, 0.0, 1.0)
class SGU(link.Chain):
def __init__(self, in_size, out_size):
super(SGU, self).__init__(
W_xh=linear.Linear(in_size, out_size),
W_zxh=linear.Linear(out_size, out_size),
W_xz=linear.Linear(in_size, out_size),
W_hz=linear.Linear(out_size, out_size),
)
def __call__(self, h, x):
x_g = self.W_xh(x)
z_g = tanh.tanh(self.W_zxh(x_g * h))
z_out = softplus.softplus(z_g * h)
z_t = hard_sigmoid(self.W_xz(x) + self.W_hz(h))
h_t = (1 - z_t) * h + z_t * z_out
return h_t
class StatefulSGU(SGU):
def __init__(self, in_size, out_size):
super(StatefulSGU, self).__init__(in_size, out_size)
self.state_size = out_size
self.reset_state()
def to_cpu(self):
super(StatefulSGU, self).to_cpu()
if self.h is not None:
self.h.to_cpu()
def to_gpu(self, device=None):
super(StatefulSGU, self).to_gpu(device)
if self.h is not None:
self.h.to_gpu(device)
def set_state(self, h):
assert isinstance(h, chainer.Variable)
h_ = h
if self.xp == numpy:
h_.to_cpu()
else:
h_.to_gpu()
self.h = h_
def reset_state(self):
self.h = None
def __call__(self, x):
if self.h is None:
xp = cuda.get_array_module(x)
zero = variable.Variable(xp.zeros_like(x.data))
z_out = softplus.softplus(zero)
z_t = hard_sigmoid(self.W_xz(x))
h_t = z_t * z_out
else:
h_t = SGU.__call__(self, self.h, x)
self.h = h_t
return h_t
class DSGU(link.Chain):
def __init__(self, in_size, out_size):
super(DSGU, self).__init__(
W_xh=linear.Linear(in_size, out_size),
W_zxh=linear.Linear(out_size, out_size),
W_go=linear.Linear(out_size, out_size),
W_xz=linear.Linear(in_size, out_size),
W_hz=linear.Linear(out_size, out_size),
)
def __call__(self, h, x):
x_g = self.W_xh(x)
z_g = tanh.tanh(self.W_zxh(x_g * h))
z_out = sigmoid.sigmoid(self.W_go(z_g * h))
z_t = hard_sigmoid(self.W_xz(x) + self.W_hz(h))
h_t = (1 - z_t) * h + z_t * z_out
return h_t
class StatefulDSGU(DSGU):
def __init__(self, in_size, out_size):
super(StatefulDSGU, self).__init__(in_size, out_size)
self.state_size = out_size
self.reset_state()
def to_cpu(self):
super(StatefulDSGU, self).to_cpu()
if self.h is not None:
self.h.to_cpu()
def to_gpu(self, device=None):
super(StatefulDSGU, self).to_gpu(device)
if self.h is not None:
self.h.to_gpu(device)
def set_state(self, h):
assert isinstance(h, chainer.Variable)
h_ = h
if self.xp == numpy:
h_.to_cpu()
else:
h_.to_gpu()
self.h = h_
def reset_state(self):
self.h = None
def __call__(self, x):
if self.h is None:
z_t = hard_sigmoid(self.W_xz(x))
h_t = z_t * 0.5
else:
h_t = DSGU.__call__(self, self.h, x)
self.h = h_t
        return h_t
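
# Added usage sketch (illustrative only; sizes and random data are assumptions):
if __name__ == '__main__':
    rnn = StatefulDSGU(16, 32)
    x = chainer.Variable(numpy.random.randn(8, 16).astype(numpy.float32))
    h1 = rnn(x)  # first call initializes the hidden state
    h2 = rnn(x)  # subsequent calls reuse rnn.h
    print(h1.data.shape, h2.data.shape)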
"[email protected]"
] | |
2908f0e3db2a300277114b39d46d25d3ea5e1012 | 2d3976964d8923a1e91e31af702bd68fbf37d474 | /runTask/server.py | 1bd36c0754e0d042ad090870e35b568521b7c88d | [] | no_license | barry800414/master_thesis | 2f6900fb2964891849dadef9283ed6e7f11cc696 | 01a0cac30ab63fcf818f1f43959634094b624af5 | refs/heads/master | 2020-05-29T08:53:32.810702 | 2016-06-04T02:03:52 | 2016-06-04T02:03:52 | 38,382,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | #!/usr/bin/env python3
from multiprocessing.managers import BaseManager
import queue
import sys
if __name__ == '__main__':
port = 3333
if len(sys.argv) == 2:
port = int(sys.argv[1])
q = queue.Queue()
    # a QueueManager holds a queue q, which automatically handles race conditions
class QueueManager(BaseManager):
pass
QueueManager.register('get_queue', callable = lambda: q)
m = QueueManager(address = ('0.0.0.0', port), authkey = b'barry800414')
s = m.get_server()
print('Server is running now (port:%d) ...' % (port), file=sys.stderr)
s.serve_forever()
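# Added client-side sketch (illustrative; must mirror the server's address and
# authkey above):
#   from multiprocessing.managers import BaseManager
#   class QueueManager(BaseManager):
#       pass
#   QueueManager.register('get_queue')
#   m = QueueManager(address=('127.0.0.1', 3333), authkey=b'barry800414')
#   m.connect()
#   q = m.get_queue()
#   q.put('some task')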

# === mprasu/Sample-Projects :: cssproject/cssproject/wsgi.py ===
"""
WSGI config for cssproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cssproject.settings")
application = get_wsgi_application()

# === ACENDER/LeetCode :: 题库/1399.页面推荐.py ===
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 1399.页面推荐.py

# === romain-li/leetcode :: problems/0190_Reverse_Bits/__init__.py ===
ID = '190'
TITLE = 'Reverse Bits'
DIFFICULTY = 'Easy'
URL = 'https://oj.leetcode.com/problems/reverse-bits/'
BOOK = False
PROBLEM = r"""Reverse bits of a given 32 bits unsigned integer.
For example, given input 43261596 (represented in binary as
**00000010100101000001111010011100**), return 964176192 (represented in binary
as **00111001011110000010100101000000**).
**Follow up**:
If this function is called many times, how would you optimize it?
Related problem: [Reverse Integer](/problems/reverse-integer/)
**Credits:**
Special thanks to [@ts](https://oj.leetcode.com/discuss/user/ts) for adding
this problem and creating all test cases.
"""

# === angr/claripy :: claripy/ast/__init__.py (BSD-2-Clause) ===
# pylint:disable=redefined-outer-name
from typing import TYPE_CHECKING
# Mypy is severely confused by this delayed import trickery, but works if we just pretend that the import
# happens here already
if TYPE_CHECKING:
from .bits import Bits
from .bv import BV
from .vs import VS
from .fp import FP
from .bool import Bool, true, false
from .int import Int
from .base import Base
from .strings import String
from .. import ops as all_operations
else:
Bits = lambda *args, **kwargs: None
BV = lambda *args, **kwargs: None
VS = lambda *args, **kwargs: None
FP = lambda *args, **kwargs: None
Bool = lambda *args, **kwargs: None
Int = lambda *args, **kwargs: None
Base = lambda *args, **kwargs: None
true = lambda *args, **kwargs: None
false = lambda *args, **kwargs: None
String = lambda *args, **kwargs: None
all_operations = None
def _import():
global Bits, BV, VS, FP, Bool, Int, Base, String, true, false, all_operations
from .bits import Bits
from .bv import BV
from .vs import VS
from .fp import FP
from .bool import Bool, true, false
from .int import Int
from .base import Base
from .strings import String
from .. import ops as all_operations
__all__ = ("Bits", "BV", "VS", "FP", "Bool", "true", "false", "Int", "Base", "String", "all_operations")

# === monteua/Tests :: PyTest/SteamSender/tests/test_send_cards.py ===
from PageObject.SteamActions import SteamHome
from accounts import accounts
accounts_list = accounts()
def test_send_trade(driver):
for login in accounts_list:
if login == 'monte_ua13':
password = ""
else:
password = ""
SteamHome(driver).open_browser()
SteamHome(driver).enter_credentials(login, password)
SteamHome(driver).pass_steam_guard()
SteamHome(driver).open_trade_url()
SteamHome(driver).log_off()

# === taeheechoi/coding-practice :: FB/5-longest-palindromic-substring.py ===
class Solution:
# Time O(N^2) Space O(1)
def longestPalindrome(self, s):
res = ''
for i in range(len(s)):
odd = self.is_pal(s, i, i)
even = self.is_pal(s, i, i+1)
res = max(odd, even, res, key=len)
return res
def is_pal(self, s, l, r):
while l >= 0 and r < len(s) and s[l] == s[r]:
l -= 1
r += 1
        return s[l+1: r]
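
# Added quick check (illustrative): "babad" has two equally long answers,
# "bab" and "aba"; this implementation returns one of them.
if __name__ == "__main__":
    print(Solution().longestPalindrome("babad"))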
"[email protected]"
] | |
d3b5e095fa1dab8e9c98895fa11a48312d856b56 | 874f46f4510b321ec3110ac8d5d5e572175c5544 | /Generator_Tests/TestFrec/scripts/generator.py | 94df7463f40e16990b3f6614572ff87accc2eb5a | [] | no_license | JordiEspinozaMendoza/Simulacion | bb271aee0908693ff0e36470dae98216096d9066 | fac1cdf5010a34a853a8b13d93209bcbde616e64 | refs/heads/main | 2023-05-31T14:06:21.329271 | 2021-06-14T02:52:06 | 2021-06-14T02:52:06 | 367,148,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | import sys
import os
import pandas as pd
sys.setrecursionlimit(5000)
# X = seed
# a = multiplier
# c = additive constant
# m = modulus
def Operacion(X, a, c, m):
Resi = ((a*X)+c) % m
return Resi
def createDataFrame(data):
df = pd.DataFrame(data, columns=["n","Xn","Xn+1","Rn"])
cols = list(df.columns)
return df.to_string(), df, cols
def Recursivo(X0, a, c, m, conta, Detener, ArraySemilla, data):
try:
for Semilla in ArraySemilla:
if X0==Semilla:
Detener = True
if Detener==True or conta==325:
pass
else:
data["n"].append(conta+1)
data["Xn"].append(X0)
data["Xn+1"].append(Operacion(X0,a,c,m))
data["Rn"].append(Operacion(X0,a,c,m)/m)
conta = conta + 1
ArraySemilla.append(X0)
Recursivo(Operacion(X0,a,c,m),a,c,m,conta,Detener, ArraySemilla, data)
except Exception as e:
print(str(e))
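
# Added hypothetical driver for the linear congruential generator above
# (parameter values are illustrative assumptions):
if __name__ == "__main__":
    data = {"n": [], "Xn": [], "Xn+1": [], "Rn": []}
    Recursivo(7, 5, 3, 16, 0, False, [], data)
    texto, df, cols = createDataFrame(data)
    print(texto)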

# === chenrun666/luffy :: luffy/settings.py ===
"""
Django settings for luffy project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h*zthsj)s$^_5kxkdbk+^gy2ih+vh6kpw#wu$uy^0bce((+k)9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'course.apps.CourseConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'course.mymiddleware.accessmiddleware.CrossDomainMiddleWare',
]
ROOT_URLCONF = 'luffy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'luffy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"CONNECTION_POOL_KWARGS": {"max_connections": 100}
# "PASSWORD": "密码",
}
}
}
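
# Usage sketch (standard Django cache API; the key and timeout values are
# illustrative):
#   from django.core.cache import cache
#   cache.set("course_list", data, timeout=60)
#   data = cache.get("course_list")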
| [
"[email protected]"
] | |
23fbea60c2bea452a414dcf5f255cd4eabdab38a | 437e905d8c214dc25c559b1dc03eaf9f0c85326f | /is28/vyacheslavleva28/lab6/function.py | 1522faa137dc1fcb8f84d4cc4b96a551fd47870d | [] | no_license | AnatolyDomrachev/karantin | 542ca22c275e39ef3491b1c0d9838e922423b5a9 | 0d9f60207e80305eb713fd43774e911fdbb9fbad | refs/heads/master | 2021-03-29T03:42:43.954727 | 2020-05-27T13:24:36 | 2020-05-27T13:24:36 | 247,916,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | def vvod():
    # read ten floats, one per line, from standard input
    a = []
    for i in range(10):
        x = float(input())
        a.append(x)
    return a

def rachet(a):
    # return True if the values are in non-decreasing order
    res = True
    for i in range(0, len(a) - 1):
        if a[i] > a[i + 1]:
            res = False
    return res
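# For example, rachet([1, 2, 2, 5]) returns True and rachet([3, 1, 2])
# returns False.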
def vyvod(data):
    # print the supplied value
    print(data)

data = vvod()
print(data)
result = rachet(data)
print(result)
vyvod(result)
"[email protected]"
] | |
06aae58ab947c90ed7bc942a02ffa420afd0287b | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_6/models/network_interface_trace_get_response.py | 711d740178ee303c6379e1c1ec389c67bd15cca7 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,335 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.6, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_6 import models
class NetworkInterfaceTraceGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[NetworkInterfaceTrace]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.NetworkInterfaceTrace]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[NetworkInterfaceTrace]): A list of network trace run result.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceTraceGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NetworkInterfaceTraceGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkInterfaceTraceGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
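
# Construction sketch (field values below are illustrative):
#   resp = NetworkInterfaceTraceGetResponse(total_item_count=0, items=[])
#   print(resp.to_dict())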
| [
"[email protected]"
] | |
b9b22ed2ac4565940e04c8fac0f36e72bf88ef75 | eb61d62ca1f6f0123e3771105f5dfbbd6115138d | /.history/23-08-21_20210912011408.py | d242edf35564cc66ff35c5dd66a540fa6f9fc0b8 | [] | no_license | Alopezm5/CORRECTO-2 | e0f14bcc3a88c0e222d10e3261e68532008bc42e | 223613f1fb04dce3fac9f82f243cb2f22fe100f3 | refs/heads/main | 2023-07-29T06:52:48.147424 | 2021-09-12T20:33:27 | 2021-09-12T20:33:27 | 388,995,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,944 | py | class Empresa:
def __init__(self,nom="El mas barato",ruc="0999999999",tel="042971234",dir="Juan Montalvo"):
self.nombre=nom
self.ruc=ruc
self.telefono=tel
self.direccion=dir
def mostrarEmpresa(self):
print("Empresa: {:17}, RUC: {}".format(self.nombre,self.ruc))
class Cliente:
def __init__(self,nom,ced,tel):
self.nombre=nom
self.cedula=ced
self.telefono=tel
def mostrarCliente(self):
print(self.nombre,self.cedula,self.telefono)
class ClienteCorporativo(Cliente):
    def __init__(self,nomb,cedu,tele,contrato):
        super().__init__(nomb,cedu,tele)
        self.__contrato=contrato
    @property
    def contrato(self): # getter: return the value of the private attribute
        return self.__contrato
    @contrato.setter
    def contrato(self,value): # setter: assign the value of the private attribute
if value:
self.__contrato=value
else:
self.__contrato="Sin contrato"
def mostrarCliente(self):
print(self.nombre, self.__contrato)
class ClientePersonal(Cliente):
def __init__(self,nom,ced,tel,promocion=True):
        super().__init__(nom,ced,tel)
self.__promocion=promocion
@property
def promocion(self): #getter: obtener el valor del atributo privado
return self.__promocion
def mostrarCliente(self):
print(self.nombre, self.__promocion)
class Articulo:
secuencia=0
iva=0.12
def __init__(self,des,pre,sto):
Articulo.secuencia+=1
self.codigo=Articulo.secuencia
self.descripcion= des
self.precio=pre
self.stock=sto
def mostraArticulo(self):
        print(self.codigo,self.descripcion)
class DetVenta:
linea=0
def __init__(self,articulo,cantidad):
DetVenta.linea+=1
self.lineaDetalle=DetVenta.linea
self.articulo=articulo
self.precio=articulo.precio
self.cantidad=cantidad
class CabVenta:
def __init__(self,fac,empresa,fecha,cliente,tot=0):
self.empresa=empresa
self.factura=fac
self.fecha=fecha
self.cliente=cliente
self.total=tot
self.detalleVen=[]
def agregarDetalle(self,articulo,cantidad):
detalle=DetVenta(articulo,cantidad)
self.total+=detalle.precio*detalle.cantidad
self.detalleVen.append(detalle)
def mostrarVenta(self,empNombre,empRuc):
print("Empresa {:17} r")
# emp=Empresa("El mas barato","0953156049","0998132446","Coop. Juan Montalvo")
# emp.mostrarEmpresa()
# print(emp.nombre)
cli1=ClientePersonal("Jose","0912231499","042567890",True)
cli1.mostrarCliente()
art1=Articulo("Aceite",2,100)
art1.mostraArticulo()
art2=Articulo("Coca Cola",1,200)
art2.mostraArticulo()
art3=Articulo("Leche",1.5,200)
art3.mostraArticulo()
print(Articulo.iva)
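
# Sales-flow sketch (illustrative date and quantities only):
#   venta = CabVenta(1, Empresa(), "2021-09-12", cli1)
#   venta.agregarDetalle(art1, 2)
#   venta.mostrarVenta(venta.empresa.nombre, venta.empresa.ruc)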
"[email protected]"
] | |
e7a3ca9fa15a77897aa6fde5e7b69ee9bb2f853d | ac350894488b34318c11a65d35a0f8fdf69b7d50 | /products/migrations/0001_initial.py | 545343aa9abd1f1393c114e71c6c8e1aed73463f | [] | no_license | phrac/onemgin | 508f052304ddbc03f45e994ebe33769ae30d9336 | 7a029dbca1bd2725ceabc0741c7cfb47290aadb7 | refs/heads/master | 2021-01-16T19:31:10.929508 | 2015-09-08T23:53:43 | 2015-09-08T23:53:43 | 12,391,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Barcode',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.FileField(null=True, upload_to=b'barcodes/ean13/')),
],
),
migrations.CreateModel(
name='BarcodeType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('onemg', models.CharField(unique=True, max_length=13)),
('ean', models.CharField(unique=True, max_length=13)),
('upc', models.CharField(unique=True, max_length=12)),
('jan', models.CharField(max_length=13, null=True)),
('gtin', models.CharField(max_length=14, null=True)),
('nsn', models.CharField(max_length=14, null=True)),
('isbn10', models.CharField(max_length=10, null=True)),
('isbn13', models.CharField(max_length=13, null=True)),
('asin', models.CharField(max_length=10, null=True)),
('brand', models.CharField(max_length=128, null=True)),
('manufacturer', models.CharField(max_length=128, null=True)),
('mpn', models.CharField(max_length=64, null=True)),
('part_number', models.CharField(max_length=64, null=True)),
('sku', models.CharField(max_length=64, null=True)),
('model_number', models.CharField(max_length=64, null=True)),
('length', models.FloatField(null=True)),
('width', models.FloatField(null=True)),
('height', models.FloatField(null=True)),
('weight', models.FloatField(null=True)),
('description', models.CharField(max_length=512, null=True)),
('image_url', models.CharField(max_length=512, null=True)),
('amazon_url', models.URLField(null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
],
),
migrations.AddField(
model_name='barcode',
name='product',
field=models.ForeignKey(to='products.Product'),
),
migrations.AddField(
model_name='barcode',
name='type',
field=models.ForeignKey(to='products.BarcodeType'),
),
migrations.AlterUniqueTogether(
name='barcode',
unique_together=set([('product', 'type')]),
),
]
| [
"[email protected]"
] | |
ddc87bfca79fabe3d914696f58497118d2d0d193 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/test/test_com_adobe_cq_wcm_mobile_qrcode_servlet_qr_code_image_generator_info.py | d51b3347b77c7b18680b18281fcd2bb012c5ead3 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 1,359 | py | # coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_cq_wcm_mobile_qrcode_servlet_qr_code_image_generator_info import ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo(unittest.TestCase):
"""ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo(self):
"""Test ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_adobe_cq_wcm_mobile_qrcode_servlet_qr_code_image_generator_info.ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ee4b23bbf32042a37a0d791f5b2ca1db58e8570e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2741/60666/264322.py | a8362f4be09e9d763b52af7aceca5c10738a7630 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | nums=eval(input())
if len(nums) < 2:
    # fewer than two elements: the whole list is the longest ascending run
    print(len(nums))
else:
count=1
temp=1
for i in range(len(nums)-1):
if nums[i]<nums[i+1]:
count+=1
else:
temp=max(count,temp)
count=1
print(max(count,temp)) | [
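# Example: for input [1, 2, 2, 3, 4] the longest strictly ascending run
# is [2, 3, 4], so the program prints 3.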
"[email protected]"
] | |
17e6f75ed18e0677f37465f1e06fd694ac1f207c | 7790e3a3f2de068fef343585ec856983591997a2 | /employee/templatetags/custom_math.py | f84010231a266d25ecf80f4bd85b0e1e5c8705ff | [] | no_license | mehdi1361/tadbir | ce702a9a02672826f0bf06e8d5cf0644efe31949 | c0a67710099f713cf96930e25df708625de89a6f | refs/heads/master | 2021-06-04T07:35:37.624372 | 2018-07-23T05:25:04 | 2018-07-23T05:25:04 | 148,870,028 | 0 | 0 | null | 2019-10-22T21:40:28 | 2018-09-15T04:40:26 | HTML | UTF-8 | Python | false | false | 484 | py | from django import template
from django.db.models import Sum
from bank.models import File
register = template.Library()
@register.simple_tag
def add(a, b):
return a + b
@register.simple_tag
def count_files(user):
files = File.objects.filter(employees__employee=user)
return files.count()
@register.simple_tag
def sum_main_deposit(user):
result = File.objects.filter(employees__employee=user).aggregate(Sum('main_deposit'))
return result['main_deposit__sum']
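
# Template usage sketch (tag names as registered above; assumes the app's
# templatetags package exposes this module as "custom_math"):
#   {% load custom_math %}
#   {% add 2 3 %}  {% count_files request.user %}  {% sum_main_deposit request.user %}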
| [
"[email protected]"
] | |
616cf3654526e0f3ecb4547651c5536bb2e4bc82 | 9c5116ab446a0fba4dfaaa1685cbd3a1042dc054 | /kubernetes/test/test_v1_image_stream.py | 3129b06ad6d6ea315c9b6d88a781b4978cc33449 | [
"Apache-2.0"
] | permissive | caruccio/client-python | fc11a354ce15507c94308e35b6790b6776e01e6e | cb65186027ce68beedcd7752c488b8e3b5c0968e | refs/heads/master | 2021-01-25T08:18:45.601502 | 2017-06-08T13:14:06 | 2017-06-08T13:14:06 | 93,747,698 | 0 | 0 | null | 2017-06-08T12:37:32 | 2017-06-08T12:37:32 | null | UTF-8 | Python | false | false | 4,144 | py | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use kubernetes.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a kubernetes.client. By listing and beginning a watch from the returned resourceVersion, kubernetes.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so kubernetes.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but kubernetes.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_image_stream import V1ImageStream
class TestV1ImageStream(unittest.TestCase):
""" V1ImageStream unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ImageStream(self):
"""
Test V1ImageStream
"""
model = kubernetes.client.models.v1_image_stream.V1ImageStream()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
33c679ef31d8a55ff6125c693fa10ac8d9f24460 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /hypercube/hypercube_integrals.py | d41fbf8b960343823c0f1eb202c112bb2e36bffd | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,020 | py | #! /usr/bin/env python3
#
def hypercube01_monomial_integral ( m, e ):
#*****************************************************************************80
#
## HYPERCUBE01_MONOMIAL_INTEGRAL: integrals over the unit hypercube in M dimensions.
#
# Discussion:
#
# The integration region is
#
# 0 <= X(1:M) <= 1,
#
# The monomial is F(X) = product ( 1 <= I <= M ) X(I)^E(I).
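#
#    For example, with M = 2 and E = (2,3), the integral of x^2 * y^3
#    over the unit square is (1/(2+1)) * (1/(3+1)) = 1/12.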
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 June 2015
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Philip Davis, Philip Rabinowitz,
# Methods of Numerical Integration,
# Second Edition,
# Academic Press, 1984, page 263.
#
# Parameters:
#
# Input, integer M, the spatial dimension.
#
# Input, integer E(M), the exponents. Each exponent must be nonnegative.
#
# Output, real INTEGRAL, the integral.
#
from sys import exit
for i in range ( 0, m ):
if ( e[i] < 0 ):
print ( '' )
print ( 'HYPERCUBE01_MONOMIAL_INTEGRAL - Fatal error!' )
print ( ' All exponents must be nonnegative.' )
      exit ( 'HYPERCUBE01_MONOMIAL_INTEGRAL - Fatal error!' )
integral = 1.0
for i in range ( 0, m ):
integral = integral / float ( e[i] + 1 )
return integral
def hypercube01_monomial_integral_test ( ):
#*****************************************************************************80
#
## HYPERCUBE01_MONOMIAL_INTEGRAL_TEST tests HYPERCUBE01_MONOMIAL_INTEGRAL.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 June 2015
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
m = 3
n = 4192
test_num = 20
print ( '' )
print ( 'HYPERCUBE01_MONOMIAL_INTEGRAL_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' HYPERCUBE01_MONOMIAL_INTEGRAL returns the integral of a monomial' )
print ( ' over the interior of the unit hypercube in 3D.' )
print ( ' Compare with a Monte Carlo estimate.' )
print ( '' )
print ( ' Using M = %d' % ( m ) )
#
# Get sample points.
#
seed = 123456789
x, seed = hypercube01_sample ( m, n, seed )
print ( '' )
print ( ' Number of sample points used is %d' % ( n ) )
#
# Randomly choose exponents.
#
print ( '' )
print ( ' Ex Ey Ez MC-Estimate Exact Error' )
print ( '' )
for test in range ( 0, test_num ):
e, seed = i4vec_uniform_ab ( m, 0, 4, seed )
value = monomial_value ( m, n, e, x )
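    # Monte Carlo estimate: volume times the average of the integrand values.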
result = hypercube01_volume ( m ) * np.sum ( value ) / float ( n )
exact = hypercube01_monomial_integral ( m, e )
error = abs ( result - exact )
for i in range ( 0, m ):
      print ( '  %2d' % ( e[i] ), end = '' )
print ( ' %14.6g %14.6g %10.2g' % ( result, exact, error ) )
#
# Terminate.
#
print ( '' )
print ( 'HYPERCUBE01_MONOMIAL_INTEGRAL_TEST:' )
print ( ' Normal end of execution.' )
return
def hypercube01_sample ( m, n, seed ):
#*****************************************************************************80
#
## HYPERCUBE01_SAMPLE samples points in the unit hypercube in M dimensions.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 June 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, the spatial dimension.
#
# Input, integer N, the number of points.
#
# Input/output, integer SEED, a seed for the random
# number generator.
#
# Output, real X(M,N), the points.
#
x, seed = r8mat_uniform_01 ( m, n, seed )
return x, seed
def hypercube01_sample_test ( ):
#*****************************************************************************80
#
## HYPERCUBE01_SAMPLE_TEST tests HYPERCUBE01_SAMPLE.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 June 2015
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'HYPERCUBE01_SAMPLE_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' HYPERUBE01_SAMPLE samples the unit hypercube' )
print ( ' in M dimensions.' )
m = 3
n = 10
seed = 123456789
x, seed = hypercube01_sample ( m, n, seed )
r8mat_transpose_print ( m, n, x, ' Sample points in the unit hypercube.' )
#
# Terminate.
#
print ( '' )
print ( 'HYPERCUBE01_SAMPLE_TEST' )
print ( ' Normal end of execution.' )
return
def hypercube01_volume ( m ):
#*****************************************************************************80
#
## HYPERCUBE01_VOLUME returns the volume of the unit hypercube in M dimensions.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 June 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, the spatial dimension.
#
# Output, real VALUE, the volume.
#
value = 1.0
return value
def hypercube01_volume_test ( ) :
#*****************************************************************************80
#
## HYPERCUBE01_VOLUME tests HYPERCUBE01_VOLUME.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 June 2015
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'HYPERCUBE01_VOLUME_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' HYPERCUBE01_VOLUME returns the volume of the unit hypercube' )
print ( ' in M dimensions.' )
m = 3
value = hypercube01_volume ( m )
print ( '' )
print ( ' HYPERCUBE01_VOLUME(%d) = %g' % ( m, value ) )
#
# Terminate.
#
print ( '' )
print ( 'HYPERCUBE01_VOLUME_TEST' )
print ( ' Normal end of execution.' )
return
def hypercube_integrals_test ( ):
#*****************************************************************************80
#
## HYPERCUBE_INTEGRALS_TEST tests the HYPERCUBE_INTEGRALS library.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 June 2015
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'HYPERCUBE_INTEGRALS_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' Test the HYPERCUBE_INTEGRALS library.' )
#
# Utility functions.
#
i4vec_print_test ( )
i4vec_transpose_print_test ( )
i4vec_uniform_ab_test ( )
r8mat_print_test ( )
r8mat_print_some_test ( )
r8mat_transpose_print_test ( )
r8mat_transpose_print_some_test ( )
r8mat_uniform_01_test ( )
r8mat_uniform_ab_test ( )
#
# Library functions.
#
hypercube01_monomial_integral_test ( )
hypercube01_sample_test ( )
hypercube01_volume_test ( )
monomial_value_test ( )
#
# Terminate.
#
print ( '' )
print ( 'HYPERCUBE_INTEGRALS_TEST:' )
print ( ' Normal end of execution.' )
return
def i4vec_print ( n, a, title ):
#*****************************************************************************80
#
## I4VEC_PRINT prints an I4VEC.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 August 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the dimension of the vector.
#
# Input, integer A(N), the vector to be printed.
#
# Input, string TITLE, a title.
#
print ( '' )
print ( title )
print ( '' )
for i in range ( 0, n ):
print ( '%6d %6d' % ( i, a[i] ) )
return
def i4vec_print_test ( ):
#*****************************************************************************80
#
## I4VEC_PRINT_TEST tests I4VEC_PRINT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 25 September 2016
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
print ( '' )
print ( 'I4VEC_PRINT_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' I4VEC_PRINT prints an I4VEC.' )
n = 4
v = np.array ( [ 91, 92, 93, 94 ], dtype = np.int32 )
i4vec_print ( n, v, ' Here is an I4VEC:' )
#
# Terminate.
#
print ( '' )
print ( 'I4VEC_PRINT_TEST:' )
print ( ' Normal end of execution.' )
return
def i4vec_transpose_print ( n, a, title ):
#*****************************************************************************80
#
## I4VEC_TRANSPOSE_PRINT prints an I4VEC "transposed".
#
# Example:
#
# A = (/ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 /)
# TITLE = 'My vector: '
#
#    My vector:   1 2 3 4 5 6 7 8 9 10 11
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 08 September 2018
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the number of components of the vector.
#
# Input, integer A(N), the vector to be printed.
#
# Input, string TITLE, a title.
#
if ( 0 < len ( title ) ):
print ( title, end = '' )
if ( 0 < n ):
for i in range ( 0, n ):
print ( ' %d' % ( a[i] ), end = '' )
if ( ( i + 1 ) % 20 == 0 or i == n - 1 ):
print ( '' )
else:
print ( '(empty vector)' )
return
def i4vec_transpose_print_test ( ):
#*****************************************************************************80
#
## I4VEC_TRANSPOSE_PRINT_TEST tests I4VEC_TRANSPOSE_PRINT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 09 September 2018
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
print ( '' )
print ( 'I4VEC_TRANSPOSE_PRINT_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' I4VEC_TRANSPOSE_PRINT prints an I4VEC' )
  print ( '  with up to 20 entries to a row, and an optional title.' )
n = 12
a = np.zeros ( n, dtype = np.int32 )
for i in range ( 0, n ):
a[i] = i + 1
print ( '' )
i4vec_transpose_print ( n, a, ' My array: ' )
#
# Terminate.
#
print ( '' )
print ( 'I4VEC_TRANSPOSE_PRINT_TEST:' )
print ( ' Normal end of execution.' )
return
def i4vec_uniform_ab ( n, a, b, seed ):
#*****************************************************************************80
#
## I4VEC_UNIFORM_AB returns a scaled pseudorandom I4VEC.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 05 April 2013
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Paul Bratley, Bennett Fox, Linus Schrage,
# A Guide to Simulation,
# Second Edition,
# Springer, 1987,
# ISBN: 0387964673,
# LC: QA76.9.C65.B73.
#
# Bennett Fox,
# Algorithm 647:
# Implementation and Relative Efficiency of Quasirandom
# Sequence Generators,
# ACM Transactions on Mathematical Software,
# Volume 12, Number 4, December 1986, pages 362-376.
#
# Pierre L'Ecuyer,
# Random Number Generation,
# in Handbook of Simulation,
# edited by Jerry Banks,
# Wiley, 1998,
# ISBN: 0471134031,
# LC: T57.62.H37.
#
# Peter Lewis, Allen Goodman, James Miller,
# A Pseudo-Random Number Generator for the System/360,
# IBM Systems Journal,
# Volume 8, Number 2, 1969, pages 136-143.
#
# Parameters:
#
# Input, integer N, the number of entries in the vector.
#
# Input, integer A, B, the minimum and maximum acceptable values.
#
# Input, integer SEED, a seed for the random number generator.
#
# Output, integer C(N), the randomly chosen integer vector.
#
# Output, integer SEED, the updated seed.
#
import numpy as np
from sys import exit
i4_huge = 2147483647
seed = np.floor ( seed )
if ( seed < 0 ):
seed = seed + i4_huge
if ( seed == 0 ):
print ( '' )
print ( 'I4VEC_UNIFORM_AB - Fatal error!' )
print ( ' Input SEED = 0!' )
exit ( 'I4VEC_UNIFORM_AB - Fatal error!' )
seed = np.floor ( seed )
a = round ( a )
b = round ( b )
c = np.zeros ( n, dtype = np.int32 )
for i in range ( 0, n ):
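    # Schrage's trick: form ( 16807 * seed ) mod ( 2^31 - 1 ) without
    # overflow, using 2147483647 = 16807 * 127773 + 2836.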
k = ( seed // 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
seed = ( seed % i4_huge )
if ( seed < 0 ):
seed = seed + i4_huge
r = seed * 4.656612875E-10
#
# Scale R to lie between A-0.5 and B+0.5.
#
r = ( 1.0 - r ) * ( min ( a, b ) - 0.5 ) \
+ r * ( max ( a, b ) + 0.5 )
#
# Use rounding to convert R to an integer between A and B.
#
value = round ( r )
value = max ( value, min ( a, b ) )
value = min ( value, max ( a, b ) )
c[i] = value
return c, seed
def i4vec_uniform_ab_test ( ):
#*****************************************************************************80
#
## I4VEC_UNIFORM_AB_TEST tests I4VEC_UNIFORM_AB.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 27 October 2014
#
# Author:
#
# John Burkardt
#
import platform
n = 20
a = -100
b = 200
seed = 123456789
print ( '' )
print ( 'I4VEC_UNIFORM_AB_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' I4VEC_UNIFORM_AB computes pseudorandom values' )
print ( ' in an interval [A,B].' )
print ( '' )
print ( ' The lower endpoint A = %d' % ( a ) )
print ( ' The upper endpoint B = %d' % ( b ) )
print ( ' The initial seed is %d' % ( seed ) )
print ( '' )
v, seed = i4vec_uniform_ab ( n, a, b, seed )
i4vec_print ( n, v, ' The random vector:' )
#
# Terminate.
#
print ( '' )
print ( 'I4VEC_UNIFORM_AB_TEST:' )
print ( ' Normal end of execution.' )
return
def monomial_value ( m, n, e, x ):
#*****************************************************************************80
#
## MONOMIAL_VALUE evaluates a monomial.
#
# Discussion:
#
# This routine evaluates a monomial of the form
#
# product ( 1 <= i <= m ) x(i)^e(i)
#
# The combination 0.0^0, if encountered, is treated as 1.0.
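#
#    For example, with M = 2, E = (1,2), and a point X = (3,2), the
#    monomial value is 3^1 * 2^2 = 12.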
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 07 April 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, the spatial dimension.
#
# Input, integer N, the number of evaluation points.
#
# Input, integer E(M), the exponents.
#
# Input, real X(M,N), the point coordinates.
#
# Output, real V(N), the monomial values.
#
import numpy as np
v = np.ones ( n )
for i in range ( 0, m ):
if ( 0 != e[i] ):
for j in range ( 0, n ):
v[j] = v[j] * x[i,j] ** e[i]
return v
def monomial_value_test ( ):
#*****************************************************************************80
#
## MONOMIAL_VALUE_TEST tests MONOMIAL_VALUE on sets of data in various dimensions.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 07 April 2015
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'MONOMIAL_VALUE_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' Use monomial_value() to evaluate some monomials' )
print ( ' in dimensions 1 through 3.' )
e_min = -3
e_max = 6
n = 5
seed = 123456789
x_min = -2.0
x_max = +10.0
for m in range ( 1, 4 ):
print ( '' )
print ( ' Spatial dimension M = %d' % ( m ) )
e, seed = i4vec_uniform_ab ( m, e_min, e_max, seed )
i4vec_transpose_print ( m, e, ' Exponents:' )
x, seed = r8mat_uniform_ab ( m, n, x_min, x_max, seed )
#
# To make checking easier, make the X values integers.
#
for i in range ( 0, m ):
for j in range ( 0, n ):
x[i,j] = round ( x[i,j] )
v = monomial_value ( m, n, e, x )
print ( '' )
print ( ' V(X) ', end = '' )
for i in range ( 0, m ):
print ( ' X(%d)' % ( i ), end = '' )
print ( '' )
print ( '' )
for j in range ( 0, n ):
print ( '%14.6g ' % ( v[j] ), end = '' )
for i in range ( 0, m ):
print ( '%10.4f' % ( x[i,j] ), end = '' )
print ( '' )
#
# Terminate.
#
print ( '' )
print ( 'MONOMIAL_VALUE_TEST' )
print ( ' Normal end of execution.' )
return
def r8mat_print ( m, n, a, title ):
#*****************************************************************************80
#
## R8MAT_PRINT prints an R8MAT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 August 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, the number of rows in A.
#
# Input, integer N, the number of columns in A.
#
# Input, real A(M,N), the matrix.
#
# Input, string TITLE, a title.
#
r8mat_print_some ( m, n, a, 0, 0, m - 1, n - 1, title )
return
def r8mat_print_test ( ):
#*****************************************************************************80
#
## R8MAT_PRINT_TEST tests R8MAT_PRINT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 10 February 2015
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
print ( '' )
print ( 'R8MAT_PRINT_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8MAT_PRINT prints an R8MAT.' )
m = 4
n = 6
v = np.array ( [ \
[ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0 ],
[ 21.0, 22.0, 23.0, 24.0, 25.0, 26.0 ],
[ 31.0, 32.0, 33.0, 34.0, 35.0, 36.0 ],
[ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0 ] ], dtype = np.float64 )
r8mat_print ( m, n, v, ' Here is an R8MAT:' )
#
# Terminate.
#
print ( '' )
print ( 'R8MAT_PRINT_TEST:' )
print ( ' Normal end of execution.' )
return
def r8mat_print_some ( m, n, a, ilo, jlo, ihi, jhi, title ):
#*****************************************************************************80
#
## R8MAT_PRINT_SOME prints out a portion of an R8MAT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 10 February 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, N, the number of rows and columns of the matrix.
#
# Input, real A(M,N), an M by N matrix to be printed.
#
# Input, integer ILO, JLO, the first row and column to print.
#
# Input, integer IHI, JHI, the last row and column to print.
#
# Input, string TITLE, a title.
#
incx = 5
print ( '' )
print ( title )
if ( m <= 0 or n <= 0 ):
print ( '' )
print ( ' (None)' )
return
for j2lo in range ( max ( jlo, 0 ), min ( jhi + 1, n ), incx ):
j2hi = j2lo + incx - 1
j2hi = min ( j2hi, n )
j2hi = min ( j2hi, jhi )
print ( '' )
print ( ' Col: ', end = '' )
for j in range ( j2lo, j2hi + 1 ):
print ( '%7d ' % ( j ), end = '' )
print ( '' )
print ( ' Row' )
i2lo = max ( ilo, 0 )
i2hi = min ( ihi, m )
for i in range ( i2lo, i2hi + 1 ):
print ( '%7d :' % ( i ), end = '' )
for j in range ( j2lo, j2hi + 1 ):
print ( '%12g ' % ( a[i,j] ), end = '' )
print ( '' )
return
def r8mat_print_some_test ( ):
#*****************************************************************************80
#
## R8MAT_PRINT_SOME_TEST tests R8MAT_PRINT_SOME.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 October 2014
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
print ( '' )
print ( 'R8MAT_PRINT_SOME_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8MAT_PRINT_SOME prints some of an R8MAT.' )
m = 4
n = 6
v = np.array ( [ \
[ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0 ],
[ 21.0, 22.0, 23.0, 24.0, 25.0, 26.0 ],
[ 31.0, 32.0, 33.0, 34.0, 35.0, 36.0 ],
[ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0 ] ], dtype = np.float64 )
r8mat_print_some ( m, n, v, 0, 3, 2, 5, ' Here is an R8MAT:' )
#
# Terminate.
#
print ( '' )
print ( 'R8MAT_PRINT_SOME_TEST:' )
print ( ' Normal end of execution.' )
return
def r8mat_transpose_print ( m, n, a, title ):
#*****************************************************************************80
#
## R8MAT_TRANSPOSE_PRINT prints an R8MAT, transposed.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 August 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, the number of rows in A.
#
# Input, integer N, the number of columns in A.
#
# Input, real A(M,N), the matrix.
#
# Input, string TITLE, a title.
#
r8mat_transpose_print_some ( m, n, a, 0, 0, m - 1, n - 1, title )
return
def r8mat_transpose_print_test ( ):
#*****************************************************************************80
#
## R8MAT_TRANSPOSE_PRINT_TEST tests R8MAT_TRANSPOSE_PRINT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 October 2014
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
print ( '' )
print ( 'R8MAT_TRANSPOSE_PRINT_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8MAT_TRANSPOSE_PRINT prints an R8MAT.' )
m = 4
n = 3
v = np.array ( [ \
[ 11.0, 12.0, 13.0 ],
[ 21.0, 22.0, 23.0 ],
[ 31.0, 32.0, 33.0 ],
[ 41.0, 42.0, 43.0 ] ], dtype = np.float64 )
r8mat_transpose_print ( m, n, v, ' Here is an R8MAT, transposed:' )
#
# Terminate.
#
print ( '' )
print ( 'R8MAT_TRANSPOSE_PRINT_TEST:' )
print ( ' Normal end of execution.' )
return
def r8mat_transpose_print_some ( m, n, a, ilo, jlo, ihi, jhi, title ):
#*****************************************************************************80
#
## R8MAT_TRANSPOSE_PRINT_SOME prints a portion of an R8MAT, transposed.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 13 November 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, N, the number of rows and columns of the matrix.
#
# Input, real A(M,N), an M by N matrix to be printed.
#
# Input, integer ILO, JLO, the first row and column to print.
#
# Input, integer IHI, JHI, the last row and column to print.
#
# Input, string TITLE, a title.
#
incx = 5
print ( '' )
print ( title )
if ( m <= 0 or n <= 0 ):
print ( '' )
print ( ' (None)' )
return
  for i2lo in range ( max ( ilo, 0 ), min ( ihi + 1, m ), incx ):
    i2hi = i2lo + incx - 1
    i2hi = min ( i2hi, m - 1 )
    i2hi = min ( i2hi, ihi )
    print ( '' )
    print ( '  Row: ', end = '' )
    for i in range ( i2lo, i2hi + 1 ):
      print ( '%7d  ' % ( i ), end = '' )
print ( '' )
print ( ' Col' )
j2lo = max ( jlo, 0 )
j2hi = min ( jhi, n - 1 )
for j in range ( j2lo, j2hi + 1 ):
      print ( '%7d :' % ( j ), end = '' )
      for i in range ( i2lo, i2hi + 1 ):
        print ( '%12g  ' % ( a[i,j] ), end = '' )
print ( '' )
return
def r8mat_transpose_print_some_test ( ):
#*****************************************************************************80
#
## R8MAT_TRANSPOSE_PRINT_SOME_TEST tests R8MAT_TRANSPOSE_PRINT_SOME.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 October 2014
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
print ( '' )
print ( 'R8MAT_TRANSPOSE_PRINT_SOME_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8MAT_TRANSPOSE_PRINT_SOME prints some of an R8MAT, transposed.' )
m = 4
n = 6
v = np.array ( [ \
[ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0 ],
[ 21.0, 22.0, 23.0, 24.0, 25.0, 26.0 ],
[ 31.0, 32.0, 33.0, 34.0, 35.0, 36.0 ],
[ 41.0, 42.0, 43.0, 44.0, 45.0, 46.0 ] ], dtype = np.float64 )
r8mat_transpose_print_some ( m, n, v, 0, 3, 2, 5, ' R8MAT, rows 0:2, cols 3:5:' )
#
# Terminate.
#
print ( '' )
print ( 'R8MAT_TRANSPOSE_PRINT_SOME_TEST:' )
print ( ' Normal end of execution.' )
return
def r8mat_uniform_01 ( m, n, seed ):
#*****************************************************************************80
#
## R8MAT_UNIFORM_01 returns a unit pseudorandom R8MAT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 08 April 2013
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Paul Bratley, Bennett Fox, Linus Schrage,
# A Guide to Simulation,
# Second Edition,
# Springer, 1987,
# ISBN: 0387964673,
# LC: QA76.9.C65.B73.
#
# Bennett Fox,
# Algorithm 647:
# Implementation and Relative Efficiency of Quasirandom
# Sequence Generators,
# ACM Transactions on Mathematical Software,
# Volume 12, Number 4, December 1986, pages 362-376.
#
# Pierre L'Ecuyer,
# Random Number Generation,
# in Handbook of Simulation,
# edited by Jerry Banks,
# Wiley, 1998,
# ISBN: 0471134031,
# LC: T57.62.H37.
#
# Peter Lewis, Allen Goodman, James Miller,
# A Pseudo-Random Number Generator for the System/360,
# IBM Systems Journal,
# Volume 8, Number 2, 1969, pages 136-143.
#
# Parameters:
#
# Input, integer M, N, the number of rows and columns in the array.
#
# Input, integer SEED, the integer "seed" used to generate
# the output random number.
#
# Output, real R(M,N), an array of random values between 0 and 1.
#
# Output, integer SEED, the updated seed. This would
# normally be used as the input seed on the next call.
#
import numpy
from math import floor
from sys import exit
i4_huge = 2147483647
seed = floor ( seed )
if ( seed < 0 ):
seed = seed + i4_huge
if ( seed == 0 ):
print ( '' )
print ( 'R8MAT_UNIFORM_01 - Fatal error!' )
print ( ' Input SEED = 0!' )
exit ( 'R8MAT_UNIFORM_01 - Fatal error!' )
r = numpy.zeros ( ( m, n ) )
for j in range ( 0, n ):
for i in range ( 0, m ):
k = ( seed // 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
seed = ( seed % i4_huge )
if ( seed < 0 ):
seed = seed + i4_huge
r[i][j] = seed * 4.656612875E-10
return r, seed
def r8mat_uniform_01_test ( ):
#*****************************************************************************80
#
## R8MAT_UNIFORM_01_TEST tests R8MAT_UNIFORM_01.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 October 2014
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
m = 5
n = 4
seed = 123456789
print ( '' )
print ( 'R8MAT_UNIFORM_01_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8MAT_UNIFORM_01 computes a random R8MAT.' )
print ( '' )
print ( ' 0 <= X <= 1' )
print ( ' Initial seed is %d' % ( seed ) )
v, seed = r8mat_uniform_01 ( m, n, seed )
r8mat_print ( m, n, v, ' Random R8MAT:' )
#
# Terminate.
#
print ( '' )
print ( 'R8MAT_UNIFORM_01_TEST:' )
print ( ' Normal end of execution.' )
return
def r8mat_uniform_ab ( m, n, a, b, seed ):
#*****************************************************************************80
#
## R8MAT_UNIFORM_AB returns a scaled pseudorandom R8MAT.
#
# Discussion:
#
# An R8MAT is an array of R8's.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 08 April 2013
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Paul Bratley, Bennett Fox, Linus Schrage,
# A Guide to Simulation,
# Second Edition,
# Springer, 1987,
# ISBN: 0387964673,
# LC: QA76.9.C65.B73.
#
# Bennett Fox,
# Algorithm 647:
# Implementation and Relative Efficiency of Quasirandom
# Sequence Generators,
# ACM Transactions on Mathematical Software,
# Volume 12, Number 4, December 1986, pages 362-376.
#
# Pierre L'Ecuyer,
# Random Number Generation,
# in Handbook of Simulation,
# edited by Jerry Banks,
# Wiley, 1998,
# ISBN: 0471134031,
# LC: T57.62.H37.
#
# Peter Lewis, Allen Goodman, James Miller,
# A Pseudo-Random Number Generator for the System/360,
# IBM Systems Journal,
# Volume 8, Number 2, 1969, pages 136-143.
#
# Parameters:
#
# Input, integer M, N, the number of rows and columns in the array.
#
# Input, real A, B, the range of the pseudorandom values.
#
# Input, integer SEED, the integer "seed" used to generate
# the output random number.
#
# Output, real R(M,N), an array of random values between 0 and 1.
#
# Output, integer SEED, the updated seed. This would
# normally be used as the input seed on the next call.
#
import numpy
from math import floor
from sys import exit
i4_huge = 2147483647
seed = floor ( seed )
if ( seed < 0 ):
seed = seed + i4_huge
if ( seed == 0 ):
print ( '' )
print ( 'R8MAT_UNIFORM_AB - Fatal error!' )
print ( ' Input SEED = 0!' )
exit ( 'R8MAT_UNIFORM_AB - Fatal error!' )
r = numpy.zeros ( ( m, n ) )
for j in range ( 0, n ):
for i in range ( 0, m ):
k = ( seed // 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
seed = floor ( seed )
seed = ( seed % i4_huge )
if ( seed < 0 ):
seed = seed + i4_huge
r[i][j] = a + ( b - a ) * seed * 4.656612875E-10
return r, seed
def r8mat_uniform_ab_test ( ):
#*****************************************************************************80
#
## R8MAT_UNIFORM_AB_TEST tests R8MAT_UNIFORM_AB.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 31 October 2014
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
m = 5
n = 4
a = -1.0
b = +5.0
seed = 123456789
print ( '' )
print ( 'R8MAT_UNIFORM_AB_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8MAT_UNIFORM_AB computes a random R8MAT.' )
print ( '' )
print ( ' %g <= X <= %g' % ( a, b ) )
print ( ' Initial seed is %d' % ( seed ) )
v, seed = r8mat_uniform_ab ( m, n, a, b, seed )
r8mat_print ( m, n, v, ' Random R8MAT:' )
#
# Terminate.
#
print ( '' )
print ( 'R8MAT_UNIFORM_AB_TEST:' )
print ( ' Normal end of execution.' )
return
def timestamp ( ):
#*****************************************************************************80
#
## TIMESTAMP prints the date as a timestamp.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 06 April 2013
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# None
#
import time
t = time.time ( )
print ( time.ctime ( t ) )
return None
def timestamp_test ( ):
#*****************************************************************************80
#
## TIMESTAMP_TEST tests TIMESTAMP.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 December 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# None
#
import platform
print ( '' )
print ( 'TIMESTAMP_TEST:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' TIMESTAMP prints a timestamp of the current date and time.' )
print ( '' )
timestamp ( )
#
# Terminate.
#
print ( '' )
print ( 'TIMESTAMP_TEST:' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
timestamp ( )
hypercube_integrals_test ( )
timestamp ( )
| [
"[email protected]"
] | |
e057ef624b94fe0256123ec91bdf0734eb2d87bd | a79bc871a72d2c39bcbb7cb4242a7d469770bed0 | /masking_api_60/api/file_format_api.py | 750b5fa51da54a2f71b8f4f29ba1d59f7edf3fc1 | [] | no_license | pioro/masking_api_60 | 5e457249ab8a87a4cd189f68821167fa27c084f2 | 68473bdf0c05cbe105bc7d2e2a24e75a9cbeca08 | refs/heads/master | 2023-01-03T08:57:49.943969 | 2020-10-30T11:42:15 | 2020-10-30T11:42:15 | 279,624,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,223 | py | # coding: utf-8
"""
Masking API
Schema for the Masking Engine API # noqa: E501
OpenAPI spec version: 5.1.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from masking_api_60.api_client import ApiClient
class FileFormatApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
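
    # Typical usage sketch (host/auth configuration and the file-format
    # type value are assumptions, not taken from this module):
    #   api = FileFormatApi(ApiClient())
    #   fmt = api.create_file_format('/path/to/format.txt', 'DELIMITED')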
def create_file_format(self, file_format, file_format_type, **kwargs): # noqa: E501
"""Create file format # noqa: E501
WARNING: The generated curl command is incorrect, so please refer to the Masking API guide for instructions on how to upload files through the API # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_file_format(file_format, file_format_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file file_format: The file format to be uploaded. The logical name of the file format will be exactly the name of this uploaded file (required)
:param str file_format_type: The type of the file format being uploaded (required)
:return: FileFormat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_file_format_with_http_info(file_format, file_format_type, **kwargs) # noqa: E501
else:
(data) = self.create_file_format_with_http_info(file_format, file_format_type, **kwargs) # noqa: E501
return data
def create_file_format_with_http_info(self, file_format, file_format_type, **kwargs): # noqa: E501
"""Create file format # noqa: E501
WARNING: The generated curl command is incorrect, so please refer to the Masking API guide for instructions on how to upload files through the API # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_file_format_with_http_info(file_format, file_format_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file file_format: The file format to be uploaded. The logical name of the file format will be exactly the name of this uploaded file (required)
:param str file_format_type: The type of the file format being uploaded (required)
:return: FileFormat
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_format', 'file_format_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_file_format" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_format' is set
if ('file_format' not in params or
params['file_format'] is None):
raise ValueError("Missing the required parameter `file_format` when calling `create_file_format`") # noqa: E501
# verify the required parameter 'file_format_type' is set
if ('file_format_type' not in params or
params['file_format_type'] is None):
raise ValueError("Missing the required parameter `file_format_type` when calling `create_file_format`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'file_format' in params:
local_var_files['fileFormat'] = params['file_format'] # noqa: E501
if 'file_format_type' in params:
form_params.append(('fileFormatType', params['file_format_type'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/file-formats', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileFormat', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_file_format(self, file_format_id, **kwargs): # noqa: E501
"""Delete file format by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_file_format(file_format_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int file_format_id: The ID of the file format to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_file_format_with_http_info(file_format_id, **kwargs) # noqa: E501
else:
(data) = self.delete_file_format_with_http_info(file_format_id, **kwargs) # noqa: E501
return data
def delete_file_format_with_http_info(self, file_format_id, **kwargs): # noqa: E501
"""Delete file format by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_file_format_with_http_info(file_format_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int file_format_id: The ID of the file format to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_format_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_file_format" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_format_id' is set
if ('file_format_id' not in params or
params['file_format_id'] is None):
raise ValueError("Missing the required parameter `file_format_id` when calling `delete_file_format`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_format_id' in params:
path_params['fileFormatId'] = params['file_format_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/file-formats/{fileFormatId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_file_formats(self, **kwargs): # noqa: E501
"""Get all file formats # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_file_formats(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get file formats. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:return: FileFormatList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_file_formats_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_file_formats_with_http_info(**kwargs) # noqa: E501
return data
def get_all_file_formats_with_http_info(self, **kwargs): # noqa: E501
"""Get all file formats # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_file_formats_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get file formats. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:return: FileFormatList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_number', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_file_formats" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page_number' in params:
query_params.append(('page_number', params['page_number'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/file-formats', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileFormatList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_file_format_by_id(self, file_format_id, **kwargs): # noqa: E501
"""Get file format by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_file_format_by_id(file_format_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int file_format_id: The ID of the file format to get (required)
:return: FileFormat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_file_format_by_id_with_http_info(file_format_id, **kwargs) # noqa: E501
else:
(data) = self.get_file_format_by_id_with_http_info(file_format_id, **kwargs) # noqa: E501
return data
def get_file_format_by_id_with_http_info(self, file_format_id, **kwargs): # noqa: E501
"""Get file format by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_file_format_by_id_with_http_info(file_format_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int file_format_id: The ID of the file format to get (required)
:return: FileFormat
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_format_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_file_format_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_format_id' is set
if ('file_format_id' not in params or
params['file_format_id'] is None):
raise ValueError("Missing the required parameter `file_format_id` when calling `get_file_format_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_format_id' in params:
path_params['fileFormatId'] = params['file_format_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/file-formats/{fileFormatId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileFormat', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_file_format(self, file_format_id, body, **kwargs): # noqa: E501
"""Update file format # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_file_format(file_format_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int file_format_id: The ID of the file format to update (required)
:param FileFormat body: The updated file format (required)
:return: FileFormat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_file_format_with_http_info(file_format_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_file_format_with_http_info(file_format_id, body, **kwargs) # noqa: E501
return data
def update_file_format_with_http_info(self, file_format_id, body, **kwargs): # noqa: E501
"""Update file format # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_file_format_with_http_info(file_format_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int file_format_id: The ID of the file format to update (required)
:param FileFormat body: The updated file format (required)
:return: FileFormat
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_format_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_file_format" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_format_id' is set
if ('file_format_id' not in params or
params['file_format_id'] is None):
raise ValueError("Missing the required parameter `file_format_id` when calling `update_file_format`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_file_format`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_format_id' in params:
path_params['fileFormatId'] = params['file_format_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/file-formats/{fileFormatId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileFormat', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
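# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated module). The package,
# configuration class, and `FileFormatsApi` wrapper name below are
# assumptions -- OpenAPI generators typically emit an ApiClient plus a *Api
# class around methods like the ones above -- but the 'api_key' auth name
# matches the auth_settings used in this file. Adjust imports to the real
# generated package:
#
#     from generated_client import ApiClient, Configuration, FileFormatsApi
#
#     configuration = Configuration()
#     configuration.api_key['api_key'] = 'YOUR-API-KEY'
#     api = FileFormatsApi(ApiClient(configuration))
#
#     fmt = api.get_file_format_by_id(42)                    # synchronous
#     thread = api.get_file_format_by_id(42, async_req=True)
#     fmt = thread.get()                                     # asynchronous
# ---------------------------------------------------------------------------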
14b450a72c93ad9b78cf7685fe19e4122eb15c24 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_mp21.py | 562fa5609e8ddc81fe2febf073542f27d358c618 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._mp2_info1 import _mp2_info1
class _mp21(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Method = None
self.Big_send = None
self.MP2_INFO = _mp2_info1()
self._name = "MP2"
self._keywords = {'Method': 'METHOD', 'Big_send': 'BIG_SEND'}
self._subsections = {'MP2_INFO': 'MP2_INFO'}
self._attributes = ['Section_parameters']
4d23735583d49ed6fba1925bf636572e5d146be5 | 2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b | /121.买卖股票的最佳时机.py | 7cd0e5ce63fc4da08187b59ea4f973e49037b644 | [] | no_license | mqinbin/python_leetcode | 77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3 | 73e0c81867f38fdf4051d8f58d0d3dc245be081e | refs/heads/main | 2023-03-10T18:27:36.421262 | 2021-02-25T07:24:10 | 2021-02-25T07:24:10 | 314,410,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #
# @lc app=leetcode.cn id=121 lang=python3
#
# [121] 买卖股票的最佳时机
#
# @lc code=start
from typing import List

class Solution:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
min_price = prices[0]
max_profit = 0
for i in range(1, len(prices)):
max_profit = max(prices[i] - min_price ,max_profit)
min_price = min(min_price, prices[i])
return max_profit
# @lc code=end
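# Quick self-check (hedged: LeetCode normally supplies the test harness):
if __name__ == '__main__':
    assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5  # buy at 1, sell at 6
    assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0     # prices only fall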
147b3bc0148ddc69e31304519e65c37ad3c790e6 | 80de5ac86ce85b5aa93788d5d2325d88b87b47f7 | /cf/1334/c.py | 0d9603f1d8a8e97a68d5e3f095f080f1f5405a4e | [] | no_license | ethicalrushi/cp | 9a46744d647053fd3d2eaffc52888ec3c190f348 | c881d912b4f77acfde6ac2ded0dc9e0e4ecce1c1 | refs/heads/master | 2022-04-24T07:54:05.350193 | 2020-04-27T20:27:31 | 2020-04-27T20:27:31 | 257,911,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | t = int(input())
for _ in range(t):
n = int(input())
a =[]
for i in range(n):
u, v = [int(x) for x in input().strip().split()]
a.append([u,v])
if n==1:
        res=a[0][0]  # answer must be a bullet count, not the [hp, dmg] pair
else:
mn = 10**10
si = None
for i in range(1,n):
if a[i][0]>a[i-1][1]:
diff = a[i-1][1]
else:
diff = a[i][0]
if diff<mn:
mn = diff
si = i
if a[0][0]>a[-1][1]:
diff = a[-1][1]
else:
diff = a[0][0]
if diff<mn:
mn = diff
si = 0
# print(si)
if si is None:
res = min(a[i][0] for i in range(n))
else:
# res=0
res=a[si][0]
ct=1
prev_i=si
i = si+1
if i==n:
i=0
while ct<n:
# print(i, prev_i, res)
res+=max(0,a[i][0]-a[prev_i][1])
prev_i = i
i+=1
if i==n:
i=0
ct+=1
    print(res)
ccf640a6f3089b61899c512ea864d117a27d00e3 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /a7WiKcyrTtggTym3f_11.py | 38c97ae03767b14cd4f73e59493d45390792e3c0 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | """
Create a function that takes two numbers as arguments and return the LCM of
the two numbers.
### Examples
lcm(3, 5) ➞ 15
lcm(14, 28) ➞ 28
lcm(4, 6) ➞ 12
### Notes
* Don't forget to return the result.
* You may want to use the GCD function to make this a little easier.
* LCM stands for least common multiple, the smallest multiple of both integers.
"""
def lcm(a, b):
m = max(a,b)
while True:
if m%a==0 and m%b==0:
return m
m += 1
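# A faster alternative sketch using the GCD hint from the docstring:
# lcm(a, b) = a * b // gcd(a, b). math.gcd is available on Python 3.5+;
# the brute-force loop above is O(lcm(a, b)), this is O(log min(a, b)).
import math

def lcm_via_gcd(a, b):
    return a * b // math.gcd(a, b)

if __name__ == '__main__':
    assert lcm(3, 5) == lcm_via_gcd(3, 5) == 15
    assert lcm(14, 28) == lcm_via_gcd(14, 28) == 28
    assert lcm(4, 6) == lcm_via_gcd(4, 6) == 12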
a9151a391b64c038d80fc25c24e8ae9bcc938c36 | 927fc31a0144c308a5c8d6dbe46ba8f2728276c9 | /tasks/final_tasks/file_handling/2.count_word_in_file.py | 7ad9f89f0c38383b2a89b17194e5f946ad3c11d8 | [] | no_license | ChandraSiva11/sony-presamplecode | b3ee1ba599ec90e357a4b3a656f7a00ced1e8ad3 | 393826039e5db8a448fa4e7736b2199c30f5ed24 | refs/heads/master | 2023-01-14T00:09:19.185822 | 2020-11-23T02:07:00 | 2020-11-23T02:07:00 | 299,527,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # Python Program to Count the Number of Words in a Text File
def main():
num_words = 0
with open('text_doc.txt', 'r') as f:
for line in f:
words = line.split()
num_words += len(words)
print('Number of words', num_words)
if __name__ == '__main__':
    main()
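# A related sketch: collections.Counter yields per-word frequencies in one
# pass, useful when a plain total is not enough. 'text_doc.txt' matches the
# filename already used in main() above.
from collections import Counter

def word_frequencies(path='text_doc.txt'):
    with open(path, 'r') as f:
        return Counter(f.read().split())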
ef74b6c780caea8be24fb7a36c1bd5e228e66148 | f36a9701975eec736b5e43ab09ec318eee80c8cc | /pyspeckit/spectrum/widgets.py | ae949705761f3d5018daacf9ece04cedd453487e | [
"MIT"
] | permissive | soylentdeen/pyspeckit | e995f38531256d85313038a0ddeb181a4c6480b8 | 11c449c6951468f2c07dfda3b1177b138f810f16 | refs/heads/master | 2021-01-18T11:32:51.659032 | 2013-06-26T00:39:22 | 2013-06-26T00:39:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,757 | py | from matplotlib.widgets import Widget,Button,Slider
from matplotlib import pyplot
import matplotlib
class dictlist(list):
def __init__(self, *args):
list.__init__(self, *args)
self._dict = {}
self._dict_index = {}
for ii,value in enumerate(self):
if len(value) == 2:
self._dict[value[0]] = value[1]
self._dict_index[value[0]] = ii
self._dict_index[ii] = value[0]
else:
self._dict[ii] = value
self._dict_index[ii] = ii
def __getitem__(self, key):
if type(key) is int:
return super(dictlist,self).__getitem__(key)
else:
return self._dict[key]
def __setitem__(self, key, value):
if type(key) is int:
super(dictlist,self).__setitem__(key,value)
self._dict[self._dict_index[key]] = value
else:
if key in self._dict:
self._dict[key] = value
self[self._dict_index[key]] = value
else:
self._dict[key] = value
self._dict_index[key] = len(self)
self._dict_index[len(self)] = key
self.append(value)
    def __slice__(self, s1, s2):
        # NOTE: __slice__ is not a special method Python recognizes; slicing
        # is dispatched through __getitem__ (and __getslice__ on Python 2),
        # so this stub is never invoked.
        pass
def values(self):
return [self._dict[self._dict_index[ii]] for ii in xrange(len(self))]
def keys(self):
return [self._dict_index[ii] for ii in xrange(len(self))]
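    # Usage sketch (comments only; the parameter names are illustrative):
    #
    #     dl = dictlist([('amp', 1.0), ('width', 2.5)])
    #     dl['amp']        -> 1.0               # dict-style access by name
    #     dl[0]            -> ('amp', 1.0)      # list-style access by index
    #     dl['fwhm'] = 0.3                      # unknown key appends a value
    #     dl.keys()        -> ['amp', 'width', 'fwhm']
    #     dl.values()      -> [1.0, 2.5, 0.3]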
class ModifiableSlider(Slider):
def set_valmin(self, valmin):
"""
Change the minimum value of the slider
"""
self.valmin = valmin
self.ax.set_xlim((self.valmin,self.valmax))
if self.val < self.valmin:
self.set_val(self.valmin)
if self.valinit < self.valmin:
self.valinit = (self.valmax-self.valmin)/2. + self.valmin
if self.vline in self.ax.lines:
self.ax.lines.remove(self.vline)
self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)
def set_valmax(self, valmax):
"""
Change the maximum value of the slider
"""
self.valmax = valmax
self.ax.set_xlim((self.valmin,self.valmax))
if self.val > self.valmax:
self.set_val(self.valmax)
if self.valinit > self.valmax:
self.valinit = (self.valmax-self.valmin)/2. + self.valmin
if self.vline in self.ax.lines:
self.ax.lines.remove(self.vline)
self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)
class FitterSliders(Widget):
"""
A tool to adjust to subplot params of a :class:`matplotlib.figure.Figure`
"""
def __init__(self, specfit, targetfig, npars=1, toolfig=None, parlimitdict={}):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
self.targetfig = targetfig
self.specfit = specfit
self.parlimitdict = parlimitdict
if toolfig is None:
tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
matplotlib.rcParams['toolbar'] = 'None'
self.toolfig = pyplot.figure(figsize=(6,3))
if hasattr(targetfig.canvas.manager,'window'):
self.toolfig.canvas.set_window_title("Fit Sliders for "+targetfig.canvas.manager.window.title())
self.toolfig.subplots_adjust(top=0.9,left=0.2,right=0.9)
matplotlib.rcParams['toolbar'] = tbar
else:
self.toolfig = toolfig
self.toolfig.subplots_adjust(left=0.2, right=0.9)
bax = self.toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
self.set_sliders(parlimitdict)
def reset(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in self.sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in self.sliders:
slider.reset()
# reset drawon
for slider, b in zip(self.sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = self.toolfig.subplotpars.validate
self.toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(reset)
self.toolfig.subplotpars.validate = validate
def clear_sliders(self):
"""
Get rid of the sliders...
"""
try:
for sl in self.sliders:
sl.ax.remove()
except NotImplementedError:
for sl in self.sliders:
self.specfit.Spectrum.plotter.figure.delaxes(sl.ax)
self.specfit.Spectrum.plotter.refresh()
def set_sliders(self, parlimitdict={}):
"""
Set the slider properties, actions, and values
can also reset their limits
"""
def update(value):
mpp = [slider.val for slider in self.sliders]
for line in self.specfit.modelplot:
line.set_ydata(self.specfit.get_model_frompars(line.get_xdata(),mpp))
# update components too
for ii,line in enumerate(self.specfit._plotted_components):
xdata = line.get_xdata()
modelcomponents = self.specfit.fitter.components(xdata, mpp, **self.specfit._component_kwargs)
for jj,data in enumerate(modelcomponents):
if ii % 2 == jj:
# can have multidimensional components
if len(data.shape) > 1:
for d in (data):
line.set_ydata(d)
else:
line.set_ydata(data)
self.specfit.Spectrum.plotter.refresh()
self.sliders = dictlist()
npars = len(self.specfit.parinfo)
for param in self.specfit.parinfo:
name = param['parname']
value = param['value']
limited = param['limited']
limits = param['limits']
# make one less subplot so that there's room for buttons
# param['n'] is zero-indexed, subplots are 1-indexed
ax = self.toolfig.add_subplot(npars+1,1,param['n']+1)
ax.set_navigate(False)
if name in parlimitdict:
limits = parlimitdict[name]
limited = [True,True]
if limited[0]:
vmin = limits[0]
elif value != 0:
vmin = min([value/4.0,value*4.0])
else:
vmin = -1
if limited[1]:
vmax = limits[1]
elif value != 0:
vmax = max([value/4.0,value*4.0])
else:
vmax = 1
self.sliders[name] = ModifiableSlider(ax,
name, vmin, vmax, valinit=value)
self.sliders[-1].on_changed(update)
def get_values(self):
return [s.val for s in self.sliders]
class FitterTools(Widget):
"""
A tool to monitor and play with :class:`pyspeckit.spectrum.fitter` properties
--------------------------
| Baseline range [x,x] |
| Baseline order - |
| (Baseline subtracted) |
| |
| Fitter range [x,x] |
| Fitter type ------- |
| Fitter Guesses [p,w] |
| ... ... |
| ... ... |
| |
| (Fit) (BL fit) (reset) |
--------------------------
"""
def __init__(self, specfit, targetfig, toolfig=None, nsubplots=12):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
self.targetfig = targetfig
self.specfit = specfit
self.baseline = specfit.Spectrum.baseline
self.plotter = specfit.Spectrum.plotter
if toolfig is None:
tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
matplotlib.rcParams['toolbar'] = 'None'
self.toolfig = pyplot.figure(figsize=(6,3))
self.toolfig.canvas.set_window_title("Fit Tools for "+targetfig.canvas.manager.window.title())
self.toolfig.subplots_adjust(top=0.9,left=0.05,right=0.95)
matplotlib.rcParams['toolbar'] = tbar
else:
self.toolfig = toolfig
self.toolfig.subplots_adjust(left=0.0, right=1.0)
#bax = self.toolfig.add_axes([0.6, 0.05, 0.15, 0.075])
#self.buttonrefresh = Button(bax, 'Refresh')
# buttons ruin everything.
# fax = self.toolfig.add_axes([0.1, 0.05, 0.15, 0.075])
# self.buttonfit = Button(fax, 'Fit')
#
# resetax = self.toolfig.add_axes([0.7, 0.05, 0.15, 0.075])
# self.buttonreset = Button(resetax, 'Reset')
# resetblax = self.toolfig.add_axes([0.3, 0.05, 0.15, 0.075])
# self.buttonresetbl = Button(resetblax, 'Reset BL')
# resetfitax = self.toolfig.add_axes([0.5, 0.05, 0.15, 0.075])
# self.buttonresetfit = Button(resetfitax, 'Reset fit')
def refresh(event):
thisdrawon = self.drawon
self.drawon = False
self.update_information()
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
self.targetfig.canvas.draw()
def fit(event):
self.specfit.button3action(event)
def reset_fit(event):
self.specfit.guesses = []
self.specfit.npeaks = 0
self.specfit.includemask[:] = True
self.refresh(event)
def reset_baseline(event):
self.baseline.unsubtract()
self.refresh(event)
def reset(event):
reset_baseline(event)
reset_fit(event)
self.plotter()
self.refresh(event)
# during refresh there can be a temporary invalid state
# depending on the order of the refresh so we turn off
# validation for the refreshting
#validate = self.toolfig.subplotpars.validate
#self.toolfig.subplotpars.validate = False
#self.buttonrefresh.on_clicked(refresh)
#self.toolfig.subplotpars.validate = validate
# these break everything.
# self.buttonfit.on_clicked(fit)
# self.buttonresetfit.on_clicked(reset_fit)
# self.buttonresetbl.on_clicked(reset_baseline)
# self.buttonreset.on_clicked(reset)
#menuitems = []
#for label in ('polynomial','blackbody','log-poly'):
# def on_select(item):
# print 'you selected', item.labelstr
# item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
# on_select=on_select)
# menuitems.append(item)
#menu = Menu(fig, menuitems)
self.axes = [self.toolfig.add_subplot(nsubplots,1,spnum, frame_on=False, navigate=False, xticks=[], yticks=[])
for spnum in xrange(1,nsubplots+1)]
#self.axes = self.toolfig.add_axes([0,0,1,1])
self.use_axes = [0,1,2,4,5,6,7,8,9,10,11]
self.labels = dict([(axnum,None) for axnum in self.use_axes])
self.update_information()
self.targetfig.canvas.mpl_connect('button_press_event',self.refresh)
self.targetfig.canvas.mpl_connect('key_press_event',self.refresh)
self.targetfig.canvas.mpl_connect('draw_event',self.refresh)
def refresh(self, event):
try:
thisdrawon = self.drawon
self.drawon = False
self.update_information()
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
except:
# ALWAYS fail silently
# this is TERRIBLE coding practice, but I have no idea how to tell the object to disconnect
# when the figure is closed
pass
def update_information(self, **kwargs):
self.information = [
("Baseline Range","(%g,%g)" % (self.baseline.xmin,self.baseline.xmax)),
("Baseline Order","%i" % (self.baseline.order)),
("Baseline Subtracted?","%s" % (self.baseline.subtracted)),
("Fitter Range","(%g,%g)" % (self.specfit.xmin,self.specfit.xmax)),
("Fitter Type","%s" % (self.specfit.fittype)),
]
for ii in xrange(self.specfit.npeaks):
guesses = tuple(self.specfit.guesses[ii:ii+3])
if len(guesses) == 3:
self.information += [("Fitter guesses%i:" % ii , "p: %g c: %g w: %g" % guesses) ]
else:
break
self.show_labels(**kwargs)
def show_selected_region(self):
self.specfit.highlight_fitregion()
def show_label(self, axis, text, xloc=0.0, yloc=0.5, **kwargs):
return axis.text(xloc, yloc, text, **kwargs)
def show_value(self, axis, text, xloc=0.5, yloc=0.5, **kwargs):
return axis.text(xloc, yloc, text, **kwargs)
def show_labels(self, **kwargs):
for axnum,(label,text) in zip(self.use_axes, self.information):
if self.labels[axnum] is not None and len(self.labels[axnum]) == 2:
labelobject,textobject = self.labels[axnum]
labelobject.set_label(label)
textobject.set_text(text)
else:
self.labels[axnum] = (self.show_label(self.axes[axnum],label),
self.show_value(self.axes[axnum],text))
def update_info_texts(self):
for newtext,textobject in zip(self.information.values(), self.info_texts):
textobject.set_text(newtext)
#import parinfo
#
#class ParameterButton(parinfo.Parinfo):
# """
# A class to manipulate individual parameter values
# """
# def __init__(self,
407c2a6677c326a7a56789bea899851a9a6a5764 | dda862418770f3885256d96e9bdb13d0759c5f43 | /codeforces/april-fools-day/is_it_rated.py | 2f78bf4c3d6d9df72b4d6880be8c4503b3f93453 | [
"MIT"
] | permissive | bellatrixdatacommunity/data-structure-and-algorithms | d56ec485ebe7a5117d4922caeb0cd44c5dddc96f | d24c4001a797c12347973263a0f4f98939e86900 | refs/heads/master | 2022-12-03T00:51:07.944915 | 2020-08-13T20:30:51 | 2020-08-13T20:30:51 | 270,268,375 | 4 | 0 | MIT | 2020-08-13T20:30:53 | 2020-06-07T10:19:36 | Python | UTF-8 | Python | false | false | 114 | py | """
[A. Is it rated?](https://codeforces.com/contest/1331/problem/A)
"""
print("No") # The contest was not rated
dc518d3adbaa5570a85345dacbb2b97213280b09 | eb35535691c4153ba2a52774f0e40468dfc6383d | /hash_table/uncommon_words.py | 849d39c6b50e9e3e7e62e2067fc6a68f1b0c2178 | [] | no_license | BJV-git/leetcode | 1772cca2e75695b3407bed21af888a006de2e4f3 | dac001f7065c3c5b210024d1d975b01fb6d78805 | refs/heads/master | 2020-04-30T19:04:12.837450 | 2019-03-21T21:56:24 | 2019-03-21T21:56:24 | 177,027,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py |
def uncommon_words(A,B):
A=A.split(' ')
B=B.split(' ')
res=[]
d={}
for i in A:
d[i] = d.get(i,0)+1
for i in B:
d[i] = d.get(i,0)+1
for i in d:
if d[i]==1:
res.append(i)
    return res
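# Quick self-check (cf. LeetCode 884 "Uncommon Words from Two Sentences"):
# a word is uncommon iff it occurs exactly once across both sentences
# combined, which is exactly what the shared counter above detects.
if __name__ == '__main__':
    assert sorted(uncommon_words('this apple is sweet',
                                 'this apple is sour')) == ['sour', 'sweet']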
244746f59dab7356af77d6b088d09be0109e7eea | 5e76a420178dcb9008d6e4c12543ad0e3a50c289 | /python/104.py | 188ebec7d7866ddc2ac4ab6f887b025327467442 | [] | no_license | LichAmnesia/LeetCode | da6b3e883d542fbb3cae698a61750bd2c99658fe | e890bd480de93418ce10867085b52137be2caa7a | refs/heads/master | 2020-12-25T14:22:58.125158 | 2017-07-18T06:44:53 | 2017-07-18T06:44:53 | 67,002,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # -*- coding: utf-8 -*-
# @Author: Lich_Amnesia
# @Email: [email protected]
# @Date: 2016-09-18 17:38:27
# @Last Modified time: 2016-09-18 17:41:20
# @FileName: 104.py
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
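# Minimal self-check (hedged: LeetCode normally supplies TreeNode, mirrored
# here from the commented-out definition above):
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    assert Solution().maxDepth(root) == 3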
1d898f4d7db5808af12b3e9bd413033060f8403f | dfaf6f7ac83185c361c81e2e1efc09081bd9c891 | /k8sdeployment/k8sstat/python/kubernetes/test/test_v1_local_object_reference.py | db02de623a1ffb63d799a47e9d655bb2206d76b9 | [
"MIT",
"Apache-2.0"
] | permissive | JeffYFHuang/gpuaccounting | d754efac2dffe108b591ea8722c831d979b68cda | 2c63a63c571240561725847daf1a7f23f67e2088 | refs/heads/master | 2022-08-09T03:10:28.185083 | 2022-07-20T00:50:06 | 2022-07-20T00:50:06 | 245,053,008 | 0 | 0 | MIT | 2021-03-25T23:44:50 | 2020-03-05T02:44:15 | JavaScript | UTF-8 | Python | false | false | 994 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_local_object_reference import V1LocalObjectReference # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1LocalObjectReference(unittest.TestCase):
"""V1LocalObjectReference unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1LocalObjectReference(self):
"""Test V1LocalObjectReference"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_local_object_reference.V1LocalObjectReference() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
d301667e9da5f7d349fdf435dc6c5bdd2dd9d67e | 46bd3e3ba590785cbffed5f044e69f1f9bafbce5 | /env/lib/python3.8/site-packages/pip/_vendor/pep517/envbuild.py | 7e6160fc539bc7bd382d6a660739256889eb380f | [] | no_license | adamkluk/casper-getstarted | a6a6263f1547354de0e49ba2f1d57049a5fdec2b | 01e846621b33f54ed3ec9b369e9de3872a97780d | refs/heads/master | 2023-08-13T11:04:05.778228 | 2021-09-19T22:56:59 | 2021-09-19T22:56:59 | 408,036,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:2dc493d0c01299c40d2ce16a0cfc43a12d648e4825c7c17a784868049f835a48
size 6112
| [
"[email protected]"
] | |
7be171b3c6ccd20d4e7c354d4e4620d1a88c649d | fa1faa5c480ba249fbec18c0fb79b696d6b4bdf9 | /4 - Arrays/RemoveKDigits.py | 2c3dd044de47a9f8f777661c108947dbbc7b6b7f | [] | no_license | AbhiniveshP/CodeBreakersCode | 10dad44c82be352d7e984ba6b7296a7324f01713 | 7dabfe9392d74ec65a5811271b5b0845c3667848 | refs/heads/master | 2022-11-14T11:58:24.364934 | 2020-07-11T22:34:04 | 2020-07-11T22:34:04 | 268,859,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | class Solution:
# Time: O(N) --> a max of double visit
# Space: O(N)
def removeKdigits(self, num: str, k: int) -> str:
stack = []
# before pushing a digit to stack, take care that it is monotonically increasing stack, also k > 0 and stack not empty
for i in range(len(num)):
currentNumber = int(num[i])
while (len(stack) > 0 and k > 0 and currentNumber < stack[-1]):
stack.pop()
k -= 1
stack.append(currentNumber)
# as stack is monotonically increasing => we can pop all lastly added elements until k <= 0
while (k > 0):
stack.pop()
k -= 1
# remove all leading zeros
cursor = 0
while (cursor < len(stack)):
if (stack[cursor] != 0):
break
cursor += 1
stack = stack[cursor:]
# edge case
if (len(stack) == 0):
return '0'
# now join the stack again
        return ''.join([str(n) for n in stack])
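# Quick self-check (examples from LeetCode 402 "Remove K Digits"):
if __name__ == '__main__':
    s = Solution()
    assert s.removeKdigits('1432219', 3) == '1219'
    assert s.removeKdigits('10200', 1) == '200'  # leading zeros are stripped
    assert s.removeKdigits('10', 2) == '0'       # empty result maps to "0"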
20900db7b1b8044e1bf0b27b91907868005a426c | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayInsSceneSellerActivitySignModel.py | 4ef2bcff18867f0f8ba427a6a7c71a574c386b9c | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,623 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsSceneSellerActivitySignModel(object):
def __init__(self):
self._biz_data = None
self._channel_account_id = None
self._channel_account_type = None
self._sp_code = None
@property
def biz_data(self):
return self._biz_data
@biz_data.setter
def biz_data(self, value):
self._biz_data = value
@property
def channel_account_id(self):
return self._channel_account_id
@channel_account_id.setter
def channel_account_id(self, value):
self._channel_account_id = value
@property
def channel_account_type(self):
return self._channel_account_type
@channel_account_type.setter
def channel_account_type(self, value):
self._channel_account_type = value
@property
def sp_code(self):
return self._sp_code
@sp_code.setter
def sp_code(self, value):
self._sp_code = value
def to_alipay_dict(self):
params = dict()
if self.biz_data:
if hasattr(self.biz_data, 'to_alipay_dict'):
params['biz_data'] = self.biz_data.to_alipay_dict()
else:
params['biz_data'] = self.biz_data
if self.channel_account_id:
if hasattr(self.channel_account_id, 'to_alipay_dict'):
params['channel_account_id'] = self.channel_account_id.to_alipay_dict()
else:
params['channel_account_id'] = self.channel_account_id
if self.channel_account_type:
if hasattr(self.channel_account_type, 'to_alipay_dict'):
params['channel_account_type'] = self.channel_account_type.to_alipay_dict()
else:
params['channel_account_type'] = self.channel_account_type
if self.sp_code:
if hasattr(self.sp_code, 'to_alipay_dict'):
params['sp_code'] = self.sp_code.to_alipay_dict()
else:
params['sp_code'] = self.sp_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsSceneSellerActivitySignModel()
if 'biz_data' in d:
o.biz_data = d['biz_data']
if 'channel_account_id' in d:
o.channel_account_id = d['channel_account_id']
if 'channel_account_type' in d:
o.channel_account_type = d['channel_account_type']
if 'sp_code' in d:
o.sp_code = d['sp_code']
return o
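# Usage sketch (hedged: the field values below are invented; this only
# round-trips the converters defined above):
if __name__ == '__main__':
    payload = {
        'biz_data': '{"activity_id": "A001"}',
        'channel_account_id': '2088000000000001',
        'channel_account_type': 'ALIPAY_UID',
        'sp_code': 'SP001',
    }
    model = AlipayInsSceneSellerActivitySignModel.from_alipay_dict(payload)
    assert model.to_alipay_dict() == payload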
d6e1af3c1f70472c05f440c578e0bb66519b95d3 | 205d581673e3960c99e6b8fe1475efb661421cb3 | /bikeshed/update/main.py | 1be2b76b3f73f81060b4b4fa57d6141ebd24f5e6 | [
"CC0-1.0"
] | permissive | TBBle/bikeshed | 08f9137f7a561d154720297b76ced061cdd6a04a | 5834a15f311a639c0b59ff2edbf3a060391d15ff | refs/heads/master | 2021-01-12T18:33:43.213471 | 2017-09-29T20:56:24 | 2017-09-29T20:56:24 | 81,327,888 | 0 | 0 | null | 2017-02-08T12:30:22 | 2017-02-08T12:30:21 | null | UTF-8 | Python | false | false | 3,886 | py | # -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import os
from . import updateCrossRefs
from . import updateBiblio
from . import updateCanIUse
from . import updateLinkDefaults
from . import updateTestSuites
from . import updateLanguages
from . import manifest
from .. import config
from ..messages import *
def update(anchors=False, biblio=False, caniuse=False, linkDefaults=False, testSuites=False, languages=False, path=None, dryRun=False, force=False):
if path is None:
path = config.scriptPath("spec-data")
# Update via manifest by default, falling back to a full update only if failed or forced.
if not force:
success = manifest.updateByManifest(path=path, dryRun=dryRun)
if not success:
say("Falling back to a manual update...")
force = True
if force:
# If all are False, update everything
updateAnyway = not (anchors or biblio or caniuse or linkDefaults or testSuites or languages)
if anchors or updateAnyway:
updateCrossRefs.update(path=path, dryRun=dryRun)
if biblio or updateAnyway:
updateBiblio.update(path=path, dryRun=dryRun)
if caniuse or updateAnyway:
updateCanIUse.update(path=path, dryRun=dryRun)
if linkDefaults or updateAnyway:
updateLinkDefaults.update(path=path, dryRun=dryRun)
if testSuites or updateAnyway:
updateTestSuites.update(path=path, dryRun=dryRun)
if languages or updateAnyway:
updateLanguages.update(path=path, dryRun=dryRun)
manifest.createManifest(path=path, dryRun=dryRun)
def fixupDataFiles():
'''
    Checks whether the readonly/ version is more recent than your current mutable data files.
This happens if I changed the datafile format and shipped updated files as a result;
using the legacy files with the new code is quite bad!
'''
try:
localVersion = int(open(localPath("version.txt"), 'r').read())
except IOError:
localVersion = None
try:
remoteVersion = int(open(remotePath("version.txt"), 'r').read())
except IOError, err:
warn("Couldn't check the datafile version. Bikeshed may be unstable.\n{0}", err)
return
if localVersion == remoteVersion:
# Cool
return
# If versions don't match, either the remote versions have been updated
# (and we should switch you to them, because formats may have changed),
# or you're using a historical version of Bikeshed (ditto).
try:
for filename in os.listdir(remotePath()):
copyanything(remotePath(filename), localPath(filename))
except Exception, err:
warn("Couldn't update datafiles from cache. Bikeshed may be unstable.\n{0}", err)
return
def updateReadonlyDataFiles():
'''
Like fixupDataFiles(), but in the opposite direction --
copies all my current mutable data files into the readonly directory.
This is a debugging tool to help me quickly update the built-in data files,
and will not be called as part of normal operation.
'''
try:
for filename in os.listdir(localPath()):
if filename.startswith("readonly"):
continue
copyanything(localPath(filename), remotePath(filename))
except Exception, err:
warn("Error copying over the datafiles:\n{0}", err)
return
def copyanything(src, dst):
import shutil
import errno
try:
shutil.rmtree(dst, ignore_errors=True)
shutil.copytree(src, dst)
except OSError as exc:
if exc.errno in [errno.ENOTDIR, errno.EINVAL]:
shutil.copy(src, dst)
else:
raise
def localPath(*segs):
return config.scriptPath("spec-data", *segs)
def remotePath(*segs):
return config.scriptPath("spec-data", "readonly", *segs)
3183747cd1835046d97a500fd56fc5a714d8f69c | f90a30cfafc5d786a3dc269f3ca48dce3fc59028 | /Payload_Types/apfell/mythic/agent_functions/iterm.py | 94b35b48c3156d56770b68fba7a567e64efb0415 | [
"BSD-3-Clause",
"MIT"
] | permissive | NotoriousRebel/Mythic | 93026df4a829b7b88de814e805fdce0ab19f3ab9 | 4576654af4025b124edb88f9cf9d0821f0b73070 | refs/heads/master | 2022-12-03T01:19:20.868900 | 2020-08-18T03:48:55 | 2020-08-18T03:48:55 | 288,780,757 | 1 | 0 | NOASSERTION | 2020-08-19T16:20:19 | 2020-08-19T16:20:18 | null | UTF-8 | Python | false | false | 920 | py | from CommandBase import *
import json
class ITermArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {}
async def parse_arguments(self):
pass
class ITermCommand(CommandBase):
cmd = "iTerm"
needs_admin = False
help_cmd = "iTerm"
description = "Read the contents of all open iTerm tabs if iTerms is open, otherwise just inform the operator that it's not currently running"
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_remove_file = False
is_upload_file = False
author = "@its_a_feature_"
attackmapping = ["T1139", "T1056"]
argument_class = ITermArguments
async def create_tasking(self, task: MythicTask) -> MythicTask:
return task
async def process_response(self, response: AgentResponse):
        pass
62f15e21cc7da0172f76ec0118796903115796ca | 4944541b0cd0fa48a01581ffce5e7ce16f5cf8d7 | /src/Backend/MbkExam/Notification/serializers.py | a64b1c49829f6af25ac8f32051e5c5e42e2348cb | [] | no_license | aballah-chamakh/the_exam | 49a5b5c9d28c61b2283f2d42d2b2fb771dd48bf4 | dbbbdc7a955ca61572f26430a7788407eaf0c632 | refs/heads/main | 2023-03-28T13:19:18.148630 | 2021-04-03T22:12:51 | 2021-04-03T22:12:51 | 354,404,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | from rest_framework import serializers
from .models import AdminNotification,StudentNotification
class AdminNotificationSerializer(serializers.ModelSerializer):
student_username = serializers.CharField(source='student.user.username')
student_img = serializers.CharField(source="student.image.url")
student_slug = serializers.SlugField(source="student.slug")
student_email = serializers.CharField(source="student.user.email")
    class Meta:
model = AdminNotification
fields = ('student_email','student_img','student_username',"student_slug",'event_type','event_msg','event_slug','datetime','viewed')
class StudentNotificationSerializer(serializers.ModelSerializer):
student_slug = serializers.SlugField(source="student.slug")
    class Meta:
model = StudentNotification
        fields = ('student_slug','event_type','event_msg','event_slug','datetime','viewed')
4c800d767661ee69f80d462a929fd68be4f8b58f | a39dbda2d9f93a126ffb189ec51a63eb82321d64 | /mongoengine/queryset/__init__.py | 026a7acdd533719065dcc1c7c1955565b13d6f6f | [
"MIT"
] | permissive | closeio/mongoengine | 6e22ec67d991ea34c6fc96e9b29a9cbfa945132b | b083932b755a9a64f930a4a98b0129f40f861abe | refs/heads/master | 2023-04-30T04:04:52.763382 | 2023-04-20T07:13:41 | 2023-04-20T07:13:41 | 5,533,627 | 21 | 5 | MIT | 2023-04-20T07:13:42 | 2012-08-23T23:02:20 | Python | UTF-8 | Python | false | false | 525 | py | from mongoengine.errors import (DoesNotExist, MultipleObjectsReturned,
InvalidQueryError, OperationError,
NotUniqueError)
from mongoengine.queryset.field_list import *
from mongoengine.queryset.manager import *
from mongoengine.queryset.queryset import *
from mongoengine.queryset.transform import *
from mongoengine.queryset.visitor import *
__all__ = (field_list.__all__ + manager.__all__ + queryset.__all__ +
transform.__all__ + visitor.__all__)
da0f752f37d66f5033607317460320c51b7d99e2 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_write_f/vpc_create.py | 72acec139ecc5774ba67c1d8199de44fc116c546 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-vpc.html
if __name__ == '__main__':
"""
delete-vpc : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-vpc.html
describe-vpcs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-vpcs.html
"""
    write_parameter("ec2", "create-vpc")
eb4921718ea76bd76fd0d09bef6d3040445b07fe | bfd6ac084fcc08040b94d310e6a91d5d804141de | /PulseSequences2/multi2d_test2.py | 1609e844e7a2e84b959142d2d35d97635fe46e69 | [] | no_license | jqwang17/HaeffnerLabLattice | 3b1cba747b8b62cada4467a4ea041119a7a68bfa | 03d5bedf64cf63efac457f90b189daada47ff535 | refs/heads/master | 2020-12-07T20:23:32.251900 | 2019-11-11T19:26:41 | 2019-11-11T19:26:41 | 232,792,450 | 1 | 0 | null | 2020-01-09T11:23:28 | 2020-01-09T11:23:27 | null | UTF-8 | Python | false | false | 671 | py | import numpy as np
from common.devel.bum.sequences.pulse_sequence import pulse_sequence
from labrad.units import WithUnit as U
from treedict import TreeDict
from common.client_config import client_info as cl
from multi_test import multi_test
class multi2d_test2(pulse_sequence):
is_2dimensional = True
is_composite = True
show_params = ['NSY.pi_time']
scannable_params = {
'Heating.background_heating_time': [(0., 5000., 500., 'us'), 'current']
}
fixed_params = {'StateReadout.ReadoutMode':'pmt'}
sequence = multi_test
@classmethod
def run_finally(cls, cxn, parameter_dct, all_data, data_x):
return 0.1
5fda096a90541b4f8f01c8692ee9f34c6977c70a | b40a140a911279f3c61737367ab8f3b7c15fe98b | /avakas/get_parameters_file.py | 6f6976a02b4d1dc3baa10e6796e10d3f55ed8aa2 | [] | no_license | AurelienNioche/HotellingBathtub | 80fef9b4106454ec339a6c106c52738f1e95e77b | 5b370a20b1d2417022fd2a6de8a7a4baeeda321e | refs/heads/master | 2021-05-06T13:02:04.130850 | 2018-02-16T22:47:01 | 2018-02-16T22:47:01 | 113,213,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | import os
def get_parameters_file(i):
parameters_files = sorted(
[os.path.join("tasks", f)
for f in os.listdir("tasks") if os.path.isfile(os.path.join("tasks", f))])
return parameters_files[i]
7c238c319c6f6d8ba62cadcb28faf56b3f32ab3b | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /AtCoder/AtCoder Beginner Contest 247/B.py | 973864707113b363529868eab237a721c0f7de7b | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from collections import defaultdict
n = int(raw_input())
d1 = defaultdict(int)
d2 = defaultdict(int)
names = []
for i in xrange(n):
name1, name2 = raw_input().split()
d1[name1] += 1
d2[name2] += 1
names.append((name1, name2))
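# d1/d2 now hold how often each string occurs as a surname / given name
# across all people. Person i may go by nickname x only if nobody else uses
# x in either column; the branches below check this for the name1 == name2
# and name1 != name2 cases (ABC 247 B "Unique Nicknames").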
flag = True
for (name1, name2) in names:
if name1 == name2:
if d1[name1] > 1 or d2[name1] > 1:
flag = False
break
else:
if ((d1[name1] <= 1 and d2[name1] == 0) or
(d1[name2] == 0 and d2[name2] <= 1)):
pass
else:
flag = False
break
if flag:
print 'Yes'
else:
print 'No'
'''
^^^^TEST^^^^
3
tanaka taro
tanaka jiro
suzuki hanako
-----
Yes
$$$TEST$$$
^^^^TEST^^^^
3
aaa bbb
xxx aaa
bbb yyy
-----
No
$$$TEST$$$
^^^^TEST^^^^
2
tanaka taro
tanaka taro
-----
No
$$$TEST$$$
^^^^TEST^^^^
3
takahashi chokudai
aoki kensho
snu ke
-----
Yes
$$$TEST$$$
^^^^TEST^^^^
3
a a
b b
c a
-----
No
$$$TEST$$$
'''
8987a79b8238e079d6527786951d545fffd1ab1c | f1614f3531701a29a33d90c31ab9dd6211c60c6b | /test/menu_sun_integration/infrastructure/aws/sqs/mocks/customer_mock.py | a7b78f7010ca6a18c5de255b002fa7e7ea1d8312 | [] | no_license | pfpacheco/menu-sun-api | 8a1e11543b65db91d606b2f3098847e3cc5f2092 | 9bf2885f219b8f75d39e26fd61bebcaddcd2528b | refs/heads/master | 2022-12-29T13:59:11.644409 | 2020-10-16T03:41:54 | 2020-10-16T03:41:54 | 304,511,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,321 | py | def mock_queue_make_api_call(self, operation_name, kwarg):
if operation_name == 'SendMessage':
return {'MD5OfMessageBody': 'a836c42e687e8a08e66a794a5dacd8c1',
'MessageId': '85e8a505-2ba4-4fa3-a93c-cc30bf5e65e7',
'ResponseMetadata': {'RequestId': '7313c686-bca3-5d79-9295-90a51d270c9c',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'x-amzn-requestid': '7313c686-bca3-5d79-9295-90a51d270c9c',
'date': 'Fri, 18 Oct 2019 11:17:24 GMT',
'content-type': 'text/xml', 'content-length': '378'},
'RetryAttempts': 0}}
if operation_name == 'ReceiveMessage':
return {'Messages': [{'MessageId': '92de7972-f8e5-4998-a182-3977455f8cb0',
'ReceiptHandle': 'AQEBWvhuG9mMCVO0LE7k'
'+flexfAzfGFn4yGRI5Xm60pwu1RwlGot4GqWveL1tOYmUTM63bwR+OFj5CL'
'/e1ZchKlZ0DTF6rc9Q+pyNdbIKckaVrfgbYySsZDkr68AtoWzFoIf0U68SUO83ys0ydK'
'+TSHgpw38zKICpupwccqe67HDu2Vve6ATFtjHa10+w3fU6l63NRFnmNeDjuDw'
'/uq86s0puouRFHQmoeNlLg'
'/5wjlT1excIDKxlIvJFBoc420ZgxulvIOcblqUxcGIG6Ah6x3aJw27q14vT'
'+0wRi9aoQ8dG0ys57OeWjlRRG3UII1J5uiShet9F15CKF3GZatNEZOOXkIqdQO'
'+lMHIhwMt7wls2EMtVO4KFIdWokzIFhidzfAHMTANCoAD26gUsp2Z9UyZaA==',
'MD5OfBody': 'a836c42e687e8a08e66a794a5dacd8c1',
'Body': '{"integration_type": "BRF","seller_id": 1,"seller_code": "ABC",'
'"document": "00005234000121",'
'"cep": "09185030",'
'"credit_limit": "103240.72",'
'"customer_id": "1",'
'"payment_terms":['
'{"deadline": 5,"description": "Payment 5","payment_type": "BOLETO"},'
'{"deadline": 10,"description": "Payment 10","payment_type": "CHEQUE"}],'
'"seller_metafields": [{"namespace": "CODIGO_PAGAMENTO","key": "BOLETO_7",'
'"value": "007"},{"namespace": "CODIGO_PAGAMENTO","key": "BOLETO_14",'
'"value": "014"}],'
'"customer_metafields": [{"namespace": "Customer Namespace 1",'
'"key": "Customer Key 1",'
'"value": "Customer VALUE 1"},{"namespace": "Customer Namespace 2",'
'"key": "Customer Key 2","value": "Customer VALUE 2"}]}'},
],
'ResponseMetadata': {'RequestId': '0ffbdfb3-809f-539e-84dd-899024785f25',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'x-amzn-requestid': '0ffbdfb3-809f-539e-84dd-899024785f25',
'date': 'Fri, 18 Oct 2019 11:31:51 GMT',
'content-type': 'text/xml',
'content-length': '892'}, 'RetryAttempts': 0}}
if operation_name == 'DeleteMessage':
return {'MD5OfMessageBody': 'a836c42e687e8a08e66a794a5dacd8c1',
'ResponseMetadata': {'RequestId': '7313c686-bca3-5d79-9295-90a51d270c9c',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'x-amzn-requestid': '7313c686-bca3-5d79-9295-90a51d270c9c',
'date': 'Fri, 18 Oct 2019 11:17:24 GMT',
'content-type': 'text/xml', 'content-length': '378'},
'RetryAttempts': 0}}
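# Usage sketch (comments only): the function above mirrors botocore's
# private BaseClient._make_api_call(self, operation_name, api_params) hook,
# so a test would typically patch it in with unittest.mock, e.g.:
#
#     from unittest import mock
#     import boto3
#
#     with mock.patch('botocore.client.BaseClient._make_api_call',
#                     new=mock_queue_make_api_call):
#         sqs = boto3.client('sqs', region_name='us-east-1')
#         resp = sqs.receive_message(QueueUrl='any-url')
#         body = resp['Messages'][0]['Body']  # canned payload from above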
04481c8e9c3a8ab5864fbd9d4073e09189de4c58 | 0953f9aa0606c2dfb17cb61b84a4de99b8af6d2c | /python/ray/serve/http_proxy.py | e129f5d60cab56228bd2a379ba2a9be0ab162c29 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | oscarknagg/ray | da3dc03e24945ff4d5718fd35fc1b3408d8907eb | 20d47873c9e8f5bbb80fe36e5d16256c337c4db3 | refs/heads/master | 2023-09-01T01:45:26.364731 | 2021-10-21T07:46:52 | 2021-10-21T07:46:52 | 382,402,491 | 2 | 1 | Apache-2.0 | 2021-09-15T12:34:41 | 2021-07-02T16:25:05 | Python | UTF-8 | Python | false | false | 13,432 | py | import asyncio
import socket
import time
import pickle
from typing import Callable, List, Dict, Optional, Tuple
import uvicorn
import starlette.responses
import starlette.routing
import ray
from ray import serve
from ray.exceptions import RayActorError, RayTaskError
from ray.serve.common import EndpointInfo, EndpointTag
from ray.serve.long_poll import LongPollNamespace
from ray.util import metrics
from ray.serve.utils import logger
from ray.serve.handle import RayServeHandle
from ray.serve.http_util import HTTPRequestWrapper, receive_http_body, Response
from ray.serve.long_poll import LongPollClient
from ray.serve.handle import DEFAULT
MAX_REPLICA_FAILURE_RETRIES = 10
async def _send_request_to_handle(handle, scope, receive, send):
http_body_bytes = await receive_http_body(scope, receive, send)
headers = {k.decode(): v.decode() for k, v in scope["headers"]}
handle = handle.options(
method_name=headers.get("X-SERVE-CALL-METHOD".lower(), DEFAULT.VALUE),
shard_key=headers.get("X-SERVE-SHARD-KEY".lower(), DEFAULT.VALUE),
http_method=scope["method"].upper(),
http_headers=headers,
)
# scope["router"] and scope["endpoint"] contain references to a router
# and endpoint object, respectively, which each in turn contain a
# reference to the Serve client, which cannot be serialized.
# The solution is to delete these from scope, as they will not be used.
# TODO(edoakes): this can be removed once we deprecate the old API.
if "router" in scope:
del scope["router"]
if "endpoint" in scope:
del scope["endpoint"]
# NOTE(edoakes): it's important that we defer building the starlette
# request until it reaches the backend replica to avoid unnecessary
# serialization cost, so we use a simple dataclass here.
request = HTTPRequestWrapper(scope, http_body_bytes)
# Perform a pickle here to improve latency. Stdlib pickle for simple
# dataclasses are 10-100x faster than cloudpickle.
request = pickle.dumps(request)
retries = 0
backoff_time_s = 0.05
    while retries < MAX_REPLICA_FAILURE_RETRIES:
        object_ref = await handle.remote(request)
        try:
            result = await object_ref
            break
        except RayActorError:
            logger.warning("Request failed due to replica failure. There are "
                           f"{MAX_REPLICA_FAILURE_RETRIES - retries} retries "
                           "remaining.")
            await asyncio.sleep(backoff_time_s)
            backoff_time_s *= 2
            retries += 1
    else:
        # All retries were exhausted without a successful result; without
        # this guard the `result` reference below would raise NameError.
        error_message = (f"Request failed after {MAX_REPLICA_FAILURE_RETRIES} "
                         "retries due to replica failures.")
        await Response(error_message, status_code=500).send(
            scope, receive, send)
        return
if isinstance(result, RayTaskError):
error_message = "Task Error. Traceback: {}.".format(result)
await Response(
error_message, status_code=500).send(scope, receive, send)
elif isinstance(result, starlette.responses.Response):
await result(scope, receive, send)
else:
await Response(result).send(scope, receive, send)
class LongestPrefixRouter:
"""Router that performs longest prefix matches on incoming routes."""
def __init__(self, get_handle: Callable):
# Function to get a handle given a name. Used to mock for testing.
self._get_handle = get_handle
# Routes sorted in order of decreasing length.
self.sorted_routes: List[str] = list()
# Endpoints associated with the routes.
self.route_info: Dict[str, EndpointTag] = dict()
# Contains a ServeHandle for each endpoint.
self.handles: Dict[str, RayServeHandle] = dict()
def endpoint_exists(self, endpoint: EndpointTag) -> bool:
return endpoint in self.handles
def update_routes(self,
endpoints: Dict[EndpointTag, EndpointInfo]) -> None:
logger.debug(f"Got updated endpoints: {endpoints}.")
existing_handles = set(self.handles.keys())
routes = []
route_info = {}
for endpoint, info in endpoints.items():
# Default case where the user did not specify a route prefix.
if info.route is None:
route = f"/{endpoint}"
else:
route = info.route
routes.append(route)
route_info[route] = endpoint
if endpoint in self.handles:
existing_handles.remove(endpoint)
else:
self.handles[endpoint] = self._get_handle(endpoint)
# Clean up any handles that are no longer used.
for endpoint in existing_handles:
del self.handles[endpoint]
# Routes are sorted in order of decreasing length to enable longest
# prefix matching.
self.sorted_routes = sorted(routes, key=lambda x: len(x), reverse=True)
self.route_info = route_info
def match_route(self, target_route: str
) -> Tuple[Optional[str], Optional[RayServeHandle]]:
"""Return the longest prefix match among existing routes for the route.
Args:
target_route (str): route to match against.
Returns:
(matched_route (str), serve_handle (RayServeHandle)) if found,
else (None, None).
"""
for route in self.sorted_routes:
if target_route.startswith(route):
matched = False
# If the route we matched on ends in a '/', then so does the
# target route and this must be a match.
if route.endswith("/"):
matched = True
# If the route we matched on doesn't end in a '/', we need to
# do another check to ensure that either this is an exact match
# or the next character in the target route is a '/'. This is
# to guard against the scenario where we have '/route' as a
# prefix and there's a request to '/routesuffix'. In this case,
# it should *not* be a match.
elif (len(target_route) == len(route)
or target_route[len(route)] == "/"):
matched = True
if matched:
endpoint = self.route_info[route]
return route, self.handles[endpoint]
return None, None
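    # Matching sketch: with routes ["/app", "/app/sub"] (tried longest
    # first), a request for "/app/subpage" does not match "/app/sub" --
    # the character after that prefix is "p", not "/" -- but does match
    # "/app". A request for "/app/sub/page" matches "/app/sub" directly.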
class HTTPProxy:
"""This class is meant to be instantiated and run by an ASGI HTTP server.
>>> import uvicorn
>>> uvicorn.run(HTTPProxy(controller_name, controller_namespace))
"""
def __init__(self, controller_name: str, controller_namespace: str):
# Set the controller name so that serve will connect to the
# controller instance this proxy is running in.
ray.serve.api._set_internal_replica_context(None, None,
controller_name, None)
# Used only for displaying the route table.
self.route_info: Dict[str, EndpointTag] = dict()
def get_handle(name):
return serve.api._get_global_client().get_handle(
name,
sync=False,
missing_ok=True,
_internal_pickled_http_request=True,
)
self.prefix_router = LongestPrefixRouter(get_handle)
self.long_poll_client = LongPollClient(
ray.get_actor(controller_name, namespace=controller_namespace), {
LongPollNamespace.ROUTE_TABLE: self._update_routes,
},
call_in_event_loop=asyncio.get_event_loop())
self.request_counter = metrics.Counter(
"serve_num_http_requests",
description="The number of HTTP requests processed.",
tag_keys=("route", ))
def _update_routes(self,
endpoints: Dict[EndpointTag, EndpointInfo]) -> None:
self.route_info: Dict[str, Tuple[EndpointTag, List[str]]] = dict()
for endpoint, info in endpoints.items():
route = info.route if info.route is not None else f"/{endpoint}"
self.route_info[route] = endpoint
self.prefix_router.update_routes(endpoints)
async def block_until_endpoint_exists(self, endpoint: EndpointTag,
timeout_s: float):
start = time.time()
while True:
if time.time() - start > timeout_s:
raise TimeoutError(
f"Waited {timeout_s} for {endpoint} to propagate.")
for existing_endpoint in self.route_info.values():
if existing_endpoint == endpoint:
return
await asyncio.sleep(0.2)
async def _not_found(self, scope, receive, send):
current_path = scope["path"]
response = Response(
f"Path '{current_path}' not found. "
"Please ping http://.../-/routes for route table.",
status_code=404)
await response.send(scope, receive, send)
async def __call__(self, scope, receive, send):
"""Implements the ASGI protocol.
See details at:
https://asgi.readthedocs.io/en/latest/specs/index.html.
"""
assert scope["type"] == "http"
self.request_counter.inc(tags={"route": scope["path"]})
if scope["path"] == "/-/routes":
return await starlette.responses.JSONResponse(self.route_info)(
scope, receive, send)
route_prefix, handle = self.prefix_router.match_route(scope["path"])
if route_prefix is None:
return await self._not_found(scope, receive, send)
# Modify the path and root path so that reverse lookups and redirection
# work as expected. We do this here instead of in replicas so it can be
# changed without restarting the replicas.
if route_prefix != "/":
assert not route_prefix.endswith("/")
scope["path"] = scope["path"].replace(route_prefix, "", 1)
scope["root_path"] = route_prefix
await _send_request_to_handle(handle, scope, receive, send)
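    # Worked example of the rewrite above (added commentary): for a request to
    # "/app/docs" matched by route_prefix "/app", the downstream replica sees
    # scope["path"] == "/docs" and scope["root_path"] == "/app", so redirects
    # and reverse URL lookups still resolve under the "/app" prefix.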
@ray.remote(num_cpus=0)
class HTTPProxyActor:
def __init__(self,
host: str,
port: int,
controller_name: str,
controller_namespace: str,
http_middlewares: Optional[List[
"starlette.middleware.Middleware"]] = None): # noqa: F821
if http_middlewares is None:
http_middlewares = []
self.host = host
self.port = port
self.setup_complete = asyncio.Event()
self.app = HTTPProxy(controller_name, controller_namespace)
self.wrapped_app = self.app
for middleware in http_middlewares:
self.wrapped_app = middleware.cls(self.wrapped_app,
**middleware.options)
# Start running the HTTP server on the event loop.
# This task should be running forever. We track it in case of failure.
self.running_task = asyncio.get_event_loop().create_task(self.run())
async def ready(self):
"""Returns when HTTP proxy is ready to serve traffic.
Or throw exception when it is not able to serve traffic.
"""
done_set, _ = await asyncio.wait(
[
# Either the HTTP setup has completed.
# The event is set inside self.run.
self.setup_complete.wait(),
# Or self.run errored.
self.running_task,
],
return_when=asyncio.FIRST_COMPLETED)
# Return None, or re-throw the exception from self.running_task.
return await done_set.pop()
async def block_until_endpoint_exists(self, endpoint: EndpointTag,
timeout_s: float):
await self.app.block_until_endpoint_exists(endpoint, timeout_s)
async def run(self):
sock = socket.socket()
        # These two socket options will allow multiple processes to bind to the
# same port. Kernel will evenly load balance among the port listeners.
# Note: this will only work on Linux.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
try:
sock.bind((self.host, self.port))
except OSError:
# The OS failed to bind a socket to the given host and port.
raise ValueError(
f"""Failed to bind Ray Serve HTTP proxy to '{self.host}:{self.port}'.
Please make sure your http-host and http-port are specified correctly.""")
# Note(simon): we have to use lower level uvicorn Config and Server
# class because we want to run the server as a coroutine. The only
# alternative is to call uvicorn.run which is blocking.
config = uvicorn.Config(
self.wrapped_app,
host=self.host,
port=self.port,
lifespan="off",
access_log=False)
server = uvicorn.Server(config=config)
# TODO(edoakes): we need to override install_signal_handlers here
# because the existing implementation fails if it isn't running in
# the main thread and uvicorn doesn't expose a way to configure it.
server.install_signal_handlers = lambda: None
self.setup_complete.set()
await server.serve(sockets=[sock])
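# Minimal standalone sketch of the SO_REUSEPORT pattern used in run() above
# (added illustration; not part of the original module). Several processes may
# each create such a socket for the same host/port, and on Linux the kernel
# load-balances incoming connections among them:
#
#   import socket
#
#   def make_shared_socket(host, port):
#       sock = socket.socket()
#       sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#       if hasattr(socket, "SO_REUSEPORT"):  # not available on every platform
#           sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
#       sock.bind((host, port))
#       return sock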
| [
"[email protected]"
] | |
84bd69b3aecc431f55e1f816dbfe988f0e2443fc | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/mlxnas004/question1.py | 615d9525446c91fd8b2b6c646c028b7d0a290c6e | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #nasha meoli
#mlxnas004
#leap year
x = eval(input("Enter a year:\n"))
condition_1 = x%400
condition_2 = x%4
condition_3 = x%100
if (condition_1 == 0) or ((condition_2 == 0) and (condition_3 >= 1)):
print(x,"is a leap year.")
else:
print(x,"is not a leap year.") | [
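# Sanity checks (added examples, not part of the submitted exercise):
#   2000 -> divisible by 400             -> leap year
#   1900 -> divisible by 100 but not 400 -> not a leap year
#   2024 -> divisible by 4, not by 100   -> leap year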
"[email protected]"
] | |
b796b20a4d9e957f27a98c703b071bbc111e9bde | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/ancpvlanrange_58418cab117460d5be96e7c24e4e1bfb.py | 00c591b0ead7d391ee148ba1bd8b5a0ea079d425 | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 12,400 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AncpVlanRange(Base):
"""
The AncpVlanRange class encapsulates a required ancpVlanRange resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'ancpVlanRange'
def __init__(self, parent):
super(AncpVlanRange, self).__init__(parent)
@property
def VlanIdInfo(self):
"""An instance of the VlanIdInfo class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_afba627c0a86f7bdccdbbac157859f9e.VlanIdInfo)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_afba627c0a86f7bdccdbbac157859f9e import VlanIdInfo
return VlanIdInfo(self)
@property
def Enabled(self):
"""Disabled ranges won't be configured nor validated.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def FirstId(self):
"""DEPRECATED The first ID to be used for the first VLAN tag.
Returns:
number
"""
return self._get_attribute('firstId')
@FirstId.setter
def FirstId(self, value):
self._set_attribute('firstId', value)
@property
def IdIncrMode(self):
"""Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
Returns:
number
"""
return self._get_attribute('idIncrMode')
@IdIncrMode.setter
def IdIncrMode(self, value):
self._set_attribute('idIncrMode', value)
@property
def Increment(self):
"""DEPRECATED Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
Returns:
number
"""
return self._get_attribute('increment')
@Increment.setter
def Increment(self, value):
self._set_attribute('increment', value)
@property
def IncrementStep(self):
"""DEPRECATED Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
Returns:
number
"""
return self._get_attribute('incrementStep')
@IncrementStep.setter
def IncrementStep(self, value):
self._set_attribute('incrementStep', value)
@property
def InnerEnable(self):
"""DEPRECATED Enable the inner VLAN.
Returns:
bool
"""
return self._get_attribute('innerEnable')
@InnerEnable.setter
def InnerEnable(self, value):
self._set_attribute('innerEnable', value)
@property
def InnerFirstId(self):
"""DEPRECATED The first ID to be used for the inner VLAN tag.
Returns:
number
"""
return self._get_attribute('innerFirstId')
@InnerFirstId.setter
def InnerFirstId(self, value):
self._set_attribute('innerFirstId', value)
@property
def InnerIncrement(self):
"""DEPRECATED Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
Returns:
number
"""
return self._get_attribute('innerIncrement')
@InnerIncrement.setter
def InnerIncrement(self, value):
self._set_attribute('innerIncrement', value)
@property
def InnerIncrementStep(self):
"""DEPRECATED Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
Returns:
number
"""
return self._get_attribute('innerIncrementStep')
@InnerIncrementStep.setter
def InnerIncrementStep(self, value):
self._set_attribute('innerIncrementStep', value)
@property
def InnerPriority(self):
"""DEPRECATED The 802.1Q priority to be used for the inner VLAN tag.
Returns:
number
"""
return self._get_attribute('innerPriority')
@InnerPriority.setter
def InnerPriority(self, value):
self._set_attribute('innerPriority', value)
@property
def InnerTpid(self):
"""DEPRECATED The TPID value in the inner VLAN Tag.
Returns:
str
"""
return self._get_attribute('innerTpid')
@InnerTpid.setter
def InnerTpid(self, value):
self._set_attribute('innerTpid', value)
@property
def InnerUniqueCount(self):
"""DEPRECATED Number of unique inner VLAN IDs to use.
Returns:
number
"""
return self._get_attribute('innerUniqueCount')
@InnerUniqueCount.setter
def InnerUniqueCount(self, value):
self._set_attribute('innerUniqueCount', value)
@property
def Name(self):
"""Name of range
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def ObjectId(self):
"""Unique identifier for this object
Returns:
str
"""
return self._get_attribute('objectId')
@property
def Priority(self):
"""DEPRECATED The 802.1Q priority to be used for the outer VLAN tag.
Returns:
number
"""
return self._get_attribute('priority')
@Priority.setter
def Priority(self, value):
self._set_attribute('priority', value)
@property
def Tpid(self):
"""DEPRECATED The TPID value in the outer VLAN Tag.
Returns:
str
"""
return self._get_attribute('tpid')
@Tpid.setter
def Tpid(self, value):
self._set_attribute('tpid', value)
@property
def UniqueCount(self):
"""DEPRECATED Number of unique first VLAN IDs to use.
Returns:
number
"""
return self._get_attribute('uniqueCount')
@UniqueCount.setter
def UniqueCount(self, value):
self._set_attribute('uniqueCount', value)
def update(self, Enabled=None, FirstId=None, IdIncrMode=None, Increment=None, IncrementStep=None, InnerEnable=None, InnerFirstId=None, InnerIncrement=None, InnerIncrementStep=None, InnerPriority=None, InnerTpid=None, InnerUniqueCount=None, Name=None, Priority=None, Tpid=None, UniqueCount=None):
"""Updates a child instance of ancpVlanRange on the server.
Args:
Enabled (bool): Disabled ranges won't be configured nor validated.
FirstId (number): The first ID to be used for the first VLAN tag.
IdIncrMode (number): Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
Increment (number): Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
IncrementStep (number): Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
InnerEnable (bool): Enable the inner VLAN.
InnerFirstId (number): The first ID to be used for the inner VLAN tag.
InnerIncrement (number): Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
InnerIncrementStep (number): Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
InnerPriority (number): The 802.1Q priority to be used for the inner VLAN tag.
InnerTpid (str): The TPID value in the inner VLAN Tag.
InnerUniqueCount (number): Number of unique inner VLAN IDs to use.
Name (str): Name of range
Priority (number): The 802.1Q priority to be used for the outer VLAN tag.
Tpid (str): The TPID value in the outer VLAN Tag.
UniqueCount (number): Number of unique first VLAN IDs to use.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def CustomProtocolStack(self, *args, **kwargs):
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2:list, Arg3:enum)
Args:
args[0] is Arg2 (list(str)): List of plugin types to be added in the new custom stack
args[1] is Arg3 (str(kAppend|kMerge|kOverwrite)): Append, merge or overwrite existing protocol stack
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to disable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to enable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
| [
"[email protected]"
] | |
e4275df4e69cf6565d2afddbef18539b2d4d99f3 | 4f875744ccae8fa9225318ce16fc483b7bf2735e | /google/findDuplicate.py | 44e01dd1b67af92eaf0af5a61e728e840331fdcb | [] | no_license | nguyenngochuy91/companyQuestions | 62c0821174bb3cb33c7af2c5a1e83a60e4a29977 | c937fe19be665ba7ac345e1729ff531f370f30e8 | refs/heads/master | 2020-07-27T05:58:36.794033 | 2020-04-10T20:57:15 | 2020-04-10T20:57:15 | 208,893,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 20:34:41 2020
@author: huyn
"""
#609. Find Duplicate File in System
from typing import List
class Solution:
def findDuplicate(self, paths: List[str]) -> List[List[str]]:
d = {}
for path in paths:
item = path.split()
root = item[0]
for file in item[1:]:
file = file.split("(")
fileName = file[0]
content = file[1].split(")")[0]
if content not in d:
d[content] = []
d[content].append(root+"/"+fileName)
return [d[key] for key in d if len(d[key])>=2] | [
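# Example run (added commentary; mirrors the classic LeetCode 609 sample):
#   s = Solution()
#   s.findDuplicate(["root/a 1.txt(abcd) 2.txt(efgh)",
#                    "root/c 3.txt(abcd)",
#                    "root/c/d 4.txt(efgh)",
#                    "root 4.txt(efgh)"])
#   -> [["root/a/1.txt", "root/c/3.txt"],
#       ["root/a/2.txt", "root/c/d/4.txt", "root/4.txt"]]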
"[email protected]"
] | |
c07ba76a6ce1700bed5939dd56790525d85ad59a | 3e64d1fb4998fae24a4178d0925e0f30e30b00e7 | /venv/lib/python3.8/encodings/utf_7.py | fabe5e915e16c26cdb4c57b6fa50ed8570d0dee2 | [] | no_license | viraatdas/Model-Rest-API | a39e150c484c7136141f462932d741de5b45e044 | a08500a28e4ad32094de6f88223088b9a9081d69 | refs/heads/master | 2022-11-12T15:33:06.624474 | 2020-07-05T05:04:50 | 2020-07-05T05:04:50 | 257,821,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | /Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/encodings/utf_7.py | [
"[email protected]"
] | |
0fdb7a7c501f03fb7f776e4965cd4da3243f4ed9 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/squareroot/7ab7bec6-576b-4910-98d1-ec30c84244ab__calculate_square.py | 0bf1d0137076df117eaec3d77052d26dce255f54 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | # calculate_square.py
from Tkinter import *
import ttk
def calculate_square(*args):
value_in = float(number_in.get())
number_out.set(value_in * value_in)
root = Tk()
root.title('Calculate square')
mainframe = ttk.Frame(root)
mainframe.grid(column=1, row=1, sticky=(N, E, S, W))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
number_in = StringVar()
number_out = StringVar()
square_of_string_label = ttk.Label(mainframe, text='The square of')
square_of_string_label.grid(column=1, row=1, sticky=E)
number_in_entry = ttk.Entry(mainframe, width=5, textvariable=number_in)
number_in_entry.grid(column=2, row=1, sticky=(E, W))
is_string_label = ttk.Label(mainframe, text='is')
is_string_label.grid(column=1, row=2, sticky=E)
number_out_label = ttk.Label(mainframe, textvariable=number_out)
number_out_label.grid(column=2, row=2, sticky=W)
go_button = ttk.Button(mainframe, text='Go!', command=calculate_square)
go_button.grid(column=2, row=3, sticky=W)
for child in mainframe.winfo_children():
child.grid_configure(padx=2, pady=2)
number_in_entry.focus()
root.bind('<Return>', calculate_square)
root.mainloop()
| [
"[email protected]"
] | |
201ec0e778d39c619ca7d2db0f6caee17ddd1f95 | d7363da78e6f1e8ae2c6abca3f845853756165d4 | /src/adafruit_blinka/board/dragonboard_410c.py | a627309d6c32ff8ab6a13dc5b5cc9a989804b538 | [
"MIT"
] | permissive | adafruit/Adafruit_Blinka | 7a9ed88f39ff12082d1b46647fa8869b541fba49 | 009b352a3234339000c32d2e61e830455cf389fa | refs/heads/main | 2023-08-09T06:25:02.178935 | 2023-07-28T16:45:40 | 2023-07-28T16:45:40 | 120,540,744 | 398 | 331 | MIT | 2023-09-14T20:32:23 | 2018-02-07T00:25:03 | Python | UTF-8 | Python | false | false | 972 | py | # SPDX-FileCopyrightText: 2021 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Pin definitions for the Dragonboard 410c."""
from adafruit_blinka.microcontroller.snapdragon.apq8016 import pin
GPIO_A = pin.GPIO_36
GPIO_B = pin.GPIO_12
GPIO_C = pin.GPIO_13
GPIO_D = pin.GPIO_69
GPIO_E = pin.GPIO_115
GPIO_F = pin.PM_MPP_4
GPIO_G = pin.GPIO_24
GPIO_H = pin.GPIO_25
GPIO_I = pin.GPIO_35
GPIO_J = pin.GPIO_34
GPIO_K = pin.GPIO_28
GPIO_L = pin.GPIO_33
GPIO_36 = pin.GPIO_36
GPIO_12 = pin.GPIO_12
GPIO_13 = pin.GPIO_13
GPIO_69 = pin.GPIO_69
GPIO_115 = pin.GPIO_115
GPIO_4 = pin.PM_MPP_4
GPIO_24 = pin.GPIO_24
GPIO_25 = pin.GPIO_25
GPIO_35 = pin.GPIO_35
GPIO_34 = pin.GPIO_34
GPIO_28 = pin.GPIO_28
GPIO_33 = pin.GPIO_33
SDA = pin.I2C0_SDA
SCL = pin.I2C0_SCL
I2C0_SDA = pin.I2C0_SDA
I2C0_SCL = pin.I2C0_SCL
I2C1_SDA = pin.I2C1_SDA
I2C1_SCL = pin.I2C1_SCL
SCLK = pin.SPI0_SCLK
MOSI = pin.SPI0_MOSI
MISO = pin.SPI0_MISO
SPI_CS = pin.SPI0_CS
| [
"[email protected]"
] | |
3dcca22538909e4ca7c9e1f85a4a19c897d9ccc0 | bf4178e73f0f83781be6784d7587cb34a38d6edd | /platform/radio/efr32_multiphy_configurator/pro2_chip_configurator/src/si4010_cfg_calc/si4010cfgcalcsecurity.py | 3da55602e5855910430be093d1a8e3ae2b503b84 | [] | no_license | kolbertv/ZigbeeSiliconV3 | 80d70515e93be1413c24cdcb3485f50c65a1564b | ab0bd8d4bb6c1048adef81d0e66d96006c2fabd9 | refs/heads/master | 2023-01-02T07:18:01.393003 | 2020-10-25T15:33:08 | 2020-10-25T15:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | '''
Created on August 25, 2013
@author: shyang
'''
__all__ = ["Si4010CfgCalcSecurity"]
class Si4010CfgCalcSecurity(object):
'''
classdocs
'''
OEM_Key16_Table = [
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0x4B, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x29, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xDC, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x24, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x02, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x30, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xCB, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
]
OEM_ID_KEY_Table = [ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000 ]
def __init__(self, inputs):
self.cfg = {}
self.cfg_PQ_file = {}
# TODO check
if inputs.security.OEM_Key == []:
self.Key16 = self.OEM_Key16_Table[inputs.topLevelSetup.OTPcfgNum]
else:
self.Key16 = inputs.security.OEM_Key
self.cfg['bOEM_Key[16]'] = self.Key16
self.cfg_PQ_file['bOEM_Key[16]'] = self.Key16
if inputs.security.OEM_ID_Key == 0:
self.ID_Key = self.OEM_ID_KEY_Table[inputs.topLevelSetup.OTPcfgNum]
else:
self.ID_Key = inputs.security.OEM_ID_Key
self.cfg['lOEM_ID_Key'] = self.ID_Key
def get_ID_Key(self, index):
return self.OEM_ID_KEY_Table[index]
def get_Key16(self, index):
return self.OEM_Key16_Table[index]
def get_cfg_data(self):
return self.cfg
def dump(self):
print(' ------------- configuration data -------------')
for m in self.cfg:
print(' {} = {}'.format(m, self.cfg[m]))
| [
"[email protected]"
] | |
8f9c7c45bf173c6b1593881386614ed222c6c593 | 2bf43e862b432d44ba545beea4e67e3e086c1a1c | /tests/nemo_text_processing/zh/test_char.py | 1ca553eca3d027fe254df28f4d9b682ca08f9b57 | [
"Apache-2.0"
] | permissive | ericharper/NeMo | 719e933f6ffce1b27358bc21efe87cdf144db875 | f1825bc4b724b78c2d6ca392b616e8dc9a8cde04 | refs/heads/master | 2022-10-06T01:45:21.887856 | 2022-09-14T19:09:42 | 2022-09-14T19:09:42 | 259,380,135 | 1 | 0 | Apache-2.0 | 2022-09-20T18:01:57 | 2020-04-27T15:54:20 | Python | UTF-8 | Python | false | false | 1,257 | py | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
class TestChar:
normalizer_zh = Normalizer(lang='zh', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')
@parameterized.expand(parse_test_case_file('zh/data_text_normalization/test_cases_char.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm_char(self, test_input, expected):
preds = self.normalizer_zh.normalize(test_input)
assert expected == preds
| [
"[email protected]"
] | |
2bb192e13d0b897544b36848f736cf1666918f37 | e8160ba62759fc390daf60d88146e95c0c0de1b4 | /TestDjangoORM/settings.py | 97366c9073674155c60edddae7971a54bbb699fe | [] | no_license | imranq2/TestDjangoORM | 2a3a72aff36f03b6e2bb1a0f394a3499d2607bba | 8d51d772f42635c0dbbd1d462057defaa9cdfbff | refs/heads/master | 2023-01-05T23:07:07.662717 | 2020-11-03T04:36:44 | 2020-11-03T04:36:44 | 309,496,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,510 | py | """
Django settings for TestDjangoORM project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@f5a-qggnb9d=y^%tcto40rnxzb=6kq5)=077s*9in+$wx&y37'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
# Django stuff
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TestDjangoORM.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TestDjangoORM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
LOGGING = {
'version': 1,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
},
},
'root': {
'handlers': ['console'],
}
} | [
"[email protected]"
] | |
afdfc45217af92feca35e8df5f3b06c51cf1a18f | 32cb84dd41e4be24c065bb205f226f9b121a6db2 | /feedback/urls.py | 523511566940bbd365ca5900079a62fd10f87512 | [] | no_license | InformatykaNaStart/staszic-sio2 | b38fda84bd8908472edb2097774838ceed08fcfa | 60a127e687ef8216d2ba53f9f03cfaa201c59e26 | refs/heads/master | 2022-06-29T11:09:28.765166 | 2022-06-13T21:56:19 | 2022-06-13T21:56:19 | 115,637,960 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from django.conf.urls import patterns, include, url
import views
noncontest_patterns = [url(r'^staszic/judging/(?P<jid>\d+)/$', views.judging)]
| [
"[email protected]"
] | |
4c84bb0dd04ef0a5558dab96f89e9a850724abde | 0386591b51fdbf5759faef6afb8729b64a3f1589 | /layerserver/widgets/modificationdate.py | 3b9aab935d42e5aa5a0047c815f565c8306afad5 | [
"BSD-3-Clause"
] | permissive | giscube/giscube-admin | 1e155402e094eb4db1f7ca260a8d1402e27a31df | 4ce285a6301f59a8e48ecf78d58ef83c3827b5e0 | refs/heads/main | 2023-07-11T17:23:56.531443 | 2023-02-06T15:12:31 | 2023-02-06T15:12:31 | 94,087,469 | 7 | 1 | BSD-3-Clause | 2023-07-07T13:22:09 | 2017-06-12T11:12:56 | Python | UTF-8 | Python | false | false | 556 | py | from datetime import datetime
from django.utils.timezone import get_current_timezone
from .date import DateWidget
class ModificationDateWidget(DateWidget):
base_type = 'date'
@staticmethod
def update(request, instance, validated_data, widget):
validated_data[widget['name']] = datetime.now(tz=get_current_timezone()).date()
@staticmethod
def is_valid(cleaned_data):
if not cleaned_data['readonly']:
return ModificationDateWidget.ERROR_READONLY_REQUIRED
return DateWidget.is_valid(cleaned_data)
| [
"[email protected]"
] | |
6f1547fab3b6b91f274d8e7a04e2ac3e28693ae2 | 3b593b412c663a34784b1f60ad07cd2ee6ef87d1 | /month01/python base/day12/code03.py | 19ca59f6f051da2f348473bcdba1941fb51fd14e | [] | no_license | ShijieLiu-PR/Python_Learning | 88694bd44aeed4f8b022202c1065342bd17c26d2 | ed01cc0956120ea287c51667604db97ff563c829 | refs/heads/master | 2023-05-22T16:35:24.252313 | 2021-06-16T10:56:21 | 2021-06-16T10:56:21 | 337,445,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
Operator overloading
"""
print("a" + "b")
class Vector:
"""
    Vector class
"""
def __init__(self, x):
self.x = x
def __add__(self, other):
# self.x += other
# return self
return Vector(self.x + other)
def __str__(self):
return "Vector(%d)" % self.x
v01 = Vector(10)
v02 = v01 + 5
print(id(v01))
print(id(v02))
print(v01)
print(v02)
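# Note (added): because __add__ returns a new Vector rather than mutating
# self, v01 is unchanged and the two ids printed above differ. A sketch of
# the in-place counterpart, assuming the same class layout:
#
#   def __iadd__(self, other):
#       self.x += other
#       return self   # after `v += 5`, v is still the same object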
| [
"[email protected]"
] | |
b62b9d12528fab30ba13d52d4ab9d783c4f58689 | e7c84801d7755806e58795d5fe51f7a924815ffc | /python-image-watermark/python-watermark-image.py | 86d4a5401ae25cc33b68205ae57687d2b72853e3 | [] | no_license | c0c1/python-image | 3454b37b3e0339fd3e204a38d7aa14c885e10e38 | b785801589722571ac7ed8ad4428b4d04f518a2b | refs/heads/master | 2023-06-04T23:36:17.974408 | 2021-06-21T12:38:23 | 2021-06-21T12:38:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | import os, sys
from PIL import Image, ImageDraw, ImageFont
img_dir = "images/non-watermark/"
dirs = os.listdir( img_dir )
for img in dirs:
if os.path.isfile(img_dir + img):
#Create an Image Object from an Image
im = Image.open(img_dir + img)
#Image width and height
width, height = im.size
#Image name
img_name = os.path.basename(img_dir + img)
#print(img_name)
text = "{roytuts.com}"
font = ImageFont.truetype('arial.ttf', 30)
draw = ImageDraw.Draw(im)
textwidth, textheight = draw.textsize(text, font)
#Right bottom corner with margin 5 from right
margin = 5
#x = width - textwidth - margin
#y = height - textheight - margin
#Center of the image
x = (width - textwidth)/2 #center
y = (height - textheight)/2 #center
#draw.text((x, y), text, font=font)
draw.text((x, y), text, font=font, fill=(254, 130, 75, 15))
        # im.show()  # uncomment to display the image in a preview window
#Save watermarked image
im.save('images/watermark/' + img_name) | [
"[email protected]"
] | |
2d24087778240384516917c28596440c2aed5e2b | 8520c991dc543f5f4e1efe59ab401824173bb985 | /332-reconstruct-itinerary/solution.py | 9deb98ca04053efa355f326607f4c90351f51542 | [] | no_license | katryo/leetcode | d44f70f2853c4f5ea9a462d022feb0f5436c2236 | 0da45559271d3dba687858b8945b3e361ecc813c | refs/heads/master | 2020-03-24T12:04:53.859047 | 2020-02-18T04:27:55 | 2020-02-18T04:27:55 | 142,703,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | from collections import defaultdict
import heapq
class Solution:
def findItinerary(self, tickets):
dests = defaultdict(list)
ans = []
for src, dest in tickets:
heapq.heappush(dests[src], dest)
def dfs(dep):
arrivals = dests[dep]
while arrivals:
dfs(heapq.heappop(arrivals))
ans.insert(0, dep)
dfs('JFK')
return ans
# def findItinerary(self, tickets):
# dests = defaultdict(list)
# for a, b in sorted(tickets)[::-1]:
# dests[a].append(b)
# ans = []
#
# def visit(start):
# while dests[start]:
# visit(dests[start].pop())
# ans.append(start)
#
# visit('JFK')
# return list(reversed(ans))
s = Solution()
print(s.findItinerary([["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]))
| [
"[email protected]"
] | |
cc81969fe3c3463a9a336a1e77f56a7592cde567 | b91bd5b0954776fd186bf064a87fb8f7ffa4a58a | /python2/flask/flask_fun/flask_table/server.py | 5c146559aa55798c1023ee96a350f5061e5a2f4d | [] | no_license | ronaldaguerrero/practice | ddf1f41b693110cebe4d52e29910909f3ba21115 | 38627fddd8f79e6fb50c05a0e4e8d27a92146e1b | refs/heads/master | 2023-01-23T17:06:18.642983 | 2019-09-13T05:01:48 | 2019-09-13T05:01:48 | 186,157,588 | 0 | 0 | null | 2023-01-07T09:40:40 | 2019-05-11T16:40:12 | Python | UTF-8 | Python | false | false | 564 | py | # import things
from flask_table import Table, Col
# Declare your table
class ItemTable(Table):
name = Col('Name')
description = Col('Description')
# Get some objects
class Item(object):
def __init__(self, name, description):
self.name = name
self.description = description
items = [Item('Name1', 'Description1'),
Item('Name2', 'Description2'),
Item('Name3', 'Description3')]
# Populate the table
table = ItemTable(items)
# Print the html
print(table.__html__())
# or just {{ table }} from within a Jinja template | [
"[email protected]"
] | |
51e6d0b64816e845f3804107099f83eb52511405 | 030cea4006a4ff559f23cb3b3c31cd038ed2e332 | /week11/hh_back/api/migrations/0001_initial.py | ff433e7b38b000547c461e4b1354c718d2bfa422 | [] | no_license | ayananygmetova/Web-Dev-2020 | f8834e0ee26f0f0f06d0e3a282c73b373954a430 | 957bca91554f015e9a3d13b4ec12e64de7ac633e | refs/heads/master | 2023-01-22T16:49:39.857983 | 2020-03-31T10:09:54 | 2020-03-31T10:09:54 | 236,937,810 | 1 | 0 | null | 2023-01-07T16:34:35 | 2020-01-29T08:41:10 | Python | UTF-8 | Python | false | false | 669 | py | # Generated by Django 3.0.4 on 2020-03-31 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField(default='')),
('city', models.CharField(max_length=200)),
('address', models.TextField(default='')),
],
),
]
| [
"[email protected]"
] | |
f20231cfc5c8195e5135526087d532d334a0c5fa | 9907b3dd74d1aedbed5243105649f0acd8e965d8 | /demo/pytorch_laguerre.py | 0aded5c456579f8f7de77004c4e2c77956273df5 | [
"MIT"
] | permissive | shubhampachori12110095/OrthNet | 68c7442c448acdca2b0f2fbef0709efec280be4c | 74824c1858e14f023d3f0251910f223d6b8672ce | refs/heads/master | 2021-01-25T13:12:07.142646 | 2018-02-28T15:18:38 | 2018-02-28T15:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | import sys
sys.path.append('../')
from orthnet.pytorch import laguerre_tensor, multi_dim_laguerre_tensor
import torch
from torch.autograd import Variable
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
order1 = 5
order2 = 3
x1_data = np.linspace(-1, 1, 100).reshape((-1, 1))
x2_data = np.linspace(-1, 1, 100).reshape((-1, 1))
x1 = Variable(torch.Tensor(x1_data))
x2 = Variable(torch.Tensor(x2_data))
y1 = laguerre_tensor(n = order1, x = x1)
y2 = multi_dim_laguerre_tensor(n = order2, var = [x1, x2])
z1 = y1.data.numpy()
z2 = y2.data.numpy()
fig1 = plt.figure()
ax1 = fig1.gca()
for i in range(order1+1):
ax1.plot(x1_data, z1[:, i], label = 'n = '+str(i))
ax1.legend()
ax1.grid(True)
fig2 = plt.figure()
ax2 = fig2.gca(projection='3d')
x1_data, x2_data = np.meshgrid(x1_data, x2_data)
ax2.plot_surface(X = x1_data, Y = x2_data, Z = z2[:, -2])
plt.show() | [
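# Math note (added): the plotted families satisfy the standard Laguerre
# three-term recurrence
#     (n + 1) * L_{n+1}(x) = (2n + 1 - x) * L_n(x) - n * L_{n-1}(x),
# with L_0(x) = 1 and L_1(x) = 1 - x; the tensors above are expected to
# evaluate these polynomials (and their multi-dimensional products) column
# by column.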
"[email protected]"
] | |
7897e7d4cadfa5c63f6555c720fe7a1d117dfa50 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/statsmodels/2017/12/markov_regression.py | ef11b49627e507701b1babda3e3d9963f998bb8c | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 16,426 | py | """
Markov switching regression models
Author: Chad Fulton
License: BSD-3
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.regime_switching import markov_switching
class MarkovRegression(markov_switching.MarkovSwitching):
r"""
First-order k-regime Markov switching regression model
Parameters
----------
endog : array_like
The endogenous variable.
k_regimes : integer
The number of regimes.
trend : {'nc', 'c', 't', 'ct'}
Whether or not to include a trend. To include an intercept, time trend,
or both, set `trend='c'`, `trend='t'`, or `trend='ct'`. For no trend,
set `trend='nc'`. Default is an intercept.
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : integer, optional
The order of the model describes the dependence of the likelihood on
previous regimes. This depends on the model in question and should be
set appropriately by subclasses.
exog_tvtp : array_like, optional
Array of exogenous or lagged variables to use in calculating
time-varying transition probabilities (TVTP). TVTP is only used if this
variable is provided. If an intercept is desired, a column of ones must
be explicitly included in this array.
switching_trend : boolean or iterable, optional
If a boolean, sets whether or not all trend coefficients are
switching across regimes. If an iterable, should be of length equal
to the number of trend variables, where each element is
a boolean describing whether the corresponding coefficient is
switching. Default is True.
switching_exog : boolean or iterable, optional
If a boolean, sets whether or not all regression coefficients are
switching across regimes. If an iterable, should be of length equal
to the number of exogenous variables, where each element is
a boolean describing whether the corresponding coefficient is
switching. Default is True.
switching_variance : boolean, optional
Whether or not there is regime-specific heteroskedasticity, i.e.
whether or not the error term has a switching variance. Default is
False.
Notes
-----
This model is new and API stability is not guaranteed, although changes
will be made in a backwards compatible way if possible.
The model can be written as:
.. math::
y_t = a_{S_t} + x_t' \beta_{S_t} + \varepsilon_t \\
\varepsilon_t \sim N(0, \sigma_{S_t}^2)
i.e. the model is a dynamic linear regression where the coefficients and
the variance of the error term may be switching across regimes.
    The `trend` is accommodated by prepending columns to the `exog` array. Thus
if `trend='c'`, the passed `exog` array should not already have a column of
ones.
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
def __init__(self, endog, k_regimes, trend='c', exog=None, order=0,
exog_tvtp=None, switching_trend=True, switching_exog=True,
switching_variance=False, dates=None, freq=None,
missing='none'):
# Properties
self.trend = trend
self.switching_trend = switching_trend
self.switching_exog = switching_exog
self.switching_variance = switching_variance
# Exogenous data
self.k_exog, exog = markov_switching.prepare_exog(exog)
# Trend
nobs = len(endog)
self.k_trend = 0
self._k_exog = self.k_exog
trend_exog = None
if trend == 'c':
trend_exog = np.ones((nobs, 1))
self.k_trend = 1
elif trend == 't':
trend_exog = (np.arange(nobs) + 1)[:, np.newaxis]
self.k_trend = 1
elif trend == 'ct':
trend_exog = np.c_[np.ones((nobs, 1)),
(np.arange(nobs) + 1)[:, np.newaxis]]
self.k_trend = 2
if trend_exog is not None:
exog = trend_exog if exog is None else np.c_[trend_exog, exog]
self._k_exog += self.k_trend
# Initialize the base model
super(MarkovRegression, self).__init__(
endog, k_regimes, order=order, exog_tvtp=exog_tvtp, exog=exog,
dates=dates, freq=freq, missing=missing)
# Switching options
if self.switching_trend is True or self.switching_trend is False:
self.switching_trend = [self.switching_trend] * self.k_trend
elif not len(self.switching_trend) == self.k_trend:
raise ValueError('Invalid iterable passed to `switching_trend`.')
if self.switching_exog is True or self.switching_exog is False:
self.switching_exog = [self.switching_exog] * self.k_exog
elif not len(self.switching_exog) == self.k_exog:
raise ValueError('Invalid iterable passed to `switching_exog`.')
self.switching_coeffs = (
np.r_[self.switching_trend,
self.switching_exog].astype(bool).tolist())
# Parameters
self.parameters['exog'] = self.switching_coeffs
self.parameters['variance'] = [1] if self.switching_variance else [0]
def predict_conditional(self, params):
"""
In-sample prediction, conditional on the current regime
Parameters
----------
params : array_like
Array of parameters at which to perform prediction.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes
"""
params = np.array(params, ndmin=1)
# Since in the base model the values are the same across columns, we
# only compute a single column, and then expand it below.
predict = np.zeros((self.k_regimes, self.nobs), dtype=params.dtype)
for i in range(self.k_regimes):
# Predict
if self._k_exog > 0:
coeffs = params[self.parameters[i, 'exog']]
predict[i] = np.dot(self.exog, coeffs)
return predict[:, None, :]
def _resid(self, params):
predict = np.repeat(self.predict_conditional(params),
self.k_regimes, axis=1)
return self.endog - predict
def _conditional_likelihoods(self, params):
"""
Compute likelihoods conditional on the current period's regime
"""
# Get residuals
resid = self._resid(params)
# Compute the conditional likelihoods
variance = params[self.parameters['variance']].squeeze()
if self.switching_variance:
variance = np.reshape(variance, (self.k_regimes, 1, 1))
conditional_likelihoods = (
np.exp(-0.5 * resid**2 / variance) / np.sqrt(2 * np.pi * variance))
return conditional_likelihoods
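    # Commentary (added): the expression above is the Gaussian density
    #     f(y_t | S_t = i) = exp(-resid_{i,t}**2 / (2 * sigma_i**2))
    #                        / sqrt(2 * pi * sigma_i**2)
    # broadcast across regimes; when switching_variance is False a single
    # shared sigma**2 is used for every regime.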
@property
def _res_classes(self):
return {'fit': (MarkovRegressionResults,
MarkovRegressionResultsWrapper)}
def _em_iteration(self, params0):
"""
EM iteration
Notes
-----
This uses the inherited _em_iteration method for computing the
non-TVTP transition probabilities and then performs the EM step for
regression coefficients and variances.
"""
# Inherited parameters
result, params1 = super(MarkovRegression, self)._em_iteration(params0)
tmp = np.sqrt(result.smoothed_marginal_probabilities)
# Regression coefficients
coeffs = None
if self._k_exog > 0:
coeffs = self._em_exog(result, self.endog, self.exog,
self.parameters.switching['exog'], tmp)
for i in range(self.k_regimes):
params1[self.parameters[i, 'exog']] = coeffs[i]
# Variances
params1[self.parameters['variance']] = self._em_variance(
result, self.endog, self.exog, coeffs, tmp)
# params1[self.parameters['variance']] = 0.33282116
return result, params1
def _em_exog(self, result, endog, exog, switching, tmp=None):
"""
EM step for regression coefficients
"""
k_exog = exog.shape[1]
coeffs = np.zeros((self.k_regimes, k_exog))
# First, estimate non-switching coefficients
if not np.all(switching):
nonswitching_exog = exog[:, ~switching]
nonswitching_coeffs = (
np.dot(np.linalg.pinv(nonswitching_exog), endog))
coeffs[:, ~switching] = nonswitching_coeffs
endog = endog - np.dot(nonswitching_exog, nonswitching_coeffs)
# Next, get switching coefficients
if np.any(switching):
switching_exog = exog[:, switching]
if tmp is None:
tmp = np.sqrt(result.smoothed_marginal_probabilities)
for i in range(self.k_regimes):
tmp_endog = tmp[i] * endog
tmp_exog = tmp[i][:, np.newaxis] * switching_exog
coeffs[i, switching] = (
np.dot(np.linalg.pinv(tmp_exog), tmp_endog))
return coeffs
def _em_variance(self, result, endog, exog, betas, tmp=None):
"""
EM step for variances
"""
k_exog = 0 if exog is None else exog.shape[1]
if self.switching_variance:
variance = np.zeros(self.k_regimes)
for i in range(self.k_regimes):
if k_exog > 0:
resid = endog - np.dot(exog, betas[i])
else:
resid = endog
variance[i] = (
np.sum(resid**2 *
result.smoothed_marginal_probabilities[i]) /
np.sum(result.smoothed_marginal_probabilities[i]))
else:
variance = 0
if tmp is None:
tmp = np.sqrt(result.smoothed_marginal_probabilities)
for i in range(self.k_regimes):
tmp_endog = tmp[i] * endog
if k_exog > 0:
tmp_exog = tmp[i][:, np.newaxis] * exog
resid = tmp_endog - np.dot(tmp_exog, betas[i])
else:
resid = tmp_endog
variance += np.sum(resid**2)
variance /= self.nobs
return variance
@property
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
Notes
-----
These are not very sophisticated and / or good. We set equal transition
probabilities and interpolate regression coefficients between zero and
the OLS estimates, where the interpolation is based on the regime
number. We rely heavily on the EM algorithm to quickly find much better
starting parameters, which are then used by the typical scoring
approach.
"""
# Inherited parameters
params = markov_switching.MarkovSwitching.start_params.fget(self)
# Regression coefficients
if self._k_exog > 0:
beta = np.dot(np.linalg.pinv(self.exog), self.endog)
variance = np.var(self.endog - np.dot(self.exog, beta))
if np.any(self.switching_coeffs):
for i in range(self.k_regimes):
params[self.parameters[i, 'exog']] = (
beta * (i / self.k_regimes))
else:
params[self.parameters['exog']] = beta
else:
variance = np.var(self.endog)
# Variances
if self.switching_variance:
params[self.parameters['variance']] = (
np.linspace(variance / 10., variance, num=self.k_regimes))
else:
params[self.parameters['variance']] = variance
return params
@property
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
# Inherited parameters
param_names = np.array(
markov_switching.MarkovSwitching.param_names.fget(self),
dtype=object)
# Regression coefficients
if np.any(self.switching_coeffs):
for i in range(self.k_regimes):
param_names[self.parameters[i, 'exog']] = [
'%s[%d]' % (exog_name, i) for exog_name in self.exog_names]
else:
param_names[self.parameters['exog']] = self.exog_names
# Variances
if self.switching_variance:
for i in range(self.k_regimes):
param_names[self.parameters[i, 'variance']] = 'sigma2[%d]' % i
else:
param_names[self.parameters['variance']] = 'sigma2'
return param_names.tolist()
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
        evaluation.
"""
# Inherited parameters
constrained = super(MarkovRegression, self).transform_params(
unconstrained)
# Nothing to do for regression coefficients
constrained[self.parameters['exog']] = (
unconstrained[self.parameters['exog']])
# Force variances to be positive
constrained[self.parameters['variance']] = (
unconstrained[self.parameters['variance']]**2)
return constrained
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
        Array of constrained parameters used in likelihood evaluation, to be
transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
"""
# Inherited parameters
unconstrained = super(MarkovRegression, self).untransform_params(
constrained)
# Nothing to do for regression coefficients
unconstrained[self.parameters['exog']] = (
constrained[self.parameters['exog']])
# Force variances to be positive
unconstrained[self.parameters['variance']] = (
constrained[self.parameters['variance']]**0.5)
return unconstrained
class MarkovRegressionResults(markov_switching.MarkovSwitchingResults):
r"""
Class to hold results from fitting a Markov switching regression model
Parameters
----------
model : MarkovRegression instance
The fitted model instance
params : array
Fitted parameters
filter_results : HamiltonFilterResults or KimSmootherResults instance
The underlying filter and, optionally, smoother output
cov_type : string
The type of covariance matrix estimator to use. Can be one of 'approx',
'opg', 'robust', or 'none'.
Attributes
----------
model : Model instance
A reference to the model that was fit.
filter_results : HamiltonFilterResults or KimSmootherResults instance
The underlying filter and, optionally, smoother output
nobs : float
The number of observations used to fit the model.
params : array
The parameters of the model.
scale : float
This is currently set to 1.0 and not used by the model or its results.
"""
pass
class MarkovRegressionResultsWrapper(
markov_switching.MarkovSwitchingResultsWrapper):
pass
wrap.populate_wrapper(MarkovRegressionResultsWrapper, MarkovRegressionResults)
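# Usage sketch (added; the data and parameter choices below are illustrative,
# not taken from the statsmodels documentation):
#
#   import numpy as np
#   np.random.seed(0)
#   regime = np.repeat([0, 1, 0], 100)
#   y = np.where(regime == 0, 0.0, 2.0) + np.random.randn(300)
#   mod = MarkovRegression(y, k_regimes=2, switching_variance=True)
#   res = mod.fit()
#   res.smoothed_marginal_probabilities  # P(S_t = i | data), per regime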
| [
"[email protected]"
] | |
d0eb44f47aea9e440d8ce9e2190b0d49f9f3822d | 94b101b38acb682422b8e26ff09527e1102e6524 | /project/users/views.py | 4ae6702c4b12933ac5fa836b8207dbb98b6bbb8b | [] | no_license | mjoze/Web-App | f0ff12118510cb5bfa6d4ff5541194b184848c41 | 8f5c237231d35d87a77cf9dffa7261c19f81dec7 | refs/heads/master | 2020-12-23T02:47:06.241269 | 2020-03-07T14:34:54 | 2020-03-07T14:34:54 | 237,010,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(
request, f'Your account has been created! You are now able to log in.')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(
request, f'Your account has been updated')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'users/profile.html', context)
| [
"[email protected]"
] | |
f58dc5c06357b947dad8b998e8070480de396f5d | d47b841f7e64d83cebbe63a25bac47adc495a760 | /test/test_box_score_teams_overall.py | 74d000b20e37922744080d08c234957c32e396ab | [] | no_license | CiscoNeville/cfbd-python | 810029240de30a2b7a205cbc3bb009599481206c | 5775ff7ce7464e881f1940a7c0a534b0c26c1ce8 | refs/heads/master | 2023-09-04T18:27:23.773119 | 2021-11-19T01:49:07 | 2021-11-19T01:49:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | # coding: utf-8
"""
College Football Data API
This is an API for accessing all sorts of college football data. It currently has a wide array of data ranging from play by play to player statistics to game scores and more. # noqa: E501
OpenAPI spec version: 2.4.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cfbd
from cfbd.models.box_score_teams_overall import BoxScoreTeamsOverall # noqa: E501
from cfbd.rest import ApiException
class TestBoxScoreTeamsOverall(unittest.TestCase):
"""BoxScoreTeamsOverall unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBoxScoreTeamsOverall(self):
"""Test BoxScoreTeamsOverall"""
# FIXME: construct object with mandatory attributes with example values
# model = cfbd.models.box_score_teams_overall.BoxScoreTeamsOverall() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1d8762c60b7af569450421e970799689990cf863 | 69a8a88c99f5c401b188ce7637174c19a3ed48d8 | /0x0A-python-inheritance/10-square.py | 9f90ed3be2ee071cbcc079312aa9f6543eda60d0 | [] | no_license | JDorangetree/holbertonschool-higher_level_programming | 0546b25726052a8ce6468781f933eb28d1aee30d | f984f5047f690d352c7f203ef16aa7f0cc49afcd | refs/heads/master | 2020-09-29T01:22:22.387395 | 2020-05-16T23:35:12 | 2020-05-16T23:35:12 | 226,912,872 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/usr/bin/python3
""" Class that inherits from Rectangle """
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
""" Class that defines a Square by inheritance of Rectangle class """
def __init__(self, size):
""" Constructor """
self.integer_validator("size", size)
self.__size = size
super().__init__(size, size)
def area(self):
""" Area method"""
My_area = self.__size * self.__size
return My_area
| [
"[email protected]"
] | |
78b373ee16f0efc70102408817bb21f313d8525e | fdcbf5b203f07cceefbb38a746f4a43b322e263e | /Python/findNeighbors_of_Nulls.py | 52f8694848396c9527b570609bc2724e421599bd | [] | no_license | Masoumeh/0390.IbnAhmadMuqaddasi.AhsanTaqasim | e7a3eddc895edb79f8d93c1bd0f09f130a761858 | 592720e5a154fcfe9cdab84b16eaf5574f30b806 | refs/heads/master | 2021-01-18T00:36:09.962622 | 2017-11-07T13:13:46 | 2017-11-07T13:13:46 | 45,922,253 | 0 | 0 | null | 2015-11-10T15:49:02 | 2015-11-10T15:49:02 | null | UTF-8 | Python | false | false | 1,682 | py | """
Extract information from the route network graph, e.g. how far the first two
neighbours that have coordinates are from a node that has none.
"""
from networkx.readwrite import json_graph
import io, json, csv
import re
import networkx as nx
import sys
import operator
import compose_graphs as cg
def findNeighbors_of_Nulls(G, writer):
#G = nx.Graph()
#G = cg.composeGraphs(textRoutes, cornuRoutes, cornuPlaces)
'''with open(fileName, 'r') as meterFile:
distReader = csv.reader(meterFile, delimiter=',')
next(distReader, None)
for row in distReader:
G.add_node(row[0], lat=row[1], lng=row[2])
G.add_node(row[3], lat=row[4], lng=row[5])
G.add_edge(row[0],row[3], length= row[-1])'''
coord_neighbors = {}
nulls = [n for n in G.nodes() if G.node[n]['lat'] == "null" and G.node[n]['lng'] == "null"]
print(len(nulls))
for node in nulls:
length = nx.single_source_shortest_path_length(G,node)
sorted_length = sorted(length.items(), key=operator.itemgetter(1))
neighCoords = []
# exclude the firs item of list from the loop which is the node itself with the distance of zero from the node! i.e. ('node',0)
for l in sorted_length[1:]:
# check the distance of node from the neigbor and if the neighbor has coordinate
if l[1] == 1 and G.node[l[0]]['lat'] != "null" and G.node[l[0]]['lng'] != "null":
# add the neighbor to array
neighCoords.append( [l[0],l[1]])
# limit the neighbors to two to have at leat two neighbours with
if len(neighCoords) >= 2:
break
if len(neighCoords) == 2:
writer.writerow([node,neighCoords])
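# Illustrative driver sketch (added): compose_graphs' exact signature is not
# shown here, so the call below simply mirrors the commented-out line near the
# top of the function; the route variables are placeholders.
#   G = cg.composeGraphs(textRoutes, cornuRoutes, cornuPlaces)
#   with open('null_neighbors.csv', 'w') as out:
#       findNeighbors_of_Nulls(G, csv.writer(out))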
| [
"[email protected]"
] | |
bbef2beee7c94d588e9831ccbb760157f2f2e422 | 6915d6a20d82ecf2a2a3d3cd84ca22dab2491004 | /advtempproject/advtempproject/wsgi.py | 507d246211545d55217dfb1767569eb090224823 | [] | no_license | iitian-gopu/django | bb4302d101f4434fb61ab374807e29699a432e42 | 31db982212bbb453cc4c56c7f5cfad9a00cd231d | refs/heads/master | 2023-05-14T07:22:35.176477 | 2021-06-04T04:43:26 | 2021-06-04T04:43:26 | 366,114,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | """
WSGI config for advtempproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "advtempproject.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
2f4164ef4372fc6478789fc37f7c1f66296b61a9 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/az1.py | 2674ec878632dbc448cc05438068f00a33a83305 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
else:
            print()
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'aZ1':
printFunction(data[1:])
else:
                print('ERROR')
return
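# Example input for this toy interpreter (illustrative): a line such as
#   aZ1 " hello world "
# prints "hello world"; any line whose first token is not 'aZ1' prints
# ERROR and stops processing.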
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
102056145a28eec5b448b8975f4633f44a628b6a | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/synthetic/rttoaobj.py | f85a0bd999b0746da1b151ecd36cc2f7a907ac50 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,526 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtToAObj(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.synthetic.RtToAObj", "cobra.model.synthetic.SwCTestObj")
meta.moClassName = "syntheticRtToAObj"
meta.rnFormat = "rttoAObj"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Synthetic Sw C Test Object"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.synthetic.IfcCTestObj")
meta.parentClasses.add("cobra.model.synthetic.IfcTLTestObj")
meta.parentClasses.add("cobra.model.synthetic.SwCTestObj")
meta.parentClasses.add("cobra.model.synthetic.SwTLTestObj")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rttoAObj', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 20610, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4272
prop.defaultValueStr = "syntheticSwCTestObj"
prop._addConstant("syntheticSwCTestObj", None, 4272)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
dda9f5d4466062d8ad277427e9721c6efad04a50 | e9d52dcf101aea0327c6b0d7e5244c91dfd62cf6 | /spexy/bases/regular.py | ee2e4fd35ec1af3c62bc446c89556cd8cd5295c7 | [] | no_license | drufat/spexy | 6eba9f44a5539245486cd4ef8fefd24bdb7ade6a | 53255009c1830501986afbf6688142ddefe17b9a | refs/heads/master | 2021-09-18T19:51:47.313946 | 2018-07-19T05:09:02 | 2018-07-19T05:09:02 | 100,453,374 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,976 | py | # Copyright (C) 2010-2016 Dzhelil S. Rufat. All Rights Reserved.
from spexy.bases import basesimp
class BasesImp(basesimp.BasesImp):
def module(self):
return 'spexy.bases.circular'
def numbers(self):
N = self.N
N0 = N + 1
N1 = N
N0d = N
N1d = N + 1
return (N0, N1), (N0d, N1d)
def cells_index(self):
half = self.imp.half
i0 = lambda n: (n,)
i1 = lambda n: (n, n + 1)
id0 = lambda n: (n + half,)
id1 = lambda n: (n - half, n + half)
return (i0, i1), (id0, id1)
def points(self, n):
N = self.N
return self.imp.points_regular_clamped(N, n)
def bases(self, correct=True):
imp = self.imp
N, half = imp.S(self.N), imp.half
def corr0(kappa):
# primal boundary vertex
if correct:
return lambda N, n, x: kappa(N, n, x) * imp.correction0(N, n)
return kappa
# Bases Functions
kappa0 = lambda n: lambda x: corr0(imp.kappa)(N, n, x)
kappa1 = lambda n: lambda x: imp.kappa_star(N, n + half, x)
kappad0 = lambda n: lambda x: imp.kappa(N, n + half, x)
kappad1 = lambda n: lambda x: imp.kappa_star(N, n, x)
# Gradients
kappa0.grad = lambda n: lambda x: corr0(imp.kappa_grad)(N, n, x)
kappad0.grad = lambda n: lambda x: imp.kappa_grad(N, n + half, x)
return (kappa0, kappa1), (kappad0, kappad1)
def boundary(self):
pi = self.imp.pi
return None, (0, pi)
def run_kappa():
"""
>>> from sympy.abc import x
>>> (kappa0, kappa1), (kappad0, kappad1) = BasesImp(2, 'sym').bases()
>>> kappa0(0)(x)
cos(x)/2 + cos(2*x)/4 + 1/4
>>> kappa0(1)(x)
-cos(2*x)/2 + 1/2
>>> kappa0(2)(x)
-cos(x)/2 + cos(2*x)/4 + 1/4
>>> kappa1(0)(x)
cos(x)/2 + 1/pi
>>> kappa1(1)(x)
-cos(x)/2 + 1/pi
>>> kappad0(0)(x)
sqrt(2)*cos(x)/2 + 1/2
>>> kappad0(1)(x)
-sqrt(2)*cos(x)/2 + 1/2
>>> kappad1(0)(x)
sqrt(2)*cos(x)/2 + cos(2*x)/2 + 1/pi
>>> kappad1(1)(x)
-cos(2*x)/2 + 1/pi
>>> kappad1(2)(x)
-sqrt(2)*cos(x)/2 + cos(2*x)/2 + 1/pi
"""
pass
def run(N):
"""
>>> run(1)
zero-form
[1, 0]
[0, 1]
one-form
[1]
dual zero-form
[1]
dual one-form
[1, 0]
[0, 1]
>>> run(2)
zero-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
one-form
[1, 0]
[0, 1]
dual zero-form
[1, 0]
[0, 1]
dual one-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
>>> run(3)
zero-form
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
one-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
dual zero-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
dual one-form
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
"""
from spexy.bases.symintegrals import run_integrals
run_integrals(BasesImp)(N)
| [
"[email protected]"
] | |
e3be99e1c6547843b797fea330aa576499260d31 | 99a4e7a4db3a3e062c0b08a5462749a28f3f7a39 | /core/utils/make_joint_dataset.py | 592af25331103bb288cfcb090d2dcd893614f3bb | [] | no_license | B-Step62/pytorch-motiongan-open | f85c1481363230826e9094e1c323ad90f0922744 | 4aefe2c427b88f357e8894d309ff46602e109001 | refs/heads/master | 2021-03-20T23:22:49.591472 | 2020-03-15T10:34:54 | 2020-03-15T10:34:54 | 247,241,734 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | import os
import sys
import shutil
import math
import subprocess
import cv2
from collections import OrderedDict
import numpy as np
import core.utils.bvh_to_joint as btoj
BVH_ROOT = './data/bvh/Edi_Mocap_Data/Iwan_style_data'
OUT = './data/bvh/Edi_Mocap_Data/Iwan_style_data'
def main():
# Copy all original bvh file
root_depth = BVH_ROOT.count(os.path.sep)
bvh_paths = []
out_dir = OUT
for (root, dirs, files) in os.walk(BVH_ROOT):
for origin_file in files:
if not origin_file.endswith('.bvh'):
continue
# Output path is 'out' + ('origin_path' - 'root')
if BVH_ROOT != OUT:
post = root.split(os.path.sep)[root_depth:]
out_dir = OUT + ''.join([os.path.sep + p for p in post])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# If save to different directory, copy original bvh
shutil.copy(os.path.join(root, origin_file), os.path.join(out_dir, origin_file))
bvh_paths.append(os.path.join(out_dir, origin_file))
else:
bvh_paths.append(os.path.join(root, origin_file))
skelton, non_end_bones, joints_to_index, permute_xyz_order = btoj.get_standard_format(bvh_paths[0])
for bvh_path in bvh_paths:
_, non_zero_joint_to_index = btoj.cut_zero_length_bone(skelton, joints_to_index)
format_data = btoj.create_data(bvh_path, skelton, joints_to_index)
npy_path = os.path.splitext(bvh_path)[0] + '.npy'
np.save(npy_path, format_data)
print(npy_path, format_data.shape)
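# Entry point (added): the original script defined main() but never called it,
# so running the file directly had no effect.
if __name__ == '__main__':
    main()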
| [
"[email protected]"
] | |
833b2113b3ae2c9ad9deecfba486cc67eee08b41 | 21839bc2817a02d01180baff826b4ce5fe2789bd | /official/vision/beta/projects/yolo/modeling/backbones/darknet.py | 5a76c7eefbc615657b563714da3e8a042c18257f | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/TF-OD-API_BICS | 1240fbf7cfbed73fe8633870c4eb237289dbd899 | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | refs/heads/main | 2023-06-24T23:46:19.756540 | 2021-07-26T05:27:12 | 2021-07-26T05:27:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,207 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Contains definitions of Darknet Backbone Networks.
The models are inspired by ResNet, and CSPNet
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu, Ping-Yang Chen,
Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
Darknets are used mainly for object detection in:
[1] Joseph Redmon, Ali Farhadi
YOLOv3: An Incremental Improvement. arXiv:1804.02767
[2] Alexey Bochkovskiy, Chien-Yao Wang, Hong-Yuan Mark Liao
YOLOv4: Optimal Speed and Accuracy of Object Detection. arXiv:2004.10934
"""
import collections
import tensorflow as tf
from official.modeling import hyperparams
from official.vision.beta.modeling.backbones import factory
from official.vision.beta.projects.yolo.modeling.layers import nn_blocks
class BlockConfig:
"""Class to store layer config to make code more readable."""
def __init__(self, layer, stack, reps, bottleneck, filters, pool_size,
kernel_size, strides, padding, activation, route, dilation_rate,
output_name, is_output):
"""Initializing method for BlockConfig.
Args:
layer: A `str` for layer name.
stack: A `str` for the type of layer ordering to use for this specific
level.
reps: An `int` for the number of times to repeat block.
      bottleneck: A `bool` for whether this stack has a bottleneck layer.
filters: An `int` for the output depth of the level.
pool_size: An `int` for the pool_size of max pool layers.
kernel_size: An `int` for convolution kernel size.
strides: A `Union[int, tuple]` that indicates convolution strides.
padding: An `int` for the padding to apply to layers in this stack.
activation: A `str` for the activation to use for this stack.
route: An `int` for the level to route from to get the next input.
      dilation_rate: An `int` for the scale used in dilated Darknet.
output_name: A `str` for the name to use for this output.
is_output: A `bool` for whether this layer is an output in the default
model.
"""
self.layer = layer
self.stack = stack
self.repetitions = reps
self.bottleneck = bottleneck
self.filters = filters
self.kernel_size = kernel_size
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.activation = activation
self.route = route
self.dilation_rate = dilation_rate
self.output_name = output_name
self.is_output = is_output
def build_block_specs(config):
specs = []
for layer in config:
specs.append(BlockConfig(*layer))
return specs
class LayerBuilder:
"""Layer builder class.
Class for quick look up of default layers used by darknet to
connect, introduce or exit a level. Used in place of an if condition
or switch to make adding new layers easier and to reduce redundant code.
"""
def __init__(self):
self._layer_dict = {
'ConvBN': (nn_blocks.ConvBN, self.conv_bn_config_todict),
'MaxPool': (tf.keras.layers.MaxPool2D, self.maxpool_config_todict)
}
def conv_bn_config_todict(self, config, kwargs):
dictvals = {
'filters': config.filters,
'kernel_size': config.kernel_size,
'strides': config.strides,
'padding': config.padding
}
dictvals.update(kwargs)
return dictvals
def darktiny_config_todict(self, config, kwargs):
dictvals = {'filters': config.filters, 'strides': config.strides}
dictvals.update(kwargs)
return dictvals
def maxpool_config_todict(self, config, kwargs):
return {
'pool_size': config.pool_size,
'strides': config.strides,
'padding': config.padding,
'name': kwargs['name']
}
def __call__(self, config, kwargs):
layer, get_param_dict = self._layer_dict[config.layer]
param_dict = get_param_dict(config, kwargs)
return layer(**param_dict)
# model configs
LISTNAMES = [
'default_layer_name', 'level_type', 'number_of_layers_in_level',
    'bottleneck', 'filters', 'kernel_size', 'pool_size', 'strides', 'padding',
'default_activation', 'route', 'dilation', 'level/name', 'is_output'
]
CSPDARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 106,
'neck_split': 132
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, True, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
CSPADARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'mish',
-1, 1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
LARGECSP53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, True, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 3, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 15, False, 256, None, None, None, None, 'mish',
-1, 1, 3, True
],
[
'DarkRes', 'csp', 15, False, 512, None, None, None, None, 'mish',
-1, 2, 4, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 8, 6, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 16, 7, True
],
]
}
DARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 76
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'leaky',
-1, 1, 1, False
],
[
'DarkRes', 'residual', 2, False, 128, None, None, None, None,
'leaky', -1, 1, 2, False
],
[
'DarkRes', 'residual', 8, False, 256, None, None, None, None,
'leaky', -1, 1, 3, True
],
[
'DarkRes', 'residual', 8, False, 512, None, None, None, None,
'leaky', -1, 2, 4, True
],
[
'DarkRes', 'residual', 4, False, 1024, None, None, None, None,
'leaky', -1, 4, 5, True
],
]
}
CSPDARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 28
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 2, 'same', 'leaky', -1, 1, 0,
False
],
[
'ConvBN', None, 1, False, 64, None, 3, 2, 'same', 'leaky', -1, 1, 1,
False
],
[
'CSPTiny', 'csp_tiny', 1, False, 64, None, 3, 2, 'same', 'leaky',
-1, 1, 2, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 128, None, 3, 2, 'same', 'leaky',
-1, 1, 3, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 256, None, 3, 2, 'same', 'leaky',
-1, 1, 4, True
],
[
'ConvBN', None, 1, False, 512, None, 3, 1, 'same', 'leaky', -1, 1,
5, True
],
]
}
DARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 14
},
'backbone': [
[
'ConvBN', None, 1, False, 16, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkTiny', 'tiny', 1, True, 32, None, 3, 2, 'same', 'leaky', -1, 1,
1, False
],
[
'DarkTiny', 'tiny', 1, True, 64, None, 3, 2, 'same', 'leaky', -1, 1,
2, False
],
[
'DarkTiny', 'tiny', 1, False, 128, None, 3, 2, 'same', 'leaky', -1,
1, 3, False
],
[
'DarkTiny', 'tiny', 1, False, 256, None, 3, 2, 'same', 'leaky', -1,
1, 4, True
],
[
'DarkTiny', 'tiny', 1, False, 512, None, 3, 2, 'same', 'leaky', -1,
1, 5, False
],
[
'DarkTiny', 'tiny', 1, False, 1024, None, 3, 1, 'same', 'leaky', -1,
1, 5, True
],
]
}
BACKBONES = {
'darknettiny': DARKNETTINY,
'darknet53': DARKNET53,
'cspdarknet53': CSPDARKNET53,
'altered_cspdarknet53': CSPADARKNET53,
'cspdarknettiny': CSPDARKNETTINY,
'csp-large': LARGECSP53,
}
@tf.keras.utils.register_keras_serializable(package='yolo')
class Darknet(tf.keras.Model):
"""The Darknet backbone architecture."""
def __init__(
self,
model_id='darknet53',
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
min_level=None,
max_level=5,
width_scale=1.0,
depth_scale=1.0,
csp_level_mod=(),
activation=None,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
dilate=False,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
layer_specs, splits = Darknet.get_model_config(model_id)
self._model_name = model_id
self._splits = splits
self._input_shape = input_specs
self._registry = LayerBuilder()
# default layer look up
self._min_size = min_level
self._max_size = max_level
self._output_specs = None
self._csp_level_mod = set(csp_level_mod)
self._kernel_initializer = kernel_initializer
self._bias_regularizer = bias_regularizer
self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
self._use_sync_bn = use_sync_bn
self._activation = activation
self._kernel_regularizer = kernel_regularizer
self._dilate = dilate
self._width_scale = width_scale
self._depth_scale = depth_scale
self._default_dict = {
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
'dilation_rate': 1,
'name': None
}
inputs = tf.keras.layers.Input(shape=self._input_shape.shape[1:])
output = self._build_struct(layer_specs, inputs)
super().__init__(inputs=inputs, outputs=output, name=self._model_name)
@property
def input_specs(self):
return self._input_shape
@property
def output_specs(self):
return self._output_specs
@property
def splits(self):
return self._splits
def _build_struct(self, net, inputs):
endpoints = collections.OrderedDict()
stack_outputs = [inputs]
for i, config in enumerate(net):
if config.output_name > self._max_size:
break
if config.output_name in self._csp_level_mod:
config.stack = 'residual'
config.filters = int(config.filters * self._width_scale)
config.repetitions = int(config.repetitions * self._depth_scale)
if config.stack is None:
x = self._build_block(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'residual':
x = self._residual_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp':
x = self._csp_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp_tiny':
x_pass, x = self._csp_tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x_pass)
elif config.stack == 'tiny':
x = self._tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
if (config.is_output and self._min_size is None):
endpoints[str(config.output_name)] = x
elif (self._min_size is not None and
config.output_name >= self._min_size and
config.output_name <= self._max_size):
endpoints[str(config.output_name)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints.keys()}
return endpoints
def _get_activation(self, activation):
if self._activation is None:
return activation
return self._activation
def _csp_stack(self, inputs, config, name):
if config.bottleneck:
csp_filter_scale = 1
residual_filter_scale = 2
scale_filters = 1
else:
csp_filter_scale = 2
residual_filter_scale = 1
scale_filters = 2
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
else:
self._default_dict['dilation_rate'] = 1
# swap/add dilation
x, x_route = nn_blocks.CSPRoute(
filters=config.filters,
filter_scale=csp_filter_scale,
downsample=True,
**self._default_dict)(
inputs)
dilated_reps = config.repetitions - self._default_dict['dilation_rate'] // 2
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict)(
x)
for i in range(dilated_reps, config.repetitions):
self._default_dict[
'dilation_rate'] = self._default_dict['dilation_rate'] // 2
self._default_dict[
'name'] = f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict)(
x)
self._default_dict['name'] = f'{name}_csp_connect'
output = nn_blocks.CSPConnect(
filters=config.filters,
filter_scale=csp_filter_scale,
**self._default_dict)([x, x_route])
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return output
def _csp_tiny_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_tiny'
x, x_route = nn_blocks.CSPTiny(
filters=config.filters, **self._default_dict)(
inputs)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x, x_route
def _tiny_stack(self, inputs, config, name):
x = tf.keras.layers.MaxPool2D(
pool_size=2,
strides=config.strides,
padding='same',
data_format=None,
name=f'{name}_tiny/pool')(
inputs)
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_tiny/conv'
x = nn_blocks.ConvBN(
filters=config.filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**self._default_dict)(
x)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
def _residual_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_residual_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
if config.repetitions < 8:
config.repetitions += 2
else:
self._default_dict['dilation_rate'] = 1
x = nn_blocks.DarkResidual(
filters=config.filters, downsample=True, **self._default_dict)(
inputs)
dilated_reps = config.repetitions - (
self._default_dict['dilation_rate'] // 2) - 1
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(
filters=config.filters, **self._default_dict)(
x)
for i in range(dilated_reps, config.repetitions - 1):
self._default_dict[
'dilation_rate'] = self._default_dict['dilation_rate'] // 2
self._default_dict[
'name'] = f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
x = nn_blocks.DarkResidual(
filters=config.filters, **self._default_dict)(
x)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
self._default_dict['dilation_rate'] = 1
return x
def _build_block(self, inputs, config, name):
x = inputs
i = 0
self._default_dict['activation'] = self._get_activation(config.activation)
while i < config.repetitions:
self._default_dict['name'] = f'{name}_{i}'
layer = self._registry(config, self._default_dict)
x = layer(x)
i += 1
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
@staticmethod
def get_model_config(name):
name = name.lower()
backbone = BACKBONES[name]['backbone']
splits = BACKBONES[name]['splits']
return build_block_specs(backbone), splits
@property
def model_id(self):
return self._model_name
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def get_config(self):
layer_config = {
'model_id': self._model_name,
'min_level': self._min_size,
'max_level': self._max_size,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
}
return layer_config
@factory.register_backbone_builder('darknet')
def build_darknet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds darknet."""
backbone_cfg = backbone_config.get()
model = Darknet(
model_id=backbone_cfg.model_id,
min_level=backbone_cfg.min_level,
max_level=backbone_cfg.max_level,
input_specs=input_specs,
dilate=backbone_cfg.dilate,
width_scale=backbone_cfg.width_scale,
depth_scale=backbone_cfg.depth_scale,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
model.summary()
return model
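# Minimal usage sketch (added for illustration; the input shape below is an
# example value, not from the original module). Running this file directly
# builds the darknet53 backbone and prints its multi-level output specs.
if __name__ == '__main__':
  net = Darknet(
      model_id='darknet53',
      input_specs=tf.keras.layers.InputSpec(shape=[None, 416, 416, 3]),
      min_level=3,
      max_level=5)
  print(net.output_specs)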
| [
"[email protected]"
] | |
19e5e99b4598f9270e0cc992301e841753fd2870 | c2b386e1d28c58efbb9d847098a87032e2cbacca | /products_app/init.py | 930a8691b7c54fa99f1d8508a131fb4977bb6b31 | [] | no_license | jmlm74/P11-Ameliorez-un-projet-existant-en-Python | e6468342554f5c4aa03bc0bb954aa7995e98e293 | 28cd84698bf272e279bbf6e1d15211ef2a3c6403 | refs/heads/master | 2022-12-11T02:57:59.563283 | 2020-09-10T16:02:27 | 2020-09-10T16:02:27 | 290,844,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py |
# URL and parameters for the Open Food Facts API
url = 'https://fr-en.openfoodfacts.org/cgi/search.pl'
params_off = {'search_simple': 1,
'action': 'process',
'json': 1,
'page_size': 300,
'page': 1,
'tagtype_0': 'categories',
'tag_contains_0': 'contains',
'tag_0': 'cat',
'tagtype_1': 'countries',
'tag_contains_1': 'contains',
'tag_1': 'france',
'sort_by': 'unique_scans_n'
}
# categories to fetch
categories = ['biscuits',
'Crepes',
'desserts',
'sweetened-beverages', ]
# brands to fetch so the data includes well-known products
brands = {'coca cola',
'ferrero',
'pepsi'}
# items per page for the paginator
NB_ITEMS_PAGE = 12
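# Illustrative sketch (added; not part of the original module): how the
# constants above could drive an Open Food Facts query per category. The use
# of `requests` and the 'products' key in the response are assumptions based
# on the public OFF search API, not on this project's fetch code.
def fetch_category(category, page=1):
    import requests
    params = dict(params_off, tag_0=category, page=page)
    return requests.get(url, params=params).json().get('products', [])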
| [
"[email protected]"
] | |
cf3dc94deb898b3147c1d529a1fbf335561c2e0b | 639d6a00e3a8fab07ce07fec408eef6bc050d21d | /indice_pollution/db.py | 8c93d5b392a7f26b2474e3b88b9e22891432315d | [
"MIT"
] | permissive | betagouv/indice_pollution | e04634e0b9c6d4ce24ffdc4c19868599995c1bd5 | b85e53ca22d420e3d685fc84843d2011c6a696e4 | refs/heads/master | 2023-02-10T20:25:13.321999 | 2023-02-06T10:57:09 | 2023-02-06T10:57:09 | 250,297,957 | 4 | 1 | MIT | 2023-01-25T09:25:45 | 2020-03-26T15:33:02 | Python | UTF-8 | Python | false | false | 218 | py | from sqlalchemy import MetaData
from sqlalchemy.orm import declarative_base
global engine, Session
metadata = MetaData(schema="indice_schema")
Base = declarative_base(metadata=metadata)
engine = None
session = None | [
"[email protected]"
] | |
72b698651d6f869623903874a9cb46cd307ac5e2 | 05218d01394294bb7ede726bf3dc6f0691e4299b | /machineLearning/mini_Project/mini_project_0401.py | 21e36071cf323a4a3e1726e08d32fe4925ed6a43 | [] | no_license | bitacademy-howl/anaconda_workspace | 156556c52342b461ffb8304dfb55a845fff5ae90 | d9dc5007b6e95fa0bf7b95a457cafe68a0167992 | refs/heads/master | 2020-03-23T12:10:14.872385 | 2018-08-10T10:04:55 | 2018-08-10T10:04:55 | 141,539,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,058 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
import os
# Training dataset bundled with sklearn
data = load_boston()
print('=============================================================================')
print('================================ data type ==================================')
print(type(data))
print('=============================================================================')
print('=============================================================================')
print(type(data.keys()), data.keys())
print('=============================================================================')
print('=============================== description ================================')
print(data['DESCR'])
print('=============================================================================')
# Dataset containing only the raw feature values
print('================================ dataset ====================================')
X = data['data']
print(X)
print('=============================================================================')
# The raw data array does not carry column names.
print('=============================================================================')
header = data['feature_names']
print(header)
# The price is supplied separately as the target, so it is appended when building the DataFrame.
print('=============================================================================')
Y = data['target']
Y = Y.reshape(-1, 1)
print(type(Y), Y)
print('=============================================================================')
# The DataFrame to be used from here on: column names not attached yet
df = pd.DataFrame(np.append(X, Y, axis=1))
print(df)
print('=============================================================================')
# Append the PRICE column name to the feature names
df.columns = np.append(header,'PRICE')
# Attach the column names to the DataFrame
# Inspect the DataFrame
print(df.head(5))
print(df.tail(5))
# A one-stop summary of the descriptive statistics
result_desc = df.describe()
print(result_desc)
#######################################################################################################
# A quick aside: using these statistics,
# 1. draw a box plot
# 2. draw a distribution plot
# # 1. price distribution
# plt.hist(df['PRICE'],bins=100,color='green', density=True)
# plt.show()
# # 2.
# plt.boxplot([df['PRICE']],0)
# plt.show()
# Keep experimenting with this and come back to it later....
#######################################################################################################
# Correlation between the columns
corr_df = np.round(df.corr(),3)
print(corr_df)
# ,marker='o',s=10
pd.plotting.scatter_matrix(df, alpha=0.8, diagonal='kde')
plt.show()
# os.chdir(r'D:\1. stark\temp')
#
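# Illustrative next step (added sketch): the LinearRegression and
# train_test_split imports above are otherwise unused, which suggests a fit
# along these lines; the split ratio and random_state are example values.
X_train, X_test, y_train, y_test = train_test_split(
    df[header], df['PRICE'], test_size=0.2, random_state=0)
reg = LinearRegression().fit(X_train, y_train)
print('R^2 on the test set:', reg.score(X_test, y_test))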
# df.to_csv('data.csv',index=True) | [
"[email protected]"
] | |
dac61de3894ea89b441f9876d43b4e8b8e7aabcc | a7587f813492163433202e244df2237c9993a1a1 | /Store/migrations/0003_variation.py | 192756496452ac5feb5ca11e93277167f0ed89b4 | [] | no_license | kamran1231/E-COM-WEBSITE-2021 | 3a10bc0059f4d29fc52ee029e4919d4f965174c6 | 32214468cf716cc312a63f6346b8c844f720abda | refs/heads/master | 2023-06-01T03:18:03.137405 | 2021-07-04T14:20:16 | 2021-07-04T14:20:16 | 381,634,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | # Generated by Django 3.2.4 on 2021-07-02 18:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Store', '0002_alter_product_price'),
]
operations = [
migrations.CreateModel(
name='Variation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('variation_category', models.CharField(choices=[('color', 'color'), ('size', 'size')], max_length=100)),
('variation_value', models.CharField(max_length=100)),
('is_active', models.BooleanField(default=True)),
('created_date', models.DateTimeField(auto_now=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Store.product')),
],
),
]
| [
"[email protected]"
] | |
826ff29b8209c97f97229d3a9b5855b40d325524 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py | efb21f39026ffdd1e919cf6d1b8d713df2b94c91 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 2,804 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
from .operations import ServiceOperations
from .operations import FileSystemOperations
from .operations import PathOperations
from . import models
class AzureDataLakeStorageRESTAPI(object):
"""Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
:ivar service: ServiceOperations operations
:vartype service: azure.storage.filedatalake.operations.ServiceOperations
:ivar file_system: FileSystemOperations operations
:vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
:ivar path: PathOperations operations
:vartype path: azure.storage.filedatalake.operations.PathOperations
    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
:type url: str
"""
def __init__(
self,
url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{url}'
self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.service = ServiceOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_system = FileSystemOperations(
self._client, self._config, self._serialize, self._deserialize)
self.path = PathOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AzureDataLakeStorageRESTAPI
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
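# Hypothetical usage sketch (added; the account URL is a placeholder):
#   client = AzureDataLakeStorageRESTAPI(url="https://myaccount.dfs.core.windows.net")
#   with client:
#       ...  # call operations on client.service / client.file_system / client.path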
| [
"[email protected]"
] | |
0599e1b5865e8d9987e0659e9e04bf93f58d70be | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/bigquery/v2/bigquery-v2-py/google/cloud/bigquery_v2/services/model_service/async_client.py | f663b4845089503f56d9cf414847565501df681b | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,963 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_v2.types import encryption_config
from google.cloud.bigquery_v2.types import model
from google.cloud.bigquery_v2.types import model as gcb_model
from google.cloud.bigquery_v2.types import model_reference
from google.cloud.bigquery_v2.types import standard_sql
from google.protobuf import wrappers_pb2 # type: ignore
from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
from .client import ModelServiceClient
class ModelServiceAsyncClient:
""""""
_client: ModelServiceClient
DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ModelServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path)
common_organization_path = staticmethod(ModelServiceClient.common_organization_path)
parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path)
common_project_path = staticmethod(ModelServiceClient.common_project_path)
parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path)
common_location_path = staticmethod(ModelServiceClient.common_location_path)
parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ModelServiceAsyncClient: The constructed client.
"""
return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ModelServiceAsyncClient: The constructed client.
"""
return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ModelServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ModelServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ModelServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the model service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ModelServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ModelServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def get_model(self,
request: model.GetModelRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
model_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model.Model:
r"""Gets the specified model resource by model ID.
Args:
request (:class:`google.cloud.bigquery_v2.types.GetModelRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the requested
model.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the requested
model.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model_id (:class:`str`):
Required. Model ID of the requested
model.
This corresponds to the ``model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.Model:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, model_id])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.GetModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if model_id is not None:
request.model_id = model_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_model,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_models(self,
request: model.ListModelsRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
max_results: wrappers_pb2.UInt32Value = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model.ListModelsResponse:
r"""Lists all models in the specified dataset. Requires
the READER dataset role.
Args:
request (:class:`google.cloud.bigquery_v2.types.ListModelsRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the models to
list.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the models to
list.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
max_results (:class:`google.protobuf.wrappers_pb2.UInt32Value`):
The maximum number of results to
return in a single response page.
Leverage the page tokens to iterate
through the entire collection.
This corresponds to the ``max_results`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.ListModelsResponse:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, max_results])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.ListModelsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if max_results is not None:
request.max_results = max_results
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_models,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def patch_model(self,
request: gcb_model.PatchModelRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
model_id: str = None,
model: gcb_model.Model = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcb_model.Model:
r"""Patch specific fields in the specified model.
Args:
request (:class:`google.cloud.bigquery_v2.types.PatchModelRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the model to
patch.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the model to
patch.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model_id (:class:`str`):
Required. Model ID of the model to
patch.
This corresponds to the ``model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (:class:`google.cloud.bigquery_v2.types.Model`):
Required. Patched model.
Follows RFC5789 patch semantics. Missing
fields are not updated. To clear a
field, explicitly set to default value.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.Model:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, model_id, model])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = gcb_model.PatchModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if model_id is not None:
request.model_id = model_id
if model is not None:
request.model = model
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.patch_model,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_model(self,
request: model.DeleteModelRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
model_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the model specified by modelId from the
dataset.
Args:
request (:class:`google.cloud.bigquery_v2.types.DeleteModelRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the model to
delete.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the model to
delete.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model_id (:class:`str`):
Required. Model ID of the model to
delete.
This corresponds to the ``model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, model_id])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.DeleteModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if model_id is not None:
request.model_id = model_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_model,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-bigquery",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"ModelServiceAsyncClient",
)
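# Hypothetical usage sketch (added; the IDs are placeholders). The flattened
# keyword arguments mirror the get_model signature above:
#   async def main():
#       client = ModelServiceAsyncClient()
#       model = await client.get_model(
#           project_id="my-project", dataset_id="my_dataset", model_id="my_model")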
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
06eb118e8879ca755ff7c592ecfb8c07b1333b91 | 553b34a101c54090e68f540d96369ac7d5774d95 | /python/algo/src/minimum_cut.py | bf33b42a8714492e38de25c04a941877eafc0264 | [
"MIT"
] | permissive | topliceanu/learn | fd124e1885b5c0bfea8587510b5eab79da629099 | 1c5b1433c3d6bfd834df35dee08607fcbdd9f4e3 | refs/heads/master | 2022-07-16T19:50:40.939933 | 2022-06-12T15:40:20 | 2022-06-12T15:40:20 | 21,684,180 | 26 | 12 | MIT | 2020-03-26T20:51:35 | 2014-07-10T07:22:17 | JavaScript | UTF-8 | Python | false | false | 2,668 | py | # -*- coding: utf-8 -*-
import random
from src.maximum_flow import ford_fulkerson_maximum_flow
def pick_random_edge(graph):
""" Returns a random edge from the given graph. """
edges = graph.get_edges()
return random.choice(edges)
def contract(graph, edge):
""" Composes a new vertex from the ends of the given edge.
All the resulting self-loop edges are removed.
Args:
        graph: a data structure containing all data and operations.
edge: a tuple of format (tail, head, value)
Returns:
        The graph after contracting the given edge.
"""
(tail, head, value) = graph.split_edge(edge)
super_vertex = '{start}_{end}'.format(start=tail, end=head)
# Remove individual vertices and add super-vertex.
graph.rename_vertex(tail, super_vertex)
graph.rename_vertex(head, super_vertex)
return graph
def randomized_cut(graph):
""" Finds a cut in a given graph using the random contraction algorithm
defined by David Karger in '93.
NOTE! This algorithm modifies the graph in place, so make sure you clone
it before compacting if you don't want your original graph modified.
Args:
        graph: a data structure containing all data and operations.
Returns:
The compacted graph.
"""
while len(graph.get_vertices()) != 2:
edge = pick_random_edge(graph)
contract(graph, edge)
return graph
def minimum_cut(graph, tries):
""" Finds the the minimum cut in the given graph after a running the
randomized cut algorithm a given number of tries.
Args:
        graph: a data structure containing all vertices, edges and supported
operations.
tries: int, number of times to try the randomized cut algorithm.
Returns:
cuts, list of cut edges which produce the minimum cut.
"""
min_cuts = []
    for __ in range(tries):
g = graph.clone()
randomized_cut(g)
[left_super_vertex, right_super_vertex] = g.get_vertices()
left_vertices = set(left_super_vertex.split('_'))
right_vertices = set(right_super_vertex.split('_'))
cuts = []
for left_vertex in left_vertices:
right_neighbours = set(graph.neighbours(left_vertex))\
.intersection(right_vertices)
for right_vertex in right_neighbours:
cuts.append((left_vertex, right_vertex))
if (len(min_cuts) == 0 or len(min_cuts) > len(cuts)):
min_cuts = cuts
return min_cuts
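# Illustrative helper (an addition, not part of the original module). A single
# randomized_cut run returns a minimum cut with probability at least
# 2/(n*(n-1)), so roughly n*(n-1)/2 * ln(n) independent tries drive the
# failure probability down to about 1/n.
def recommended_tries(num_vertices):
    """ Suggests a value for the `tries` argument of minimum_cut. """
    import math
    n = max(num_vertices, 2)
    return int(math.ceil(n * (n - 1) / 2.0 * math.log(n)))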
def minimum_cut_using_maximum_flow(graph, start, end):
""" Solve the minimum cut problem by reducing it to maximum flow. """
    # TODO: compute a maximum flow from start to end, then return the edges
    # crossing the saturated source-side/sink-side partition.
    raise NotImplementedError
[file: /com.ppc.Bot/devices/thermostat/thermostat_honeywell_lyric.py | repo: 30s/botlab | license: Apache-2.0 | lang: Python]
'''
Created on March 27, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
# Device Model
# https://presence.atlassian.net/wiki/display/devices/Thermostat
from devices.thermostat.thermostat import ThermostatDevice
# Set the default rounding to 3 numbers.
from decimal import *
getcontext().prec = 1
class ThermostatHoneywellLyricDevice(ThermostatDevice):
"""Honeywell Lyric Thermostat Device"""
# List of Device Types this class is compatible with
DEVICE_TYPES = [4230]
# Minimum setpoint in Celsius
MIN_SETPOINT_C = 7.0
# Maximum setpoint in Celsius
MAX_SETPOINT_C = 29.0
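    # For reference, 7.0 C is roughly 44.6 F and 29.0 C is roughly 84.2 F.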
def get_device_type_name(self, language):
"""
:return: the name of this device type in the given language, for example, "Entry Sensor"
"""
# NOTE: Device type name
return _("Honeywell Lyric Thermostat")
def set_system_mode(self, botengine, system_mode, reliably=False):
"""
Set the system mode
        :param botengine: BotEngine environment
        :param system_mode: System mode to set
        :param reliably: True to keep retrying to get the command through
        :return:
        """
        # Note: the parent is called with reliably=False here, so the
        # `reliably` argument is not forwarded for this device.
        ThermostatDevice.set_system_mode(self, botengine, system_mode, reliably=False)
def set_cooling_setpoint(self, botengine, setpoint_celsius, reliably=False):
"""
Set the cooling setpoint
:param botengine: BotEngine environment
:param setpoint_celsius: Absolute setpoint in Celsius
        :param reliably: True to keep retrying to get the command through
        """
        # As above, `reliably` is not forwarded; the parent is called with reliably=False.
        ThermostatDevice.set_cooling_setpoint(self, botengine, setpoint_celsius, reliably=False)
def set_heating_setpoint(self, botengine, setpoint_celsius, reliably=False):
"""
Set the heating set-point
:param botengine: BotEngine environmnet
:param setpoint_celsius: Temperature in Celsius
:param reliably: True to keep retrying to get the command through
"""
ThermostatDevice.set_heating_setpoint(self, botengine, setpoint_celsius, reliably=False)
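# Hedged usage sketch (assumes a `botengine` instance and a `device` object of
# this class exist; `requested_c` is illustrative): clamp a requested setpoint
# to the device's supported range before issuing the command.
#
# target_c = max(ThermostatHoneywellLyricDevice.MIN_SETPOINT_C,
#                min(ThermostatHoneywellLyricDevice.MAX_SETPOINT_C, requested_c))
# device.set_heating_setpoint(botengine, target_c)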
[file: /venv/Lib/site-packages/cobra/modelimpl/fabric/nodetopolicy.py | repo: bkhoward/aciDOM | license: none | lang: Python]
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class NodeToPolicy(Mo):
meta = ClassMeta("cobra.model.fabric.NodeToPolicy")
meta.isAbstract = True
    meta.moClassName = "fabricNodeToPolicy"
meta.rnFormat = ""
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Super Class for Relation from Node to Fabric Policies Deployed on Node"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fabric.CreatedBy")
meta.childNamesAndRnPrefix.append(("cobra.model.fabric.CreatedBy", "source-"))
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.concreteSubClasses.add("cobra.model.fabric.RsModulePolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsToInterfacePolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsToInterfaceSpPolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsBndlGrp")
meta.concreteSubClasses.add("cobra.model.fabric.RsPodPolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsVpcBndlGrp")
meta.concreteSubClasses.add("cobra.model.fabric.RsInterfacePolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsModulePolProfile")
meta.concreteSubClasses.add("cobra.model.fabric.RsNodeOverride")
meta.concreteSubClasses.add("cobra.model.infra.RsToVsanEncapInstDef")
meta.concreteSubClasses.add("cobra.model.fabric.RsCtrlrPolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsInfraNodeOverride")
meta.concreteSubClasses.add("cobra.model.fabric.RsNodePolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsToVsanAttr")
meta.concreteSubClasses.add("cobra.model.infra.RsToEncapInstDef")
meta.concreteSubClasses.add("cobra.model.infra.RsNodePolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsFexGrp")
meta.concreteSubClasses.add("cobra.model.infra.RsInterfacePolProfile")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "deplSt", "deplSt", 15582, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("delivered", "delivered", 1)
prop._addConstant("node-not-ready", "node-not-ready", 1073741824)
prop._addConstant("none", "none", 0)
prop._addConstant("not-registered-for-atg", "node-cannot-deploy-epg", 64)
prop._addConstant("not-registered-for-fabric-ctrls", "node-not-controller", 16)
prop._addConstant("not-registered-for-fabric-leafs", "node-not-leaf-for-fabric-policies", 4)
prop._addConstant("not-registered-for-fabric-node-group", "node-not-registered-for-node-group-policies", 32)
prop._addConstant("not-registered-for-fabric-oleafs", "node-not-capable-of-deploying-fabric-node-leaf-override", 2048)
prop._addConstant("not-registered-for-fabric-ospines", "node-not-capable-of-deploying-fabric-node-spine-override", 4096)
prop._addConstant("not-registered-for-fabric-pods", "node-has-not-joined-pod", 8)
prop._addConstant("not-registered-for-fabric-spines", "node-not-spine", 2)
prop._addConstant("not-registered-for-infra-leafs", "node-not-leaf-for-infra-policies", 128)
prop._addConstant("not-registered-for-infra-oleafs", "node-not-capable-of-deploying-infra-node-leaf-override", 512)
prop._addConstant("not-registered-for-infra-ospines", "node-not-capable-of-deploying-infra-node-spine-override", 1024)
prop._addConstant("not-registered-for-infra-spines", "node-not-spine-for-infra-policies", 256)
prop._addConstant("pod-misconfig", "node-belongs-to-different-pod", 8192)
prop._addConstant("policy-deployment-failed", "policy-deployment-failed", 2147483648)
meta.props.add("deplSt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 101, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
[file: /langs/6/o8-.py | repo: G4te-Keep3r/HowdyHackers | license: none | lang: Python]
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
            if not data:
                continue
            if data[0] == 'o8-':
                printFunction(data[1:])
            else:
                print('ERROR')
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
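# Worked example (hypothetical input file 'program.o8-'):
#
#   o8- " hello world "
#   o8- " "
#
# `python o8-.py program.o8-` prints "hello world" and then a blank line;
# any line whose first token is not 'o8-' prints ERROR and stops.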
"[email protected]"
] |