blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7dcb326ef51e4e6daaee6b946678ccd86fbbeef
|
1f030e5f0768c9243cd018a0630e3c3708e37499
|
/readfiles.py
|
0af8adba299af860900dae4496c8ec332d362d9f
|
[
"MIT"
] |
permissive
|
ilona-asa/LDSAproject
|
ba5b876e4cdfc1593462cca59f4857043f932878
|
a30ba715a8bea4d9ab7bebad56b7c66c77a2d1c3
|
refs/heads/master
| 2020-12-30T12:00:50.721364 | 2017-06-19T17:14:28 | 2017-06-19T17:14:28 | 91,493,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 484 |
py
|
#!/usr/bin/env python
# Python 2 script: walk the Enron "maildir" dump and print the contents of
# every message found in each user's sent_items folder.
# NOTE(review): Python 2 only (print statement); paths are built with '+'
# rather than os.path.join, and files are opened without a context manager.
import os
# NOTE(review): hard-coded absolute path to the local dataset copy — adjust
# before running elsewhere.
rootdir ='/home/alekodu/Downloads/Large Datasets for Scientific Applications/Miniproject/enron_mail_20110402/maildir'
# one directory per mailbox owner
for user in os.listdir(rootdir):
    for sent in os.listdir(rootdir+'/'+user):
        # only each user's sent_items folder is of interest
        if (sent == 'sent_items'):
            for mail in os.listdir(rootdir+'/'+user+'/'+sent):
                #print mail
                # skip nested directories; only read regular files
                if os.path.isfile(rootdir+'/'+user+'/'+sent+'/'+mail):
                    f = open(rootdir+'/'+user+'/'+sent+'/'+mail, 'r')
                    print f.readlines()
                    f.close()
|
[
"[email protected]"
] | |
e2e40d5891dce3971a5ced4be320735fdf96b935
|
d56f1f35e3d5ad361ae1e31f56badd8b192010eb
|
/python/sample.py
|
1eccfe46f2dc43ed40f705bc2977281aeeef1660
|
[] |
no_license
|
v-stickykeys/bitbit
|
2c62c7239d278e34f4f4414890151b0e7d90bafc
|
81541a78fda00cc8c53dc326c99b3118e90d0541
|
refs/heads/master
| 2021-10-02T22:34:26.544609 | 2018-12-02T01:16:26 | 2018-12-02T01:16:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 43 |
py
|
def hello():
    """Return the fixed greeting used by the unit-test sample."""
    greeting = 'Hello, Unit Test'
    return greeting
|
[
"[email protected]"
] | |
eec5c4fa0f644a545fc9a9cee45cc61cdb1b4f19
|
6ac0b1b3b86ae78e81daa52e685f57f55f93fcbd
|
/restaurante/migrations/0017_auto_20190701_1602.py
|
e21954c964f0e4d13573649ecb686015db443247
|
[] |
no_license
|
NicolefAvella/blog_Restaurante
|
e789d32a06303541ee59c71adb4edc9f7c45e45c
|
3de7e0a809a2978822cd026f057899ba692bc9ce
|
refs/heads/master
| 2022-05-01T02:17:50.357433 | 2019-07-02T15:18:22 | 2019-07-02T15:18:22 | 194,179,752 | 0 | 0 | null | 2022-04-22T21:46:37 | 2019-06-28T00:22:53 |
Python
|
UTF-8
|
Python
| false | false | 881 |
py
|
# Generated by Django 2.2.2 on 2019-07-01 21:02
# Auto-generated schema migration: alters three fields on the `comentario`
# model of the `restaurante` app.  Prefer editing the models and generating
# a fresh migration over hand-editing this file.
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Must be applied after migration 0016 of the same app.
    dependencies = [
        ('restaurante', '0016_auto_20190630_1616'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comentario',
            name='comentarios',
            field=models.TextField(default='deja tu comentario aqui!', max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='comentario',
            name='fecha_comentario',
            # auto_now_add: stamped once at row creation, not on updates
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='comentario',
            name='post_restaurant',
            # deleting a PostRestaurant cascades to its comments
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurante.PostRestaurant'),
        ),
    ]
|
[
"[email protected]"
] | |
dff2b536322cbc8ac24cd00ed962fdad5d4bbba2
|
592961def9fe287a31e117649f1ac1e97b085a9b
|
/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py
|
a6582e6dd5b515ec3783b7ecc3ed81adffc4f3cb
|
[] |
no_license
|
Rushin95/The_Trip_Planner-Lyft_vs_Uber
|
62f03a1df8c6a0268089f50f4e80ec3d9b6b9870
|
4eeea4029eb4df047471b92065455a6828232293
|
refs/heads/master
| 2021-01-19T11:52:47.766019 | 2018-05-03T23:59:58 | 2018-05-03T23:59:58 | 82,268,914 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 663 |
py
|
from __future__ import absolute_import, division, unicode_literals
from . import base
try:
    from collections import OrderedDict
except ImportError:
    # noinspection PyUnresolvedReferences
    from ordereddict import OrderedDict
class Filter(base.Filter):
    """Tree-walker filter that re-emits each StartTag/EmptyTag token with
    its attributes sorted alphabetically by name, so that serialization
    output is deterministic.  (Vendored html5lib code.)"""
    def __iter__(self):
        for token in base.Filter.__iter__(self):
            if token["type"] in ("StartTag", "EmptyTag"):
                attrs = OrderedDict()
                # rebuild the attribute mapping in name order; key=x[0]
                # sorts on the attribute name of each (name, value) pair
                for name, value in sorted(token["data"].items(),
                                          key=lambda x: x[0]):
                    attrs[name] = value
                token["data"] = attrs
            yield token
|
[
"[email protected]"
] | |
96d88c62609176236a61db5677dfd5dbff8a2f77
|
c2fc8f1d065375c7e7148daf9701c706ee2c02b3
|
/labmodel/migrations/0008_processinstance_only_walkup.py
|
22ef261e54757d03f1fd9d62f5a25c4a9a3496ff
|
[] |
no_license
|
mmehta25/myriadwetlabmodel1
|
b20f41cc60b3e2ad1b5a4843abbac782cbd6322d
|
d8309d39df795841f7fa110cf71416de595bb8b5
|
refs/heads/master
| 2023-07-04T03:19:42.788020 | 2021-08-10T17:20:26 | 2021-08-10T17:20:26 | 375,168,782 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
# Generated by Django 3.2.4 on 2021-08-06 20:11
# Auto-generated migration: adds the boolean `only_walkup` flag to the
# `processinstance` model.  Prefer regenerating over hand-editing.
from django.db import migrations, models
class Migration(migrations.Migration):
    # Must be applied after migration 0007 of the labmodel app.
    dependencies = [
        ('labmodel', '0007_alter_labanalysis_failure_rate'),
    ]
    operations = [
        migrations.AddField(
            model_name='processinstance',
            name='only_walkup',
            # defaults to False so existing rows remain automatable
            field=models.BooleanField(default=False, help_text='True if the process cannot be automated'),
        ),
    ]
|
[
"[email protected]"
] | |
e89c16444708539db0d248af6c6a7d2e548fe9e7
|
3343c4e998f7748f6a5f9fb198baa798ede3cee0
|
/python/video processing/homework_02_11_201921017_이지우.py
|
ac22ed341c010c361d2639c7a75ac8796b0465fc
|
[] |
no_license
|
jwo29/prj
|
c8bf028d63c255ba95814cdaf5a1303ee8c19256
|
6e7d5cefa36ae47ffea167e21f2ff926eda6008f
|
refs/heads/main
| 2023-08-30T03:07:36.730416 | 2021-10-17T16:39:19 | 2021-10-17T16:39:19 | 406,631,911 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,136 |
py
|
# 4-11
# OpenCV exercise: draw on a white canvas with the mouse.  Right click draws
# a circle, left click a 30x30 rectangle; two trackbars control the line
# thickness and the circle radius.
import numpy as np
import cv2 as cv
olive, violet = (128, 128, 0), (221, 160, 221)
# Trackbar callback: update the global line thickness (minimum of 1).
def onChangeThickness(value):
    global thickness
    thickness = 1 if value == 0 else value
# Trackbar callback: update the global circle radius (minimum of 1).
def onChangeRadius(value):
    global radius
    radius = 1 if value == 0 else value
# Mouse callback: draw a circle (right button) or rectangle (left button)
# centred on the click position, then refresh the window.
def onMouse(event, x, y, flag, param):
    global thickness, radius
    if event == cv.EVENT_RBUTTONDOWN:
        cv.circle(image, (x, y), radius, olive, thickness, cv.LINE_AA)
        cv.imshow(title, image)
    elif event == cv.EVENT_LBUTTONDOWN:
        # (x-15, y-15, 30, 30) uses OpenCV's "rec" form: top-left + width/height
        cv.rectangle(image, (x-15, y-15, 30, 30), violet, thickness, cv.LINE_AA)
        # cv.rectangle(image, (x-15, y-15, 30, 30), violet, cv.LINE_4)
        cv.imshow(title, image)
title = '4-10'
# 300x500 3-channel canvas, filled white
image = np.zeros((300, 500, 3), np.uint8)
image[:] = (255, 255, 255)
# initial line thickness and circle radius (translated from Korean)
thickness, radius = 1, 20
cv.imshow(title, image)
cv.createTrackbar('Thickness', title, thickness, 10, onChangeThickness) # max thickness 10
cv.createTrackbar('radius', title, radius, 50, onChangeRadius) # max radius 50
cv.setMouseCallback(title, onMouse)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"[email protected]"
] | |
2e0a0431b921c67132029866d0dc9a2fe708b565
|
e0268b6e868fcaaf6fc9c42b720e014c3ae41a20
|
/scripts/make_bu_data.py
|
ee30a5f8470d550046a3ed6c5170a7e7aee29344
|
[
"MIT"
] |
permissive
|
gradio-app/ImageCaptioning.pytorch
|
79208726dd09e1e532863af56c7a900b576cbca2
|
436d900d01139dc402b24425c60679409e0c9051
|
refs/heads/master
| 2022-11-15T03:27:38.775656 | 2020-07-12T22:44:30 | 2020-07-12T22:44:30 | 279,639,722 | 1 | 1 |
MIT
| 2020-07-14T16:37:47 | 2020-07-14T16:37:46 | null |
UTF-8
|
Python
| false | false | 1,889 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import base64
import numpy as np
import csv
import sys
import zlib
import time
import mmap
import argparse
parser = argparse.ArgumentParser()
# output_dir
parser.add_argument('--downloaded_feats', default='data/bu_data', help='downloaded feature directory')
parser.add_argument('--output_dir', default='data/cocobu', help='output feature files')
args = parser.parse_args()
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
infiles = ['trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv',
'trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv',\
'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0', \
'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1']
os.makedirs(args.output_dir+'_att')
os.makedirs(args.output_dir+'_fc')
os.makedirs(args.output_dir+'_box')
for infile in infiles:
print('Reading ' + infile)
with open(os.path.join(args.downloaded_feats, infile), "r+b") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['num_boxes'] = int(item['num_boxes'])
for field in ['boxes', 'features']:
item[field] = np.frombuffer(base64.decodestring(item[field]),
dtype=np.float32).reshape((item['num_boxes'],-1))
np.savez_compressed(os.path.join(args.output_dir+'_att', str(item['image_id'])), feat=item['features'])
np.save(os.path.join(args.output_dir+'_fc', str(item['image_id'])), item['features'].mean(0))
np.save(os.path.join(args.output_dir+'_box', str(item['image_id'])), item['boxes'])
|
[
"[email protected]"
] | |
b9feff22b1ab5d0e0c53c87c88ac128c1c29ab1a
|
9a8787995e418a96209908c2804533802fc3e465
|
/run.py
|
f6bcbe2026e5b577082471e55333606ed06ec48c
|
[
"MIT"
] |
permissive
|
BuildWeek-ft-airbnb-6/airbnb-plotly-dash-app
|
166a0d539e6ebc92e038f270e4da585fb244ab4f
|
4ad98c1016427ee4b6edf03ba2bc51114a66765b
|
refs/heads/main
| 2023-04-18T02:31:32.943437 | 2021-05-01T01:45:04 | 2021-05-01T01:45:04 | 370,393,378 | 0 | 3 |
MIT
| 2021-05-24T15:08:12 | 2021-05-24T15:08:11 | null |
UTF-8
|
Python
| false | false | 2,770 |
py
|
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app, server
from pages import index, predictions
# Navbar docs: https://dash-bootstrap-components.opensource.faculty.ai/l/components/navbar
# Top navigation bar shown on every page; only the Predictions link is live.
navbar = dbc.NavbarSimple(
    brand='AirBnB Price Predictor',
    brand_href='/',
    children=[
        dbc.NavItem(dcc.Link('Predictions', href='/predictions', className='nav-link')),
        # dbc.NavItem(dcc.Link('Insights', href='/insights', className='nav-link')),
        # dbc.NavItem(dcc.Link('Process', href='/process', className='nav-link')),
    ],
    sticky='top',
    color='dark',
    light=False,
    dark=True
)
# Footer docs:
# dbc.Container, dbc.Row, dbc.Col: https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
# html.P: https://dash.plot.ly/dash-html-components
# fa (font awesome) : https://fontawesome.com/icons/github-square?style=brands
# mr (margin right) : https://getbootstrap.com/docs/4.3/utilities/spacing/
# className='lead' : https://getbootstrap.com/docs/4.3/content/typography/#lead
footer = dbc.Container(
    dbc.Row(
        dbc.Col(
            html.P(
                [
                    html.Span('Lambda Data Science Track Team 45', className='mr-2'),
                    # html.A(html.I(className='fas fa-envelope-square mr-1'), href='mailto:<you>@<provider>.com'),
                    html.A(html.I(className='fab fa-github-square mr-1'), href='https://github.com/tt-dsft-45-AirBnb'),
                    # html.A(html.I(className='fab fa-linkedin mr-1'), href='https://www.linkedin.com/in/<you>/'),
                    # html.A(html.I(className='fab fa-twitter-square mr-1'), href='https://twitter.com/<you>'),
                ],
                className='lead'
            )
        )
    )
)
# Layout docs:
# Page skeleton: the dcc.Location tracks the URL; page-content is swapped
# by the display_page callback below.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    navbar,
    dbc.Container(id='page-content', className='mt-4'),
    html.Hr(),
    footer
])
# URL Routing for Multi-Page Apps: https://dash.plot.ly/urls
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Route the browser's pathname to the matching page layout.

    Unknown paths fall through to a simple 'Page not found' message.
    """
    if pathname == '/':
        return index.layout
    elif pathname == '/predictions':
        return predictions.layout
    # elif pathname == '/insights':
    #     return insights.layout
    # elif pathname == '/process':
    #     return process.layout
    else:
        return dcc.Markdown('## Page not found')
# Run app server: https://dash.plot.ly/getting-started
if __name__ == '__main__':
    app.run_server(debug=True)
|
[
"[email protected]"
] | |
09923eb66bd4f369d5c1ce927235126256150d99
|
5ec0468b7e12d4b12cbfe99e0659db6c049802a3
|
/cs50_python_js/python/loops.py
|
382119f7006aff9c636002aed842985efc412146
|
[] |
no_license
|
orellanachn/cs50
|
4b86bfa3ef777583cad30ae5efebacfc1b7de864
|
22fdbca070d8ff786c9cf0daf5a26967a882fb7d
|
refs/heads/master
| 2020-12-04T23:03:32.355545 | 2020-01-06T01:57:54 | 2020-01-06T01:57:54 | 231,929,493 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 45 |
py
|
# Print one numbered line for each of the first five integers (0-4).
for item_index in range(5):
    print(f"item number {item_index}")
|
[
"[email protected]"
] | |
7461c4581310fb6c776ce5069f7790960fc23f59
|
322f5e4711492f69858be712fc391cf9fdfda398
|
/proxylab-handout/nop-server.py
|
b38faf7b6ba7e04203a303bbf3cc620d49b7a5dc
|
[] |
no_license
|
RedemptionC/csapp-3e-lab
|
8b0ef2f14855658c404304be45a3255b60ff42ed
|
e79b72bf7fd89639c1502f15bcfd4e7b596b124d
|
refs/heads/master
| 2021-06-21T12:14:10.654061 | 2021-04-24T13:53:02 | 2021-04-24T13:53:02 | 209,988,988 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 529 |
py
|
#!/usr/bin/python3.6
# nop-server.py - This is a server that we use to create head-of-line
#     blocking for the concurrency test. It accepts a
#     connection, and then spins forever.
#
# usage: nop-server.py <port>
#
import socket
import sys
#create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind to all interfaces on the port given as the first CLI argument
serversocket.bind(('', int(sys.argv[1])))
serversocket.listen(5)
# Accept the first connection, then busy-spin forever without ever reading
# from it — deliberately stalling the client to create head-of-line blocking.
# NOTE(review): the inner loop never exits, so only one connection is ever
# accepted; this is intentional for the test harness.
while 1:
    channel, details = serversocket.accept()
    while 1:
        continue
|
[
"[email protected]"
] | |
ce037214f60bd6c8975b5e9da15eaaa6acd30d83
|
685038d4be188fa72e9dba1d2213a47ee3aa00a2
|
/ECOS2021/Demands/Inputs/Surveys/A/S3/Oct_S3_A.py
|
f3bb6b79446fe8f081e16398f9239662c9c7acc0
|
[] |
no_license
|
CIE-UMSS/Tradeoff-between-Installed-Capacity-and-Unserved-Energy
|
e5599e4e4ac60b97f0c4c57c5de95e493b1b5ac4
|
459f31552e3ab57a2e52167ab82f8f48558e173c
|
refs/heads/master
| 2023-06-01T18:09:29.839747 | 2021-06-19T15:56:26 | 2021-06-19T15:56:26 | 343,720,452 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,968 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 14:33:07 2020
@author: alejandrosoto
Script for 2 class of household in Raqaypampa.
"""
# -*- coding: utf-8 -*-
"""
@author: Alejandro Soto
"""
from core import User, np
User_list = []
#User classes definition
HI = User("high income",1)
User_list.append(HI)
LI = User("low income",0)
User_list.append(LI)
'''
Base scenario (BSA): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2), Water Heater (1), Mixer (1)
Base scenario (B): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2)
A
Scenario 1: BSA + Fridge (1) + Freezer* (1).
Scenario 2: BSA + Fridge (1).
Scenario 3: BSA + Fridge (1)*.
Scenario 4: BSA + Freezer (1).
Scenario 5: BSA + Wheler (1).
Scerario 6: BSA + Grinder (1).
Scanerio 7: Add + Dryer (1),
Scenario 9: All
B
Scenario 8: BSB + Water Heater** (1).
Scenario 10: BSA + Pump Water (1).
Scenario 11: BSA + DVD (1).
Scenario 12: BSA + Blender (1).
Scenario 13: BSA + Iron (1).
Scerario 14: BSA + Mill (1).
* With seasonal variation
** Occasional use
Cold Months: May-Aug Std Cycle 8:00-18:00 Above 10 degrees
Warm Months: Jan-Apr Std Cycle 0:00-23:59 Above 10 degrees
Hot Nonths: Sep-Dec Std Cycle 0:00-10:00; 15:01-23:59 Above 10 degrees
Int Cycle 10:01-15:00
'''
#High-Income
#indoor bulb
HI_indoor_bulb = HI.Appliance(HI,3,7,1,320,0.6,190)
HI_indoor_bulb.windows([1080,1440],[0,0])
#outdoor bulb
HI_outdoor_bulb = HI.Appliance(HI,1,13,1,340,0.1,300)
HI_outdoor_bulb.windows([1100,1440],[0,0])
HI_Radio = HI.Appliance(HI,1,7,1,280,0.3,110)
HI_Radio.windows([420,708],[0,0])
#tv
HI_TV = HI.Appliance(HI,1,60,3,300,0.38,114)
HI_TV.windows([1140,1440],[651,1139],0.35,[300,650])
#phone charger
HI_Phone_charger = HI.Appliance(HI,2,5,3,250,0.4,95)
HI_Phone_charger.windows([1190,1440],[0,420],0.35,[421,1189])
#water_heater
HI_Water_heater = HI.Appliance(HI,1,150,1,60,0.05,30)
HI_Water_heater.windows([0,1440],[0,0])
#mixer
HI_Mixer = HI.Appliance(HI,1,50,1,10,0.5,5,occasional_use = 0.3)
HI_Mixer.windows([420,560],[0,0])
#fridge
HI_Fridge = HI.Appliance(HI,1,200,1,1440,0,30,'yes',3)
HI_Fridge.windows([0,1440],[0,0])
HI_Fridge.specific_cycle_1(200,20,5,10)
HI_Fridge.specific_cycle_2(200,15,5,15)
HI_Fridge.specific_cycle_3(200,10,5,20)
HI_Fridge.cycle_behaviour([570,990],[0,0],[0,480],[1170,1440],[481,569],[991,1169])
#Lower Income
#indoor bulb
LI_indoor_bulb = LI.Appliance(LI,3,7,2,287,0.4,124)
LI_indoor_bulb.windows([1153,1440],[0,300],0.5)
#outdoor bulb
LI_outdoor_bulb = LI.Appliance(LI,1,13,1,243,0.3,71)
LI_outdoor_bulb.windows([1197,1440],[0,0])
#radio
LI_Radio = LI.Appliance(LI,1,7,2,160,0.3,49)
LI_Radio.windows([480,840],[841,1200],0.5)
#TV
LI_TV = LI.Appliance(LI,1,100,3,250,0.3,74)
LI_TV.windows([1170,1420],[551,1169],0.3,[300,550])
#phone charger
LI_Phone_charger = LI.Appliance(LI,2,5,3,200,0.4,82)
LI_Phone_charger.windows([1020,1440],[0,420],0.3,[720,1019])
|
[
"[email protected]"
] | |
c6f6ba2d9b4e9d5fb450005f0c441d8158549d6f
|
7f7239ca087141faf3706994a5209ed0c1f8c21f
|
/Python Crash Course/Chinese Version/第三章 列表简介.py
|
cbbafaf34e9db381a4208a1b71434e1ee83d6da6
|
[] |
no_license
|
xiemeigongzi88/Python_Beginners
|
f81bfaec6a3f607c18514d9c7c3d93271652cc8c
|
72a85cbd132ecab2c0d607f06d5e21002628795f
|
refs/heads/master
| 2021-07-01T14:39:43.346280 | 2020-10-30T17:52:47 | 2020-10-30T17:52:47 | 173,500,706 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,183 |
py
|
第三章 列表简介
Page 26 - 33
Page 26
3.1 列表是 什么?
列表:
由一系列按照 特定顺序排列的元素组成
其中元素之间可以没有任何关系
用 [] 来表示列表
bicycles=['trek','cannondale','redline','specialized']
print(bicycles)
OUT:
['trek', 'cannondale', 'redline', 'specialized']
3.1.1 访问列表元素
bicycles=['trek','cannondale','redline','specialized']
print(bicycles[0])
OUT:
trek
######################################
bicycles=['trek','cannondale','redline','specialized']
print(bicycles[0].title())
OUT:
Trek
########################################
bicycles=['trek','cannondale','redline','specialized']
print(bicycles[1])
print(bicycles[3])
print("###########")
print(bicycles[-1])
print(bicycles[-2])
print(bicycles[-3])
print(bicycles[-4])
OUT:
cannondale
specialized
###########
specialized
redline
cannondale
trek
3.1.3 使用列表中的各个值
可以使用拼接根据列表中的值 来 创建信息
bicycles=['trek','cannondale','redline','specialized']
message="My first bicycle was a "+bicycles[0].title()+"."
print(message)
OUT:
My first bicycle was a Trek.
###########################################
EXC 3-1
names=['Abel','Erica','Eric','Batholomew','Ana','Ada']
print(names)
for i in range(len(names)):
print(names[i])
OUT:
['Abel', 'Erica', 'Eric', 'Batholomew', 'Ana', 'Ada']
Abel
Erica
Eric
Batholomew
Ana
Ada
#######################################
EXC 3-2
names=['Abel','Erica','Eric','Batholomew','Ana','Ada']
print(names)
for i in range(len(names)):
print(names[i]+"\n Good Luck!")
################
EXC 3-3
Page 27
3.2 修改 添加 删除 元素
创建的 列表是动态的 在列表创建后 可与运行 删除 增加 操作
3.2.1 修改列表元素
motor=['honda','yamaha','suzuki']
print(motor)
motor[2]='ducati'
print(motor)
OUT:
['honda', 'yamaha', 'suzuki']
['honda', 'yamaha', 'ducati']
motor[3]='scar'
IndexError: list assignment index out of range
## 也就是说 列表只能对已存在的元素进行修改
3.2.2 在列表中添加元素
1. 在列表末尾添加元素
motor=['honda','yamaha','suzuki']
print(motor)
motor.append('ducati')
print(motor)
OUT:
['honda', 'yamaha', 'suzuki']
['honda', 'yamaha', 'suzuki', 'ducati']
##################################
a=[]
print(a)
a.append(1)
a.append(2)
a.append(3)
a.append(4)
a.append(5)
a.append(6)
print(a)
OUT:
[]
[1, 2, 3, 4, 5, 6]
经常要等到程序运行后 才能知道 用户在程序中存储了哪些数据
2. 在列表中插入元素
使用 insert() 方法 可在任意位置添加新元素
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
a.insert(1,'civic')
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
['honda', 'civic', 'toyota', 'Benz', 'LandRover', 'nissa']
3.2.3 从列表中删除元素
1. 使用 del 语句删除元素
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
del a[4]
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
['honda', 'toyota', 'Benz', 'LandRover']
2. 使用 pop() 方法 删除元素
将元素从列表中删除 并接着使用这个元素的值
pop() 可以删除列表末尾的元素 并可以使用
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
b=a.pop()
print(b)
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
nissa
['honda', 'toyota', 'Benz', 'LandRover']
##################################
a=['honda','toyota','Benz','LandRover','nissa']
b=a.pop()
print("The last motor I owned was a "+b.title()+".")
OUT:
The last motor I owned was a Nissa.
3. 弹出列表中任意位置的元素
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
b=a.pop(0)
print(b)
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
honda
['toyota', 'Benz', 'LandRover', 'nissa']
当使用 pop() 时, 被弹出的元素就不在列表中了
如果要从列表中删除一个元素 且 不再以任何方式使用, 就是用 del 语句
如果要在删除元素后还要再使用它, 就使用 pop() 方法
4. 根据值 删除元素
不知道 从列表中删除的值 所在的位置
使用 remove() 方法
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
a.remove('Benz')
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
['honda', 'toyota', 'LandRover', 'nissa']
使用 remove() 从列表中删除元素时, 也可接着使用它的值
a=['honda','toyota','benz','LandRover','nissa']
print(a)
b='benz'
a.remove(b)
print(a)
print("\nA "+b.title()+" is too expensive for me.")
OUT:
['honda', 'toyota', 'benz', 'LandRover', 'nissa']
['honda', 'toyota', 'LandRover', 'nissa']
A Benz is too expensive for me.
OUT:
['honda', 'toyota', 'benz', 'LandRover', 'nissa']
['honda', 'toyota', 'LandRover', 'nissa']
A Benz is too expensive for me.
Note:
方法 remove() 只是删除第一个指定的值, 如果要删除 的值可能在列表中出现多次 就需要循环来判断是否删除了所有这样的值
remove() 根据元素的内容删除
pop() 根据元素的位置删除
Page 30
3.3 组织列表
3.3.1 使用方法 sort() 对列表进行永久性排序
对列表元素排列顺序的修改是 永久性的
cars=['bmw','audi','toyota','subaru']
print(cars)
cars.sort()
print(cars)
OUT:
['bmw', 'audi', 'toyota', 'subaru']
['audi', 'bmw', 'subaru', 'toyota']
# 永久性地修改了列表元素地顺序 再也无法恢复到 原来的排列顺序
还可以按与字母顺序相反的顺序排列列表元素
cars=['bmw','audi','toyota','subaru']
print(cars)
cars.sort()
print(cars)
cars.sort(reverse=True)
print(cars)
OUT:
['bmw', 'audi', 'toyota', 'subaru']
['audi', 'bmw', 'subaru', 'toyota']
['toyota', 'subaru', 'bmw', 'audi']
3.3.2 使用函数 sorted() 对列表进行临时排序
要保留 原来 列表元素的排列顺序, 同时能以特定的顺序呈现列表元素, 可以使用 sorted() 函数
sorted() 能够按照特定的顺序显示 列表元素 同时不影响它们在列表中的原始排列顺序
cars=['bmw','audi','toyota','subaru']
print("here is the original list:")
print(cars)
print("\nhere is the sorted list:")
print(sorted(cars))
print("\nhere is the original list again:")
print(cars)
OUT:
here is the original list:
['bmw', 'audi', 'toyota', 'subaru']
here is the sorted list:
['audi', 'bmw', 'subaru', 'toyota']
here is the original list again:
['bmw', 'audi', 'toyota', 'subaru']
调用 函数 sorted() 以后, 列表元素的排列顺序没有发生改变
print("\nhere is the reverse sorted list:")
print(sorted(cars,reverse=True))
OUT:
here is the reverse sorted list:
['toyota', 'subaru', 'bmw', 'audi']
3.3.3 倒着打印列表
要反转列表元素的排列顺序 可以使用 reverse()
cars=['bmw','audi','toyota','subaru']
print(cars)
cars.reverse()
print(cars)
OUT:
['bmw', 'audi', 'toyota', 'subaru']
['subaru', 'toyota', 'audi', 'bmw']
##Note:
方法 reverse() 永久的修改了 列表元素的排列顺序, 但是可以随时恢复到原来的排列顺序 只需要 对列表再次调用 reverse() 即可
3.3.4 确定列表的长度
len()
>>> cars=['bmw','audi','toyota','subaru']
>>> len(cars)
4
Page 32
3.4 使用列表时 避免索引错误
cars=['bmw','audi','toyota','subaru']
print(cars[4])
OUT:
IndexError: list index out of range
####################################
cars=['bmw','audi','toyota','subaru']
print(cars[-1])
a=[]
print(a[-1])
OUT:
subaru
File "C:/Users/sxw17/PycharmProjects/myPro_obj/mypy_01.py", line 6, in <module>
print(a[-1])
IndexError: list index out of range
#Note:
仅当列表为空(不包含任何元素)的时候, 访问 cars[-1] 才会引发 IndexError
|
[
"[email protected]"
] | |
3aa40b19ea5dfacf313cd9d32dcac22f722576df
|
5ea7eb2c34eec0ea2ce5d7556cc910aa2bae47ad
|
/定期禁用端口/sock_date.py
|
d07760797603f244ae3845d2aa8fab6c33adc2af
|
[] |
no_license
|
lihuacai168/python_learning
|
b644b2573f2900e4459e734ec895c8ceb6f64f39
|
ba1d02f9e306394d173042a41595b9770e645672
|
refs/heads/master
| 2021-05-04T16:50:07.223850 | 2020-03-28T15:16:03 | 2020-03-28T15:16:03 | 120,259,858 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 943 |
py
|
# -*- coding: utf-8 -*-
# Interactive helper that bans inbound TCP ports via iptables for a
# limited number of days.
import datetime,os
# Maps each port (str) to the date until which the ban stays valid.
# (Renamed from `dict`, which shadowed the builtin.)
port_expiry = {}
# 输入需要禁用的端口和日期
def my_input():
    """Prompt for comma-separated ports and a validity period in days."""
    socks = input("please input sock that need to be banned:")
    date = input("please input validity period for the sock:")
    save(socks, date)
# save sock as key,date as value to dictionary
def save(socks, add_date):
    """Record every port in *socks* (comma separated) with an expiry
    date of today + *add_date* days."""
    for sock in socks.split(","):
        delta = datetime.timedelta(days=int(add_date))
        today = datetime.date.today()
        valid_day = today + delta
        port_expiry[sock] = valid_day
def ban():
    """DROP inbound TCP traffic on every port whose ban is still valid."""
    print("ban begin:")
    for sock in port_expiry:
        if port_expiry[sock] > datetime.date.today():
            # Bug fix: the original interpolated the expiry *date* into
            # --dport; iptables needs the port number here.
            os.popen("sudo iptables -A INPUT -p tcp --dport %s -j DROP" % sock)
            print("ban execute")
def ban_log():
    # Placeholder for future audit logging.
    pass
if __name__ == "__main__":
    go = input('do you wanna continue: yes or no')
    if go == 'y' or go == 'yes':
        my_input()
        ban()
|
[
"[email protected]"
] | |
c9fb1db790544b2d33dafa3e7e4c3f65558c695c
|
bfaf6c5659d57b50f6d99bc1492a352bf26923b9
|
/evennumbers.py
|
36b98f0e3b764e3aad16461c68c5e90c095e72da
|
[
"MIT"
] |
permissive
|
BjornChrisnach/Basics_of_Computing_and_Programming
|
537108dcf9974e8abc50571066a1a6df96915c87
|
45cb99d2b5060b23db20b84384f7b35b72ef8029
|
refs/heads/main
| 2023-05-30T05:11:20.636542 | 2021-06-17T18:08:25 | 2021-06-17T18:08:25 | 376,010,794 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 139 |
py
|
# Read a positive integer n and print the first n even numbers (2, 4, ..., 2n).
print("Please enter a positive integer: ")
count = int(input())
for even in range(2, count * 2 + 1, 2):
    print(even)
|
[
"[email protected]"
] | |
382ab283e99868eb7c25aae590e703339aa079d0
|
f4b694982027ac362de1e9d6755f2943d0355a06
|
/DECSKS-12 -- debugging the recast from DECSKS-09 by comparing with v1.2/v1.2/DECSKS/lib/diagnostics.py
|
e4234421bf17641907309887eb6c7504590672c3
|
[] |
no_license
|
dsirajud/IPython-notebooks
|
55275e44191c16f5393571522787993f931cfd98
|
6ad9d978c611558525fc9d716af101dc841a393b
|
refs/heads/master
| 2021-01-15T15:33:57.119172 | 2016-07-13T20:08:29 | 2016-07-13T20:08:29 | 35,054,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,472 |
py
|
import DECSKS
import numpy as np
import sys # to retrieve smallest float for lower bound tolerance
def HighPrecisionE(number):
    """Render *number* in scientific notation with 22 fractional digits.

    str() would truncate the value; this keeps enough digits that
    diagnostics written to file retain full double precision.

    inputs:
    number -- (number)

    outputs:
    string object with chosen precision in scientific notation
    """
    return "{:.22e}".format(number)
def calcs_and_writeout(sim_params,f,n,x,v):
    """orchestrates the calculation of various quantities, e.g.
    Lp norms, energy, electrostatic energy, ...

    inputs:
    sim_params -- (dict) simulation parameters; holds the 'outfiles' dict
                  of open file handles that results are written to, plus
                  'record_outputs', 'ni' and 'Nt'
    f -- (ndarray, ndim=3), f(t,x,v)
    n -- (int) time step number, t^n
    x -- (instance) space variable
    v -- (instance) velocity variable

    outputs:
    None
    """
    #I = "invariant", I1 = L1 norm invariant, etc.
    if sim_params['record_outputs'] == 'yes':
        I1 = L1(f,n,x,v)
        I2 = L2(f,n,x,v)
        # electrostatic terms
        # NOTE(review): the field is re-solved here for diagnostics only
        E = DECSKS.lib.fieldsolvers.Poisson(sim_params['ni'], f, x, v, n)
        IW = total_energy(f,n,x,v,E)
        WE = electrostatic_energy(x,E)
        S = entropy(f,n,x,v)
        # write to files, one high-precision value per line per quantity
        sim_params['outfiles']['I1'].write(HighPrecisionE(I1) + '\n')
        sim_params['outfiles']['I2'].write(HighPrecisionE(I2) + '\n')
        sim_params['outfiles']['IW'].write(HighPrecisionE(IW) + '\n')
        sim_params['outfiles']['WE'].write(HighPrecisionE(WE) + '\n')
        sim_params['outfiles']['S'].write(HighPrecisionE(S) + '\n')
    # close all output files after the final time step
    if n == sim_params['Nt']:
        close_all_outfiles(sim_params)
    return None
def L1(f, n, x, v):
    """Compute the L1 norm of f at time step t^n.

    inputs:
    f -- (ndarray, ndim=3), f(t,x,v)
    n -- (int) time step number, t^n
    x -- (instance) space variable
    v -- (instance) velocity variable

    outputs:
    I1 -- (float) L1 norm
    """
    total_mass = np.sum(f[n])
    # scale the grid sum by the phase-space cell area dx * dv
    return total_mass * x.width * v.width
def L2(f, n, x, v):
    """Compute the *square* of the L2 norm of f at time step t^n.

    The squareroot is intentionally omitted: callers compare relative
    deviations from the t = 0 value, e.g.
    np.sqrt((L2[t] - L2[0]) / L2[0]), and taking the root here would
    only compound floating-point error.

    inputs:
    f -- (ndarray, ndim=3), f(t,x,v)
    n -- (int) time step number, t^n
    x -- (instance) space variable
    v -- (instance) velocity variable

    outputs:
    I2 -- (float) squared L2 norm
    """
    squared_sum = np.sum(f[n] ** 2)
    return squared_sum * x.width * v.width
def total_energy(f, n, x, v, E):
    """Compute the total energy of the Vlasov-Poisson system at t^n:

        IW = 1/2 sum_i sum_j f[n,i,j] v_j^2 dx dv + 1/2 sum_i E[i]^2 dx

    inputs:
    f -- (ndarray, ndim=3), f(t,x,v)
    n -- (int) time step number, t^n
    x -- (instance) space variable
    v -- (instance) velocity variable
    E -- (ndarray, ndim=1), E(x) at t^n

    outputs:
    IW -- (float) total energy at time t^n in system
    """
    # kinetic part: velocity-squared moment of f over the phase-space grid
    kinetic = 0.5 * np.sum(f[n] * v.cells ** 2) * x.width * v.width
    # field part: electrostatic energy of E
    field = 0.5 * np.sum(E ** 2) * x.width
    return kinetic + field
def electrostatic_energy(x, E):
    """Compute the electrostatic energy WE = 1/2 sum_i E[i]^2 dx at t^n.

    inputs:
    x -- (instance) space variable
    E -- (ndarray, ndim=1) E(x) at t^n

    outputs:
    WE -- (float) electrostatic energy at time t^n
    """
    field_sum = np.sum(E ** 2)
    return 0.5 * field_sum * x.width
def entropy(f, n, x, v):
    """Compute the entropy S of f at time t^n:

        S = sum_i sum_j f[n,i,j] * ln(f[n,i,j] + eps) dx dv

    inputs:
    f -- (ndarray, ndim=3), f(t,x,v)
    n -- (int) time step number, t^n
    x -- (instance) space variable
    v -- (instance) velocity variable

    outputs:
    S -- (float) entropy at time t^n
    """
    # smallest positive float keeps np.log defined where f == 0
    eps = sys.float_info.min
    pointwise = f[n] * np.log(f[n] + eps)
    return np.sum(pointwise) * x.width * v.width
def close_all_outfiles(sim_params):
    """Close every open output file stored in sim_params['outfiles'].

    inputs:
    sim_params -- (dict) simulation parameters; sim_params['outfiles'] is
                  either None or a dict mapping labels to open file objects

    outputs:
    None
    """
    if sim_params['outfiles'] is not None:
        # Bug fix: dict.itervalues() is Python-2-only and raises
        # AttributeError on Python 3; .values() works on both.
        for outfile in sim_params['outfiles'].values():
            outfile.close()
    return None
|
[
"[email protected]"
] | |
b668b0abab5e11cdc720fd2cf1af244fcc9727a3
|
f2bb0e8ea0e0ae747238055a5669ed8b44095f81
|
/api/__init__.py
|
7c4f18948535976fc9f083e800619c67f21ad732
|
[] |
no_license
|
Rareseal94/NoteAPP
|
c94e203913dba9d2e336759b52ffe451222e6525
|
d2026caec2eac44acf6c19a5364d9ebe180a5d49
|
refs/heads/main
| 2023-08-15T14:28:43.131868 | 2021-09-14T16:56:22 | 2021-09-14T16:56:22 | 406,454,154 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,273 |
py
|
import logging
from config import Config
from flask import Flask, g
from flask_restful import Api, Resource, abort, reqparse
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_marshmallow import Marshmallow
from flask_httpauth import HTTPBasicAuth
from flask_apispec.extension import FlaskApiSpec
app = Flask(__name__)
app.config.from_object(Config)
api = Api(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
ma = Marshmallow(app)
auth = HTTPBasicAuth()
docs = FlaskApiSpec(app)
app.logger.setLevel(logging.INFO)
logging.getLogger('werkzeug').setLevel(logging.WARNING)
@auth.verify_password
def verify_password(username_or_token, password):
from api.models.user import UserModel
# сначала проверяем authentication token
# print("username_or_token = ", username_or_token)
# print("password = ", password)
user = UserModel.verify_auth_token(username_or_token)
if not user:
# потом авторизация
user = UserModel.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
@auth.get_user_roles
def get_user_roles(user):
    """Flask-HTTPAuth role callback: return the roles of the current user."""
    # NOTE(review): the `user` argument supplied by flask-httpauth is ignored;
    # roles are read from g.user set in verify_password -- confirm intended.
    return g.user.get_roles()
|
[
"[email protected]"
] | |
df585f561e1bd0f95edb526fd662fc99e5cba754
|
f56fda98a93cedcec33a7d9fbb330e5cf78031e1
|
/Leetcode/45. Jump Game II.py
|
b2d963b2956cda7d0acaeac20324868e1d0d0149
|
[] |
no_license
|
GuanzhouSong/Leetcode_Python
|
7a2bac42203fb6c0b671153d9e300eb0c73d39d1
|
dbb9be177c5e572eb72a79508bb6e24f357d54b3
|
refs/heads/master
| 2021-09-25T04:10:09.217565 | 2018-10-17T22:31:41 | 2018-10-17T22:31:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
import sys
class Solution:
    def jump(self, nums):
        """Return the minimum number of jumps needed to reach the last index.

        Dynamic programming: best[k] is the fewest jumps to reach index k;
        every position relaxes all indices it can jump to.
        """
        size = len(nums)
        best = [0] + [sys.maxsize] * (size - 1)
        for start in range(size - 1):
            reach = min(nums[start], size - 1 - start)
            for offset in range(1, reach + 1):
                candidate = best[start] + 1
                if candidate < best[start + offset]:
                    best[start + offset] = candidate
        return best[-1]
s = Solution()
nums = [6, 2, 6, 1, 7, 9, 3, 5, 3, 7, 2, 8, 9, 4, 7, 7, 2, 2, 8, 4, 6, 6, 1, 3]
# Bug fix: the method defined above is `jump`; calling the nonexistent
# `jump2` raised AttributeError at runtime.
print(s.jump(nums))
|
[
"[email protected]"
] | |
284a5ef774fda9b4dfaa331c91ef901575bb3aff
|
1cc69dfdcf664a26f2f7d64d803343e3d0165515
|
/4_Brave_Knight/braveKnight.2.py
|
37c36d335823e9f2ac1c21b1654c52f19fda8e29
|
[] |
no_license
|
juanlluva/Tuenti_Challenge_8
|
69827bbbe8af052507fa942d8f181d1e1d55f858
|
36371029815709bf6a0d789d22731bd85a7d7322
|
refs/heads/master
| 2020-04-22T04:33:49.884820 | 2019-02-23T01:37:05 | 2019-02-23T01:37:05 | 170,127,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,994 |
py
|
import sys
from collections import deque, namedtuple
# we'll use infinity as a default distance to nodes.
inf = float('inf')
Edge = namedtuple('Edge', 'start, end, cost')
def make_edge(start, end, cost=1):
    """Build an Edge namedtuple; cost defaults to 1 (unweighted edge)."""
    return Edge(start, end, cost)
class Graph:
    """Weighted graph stored as a flat list of Edge namedtuples, with
    Dijkstra shortest-path search.

    An "undirected" edge is represented as two directed edges
    (both_ends=True, the default).
    """

    def __init__(self, edges):
        """Build a graph from an iterable of (start, end[, cost]) tuples.

        Raises ValueError if any tuple has the wrong arity.
        """
        wrong_edges = [i for i in edges if len(i) not in [2, 3]]
        if wrong_edges:
            raise ValueError('Wrong edges data: {}'.format(wrong_edges))
        self.edges = [make_edge(*edge) for edge in edges]

    @property
    def vertices(self):
        """Set of every vertex that appears as an edge endpoint."""
        # Set comprehension instead of sum(..., []) -- same result, O(E)
        # rather than quadratic list concatenation.
        return {v for edge in self.edges for v in (edge.start, edge.end)}

    def get_node_pairs(self, n1, n2, both_ends=True):
        """Return the ordered endpoint pair(s) that represent edge (n1, n2)."""
        if both_ends:
            node_pairs = [[n1, n2], [n2, n1]]
        else:
            node_pairs = [[n1, n2]]
        return node_pairs

    def remove_edge(self, n1, n2, both_ends=True):
        """Remove edge n1->n2 (and n2->n1 when both_ends)."""
        node_pairs = self.get_node_pairs(n1, n2, both_ends)
        for edge in self.edges[:]:  # iterate a copy while mutating the list
            if [edge.start, edge.end] in node_pairs:
                self.edges.remove(edge)

    def add_edge(self, n1, n2, cost=1, both_ends=True):
        """Add edge n1->n2 (and n2->n1 when both_ends).

        Raises ValueError if the edge already exists.
        """
        node_pairs = self.get_node_pairs(n1, n2, both_ends)
        for edge in self.edges:
            if [edge.start, edge.end] in node_pairs:
                # Bug fix: the exception was previously *returned*, not
                # raised, so duplicate insertions were silently ignored.
                raise ValueError('Edge {} {} already exists'.format(n1, n2))
        self.edges.append(Edge(start=n1, end=n2, cost=cost))
        if both_ends:
            self.edges.append(Edge(start=n2, end=n1, cost=cost))

    @property
    def neighbours(self):
        """Map vertex -> set of (neighbour, cost) pairs for outgoing edges."""
        neighbours = {vertex: set() for vertex in self.vertices}
        for edge in self.edges:
            neighbours[edge.start].add((edge.end, edge.cost))
        return neighbours

    def dijkstra(self, source, dest):
        """Return the shortest path source->dest as a deque of vertices
        (inclusive of both ends), or the string "IMPOSSIBLE" when dest is
        unknown or unreachable.
        """
        # Distance 0 for the source, infinity for everything else; all
        # nodes start unvisited with no recorded predecessor.
        distances = {vertex: inf for vertex in self.vertices}
        previous_vertices = {
            vertex: None for vertex in self.vertices
        }
        distances[source] = 0
        vertices = self.vertices.copy()

        while vertices:
            # Visit the unvisited node with the smallest tentative distance.
            current_vertex = min(
                vertices, key=lambda vertex: distances[vertex])
            # Stop once only unreachable (infinite-distance) nodes remain.
            if distances[current_vertex] == inf:
                break
            # Relax every outgoing edge of the current node.
            for neighbour, cost in self.neighbours[current_vertex]:
                alternative_route = distances[current_vertex] + cost
                if alternative_route < distances[neighbour]:
                    distances[neighbour] = alternative_route
                    previous_vertices[neighbour] = current_vertex
            # Mark the current node visited.
            vertices.remove(current_vertex)

        # Rebuild the path by walking the predecessor chain backwards.
        path, current_vertex = deque(), dest
        if current_vertex in previous_vertices:  # idiom: no .keys() needed
            while previous_vertices[current_vertex] is not None:
                path.appendleft(current_vertex)
                current_vertex = previous_vertices[current_vertex]
        if path:
            path.appendleft(current_vertex)
            return path
        else:
            return "IMPOSSIBLE"
def step(current):
    """List the open squares reachable in one jump from *current*.

    Super squares double the knight's move; ordinary squares use the
    standard chess-knight offsets. Relies on the module-level
    `superSquares` and `openSquares` collections.
    """
    if current in superSquares:
        offsets = [(2, 4), (2, -4), (-2, 4), (-2, -4),
                   (4, 2), (4, -2), (-4, 2), (-4, -2)]
    else:
        offsets = [(1, 2), (1, -2), (-1, 2), (-1, -2),
                   (2, 1), (2, -1), (-2, 1), (-2, -1)]
    row, col = current
    return [(row + dr, col + dc) for dr, dc in offsets
            if (row + dr, col + dc) in openSquares]
if __name__ == '__main__':
    # Entry point: for each test case, parse the arena grid, build the
    # knight-move graph over open squares, and write the minimum number of
    # jumps S -> P -> D (or IMPOSSIBLE) to the output file.
    fin = open(sys.argv[1], 'r')
    fout = open(sys.argv[2], 'w')
    cases = int(fin.readline())
    for case in range(1, cases+1):
        # N rows by M columns.
        (N,M)=fin.readline().rsplit()
        # print("Dimensiones: "+ N +"x"+ M)
        (N,M) = (int(N),int(M))
        arena = []
        for n in range(0,N):
            line = list(fin.readline().replace('\n', ''))
            arena.append(line)
            # print(arena[n])
        # Classify squares: '#' is a wall, '*' doubles the knight's move,
        # 'S' start, 'P' intermediate stop, 'D' destination.
        openSquares = []
        superSquares = []
        for n in range(N):
            for m in range(M):
                if arena[n][m] != '#' :
                    openSquares.append((n,m))
                if arena[n][m] == '*' :
                    superSquares.append((n,m))
                if arena[n][m] == 'S' :
                    origen = (n,m)
                if arena[n][m] == 'P' :
                    stop = (n,m)
                if arena[n][m] == 'D' :
                    destino = (n,m)
        # print(str(openSquares))
        # print(str(origen) + ", " + str(stop) + ", " + str(destino))
        # Directed edge (cost 1) from each open square to every square it
        # can jump to.
        graph = Graph([])
        for square in openSquares:
            reach = step(square)
            # print(reach)
            for rch in reach:
                graph.add_edge(square, rch, 1, False)
        path1 = graph.dijkstra(origen, stop)
        # print(path1)
        path2 = graph.dijkstra(stop, destino)
        # print(path2)
        if path1 != "IMPOSSIBLE" and path2 != "IMPOSSIBLE":
            # Paths include their endpoints, so jumps = vertices - 1 per leg.
            n_saltos = len(path1) + len(path2) - 2
            fout.write(f'Case #{case}: {n_saltos}\n')
        else:
            fout.write(f'Case #{case}: IMPOSSIBLE\n')
|
[
"[email protected]"
] | |
7f26625263c11326ea38eeafc3642e708fe7a559
|
8606814015cc13920052acb9c1218cb57c796941
|
/aws_emr_launch/constructs/base.py
|
f919a0a63344232e84e81be12aa86ef7644c7576
|
[
"MIT-0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
salilbhadauria/emr-custom
|
a92d55f6a39584631f911908fec399dd4ff9910f
|
6cf3a6f6fd517bc1e036eaa69e1f3ac0fca270ec
|
refs/heads/master
| 2023-02-12T02:00:10.868302 | 2021-01-05T09:34:45 | 2021-01-05T09:34:45 | 317,466,643 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 870 |
py
|
import os
from aws_cdk import core
from logzero import logger
from aws_emr_launch import __product__, __version__
def _tag_construct(construct: core.Construct):
    """Apply deployment product name/version tags to *construct*, unless
    suppressed via the SUPPRESS_EMR_LAUNCH_DEPLOYMENT_TAGS env var.
    """
    flag = os.environ.get('SUPPRESS_EMR_LAUNCH_DEPLOYMENT_TAGS', '').lower()
    if flag in ('1', 't', 'true', 'y', 'yes'):
        logger.info('Suppressing "deployment:product" tags for: %s', construct.node.id)
    else:
        core.Tags.of(construct).add('deployment:product:name', __product__)
        core.Tags.of(construct).add('deployment:product:version', __version__)
class BaseConstruct(core.Construct):
    """CDK construct base class that auto-applies deployment product tags."""

    def __init__(self, scope: core.Construct, id: str):
        super().__init__(scope, id)
        _tag_construct(self)
class BaseBuilder:
    """Mixin-style helper exposing product tagging for builder classes."""

    @staticmethod
    def tag_construct(construct: core.Construct):
        """Apply the standard deployment product tags to *construct*."""
        _tag_construct(construct)
|
[
"[email protected]"
] | |
26350657c516bdbc078319c35b32e08da7e3bf3c
|
26a47d3afb403d5307a1eaaae0bf43c90e8e16a0
|
/creme_fraiche/views/home.py
|
6a336c5c49583ad4b0f418f5a8b533d53742e6fa
|
[] |
no_license
|
makewhatis/creme-fraiche
|
e7336b3418a6acff1236a35a9514aefe41835df5
|
8f7dc441de354b45db2a1520432329f3b17094e1
|
refs/heads/master
| 2021-01-22T04:42:02.553244 | 2013-04-23T03:16:30 | 2013-04-23T03:16:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,708 |
py
|
from pyramid.response import Response
from pyramid.view import view_config
from pyramid.view import forbidden_view_config
from pyramid.security import authenticated_userid
from pyramid.security import remember
from pyramid.security import forget
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPNotFound
from sqlalchemy import desc
from sqlalchemy.exc import DBAPIError
from creme_fraiche.models import DBSession
from creme_fraiche.models import Users
from creme_fraiche.models import authenticate
from creme_fraiche.exceptions import AuthException
import logging
log = logging.getLogger(__name__)
@view_config(
    route_name='home_page',
    renderer='/home/index.mak'
)
def home_page(request):
    """Render the home page with the full list of users.

    Bug fix: on a DBAPIError, `users` previously stayed unbound and the
    return statement raised UnboundLocalError; now the error is logged and
    an empty list is rendered instead.
    """
    users = []
    try:
        users = DBSession.query(Users).all()
    except DBAPIError as e:
        log.exception(e)
    return dict(
        project='creme fraiche',
        users=users,
        logged_in=authenticated_userid(request)
    )
@view_config(
    context=Exception,
    renderer='/home/fail.mak'
)
def fail_view(exc, request):
    """Catch-all exception view: log the error and render a friendly page."""
    log.exception(exc)
    msg="Uh oh. Somethings broke."
    return dict(
        msg=msg,
        project='creme_fraiche'
    )
@view_config(
    route_name='login',
    renderer='/home/login.mak'
)
@forbidden_view_config(renderer='/home/login.mak')
def login_view(request):
    """Show the login form and process login POSTs.

    On success, remembers the user and redirects to `came_from`; on failure,
    flashes a warning and redirects back to the login page.
    """
    login_url = request.route_url('login')
    referrer = request.url
    # Bug fix: `print "URL: %s" % referrer` is Python 2-only syntax and a
    # SyntaxError under Python 3; use the function form for the debug trace.
    print("URL: %s" % referrer)
    if referrer == login_url:
        referrer = '/' # never use the login form itself as came_from
    came_from = request.params.get('came_from', referrer)
    message = ''
    login = ''
    password = ''
    if request.POST:
        login = request.POST.get('username')
        password = request.POST.get('password')
        try:
            userobj = authenticate(login, password)
        except AuthException:  # unused binding `as e` removed
            userobj = False
        if userobj:
            headers = remember(request, login)
            request.session.flash("Logged in successfully!", 'success')
            return HTTPFound(location=came_from, headers=headers)
        else:
            request.session.flash("Login Failed.", 'warning')
            return HTTPFound(location=request.route_url('login', message='done'))
    return dict(
        message=message,
        url=request.application_url + '/login',
        came_from=came_from,
        login=login,
        password=password,
        logged_in=authenticated_userid(request)
    )
@view_config(route_name='logout')
def logout(request):
headers = forget(request)
request.session.delete()
return HTTPFound(location=request.route_url('home_page'),
headers=headers)
|
[
"[email protected]"
] | |
17cc8fa97f6368e616ebe185f8ba427f717efcee
|
bc5e26509ad2130202034474338b17ad4119a886
|
/kmtshi/wsgi.py
|
cb9ceda02e6236b01348844fd6f568571975bc3a
|
[] |
no_license
|
mdrout/kmtshi
|
21ea45839bfc29a74053fd1b323476e63dc5708d
|
6d208c98c590e75d2b1cbcc148852e632579d17c
|
refs/heads/master
| 2021-09-13T05:45:13.042223 | 2018-04-25T15:47:09 | 2018-04-25T15:47:09 | 73,971,140 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 390 |
py
|
"""
WSGI config for kmtshi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kmtshi.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
37a024c9f7fb0d090e1597c0898fc90ded145af4
|
bf02d02dad536f48dff434aae2b737d58d9ef216
|
/mysite/mysite/settings.py
|
57fbd51bd69a684e35624379b4a2f0ff6f672a68
|
[] |
no_license
|
myselfsai/project1
|
f59d090829782f46770a0d360a9f88928a8f720f
|
ac74079040beb2dd74dbefe7c2faba01126355b7
|
refs/heads/master
| 2021-01-15T12:02:12.123298 | 2020-01-24T05:13:28 | 2020-01-24T05:13:28 | 99,643,762 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,126 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fip#j!(h_g(6=en_4@^y%y=x!d2pp+@exd3fr$ve_n20-mjggp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
556f62ce6e5bfe7f12088bb5d8be6eec651c0602
|
7a5609ef2852f2ded0f4ffc8076a38143e0ae1c8
|
/图灵机器人聊天.py
|
8c5d065f40e33f3e14d7ca5240e303db79ed097b
|
[] |
no_license
|
yzulzf/TalkingAi
|
1c560f25c0e8198a2e9b219f6b94cbaaa6e318e3
|
b87c4e1067d8d477e0abb7d62a439539447404f9
|
refs/heads/master
| 2022-03-01T03:14:57.251040 | 2019-09-18T07:49:28 | 2019-09-18T07:49:28 | 124,658,735 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 484 |
py
|
# coding=utf-8
import requests
import json
# Tuling chatbot API key (NOTE(review): secret committed in source --
# should come from configuration/environment instead).
key='a8a0f074197d4ab5a67dac988240fbbc'
while True:# main chat loop
    info = input('\n我:')# read the user's message
    url = 'http://www.tuling123.com/openapi/api?key='+key+'&info='+info# build the request URL
    res = requests.get(url)# perform the API request
    res.encoding = 'utf-8'# force UTF-8 to avoid mojibake in Chinese text
    jd = json.loads(res.text)# parse the JSON reply into a dict
    print('\nTuling: '+jd['text'])# print the bot's answer
|
[
"[email protected]"
] | |
64511cc39b9c27659b5e2ed5fe9709459794abfe
|
811ea65e1b91f03410bc3e0af6712e4e59048109
|
/luis.py
|
ddbecd0e8d24b7e267c71de59c0aaf9568a13c4b
|
[] |
no_license
|
fartuk/TrackML
|
40de40cffb48cd20ddc5d6ed9b129be57dfd332b
|
610181123aa3d78c6c60ff43e818a1a3b0821fe7
|
refs/heads/master
| 2020-03-22T14:08:37.468116 | 2018-08-05T09:05:56 | 2018-08-05T09:05:56 | 140,156,044 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,965 |
py
|
import numpy as np
import pandas as pd
import os
from trackml.dataset import load_event, load_dataset
from trackml.score import score_event
from multiprocessing import Pool
from sklearn.neighbors import KDTree
cnt = 0
for event_id, hits, cells, particles, truth in load_dataset('../storage/track_ml_data/train_5.zip'):
if cnt == 1:
break
cnt += 1
from sklearn.preprocessing import StandardScaler
import hdbscan
from scipy import stats
from tqdm import tqdm
from sklearn.cluster import DBSCAN
class Clusterer(object):
def __init__(self,rz_scales=[0.65, 0.965, 1.528]):
self.rz_scales=rz_scales
def _eliminate_outliers(self,labels,M):
norms=np.zeros((len(labels)),np.float32)
indices=np.zeros((len(labels)),np.float32)
for i, cluster in tqdm(enumerate(labels),total=len(labels)):
if cluster == 0:
continue
index = np.argwhere(self.clusters==cluster)
index = np.reshape(index,(index.shape[0]))
indices[i] = len(index)
x = M[index]
norms[i] = self._test_quadric(x)
threshold1 = np.percentile(norms,90)*5
threshold2 = 25
threshold3 = 6
for i, cluster in enumerate(labels):
if norms[i] > threshold1 or indices[i] > threshold2 or indices[i] < threshold3:
self.clusters[self.clusters==cluster]=0
def _test_quadric(self,x):
if x.size == 0 or len(x.shape)<2:
return 0
xm = np.mean(x,axis=0)
x = x - xm
Z = np.zeros((x.shape[0],10), np.float32)
Z[:,0] = x[:,0]**2
Z[:,1] = 2*x[:,0]*x[:,1]
Z[:,2] = 2*x[:,0]*x[:,2]
Z[:,3] = 2*x[:,0]
Z[:,4] = x[:,1]**2
Z[:,5] = 2*x[:,1]*x[:,2]
Z[:,6] = 2*x[:,1]
Z[:,7] = x[:,2]**2
Z[:,8] = 2*x[:,2]
Z[:,9] = 1
v, s, t = np.linalg.svd(Z,full_matrices=False)
smallest_index = np.argmin(np.array(s))
T = np.array(t)
T = T[smallest_index,:]
norm = np.linalg.norm(np.dot(Z,T), ord=2)**2
return norm
def _preprocess(self, hits):
x = hits.x.values
y = hits.y.values
z = hits.z.values
r = np.sqrt(x**2 + y**2 + z**2)
hits['x2'] = x/r
hits['y2'] = y/r
r = np.sqrt(x**2 + y**2)
hits['z2'] = z/r
ss = StandardScaler()
X = ss.fit_transform(hits[['x2', 'y2', 'z2']].values)
for i, rz_scale in enumerate(self.rz_scales):
X[:,i] = X[:,i] * rz_scale
return X
def find_labels(self, params):
w1, w2, w3, w4, w5, w6, w7, epsilon = 2.7474448671796874,1.3649721713529086,0.7034918842926337,\
0.0005549122352940002,0.023096034747190672,0.04619756315527515,\
0.2437077420144654,0.009750302717746615
hits, dz, z_shift, unroll_type = params
hits['z'] = hits['z'] - z_shift
hits['r'] = np.sqrt(hits['x'].values ** 2 + hits['y'].values ** 2 + hits['z'].values ** 2)
hits['rt'] = np.sqrt(hits['x'].values ** 2 + hits['y'].values ** 2)
hits['a0'] = np.arctan2(hits['y'].values, hits['x'].values)
hits['z1'] = hits['z'].values / hits['rt'].values
hits['z2'] = hits['z'].values / hits['r'].values
hits['s1'] = hits['hit_id']
hits['N1'] = 1
hits['z1'] = hits['z'].values / hits['rt'].values
hits['z2'] = hits['z'].values / hits['r'].values
hits['x1'] = hits['x'].values / hits['y'].values
hits['x2'] = hits['x'].values / hits['r'].values
hits['x3'] = hits['y'].values / hits['r'].values
hits['x4'] = hits['rt'].values / hits['r'].values
if unroll_type == 0:
hits['a1'] = hits['a0'].values + np.nan_to_num(np.arccos(dz*hits['rt'].values))
if unroll_type == 1:
hits['a1'] = hits['a0'].values + dz*hits['rt'].values
if unroll_type == 2:
hits['a1'] = hits['a0'].values + dz*hits['z'].values
if unroll_type == 3:
hits['a1'] = hits['a0'].values + dz * (hits['rt'].values + 0.000005 * hits['rt'].values ** 2)
#hits['a1'] = hits['a0'].values + np.nan_to_num(np.arccos(dz*hits['rt'].values))
hits['sina1'] = np.sin(hits['a1'].values)
hits['cosa1'] = np.cos(hits['a1'].values)
ss = StandardScaler()
hits = ss.fit_transform(hits[['sina1', 'cosa1', 'z1', 'z2','x1','x2','x3','x4']].values)
cx = np.array([w1, w1, w2, w3, w4, w5, w6, w7])
hits = np.multiply(hits, cx)
clusters = DBSCAN(eps=0.009750302717746615, min_samples=1, metric="euclidean", n_jobs=32).fit(hits).labels_
return clusters
def _init(self, hits, Niter):
# w1, w2, w3, w4, w5, w6, w7, epsilon = 2.7474448671796874,1.3649721713529086,0.7034918842926337,\
# 0.0005549122352940002,0.023096034747190672,0.04619756315527515,\
# 0.2437077420144654,0.009750302717746615
params = []
for i in range(Niter):
unroll_type = np.random.randint(0,4)
if unroll_type == 0:
dz = np.random.normal(0.0, 0.00035)
elif unroll_type == 1:
dz = np.random.normal(0.0, 0.00065)
elif unroll_type == 2:
dz = np.random.normal(0.0, 0.00085)
elif unroll_type == 3:
dz = np.random.normal(0.0, 0.001)
#dz = 1 / 1000 * (ii / 2) / 180 * np.pi
#dz = np.random.normal(0.0, 0.001)
#dz = np.random.normal(0.0, 0.00035)
z_shift = np.random.normal(0.0, 4.5)
params.append((hits, dz, z_shift, unroll_type))
pool = Pool(processes=8)
result = []
for i in tqdm(pool.imap(self.find_labels, params)):
result += [i]
pool.close()
return np.array(result)
def predict(self, hits, Niter):
result = self._init(hits, Niter)
# X = self._preprocess(hits)
# cl = hdbscan.HDBSCAN(min_samples=1,min_cluster_size=7,
# metric='braycurtis',cluster_selection_method='leaf',algorithm='best', leaf_size=50)
# labels = np.unique(self.clusters)
# self._eliminate_outliers(labels,X)
# max_len = np.max(self.clusters)
# mask = self.clusters == 0
# self.clusters[mask] = cl.fit_predict(X[mask])+max_len
return result
model = Clusterer()
result = model.predict(hits, 60000)
def create_one_event_submission(event_id, hits, labels):
    """Assemble one event's submission table.

    Returns an int DataFrame with columns event_id / hit_id / track_id,
    one row per hit.
    """
    rows = np.column_stack(([event_id] * len(hits), hits, labels))
    frame = pd.DataFrame(data=rows, columns=["event_id", "hit_id", "track_id"])
    return frame.astype(int)
def merge(cl1, cl2, min_cnt): # merge cluster 2 to cluster 1
    """Merge clustering *cl2* into *cl1*.

    A position takes cl2's label (offset by max(cl1) to keep labels
    distinct) when its cl2 cluster is strictly larger than its cl1 cluster,
    has fewer than 20 members, and more than *min_cnt* members.
    """
    frame = pd.DataFrame(data={'s1': cl1, 's2': cl2})
    size1 = frame.groupby('s1')['s1'].transform('count').values
    size2 = frame.groupby('s2')['s2'].transform('count').values
    offset = frame['s1'].max()
    labels = frame['s1'].values
    replace = (size2 > size1) & (size2 < 20) & (size2 > min_cnt)
    labels[replace] = frame['s2'].values[replace] + offset
    return labels
#result = np.array(second)
labels = range(result.shape[1])
for k in [0]:
for i in range(len(result[:])):
labels = merge(labels, result[i], k)
submission = create_one_event_submission(0, hits['hit_id'].values, labels)
print(score_event(truth, submission))
np.save('predicts/luis_60k', result)
|
[
"[email protected]"
] | |
bfcab4cecd2a7d8e3946cf55d03659e839d25b3d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/82/usersdata/165/44623/submittedfiles/decimal2bin.py
|
b07134dde6b2df7bd468626e44d12cc75e301ed4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
# -*- coding: utf-8 -*-
# Reads an integer whose decimal digits are binary bits (e.g. 1011) and
# prints its base-10 value (e.g. 11) -- i.e. binary -> decimal, despite
# the file name "decimal2bin".
n=int(input('digite n:'))
i=0
soma=0
while n>0:
    resto=n%10# least-significant digit (treated as a bit)
    soma=soma+resto*(2**i)# weight the bit by 2**position
    n=n//10# drop the processed digit
    i=i+1
print(soma)
|
[
"[email protected]"
] | |
c9d87b0c2efd5f04ba999abcec54d97fb11bb1c3
|
a916bf443953f25535d8a929e44a194805cbc832
|
/dataStructures3.py
|
e620dea8ebcbece3a6c24bbef4b2bf482e70ccd6
|
[] |
no_license
|
TanujSharma369/258286_DailyCommits
|
7eeaca0ce1af48add06fe81c0d162300f104d351
|
313d9f7d6daf47b6ac595291f6a4135e062a698a
|
refs/heads/master
| 2023-04-20T08:45:13.554589 | 2021-05-19T07:23:06 | 2021-05-19T07:23:06 | 358,884,244 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
under_10 = [x for x in range(10)]
print('under_10:' +str(under_10))
squares = [x*x for x in under_10]
print('squares:' +str(squares))
odds = [x for x in range(10) if x%2==1]
print('odds:' +str(odds))
sentence = 'I love 2 go t0 the store 7 times a w3ek'
nums = [x for x in sentence if x.isnumeric()]
print('nums:' + ''.join(nums))
names = [ 'Nishant', 'Pankaj', 'Santosh', 'Tanuj','Abdul']
index = [k for k, v in enumerate(names) if v == 'Tanuj']
print('index :' +str(index[0]))
letters = [x for x in 'ABCDEFGHI']
letters = [a for a in letters if a!='C']
print('letters:'+str(letters))
|
[
"tanuj.com"
] |
tanuj.com
|
b46b992f72c74596ded6b403282b96a1c9815f17
|
0be52e0a7d788088bc5a79559b9bb2532d17a210
|
/lib/ldapalchemy/gui/objectclass.py
|
816364f2e85f773f981efbb11c3af1ab708a6e60
|
[] |
no_license
|
clebergnu/ldapalchemy
|
e3439a6b45ab9b9b45fc2548faf7b8fe402586c7
|
af3b021a5b9ebad337c6665a56b6428f42daf18c
|
refs/heads/master
| 2021-01-20T09:01:38.517110 | 2010-10-20T17:15:42 | 2010-10-20T17:15:42 | 1,009,678 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,966 |
py
|
# -*- Mode: Python; coding: iso-8859-1 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## This file is part of LDAPAlchemy
## Copyright (C) 2007 Cleber Rodrigues <[email protected]>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
##
## Author(s): Cleber Rodrigues <[email protected]>
##
'''
gui/objectclass.py
Provides ObjectClass related GUI classes
'''
__all__ = ['ObjectClassList']
import gtk
from ldapalchemy.schema import OC_NAME
from ldapalchemy.elements import ObjectClassElement
class ObjectClassListStore(gtk.ListStore):
'''
Hold ObjectClass names
'''
def __init__(self, schema):
'''
Class init
'''
gtk.ListStore.__init__(self, str)
self.load_from_schema(schema)
def load_from_schema(self, schema):
'''
Load all entries from schema
'''
for name in schema.get_all_element_names(OC_NAME):
self.append([name])
#
# The Text Cell Renderer Singleton
#
text_cell_renderer = gtk.CellRendererText()
#
# The TreeViewColumn for OIDs Singleton
#
oid_treeview_column = gtk.TreeViewColumn('OID')
oid_treeview_column.pack_start(text_cell_renderer, True)
oid_treeview_column.set_attributes(text_cell_renderer, text=0)
#
# The TreeViewColumn for OIDs Singleton
#
name_treeview_column = gtk.TreeViewColumn('ObjectClass Name')
name_treeview_column.pack_start(text_cell_renderer, True)
name_treeview_column.set_attributes(text_cell_renderer, text=0)
class ObjectClassListView(gtk.TreeView):
'''
A list view of Object Classes
'''
def __init__(self, schema):
list_store = ObjectClassListStore(schema)
gtk.TreeView.__init__(self, list_store)
self.append_column(name_treeview_column)
class ObjectClassListWindow(gtk.ScrolledWindow):
'''
A window that embbeds a ObjectClassListView
'''
def __init__(self, schema):
gtk.ScrolledWindow.__init__(self)
self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.list_view = ObjectClassListView(schema)
self.add(self.list_view)
self.set_size_request(300, 480)
class ObjectClassInfo(gtk.VBox):
    '''
    Vertical box showing details of a given ObjectClass: its primary name,
    its superior classes, and its attribute list.
    '''
    def __init__(self, element_object):
        assert isinstance(element_object, ObjectClassElement)
        gtk.VBox.__init__(self, 4)
        # Primary (first) name of the object class.
        name_label = gtk.Label(element_object.names[0])
        name_label.show()
        self.pack_start(name_label, False, False)
        # Superior object classes, one per line.
        sup_label = gtk.Label("\n".join(element_object.sup))
        sup_label.show()
        self.pack_start(sup_label, False, False)
        # NOTE(review): variable is named must_label but it displays
        # element_object.may -- confirm whether .must was intended here.
        must_label = gtk.Label("\n".join(element_object.may))
        must_label.show()
        self.pack_start(must_label, True, True)
        self.show_all()
class ObjectClassInfoDlg(gtk.Dialog):
'''
Provides a Dialog that embeds a ObjectClassInfo
'''
def __init__(self, element_object):
gtk.Dialog.__init__(self,
title='%s Info' % element_object.names[0],
buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_ACCEPT))
self.set_size_request(300, 480)
self.object_class_info = ObjectClassInfo(element_object)
self.vbox.pack_start(self.object_class_info)
|
[
"[email protected]"
] | |
13daa325c738b21366101b6991ee7c31d9dcb811
|
dd80be06f1d8ef04e94ea2fe1cc970a8d58a087a
|
/iniciante/python/1185.py
|
2fa9b1f2f22f5e2af1cca983806bddfdcf93d53c
|
[] |
no_license
|
quatroka/urionlinejudge
|
600231e9ef9850cee8f1d7b6e3f635b030868165
|
d94e777f95cbf95fa5d7455faba3aa5f4bac1cf3
|
refs/heads/master
| 2023-01-12T14:09:46.683526 | 2023-01-02T23:14:55 | 2023-01-02T23:14:55 | 56,419,228 | 3 | 1 | null | 2016-11-04T19:04:37 | 2016-04-17T04:40:11 |
Python
|
UTF-8
|
Python
| false | false | 413 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
OP = input()
DATA = []
for i in range(0, 12):
DATA_AUX = []
for j in range(0, 12):
DATA_AUX.append(float(input()))
DATA.append(DATA_AUX)
RESULT = 0
COUNT = 0
for x in range(0, 11):
COUNT += 11 - x
for y in range(0, 11 - x):
RESULT += DATA[x][y]
if OP == 'M':
print('{0:.1f}'.format(RESULT / COUNT))
else:
print(RESULT)
|
[
"[email protected]"
] | |
2406f8d30bec4dab92a9975052e0714bba593eaf
|
986853c69a9dfc13ca5303f7e56db028f114a744
|
/fiboncci series.py
|
22ec2d5e8c79f99d8c5335d34f7f44ed266123e6
|
[] |
no_license
|
omee1/Python
|
78320d04f48eab1a9b929fca92813be36e33a302
|
ba94ecbb0afd2f99b70e3bfd2b4837d0d4fce354
|
refs/heads/master
| 2023-08-18T17:01:29.977259 | 2021-10-21T09:22:13 | 2021-10-21T09:22:13 | 419,625,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
def series(n):
    """Print the first n Fibonacci numbers, comma-separated (no newline)."""
    a = 0
    b = 1
    count = 0
    while count < n:
        print(a, end=",")
        # Advance the pair: next value is the sum of the previous two.
        a, b = b, a + b
        count += 1

num = int(input("enter the no"))
# Bug fix: `series` returns None, so `print(series(num))` printed a stray
# "None" after the sequence; call it directly and emit just the newline.
series(num)
print()
|
[
"[email protected]"
] | |
79f62a7ee6eb1f0d6df192c475af8fec47ca39a9
|
ea5af064f6583c4dc244627f67bf51a9119347a9
|
/crypto.py
|
4c6a27ad97768b78070c68886cdd9f351d4f73f8
|
[] |
no_license
|
celiyan/PyPassManager
|
034c10cfe594d365822dc836e0f0143e02ac25e3
|
fda994b44b7a003825e16bbcaffd07cf094e04b7
|
refs/heads/master
| 2022-12-19T19:51:29.714559 | 2020-10-15T05:16:37 | 2020-10-15T05:16:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,530 |
py
|
from Crypto.Cipher import AES
from os import urandom
def pad(txt):
    """PKCS#7-style pad *txt* (bytes) to a multiple of AES.block_size.

    Each pad byte's value equals the padding length, so unpadding only needs
    to read the last byte. Always adds at least one byte (a full block when
    the input is already aligned).
    """
    padding_length = AES.block_size - len(txt)%AES.block_size
    # we pad with a character = to the padding length, to make unpadding easy
    padding = chr(padding_length) * padding_length
    return txt+padding.encode()
def unpad(txt):
    """Strip PKCS#7-style padding: the final byte encodes how many pad
    bytes to drop from the end. Takes and returns a bytes object."""
    drop = txt[-1]  # pad length stored as the value of the last byte
    return txt[:-drop]
def encrypt(raw, key):
    """Encrypt bytes with AES-CBC under a fresh random IV.

    The IV is prepended to the ciphertext so decrypt() can recover it.
    Inputs two bytes objects: plaintext & key. Returns bytes.
    """
    iv = urandom(AES.block_size)
    key = key[:32] # key must be 32 bytes, masterpass hash is 64 bytes
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return iv+cipher.encrypt(pad(raw)) # store iv so it can be decoded
def decrypt(data, key):
    """Decrypt AES-CBC ciphertext produced by encrypt().

    Extracts the IV from the first block, decrypts the rest, and strips the
    padding. Inputs two bytes objects: ciphertext & key. Returns bytes.
    """
    iv, data = data[:AES.block_size], data[AES.block_size:] # extract the iv from the start
    key = key[:32] # key must be 32 bytes, masterpass hash is 64 bytes
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(data))
|
[
"[email protected]"
] | |
9feacf0a85e2b4cb750a3f12f786d8971b96efc5
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc042/B/4081354.py
|
8eea907c466a07c6b45bfcd05fcae80479294c1a
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 250 |
py
|
#!/usr/bin/env python3
# Points are modeled as complex numbers. For a point p inside a polygon
# whose vertices are given in order, print the minimum distance from p to
# any polygon edge.
p = complex(*list(map(int, input().split())))
N = int(input())
li = [complex(*list(map(int, input().split()))) for _ in range(N)]
# Close the polygon so zip pairs the last vertex with the first.
li += [li[0]]
# ((p-a)/(b-a)).imag * |b-a| is the (signed) perpendicular distance from p
# to the line through a and b; the minimum over all edges is the answer.
m = min(((p - a) / (b - a)).imag * abs(b - a) for a, b in zip(li, li[1:]))
print(m)
|
[
"[email protected]"
] | |
a99f1838906b7f0cea9a4ddabadca266b47cebdd
|
89a5f9bdc33ed649bacf15e837f61729ee322faf
|
/Python (diff tasks)/pytask3-25.py
|
1b097cba6780bc1a13fd6d56156877b4407c876a
|
[] |
no_license
|
gonnawanna/univertasks
|
65a1d93ffbf876ad7ea432f9be321dbe17da2770
|
2b1bd28f997dbcf37ce03794550c851b78f58fb3
|
refs/heads/master
| 2020-05-25T04:26:52.450991 | 2019-05-20T11:25:25 | 2019-05-20T11:25:25 | 187,625,175 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
import numpy as np

# Build a random 3x4 integer matrix with entries in [-2, 3].
# np.random.random_integers() (inclusive upper bound) was deprecated in
# NumPy 1.11 and removed in 1.24; randint() uses an *exclusive* upper
# bound, hence high=4 to keep the same value range.
a = np.random.randint(-2, 4, size=(3, 4))
print(a)

# Append a row holding the number of zeros in each column...
zeros_per_column = (a == 0).sum(axis=0)
a = np.vstack((a, zeros_per_column))

# ...then a column holding the number of zeros in each row.  NOTE: this is
# computed on the already-augmented matrix, so the counts row is itself
# included -- this mirrors the original script's behavior.
zeros_per_row = (a == 0).sum(axis=1)
a = np.column_stack((a, zeros_per_row))
print(a)
|
[
"[email protected]"
] | |
e0fead7054f8729113d251c9efebcfdfc90f6514
|
14cc8a9947a0a6623dd354767107883bc575d209
|
/web_auto_pro/page/addbugpage.py
|
eb9dfeecc17b8e782a4c8d8bf8230a2a6aae0f14
|
[] |
no_license
|
lstesterhh/ls_web_auto_project
|
3a4f935840b4355c3e318e9ceeb72b2e41161934
|
947ed0b6ca2764f999420d8a66eb6eec0532904f
|
refs/heads/master
| 2021-04-01T03:18:01.243261 | 2020-03-19T06:29:42 | 2020-03-19T06:29:42 | 248,152,555 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,291 |
py
|
#coding:utf-8
from case.basway import base
from selenium import webdriver
import time
class Addbug(base):
    """Page object for ZenTao's "report a bug" (提Bug) flow.

    Locators are (by, value) tuples consumed by the helper methods
    inherited from ``base`` (click / sendkeys / findelement / ...).
    """

    # Navigation and bug-form locators.
    loc_test = ("xpath", './/*[@data-id="qa"]/a')
    loc_bug = ("xpath", './/*[@data-id="bug"]/a')
    loc_tibug = ("xpath", ".//*[text()='提Bug']")
    loc_yxbb = ("xpath", './/*[@class="chosen-choices"]/li')
    loc_truck = ("xpath", ".//*[@id='openedBuild_chosen']/div/ul/li")
    loc_biaoti = ("xpath", ".//*[@id='title']")
    iframe = ("class name", "ke-edit-iframe")
    body = ("class name", "article-content")
    submit = ("xpath", ".//*[@id='submit']")
    loc_buglist = ("xpath", ".//*[@id='bugList']/tbody/tr/td[4]/a")
    loc_firselement = ("xpath", ".//*[@id='bugList']/tbody/tr[1]/td[4]/a")

    def addbug(self, timestr):
        """Walk the UI to file a bug whose title embeds *timestr*."""
        self.click(self.loc_test)
        self.click(self.loc_bug)
        self.click(self.loc_tibug)
        self.click(self.loc_yxbb)
        self.click(self.loc_truck)
        self.sendkeys(self.loc_biaoti, text="测试的标题:%s" % timestr)
        # The description editor lives inside an iframe: enter it, type the
        # body, then return to the top document before submitting.
        editor_frame = self.findelement(self.iframe)
        self.driver.switch_to.frame(editor_frame)
        self.sendkeys(self.body, text="你好禅道body正文")
        self.driver.switch_to.default_content()
        self.click(self.submit)

    def get_bug_list_title_text(self):
        """Return the title text of the first bug in the list, or "".

        Best-effort: any lookup failure (empty list, stale element, ...)
        yields "" so the caller can treat "not found" uniformly.
        """
        try:
            all_title = self.findelements(self.loc_buglist)
            print(all_title)
            t1 = all_title[0].text
            print(t1)
            return t1
        except Exception:
            # Was a bare `except:`; narrowed so Ctrl-C / SystemExit still
            # propagate while lookup failures keep returning "".
            return ""

    def get_new_text(self, _text):
        """Check whether *_text* appears in the first bug-list row."""
        result = self.is_text_in_element(self.loc_firselement, _text=_text)
        print("结果如下:%s" % result)
        return result
if __name__ == '__main__':
    # Manual smoke test: drive a local ZenTao instance end to end --
    # log in, file a bug titled with the current timestamp, then check
    # that it shows up first in the bug list.
    driver=webdriver.Firefox()
    driver.maximize_window()
    driver.get("http://127.0.0.1:81/zentao/user-login.html")
    # Imported here (not at module top) so merely importing this page
    # object does not drag in the login test module.
    from case.atest_zentao_login_new import LoginTest
    login=LoginTest(driver)
    login.login()
    # NOTE(review): this local locator is never used below; Addbug carries
    # its own loc_firselement class attribute.
    loc_firselement=("xpath",".//*[@id='bugList']/tbody/tr[1]/td[4]/a")
    add_bug=Addbug(driver)
    timestr=str(time.time())  # unique-enough suffix for the bug title
    print(timestr)
    add_bug.addbug(timestr)
    result=add_bug.get_bug_list_title_text()
    print("开始校验相等: "+result)
    y="测试的标题:"+timestr  # expected title of the bug just created
    r1=add_bug.get_new_text(y)
    print("新的测试结果如下:%s" %r1)
    # assert result ==y
    driver.quit()
|
[
"[email protected]"
] | |
cb4ed431777e8b10a7599b169d74a3f947751042
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/ads/googleads/v5/googleads-py/tests/unit/gapic/googleads.v5/services/test_ad_service.py
|
8c1b9a30e2d2d599c646bfa72dbe5b188716250f
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 34,485 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v5.common.types import ad_asset
from google.ads.googleads.v5.common.types import ad_type_infos
from google.ads.googleads.v5.common.types import custom_parameter
from google.ads.googleads.v5.common.types import final_app_url
from google.ads.googleads.v5.common.types import url_collection
from google.ads.googleads.v5.enums.types import ad_type
from google.ads.googleads.v5.enums.types import app_url_operating_system_type
from google.ads.googleads.v5.enums.types import call_conversion_reporting_state
from google.ads.googleads.v5.enums.types import device
from google.ads.googleads.v5.enums.types import display_ad_format_setting
from google.ads.googleads.v5.enums.types import display_upload_product_type
from google.ads.googleads.v5.enums.types import legacy_app_install_ad_app_store
from google.ads.googleads.v5.enums.types import mime_type
from google.ads.googleads.v5.enums.types import response_content_type
from google.ads.googleads.v5.enums.types import served_asset_field_type
from google.ads.googleads.v5.enums.types import system_managed_entity_source
from google.ads.googleads.v5.resources.types import ad
from google.ads.googleads.v5.services.services.ad_service import AdServiceClient
from google.ads.googleads.v5.services.services.ad_service import transports
from google.ads.googleads.v5.services.types import ad_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert AdServiceClient._get_default_mtls_endpoint(None) is None
assert AdServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_ad_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = AdServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = AdServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = AdServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_service_client_get_transport_class():
transport = AdServiceClient.get_transport_class()
assert transport == transports.AdServiceGrpcTransport
transport = AdServiceClient.get_transport_class("grpc")
assert transport == transports.AdServiceGrpcTransport
@mock.patch.object(AdServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AdServiceClient))
def test_ad_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v5.services.services.ad_service.AdServiceClient.get_transport_class') as gtc:
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = AdServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v5.services.services.ad_service.AdServiceClient.get_transport_class') as gtc:
client = AdServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = AdServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = AdServiceClient()
@mock.patch.object(AdServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AdServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_ad_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = AdServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_ad_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_ad(transport: str = 'grpc', request_type=ad_service.GetAdRequest):
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_ad),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad.Ad(
resource_name='resource_name_value',
id=205,
final_urls=['final_urls_value'],
final_mobile_urls=['final_mobile_urls_value'],
tracking_url_template='tracking_url_template_value',
final_url_suffix='final_url_suffix_value',
display_url='display_url_value',
type_=ad_type.AdTypeEnum.AdType.UNKNOWN,
added_by_google_ads=True,
device_preference=device.DeviceEnum.Device.UNKNOWN,
name='name_value',
system_managed_resource_source=system_managed_entity_source.SystemManagedResourceSourceEnum.SystemManagedResourceSource.UNKNOWN,
text_ad=ad_type_infos.TextAdInfo(headline='headline_value'),
)
response = client.get_ad(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == ad_service.GetAdRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, ad.Ad)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.final_urls == ['final_urls_value']
assert response.final_mobile_urls == ['final_mobile_urls_value']
assert response.tracking_url_template == 'tracking_url_template_value'
assert response.final_url_suffix == 'final_url_suffix_value'
assert response.display_url == 'display_url_value'
assert response.type_ == ad_type.AdTypeEnum.AdType.UNKNOWN
assert response.added_by_google_ads is True
assert response.device_preference == device.DeviceEnum.Device.UNKNOWN
assert response.name == 'name_value'
assert response.system_managed_resource_source == system_managed_entity_source.SystemManagedResourceSourceEnum.SystemManagedResourceSource.UNKNOWN
def test_get_ad_from_dict():
test_get_ad(request_type=dict)
def test_get_ad_field_headers():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = ad_service.GetAdRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_ad),
'__call__') as call:
call.return_value = ad.Ad()
client.get_ad(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_ad_flattened():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_ad),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad.Ad()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_ad(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_ad_flattened_error():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_ad(
ad_service.GetAdRequest(),
resource_name='resource_name_value',
)
def test_mutate_ads(transport: str = 'grpc', request_type=ad_service.MutateAdsRequest):
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_ads),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad_service.MutateAdsResponse(
)
response = client.mutate_ads(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == ad_service.MutateAdsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, ad_service.MutateAdsResponse)
def test_mutate_ads_from_dict():
test_mutate_ads(request_type=dict)
def test_mutate_ads_field_headers():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = ad_service.MutateAdsRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_ads),
'__call__') as call:
call.return_value = ad_service.MutateAdsResponse()
client.mutate_ads(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_mutate_ads_flattened():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_ads),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad_service.MutateAdsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mutate_ads(
customer_id='customer_id_value',
operations=[ad_service.AdOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [ad_service.AdOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_ads_flattened_error():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mutate_ads(
ad_service.MutateAdsRequest(),
customer_id='customer_id_value',
operations=[ad_service.AdOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = AdServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.AdServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.AdServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_ad_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.AdServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_ad',
'mutate_ads',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_ad_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AdServiceTransport()
adc.assert_called_once()
def test_ad_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AdServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_ad_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.AdServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_ad_service_host_no_port():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_service_host_with_port():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_ad_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.AdServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize("transport_class", [transports.AdServiceGrpcTransport])
def test_ad_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.AdServiceGrpcTransport,])
def test_ad_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_ad_path():
customer = "squid"
ad = "clam"
expected = "customers/{customer}/ads/{ad}".format(customer=customer, ad=ad, )
actual = AdServiceClient.ad_path(customer, ad)
assert expected == actual
def test_parse_ad_path():
expected = {
"customer": "whelk",
"ad": "octopus",
}
path = AdServiceClient.ad_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_ad_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = AdServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = AdServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = AdServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = AdServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = AdServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = AdServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = AdServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = AdServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = AdServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = AdServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
    """Client and transport must forward `client_info` to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Constructing the client should prep wrapped RPCs with our client_info.
    with mock.patch.object(transports.AdServiceTransport, '_prep_wrapped_messages') as prep:
        client = AdServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Constructing the transport directly must do the same.
    with mock.patch.object(transports.AdServiceTransport, '_prep_wrapped_messages') as prep:
        transport_class = AdServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
55e546f6119a07a5c3ec899c648a6d4fb3a1b7f0
|
1b29279e1517cb331657539825d0b6259582d00d
|
/hdrstats.py
|
b64985b3ecdbe30431cf81221b380822baaa8439
|
[
"Unlicense"
] |
permissive
|
OCHA-DAP/dap-scrapers
|
1671a0f52ce19150dacae724394f893f87508f5e
|
3beb34acfe5bf5f2fd7d2a15857264a1e65bcf08
|
refs/heads/master
| 2022-01-18T17:38:23.097214 | 2021-12-22T20:19:56 | 2021-12-22T20:19:56 | 13,861,733 | 4 | 2 | null | 2016-03-31T17:18:08 | 2013-10-25T13:42:20 |
Python
|
UTF-8
|
Python
| false | false | 5,440 |
py
|
import re
import lxml.html
import requests
import xypath
import StringIO
import messytables
#from hamcrest import equal_to, is_in
from orm import session, Value, DataSet, Indicator
import orm
import dateutil.parser
#import re
# HDR indicator ids to scrape, one id per line of the literal below.
indicator_list = """
100106
38906
68606
89006
101406
98706
57506
38006
69706
103006
105906""".strip().split('\n')
# Existing author note documenting the ORM record fields
# (orm.Value / orm.DataSet / orm.Indicator).
"""Value: dsID, region, indID, period, value, source, is_number
DataSet: dsID, last_updated, last_scraped, name
Indicator: indID, name, units
"""
def disasters():
    """Fetch the HDR disasters table (indicator 98606) via its POST endpoint.

    Returns (html, source_url) so it can be fed to ``getindicator`` as an
    ``overridefunction`` — this table is not reachable through the normal
    per-indicator download URL.
    """
    url = "http://hdrstats.undp.org/en/tables/displayByRow.cfm"
    # POST payload: comma-separated country ids, the indicator id, the years
    # to include, and display options expected by the endpoint.
    payload = {
        "selectedCountries": "3,103,203,403,503,703,803,903,1103,1203,1303,1403,1503,1603,1703,1803,1903,2003,2103,2303,2403,2503,2603,2703,2903,3003,3103,3203,3303,3403,3503,3603,3803,3903,4003,4103,4203,4303,4403,4503,4703,4803,4903,5003,5103,5203,5303,5403,5503,5603,5703,5803,5903,6003,6103,6203,6303,6603,6703,6803,7103,7203,7303,7403,7503,7703,7903,8203,8303,8403,8503,8603,8803,8903,9003,9103,9203,9303,9403,9503,9603,9803,9903,10003,10103,10203,10303,10403,10503,10603,10703,10803,10903,11003,11103,11203,11303,11403,11503,11603,11703,11803,12103,12203,12303,12403,12503,12603,12703,12903,13003,13203,13303,13403,13503,13603,13703,13903,14003,14103,14203,14303,14403,14503,14803,14903,15003,15103,15503,15603,15703,15803,15903,16003,16103,16203,16303,16403,16603,16703,16903,17103,17203,17303,17503,17603,17803,17903,18003,18103,18203,18303,18403,18603,18703,18803,18903,19003,19103,19203,19303,19403,19503,19603,19703,19903,20003,20103,20203,20403,20503,20603,12003,20703,20803,21003,21103,21203,21303,21403,21603,21703,21803,21903,22003,22103,22203,22303,22403,22503,22603,23003,23103,23203,202,2,102,2602,302,402,602,702,502,902,802,1002,1202,1102,1402,1302,1502,1602,1702,2202,1802,2002,1902,2302,2102,2402,2502,2702,3402,3302,3502,3702,3602,3802,3902,4002,4102,",
        "selectedIndicators": "98606,",
        "selectedYears": "1960,1970,1980,1985,1990,1995,2000,2005,2006,2007,2008,2009,2010,2011,2012,",
        "language": "en",
        "displayIn": "row",
    }
    response = requests.post(url, data=payload)
    return response.content, url
def getindicator(ind="100106", overridefunction=None):
if not overridefunction:
baseurl = 'http://hdrstats.undp.org/en/indicators/display_cf_xls_indicator.cfm?indicator_id=%s&lang=en' % ind
html = requests.get(baseurl).content
else:
html, baseurl = overridefunction()
value = {'dsID': 'HDRStats',
'indID': "HDR:"+ind,
'source': baseurl,
'is_number': True}
dataset = {'dsID': 'HDRStats',
'last_scraped': orm.now(),
'name': 'Human Development Indicators, UNDP'}
indicator = {'indID': "HDR:"+ind}
hdi_indicator = {'indID': 'HDR:HDI Rank',
'name': 'Human Development Index rank',
'units': ''}
Indicator(**hdi_indicator).save()
DataSet(**dataset).save()
print html
exit(3)
htmlio = StringIO.StringIO(html)
messy = messytables.html.HTMLTableSet(htmlio)
table = xypath.Table.from_messy(list(messy.tables)[0])
root = lxml.html.fromstring(html)
"get odd indicator / update time"
indicator_text = root.xpath("//h2/text()")[-1]
print indicator_text
try:
indicator_split, = re.findall("(.*)\(([^\(\)]+)\)", indicator_text)
except ValueError:
indicator_split = [indicator_text, ""]
indicator['name'], indicator['units'] = indicator_split
indicator['name'] = indicator['name'].strip()
access_text, = [x.tail.strip() for x in root.xpath("//br") if str(x.tail) != "None" and x.tail.strip()]
access_date_raw, = re.findall('Accessed:(.*)from', access_text)
dataset['last_updated'] = dateutil.parser.parse(access_date_raw).isoformat()
print dataset['last_updated'], indicator['name'], "*", indicator['units']
Indicator(**indicator).save()
country_cell = table.filter("Country").assert_one()
years = country_cell.fill(xypath.RIGHT).filter(lambda b: b.value != '')
countries = country_cell.fill(xypath.DOWN)
hdi_rank = table.filter("HDI Rank").assert_one()
max_year = max(year.value for year in years)
for i in countries.junction(hdi_rank):
newvalue = dict(value)
newvalue['indID'] = "HDR:HDI Rank"
newvalue['region'] = get_region(i[0])
newvalue['value'] = i[2].value.strip()
newvalue['period'] = 2012 # TODO Hard coded for now because year it pertains to is not clear
if newvalue['value'].strip() != '..':
Value(**newvalue).save()
for i in countries.junction(years):
newvalue = dict(value)
newvalue['region'] = get_region(i[0])
newvalue['value'] = i[2].value.strip()
newvalue['period'] =i[1].value.strip()
if newvalue['value'].strip() != '..':
Value(**newvalue).save()
print newvalue
session.commit()
def get_region(country):
    """Return the region slug for a table cell.

    Country rows carry exactly one profile link; the slug is taken from it.
    Anything else (aggregate rows, headers) falls back to the cell text.
    """
    cell = lxml.html.fromstring(country.properties['html'])
    hrefs = cell.xpath('//a/@href')
    if len(hrefs) != 1:
        # Non-country rows have no single profile link — use the plain text.
        return country.value.strip()
    slug, = re.findall("profiles/([^\.]*)\.html", hrefs[0])
    return slug
#getindicator("98606", disasters)
#exit()
# Scrape every configured HDR indicator in sequence.
for ind in indicator_list:
    print ind
    getindicator(ind)
|
[
"[email protected]"
] | |
e7c26e50bd6e4c25ab1b9b130752e585a2be96cd
|
4d1614ba0104bb2b4528b32fa0a2a1e8caa3bfa3
|
/exp/motivation/more_ports_exp/exp_result.py
|
bb856678abe4af31e07beb1358da96f738d59094
|
[
"MIT"
] |
permissive
|
Lossless-Virtual-Switching/Backdraft
|
c292c87f8d483a5dbd8d28009cb3b5e263e7fb36
|
4cedd1403c7c9fe5e1afc647e374173c7c5c46f0
|
refs/heads/master
| 2023-05-24T03:27:49.553264 | 2023-03-01T14:59:00 | 2023-03-01T14:59:00 | 455,533,889 | 11 | 4 |
MIT
| 2022-04-20T16:34:22 | 2022-02-04T12:09:31 |
C
|
UTF-8
|
Python
| false | false | 2,972 |
py
|
def load_result_file(path):
    """Parse a results file made of repeated ``Result.generate_report`` blocks.

    Each record is a sequence of ``name: value`` lines terminated by a line
    starting with '==='.

    Args:
        path: filesystem path of the results file.
    Returns:
        A list of Result objects, one per '==='-terminated record.
    """
    results = []
    with open(path) as f:
        r = Result()
        for line in f:
            if line.startswith('==='):
                # Record separator: commit the current record, start a new one.
                results.append(r)
                r = Result()
                continue
            if not line.strip():
                # generate_report ends each record with a blank line; the
                # original `line.split(':')` crashed (ValueError) on it.
                continue
            name, _, val = line.partition(':')
            name = name.strip()
            if name == 'total ports':
                r.total_ports = int(val)
            elif name == 'excess ports':
                r.excess_ports = int(val)
            elif name == 'mean latency (us)':
                r.mean_latency_us = float(val)
            elif name == 'pkt per sec':
                r.pkt_per_sec = float(val)
            elif name == 'pkt send failure':
                r.send_failure = int(val)
            elif name == 'total pkt sent':
                r.total_pkt_send = int(val)
            elif name == 'bess_drops':
                r.bess_drops = int(val)
            elif name == 'experiment duration':
                r.exp_duration = float(val)
            else:
                print('unknown name while parsing results file:', name, val)
    return results
class Result:
    """Aggregated metrics for one run of the more-ports experiment.

    All fields default to -1, meaning "not measured".
    """

    @classmethod
    def from_netperf_stdout(cls, txt):
        """Build a Result by scraping the netperf client's stdout text."""
        parsed = Result()
        for line in txt.split('\n'):
            tokens = line.split()
            if 'ran for' in line:
                # "... ran for <secs> ... <pkts> pkts ..."
                parsed.exp_duration = float(tokens[2])
                parsed.total_pkt_send = int(tokens[5])
            elif line.startswith('client reqs/s'):
                parsed.pkt_per_sec = float(tokens[2])
            elif line.startswith('mean latency (us):'):
                parsed.mean_latency_us = float(tokens[3])
            elif line.startswith('send failures:'):
                parsed.send_failure = int(tokens[2])
        return parsed

    def __init__(self):
        # -1 marks a field that was never filled in.
        self.excess_ports = -1
        self.total_ports = -1
        self.mean_latency_us = -1
        self.pkt_per_sec = -1
        self.send_failure = -1
        self.total_pkt_send = -1
        self.bess_drops = -1
        self.exp_duration = -1

    def set_excess_ports(self, count):
        """Record the excess-port count; the total adds the 2 baseline ports."""
        self.excess_ports = count
        self.total_ports = count + 2

    def generate_report(self):
        """Serialize to the 'name: value' text parsed by load_result_file."""
        report_lines = [
            'total ports: {}'.format(self.total_ports),
            'excess ports: {}'.format(self.excess_ports),
            'mean latency (us): {}'.format(self.mean_latency_us),
            'pkt per sec: {}'.format(self.pkt_per_sec),
            'pkt send failure: {}'.format(self.send_failure),
            'total pkt sent: {}'.format(self.total_pkt_send),
            'bess_drops: {}'.format(self.bess_drops),
            'experiment duration: {}'.format(self.exp_duration),
            '',
        ]
        return '\n'.join(report_lines)

    def __repr__(self):
        return '<More Ports Exp Result>'
|
[
"[email protected]"
] | |
443b2b91dc9daca8881db0e7a62c89c4519e69c5
|
5369083ed1f0feed1770783ca52fdc46da8a8ad3
|
/scrips/jogo_da_velha.py
|
8c3d7c7cd62d93ffc47a171fcbe9b8d5268dd8a5
|
[
"MIT"
] |
permissive
|
ericxlima/ILoveCoffee
|
6e2b4ba3226fe3736a62bf20e77953f38d89c457
|
88a3fbc1a0233993870e2fdb59b614c5d6040f24
|
refs/heads/master
| 2022-12-23T13:14:32.681034 | 2020-10-02T20:24:42 | 2020-10-02T20:24:42 | 289,762,978 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,852 |
py
|
import json
from time import sleep
class JogoDaVelha:
    """Console tic-tac-toe for two players backed by etc/usuarios.json.

    The second player (``other_nick``) is authenticated against the JSON
    store, plays 'X' and moves first; the owner (``nick``) plays 'O'.
    The winner is credited points via ``salvar``.
    """

    def __init__(self, nick):
        self.nick = nick
        self.other_nick = None
        self.tentativas = 0  # moves played so far (board holds at most 9)
        self.matriz = [[' ' for _ in range(3)] for _ in range(3)]
        self.other_player()
        if self.other_nick:
            self.jogar()
            sleep(3.5)

    def print_matriz(self):
        """Print the 3x3 board with 1-based row/column labels."""
        print('  1 2 3 ')
        print(' ---------')
        print('1| {} |'.format(' '.join(self.matriz[0])))
        print('2| {} |'.format(' '.join(self.matriz[1])))
        print('3| {} |'.format(' '.join(self.matriz[2])))
        print(' ---------')

    def other_player(self):
        """Prompt for and authenticate the second player.

        Leaves ``self.other_nick`` set on success, or None if the user
        typed "sair" to give up.
        """
        try:
            other_nick = input('Insira o nick do segundo jogador: ')
            other_senha = input('Insira a senha do segundo jogador: ')

            def other_player_exist(nick, senha):
                # Look the (nick, senha) pair up in the JSON user store.
                with open('etc/usuarios.json', 'r') as file:
                    geral = json.load(file)
                for pessoa in geral['usuarios']:
                    if pessoa['nome'] == nick and pessoa['senha'] == senha:
                        return True
                return False

            while not other_player_exist(other_nick, other_senha):
                print('\nO usuário inserido não existe (Senha ou Nick inválido)\n')
                continuar = input('Insira "sair" para sair ou "continuar" para inserir outro usuário? ').lower()
                if continuar == 'sair':
                    # Abort via the exception handler below.
                    raise TypeError('')
                other_nick = input('Insira o nick do segundo jogador: ')
                other_senha = input('Insira a senha do segundo jogador: ')
            self.other_nick = other_nick
        except TypeError:
            self.other_nick = None

    def ganhador(self):
        """Check the board for a winner.

        Returns the winner announcement string (crediting points as a side
        effect) or False when nobody has three in a row yet.
        """
        def salvar(nome):
            # Credit points to `nome` in the JSON user store.
            # NOTE(review): adds 250 points while the printed message says
            # "750 pontos" — confirm which number is intended.
            with open('etc/usuarios.json', 'r') as file:
                geral = json.load(file)
            for pessoa in geral['usuarios']:
                if pessoa['nome'] == nome:
                    pessoa['pontos'] += 250
            with open('etc/usuarios.json', 'w') as file2:
                file2.write(json.dumps(geral, indent=4))

        m = self.matriz
        # Rows, columns and both diagonals; a blank line yields ' ' and is
        # filtered out below.
        p1 = m[0][0] if m[0][0] == m[0][1] == m[0][2] else False
        p2 = m[0][0] if m[0][0] == m[1][0] == m[2][0] else False
        p3 = m[2][2] if m[2][0] == m[2][1] == m[2][2] else False
        p4 = m[0][2] if m[0][2] == m[1][2] == m[2][2] else False
        p5 = m[1][0] if m[1][0] == m[1][1] == m[1][2] else False
        p6 = m[1][1] if m[0][1] == m[1][1] == m[2][1] else False
        p7 = m[0][0] if m[0][0] == m[1][1] == m[2][2] else False
        p8 = m[0][2] if m[0][2] == m[1][1] == m[2][0] else False
        ganhou = [x for x in [p1, p2, p3, p4, p5, p6, p7, p8] if x == 'X' or x == 'O']
        if not ganhou:
            return False
        # BUG FIX: the original compared the *list* to 'X' (`ganhou == 'X'`),
        # which is always False — X's victories were credited to the O player.
        if ganhou[0] == 'X':
            salvar(self.other_nick)
            return f"X - {self.other_nick} Ganhou! 750 pontos"
        salvar(self.nick)
        return f"O - {self.nick} Ganhou! 750 pontos"

    def coordenadas(self):
        """Ask the current player for a move until a free, valid cell is given."""
        def x_or_o():
            # Odd move numbers are X (the second player starts).
            self.tentativas += 1
            return 'O' if not self.tentativas % 2 else 'X'

        def analise(coordenada, linha=None, coluna=None):
            try:
                linha, coluna = list(map(int, list(coordenada)))
            except ValueError:
                print('Você deve inserir apenas números')
            if linha not in [1, 2, 3] or coluna not in [1, 2, 3]:
                print('As coordenadas devem estar entre [1, 2, 3]')
                return False
            elif self.matriz[linha - 1][coluna - 1] != ' ':
                print('Esta celula já está ocupada')
                return False
            else:
                self.matriz[linha - 1][coluna - 1] = x_or_o()
                return True

        user_coordenada = input('Insira as coordenadas [linha coluna]: ').replace(' ', '')
        while not analise(user_coordenada):
            user_coordenada = input('Insira as coordenadas [linha coluna]: ').replace(' ', '')

    def jogar(self):
        """Game loop: alternate moves until someone wins or the board fills up.

        Returns False on a draw, None after a win (matching the original).
        """
        print(f'\nO jogador {self.other_nick} começa com X\n'
              f'O jogador {self.nick} é o O')
        # BUG FIX: the original loop prompted for a 10th move on a full board
        # (hanging on "celula ocupada"), could report a 9th-move win as a
        # draw, and called ganhador() several times per turn — awarding the
        # winner's points more than once.  Check state *before* prompting and
        # call ganhador() exactly once per iteration.
        while True:
            resultado = self.ganhador()
            if resultado:
                print(resultado)
                return
            if self.tentativas >= 9:
                print("Deu velha")
                return False
            self.print_matriz()
            self.coordenadas()
|
[
"[email protected]"
] | |
c17fd3267ac51e7a4fda177e6d131a4b06b9b26d
|
ab41437412fb5b94c4d9618402242171adb9f24a
|
/PasswordGenerator-project/generator/views.py
|
7958ed9f68efd15463fad8e1da14396556f260f9
|
[] |
no_license
|
GeranMS/django3-password-generator
|
9968949985188f3db324b2dbeed58f387259fe19
|
a0bd442a3e83df9f3f350eccd1cbf168ac6cda85
|
refs/heads/master
| 2022-11-24T18:16:33.837422 | 2020-07-21T10:53:08 | 2020-07-21T10:53:08 | 281,373,198 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
from django.shortcuts import render
from django.http import HttpResponse
import random
# Create your views here.
def home(request):
    """Render the landing page with a placeholder password value."""
    context = {'password': 'ihidguyuy'}
    return render(request, 'generator/home.html', context)
def about(request):
    """Render the static 'about' page."""
    return render(request, 'generator/about.html')
def password(request):
    """Generate a random password and render it.

    Query parameters:
        uppercase, special, numbers: presence-flags that widen the alphabet.
        length: desired password length (int, default 12).
    """
    # BUG FIX: start from a *list* so extend() always works.  The original
    # only converted the string to a list inside the 'uppercase' branch, so
    # checking 'special' or 'numbers' alone raised
    # AttributeError: 'str' object has no attribute 'extend'.
    characters = list('abcdefghijklmnopqrstuvwxyz')
    if request.GET.get('uppercase'):
        characters.extend('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    if request.GET.get('special'):
        characters.extend('!@#$%^&*()?')
    if request.GET.get('numbers'):
        characters.extend('0123456789')
    length = int(request.GET.get('length', 12))
    # NOTE(review): `random` is not cryptographically secure; consider the
    # `secrets` module for real password generation.
    generated = ''.join(random.choice(characters) for _ in range(length))
    return render(request, 'generator/password.html', {'password': generated})
|
[
"[email protected]"
] | |
cb9b3aee824c57b21f29ee6facc88b380fc9eb68
|
a0265b6b2350586a3c4e5b52c593129d291bb9ca
|
/synthetic/synthetic_cov.py
|
ee8285482027937aef0f48639349edfc68f94749
|
[] |
no_license
|
yowald/elliptical-losses
|
ed3ae0f58a49abecfa7dc76516e6e4ac969dee05
|
0497e4b6863f300e5d05f46d0704c786a59fa986
|
refs/heads/master
| 2020-08-28T17:57:06.604507 | 2020-01-04T16:22:53 | 2020-01-04T16:22:53 | 217,775,920 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,820 |
py
|
"""Functions to generate synthetic data and run experiment.
flags control number of variables, sparsity parameter, seed etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from absl import app
# from absl import flags
import os
import sys
import numpy as np
import scipy as sp
from scipy.linalg import cho_factor
from scipy.linalg import LinAlgError
from sklearn.datasets import make_sparse_spd_matrix
import tensorflow as tf
from ..PositiveScalarSamplerFactory import PositiveScalarSamplerFactory
from ..structured_optimizers import GMRFOptimizer
from ..structured_optimizers import LossFunctionFactory
from ..structured_optimizers import structured_elliptical_maximum_likelihood
# Command-line flags: problem size, seeds, optimizer step budgets, and the
# parameters of the elliptical data-generating distribution.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_features', 10, '')
tf.app.flags.DEFINE_integer('seed', 1, '')
tf.app.flags.DEFINE_integer('num_steps_newton', 75000,
                            """Number of steps for newton optimizer.""")
tf.app.flags.DEFINE_integer('num_steps_mm_newton', 1000,
                            """Number of steps or newton in MM algorithm.""")
tf.app.flags.DEFINE_integer('num_steps_mm', 100,
                            """Number of steps for MM algorithm.""")
tf.app.flags.DEFINE_boolean('delete_checkpoint', False,
                            """Delete existing checkpoint and start fresh.""")
tf.app.flags.DEFINE_boolean('delete_existing', False,
                            """Delete existing checkpoint and start fresh.""")
tf.app.flags.DEFINE_float('beta', 0.5,
                          """shape for generalized gaussian data creation.""")
tf.app.flags.DEFINE_float('nu', 3.,
                          'degrees of freedom for multivariate-t'
                          'data creation.')
tf.app.flags.DEFINE_float('learning_rate', 0.05,
                          """Train Validation fraction.""")
tf.app.flags.DEFINE_boolean('standardize_data', True,
                            """If True, divides data by standard deviation.""")
tf.app.flags.DEFINE_float('outliers_std', 10., '')
tf.app.flags.DEFINE_float('outliers_samples_prob', 0.05, '')
tf.app.flags.DEFINE_float('sparsity_alpha', 0.85, '')
tf.app.flags.DEFINE_string('sampler_type', 'mggd',
                           """scalar sampler type to use for data generation""")
tf.app.flags.DEFINE_string('save_dir',
                           './elliptical-losses/synthetic/results/',
                           'Directory where to write event logs '
                           'and checkpoint.')
def is_pos_def(matrix):
  """True iff every eigenvalue of `matrix` is strictly positive."""
  eigenvalues = np.linalg.eigvals(matrix)
  return (eigenvalues > 0).all()
def get_sparse_high_correlations(dim=25, seed=1, rep_num=1000,
                                 sparsity_alpha=0.9):
  """Gets a sparse inverse covariance matrix.

  Draws `rep_num` candidate sparse SPD precision matrices and keeps the one
  whose implied correlation matrix has the largest mean absolute
  lower-triangular entry.

  Args:
    dim: dimension of the returned matrix.
    seed: seed for reproducibility.
    rep_num: number of candidate matrices to draw.
    sparsity_alpha: sparsity parameter (see make_sparse_spd_matrix).
  Returns:
    The selected sparse inverse covariance (precision) matrix.
  """
  np.random.seed(seed)
  best_score = 0
  for _ in range(rep_num):
    precision = make_sparse_spd_matrix(dim, alpha=sparsity_alpha,
                                       smallest_coef=.4, largest_coef=.7)
    correlations = np.linalg.inv(precision)
    # Normalize to a correlation matrix: divide by sqrt of the diagonal
    # on both sides.
    scale = np.sqrt(np.expand_dims(np.diag(correlations), axis=0))
    correlations = correlations / scale / scale.transpose()
    score = np.abs(np.tril(correlations)).mean()
    if score > best_score:
      best_precision = precision
      best_score = score
  return best_precision
def get_edge_indices_from_matrix(matrix, miss_probability=0.0):
  """Gets the indices of non-zero off-diagonal entries of `matrix`.

  Each index is a pair [i, j] with matrix[i, j] != 0; both [i, j] and its
  mirror [j, i] are returned (upper-triangle pairs first).

  Args:
    matrix: square matrix to extract the edges of.
    miss_probability: float in [0., 1.]; each edge is dropped with this
      probability (both orientations together).
  Returns:
    np.array of index pairs.
  """
  n = matrix.shape[0]
  upper_edges = []
  lower_edges = []
  for row in range(n - 1):
    for col in range(row + 1, n):
      # Draw the drop coin only for actual edges (keeps RNG use identical).
      if np.abs(matrix[row, col]) > 0 and np.random.rand() > miss_probability:
        upper_edges.append([row, col])
        lower_edges.append([col, row])
  return np.array(upper_edges + lower_edges)
def check_pd(matrix, lower=True):
  """Checks if matrix is positive definite via a Cholesky factorization.

  Args:
    matrix: input to check positive definiteness of.
    lower: If True gets the lower triangular part of the Cholesky
      decomposition.
  Returns:
    (True, L) with L the (lower-)triangular Cholesky factor if `matrix` is
    positive definite, otherwise (False, None).
  Raises:
    LinAlgError: for factorization failures other than "not positive
      definite" (e.g. non-square or NaN input).
  """
  try:
    return True, np.tril(cho_factor(matrix, lower=lower)[0])
  except LinAlgError as err:
    if 'not positive definite' in str(err):
      return False, None
    # BUG FIX: the original fell through here and returned None implicitly,
    # silently hiding unrelated failures; re-raise them instead.
    raise
def get_elliptic_data(scalar_sampler, n, m_train, seed=1, sparsity_alpha=0.9):
  """Generates data from an elliptic distribution.

  Args:
    scalar_sampler: a function that receives an integer m and draws m
      positive scalars from some distribution; the distribution defines the
      type of elliptic distribution used (see Frahm 04).
    n: number of variables in the elliptic distribution.
    m_train: number of training examples to draw.
    seed: seed for the random number generator, for reproducibility.
    sparsity_alpha: sparsity parameter (see make_sparse_spd_matrix).
  Returns:
    (train_data, inverse_cov): the generated n x m_train data matrix and the
    ground-truth inverse covariance it was generated with.
  Raises:
    Exception: if the generated covariance matrix is not positive definite.
  """
  np.random.seed(seed)
  num_samples = m_train
  inverse_cov = get_sparse_high_correlations(n, seed,
                                             sparsity_alpha=sparsity_alpha)
  inverse_cov = np.float32(inverse_cov)
  covariance = np.linalg.inv(inverse_cov)
  # BUG FIX: check_pd returns a (bool, factor) tuple, and a non-empty tuple
  # is always truthy — the original `if not check_pd(covariance)` could
  # never fire.  Test the boolean flag explicitly.
  if not check_pd(covariance)[0]:
    raise Exception('covariance matrix is not Positive Definite')
  # Uniform directions on the unit sphere, scaled by the sampled radii and
  # shaped by the covariance square root.
  spherical_uniform = np.random.randn(n, num_samples)
  spherical_uniform /= np.linalg.norm(spherical_uniform, axis=0)
  scaling_params = scalar_sampler(num_samples)
  train_data = np.multiply(scaling_params.T,
                           sp.linalg.sqrtm(covariance).dot(spherical_uniform))
  return train_data, inverse_cov
def get_losses_dictionary(features_dimension):
  """Creates a dictionary of the losses to test, with their gradients.

  Args:
    features_dimension: dimension of the inverse covariance being estimated.
  Returns:
    Dict mapping loss name -> (loss, grad) as produced by
    LossFunctionFactory.
  """
  factory = LossFunctionFactory()
  d = features_dimension
  losses = {}
  losses['tyler'] = factory.tylers_estimator({'d': d})
  # Generalized Gaussian losses for two shape parameters.
  for beta in (0.2, 0.5):
    key = 'gen_gauss_%s' % str(beta).replace('.', '_')
    losses[key] = factory.generalized_gaussian({
        'm': d ** ((beta - 1) / beta),
        'beta': beta,
    })
  losses['multivariate_t'] = factory.multivariate_t({'nu': 3., 'd': d})
  return losses
def get_distance_from_ground_truth(ground_truth_matrix, estimation, std=None):
  """Calculates a normalized distance between estimation and ground truth.

  Args:
    ground_truth_matrix: the true inverse covariance matrix being estimated.
    estimation: the estimate of that matrix.
    std: if not None, the per-feature standard deviations of the training
      data, used to restore the original feature scales before comparing.
  Returns:
    The Frobenius distance between the trace-normalized estimation and
    ground_truth_matrix, divided by the Frobenius norm of
    ground_truth_matrix.
  """
  if std is not None:
    inv_std = np.linalg.inv(np.diag(std))
    estimation = inv_std.dot(estimation).dot(inv_std)
  # Match traces before comparing.  BUG FIX: use a copy — the original
  # `estimation *= ...` mutated the caller's array in place when std was
  # None.
  estimation = estimation * (np.trace(ground_truth_matrix) /
                             np.trace(estimation))
  difference = np.linalg.norm(estimation - ground_truth_matrix)
  return difference / np.linalg.norm(ground_truth_matrix)
def run_experiment(data_train, edge_indices_with_diag, inverse_covariance,
                   seed, sampler_type, sampler_param, sparsity_alpha,
                   num_steps_newton, num_steps_mm_newton, num_steps_mm,
                   standardize_data=True):
  """Runs a single experiment comparing all losses on generated data.

  Args:
    data_train: the generated data to run on.
    edge_indices_with_diag: list of edges to use for the graphical structure.
      An edge is itself a list of two integers in the range [0..num_features-1].
      Should include self edges (i.e. [i,i]) for digonal elements of the inverse
      covariance.
    inverse_covariance: the ground truth inverse covariance matrix used to
      generate the data.
    seed: the seed used in generation of the data, for logging purposes.
    sampler_type: the type of sampler used to generate the data (see
      PositiveScalarSamplerFactory)
    sampler_param: parameter for the scalar sampler (shape for mggd and degrees
      of freedom for t-distribution)
    sparsity_alpha: sparsity parameter. see details of make_sparse_spd_matrix.
    num_steps_newton: maximum number of steps for newton optimizer in structured
      gmrfs.
    num_steps_mm_newton: maximum number of steps for inner loop newton optimizer
      in minimization majorization of structured robust mrfs.
    num_steps_mm: maximum number of minimization majorization steps in robust
      mrfs.
    standardize_data: if True, divides training data by standard deviations
      before passing to structured optimizers.
  """
  [num_features, m_train] = data_train.shape
  tf.logging.info('==== seed={}, m_train={},'.format(seed, m_train))
  # Create directory to save results.
  # Layout: save_dir/<n>_<m>/<seed>/<sampler>_<param>/<sparsity_alpha>/
  full_dir = os.path.join(FLAGS.save_dir, '%d_%d' %
                          (num_features, m_train))
  full_dir = os.path.join(full_dir, '%d' % (seed))
  if sampler_type == 'mggd':
    full_dir = os.path.join(full_dir,
                            '%s_beta_%0.2f' % (sampler_type, sampler_param))
  elif sampler_type == 'multivariate_t':
    full_dir = os.path.join(full_dir,
                            '%s_nu_%0.2f' % (sampler_type, sampler_param))
  full_dir = os.path.join(full_dir, '%0.2f' % (sparsity_alpha))
  if tf.gfile.Exists(full_dir):
    if FLAGS.delete_existing:
      tf.gfile.DeleteRecursively(full_dir)
  tf.gfile.MakeDirs(full_dir)
  # Standardize data and keep stds
  # (std_val is passed on so errors are measured in the original scale).
  std_val = None
  if standardize_data:
    std_val = np.std(data_train, axis=1)
    data_train_ = data_train/np.std(data_train, axis=1, keepdims=True)
  else:
    data_train_ = data_train
  # Sample Covariance
  # Baseline estimator: pseudo-inverse of the empirical covariance.
  sample_cov = data_train.dot(data_train.T)/m_train
  inverse_sample_cov = np.linalg.pinv(sample_cov)
  sample_cov_err = get_distance_from_ground_truth(inverse_covariance,
                                                  inverse_sample_cov,
                                                  std=None)
  # Save results for sample covariance estimator.
  # Each estimator's error is stored as a single scalar .npy file.
  fname = os.path.join(full_dir, '%s.npy' % 'sample_cov_err')
  print('fname', fname)
  with tf.gfile.Open(fname, 'w') as fp:
    print(sample_cov_err)
    np.save(fp, sample_cov_err)
  # Gaussian MRF
  gmrf_optimizer = GMRFOptimizer(num_features, edge_indices_with_diag)
  estimate_gmrf, _ = (
      gmrf_optimizer.alt_newton_coord_descent(data_train_,
                                              max_iter=num_steps_newton))
  gmrf_err = get_distance_from_ground_truth(inverse_covariance, estimate_gmrf,
                                            std=std_val)
  fname = os.path.join(full_dir, '%s.npy' % 'gmrf_err')
  print('fname', fname)
  with tf.gfile.Open(fname, 'w') as fp:
    print(gmrf_err)
    np.save(fp, gmrf_err)
  # Robust (elliptical) structured MRFs: one estimate per loss.
  n_steps_newt = num_steps_mm_newton
  loss_dict = get_losses_dictionary(num_features)
  for estimator_name, (loss, loss_grad) in loss_dict.items():
    estimate_cur, _ = (
        structured_elliptical_maximum_likelihood(data_train_, loss, loss_grad,
                                                 edge_indices_with_diag,
                                                 initial_value=None,
                                                 max_iters=num_steps_mm,
                                                 newton_num_steps=n_steps_newt))
    cur_err = get_distance_from_ground_truth(inverse_covariance, estimate_cur,
                                             std=std_val)
    fname = os.path.join(full_dir, '%s.npy' % (estimator_name+'_err'))
    print('fname', fname)
    with tf.gfile.Open(fname, 'w') as fp:
      print(cur_err)
      np.save(fp, cur_err)
def main(argv):
  """Entry point: builds the data sampler, generates data, runs experiments."""
  del argv # Unused.
  tf.logging.set_verbosity(tf.logging.INFO)
  seed = FLAGS.seed
  num_features = FLAGS.num_features
  num_steps_newton = FLAGS.num_steps_newton
  num_steps_mm_newton = FLAGS.num_steps_mm_newton
  num_steps_mm = FLAGS.num_steps_mm
  sparsity_alpha = FLAGS.sparsity_alpha
  sampler_type = FLAGS.sampler_type
  standardize_data = FLAGS.standardize_data
  beta = FLAGS.beta
  nu = FLAGS.nu
  # Get the scalar sampler for generating elliptic data
  scalar_sampler_factory = PositiveScalarSamplerFactory()
  if sampler_type == 'mggd':
    # Generalized Gaussian shape must lie in (0, 1].
    assert(beta <= 1 and beta > 0)
    sampler_param = beta
    gen_gauss_sampler_params = {'shape': beta, 'dim': num_features}
    scalar_sampler = \
        scalar_sampler_factory.generalized_gaussian(gen_gauss_sampler_params)
  elif sampler_type == 'multivariate_t':
    # t-distribution needs nu > 2 for a finite covariance.
    assert nu > 2
    sampler_param = nu
    multi_t_sampler_params = {'nu': nu, 'dim': num_features}
    scalar_sampler = \
        scalar_sampler_factory.multivariate_t(multi_t_sampler_params)
  else:
    raise ValueError('Unrecognized sampler type')
  # Create training data and ground truth parameters.
  m_train_max = 1500
  np.random.seed(seed)
  data_train, inverse_cov = get_elliptic_data(scalar_sampler, num_features,
                                              m_train_max, seed=seed,
                                              sparsity_alpha=sparsity_alpha)
  edge_indices = get_edge_indices_from_matrix(inverse_cov)
  edge_indices = np.concatenate([edge_indices,
                                 [[i, i] for i in range(num_features)]])
  # Re-run the experiment on nested subsamples of increasing size.
  m_trains = [30, 40, 50, 60, 70, 80, 100, 150, 250, 500, 850]
  for m in m_trains:
    np.random.seed(seed)
    train_inds = np.random.permutation(m_train_max)[:m]
    data_train_cur = data_train[:, train_inds]
    print('==== n={}, seed={}, m_train={}, sparsity_alpha={}'
          ', distribution_beta={}'.format(num_features, seed, m, sparsity_alpha,
                                          beta))
    run_experiment(data_train_cur, edge_indices, inverse_cov, seed,
                   sampler_type, sampler_param, sparsity_alpha,
                   num_steps_newton, num_steps_mm_newton, num_steps_mm,
                   standardize_data=standardize_data)
if __name__ == '__main__':
  # tf.app.run parses flags and then dispatches to main().
  tf.app.run(main)
|
[
"[email protected]"
] | |
d235aec102d27ca4fae3b8e5d215f502675ae6fb
|
17c366bf8aa9fed59fb3d91db06142860cb9ce38
|
/nbs/examples/mnist_blocks.py
|
14043be821f6c97c3bf782edb3b9b4b097f38029
|
[
"Apache-2.0"
] |
permissive
|
dienhoa/fastai
|
3f4884f9fb96f9e5199e33b959478dfa0bbfa0d4
|
fdce0330e05ae02db90c3456f9fc2827c3cf86a0
|
refs/heads/master
| 2022-04-14T06:27:52.994595 | 2022-04-13T21:24:27 | 2022-04-13T21:24:27 | 154,803,492 | 0 | 0 |
Apache-2.0
| 2018-10-26T08:38:44 | 2018-10-26T08:38:43 | null |
UTF-8
|
Python
| false | false | 422 |
py
|
from fastai.vision.all import *
# fastai DataBlock for MNIST: grayscale images, labels from the parent
# directory name, train/valid split by grandparent dir ('training'/'testing').
splitter = GrandparentSplitter(train_name='training', valid_name='testing')
mnist = DataBlock(blocks=(ImageBlock(PILImageBW), CategoryBlock),
                  get_items=get_image_files, splitter=splitter, get_y=parent_label)
if __name__ == '__main__':
    # Download MNIST and fine-tune a resnet18 classifier for one cycle.
    data = mnist.dataloaders(untar_data(URLs.MNIST), bs=256)
    learn = cnn_learner(data, resnet18)
    learn.fit_one_cycle(1, 1e-2)
|
[
"[email protected]"
] | |
8284303e2d78a6089a9fd4c7ccbb37454b2e67c4
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/abc200_c.py
|
e206350c17a0371913a9b0f7696b9550c9039895
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 278 |
py
|
# https://atcoder.jp/contests/abc200/tasks/abc200_c
# Count index pairs (i, j), i < j, whose values are congruent mod 200.
from collections import Counter
n = int(input())
residues = [int(x) % 200 for x in input().split()]
frequency = Counter(residues)
total = 0
for count in frequency.values():
    # A residue class with c members contributes C(c, 2) = c*(c-1)/2 pairs.
    total += count * (count - 1) // 2
print(total)
|
[
"[email protected]"
] | |
9ea1070cae4c16524eb8794b5d79ee1e19e2d7f7
|
a44dd48829823ce92522c855279a41f10bda1481
|
/docker-pyspark-pytest/examples/streaming/streaming/constant.py
|
e9a2ef0350ed5c5ce4dd18d134b6bb33f371e3ee
|
[] |
no_license
|
i05nagai/docker-sandbox
|
f6b56f5ad283c7e74b6f1127e20c1b367eb79909
|
5b9d46cd396d67ba241ec9bc4a5d7ce6ad1699d7
|
refs/heads/master
| 2023-09-04T12:52:03.444928 | 2023-08-28T11:24:36 | 2023-08-28T11:24:36 | 124,549,748 | 2 | 0 | null | 2023-08-28T11:24:38 | 2018-03-09T14:19:07 |
C++
|
UTF-8
|
Python
| false | false | 815 |
py
|
import os
# Default options for Spark Structured Streaming's Kafka source; broker,
# truststore and offset settings come from environment variables
# (SASL_SSL transport with PLAIN authentication).
STREAM_READER_KAFKA_DEFAULT_OPTIONS = {
    "kafka.bootstrap.servers": os.environ.get('KAFKA_BOOTSTRAP_SERVERS'),
    "kafka.security.protocol": "SASL_SSL",
    "kafka.sasl.mechanism": "PLAIN",
    "kafka.ssl.truststore.location": os.environ.get('KAFKA_SSL_TRUSTSTORE_LOCATION'),
    "kafka.ssl.truststore.password": os.environ.get('KAFKA_SSL_TRUSTSTORE_PASSWORD'),
    "startingOffsets": os.environ.get('STARTING_OFFSETS'),
    "maxOffsetsPerTrigger": os.environ.get('MAX_OFFSETS_PER_TRIGGER'),
    "failOnDataLoss": 'false',
}
# Default options for the stream writer (checkpoint directory).
STREAM_WRITER_DEFAULT_OPTIONS = {
    'checkpointLocation': os.environ.get('CHECKPOINT_LOCATION'),
}
# Baseline SparkConf entries; 'local[*]' runs everything in-process.
SPARK_CONF_DEFAULT = [
    ('spark.app.name', ''),
    # https://spark.apache.org/docs/latest/submitting-applications.html#master-urls
    ('spark.master', 'local[*]'),
]
|
[
"[email protected]"
] | |
e84ec7fc630e4899800b726dad66cfa35fc0a9bc
|
bf9059d1a04ba37d0e6548c5f09481aac5aa3b2e
|
/lesson/4.4.MaxCounters.py
|
c7f117fe90fc75fa50dbbbc27778930a18fdfddc
|
[] |
no_license
|
peistop/Codility
|
d61539b3c2c354370a90b23a42d6a60eda7a434a
|
d200ac8820d4a966e2b3f6e43e3d2d6bd2756755
|
refs/heads/master
| 2020-03-23T04:13:17.023669 | 2018-09-14T01:36:52 | 2018-09-14T01:36:52 | 141,070,791 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 787 |
py
|
"""
1. MaxCounters
Calculate the values of counters after applying all alternating operations:
increase counter by 1; set value of all counters to current maximum.
Medium: 100
"""
def solution(N, A):
# write your code in Python 3.6
if len(A) == 0:
return []
counters = {key + 1: 0 for key in range(N)}
tmp_Max, Max = 0, 0
for a in A:
if a < N + 1:
if counters[a] <= Max:
counters[a] = Max + 1
else:
counters[a] += 1
if counters[a] > tmp_Max:
tmp_Max = counters[a]
else:
Max = tmp_Max
for c in counters:
if counters[c] <= Max:
counters[c] = Max
return list(counters.values())
|
[
"[email protected]"
] | |
e58cfb3de60cd764e85865be181cffa7c8b60191
|
81f2653de884d9b27c4f921ee8b2761ef65098db
|
/main.py
|
8ded70c9b58c8deb91be512a7ffa6b4c6dbc216c
|
[] |
no_license
|
vnikaski/epidemic_simulation
|
fa2b084b2281ef34dd200b1bde971a9365afc1f7
|
8aba4ac026bfc4b35405964435512703f7adcba7
|
refs/heads/master
| 2021-05-19T13:40:37.081479 | 2020-03-31T20:55:47 | 2020-03-31T20:55:47 | 251,729,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,666 |
py
|
import numpy as np
from random import randint, random
from matplotlib import pylab
from matplotlib.animation import FuncAnimation
import argparse
def update_neighbours_for_cell(map: np.array, direction: str, i: int, j: int, r: int):
"""
Updates number of Moore's neighbours in the distance of r from the cell map[i,j]
:param map: map of states
:param direction: 'up', 'down', 'right', 'left'
:param i: row of the cell
:param j: column of the cell
:param r: radius of Moore's neighbourhood
:return: updated map: np.array
"""
a = 0 #sum of infected neighbours in given direction
for k in range(r):
b = k #parameter needed in the while loop to check for the edges of the map
c = k #same as above
if direction == 'up':
while j-b < 0:
b -= 1
while j+c+2 > len(map):
c -= 1
a = sum(map[i,j-b:j+c+2,0]==1)
elif direction == 'down':
while j-b-1 < 0:
b -= 1
while j+c+1 > len(map):
c -= 1
a = sum(map[i, j-b-1:j+c+1, 0]==1)
elif direction == 'left':
while i - b - 1 < 0:
b -= 1
while i + c + 1 > len(map):
c -= 1
a = sum(map[i-b-1:i+c+1, j, 0]==1)
elif direction == 'right':
while i-b < 0:
b -= 1
while i+c+2 > len(map):
c -= 1
a = sum(map[i-b:i+c+2, j, 0]==1)
map[i,j,1] += a
return map
def update_neighbours(map: np.array, r: int):
"""
Goes through all of the map to update neighbours in every direction
:param map: np.array map of states
:param r: radius of infection
:return: updated map np.array
"""
for i in range(len(map)):
for j in range(len(map)):
map = update_neighbours_for_cell(map, 'up', i, j, r)
map = update_neighbours_for_cell(map, 'right', i, j, r)
map = update_neighbours_for_cell(map, 'down', i, j, r)
map = update_neighbours_for_cell(map, 'left', i, j, r)
return map
def main(N: int, k: int, p_w: float, p_z: float, M: int, r: int = 1):
"""
Creates simulation of a spreading infection on a square map. Each cell is in one of the three states:
0 - healthy, capable of getting infected
1 - infected, can spread the infection
2 - cured, no longer spreading, can't get infected
:param N: size of the edge of the square
:param k: number of first randomly infected cells
:param p_w: probability of curing the infection by an infected cell per epoch
:param p_z: probability of getting the infection an infected neighbour cell (changes with the number of infected neighbours)
:param M: number of epochs
:param r: radius of spreadage
"""
map = np.zeros((N,N,2)) #creating map; every cell has two dimensions: [state, number_of_infected_neighbours]
while k > 0: #choosing randomly k infected people
i = randint(0, N-1)
j = randint(0, N-1)
if map[i,j,0] == 0:
map[i,j,0] = 1
k -= 1
map = update_neighbours(map, r) #updating infecting neighbours after random infection
count = {0: [sum(sum(map[:, :, 0] == 0))], 1: [sum(sum(map[:, :, 0] == 1))], 2: [sum(sum(map[:, :, 0] == 2))]}
#preparing for data storage needed for the animation
maps = np.zeros((N, N, M))
maps[:, :, 0] = map[:, :, 0]
for e in range(M): #iterating through epochs
for i in range(N): #going through rows of the map; i = row in
for j in range(N):#going through columns of the map; j = column in
if map[i,j,0] == 0 and map[i,j,1]>0 and random() < 1-(1-p_z)**map[i,j,1]: #trying to infect cell with probability = 1-(1-p_z)
map[i,j,0] = 1
elif map[i,j,0] == 1 and random() < p_w: #trying to heal infected cell
map[i,j,0] = 2
update_neighbours(map, r)
#counting epoch stats
count[0].append(sum(sum(map[:, :, 0] == 0)))
count[1].append(sum(sum(map[:, :, 0] == 1)))
count[2].append(sum(sum(map[:, :, 0] == 2)))
#drawing and saving heatmaps of map state in the epoch
pylab.imshow(map[:,:,0])
pylab.savefig(f"map{e+1}")
pylab.clf()
#saving data for animation
maps[:,:,e] = map[:,:,0]
if sum(sum(map[:,:,0])) == (N**2)*2: #checking whether everyone is cured to end simulation
break
pylab.plot(count[0], label='healthy')
pylab.plot(count[1], label='infected')
pylab.plot(count[2], label='cured')
pylab.legend(loc='upper right')
pylab.xlabel('epoch')
pylab.savefig(f"plot.png")
pylab.clf()
#preparing for animation
fig = pylab.figure()
im = pylab.imshow(maps[:, :, 0])
def init():
im.set_data(np.zeros((N, N)))
def animate(i):
data = maps[:, :, i]
im.set_data(data)
return im
#animation
anim = FuncAnimation(fig, animate, init_func=init, frames=M, repeat=False)
anim.save('spreading.gif', writer='imagemagick')
"""
Była próba wykorzystania biblioteki argparse jednak z poziomu terminala wykrywało dziwne błędy w kodzie, których normalnie nie było + nie widziało biblioteki numpy?
Możliwe, że wyhashowany kod działa, ale nie na moim komputerze, więc wykorzystałam niepreferowane rozwiązanie
"""
#parser = argparse.ArgumentParser()
#parser.add_argument("N", help="size of the map",type=int)
#parser.add_argument("k", help="number of infected cells",type=int)
#parser.add_argument("p_w", help="probability of curing the infection",type=float)
#parser.add_argument("p_z", help="probability of spreading the infection",type=float)
#parser.add_argument("M", help="number of epochs",type=int)
#parser.add_argument("r", help="radius of spreadage",type=int)
#args = parser.parse_args()
#main(args)
#Getting the data for simulation from the user
N = int(input("Set the size of the map (N): "))
k = int(input("Set the number of infected cells (k): "))
p_w = float(input("Set the probability of curing infection (p_w): "))
p_z = float(input("Set the probability of getting infected (p_z): "))
M = int(input("Set how many epochs should the simulation take (M): "))
r = input("Set the radius of spreading the infection (r), if not provided: r=1: ")
if r =='':
main(N,k,p_w,p_z,M)
else:
main(N,k,p_w,p_z,M,int(r))
|
[
"[email protected]"
] | |
c4f4c920eb95280fe07fd2f0e167237bbef954b7
|
3dd44550b479890bc13965b04f85ee568b22ba30
|
/PREP/L7 - Lists&Tuples in Python-Part 2.py
|
bbb63062a1822401ecd4ea49a1336026a1158474
|
[] |
no_license
|
BedirT/Python-Class-2019
|
5e41b1dd76ddc86c2a5d023e3df0bf4c9afda5a2
|
97f30ca9ef7c565cbf32b257b34cfa2ae2eaf3f3
|
refs/heads/master
| 2020-04-22T14:13:02.619234 | 2019-04-19T22:13:27 | 2019-04-19T22:13:27 | 170,435,736 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 539 |
py
|
"""
Lecture 7: Lists & Tuples Part 2
Momentum Learning
Introduction to Python
M. Bedir Tapkan
"""
# Prepending or Appending Items to a List
# You can't append one item, you have to append a list
# ls += 20 # Wrong
# Methods That Modify a List
# append
# ls.append(<obj>)
# We also can append a list
# extend
# ls.extend(<obj>)
# insert
# ls.insert(<index>, <obj>)
# remove
# pop(index=-1)
# Lists are dynamic.
# Python Tuples
# Unlike lists tuples are immutable
# t[0] = 'd'
# Tuple Assignment, Packing, and Unpacking
|
[
"[email protected]"
] | |
d269849313594626cefa3b6fccafcc19ca0a84c5
|
ab951846417e1fa57d12a9f28975c723940ef265
|
/main.py
|
4dd179705898aa647726584c587462e0ef28f5d6
|
[] |
no_license
|
arivvid27/Task-Manager
|
72991eb8e5d647efcbf94fa58651a8db4444d1e7
|
a2e51821c0581b702ccd338b3cd503d4e15d151d
|
refs/heads/master
| 2023-06-11T07:20:29.411924 | 2021-07-02T20:17:34 | 2021-07-02T20:17:34 | 383,537,330 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,617 |
py
|
import os
from replit import db
from time import sleep
Admin_user = os.environ['Admin_user']
Admin_password = os.environ['Admin_password']
username = input('Account Username? > ')
if username == Admin_user:
pass_check = input('What is the password? > ')
if pass_check == Admin_password:
print("Access Granted")
option = input('Would you like to delete or add? > ')
if option == 'delete':
print('Ok!')
print('Here are all the usernames and passwords to delete:')
value = db.values()
print(value)
elif username not in db:
new_account = input(f'This account name does not exist, would you like to make an account using the name, {username}? > ')
if new_account == 'yes':
print('Ok!')
db["username"] = username
new_password = input('What would you like your password to be? > ')
db['password'] = new_password
sleep(2)
print('Your Account is now set!')
print('Restart to login to your account.')
exit(120)
elif new_account == 'no':
print('Ok!')
print('Quitting...')
sleep(2)
exit(120)
elif username == db:
password = input('Account Password? > ')
if password != db:
password_nomatch = input('This password is incorrect, would you like to reset your password? > ')
if password_nomatch == 'yes':
del db["password"]
print('Ok!')
password_reset = input("What would you like your password to be? > ")
db["password"] = password_reset
print('Your password is now reset.')
print('Restart to login to your account.')
exit(120)
elif password_nomatch == 'no':
print('Ok!')
print('Restart the program to get another try at logging in.')
exit(120)
|
[
"[email protected]"
] | |
703a4ce0e381a16155fc695889d3ab6e62f0261b
|
2cee87831997f906ff0b1972cb72e294eadabb56
|
/dags/my_dag.py
|
dc59b47a0734921c1e23856350dc63fe2ed3ba6b
|
[] |
no_license
|
Cattleman/airflow_play
|
2c643b5021379c8974111cc83466ef7386a09016
|
b64b2ccabe4eac79aee1986540c8071f9df5cf30
|
refs/heads/master
| 2022-07-24T21:29:03.949536 | 2020-05-17T00:24:57 | 2020-05-17T00:24:57 | 264,552,843 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,733 |
py
|
# Airflow related
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
# other packages
from datetime import datetime
from datetime import timedelta
# DS stack
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Set up dfault args
default_args = {
'owner': 'meemoo',
'depends_on_past': False,
'start_date': datetime(2018, 9, 1),
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(seconds=5),
}
# Sets of callables used by python operator
def prep_data() -> pd.DataFrame:
_df = pd.DataFrame({"col1": [1,2,3], "col2": [2,4,6]})
_df.to_csv("../data/raw.csv")
def data_process_double()-> pd.DataFrame:
_df = pd.read_csv("../data/raw.csv")
_df["col3"] = _df["col1"] * 2
_df["col4"] = _df["col2"] * 2
_df.to_csv("../data/processed.csv")
def save_out_plot() -> None:
plt.scatter(df["col1"],df["co2"], c='r', label="col1,2")
plt.scatter(df["col3"], df["col4"], c='b', label="col3,4")
plt.legend()
plt.savefig(f'../plots/my_plot.png')
with DAG('simple_plot_dag',
default_args=default_args,
schedule_interval='*/2 * * * *'
) as dag:
opr_prep_data = PythonOperator(
task_id='prep_data',
python_callable=prep_data
)
opr_data_process = PythonOperator(
task_id='data_process_double',
python_callable=data_process_double
)
opr_save_plot = PythonOperator(
task_id='save_plot',
python_callable=save_out_plot
)
opr_prep_data >> opr_data_process >> opr_save_plot
|
[
"[email protected]"
] | |
a1b2a37c189f9f9b1cfd4951375d3f8f54555c37
|
b05de4075070be000ae46e83aa2f8c16dba92873
|
/HW3/rkolhe_Q4.py
|
9477e602a00de6d616d81c8d58270b0ec4b7fecc
|
[] |
no_license
|
indu2407/ncsu-csc591-privacy
|
5ef2b51233f77457a1627d3d605b13c62feb9cb4
|
6481a89124ec8046f4af9d949c2f2144bda05d12
|
refs/heads/master
| 2023-03-15T02:25:30.380189 | 2020-03-16T01:18:28 | 2020-03-16T01:18:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,721 |
py
|
"""Reads a har file from the filesystem, converts to CSV, then dumps to
stdout.
"""
import argparse
import json
from urllib.parse import urlparse
from tld import get_tld
import networkx as nx
import matplotlib.pyplot as plt
from adblockparser import AdblockRules
def readfile(filename):
rules = []
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
rules.append(line)
return rules
def main(harfile_path):
"""Reads a har file from the filesystem, converts to CSV, then dumps to
stdout.
"""
txt_file = 'easylist.txt'
raw_rules = readfile(txt_file)
harfile = open(harfile_path, encoding = 'UTF-8')
harfile_json = json.loads(harfile.read())
i = 0
first_party = harfile_path.split('.')[1]+'.'+harfile_path.split('.')[2]
rules = AdblockRules(raw_rules)
blocked = 0
blocked_domains = set()
opt = {'script': True,'image':True,'stylesheet':True,'object':True,'subdocument':True,'xmlhttprequest':True,'websocket':True,'webrtc':True,'popup':True,'generichide':True,'genericblock':True}
for entry in harfile_json['log']['entries']:
i = i + 1
url = entry['request']['url']
urlparts = urlparse(entry['request']['url'])
size_bytes = entry['response']['bodySize']
size_kilobytes = float(entry['response']['bodySize'])/1024
mimetype = 'unknown'
if 'mimeType' in entry['response']['content']:
mimetype = entry['response']['content']['mimeType']
option = ''
res = get_tld(url, as_object=True)
mime_opt = mimetype.split('/')[0]
if mime_opt in opt:
option = mime_opt
if res.fld != first_party and option in opt and rules.should_block(url, {option: opt[option]}):
blocked += 1
blocked_domains.add(res.fld)
blocked_domains = [dom for dom in blocked_domains] if blocked_domains else 'No domains blocked'
print(f'\nSite: {first_party}\n# of total HTTP requests: {i}\n# of HTTP requests blocked: {blocked}\nBlocked domains: {blocked_domains}\n')
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
prog='parsehar',
description='Parse .har files into comma separated values (csv).')
argparser.add_argument('harfile', type=str, nargs=3,
help='path to harfile to be processed.')
args = argparser.parse_args()
# main(args.harfile[0])
for idx in range(3):
harfile_path = args.harfile[idx]
first_party = harfile_path.split('.')[1]+'.'+harfile_path.split('.')[2]
print(f'\nProcessing {first_party}...')
main(harfile_path)
print(f'Completed {first_party}!')
|
[
"[email protected]"
] | |
fe349e425c50cb0f517d63394f143894ac478af4
|
12e90368507792ef0ed9152d0b23852979455f95
|
/src/run.py
|
a4a573d5a364ca56397c4bae87218ec87c162b40
|
[] |
no_license
|
KomorIksDe/KocchiBot--Twitch
|
a8dd8c670b4924e94729b26b86d5d61de47e901f
|
075332eae8a569a12e1e729992550f5be8ed14ac
|
refs/heads/master
| 2021-01-09T05:56:54.845886 | 2017-02-03T21:37:49 | 2017-02-03T21:37:49 | 80,871,209 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 632 |
py
|
import socket
import string
import time
from cfg import *
from bot import readMessage
sock = initSocket()
start = int(time.time())
while True:
for line in str(sock.recv(1024)).split('\\r\\n'):
parts = line.split(':')
if len(parts) < 3:
continue
if "QUIT" not in parts[1] and "JOIN" not in parts[1] and "PART" not in parts[1]:
message = parts[2][:len(parts[2])]
usernamesplit = parts[1].split("!")
username = usernamesplit[0]
readMessage(sock, message, username, start)
timePassed = time.time()
|
[
"[email protected]"
] | |
4dd509fbf559746d31fc3753d3ebf3f12f580c3d
|
c7598e64bc54c5aa437d7867db00c90090eb228e
|
/RG_average_user.py
|
3c5e8eaf74ee278ed897f221ce49c03617f09272
|
[] |
no_license
|
lucaleporini/FixationSaccadesAnalysis
|
43d98c280589efa73ff60b4b46b93646ab601ccc
|
e682d5ae251326dc8b3278cc75d85cf9d46f84b6
|
refs/heads/master
| 2022-04-26T01:28:21.346144 | 2020-04-29T14:41:45 | 2020-04-29T14:41:45 | 259,948,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,443 |
py
|
"""
QUESTO SCRIPT PYTHON PERMETTE DI TRASFORMARE I RAW DATA OTTENUTI DA EYE TRACKER
VENGONO PULITI DA VALORI DI BLINK (0,0) E DA VALORI NON AMMESSI O FUORI DALLO SCHERMO
VENGONO CALCOLATE DISTRIBUZIONI SULLA BASE DELLA MEDIA DEGLI UTENTI --> SOMMA DELLE FREQUENZE / N° DI UTENTI
"""
# OBIETTIVO: individuare statistiche dell'utente medio che osserva il video
# stats_raw_data_cleaned.pkl --> file che presente le statistiche dei singoli utenti per ogni video
# sotto forma di dizionario
# Ogni file.pkl rappresenta un determinato video. In ogni file.pkl è presente il seguente formato:
# {
# "video_1": {
# "user_1": {
# "raw_data_sublists": ...
# "blink": ...
# "time_fixations": ...
# "width_saccades": ...
# "saccades_directions": ...
# },
# "user_N": { ... }
# },
# "video_N": { ... }
# }
#
#
# "mean_user_time_fixations" rappresenta i tempi di fissazione dell'utente medio. Presenta il seguente formato:
# { K0: V0, ... , Kn: Vn} dove:
# -> Ki rappresenta i tempi di fissazione in secondi (0.04, 0.08, ....)
# -> Vi rappresenta la frequenza di fissazioni con tempo Ki dell'utente medio
#
#
# "mean_user_saccades_width" rappresenta le ampiezze delle saccadi dell'utente medio. Presenta il seguente formato:
# { K0: V0, ... , Kn: Vn} dove:
# -> Ki rappresenta l'intervallo di distanze (Ki-1, Ki]. K0 rappresenta un intervallo (0, K0]
# -> Vi rappresenta la frequenza di ampiezze delle saccade che ricando nell'intervallo definito da Ki
# parametri per l'esecuzione delle statistiche
# list_users (dict) --> lista di utenti sulla quale effettuare le statistiche
# step_sw (int) --> bins per la creazione della distribuzione delle ampiezze delle saccadi --> distanza euclidea
# step_sd (int) --> bins per la creazione della distribuzione delle direzioni delle saccadi --> gradi
import pprint
def run(stats_video, list_users, step_sw, step_sd):
# print("compute RG_average_user.py")
# fattore di normalizzazione per le statistiche
n_users = len(list_users)
# statistche per l'utente medio
mean_users_stats = {}
# fissazioni dell'utente MEDIO rispetto al singolo video
mean_user_time_fixations = {}
mean_user_blink = {}
mean_user_saccades_width = {}
mean_user_saccades_directions = {}
for k_user in list(list_users.keys()):
# caricamento di "mean_user_time_fixations"
# somma cumultate delle frequenze dei tempi di fissazione (espressi in secondi)
for k_fixation in stats_video[k_user]["time_fixations"].keys():
if mean_user_time_fixations.get(k_fixation) is None:
mean_user_time_fixations[k_fixation] = stats_video[k_user]["time_fixations"][k_fixation]
else:
mean_user_time_fixations[k_fixation] += stats_video[k_user]["time_fixations"][k_fixation]
""" caricamento di "mean_user_blink"
# somma cumultate delle frequenze dei tempi di blink (espressi in secondi)
for k_blink in stats_video[k_user]["blink"].keys():
if mean_user_blink.get(k_blink) is None:
mean_user_blink[k_blink] = stats_video[k_user]["blink"][k_blink]
else:
mean_user_blink[k_blink] += stats_video[k_user]["blink"][k_blink]"""
# popolamento delle ampiezze delle saccadi di tutti gli utenti --> CUMULATA
for k in list(stats_video[k_user]["saccades_width"].keys()):
if mean_user_saccades_width.get(k) is None:
mean_user_saccades_width[k] = stats_video[k_user]["saccades_width"][k]
else:
mean_user_saccades_width[k] += stats_video[k_user]["saccades_width"][k]
# popolamento delle direzioni delle saccadi --> CUMULATA
for k in stats_video[k_user]["saccades_directions"].keys():
if mean_user_saccades_directions.get(k) is None:
mean_user_saccades_directions[k] = stats_video[k_user]["saccades_directions"][k]
else:
mean_user_saccades_directions[k] += stats_video[k_user]["saccades_directions"][k]
# ---------------------------------------------------------------------------------------------------
# TIME FIXATION
# normalizzo per il numero di utenti
mean_user_time_fixations = {k: v / n_users for k, v in mean_user_time_fixations.items()}
# ordino il dizionario per chiave
mean_user_time_fixations = dict(sorted(mean_user_time_fixations.items(), key=lambda k: k[0]))
# aggiungo al dict risultante
mean_users_stats["mean_user_time_fixations"] = mean_user_time_fixations
# ---------------------------------------------------------------------------------------------------
"""
# BLINK
# normalizzo per il numero di utenti
mean_user_blink = {k: v / n_users for k, v in mean_user_blink.items()}
# ordino il dizionario per chiave
mean_user_blink = dict(sorted(mean_user_blink.items(), key=lambda k: k[0]))
# aggiungo al dict risultante
mean_users_stats["mean_user_blink"] = mean_user_blink
"""
# ---------------------------------------------------------------------------------------------------
# SACCADES AMPLITUDES
# normalizzo per il numero di utenti
mean_user_saccades_width = {k: v / n_users for k, v in mean_user_saccades_width.items()}
# ordino il dizionario per chiave
mean_user_saccades_width = dict(sorted(mean_user_saccades_width.items(), key=lambda k: k[0][0]))
# aggiungo al dict risultante
mean_users_stats["mean_user_saccades_width"] = mean_user_saccades_width
# ---------------------------------------------------------------------------------------------------
# SACCADES DIRECTIONS
# normalizzo per il numero di utenti
mean_user_saccades_directions = {k: v / n_users for k, v in mean_user_saccades_directions.items()}
# ordino il dizionario per chiave
mean_user_saccades_directions = dict(sorted(mean_user_saccades_directions.items(), key=lambda k: k[0][0]))
# aggiungo al dict risultante
mean_users_stats["mean_user_saccades_directions"] = mean_user_saccades_directions
# ---------------------------------------------------------------------------------------------------
return mean_users_stats
|
[
"[email protected]"
] | |
3407ab894c95d7feda5e091ba5fdbbe5e030a782
|
229983fdd1475a4e38a24aeb97499fda7175622d
|
/moye/apps/users/apps.py
|
423ae4e5abf0d0efad2a88b1a5cef151fcfccf62
|
[] |
no_license
|
shangshanzhishui/moye
|
651a2e7ba6ae931531e0f8c3b74d4b5b88b9a891
|
0168316f313475f0bfcda06e2faf9e50dfd83bc1
|
refs/heads/master
| 2020-03-09T00:24:27.933179 | 2018-04-07T02:13:58 | 2018-04-07T02:13:58 | 128,487,410 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 146 |
py
|
# _*_ encoding: utf-8 _*_
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
verbose_name = u'用户信息'
|
[
"[email protected]"
] | |
c344c8404ac954642b6f02f8f20bca296c731bae
|
5fc6b5a420b9cb2a7d5102df55b0b5248f8199e1
|
/pypykatz/commons/winapi/local/function_defs/live_reader_ctypes.py
|
aa2bae8f03b5ebc283d8a225b8ccda4bdf88894b
|
[
"MIT"
] |
permissive
|
ASkyeye/pypykatz
|
8e1c598d57017fd400b9a8d830ed314be7562b96
|
8ad07f2f6f0c4904f9a77c711f693d6c794a7fb4
|
refs/heads/master
| 2021-07-03T13:48:34.350145 | 2020-11-14T22:50:30 | 2020-11-14T22:50:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,565 |
py
|
import os
import sys
import ctypes
import enum
import logging
from pypykatz import logger
from .ntdll import *
from .kernel32 import *
from .psapi import *
class WindowsMinBuild(enum.Enum):
WIN_XP = 2500
WIN_2K3 = 3000
WIN_VISTA = 5000
WIN_7 = 7000
WIN_8 = 8000
WIN_BLUE = 9400
WIN_10 = 9800
#utter microsoft bullshit commencing..
def getWindowsBuild():
class OSVersionInfo(ctypes.Structure):
_fields_ = [
("dwOSVersionInfoSize" , ctypes.c_int),
("dwMajorVersion" , ctypes.c_int),
("dwMinorVersion" , ctypes.c_int),
("dwBuildNumber" , ctypes.c_int),
("dwPlatformId" , ctypes.c_int),
("szCSDVersion" , ctypes.c_char*128)];
GetVersionEx = getattr( ctypes.windll.kernel32 , "GetVersionExA")
version = OSVersionInfo()
version.dwOSVersionInfoSize = ctypes.sizeof(OSVersionInfo)
GetVersionEx( ctypes.byref(version) )
return version.dwBuildNumber
DELETE = 0x00010000
READ_CONTROL = 0x00020000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
SYNCHRONIZE = 0x00100000
STANDARD_RIGHTS_REQUIRED = DELETE | READ_CONTROL | WRITE_DAC | WRITE_OWNER
STANDARD_RIGHTS_ALL = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE
if getWindowsBuild() >= WindowsMinBuild.WIN_VISTA.value:
PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
else:
PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFF
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
#https://msdn.microsoft.com/en-us/library/windows/desktop/ms683217(v=vs.85).aspx
def enum_process_names():
pid_to_name = {}
for pid in EnumProcesses():
if pid == 0:
continue
pid_to_name[pid] = 'Not found'
try:
process_handle = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, False, pid)
except Exception as e:
continue
pid_to_name[pid] = QueryFullProcessImageNameW(process_handle)
return pid_to_name
def get_lsass_pid():
pid_to_name = enum_process_names()
for pid in pid_to_name:
if pid_to_name[pid].lower().find('lsass.exe') != -1:
return pid
raise Exception('Failed to find lsass.exe')
def enum_lsass_handles():
#searches for open LSASS process handles in all processes
# you should be having SE_DEBUG enabled at this point
RtlAdjustPrivilege(20)
lsass_handles = []
sysinfohandles = NtQuerySystemInformation(16)
for pid in sysinfohandles:
if pid == 4:
continue
#if pid != GetCurrentProcessId():
# continue
for syshandle in sysinfohandles[pid]:
#print(pid)
try:
pHandle = OpenProcess(PROCESS_DUP_HANDLE, False, pid)
except Exception as e:
logger.debug('Error opening process %s Reason: %s' % (pid, e))
continue
try:
dupHandle = NtDuplicateObject(pHandle, syshandle.Handle, GetCurrentProcess(), PROCESS_QUERY_INFORMATION|PROCESS_VM_READ)
#print(dupHandle)
except Exception as e:
logger.debug('Failed to duplicate object! PID: %s HANDLE: %s' % (pid, hex(syshandle.Handle)))
continue
oinfo = NtQueryObject(dupHandle, ObjectTypeInformation)
if oinfo.Name.getString() == 'Process':
try:
pname = QueryFullProcessImageNameW(dupHandle)
if pname.lower().find('lsass.exe') != -1:
logger.info('Found open handle to lsass! PID: %s HANDLE: %s' % (pid, hex(syshandle.Handle)))
#print('%s : %s' % (pid, pname))
lsass_handles.append((pid, dupHandle))
except Exception as e:
logger.debug('Failed to obtain the path of the process! PID: %s' % pid)
continue
return lsass_handles
|
[
"[email protected]"
] | |
2c54bc3cfc9044c00be36b2b9217dea19a54c10e
|
25d24cc1caf00fc16fc8ddba88a92b05e2fcb25c
|
/hw1/bc.py
|
10e902273eac0a1ee5d4e2c3d73ecaf0556d644d
|
[
"MIT"
] |
permissive
|
doviettung96/homework
|
037de25c8ee93b1c82b72c52f5c410b14413e721
|
eb8e16f0fb4b2365408eadedf533dbf6d744bbe9
|
refs/heads/master
| 2020-04-25T05:14:51.512485 | 2019-04-25T01:27:49 | 2019-04-25T01:27:49 | 172,536,222 | 0 | 0 | null | 2019-02-25T15:52:44 | 2019-02-25T15:52:44 | null |
UTF-8
|
Python
| false | false | 2,772 |
py
|
import os
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
def build_model(training_data, env, config):
from keras.models import Sequential
from keras.layers import Dense, Lambda
from keras.optimizers import Adam
from sklearn.utils import shuffle
obs_mean, obs_std = np.mean(training_data['observations']), np.std(
training_data['observations'])
obs_dim = env.observation_space.shape[0]
actions_dim = env.action_space.shape[0]
model = Sequential([
Lambda(lambda obs: (obs - obs_mean) / obs_std),
Dense(64, activation='tanh'),
Dense(64, activation='tanh'),
Dense(actions_dim)
])
opt = Adam(lr=config['learning_rate'])
model.compile(optimizer=opt, loss='mse', metrics=['mse'])
x, y = shuffle(training_data['observations'], training_data['actions'].reshape(
[-1, actions_dim])) # because validation data is extracted before shuffling
model.fit(x, y, batch_size=128, validation_split=0.1,
epochs=config['epochs'], verbose=2)
return model
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('envname', type=str)
parser.add_argument("--epochs", type=int, default=30)
parser.add_argument('--render', action='store_true')
parser.add_argument("--max_timesteps", type=int)
args = parser.parse_args()
training_data = {}
print('loading training data')
with open(os.path.join('expert_data', args.envname + '.pkl'), 'rb') as f:
training_data = pickle.load(f)
print('loaded and start to train policy')
import gym
env = gym.make(args.envname)
max_steps = args.max_timesteps or env.spec.timestep_limit
config = {
'learning_rate': 1e-3,
'num_rollouts': 30,
'epochs': args.epochs
}
policy_fn = build_model(training_data, env, config)
returns = []
observations = []
actions = []
for i in range(config['num_rollouts']):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = policy_fn.predict(obs[None, :])
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if args.render:
env.render()
if steps % 100 == 0:
print("%i/%i" % (steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
4094d314cd1e46974e37b771806677fe883de139
|
7801f4405de1b21f40dcd3dc07d4dfcc512a5e0d
|
/backend/base/models.py
|
af602bad05d8f769ae45015d6e4bac4a25957517
|
[] |
no_license
|
ioannisgkouzionis/surgery-management
|
9420c87ff7a17edd929f7900b545a403ce2a07d0
|
b059cde784607c26cb770fcfc630a482c90a606c
|
refs/heads/master
| 2023-04-27T11:24:22.899106 | 2021-05-18T00:08:28 | 2021-05-18T00:08:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,991 |
py
|
from django.db import models
from django.contrib.auth.models import User
class Staff(models.Model):
id = models.AutoField(primary_key=True, editable=False)
firstName = models.CharField(max_length=200, null=True, blank=True)
lastName = models.CharField(max_length=200, null=True, blank=True)
email = models.CharField(max_length=200, null=False, blank=False)
phone = models.CharField(max_length=200, null=True, blank=True)
staffType = models.CharField(max_length=200, null=True, blank=True)
authCode = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return f'{self.id} | {self.firstName} | {self.lastName} | {self.staffType}'
class Patient(models.Model):
id = models.AutoField(primary_key=True, editable=False)
firstName = models.CharField(max_length=200, null=True, blank=True)
lastName = models.CharField(max_length=200, null=True, blank=True)
email = models.CharField(max_length=200, null=True, blank=True)
dob = models.DateField()
contactNumber = models.CharField(max_length=12, null=True, blank=True)
def __str__(self):
return f'{self.id} | {self.firstName} | {self.lastName} | {self.dob}'
class Room(models.Model):
id = models.AutoField(primary_key=True, editable=False)
roomName = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return f'{self.id} | {self.roomName}'
class Surgery(models.Model):
    """A scheduled surgery linking the requesting staff member, a room,
    a patient and the assigned doctors."""
    id = models.AutoField(primary_key=True, editable=False)
    # related_name='requested' keeps the reverse accessor distinct from the
    # 'doctors' many-to-many below, which also targets Staff.
    requestedBy = models.ForeignKey(Staff, on_delete=models.CASCADE, related_name='requested')
    # SET_NULL: deleting a Room must not delete its surgeries.
    room = models.ForeignKey(Room, on_delete=models.SET_NULL, null=True)
    # One-to-one: a Patient can have at most one Surgery row.
    patient = models.OneToOneField(Patient, on_delete=models.CASCADE)
    startDate = models.DateField()
    endDate = models.DateField()
    doctors = models.ManyToManyField(Staff, blank=True)

    def __str__(self):
        return f'{self.id} __ {self.requestedBy} __ {self.room} __ {self.patient}'
|
[
"[email protected]"
] | |
ccbd6c3d4162bd05e08640574311e4d27fb3a947
|
7ac0a328386478c615e395d9b4761543fa30e68b
|
/server/appElevation/config.py
|
e813c62639fb6058fc3f256414b4f83033cafecf
|
[] |
no_license
|
dzhen19/winterhacks20
|
2e6c47ad99bde51bfd0de3ae5f3b462cafb2c760
|
19b53bcf0e87c7b6fddefc7350790c6b5fb96f46
|
refs/heads/main
| 2023-06-23T21:04:48.293908 | 2021-08-02T15:15:09 | 2021-08-02T15:15:09 | 322,659,729 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 50 |
py
|
# SECURITY(review): hard-coded Google API key committed to source control.
# This credential should be revoked and loaded from an environment variable
# or a secrets manager instead of living in the repository.
api_key="AIzaSyAHDNSaU9mGMTLk2gb1tuAUWWo6MkCRlhk"
|
[
"[email protected]"
] | |
2522f1e735fd1af2dc665b46ccf68c5b5980e0a3
|
7223da9c7d2fb4d00b8323657aec81926ec03cdb
|
/ex31.py
|
479aa8a60372bf6ee8120ee7a900d59895827f49
|
[] |
no_license
|
diango/Lessons
|
dc72bf5fd2a76d393147a4721ad8407da8f11838
|
210e2e5a99cd50daaf1fb00ee9e618171ba7e2d9
|
refs/heads/master
| 2020-03-08T04:25:38.659824 | 2018-05-01T19:23:40 | 2018-05-01T19:23:40 | 127,921,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,119 |
py
|
"""Exercise 31: a small branching text adventure driven by input()."""

print("""You enter a dark room with two doors.
Do you go through door #1 or door #2?""")
door_choice = input("> ")

if door_choice == "1":
    # Door 1: the bear encounter.
    print("There's a giant bear here eating a cheese cake.")
    print("What do you do?")
    print("1. Take the cake.")
    print("2. Scream at the bear")
    reaction = input("> ")

    if reaction == "1":
        print("The bear eats your face off. Good Job!")
    elif reaction == "2":
        print("The bear eats your legs off. Good Job!")
    else:
        print(f"Well, doing {reaction} is probably better.")
        print("Bear runs away.")
elif door_choice == "2":
    # Door 2: staring into the abyss.
    print("You stare into the endless abyss at Chulhu's retina.")
    print("1. Blueberries.")
    print("2. Yellow jacket clothepins.")
    print("3. Understanding revolvers yelling melodies.")
    answer = input("> ")

    if answer in ("1", "2"):
        print("Your body survives powered by a mind of jello.")
        print("Good Job!")
    else:
        print("The insanity rots your eyes into a pool of muck.")
        print("Good job!")
else:
    print("you stumble around and fall on a knife and die. Good Job!")
|
[
"[email protected]"
] | |
84be026c4a9decd8c8cbeb0044e6269de46348c9
|
c383840367c09a4aa3762d224b17b742fe53eb31
|
/GANs_Advanced/DiscoGAN/train_DiscoGAN_org.py
|
081a29ab949a0e8e7a706e48f2d192a1060b2e74
|
[] |
no_license
|
qzq2514/GAN
|
04f3f1ff6437d6805369f28b207a8f726a112d11
|
a313deb08884c2ce60d4fc3834b79a8518e38f44
|
refs/heads/master
| 2020-09-21T17:32:00.913453 | 2020-01-17T05:02:18 | 2020-01-17T05:02:18 | 224,866,070 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,800 |
py
|
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util
import tensorflow.contrib.slim as slim
from DataLoader import Pix2Pix_loader
from net.DiscoGAN import DiscoGAN
import tensorflow as tf
import numpy as np
import scipy.misc
import os
# Pin training to the second GPU.
os.environ['CUDA_VISIBLE_DEVICES']='1'

# Input resolution and batch settings.
image_height = 64
image_width = 64
batch_size = 64
# Number of preview images written out at each sampling step.
sample_num = 10
# Total number of optimisation steps.
Train_Step = 30005
# Reconstruction-loss weight: weak for the first 10k steps, then stronger
# (see the schedule inside train()).
starting_rate = 0.01
change_rate = 0.5
learning_rate = 0.0002

# Paired (unsplit) edges2shoes data.
image_dir = "/media/cgim/data/GAN/data/edges2shoes/"
model_name = "DiscoGAN_1227"
model_path="/media/cgim/dataset/models/"+model_name
pb_path=os.path.join(model_path,"pb/")
ckpt_path=os.path.join(model_path,"ckpt/")
result_dir=model_path+"/result"
# Create the output directories on first run.
if not os.path.exists(result_dir):
    os.makedirs(result_dir)
if not os.path.exists(pb_path):
    os.makedirs(pb_path)
if not os.path.exists(ckpt_path):
    os.makedirs(ckpt_path)
def train():
    """Build the DiscoGAN graph and run the TF1 training loop.

    Resumes from the latest checkpoint in ckpt_path when one exists,
    alternates discriminator/generator updates, periodically writes sample
    translations to result_dir, and freezes the graph to a .pb plus a
    checkpoint shortly before Train_Step is reached.
    """
    # Graph inputs: NHWC image batches for domains A and B.
    input_A_place = tf.placeholder(tf.float32,shape=[None,image_height,image_width, 3],name="input_A")
    input_B_place = tf.placeholder(tf.float32, shape=[None, image_height,image_width, 3], name="input_B")
    # Defaults to inference mode unless overridden via the feed dict.
    is_training_place = tf.placeholder_with_default(False, shape=(),name="is_training")
    reconst_rate_place = tf.placeholder(tf.float32, shape=(),name="reconst_rate")
    discoGan = DiscoGAN(is_training_place,reconst_rate_place)
    G_loss,D_loss = discoGan.build_DiscoGAN(input_A_place,input_B_place)
    g_vars,d_vars = discoGan.get_vars()
    # Starts at -1 so the first increment yields step 0.
    global_step = tf.Variable(-1, trainable=False,name="global_step")
    global_step_increase = tf.assign(global_step, tf.add(global_step, 1))
    # Separate optimisers over the discriminator and generator variables.
    train_op_D = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(D_loss, var_list=d_vars)
    train_op_G = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(G_loss, var_list=g_vars)
    # Named output tensors for both directions (used when freezing the graph).
    A2B_out,ABA_out = discoGan.sample_generate(input_A_place, "A2B")
    A2B_output = tf.identity(A2B_out, name="A2B_output")
    B2A_out,BAB_out = discoGan.sample_generate(input_B_place, "B2A")
    B2A_output = tf.identity(B2A_out, name="B2A_output")
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Resume from the newest checkpoint if one is available.
        ckpt = tf.train.get_checkpoint_state(ckpt_path)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(ckpt_path, ckpt_name))
        _global_step = sess.run(global_step_increase)
        dataLoader = Pix2Pix_loader(image_dir, image_height, image_width,batch_size=batch_size,global_step=_global_step)
        while _global_step<Train_Step:
            # Reconstruction-loss schedule: weak weight for the first 10k
            # steps, then the stronger one.
            if _global_step<10000:
                reconst_rate = starting_rate
            else:
                reconst_rate = change_rate
            images_A,images_B = dataLoader.next_batch() #0~255
            feed_dict = {input_A_place:images_A,input_B_place:images_B,
                         is_training_place:True,reconst_rate_place:reconst_rate}
            # Discriminator trains every other step; generator every step.
            if _global_step%2==0:
                sess.run(train_op_D,feed_dict=feed_dict)
            sess.run(train_op_G, feed_dict=feed_dict)
            _global_step,_D_loss,_G_loss = sess.run([global_step,D_loss,G_loss],
                                                    feed_dict=feed_dict)
            if _global_step%50==0:
                print("Step:{},Reconst_rate:{},D_loss:{},G_loss:{}".format(_global_step,reconst_rate, _D_loss, _G_loss,))
            if _global_step%100==0:
                test_images_A, test_images_B = dataLoader.random_next_test_batch()
                #save result form A to B
                _A2B_output,_ABA_out = sess.run([A2B_output,ABA_out],feed_dict={input_A_place:test_images_A})
                # Map generator output from [-1, 1] back to [0, 255] for saving.
                _A2B_output = (_A2B_output + 1) / 2 * 255.0
                _ABA_out = (_ABA_out + 1) / 2 * 255.0
                for ind,trg_image in enumerate(_A2B_output[:sample_num]):
                    scipy.misc.imsave(result_dir + "/{}_{}_A.jpg".format(_global_step,ind),test_images_A[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_A2B.jpg".format(_global_step,ind), _A2B_output[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_ABA.jpg".format(_global_step, ind), _ABA_out[ind])
                # save result form B to A
                _B2A_output,_BAB_out = sess.run([B2A_output,BAB_out], feed_dict={input_B_place: test_images_B})
                _B2A_output = (_B2A_output + 1) / 2 * 255.0
                _BAB_out = (_BAB_out + 1) / 2 * 255.0
                for ind,trg_image in enumerate(_B2A_output[:sample_num]):
                    scipy.misc.imsave(result_dir + "/{}_{}_B.jpg".format(_global_step,ind),test_images_B[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_B2A.jpg".format(_global_step,ind), _B2A_output[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_BAB.jpg".format(_global_step, ind), _BAB_out[ind])
            # Shortly before the final step: freeze the graph and stop.
            if _global_step==Train_Step-5:
                # Save the frozen graph (.pb) with the two named output nodes.
                constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def,
                                                                           ["A2B_output","B2A_output"])
                save_model_name = model_name + "-" + str(_global_step) + ".pb"
                with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw:
                    fw.write(constant_graph.SerializeToString())
                # Save a regular checkpoint as well.
                saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=_global_step)
                print("Successfully saved model {}".format(save_model_name))
                return
            _global_step = sess.run(global_step_increase)
# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    train()
|
[
"[email protected]"
] | |
bfdf797a277a954891aa63c31ebe81a0ff9bd739
|
943db78bc97cab709cbb17e32b8172606d69b360
|
/async_handsmart.py
|
b92a26e6385b6afd68f1a5d3847ce41419d10c80
|
[] |
no_license
|
ns1202-j/hnd
|
bcb6e76c83037b3f9d298732c0831d495d44ba08
|
7ab993883580cfb727bb6023953736e18a6149bc
|
refs/heads/master
| 2023-06-11T20:07:16.500038 | 2021-07-09T21:07:12 | 2021-07-09T21:07:12 | 384,523,975 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,305 |
py
|
import time
import requests
import board
import busio
import asyncio
import threading
import urllib.request
import RPi.GPIO as GPIO
import adafruit_vl53l0x
from adafruit_vl53l0x import VL53L0X
import threading
from threading import Thread
# GPIO wiring (comments translated from the original Spanish).
# NOTE(review): no GPIO.setmode() call is visible in this file — confirm
# the pin-numbering mode is set elsewhere before these setups run.
GPIO.setup(26, GPIO.IN)  # entry signal
GPIO.setup(11, GPIO.OUT)  # "turn off entry signal" line
GPIO.setup(13, GPIO.IN)  # person at the entrance
GPIO.setup(6, GPIO.OUT)  # "turn off exit signal" line
GPIO.setup(5, GPIO.IN)  # exit signal (arduino2)
GPIO.setup(19, GPIO.IN)  # person at the exit
GPIO.setup(10, GPIO.OUT)  # "turn off opportunity signal" line
GPIO.setup(9, GPIO.IN)  # opportunity signal
oportunidad = GPIO.input(9)
# Startup: pulse pin 6 HIGH for 0.5s to clear any latched exit signal.
GPIO.setup(6, GPIO.OUT)
GPIO.output(6,GPIO.HIGH)
time.sleep(0.5)
GPIO.setup(6, GPIO.OUT)
GPIO.output(6,GPIO.LOW)
señalSalida = GPIO.input(5)
personaSalida = GPIO.input(19)
# Startup: pulse pin 11 HIGH for 0.5s to clear any latched entry signal.
GPIO.setup(11, GPIO.OUT)
GPIO.output(11,GPIO.HIGH)
time.sleep(0.5)
GPIO.setup(11, GPIO.OUT)
GPIO.output(11,GPIO.LOW)
personaEntrada = GPIO.input(13)
señalEntrada = GPIO.input(26)
def PostOport():
    """Report a missed hand-washing opportunity to the backend.

    Probes the endpoint with a GET and, only when it answers HTTP 200,
    sends the POST that records the opportunity. Timeouts and connection
    errors are swallowed so a network outage never kills the caller thread.
    """
    url = 'http://143.198.132.112/smarthh/novaspost_oportunidades.php'
    try:
        # BUG FIX: the original stored the whole Response object in
        # status_code, so the `== 200` check below was always False and
        # the POST was never actually sent. Compare the numeric code.
        status_code = requests.get(url, timeout=10).status_code
    except (requests.exceptions.ConnectTimeout,
            requests.exceptions.ConnectionError):
        # Sentinel meaning "endpoint unreachable"; skips the POST below.
        status_code = 3
    if status_code == 200:
        requests.post(url, data={'foo2': 'bar2'})
def ventana(i):
    """Advance the 1..5 window counter and fire one reporting thread.

    For i in 1..5 the counter moves to the next slot (wrapping 5 -> 1) and
    a PostOport worker thread is started; any other value passes through
    untouched. Returns the (possibly advanced) counter.
    """
    print(i)
    if i in (1, 2, 3, 4, 5):
        # Next slot, wrapping back to 1 after 5.
        i = i + 1 if i < 5 else 1
        worker = threading.Thread(target=PostOport)
        worker.start()
        print(i)
    print("siguiente thread")
    return i
def PostEfect():
    """Report an effective hand wash: log the opportunity, then the wash.

    Each endpoint is probed with a GET first and the POST is only sent when
    the probe answers HTTP 200; timeouts and connection errors are swallowed
    so the sensor threads never crash on network trouble.
    """
    # 1) Every wash also counts as an opportunity.
    url = 'http://143.198.132.112/smarthh/novaspost_oportunidades.php'
    try:
        # BUG FIX: compare the numeric status code — the original compared
        # the Response object itself against 200, which was always False,
        # so neither POST was ever sent.
        status_code = requests.get(url, timeout=10).status_code
    except (requests.exceptions.ConnectTimeout,
            requests.exceptions.ConnectionError):
        status_code = 3
    if status_code == 200:
        requests.post(url, data={'foo2': 'bar2'})
    # 2) Record the effective hand wash itself.
    url = 'http://143.198.132.112/smarthh/novaspost_efectivo.php'
    try:
        response = requests.get(url, timeout=5)
        # Preserved from the original: a 4xx/5xx answer raises HTTPError,
        # which is not caught here and therefore aborts the report.
        response.raise_for_status()
        status_code = response.status_code
    except (requests.exceptions.ConnectTimeout,
            requests.exceptions.ConnectionError):
        status_code = 3
    if status_code == 200:
        requests.post(url, data={'foo2': 'bar2'})
def apagarbomba():
    """Reset the pump line: a 0.4s HIGH pulse on pin 10, then back LOW."""
    for level, pause in ((GPIO.HIGH, 0.4), (GPIO.LOW, 0.1)):
        GPIO.output(10, level)
        time.sleep(pause)
def apagarentrada():
    """Busy-wait until the entry signal (pin 26) clears, then pulse pin 6.

    Sets the module-level flag ``k`` to 1 so the main loop stops spinning.
    NOTE(review): this 'entrada' helper pulses pin 6, which the startup
    code labels as the *exit* acknowledge line — confirm the pin
    assignment is intentional and not swapped with apagarsalida().
    """
    global k
    GPIO.setup(26, GPIO.IN)
    señalEntrada = GPIO.input(26)
    # Spin until the entry signal drops.
    while(señalEntrada == 1):
        GPIO.setup(26, GPIO.IN)
        señalEntrada = GPIO.input(26)
    GPIO.setup(5, GPIO.IN)
    señalSalida = GPIO.input(5)
    # Short 0.1s acknowledge pulse on pin 6.
    GPIO.setup(6, GPIO.OUT)
    GPIO.output(6,GPIO.HIGH)
    time.sleep(0.1)
    GPIO.setup(6, GPIO.OUT)
    GPIO.output(6,GPIO.LOW)
    time.sleep(0.1)
    k = 1
def apagarsalida():
    """Busy-wait until the exit signal (pin 5) clears, then pulse pin 11.

    Sets the module-level flag ``l`` to 1 so the main loop stops spinning.
    NOTE(review): this 'salida' helper pulses pin 11, which the startup
    code labels as the *entry* acknowledge line — confirm the pin
    assignment is intentional and not swapped with apagarentrada().
    """
    GPIO.setup(5, GPIO.IN)
    señalSalida = GPIO.input(5)
    global l
    # Spin until the exit signal drops.
    while(señalSalida == 1):
        GPIO.setup(5, GPIO.IN)
        señalSalida = GPIO.input(5)
    # Short 0.1s acknowledge pulse on pin 11.
    GPIO.setup(11, GPIO.OUT)
    GPIO.output(11,GPIO.HIGH)
    time.sleep(0.1)
    GPIO.setup(11, GPIO.OUT)
    GPIO.output(11,GPIO.LOW)
    time.sleep(0.1)
    l = 1
# --- Main sensor loop -------------------------------------------------
# flag: 1 when the hand-wash "opportunity" sensor fired; i: event counter;
# greenflag: one-shot arm so each acknowledge thread is started only once.
flag = 1
print("Iniciando...")
i = 1
while True:
    # Poll the entry (pin 26) and exit (pin 5) signals from the Arduinos.
    GPIO.setup(26, GPIO.IN)
    señalEntrada = GPIO.input(26)
    GPIO.setup(5, GPIO.IN)
    señalSalida = GPIO.input(5)
    if(señalEntrada == 1):
        # Someone triggered the entry sensor: sample the doorway sensors
        # and the hand-wash opportunity line.
        GPIO.setup(13, GPIO.IN)
        personaEntrada = GPIO.input(13)
        GPIO.setup(19, GPIO.IN)
        personaSalida = GPIO.input(19)
        oportunidad = GPIO.input(9)
        # flag records whether the hand-wash opportunity signal fired.
        if(oportunidad == 1):
            flag = 1;
        else:
            flag = 0;
        # Wait until the doorway is clear on both sides.
        while(personaEntrada == 1 or personaSalida == 1):
            personaEntrada = GPIO.input(13)
            personaSalida = GPIO.input(19)
        if(flag ==1):
            # Wash detected: reset the pump and report an effective wash.
            b = threading.Thread(target = apagarbomba)
            b.start()
            e = threading.Thread(target = PostEfect)
            e.start()
        else:
            # No wash: reset the pump and report a missed opportunity.
            b = threading.Thread(target = apagarbomba)
            b.start()
            f = threading.Thread(target = PostOport)
            f.start()
        greenflag = 1
        # 'global' is a no-op at module level; k and l are the completion
        # flags set by apagarentrada()/apagarsalida().
        global k
        global l
        k = 0
        l = 0
        # Fire the entry-acknowledge thread once, then spin until it sets k.
        while(k == 0):
            if(greenflag ==1):
                es = threading.Thread(target = apagarentrada)
                es.start()
                greenflag = 0
            else:
                pass
        greenflag = 1
        # Same one-shot pattern for the exit-acknowledge thread (sets l).
        while(l == 0):
            if(greenflag ==1):
                ec = threading.Thread(target = apagarsalida)
                ec.start()
                greenflag = 0
            else:
                pass
        greenflag = 1
        i = i + 1
    elif (señalSalida == 1):
        # Exit sensor only: wait for the doorway to clear, then run the
        # same acknowledge handshake (exit side first this time).
        GPIO.setup(13, GPIO.IN)
        personaEntrada = GPIO.input(13)
        GPIO.setup(19, GPIO.IN)
        personaSalida = GPIO.input(19)
        while(personaEntrada == 1 or personaSalida == 1):
            personaEntrada = GPIO.input(13)
            personaSalida = GPIO.input(19)
        greenflag = 1
        k = 0
        l = 0
        while(l == 0):
            if(greenflag ==1):
                er = threading.Thread(target = apagarsalida)
                er.start()
                greenflag = 0
            else:
                pass
        greenflag = 1
        while(k == 0):
            if(greenflag ==1):
                ee = threading.Thread(target = apagarentrada)
                ee.start()
                greenflag = 0
            else:
                pass
        greenflag = 1
    else:
        # Idle: hold both acknowledge lines low.
        GPIO.output(6,GPIO.LOW)
        GPIO.setup(11, GPIO.OUT)
        GPIO.output(11,GPIO.LOW)
|
[
"[email protected]"
] | |
ccd7c753cf3f1a7e04ca7b256c5f92fffcc69c25
|
3b2e30a6f082b4b21818eae44ea2f55fc25e7aa2
|
/project/cart/views.py
|
51a36c3411656d1a5ebb2b1e76ab2d20290d4d53
|
[] |
no_license
|
alekseykonotop/online_store_django
|
d9e9941ddedd783b38b5592ab2a3af5e35f0c2ee
|
183cb3680b5b8f90457ea144dafaa96c13a3433d
|
refs/heads/master
| 2020-07-30T09:13:57.449081 | 2019-11-07T19:46:58 | 2019-11-07T19:46:58 | 210,168,644 | 0 | 0 | null | 2020-06-05T23:07:09 | 2019-09-22T15:19:34 |
JavaScript
|
UTF-8
|
Python
| false | false | 942 |
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from store.models import Product, Category
from .cart import Cart
from .forms import CartAddProductForm
@require_POST
def cart_add(request, product_id):
    """Add (or update) a product in the session cart, then show the cart.

    An invalid form is silently ignored — the redirect happens either way.
    """
    session_cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data
        session_cart.add(
            product=product,
            quantity=data['quantity'],
            update_quantity=data['update'],
        )
    return redirect('cart:cart_detail')
def cart_remove(request, product_id):
    """Drop a product from the session cart and return to the cart page."""
    session_cart = Cart(request)
    target = get_object_or_404(Product, id=product_id)
    session_cart.remove(target)
    return redirect('cart:cart_detail')
def cart_detail(request):
    """Render the cart page with the current session cart."""
    return render(request, 'cart/detail.html', {'cart': Cart(request)})
|
[
"[email protected]"
] | |
42b0472c75cbd948eeebe12a39aafb7d5520a233
|
6e1544e3fd4089d21930155d717739ea3b4a2243
|
/boards/migrations/0002_topic_views.py
|
f7b47dc1c95160d7bcbfbbaa3e9fb8bbdd096156
|
[] |
no_license
|
Vivo1994/django-boards
|
55c969d342ff8d317705a52e3b92c09d7f9b5a69
|
18ff5ac6540ddf105b3c10792724343b77c8579f
|
refs/heads/master
| 2023-01-23T01:58:50.342384 | 2020-11-21T10:30:11 | 2020-11-21T10:30:11 | 314,772,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 377 |
py
|
# Generated by Django 3.1.2 on 2020-11-07 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a 'views' counter field to the Topic model."""

    dependencies = [
        ('boards', '0001_initial'),
    ]

    operations = [
        # Non-negative counter; existing rows start at 0.
        migrations.AddField(
            model_name='topic',
            name='views',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
[
"[email protected]"
] | |
f1d243252c282fc20b9b21eb64bdaddf232ed8df
|
89adb97f457c4730275d2eb4ac2e47e4860b878f
|
/mnoogle/jobs/migrations/0002_auto_20190409_0602.py
|
d4bfc21933ff31b84cee43bc5d41996c024ad5e1
|
[] |
no_license
|
ajay-banstola/Mnoogle
|
4bfe3da5bed62f2ec87c456108c883f1bf8dac8e
|
177f290d7ef8d080d9432630fcf29d0df78162ad
|
refs/heads/master
| 2022-10-29T21:05:20.887692 | 2019-04-10T15:55:50 | 2019-04-10T15:55:50 | 165,762,912 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 348 |
py
|
# Generated by Django 2.1.7 on 2019-04-09 00:17
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename the Jobs model field Job_Id to blog_Id."""

    dependencies = [
        ('jobs', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='jobs',
            old_name='Job_Id',
            new_name='blog_Id',
        ),
    ]
|
[
"[email protected]"
] | |
e3890f86efe95e867f60a04ad1fb1640b5b9c625
|
6a253ee7b47c5f70c826bbc97bb8e33cd1dab3b6
|
/4.Working with Dask Bags for Unstructured Data/Filtering vetoed bills.py
|
f6f1b993c692dc6f8cda3afb05d26a40595ed1aa
|
[] |
no_license
|
Mat4wrk/Parallel-Programming-with-Dask-in-Python-Datacamp
|
19a646d6d16ff46173964c25639ff923407c8f32
|
535f69b78adb50cffc7f402f81ddff19f853eea1
|
refs/heads/main
| 2023-03-06T19:52:39.495066 | 2021-02-13T13:27:06 | 2021-02-13T13:27:06 | 338,565,569 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 279 |
py
|
# Keep only the bills whose veto was overridden. (veto_override is
# presumably a predicate defined earlier in the exercise — not visible
# in this snippet.)
overridden = bills_dicts.filter(veto_override)

# .count() is lazy; .compute() forces evaluation and yields the tally.
print(overridden.count().compute())

# Project each surviving record down to its 'title' value.
titles = overridden.pluck('title')

# Materialise and print the titles.
print(titles.compute())
|
[
"[email protected]"
] | |
1a5893451ff5bbe931aadaa91de200ba69929bd2
|
98054c8b4b50ce31b5cfa37afd6513d97e279fb8
|
/recipes.py
|
6425ef887619c5e8179bf233e0f731d2a7543a43
|
[
"MIT"
] |
permissive
|
teichopsia/poe_craftingrecipes
|
0a38f0cdc005991a1ac0c51e334f3cb145f93c4f
|
1d19e1f950fbda6b6e0980cf3ffb06438216384c
|
refs/heads/master
| 2020-04-18T17:42:55.500604 | 2019-01-26T12:49:56 | 2019-01-26T12:49:56 | 167,662,335 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 118,064 |
py
|
mods = {
"Prefix":
{
"+# to maximum Life":
[
[1, '+(15 - 25) to maximum Life', '1<span class=\"alt\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(26 - 40) to maximum Life', '6<span class=\"transmute\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Beacon'],
[3, '+(41 - 55) to maximum Life', '2<span class=\"alch\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 10 - The Desecrated Chambers'],
[4, '+(56 - 70) to maximum Life', '4<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Port Map - Tier 5'],
[5, '+(71 - 85) to maximum Life', '4<span class=\"chaos\"></span>', ['bd'], 'Plateau Map - Tier 10'],
],
"#% increased Movement Speed":
[
[1, '(10 - 14)% increased Movement Speed', '3<span class=\"aug\"></span>', ['bo'], 'Act 2 - The Caverns'],
[2, '(15 - 19)% increased Movement Speed', '2<span class=\"chaos\"></span>', ['bo'], 'Act 5 - The Chamber of Innocence'],
[3, '(20 - 24)% increased Movement Speed', '4<span class=\"chaos\"></span>', ['bo'], 'Epilogue - Oriath'],
],
"#% of Physical Attack Damage Leeched as Life":
[
[1, '(0.3 - 0.5)% of Physical Attack Damage Leeched as Life', '3<span class=\"chance\"></span>', ['am', 'gl', 'qu', 'ri'], 'Cage Map - Tier 3'],
[2, '(0.6 - 0.8)% of Physical Attack Damage Leeched as Life', '4<span class=\"chance\"></span>', ['am'], 'Atzoatl - Sanctum of Immortality'],
],
"+# to maximum Mana":
[
[1, '+(25 - 34) to maximum Mana', '3<span class=\"aug\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(35 - 44) to maximum Mana', '6<span class=\"aug\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Beacon'],
[3, '+(45 - 54) to maximum Mana', '2<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 10 - The Desecrated Chambers'],
],
"#% increased Physical Damage":
[
[1, '(40 - 59)% increased Physical Damage', '4<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 3 - The Sewers'],
[2, '(60 - 79)% increased Physical Damage', '8<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 5 - The Ossuary'],
[3, '(80 - 99)% increased Physical Damage', '4<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 9 - The Quarry'],
[4, '(100 - 129)% increased Physical Damage', '1<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr'], 'Coral Ruins Map - Tier 10'],
],
"#% increased Spell Damage":
[
[1, '(37 - 51)% increased Spell Damage', '4<span class=\"alt\"></span>', ['tm'], 'Act 2 - The Ancient Pyramid'],
[2, '(52 - 66)% increased Spell Damage', '8<span class=\"alt\"></span>', ['tm'], "Act 6 - Shavronne's Tower (The Prison)"],
[3, '(67 - 81)% increased Spell Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Act 9 - The Boiling Lake'],
[4, '(82 - 99)% increased Spell Damage', '1<span class=\"exalt\"></span>', ['tm'], 'Siege Map - Tier 11'],
[1, '(25 - 34)% increased Spell Damage', '4<span class=\"alt\"></span>', ['om', 'or'], 'Act 2 - The Ancient Pyramid'],
[2, '(35 - 44)% increased Spell Damage', '8<span class=\"alt\"></span>', ['om', 'or'], "Act 6 - Shavronne's Tower (The Prison)"],
[3, '(45 - 54)% increased Spell Damage', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Act 9 - The Boiling Lake'],
[4, '(55 - 66)% increased Spell Damage', '1<span class=\"exalt\"></span>', ['om', 'or'], 'Siege Map - Tier 11'],
],
"#% increased Damage over Time":
[
[1, '(11 - 20)% increased Damage over Time', '6<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 7 - The Crypt'],
[2, '(21 - 30)% increased Damage over Time', '9<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Waterways Map - Tier 9'],
],
"#% increased Elemental Damage with Attack Skills":
[
[1, '(15 - 23)% increased Elemental Damage with Attack Skills', '6<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'be', 'qu', 'ri'], 'Act 3 - The Catacombs'],
[2, '(24 - 32)% increased Elemental Damage with Attack Skills', '4<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'be', 'qu'], 'Dunes Map - Tier 7'],
],
"#% increased Fire Damage":
[
[1, '(37 - 51)% increased Fire Damage', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Crematorium'],
[2, '(52 - 66)% increased Fire Damage', '6<span class=\"transmute\"></span>', ['tm'], 'Act 7 - The Dread Thicket'],
[3, '(67 - 81)% increased Fire Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Arid Lake Map - Tier 2'],
[1, '(25 - 34)% increased Fire Damage', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Act 3 - The Crematorium'],
[2, '(35 - 44)% increased Fire Damage', '6<span class=\"transmute\"></span>', ['om', 'or'], 'Act 7 - The Dread Thicket'],
[3, '(45 - 54)% increased Fire Damage', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Arid Lake Map - Tier 2'],
],
"#% increased Cold Damage":
[
[1, '(37 - 51)% increased Cold Damage', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Solaris Temple Level 2'],
[2, '(52 - 66)% increased Cold Damage', '6<span class=\"transmute\"></span>', ['tm'], 'Act 7 - The Causeway'],
[3, '(67 - 81)% increased Cold Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Excavation Map - Tier 3'],
[1, '(25 - 34)% increased Cold Damage', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Act 3 - The Solaris Temple Level 2'],
[2, '(35 - 44)% increased Cold Damage', '6<span class=\"transmute\"></span>', ['om', 'or'], 'Act 7 - The Causeway'],
[3, '(45 - 54)% increased Cold Damage', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Excavation Map - Tier 3'],
],
"#% increased Lightning Damage":
[
[1, '(37 - 51)% increased Lightning Damage', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Lunaris Temple Level 2'],
[2, '(52 - 66)% increased Lightning Damage', '6<span class=\"transmute\"></span>', ['tm'], 'Act 7 - The Chamber of Sins Level 1'],
[3, '(67 - 81)% increased Lightning Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Beach Map - Tier 4'],
[1, '(25 - 34)% increased Lightning Damage', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Act 3 - The Lunaris Temple Level 2'],
[2, '(35 - 44)% increased Lightning Damage', '6<span class=\"transmute\"></span>', ['om', 'or'], 'Act 7 - The Chamber of Sins Level 1'],
[3, '(45 - 54)% increased Lightning Damage', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Beach Map - Tier 4'],
],
"#% increased Chaos Damage":
[
[1, '(37 - 51)% increased Chaos Damage', '2<span class=\"chance\"></span>', ['tm'], 'Act 4 - The Belly of the Beast Level 2'],
[2, '(52 - 66)% increased Chaos Damage', '4<span class=\"chance\"></span>', ['tm'], 'Act 10 - The Ossuary'],
[3, '(67 - 81)% increased Chaos Damage', '2<span class=\"regal\"></span>', ['tm'], 'Atzoatl - Toxic Grove'],
[1, '(25 - 34)% increased Chaos Damage', '2<span class=\"chance\"></span>', ['om', 'or'], 'Act 4 - The Belly of the Beast Level 2'],
[2, '(35 - 44)% increased Chaos Damage', '4<span class=\"chance\"></span>', ['om', 'or'], 'Act 10 - The Ossuary'],
[3, '(45 - 54)% increased Chaos Damage', '2<span class=\"regal\"></span>', ['om', 'or'], 'Atzoatl - Toxic Grove'],
],
"#% increased Armour":
[
[1, '(34 - 45)% increased Armour', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '(45 - 55)% increased Armour', '1<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '(56 - 74)% increased Armour', '2<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Laboratory Map - Tier 8'],
],
"+# to Armour":
[
[1, '+(40 - 50) to Armour', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '+(60 - 80) to Armour', '1<span class=\"alch\"></span>', ['bd', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '+(180 - 250) to Armour', '2<span class=\"chaos\"></span>', ['bd', 'sh'], 'Laboratory Map - Tier 8'],
],
"#% increased Evasion Rating":
[
[1, '(34 - 45)% increased Evasion Rating', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '(45 - 55)% increased Evasion Rating', '1<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '(56 - 74)% increased Evasion Rating', '2<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Laboratory Map - Tier 8'],
],
"+# to Evasion Rating":
[
[1, '+(40 - 50) to Evasion Rating', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '+(60 - 80) to Evasion Rating', '1<span class=\"alch\"></span>', ['bd', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '+(180 - 250) to Evasion Rating', '2<span class=\"chaos\"></span>', ['bd', 'sh'], 'Laboratory Map - Tier 8'],
],
"#% increased Energy Shield":
[
[1, '(34 - 45)% increased Energy Shield', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '(45 - 55)% increased Energy Shield', '1<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '(56 - 74)% increased Energy Shield', '2<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Laboratory Map - Tier 8'],
],
"+# to maximum Energy Shield":
[
[1, '+(27 - 34) to maximum Energy Shield', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '+(35 - 45) to maximum Energy Shield', '1<span class=\"alch\"></span>', ['bd', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '+(56 - 69) to maximum Energy Shield', '2<span class=\"chaos\"></span>', ['bd', 'sh'], 'Laboratory Map - Tier 8'],
],
"#% increased maximum Energy Shield":
[
[1, '(10 - 14)% increased maximum Energy Shield', '1<span class=\"chaos\"></span>', ['am'], 'Relic Chambers Map - Tier 9'],
[2, '(15 - 20)% increased maximum Energy Shield', '2<span class=\"chaos\"></span>', ['am'], 'Lava Lake Map - Tier 15'],
],
"#% increased Armour and Evasion":
[
[1, '(34 - 45)% increased Armour and Evasion', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '(45 - 55)% increased Armour and Evasion', '1<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '(56 - 74)% increased Armour and Evasion', '2<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Laboratory Map - Tier 8'],
],
"#% increased Armour and Energy Shield":
[
[1, '(34 - 45)% increased Armour and Energy Shield', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '(45 - 55)% increased Armour and Energy Shield', '1<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '(56 - 74)% increased Armour and Energy Shield', '2<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Laboratory Map - Tier 8'],
],
"#% increased Evasion and Energy Shield":
[
[1, '(34 - 45)% increased Evasion and Energy Shield', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 6 - The Karui Fortress'],
[2, '(45 - 55)% increased Evasion and Energy Shield', '1<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Act 10 - The Feeding Trough'],
[3, '(56 - 74)% increased Evasion and Energy Shield', '2<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Laboratory Map - Tier 8'],
],
"Adds # to # Fire Damage":
[
[1, 'Adds (24 - 27) to (28 - 35) Fire Damage', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Crematorium'],
[2, 'Adds (50 - 60) to (63 - 73) Fire Damage', '2<span class=\"alch\"></span>', ['tm'], 'Act 7 - The Dread Thicket'],
[3, 'Adds (78 - 93) to (95 - 110) Fire Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Arid Lake Map - Tier 2'],
[1, 'Adds (16 - 18) to (19 - 23) Fire Damage', '4<span class=\"transmute\"></span>', ['om', 'or', 'tr'], 'Act 3 - The Crematorium'],
[2, 'Adds (33 - 40) to (42 - 49) Fire Damage', '2<span class=\"alch\"></span>', ['om', 'or', 'tr'], 'Act 7 - The Dread Thicket'],
[3, 'Adds (52 - 62) to (63 - 73) Fire Damage', '4<span class=\"chaos\"></span>', ['om', 'or', 'tr'], 'Arid Lake Map - Tier 2'],
],
"Adds # to # Fire Damage to Attacks":
[
[1, 'Adds (8 - 11) to (16 - 20) Fire Damage to Attacks', '4<span class=\"transmute\"></span>', ['am', 'ri'], 'Act 3 - The Crematorium'],
[2, 'Adds (12 - 17) to (26 - 30) Fire Damage to Attacks', '2<span class=\"alch\"></span>', ['am', 'ri'], 'Act 7 - The Dread Thicket'],
],
"Adds # to # Cold Damage":
[
[1, 'Adds (24 - 27) to (28 - 35) Cold Damage', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Solaris Temple Level 2'],
[2, 'Adds (50 - 60) to (63 - 73) Cold Damage', '2<span class=\"alch\"></span>', ['tm'], 'Act 7 - The Causeway'],
[3, 'Adds (78 - 93) to (95 - 110) Cold Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Excavation Map - Tier 3'],
[1, 'Adds (16 - 18) to (19 - 23) Cold Damage', '4<span class=\"transmute\"></span>', ['om', 'or', 'tr'], 'Act 3 - The Solaris Temple Level 2'],
[2, 'Adds (33 - 40) to (42 - 49) Cold Damage', '2<span class=\"alch\"></span>', ['om', 'or', 'tr'], 'Act 7 - The Causeway'],
[3, 'Adds (52 - 62) to (63 - 73) Cold Damage', '4<span class=\"chaos\"></span>', ['om', 'or', 'tr'], 'Excavation Map - Tier 3'],
],
"Adds # to # Cold Damage to Attacks":
[
[1, 'Adds (6 - 8) to (14 - 18) Cold Damage to Attacks', '4<span class=\"transmute\"></span>', ['am', 'ri'], 'Act 3 - The Solaris Temple Level 2'],
[2, 'Adds (11 - 15) to (23 - 27) Cold Damage to Attacks', '2<span class=\"alch\"></span>', ['am', 'ri'], 'Act 7 - The Causeway'],
],
"Adds # to # Lightning Damage":
[
[1, 'Adds (2 - 4) to (40 - 55) Lightning Damage', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Lunaris Temple Level 2'],
[2, 'Adds (5 - 8) to (90 - 120) Lightning Damage', '2<span class=\"alch\"></span>', ['tm'], 'Act 7 - The Chamber of Sins Level 1'],
[3, 'Adds (9 - 12) to (150 - 185) Lightning Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Beach Map - Tier 4'],
[1, 'Adds (1 - 3) to (27 - 37) Lightning Damage', '4<span class=\"transmute\"></span>', ['om', 'or', 'tr'], 'Act 3 - The Lunaris Temple Level 2'],
[2, 'Adds (3 - 5) to (60 - 80) Lightning Damage', '2<span class=\"alch\"></span>', ['om', 'or', 'tr'], 'Act 7 - The Chamber of Sins Level 1'],
[3, 'Adds (6 - 8) to (100 - 123) Lightning Damage', '4<span class=\"chaos\"></span>', ['om', 'or', 'tr'], 'Beach Map - Tier 4'],
],
"Adds # to # Lightning Damage to Attacks":
[
[1, 'Adds (1 - 4) to (32 - 36) Lightning Damage to Attacks', '4<span class=\"transmute\"></span>', ['am', 'ri'], 'Act 3 - The Lunaris Temple Level 2'],
[2, 'Adds (1 - 5) to (41 - 48) Lightning Damage to Attacks', '2<span class=\"alch\"></span>', ['am', 'ri'], 'Act 7 - The Chamber of Sins Level 1'],
],
"Adds # to # Physical Damage":
[
[1, 'Adds (9 - 12) to (18 - 21) Physical Damage', '4<span class=\"transmute\"></span>', ['tm', 'tr'], 'Act 3 - The Sewers'],
[2, 'Adds (11 - 15) to (23 - 27) Physical Damage', '2<span class=\"alch\"></span>', ['tm', 'tr'], 'Act 5 - The Ossuary'],
[3, 'Adds (18 - 24) to (36 - 42) Physical Damage', '4<span class=\"chaos\"></span>', ['tm', 'tr'], 'Act 9 - The Quarry'],
[1, 'Adds (6 - 8) to (13 - 15) Physical Damage', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Act 3 - The Sewers'],
[2, 'Adds (7 - 11) to (16 - 19) Physical Damage', '2<span class=\"alch\"></span>', ['om', 'or'], 'Act 5 - The Ossuary'],
[3, 'Adds (13 - 17) to (26 - 30) Physical Damage', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Act 9 - The Quarry'],
],
"Adds # to # Physical Damage to Attacks":
[
[1, 'Adds (3 - 5) to (6 - 8) Physical Damage to Attacks', '4<span class=\"transmute\"></span>', ['am', 'gl', 'qu', 'ri'], 'Act 3 - The Sewers'],
[2, 'Adds (5 - 7) to (8 - 10) Physical Damage to Attacks', '2<span class=\"chaos\"></span>', ['am', 'ri'], 'Act 5 - The Ossuary'],
],
"Adds # to # Chaos Damage to Attacks":
[
[1, 'Adds (6 - 8) to (14 - 18) Chaos Damage to Attacks', '6<span class=\"transmute\"></span>', ['am', 'gl', 'qu', 'ri'], 'Act 4 - The Belly of the Beast Level 2'],
[2, 'Adds (11 - 15) to (23 - 27) Chaos Damage to Attacks', '2<span class=\"chaos\"></span>', ['am', 'ri'], 'Act 10 - The Ossuary'],
],
"Adds # to # Fire Damage to Spells":
[
[1, 'Adds (16 - 21) to (31 - 36) Fire Damage to Spells', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Crematorium'],
[2, 'Adds (28 - 38) to (57 - 66) Fire Damage to Spells', '2<span class=\"alch\"></span>', ['tm'], 'Act 7 - The Dread Thicket'],
[3, 'Adds (41 - 55) to (83 - 96) Fire Damage to Spells', '4<span class=\"chaos\"></span>', ['tm'], 'Arid Lake Map - Tier 2'],
[1, 'Adds (12 - 16) to (23 - 27) Fire Damage to Spells', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Act 3 - The Crematorium'],
[2, 'Adds (21 - 28) to (42 - 49) Fire Damage to Spells', '2<span class=\"alch\"></span>', ['om', 'or'], 'Act 7 - The Dread Thicket'],
[3, 'Adds (31 - 41) to (61 - 71) Fire Damage to Spells', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Arid Lake Map - Tier 2'],
],
"Adds # to # Cold Damage to Spells":
[
[1, 'Adds (14 - 19) to (28 - 33) Cold Damage to Spells', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Solaris Temple Level 2'],
[2, 'Adds (26 - 34) to (52 - 60) Cold Damage to Spells', '2<span class=\"alch\"></span>', ['tm'], 'Act 7 - The Causeway'],
[3, 'Adds (38 - 50) to (75 - 88) Cold Damage to Spells', '4<span class=\"chaos\"></span>', ['tm'], 'Excavation Map - Tier 3'],
[1, 'Adds (10 - 13) to (19 - 22) Cold Damage to Spells', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Act 3 - The Solaris Temple Level 2'],
[2, 'Adds (17 - 23) to (34 - 40) Cold Damage to Spells', '2<span class=\"alch\"></span>', ['om', 'or'], 'Act 7 - The Causeway'],
[3, 'Adds (25 - 33) to (50 - 58) Cold Damage to Spells', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Excavation Map - Tier 3'],
],
"Adds # to # Lightning Damage to Spells":
[
[1, 'Adds (1 - 5) to (59 - 63) Lightning Damage to Spells', '4<span class=\"transmute\"></span>', ['tm'], 'Act 3 - The Lunaris Temple Level 2'],
[2, 'Adds (3 - 9) to (109 - 115) Lightning Damage to Spells', '2<span class=\"alch\"></span>', ['tm'], 'Act 7 - The Chamber of Sins Level 1'],
[3, 'Adds (4 - 13) to (159 - 168) Lightning Damage to Spells', '4<span class=\"chaos\"></span>', ['tm'], 'Beach Map - Tier 4'],
[1, 'Adds (1 - 4) to (39 - 42) Lightning Damage to Spells', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Act 3 - The Lunaris Temple Level 2'],
[2, 'Adds (2 - 6) to (73 - 77) Lightning Damage to Spells', '2<span class=\"alch\"></span>', ['om', 'or'], 'Act 7 - The Chamber of Sins Level 1'],
[3, 'Adds (3 - 9) to (106 - 112) Lightning Damage to Spells', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Beach Map - Tier 4'],
],
"#% faster start of Energy Shield Recharge":
[
[1, '(12 - 17)% faster start of Energy Shield Recharge', '2<span class=\"regal\"></span>', ['ri'], 'Lookout Map - Tier 4'],
[2, '(18 - 24)% faster start of Energy Shield Recharge', '1<span class=\"exalt\"></span>', ['ri'], 'Carcass Map - Tier 14'],
],
"Item drops on Death if Equipped by an Animated Guardian":
[
[1, 'Item drops on Death if Equipped by an Animated Guardian', '1<span class=\"chaos\"></span>', ['bd'], 'Atzoatl - Chamber of Iron'],
],
"Minions deal #% increased Damage":
[
[1, 'Minions deal (10 - 20)% increased Damage', '3<span class=\"chaos\"></span>', ['gl'], 'Act 8 - The Lunaris Temple Level 2'],
[1, 'Minions deal (14 - 25)% increased Damage', '4<span class=\"aug\"></span>', ['tm', 'tr'], 'Act 8 - The Lunaris Temple Level 2'],
[2, 'Minions deal (26 - 38)% increased Damage', '2<span class=\"alch\"></span>', ['tm', 'tr'], 'Phantasmagoria Map - Tier 6'],
[3, 'Minions deal (39 - 50)% increased Damage', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'Atzoatl - Hybridisation Chamber'],
[1, 'Minions deal (9 - 16)% increased Damage', '4<span class=\"aug\"></span>', ['om', 'or'], 'Act 8 - The Lunaris Temple Level 2'],
[2, 'Minions deal (17 - 25)% increased Damage', '2<span class=\"alch\"></span>', ['om', 'or'], 'Phantasmagoria Map - Tier 6'],
[3, 'Minions deal (26 - 33)% increased Damage', '3<span class=\"chaos\"></span>', ['om', 'or'], 'Atzoatl - Hybridisation Chamber'],
],
"#% increased Physical Damage<br>#% chance to Impale Enemies on Hit with Attacks":
[
[1, '(26 - 45)% increased Physical Damage<br>(10 - 12)% chance to Impale Enemies on Hit with Attacks', '3<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[2, '(46 - 65)% increased Physical Damage<br>(13 - 15)% chance to Impale Enemies on Hit with Attacks', '3<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[3, '(66 - 85)% increased Physical Damage<br>(16 - 17)% chance to Impale Enemies on Hit with Attacks', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[4, '(86 - 105)% increased Physical Damage<br>(18 - 20)% chance to Impale Enemies on Hit with Attacks', '8<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
],
"#% increased Physical Damage<br>#% chance to cause Bleeding on Hit":
[
[1, '(26 - 45)% increased Physical Damage<br>(10 - 12)% chance to cause Bleeding on Hit', '3<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[2, '(46 - 65)% increased Physical Damage<br>(13 - 15)% chance to cause Bleeding on Hit', '3<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[3, '(66 - 85)% increased Physical Damage<br>(16 - 17)% chance to cause Bleeding on Hit', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[4, '(86 - 105)% increased Physical Damage<br>(18 - 20)% chance to cause Bleeding on Hit', '8<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
],
"#% increased Physical Damage<br>#% chance to Blind Enemies on hit":
[
[1, '(26 - 45)% increased Physical Damage<br>(10 - 12)% chance to Blind Enemies on hit', '3<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[2, '(46 - 65)% increased Physical Damage<br>(13 - 15)% chance to Blind Enemies on hit', '3<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[3, '(66 - 85)% increased Physical Damage<br>(16 - 17)% chance to Blind Enemies on hit', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[4, '(86 - 105)% increased Physical Damage<br>(18 - 20)% chance to Blind Enemies on hit', '8<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
],
"#% increased Physical Damage<br>#% chance to Poison on Hit":
[
[1, '(26 - 45)% increased Physical Damage<br>(10 - 12)% chance to Poison on Hit', '3<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[2, '(46 - 65)% increased Physical Damage<br>(13 - 15)% chance to Poison on Hit', '3<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[3, '(66 - 85)% increased Physical Damage<br>(16 - 17)% chance to Poison on Hit', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[4, '(86 - 105)% increased Physical Damage<br>(18 - 20)% chance to Poison on Hit', '8<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
],
"#% increased Fire Damage<br>#% chance to Ignite":
[
[1, '(48 - 55)% increased Fire Damage<br>(12 - 14)% chance to Ignite', '3<span class=\"transmute\"></span>', ['tm', 'tr'], 'Veiled'],
[2, '(56 - 63)% increased Fire Damage<br>(15 - 17)% chance to Ignite', '3<span class=\"alch\"></span>', ['tm', 'tr'], 'Veiled'],
[3, '(64 - 72)% increased Fire Damage<br>(18 - 20)% chance to Ignite', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[4, '(73 - 80)% increased Fire Damage<br>(21 - 23)% chance to Ignite', '8<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[1, '(21 - 28)% increased Fire Damage<br>6% chance to Ignite', '3<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '(29 - 36)% increased Fire Damage<br>7% chance to Ignite', '3<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[3, '(37 - 44)% increased Fire Damage<br>(8 - 9)% chance to Ignite', '3<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
[4, '(45 - 52)% increased Fire Damage<br>(10 - 11)% chance to Ignite', '8<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"#% increased Cold Damage<br>#% chance to Freeze":
[
[1, '(48 - 55)% increased Cold Damage<br>(12 - 14)% chance to Freeze', '3<span class=\"transmute\"></span>', ['tm', 'tr'], 'Veiled'],
[2, '(56 - 63)% increased Cold Damage<br>(15 - 17)% chance to Freeze', '3<span class=\"alch\"></span>', ['tm', 'tr'], 'Veiled'],
[3, '(64 - 72)% increased Cold Damage<br>(18 - 20)% chance to Freeze', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[4, '(73 - 80)% increased Cold Damage<br>(21 - 23)% chance to Freeze', '8<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[1, '(21 - 28)% increased Cold Damage<br>6% chance to Freeze', '3<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '(29 - 36)% increased Cold Damage<br>7% chance to Freeze', '3<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[3, '(37 - 44)% increased Cold Damage<br>(8 - 9)% chance to Freeze', '3<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
[4, '(45 - 52)% increased Cold Damage<br>(10 - 11)% chance to Freeze', '8<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"#% increased Lightning Damage<br>#% chance to Shock":
[
[1, '(48 - 55)% increased Lightning Damage<br>(12 - 14)% chance to Shock', '3<span class=\"transmute\"></span>', ['tm', 'tr'], 'Veiled'],
[2, '(56 - 63)% increased Lightning Damage<br>(15 - 17)% chance to Shock', '3<span class=\"alch\"></span>', ['tm', 'tr'], 'Veiled'],
[3, '(64 - 72)% increased Lightning Damage<br>(18 - 20)% chance to Shock', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[4, '(73 - 80)% increased Lightning Damage<br>(21 - 23)% chance to Shock', '8<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[1, '(21 - 28)% increased Lightning Damage<br>6% chance to Shock', '3<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '(29 - 36)% increased Lightning Damage<br>7% chance to Shock', '3<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[3, '(37 - 44)% increased Lightning Damage<br>(8 - 9)% chance to Shock', '3<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
[4, '(45 - 52)% increased Lightning Damage<br>(10 - 11)% chance to Shock', '8<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"#% increased Chaos Damage<br>Chaos Skills have #% increased Skill Effect Duration":
[
[1, '(48 - 55)% increased Chaos Damage<br>Chaos Skills have (12 - 14)% increased Skill Effect Duration', '3<span class=\"transmute\"></span>', ['tm', 'tr'], 'Veiled'],
[2, '(56 - 63)% increased Chaos Damage<br>Chaos Skills have (15 - 17)% increased Skill Effect Duration', '3<span class=\"alch\"></span>', ['tm', 'tr'], 'Veiled'],
[3, '(64 - 72)% increased Chaos Damage<br>Chaos Skills have (18 - 20)% increased Skill Effect Duration', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[4, '(73 - 80)% increased Chaos Damage<br>Chaos Skills have (21 - 23)% increased Skill Effect Duration', '8<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[1, '(21 - 28)% increased Chaos Damage<br>Chaos Skills have (5 - 6)% increased Skill Effect Duration', '3<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '(29 - 36)% increased Chaos Damage<br>Chaos Skills have (7 - 8)% increased Skill Effect Duration', '3<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[3, '(37 - 44)% increased Chaos Damage<br>Chaos Skills have (9 - 10)% increased Skill Effect Duration', '3<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
[4, '(45 - 52)% increased Chaos Damage<br>Chaos Skills have (11 - 12)% increased Skill Effect Duration', '8<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"#% increased Spell Damage<br>#% increased Mana Regeneration Rate":
[
[1, '(48 - 55)% increased Spell Damage<br>(10 - 15)% increased Mana Regeneration Rate', '4<span class=\"transmute\"></span>', ['tm'], 'Veiled'],
[2, '(56 - 63)% increased Spell Damage<br>(16 - 20)% increased Mana Regeneration Rate', '6<span class=\"alt\"></span>', ['tm'], 'Veiled'],
[3, '(64 - 72)% increased Spell Damage<br>(21 - 25)% increased Mana Regeneration Rate', '4<span class=\"alch\"></span>', ['tm'], 'Veiled'],
[4, '(73 - 80)% increased Spell Damage<br>(26 - 30)% increased Mana Regeneration Rate', '4<span class=\"chaos\"></span>', ['tm'], 'Veiled'],
[1, '(21 - 28)% increased Spell Damage<br>(5 - 6)% increased Mana Regeneration Rate', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '(29 - 36)% increased Spell Damage<br>(7 - 9)% increased Mana Regeneration Rate', '6<span class=\"alt\"></span>', ['om', 'or'], 'Veiled'],
[3, '(37 - 44)% increased Spell Damage<br>(10 - 12)% increased Mana Regeneration Rate', '4<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[4, '(45 - 52)% increased Spell Damage<br>(13 - 15)% increased Mana Regeneration Rate', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"#% increased Spell Damage<br>Gain #% of Non-Chaos Damage as extra Chaos Damage":
[
[1, '(29 - 40)% increased Spell Damage<br>Gain 6% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"transmute\"></span>', ['tm'], 'Veiled'],
[2, '(41 - 52)% increased Spell Damage<br>Gain 8% of Non-Chaos Damage as extra Chaos Damage', '6<span class=\"alt\"></span>', ['tm'], 'Veiled'],
[3, '(53 - 63)% increased Spell Damage<br>Gain 10% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"alch\"></span>', ['tm'], 'Veiled'],
[4, '(64 - 75)% increased Spell Damage<br>Gain 12% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"chaos\"></span>', ['tm'], 'Veiled'],
[1, '(19 - 26)% increased Spell Damage<br>Gain 3% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '(27 - 34)% increased Spell Damage<br>Gain 4% of Non-Chaos Damage as extra Chaos Damage', '6<span class=\"alt\"></span>', ['om', 'or'], 'Veiled'],
[3, '(35 - 42)% increased Spell Damage<br>Gain 5% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[4, '(43 - 50)% increased Spell Damage<br>Gain 6% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"Minions deal #% increased Damage<br>Minions have #% increased maximum Life":
[
[1, 'Minions deal (19 - 25)% increased Damage<br>Minions have (19 - 25)% increased maximum Life', '4<span class=\"transmute\"></span>', ['tm', 'tr'], 'Veiled'],
[2, 'Minions deal (26 - 32)% increased Damage<br>Minions have (26 - 32)% increased maximum Life', '6<span class=\"alt\"></span>', ['tm', 'tr'], 'Veiled'],
[3, 'Minions deal (33 - 38)% increased Damage<br>Minions have (33 - 38)% increased maximum Life', '4<span class=\"alch\"></span>', ['tm', 'tr'], 'Veiled'],
[4, 'Minions deal (39 - 45)% increased Damage<br>Minions have (39 - 45)% increased maximum Life', '4<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[1, 'Minions deal (12 - 15)% increased Damage<br>Minions have (12 - 15)% increased maximum Life', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, 'Minions deal (16 - 19)% increased Damage<br>Minions have (16 - 19)% increased maximum Life', '6<span class=\"alt\"></span>', ['om', 'or'], 'Veiled'],
[3, 'Minions deal (20 - 24)% increased Damage<br>Minions have (20 - 24)% increased maximum Life', '4<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[4, 'Minions deal (25 - 28)% increased Damage<br>Minions have (25 - 28)% increased maximum Life', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"+#% to Non-Ailment Chaos Damage over Time Multiplier":
[
[1, '+(8 - 15)% to Non-Ailment Chaos Damage over Time Multiplier', '4<span class=\"transmute\"></span>', ['tm', 'tr'], 'Veiled'],
[2, '+(16 - 23)% to Non-Ailment Chaos Damage over Time Multiplier', '6<span class=\"alt\"></span>', ['tm', 'tr'], 'Veiled'],
[3, '+(24 - 32)% to Non-Ailment Chaos Damage over Time Multiplier', '2<span class=\"alch\"></span>', ['tm', 'tr'], 'Veiled'],
[4, '+(33 - 40)% to Non-Ailment Chaos Damage over Time Multiplier', '4<span class=\"chaos\"></span>', ['tm', 'tr'], 'Veiled'],
[1, '+(5 - 8)% to Non-Ailment Chaos Damage over Time Multiplier', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '+(9 - 12)% to Non-Ailment Chaos Damage over Time Multiplier', '6<span class=\"alt\"></span>', ['om', 'or'], 'Veiled'],
[3, '+(13 - 16)% to Non-Ailment Chaos Damage over Time Multiplier', '2<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[4, '+(17 - 20)% to Non-Ailment Chaos Damage over Time Multiplier', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"+#% to Cold Damage over Time Multiplier":
[
[1, '+(8 - 15)% to Cold Damage over Time Multiplier', '4<span class=\"transmute\"></span>', ['tm'], 'Veiled'],
[2, '+(16 - 23)% to Cold Damage over Time Multiplier', '6<span class=\"alt\"></span>', ['tm'], 'Veiled'],
[3, '+(24 - 32)% to Cold Damage over Time Multiplier', '2<span class=\"alch\"></span>', ['tm'], 'Veiled'],
[4, '+(33 - 40)% to Cold Damage over Time Multiplier', '4<span class=\"chaos\"></span>', ['tm'], 'Veiled'],
[1, '+(5 - 8)% to Cold Damage over Time Multiplier', '4<span class=\"transmute\"></span>', ['om', 'or'], 'Veiled'],
[2, '+(9 - 12)% to Cold Damage over Time Multiplier', '6<span class=\"alt\"></span>', ['om', 'or'], 'Veiled'],
[3, '+(13 - 16)% to Cold Damage over Time Multiplier', '2<span class=\"alch\"></span>', ['om', 'or'], 'Veiled'],
[4, '+(17 - 20)% to Cold Damage over Time Multiplier', '4<span class=\"chaos\"></span>', ['om', 'or'], 'Veiled'],
],
"Attacks with this Weapon Penetrate #% Elemental Resistances":
[
[1, 'Attacks with this Weapon Penetrate (8 - 10)% Elemental Resistances', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[2, 'Attacks with this Weapon Penetrate (11 - 13)% Elemental Resistances', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[3, 'Attacks with this Weapon Penetrate (14 - 16)% Elemental Resistances', '1<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
],
"Attacks with this Weapon Penetrate #% Chaos Resistance":
[
[1, 'Attacks with this Weapon Penetrate (8 - 10)% Chaos Resistance', '4<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[2, 'Attacks with this Weapon Penetrate (11 - 13)% Chaos Resistance', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
[3, 'Attacks with this Weapon Penetrate (14 - 16)% Chaos Resistance', '4<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Veiled'],
],
"#% increased Armour and Evasion<br>+# to maximum Life":
[
[1, '(20 - 25)% increased Armour and Evasion<br>+(10 - 14) to maximum Life', '4<span class=\"transmute\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[2, '(26 - 30)% increased Armour and Evasion<br>+(15 - 19) to maximum Life', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[3, '(31 - 35)% increased Armour and Evasion<br>+(20 - 23) to maximum Life', '2<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[4, '(36 - 40)% increased Armour and Evasion<br>+(24 - 28) to maximum Life', '4<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
],
"#% increased Armour and Energy Shield<br>+# to maximum Life":
[
[1, '(20 - 25)% increased Armour and Energy Shield<br>+(10 - 14) to maximum Life', '4<span class=\"transmute\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[2, '(26 - 30)% increased Armour and Energy Shield<br>+(15 - 19) to maximum Life', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[3, '(31 - 35)% increased Armour and Energy Shield<br>+(20 - 23) to maximum Life', '2<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[4, '(36 - 40)% increased Armour and Energy Shield<br>+(24 - 28) to maximum Life', '4<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
],
"#% increased Evasion and Energy Shield<br>+# to maximum Life":
[
[1, '(20 - 25)% increased Evasion and Energy Shield<br>+(10 - 14) to maximum Life', '4<span class=\"transmute\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[2, '(26 - 30)% increased Evasion and Energy Shield<br>+(15 - 19) to maximum Life', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[3, '(31 - 35)% increased Evasion and Energy Shield<br>+(20 - 23) to maximum Life', '2<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[4, '(36 - 40)% increased Evasion and Energy Shield<br>+(24 - 28) to maximum Life', '4<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
],
"#% increased Armour<br>+# to maximum Life":
[
[1, '(20 - 25)% increased Armour<br>+(10 - 14) to maximum Life', '4<span class=\"transmute\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[2, '(26 - 30)% increased Armour<br>+(15 - 19) to maximum Life', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[3, '(31 - 35)% increased Armour<br>+(20 - 23) to maximum Life', '2<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[4, '(36 - 40)% increased Armour<br>+(24 - 28) to maximum Life', '4<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
],
"#% increased Evasion Rating<br>+# to maximum Life":
[
[1, '(20 - 25)% increased Evasion Rating<br>+(10 - 14) to maximum Life', '4<span class=\"transmute\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[2, '(26 - 30)% increased Evasion Rating<br>+(15 - 19) to maximum Life', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[3, '(31 - 35)% increased Evasion Rating<br>+(20 - 23) to maximum Life', '2<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[4, '(36 - 40)% increased Evasion Rating<br>+(24 - 28) to maximum Life', '4<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
],
"#% increased Energy Shield<br>+# to maximum Life":
[
[1, '(20 - 25)% increased Energy Shield<br>+(10 - 14) to maximum Life', '4<span class=\"transmute\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[2, '(26 - 30)% increased Energy Shield<br>+(15 - 19) to maximum Life', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[3, '(31 - 35)% increased Energy Shield<br>+(20 - 23) to maximum Life', '2<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[4, '(36 - 40)% increased Energy Shield<br>+(24 - 28) to maximum Life', '4<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
],
"#% increased Armour, Evasion and Energy Shield<br>+# to maximum Life":
[
[1, '(20 - 25)% increased Armour, Evasion and Energy Shield<br>+(10 - 14) to maximum Life', '4<span class=\"transmute\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[2, '(26 - 30)% increased Armour, Evasion and Energy Shield<br>+(15 - 19) to maximum Life', '6<span class=\"alt\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[3, '(31 - 35)% increased Armour, Evasion and Energy Shield<br>+(20 - 23) to maximum Life', '2<span class=\"alch\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
[4, '(36 - 40)% increased Armour, Evasion and Energy Shield<br>+(24 - 28) to maximum Life', '4<span class=\"chaos\"></span>', ['bd', 'bo', 'gl', 'he', 'sh'], 'Veiled'],
],
"+# to maximum Mana<br># Mana Regenerated per second":
[
[1, '+(20 - 26) to maximum Mana<br>1 Mana Regenerated per second', '3<span class=\"aug\"></span>', ['am', 'ri'], 'Veiled'],
[2, '+(27 - 33) to maximum Mana<br>2 Mana Regenerated per second', '2<span class=\"alch\"></span>', ['am', 'ri'], 'Veiled'],
[3, '+(34 - 40) to maximum Mana<br>4 Mana Regenerated per second', '4<span class=\"chaos\"></span>', ['am', 'ri'], 'Veiled'],
],
"+# to maximum Mana<br>#% reduced Mana Cost of Skills":
[
[1, '+(20 - 26) to maximum Mana<br>3% reduced Mana Cost of Skills', '3<span class=\"aug\"></span>', ['am', 'ri'], 'Veiled'],
[2, '+(27 - 33) to maximum Mana<br>4% reduced Mana Cost of Skills', '2<span class=\"alch\"></span>', ['am', 'ri'], 'Veiled'],
[3, '+(34 - 40) to maximum Mana<br>5% reduced Mana Cost of Skills', '4<span class=\"chaos\"></span>', ['am', 'ri'], 'Veiled'],
],
"+# to maximum Mana<br>#% of Damage taken gained as Mana over 4 seconds when Hit":
[
[1, '+(20 - 26) to maximum Mana<br>4% of Damage taken gained as Mana over 4 seconds when Hit', '3<span class=\"aug\"></span>', ['am', 'ri'], 'Veiled'],
[2, '+(27 - 33) to maximum Mana<br>5% of Damage taken gained as Mana over 4 seconds when Hit', '2<span class=\"alch\"></span>', ['am', 'ri'], 'Veiled'],
[3, '+(34 - 40) to maximum Mana<br>6% of Damage taken gained as Mana over 4 seconds when Hit', '4<span class=\"chaos\"></span>', ['am', 'ri'], 'Veiled'],
],
"#% increased Movement Speed<br>#% increased Movement Speed if you haven't been Hit Recently":
[
[1, "(9 - 12)% increased Movement Speed<br>(6 - 9)% increased Movement Speed if you haven't been Hit Recently", '2<span class=\"chance\"></span>', ['bo'], 'Veiled'],
[2, "(13 - 16)% increased Movement Speed<br>(6 - 9)% increased Movement Speed if you haven't been Hit Recently", '2<span class=\"alch\"></span>', ['bo'], 'Veiled'],
[3, "(17 - 20)% increased Movement Speed<br>(6 - 9)% increased Movement Speed if you haven't been Hit Recently", '4<span class=\"chaos\"></span>', ['bo'], 'Veiled'],
],
"#% increased Movement Speed<br>#% chance to gain Onslaught for # seconds on Kill":
[
[1, '(9 - 12)% increased Movement Speed<br>(8 - 12)% chance to gain Onslaught for 4 seconds on Kill', '2<span class=\"chance\"></span>', ['bo'], 'Veiled'],
[2, '(13 - 16)% increased Movement Speed<br>(8 - 12)% chance to gain Onslaught for 4 seconds on Kill', '2<span class=\"alch\"></span>', ['bo'], 'Veiled'],
[3, '(17 - 20)% increased Movement Speed<br>(8 - 12)% chance to gain Onslaught for 4 seconds on Kill', '4<span class=\"chaos\"></span>', ['bo'], 'Veiled'],
],
"#% increased Movement Speed<br>Cannot be Chilled":
[
[1, '(9 - 12)% increased Movement Speed<br>Cannot be Chilled', '2<span class=\"chance\"></span>', ['bo'], 'Veiled'],
[2, '(13 - 16)% increased Movement Speed<br>Cannot be Chilled', '2<span class=\"alch\"></span>', ['bo'], 'Veiled'],
[3, '(17 - 20)% increased Movement Speed<br>Cannot be Chilled', '4<span class=\"chaos\"></span>', ['bo'], 'Veiled'],
],
"+# to Armour and Evasion Rating":
[
[1, '+(35 - 60) to Armour and Evasion Rating', '4<span class=\"alt\"></span>', ['be', 'qu'], 'Veiled'],
[2, '+(61 - 110) to Armour and Evasion Rating', '8<span class=\"alt\"></span>', ['be', 'qu'], 'Veiled'],
[3, '+(111 - 185) to Armour and Evasion Rating', '2<span class=\"alch\"></span>', ['be', 'qu'], 'Veiled'],
[4, '+(186 - 285) to Armour and Evasion Rating', '4<span class=\"chaos\"></span>', ['be', 'qu'], 'Veiled'],
],
"+# to Armour<br>+# to maximum Energy Shield":
[
[1, '+(35 - 60) to Armour<br>+(6 - 10) to maximum Energy Shield', '4<span class=\"alt\"></span>', ['be'], 'Veiled'],
[2, '+(61 - 110) to Armour<br>+(11 - 15) to maximum Energy Shield', '8<span class=\"alt\"></span>', ['be'], 'Veiled'],
[3, '+(111 - 185) to Armour<br>+(16 - 20) to maximum Energy Shield', '2<span class=\"alch\"></span>', ['be'], 'Veiled'],
[4, '+(186 - 285) to Armour<br>+(21 - 25) to maximum Energy Shield', '4<span class=\"chaos\"></span>', ['be'], 'Veiled'],
],
"+# to Evasion Rating<br>+# to maximum Energy Shield":
[
[1, '+(35 - 60) to Evasion Rating<br>+(6 - 10) to maximum Energy Shield', '4<span class=\"alt\"></span>', ['be', 'qu'], 'Veiled'],
[2, '+(61 - 110) to Evasion Rating<br>+(11 - 15) to maximum Energy Shield', '8<span class=\"alt\"></span>', ['be', 'qu'], 'Veiled'],
[3, '+(111 - 185) to Evasion Rating<br>+(16 - 20) to maximum Energy Shield', '2<span class=\"alch\"></span>', ['be', 'qu'], 'Veiled'],
[4, '+(186 - 285) to Evasion Rating<br>+(21 - 25) to maximum Energy Shield', '4<span class=\"chaos\"></span>', ['be', 'qu'], 'Veiled'],
],
"#% increased effect of Flasks on you":
[
[1, '(3 - 4)% increased effect of Flasks on you', "3<span class=\"bauble\"></span>", ['be'], 'Veiled'],
[2, '(5 - 6)% increased effect of Flasks on you', '3<span class=\"chaos\"></span>', ['be'], 'Veiled'],
[3, '(7 - 8)% increased effect of Flasks on you', '1<span class=\"exalt\"></span>', ['be'], 'Veiled'],
],
"#% chance for your Flasks to not consume Charges":
[
[1, '(5 - 6)% chance for your Flasks to not consume Charges', "3<span class=\"bauble\"></span>", ['be'], 'Veiled'],
[2, '(7 - 8)% chance for your Flasks to not consume Charges', '3<span class=\"chaos\"></span>', ['be'], 'Veiled'],
],
"#% increased effect of Flasks on you<br>#% reduced Flask Charges gained":
[
[1, '(8 - 10)% increased effect of Flasks on you<br>20% reduced Flask Charges gained', '2<span class=\"chaos\"></span>', ['be'], 'Veiled'],
[2, '(11 - 14)% increased effect of Flasks on you<br>33% reduced Flask Charges gained', '1<span class=\"exalt\"></span>', ['be'], 'Veiled'],
],
"#% of Physical Damage Converted to Fire Damage":
[
[1, '(20 - 25)% of Physical Damage Converted to Fire Damage', '1<span class=\"divine\"></span>', ['gl'], 'Veiled'],
],
"#% of Physical Damage Converted to Cold Damage":
[
[1, '(20 - 25)% of Physical Damage Converted to Cold Damage', '1<span class=\"divine\"></span>', ['gl'], 'Veiled'],
],
"#% of Physical Damage Converted to Lightning Damage":
[
[1, '(20 - 25)% of Physical Damage Converted to Lightning Damage', '1<span class=\"divine\"></span>', ['gl'], 'Veiled'],
],
"+# to maximum number of Zombies<br>+# to maximum number of Skeletons":
[
[1, '+1 to maximum number of Zombies<br>+1 to maximum number of Skeletons', '4<span class=\"chaos\"></span>', ['bd', 'he'], 'Veiled'],
],
"You can apply an additional Curse":
[
[1, 'You can apply an additional Curse', '3<span class=\"exalt\"></span>', ['bd'], 'Veiled, Item Level 75+'],
],
"+# to maximum number of Summoned Totems":
[
[1, '+1 to maximum number of Summoned Totems', '2<span class=\"exalt\"></span>', ['sh'], 'Veiled, Item Level 75+'],
],
"+# to Armour during Soul Gain Prevention":
[
[1, '+(500 - 750) to Armour during Soul Gain Prevention', '1<span class=\"vaal\"></span>', ['bd', 'sh'], 'Veiled'],
[2, '+(751 - 1000) to Armour during Soul Gain Prevention', '2<span class=\"vaal\"></span>', ['bd', 'sh'], 'Veiled'],
[3, '+(1001 - 2000) to Armour during Soul Gain Prevention', '3<span class=\"vaal\"></span>', ['bd', 'sh'], 'Veiled'],
[4, '+(2001 - 3000) to Armour during Soul Gain Prevention', '4<span class=\"vaal\"></span>', ['bd', 'sh'], 'Veiled'],
],
"#% increased Area of Effect<br>+# to Level of Socketed AoE Gems":
[
[1, '(8 - 10)% increased Area of Effect<br>+1 to Level of Socketed AoE Gems', '1<span class=\"divine\"></span>', ['gl', 'he'], 'Veiled'],
],
"Projectiles Pierce an additional Target<br>+# to Level of Socketed Projectile Gems":
[
[1, 'Projectiles Pierce an additional Target<br>+1 to Level of Socketed Projectile Gems', '1<span class=\"divine\"></span>', ['gl', 'he'], 'Veiled'],
],
"+# to Melee Weapon and Unarmed Attack range<br>+# to Level of Socketed Melee Gems":
[
[1, '+1 to Melee Weapon and Unarmed Attack range<br>+1 to Level of Socketed Melee Gems', '1<span class=\"divine\"></span>', ['gl', 'he'], 'Veiled'],
],
"+# to maximum Life<br>+# to maximum Mana":
[
[1, '+(14 - 20) to maximum Life<br>+(14 - 20) to maximum Mana', '3<span class=\"aug\"></span>', ['am', 'be', 'bo', 'gl', 'he', 'qu', 'ri'], 'Veiled'],
[2, '+(21 - 27) to maximum Life<br>+(21 - 27) to maximum Mana', '4<span class=\"alt\"></span>', ['am', 'be', 'bo', 'gl', 'he', 'qu', 'ri'], 'Veiled'],
[3, '+(28 - 33) to maximum Life<br>+(28 - 33) to maximum Mana', '2<span class=\"alch\"></span>', ['am', 'be', 'bo', 'gl', 'he', 'qu', 'ri'], 'Veiled'],
[4, '+(34 - 40) to maximum Life<br>+(34 - 40) to maximum Mana', '4<span class=\"chaos\"></span>', ['am', 'be', 'bo', 'gl', 'he', 'qu', 'ri'], 'Veiled'],
],
"Adds # to # Fire Damage<br>Adds # to # Cold Damage":
[
[1, 'Adds (3 - 4) to (5 - 8) Fire Damage<br>Adds (3 - 4) to (5 - 8) Cold Damage', '4<span class=\"transmute\"></span>', ['qu', 'ri', 'sh'], 'Veiled'],
[2, 'Adds (5 - 8) to (9 - 12) Fire Damage<br>Adds (5 - 8) to (9 - 12) Cold Damage', '2<span class=\"alch\"></span>', ['ri', 'sh'], 'Veiled'],
[3, 'Adds (9 - 12) to (13 - 16) Fire Damage<br>Adds (9 - 12) to (13 - 16) Cold Damage', '3<span class=\"chaos\"></span>', ['ri', 'sh'], 'Veiled'],
],
"Adds # to # Fire Damage<br>Adds # to # Lightning Damage":
[
[1, 'Adds (3 - 4) to (5 - 8) Fire Damage<br>Adds 1 to (7 - 12) Lightning Damage', '4<span class=\"transmute\"></span>', ['qu', 'ri', 'sh'], 'Veiled'],
[2, 'Adds (5 - 8) to (9 - 12) Fire Damage<br>Adds 1 to (13 - 18) Lightning Damage', '2<span class=\"alch\"></span>', ['ri', 'sh'], 'Veiled'],
[3, 'Adds (9 - 12) to (13 - 16) Fire Damage<br>Adds 1 to (19 - 24) Lightning Damage', '3<span class=\"chaos\"></span>', ['ri', 'sh'], 'Veiled'],
],
"Adds # to # Cold Damage<br>Adds # to # Lightning Damage":
[
[1, 'Adds (3 - 4) to (5 - 8) Cold Damage<br>Adds 1 to (7 - 12) Lightning Damage', '4<span class=\"transmute\"></span>', ['qu', 'ri', 'sh'], 'Veiled'],
[2, 'Adds (5 - 8) to (9 - 12) Cold Damage<br>Adds 1 to (13 - 18) Lightning Damage', '2<span class=\"alch\"></span>', ['ri', 'sh'], 'Veiled'],
[3, 'Adds (9 - 12) to (13 - 16) Cold Damage<br>Adds 1 to (19 - 24) Lightning Damage', '3<span class=\"chaos\"></span>', ['ri', 'sh'], 'Veiled'],
],
"+#% chance to Dodge Attack Hits while Focussed":
[
[1, '+(13 - 15)% chance to Dodge Attack Hits while Focussed', '3<span class=\"aug\"></span>', ['bo', 'he'], 'Veiled'],
[2, '+(16 - 18)% chance to Dodge Attack Hits while Focussed', '4<span class=\"alt\"></span>', ['bo', 'he'], 'Veiled'],
[3, '+(19 - 22)% chance to Dodge Attack Hits while Focussed', '2<span class=\"alch\"></span>', ['bo', 'he'], 'Veiled'],
[4, '+(23 - 25)% chance to Dodge Attack Hits while Focussed', '4<span class=\"chaos\"></span>', ['bo', 'he'], 'Veiled'],
],
"#% additional Physical Damage Reduction while Focussed":
[
[1, '7% additional Physical Damage Reduction while Focussed', '3<span class=\"aug\"></span>', ['gl', 'he'], 'Veiled'],
[2, '8% additional Physical Damage Reduction while Focussed', '4<span class=\"alt\"></span>', ['gl', 'he'], 'Veiled'],
[3, '(9 - 10)% additional Physical Damage Reduction while Focussed', '2<span class=\"alch\"></span>', ['gl', 'he'], 'Veiled'],
[4, '(11 - 12)% additional Physical Damage Reduction while Focussed', '4<span class=\"chaos\"></span>', ['gl', 'he'], 'Veiled'],
],
"#% increased Damage":
[
[1, '(8 - 10)% increased Damage', '3<span class=\"aug\"></span>', ['be', 'ri'], "Leo's Veiled"],
[2, '(11 - 13)% increased Damage', '2<span class=\"alch\"></span>', ['be', 'ri'], "Leo's Veiled"],
[3, '(14 - 16)% increased Damage', '2<span class=\"chaos\"></span>', ['be', 'ri'], "Leo's Veiled"],
[4, '(17 - 20)% increased Damage', '1<span class=\"exalt\"></span>', ['be', 'ri'], "Leo's Veiled"],
],
"+# to Level of Socketed Support Gems":
[
[1, '+1 to Level of Socketed Support Gems', '1<span class=\"divine\"></span>', ['om', 'or', 'tm', 'tr', 'sh'], "Catarina's Veiled"],
[2, '+2 to Level of Socketed Support Gems', '1<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr', 'sh'], "Catarina's Veiled"],
],
"-# to Total Mana Cost of Skills":
[
[1, '(-3 - -2) to Total Mana Cost of Skills', '2<span class=\"chance\"></span>', ['am', 'ri'], "Elreon's Veiled"],
[2, '(-5 - -4) to Total Mana Cost of Skills', '2<span class=\"alch\"></span>', ['am', 'ri'], "Elreon's Veiled"],
[3, '(-7 - -6) to Total Mana Cost of Skills', '3<span class=\"chaos\"></span>', ['am', 'ri'], "Elreon's Veiled"],
[4, '(-9 - -8) to Total Mana Cost of Skills', '1<span class=\"exalt\"></span>', ['am', 'ri'], "Elreon's Veiled"],
],
"#% increased Damage while Leeching":
[
[1, '(17 - 23)% increased Damage while Leeching', '4<span class=\"transmute\"></span>', ['am', 'gl'], "Vorici's Veiled"],
[2, '(24 - 30)% increased Damage while Leeching', '2<span class=\"alch\"></span>', ['am', 'gl'], "Vorici's Veiled"],
[3, '(31 - 36)% increased Damage while Leeching', '3<span class=\"chaos\"></span>', ['am', 'gl'], "Vorici's Veiled"],
[4, '(37 - 43)% increased Damage while Leeching', '1<span class=\"exalt\"></span>', ['am', 'gl'], "Vorici's Veiled"],
],
"+#% to Quality of Socketed Gems":
[
[1, '+(4 - 5)% to Quality of Socketed Gems', "2<span class=\"gcp\"></span>", ['om', 'or', 'tm', 'tr', 'sh'], "Haku's Veiled"],
[2, '+(5 - 6)% to Quality of Socketed Gems', "4<span class=\"gcp\"></span>", ['om', 'or', 'tm', 'tr', 'sh'], "Haku's Veiled"],
[3, '+(6 - 7)% to Quality of Socketed Gems', "8<span class=\"gcp\"></span>", ['om', 'or', 'tm', 'tr', 'sh'], "Haku's Veiled"],
[4, '+(7 - 8)% to Quality of Socketed Gems', '1<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr', 'sh'], "Haku's Veiled"],
],
"Adds # to # Physical Damage<br>#% chance to cause Bleeding on Hit":
[
[1, 'Adds (2 - 3) to (4 - 5) Physical Damage<br>25% chance to cause Bleeding on Hit', '3<span class=\"aug\"></span>', ['tm', 'tr'], "Tora's Veiled"],
[2, 'Adds (5 - 7) to (8 - 11) Physical Damage<br>30% chance to cause Bleeding on Hit', '4<span class=\"alt\"></span>', ['tm', 'tr'], "Tora's Veiled"],
[3, 'Adds (11 - 13) to (14 - 16) Physical Damage<br>35% chance to cause Bleeding on Hit', '2<span class=\"alch\"></span>', ['tm', 'tr'], "Tora's Veiled"],
[4, 'Adds (14 - 16) to (17 - 20) Physical Damage<br>40% chance to cause Bleeding on Hit', '4<span class=\"chaos\"></span>', ['tm', 'tr'], "Tora's Veiled"],
[1, 'Adds (1 - 2) to (3 - 4) Physical Damage<br>25% chance to cause Bleeding on Hit', '3<span class=\"aug\"></span>', ['om', 'or'], "Tora's Veiled"],
[2, 'Adds (3 - 4) to (5 - 6) Physical Damage<br>30% chance to cause Bleeding on Hit', '4<span class=\"alt\"></span>', ['om', 'or'], "Tora's Veiled"],
[3, 'Adds (5 - 7) to (8 - 10) Physical Damage<br>35% chance to cause Bleeding on Hit', '2<span class=\"alch\"></span>', ['om', 'or'], "Tora's Veiled"],
[4, 'Adds (9 - 11) to (12 - 14) Physical Damage<br>40% chance to cause Bleeding on Hit', '4<span class=\"chaos\"></span>', ['om', 'or'], "Tora's Veiled"],
],
"Hits can't be Evaded":
[
[1, "Hits can't be Evaded", '3<span class=\"exalt\"></span>', ['om', 'tm'], "Vagan's Veiled"],
],
"#% increased Damage during any Flask Effect":
[
[1, '(12 - 15)% increased Damage during any Flask Effect', "3<span class=\"bauble\"></span>", ['gl'], "Guff's Veiled"],
[2, '(16 - 18)% increased Damage during any Flask Effect', "6<span class=\"bauble\"></span>", ['gl'], "Guff's Veiled"],
[3, '(19 - 23)% increased Damage during any Flask Effect', '2<span class=\"alch\"></span>', ['gl'], "Guff's Veiled"],
[4, '(24 - 28)% increased Damage during any Flask Effect', '4<span class=\"chaos\"></span>', ['gl'], "Guff's Veiled"],
],
"Gain #% of Non-Chaos Damage as extra Chaos Damage":
[
[1, 'Gain (8 - 10)% of Non-Chaos Damage as extra Chaos Damage', '2<span class=\"vaal\"></span>', ['tm', 'tr'], "It That Fled's Veiled"],
[2, 'Gain (11 - 14)% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"vaal\"></span>', ['tm', 'tr'], "It That Fled's Veiled"],
[3, 'Gain (15 - 18)% of Non-Chaos Damage as extra Chaos Damage', '6<span class=\"vaal\"></span>', ['tm', 'tr'], "It That Fled's Veiled"],
[4, 'Gain (19 - 22)% of Non-Chaos Damage as extra Chaos Damage', '8<span class=\"vaal\"></span>', ['tm', 'tr'], "It That Fled's Veiled"],
[1, 'Gain (4 - 5)% of Non-Chaos Damage as extra Chaos Damage', '2<span class=\"vaal\"></span>', ['om', 'or', 'sh'], "It That Fled's Veiled"],
[2, 'Gain (6 - 7)% of Non-Chaos Damage as extra Chaos Damage', '4<span class=\"vaal\"></span>', ['om', 'or', 'sh'], "It That Fled's Veiled"],
[3, 'Gain (8 - 9)% of Non-Chaos Damage as extra Chaos Damage', '6<span class=\"vaal\"></span>', ['om', 'or', 'sh'], "It That Fled's Veiled"],
[4, 'Gain (10 - 11)% of Non-Chaos Damage as extra Chaos Damage', '8<span class=\"vaal\"></span>', ['om', 'or', 'sh'], "It That Fled's Veiled"],
],
"Gain #% of Maximum Life as Extra Maximum Energy Shield":
[
[1, 'Gain 6% of Maximum Life as Extra Maximum Energy Shield', '3<span class=\"aug\"></span>', ['bd'], "Gravicius' Veiled"],
[2, 'Gain 8% of Maximum Life as Extra Maximum Energy Shield', '2<span class=\"alch\"></span>', ['bd'], "Gravicius' Veiled"],
[3, 'Gain 10% of Maximum Life as Extra Maximum Energy Shield', '4<span class=\"chaos\"></span>', ['bd'], "Gravicius' Veiled"],
],
"#% of Physical Damage from Hits taken as Fire Damage":
[
[1, '(1 - 2)% of Physical Damage from Hits taken as Fire Damage', '3<span class=\"alt\"></span>', ['he'], "Korell's Veiled"],
[2, '(3 - 4)% of Physical Damage from Hits taken as Fire Damage', '1<span class=\"chaos\"></span>', ['he'], "Korell's Veiled"],
[3, '(5 - 6)% of Physical Damage from Hits taken as Fire Damage', '2<span class=\"chaos\"></span>', ['he'], "Korell's Veiled"],
[4, '(7 - 8)% of Physical Damage from Hits taken as Fire Damage', '1<span class=\"divine\"></span>', ['he'], "Korell's Veiled"],
],
"#% chance to Avoid being Frozen":
[
[1, '40% chance to Avoid being Frozen', '3<span class=\"aug\"></span>', ['bo'], "Rin's Veiled"],
[2, '60% chance to Avoid being Frozen', '4<span class=\"alt\"></span>', ['bo'], "Rin's Veiled"],
[3, '80% chance to Avoid being Frozen', '2<span class=\"alch\"></span>', ['bo'], "Rin's Veiled"],
[4, 'Cannot be Frozen', '4<span class=\"chaos\"></span>', ['bo'], "Rin's Veiled"],
],
"#% increased maximum Life<br>#% increased maximum Mana":
[
[1, '(5 - 8)% increased maximum Life<br>(5 - 8)% increased maximum Mana', '4<span class=\"chaos\"></span>', ['bd'], 'Veiled'],
],
"#% Chance to Block Attack Damage":
[
[1, '(5 - 7)% Chance to Block Attack Damage', '4<span class=\"chaos\"></span>', ['bd'], 'Veiled'],
],
"#% chance to Dodge Spell Hits":
[
[1, '(8 - 10)% chance to Dodge Spell Hits', '4<span class=\"chaos\"></span>', ['bd'], 'Veiled'],
],
"#% chance to Avoid Elemental Ailments<br>#% chance to Avoid being Stunned":
[
[1, '(25 - 35)% chance to Avoid Elemental Ailments<br>(25 - 35)% chance to Avoid being Stunned', '6<span class=\"alch\"></span>', ['bd'], 'Veiled'],
],
"#% increased Area Damage<br>#% increased Area of Effect":
[
[1, '(6 - 8)% increased Area Damage<br>(6 - 7)% increased Area of Effect', '2<span class=\"alch\"></span>', ['am'], 'Veiled'],
[2, '(9 - 12)% increased Area Damage<br>(8 - 9)% increased Area of Effect', '3<span class=\"chaos\"></span>', ['am'], 'Veiled'],
[3, '(13 - 16)% increased Area Damage<br>(10 - 12)% increased Area of Effect', '1<span class=\"divine\"></span>', ['am'], 'Veiled'],
],
"#% increased Projectile Damage<br>#% increased Projectile Speed":
[
[1, '(6 - 8)% increased Projectile Damage<br>(10 - 12)% increased Projectile Speed', '2<span class=\"alch\"></span>', ['am'], 'Veiled'],
[2, '(9 - 12)% increased Projectile Damage<br>(13 - 16)% increased Projectile Speed', '3<span class=\"chaos\"></span>', ['am'], 'Veiled'],
[3, '(13 - 16)% increased Projectile Damage<br>(17 - 20)% increased Projectile Speed', '1<span class=\"divine\"></span>', ['am'], 'Veiled'],
],
"#% increased Melee Damage<br>+# to Melee Weapon and Unarmed Attack range":
[
[1, '(6 - 8)% increased Melee Damage<br>+1 to Melee Weapon and Unarmed Attack range', '2<span class=\"alch\"></span>', ['am'], 'Veiled'],
[2, '(9 - 12)% increased Melee Damage<br>+1 to Melee Weapon and Unarmed Attack range', '3<span class=\"chaos\"></span>', ['am'], 'Veiled'],
[3, '(13 - 16)% increased Melee Damage<br>+1 to Melee Weapon and Unarmed Attack range', '1<span class=\"divine\"></span>', ['am'], 'Veiled'],
],
"Suffixes Cannot Be Changed":
[
[1, 'Suffixes Cannot Be Changed', '2<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Prophecy - The Unbreathing Queen V'],
],
},
"Suffix":
{
"+#% to Fire Resistance":
[
[1, '+(16 - 20)% to Fire Resistance', '1<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(21 - 28)% to Fire Resistance', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 4 - The Crystal Veins'],
[3, '+(29 - 35)% to Fire Resistance', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'sh'], "Act 8 - Doedre's Cesspool"],
],
"+#% to Cold Resistance":
[
[1, '+(16 - 20)% to Cold Resistance', '1<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(21 - 28)% to Cold Resistance', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 4 - The Crystal Veins'],
[3, '+(29 - 35)% to Cold Resistance', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'sh'], "Act 8 - Doedre's Cesspool"],
],
"+#% to Lightning Resistance":
[
[1, '+(16 - 20)% to Lightning Resistance', '1<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(21 - 28)% to Lightning Resistance', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 4 - The Crystal Veins'],
[3, '+(29 - 35)% to Lightning Resistance', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'sh'], "Act 8 - Doedre's Cesspool"],
],
"+# to Strength":
[
[1, '+(15 - 20) to Strength', '1<span class=\"aug\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(21 - 25) to Strength', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 4 - The Ascent'],
[3, '+(26 - 30) to Strength', '2<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Western Forest'],
],
"+# to Dexterity":
[
[1, '+(15 - 20) to Dexterity', '1<span class=\"aug\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(21 - 25) to Dexterity', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 4 - The Ascent'],
[3, '+(26 - 30) to Dexterity', '2<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Western Forest'],
],
"+# to Intelligence":
[
[1, '+(15 - 20) to Intelligence', '1<span class=\"aug\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
[2, '+(21 - 25) to Intelligence', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 4 - The Ascent'],
[3, '+(26 - 30) to Intelligence', '2<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Western Forest'],
],
"+#% to Fire and Cold Resistances":
[
[1, '+(10 - 12)% to Fire and Cold Resistances', '6<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Lower Prison'],
[2, '+(13 - 16)% to Fire and Cold Resistances', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], "The Cruel Labyrinth"],
[3, '+(17 - 20)% to Fire and Cold Resistances', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'sh'], "The Merciless Labyrinth"],
],
"+#% to Cold and Lightning Resistances":
[
[1, '+(10 - 12)% to Cold and Lightning Resistances', '6<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Lower Prison'],
[2, '+(13 - 16)% to Cold and Lightning Resistances', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], "The Cruel Labyrinth"],
[3, '+(17 - 20)% to Cold and Lightning Resistances', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'sh'], "The Merciless Labyrinth"],
],
"+#% to Fire and Lightning Resistances":
[
[1, '+(10 - 12)% to Fire and Lightning Resistances', '6<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 6 - The Lower Prison'],
[2, '+(13 - 16)% to Fire and Lightning Resistances', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], "The Cruel Labyrinth"],
[3, '+(17 - 20)% to Fire and Lightning Resistances', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'sh'], "The Merciless Labyrinth"],
],
"+#% to all Elemental Resistances":
[
[1, '+(5 - 8)% to all Elemental Resistances', '6<span class=\"alt\"></span>', ['am', 'ri'], "The Labyrinth"],
[2, '+(9 - 12)% to all Elemental Resistances', '1<span class=\"chaos\"></span>', ['am', 'ri'], "The Eternal Labyrinth"],
],
"+# to all Attributes":
[
[1, '+(6 - 9) to all Attributes', '3<span class=\"chance\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 3 - The Imperial Gardens'],
[2, '+(10 - 13) to all Attributes', '6<span class=\"chance\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 9 - The Tunnel'],
],
"#% of Physical Attack Damage Leeched as Life":
[
[1, '(0.3 - 0.5)% of Physical Attack Damage Leeched as Life', '3<span class=\"chance\"></span>', ['om', 'or', 'tm', 'tr'], 'Cage Map - Tier 3'],
[2, '(0.5 - 0.8)% of Physical Attack Damage Leeched as Life', '4<span class=\"chance\"></span>', ['om', 'tm'], 'Atzoatl - Sanctum of Immortality'],
],
"#% of Physical Attack Damage Leeched as Mana":
[
[1, '(0.2 - 0.4)% of Physical Attack Damage Leeched as Mana', '1<span class=\"regal\"></span>', ['om', 'or', 'tm', 'tr'], 'Atzoatl - Sanctum of Immortality'],
],
"#% increased Mana Regeneration Rate":
[
[1, '(20 - 30)% increased Mana Regeneration Rate', '6<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri', 'sh'], 'Act 7 - The Chamber of Sins Level 2'],
[2, '(31 - 40)% increased Mana Regeneration Rate', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri', 'sh'], 'City Square Map - Tier 5'],
],
"#% increased Attack Speed":
[
[1, '(8 - 10)% increased Attack Speed', '6<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 4 - The Dried Lake'],
[2, '(11 - 15)% increased Attack Speed', '2<span class=\"chaos\"></span>', ['om', 'tm'], 'Act 9 - The Vastiri Desert'],
[3, '(16 - 20)% increased Attack Speed', '1<span class=\"exalt\"></span>', ['om', 'tm'], 'Dig Map - Tier 13'],
[3, '(11 - 13)% increased Attack Speed', '1<span class=\"exalt\"></span>', ['or', 'tr'], 'Dig Map - Tier 13'],
[1, '(7 - 12)% increased Attack Speed', '2<span class=\"chaos\"></span>', ['gl', 'qu'], 'Act 4 - The Dried Lake'],
[1, '(3 - 5)% increased Attack Speed', '2<span class=\"alch\"></span>', ['ri', 'sh'], 'Act 4 - The Dried Lake'],
],
"#% increased Cast Speed":
[
[1, '(12 - 15)% increased Cast Speed', '6<span class=\"alt\"></span>', ['tm'], 'Act 4 - The Dried Lake'],
[2, '(16 - 20)% increased Cast Speed', '2<span class=\"chaos\"></span>', ['tm'], 'Act 9 - The Vastiri Desert'],
[3, '(21 - 28)% increased Cast Speed', '1<span class=\"exalt\"></span>', ['tm'], 'Dig Map - Tier 13'],
[1, '(9 - 11)% increased Cast Speed', '6<span class=\"alt\"></span>', ['om', 'or'], 'Act 4 - The Dried Lake'],
[2, '(12 - 14)% increased Cast Speed', '2<span class=\"chaos\"></span>', ['om', 'or'], 'Act 9 - The Vastiri Desert'],
[3, '(15 - 18)% increased Cast Speed', '1<span class=\"exalt\"></span>', ['om', 'or'], 'Dig Map - Tier 13'],
[1, '(6 - 8)% increased Cast Speed', '6<span class=\"alt\"></span>', ['am', 'sh'], 'Act 4 - The Dried Lake'],
],
"#% increased Fire Damage":
[
[1, '(9 - 12)% increased Fire Damage', '4<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 3 - The Crematorium'],
[2, '(13 - 16)% increased Fire Damage', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 7 - The Dread Thicket'],
],
"#% increased Cold Damage":
[
[1, '(9 - 12)% increased Cold Damage', '4<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 3 - The Solaris Temple Level 2'],
[2, '(13 - 16)% increased Cold Damage', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 7 - The Causeway'],
],
"#% increased Lightning Damage":
[
[1, '(9 - 12)% increased Lightning Damage', '4<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 3 - The Lunaris Temple Level 2'],
[2, '(13 - 16)% increased Lightning Damage', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 7 - The Chamber of Sins Level 1'],
],
"#% increased Chaos Damage":
[
[1, '(9 - 12)% increased Chaos Damage', '3<span class=\"chance\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 4 - The Belly of the Beast Level 2'],
[2, '(13 - 16)% increased Chaos Damage', '3<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'ri'], 'Act 10 - The Ossuary'],
],
"+# to Accuracy Rating":
[
[1, '+(91 - 120) to Accuracy Rating', '4<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 5 - The Reliquary'],
[2, '+(121 - 200) to Accuracy Rating', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 7 - The Temple of Decay Level 2'],
[3, '+(201 - 300) to Accuracy Rating', '2<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Atzoatl - Hall of Champions'],
[1, '+(91 - 120) to Accuracy Rating', '4<span class=\"transmute\"></span>', ['am', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 5 - The Reliquary'],
[2, '+(121 - 150) to Accuracy Rating', '2<span class=\"alch\"></span>', ['am', 'gl', 'he', 'qu', 'ri', 'sh'], 'Act 7 - The Temple of Decay Level 2'],
[3, '+(151 - 220) to Accuracy Rating', '2<span class=\"chaos\"></span>', ['am', 'gl', 'he', 'qu', 'ri', 'sh'], 'Atzoatl - Hall of Champions'],
],
"#% increased Global Physical Damage":
[
[1, '(9 - 12)% increased Global Physical Damage', '3<span class=\"chance\"></span>', ['am', 'sh'], 'Act 3 - The Sewers'],
[2, '(13 - 16)% increased Global Physical Damage', '3<span class=\"alch\"></span>', ['am', 'sh'], 'Act 5 - The Ossuary'],
],
"#% increased Critical Strike Chance":
[
[1, '(17 - 19)% increased Critical Strike Chance', '6<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Dungeon Map - Tier 2'],
[2, '(20 - 24)% increased Critical Strike Chance', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Dark Forest Map - Tier 14'],
[3, '(25 - 27)% increased Critical Strike Chance', '4<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Atzoatl - Factory'],
],
"+#% to Global Critical Strike Multiplier":
[
[1, '+(17 - 19)% to Global Critical Strike Multiplier', '6<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'Dungeon Map - Tier 2'],
[2, '+(20 - 24)% to Global Critical Strike Multiplier', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Dark Forest Map - Tier 14'],
[3, '+(25 - 28)% to Global Critical Strike Multiplier', '4<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'Atzoatl - Factory'],
],
"#% increased Critical Strike Chance for Spells":
[
[1, '(45 - 75)% increased Critical Strike Chance for Spells', '6<span class=\"alt\"></span>', ['tm'], 'Dungeon Map - Tier 2'],
[2, '(76 - 105)% increased Critical Strike Chance for Spells', '3<span class=\"chaos\"></span>', ['tm'], 'Dark Forest Map - Tier 14'],
[1, '(30 - 49)% increased Critical Strike Chance for Spells', '6<span class=\"alt\"></span>', ['om', 'or'], 'Dungeon Map - Tier 2'],
[2, '(50 - 69)% increased Critical Strike Chance for Spells', '3<span class=\"chaos\"></span>', ['om', 'or'], 'Dark Forest Map - Tier 14'],
],
"#% increased Global Critical Strike Chance":
[
[1, '(17 - 21)% increased Global Critical Strike Chance', '9<span class=\"alt\"></span>', ['am', 'qu'], 'Dungeon Map - Tier 2'],
[2, '(22 - 27)% increased Global Critical Strike Chance', '1<span class=\"chaos\"></span>', ['am', 'qu'], 'Dark Forest Map - Tier 14'],
],
"#% increased Flask effect duration":
[
[1, '(5 - 10)% increased Flask effect duration', "2<span class=\"bauble\"></span>", ['be'], 'Act 8 - The Bath House'],
[2, '(11 - 15)% increased Flask effect duration', "4<span class=\"bauble\"></span>", ['be'], 'Castle Ruins Map - Tier 12'],
],
"#% chance to Avoid being Stunned":
[
[1, '(15 - 20)% chance to Avoid being Stunned', '1<span class=\"regal\"></span>', ['gl', 'he'], 'Bazaar Map - Tier 7'],
[2, '(21 - 25)% chance to Avoid being Stunned', '2<span class=\"regal\"></span>', ['gl', 'he'], 'Racecourse Map - Tier 13'],
],
"#% chance to Avoid Elemental Ailments":
[
[1, '(15 - 20)% chance to Avoid Elemental Ailments', '1<span class=\"chaos\"></span>', ['gl', 'he'], 'Bazaar Map - Tier 7'],
[2, '(21 - 25)% chance to Avoid Elemental Ailments', '2<span class=\"chaos\"></span>', ['gl', 'he'], 'Racecourse Map - Tier 13'],
],
"+#% Chance to Block":
[
[1, '+(2 - 3)% Chance to Block', '9<span class=\"alt\"></span>', ['sh'], 'Arachnid Tomb Map - Tier 11'],
[2, '+(4 - 5)% Chance to Block', '3<span class=\"chaos\"></span>', ['sh'], 'Desert Spring Map - Tier 15'],
],
"#% Chance to Block Spell Damage":
[
[1, '(2 - 3)% Chance to Block Spell Damage', '4<span class=\"chance\"></span>', ['sh'], 'Arachnid Tomb Map - Tier 11'],
[2, '(4 - 5)% Chance to Block Spell Damage', '3<span class=\"regal\"></span>', ['sh'], 'Desert Spring Map - Tier 15'],
],
"#% reduced Damage taken from Damage Over Time":
[
[1, '5% reduced Damage taken from Damage Over Time', '2<span class=\"regal\"></span>', ['sh'], 'Atzoatl - Locus of Corruption'],
],
"Minions have #% increased maximum Life":
[
[1, 'Minions have (11 - 15)% increased maximum Life', '4<span class=\"aug\"></span>', ['he'], 'Act 8 - The Lunaris Temple Level 2'],
[2, 'Minions have (16 - 20)% increased maximum Life', '2<span class=\"alch\"></span>', ['he'], 'Phantasmagoria Map - Tier 6'],
],
"#% increased Trap Throwing Speed":
[
[1, '(6 - 8)% increased Trap Throwing Speed', '6<span class=\"aug\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 8 - The Solaris Temple Level 2'],
[2, '(9 - 12)% increased Trap Throwing Speed', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Atzoatl - Defense Research Lab'],
],
"#% increased Mine Laying Speed":
[
[1, '(6 - 8)% increased Mine Laying Speed', '6<span class=\"aug\"></span>', ['om', 'or', 'tm', 'tr'], 'Act 8 - The Solaris Temple Level 2'],
[2, '(9 - 12)% increased Mine Laying Speed', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'Atzoatl - Defense Research Lab'],
],
"Minions have #% increased Attack Speed<br>Minions have #% increased Cast Speed":
[
[1, 'Minions have (16 - 18)% increased Attack Speed<br>Minions have (16 - 18)% increased Cast Speed', '3<span class=\"aug\"></span>', ['tm', 'tr'], 'of the Veil'],
[2, 'Minions have (19 - 21)% increased Attack Speed<br>Minions have (19 - 21)% increased Cast Speed', '4<span class=\"alt\"></span>', ['tm', 'tr'], 'of the Veil'],
[3, 'Minions have (22 - 24)% increased Attack Speed<br>Minions have (22 - 24)% increased Cast Speed', '2<span class=\"alch\"></span>', ['tm', 'tr'], 'of the Veil'],
[4, 'Minions have (25 - 28)% increased Attack Speed<br>Minions have (25 - 28)% increased Cast Speed', '4<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, 'Minions have (8 - 9)% increased Attack Speed<br>Minions have (8 - 9)% increased Cast Speed', '3<span class=\"aug\"></span>', ['om', 'or'], 'of the Veil'],
[2, 'Minions have (10 - 11)% increased Attack Speed<br>Minions have (10 - 11)% increased Cast Speed', '4<span class=\"alt\"></span>', ['om', 'or'], 'of the Veil'],
[3, 'Minions have (12 - 13)% increased Attack Speed<br>Minions have (12 - 13)% increased Cast Speed', '2<span class=\"alch\"></span>', ['om', 'or'], 'of the Veil'],
[4, 'Minions have (14 - 15)% increased Attack Speed<br>Minions have (14 - 15)% increased Cast Speed', '4<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
],
"#% chance to deal Double Damage":
[
[1, '(4 - 6)% chance to deal Double Damage', '4<span class=\"alt\"></span>', ['tm', 'tr'], 'of the Veil'],
[2, '(7 - 9)% chance to deal Double Damage', '2<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[3, '(10 - 12)% chance to deal Double Damage', '1<span class=\"exalt\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '(2 - 3)% chance to deal Double Damage', '4<span class=\"alt\"></span>', ['om', 'or'], 'of the Veil'],
[2, '(4 - 5)% chance to deal Double Damage', '2<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
[3, '(6 - 7)% chance to deal Double Damage', '1<span class=\"exalt\"></span>', ['om', 'or'], 'of the Veil'],
],
"+# to Strength and Dexterity":
[
[1, '+(6 - 8) to Strength and Dexterity', '3<span class=\"aug\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[2, '+(9 - 11) to Strength and Dexterity', '4<span class=\"alt\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[3, '+(12 - 14) to Strength and Dexterity', '2<span class=\"alch\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[4, '+(15 - 17) to Strength and Dexterity', '4<span class=\"chaos\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
],
"+# to Dexterity and Intelligence":
[
[1, '+(6 - 8) to Dexterity and Intelligence', '3<span class=\"aug\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[2, '+(9 - 11) to Dexterity and Intelligence', '4<span class=\"alt\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[3, '+(12 - 14) to Dexterity and Intelligence', '2<span class=\"alch\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[4, '+(15 - 17) to Dexterity and Intelligence', '4<span class=\"chaos\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
],
"+# to Strength and Intelligence":
[
[1, '+(6 - 8) to Strength and Intelligence', '3<span class=\"aug\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[2, '+(9 - 11) to Strength and Intelligence', '4<span class=\"alt\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[3, '+(12 - 14) to Strength and Intelligence', '2<span class=\"alch\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[4, '+(15 - 17) to Strength and Intelligence', '4<span class=\"chaos\"></span>', ['am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
],
"+# to Minimum Endurance Charges":
[
[1, '+1 to Minimum Endurance Charges', '1<span class=\"exalt\"></span>', ['am', 'ri'], 'of the Veil, Item Level 75+'],
],
"+# to Minimum Power Charges":
[
[1, '+1 to Minimum Power Charges', '1<span class=\"exalt\"></span>', ['am', 'ri'], 'of the Veil, Item Level 75+'],
],
"+# to Minimum Frenzy Charges":
[
[1, '+1 to Minimum Frenzy Charges', '1<span class=\"exalt\"></span>', ['am', 'ri'], 'of the Veil, Item Level 75+'],
],
"#% increased Attack and Cast Speed":
[
[1, '3% increased Attack and Cast Speed', '4<span class=\"alt\"></span>', ['am', 'qu'], 'of the Veil'],
[2, '4% increased Attack and Cast Speed', '2<span class=\"alch\"></span>', ['am', 'qu'], 'of the Veil'],
[3, '(5 - 6)% increased Attack and Cast Speed', '3<span class=\"chaos\"></span>', ['am', 'qu'], 'of the Veil'],
],
"#% increased Cooldown Recovery Speed":
[
[1, '(6 - 8)% increased Cooldown Recovery Speed', '2<span class=\"chaos\"></span>', ['be'], 'of the Veil'],
[2, '(9 - 12)% increased Cooldown Recovery Speed', '1<span class=\"divine\"></span>', ['be'], 'of the Veil'],
],
"#% increased Damage per Endurance Charge":
[
[1, '(5 - 6)% increased Damage per Endurance Charge', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '(3 - 4)% increased Damage per Endurance Charge', '3<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
],
"#% increased Damage per Frenzy Charge":
[
[1, '(5 - 6)% increased Damage per Frenzy Charge', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '(3 - 4)% increased Damage per Frenzy Charge', '3<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
],
"#% increased Damage per Power Charge":
[
[1, '(5 - 6)% increased Damage per Power Charge', '3<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '(3 - 4)% increased Damage per Power Charge', '3<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
],
"#% increased Effect of non-Damaging Ailments on Enemies":
[
[1, '(11 - 20)% increased Effect of non-Damaging Ailments on Enemies', '2<span class=\"chance\"></span>', ['am', 'bo'], 'of the Veil'],
[2, '(21 - 30)% increased Effect of non-Damaging Ailments on Enemies', '2<span class=\"regal\"></span>', ['am', 'bo'], 'of the Veil'],
],
"#% increased Effect of your Curses":
[
[1, '(5 - 7)% increased Effect of your Curses', '2<span class=\"chaos\"></span>', ['sh'], 'of the Veil'],
[2, '(8 - 10)% increased Effect of your Curses', '1<span class=\"exalt\"></span>', ['sh'], 'of the Veil'],
],
"#% of Life Regenerated per second":
[
[1, '(0.4 - 0.6)% of Life Regenerated per second', '2<span class=\"chance\"></span>', ['am', 'sh'], 'of the Veil'],
[2, '(0.7 - 1)% of Life Regenerated per second', '4<span class=\"chaos\"></span>', ['am', 'sh'], 'of the Veil'],
],
"#% chance to Avoid Elemental Damage from Hits during Soul Gain Prevention":
[
[1, '(6 - 7)% chance to Avoid Elemental Damage from Hits during Soul Gain Prevention', '2<span class=\"vaal\"></span>', ['bd', 'he'], 'of the Veil'],
[2, '(8 - 9)% chance to Avoid Elemental Damage from Hits during Soul Gain Prevention', '4<span class=\"vaal\"></span>', ['bd', 'he'], 'of the Veil'],
],
"You have Onslaught during Soul Gain Prevention":
[
[1, 'You have Onslaught during Soul Gain Prevention', '3<span class=\"vaal\"></span>', ['bo', 'gl'], 'of the Veil'],
],
"Non-Vaal Skills deal #% increased Damage during Soul Gain Prevention":
[
[1, 'Non-Vaal Skills deal (30 - 40)% increased Damage during Soul Gain Prevention', '1<span class=\"vaal\"></span>', ['bo', 'gl'], 'of the Veil'],
[2, 'Non-Vaal Skills deal (41 - 50)% increased Damage during Soul Gain Prevention', '2<span class=\"vaal\"></span>', ['bo', 'gl'], 'of the Veil'],
[3, 'Non-Vaal Skills deal (51 - 60)% increased Damage during Soul Gain Prevention', '4<span class=\"vaal\"></span>', ['bo', 'gl'], 'of the Veil'],
],
"+#% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby":
[
[1, '+(18 - 24)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '3<span class=\"aug\"></span>', ['tm', 'tr'], 'of the Veil'],
[2, '+(25 - 31)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '4<span class=\"alt\"></span>', ['tm', 'tr'], 'of the Veil'],
[3, '+(32 - 38)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '2<span class=\"alch\"></span>', ['tm', 'tr'], 'of the Veil'],
[4, '+(39 - 45)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '4<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '+(12 - 16)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '3<span class=\"aug\"></span>', ['om', 'or'], 'of the Veil'],
[2, '+(17 - 21)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '4<span class=\"alt\"></span>', ['om', 'or'], 'of the Veil'],
[3, '+(22 - 25)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '2<span class=\"alch\"></span>', ['om', 'or'], 'of the Veil'],
[4, '+(26 - 30)% Critical Strike Multiplier while there is a Rare or Unique Enemy Nearby', '4<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
],
"#% increased Attack Speed while a Rare or Unique Enemy is Nearby":
[
[1, '(11 - 13)% increased Attack Speed while a Rare or Unique Enemy is Nearby', '3<span class=\"aug\"></span>', ['tm', 'tr'], 'of the Veil'],
[2, '(14 - 16)% increased Attack Speed while a Rare or Unique Enemy is Nearby', '4<span class=\"alt\"></span>', ['tm', 'tr'], 'of the Veil'],
[3, '(17 - 19)% increased Attack Speed while a Rare or Unique Enemy is Nearby', '2<span class=\"alch\"></span>', ['tm', 'tr'], 'of the Veil'],
[4, '(20 - 22)% increased Attack Speed while a Rare or Unique Enemy is Nearby', '4<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '6% increased Attack Speed while a Rare or Unique Enemy is Nearby', '3<span class=\"aug\"></span>', ['om', 'or'], 'of the Veil'],
[2, '7% increased Attack Speed while a Rare or Unique Enemy is Nearby', '4<span class=\"alt\"></span>', ['om', 'or'], 'of the Veil'],
[3, '(8 - 9)% increased Attack Speed while a Rare or Unique Enemy is Nearby', '2<span class=\"alch\"></span>', ['om', 'or'], 'of the Veil'],
[4, '(10 - 11)% increased Attack Speed while a Rare or Unique Enemy is Nearby', '4<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
],
"# Energy Shield Regenerated per second while a Rare or Unique Enemy is Nearby":
[
[1, '15 Energy Shield Regenerated per second while a Rare or Unique Enemy is Nearby', '3<span class=\"aug\"></span>', ['bd', 'be'], 'of the Veil'],
[2, '30 Energy Shield Regenerated per second while a Rare or Unique Enemy is Nearby', '4<span class=\"alt\"></span>', ['bd', 'be'], 'of the Veil'],
[3, '60 Energy Shield Regenerated per second while a Rare or Unique Enemy is Nearby', '2<span class=\"alch\"></span>', ['bd', 'be'], 'of the Veil'],
[4, '120 Energy Shield Regenerated per second while a Rare or Unique Enemy is Nearby', '4<span class=\"chaos\"></span>', ['bd', 'be'], 'of the Veil'],
],
"#% chance to gain a Frenzy Charge on Critical Strike<br>#% increased Global Critical Strike Chance":
[
[1, '2% chance to gain a Frenzy Charge on Critical Strike<br>(7 - 8)% increased Global Critical Strike Chance', '3<span class=\"aug\"></span>', ['qu'], 'of the Veil'],
[2, '3% chance to gain a Frenzy Charge on Critical Strike<br>(9 - 10)% increased Global Critical Strike Chance', '4<span class=\"alt\"></span>', ['qu'], 'of the Veil'],
[3, '4% chance to gain a Frenzy Charge on Critical Strike<br>(11 - 12)% increased Global Critical Strike Chance', '2<span class=\"alch\"></span>', ['qu'], 'of the Veil'],
[4, '5% chance to gain a Frenzy Charge on Critical Strike<br>(13 - 14)% increased Global Critical Strike Chance', '4<span class=\"chaos\"></span>', ['qu'], 'of the Veil'],
],
"#% increased Elemental Damage if you've dealt a Critical Strike Recently<br>#% increased Global Critical Strike Chance":
[
[1, "(11 - 13)% increased Elemental Damage if you've dealt a Critical Strike Recently<br>(9 - 10)% increased Global Critical Strike Chance", '3<span class=\"aug\"></span>', ['gl', 'qu'], 'of the Veil'],
[2, "(14 - 16)% increased Elemental Damage if you've dealt a Critical Strike Recently<br>(11 - 12)% increased Global Critical Strike Chance", '4<span class=\"alt\"></span>', ['gl', 'qu'], 'of the Veil'],
[3, "(17 - 19)% increased Elemental Damage if you've dealt a Critical Strike Recently<br>(13 - 14)% increased Global Critical Strike Chance", '2<span class=\"alch\"></span>', ['gl', 'qu'], 'of the Veil'],
[4, "(20 - 22)% increased Elemental Damage if you've dealt a Critical Strike Recently<br>(15 - 16)% increased Global Critical Strike Chance", '4<span class=\"chaos\"></span>', ['gl', 'qu'], 'of the Veil'],
],
"Adds # to # Chaos Damage if you've dealt a Critical Strike Recently<br>#% increased Global Critical Strike Chance":
[
[1, "Adds (3 - 5) to (6 - 8) Chaos Damage if you've dealt a Critical Strike Recently<br>(9 - 10)% increased Global Critical Strike Chance", '3<span class=\"aug\"></span>', ['gl', 'qu'], 'of the Veil'],
[2, "Adds (6 - 8) to (9 - 12) Chaos Damage if you've dealt a Critical Strike Recently<br>(11 - 12)% increased Global Critical Strike Chance", '4<span class=\"alt\"></span>', ['gl', 'qu'], 'of the Veil'],
[3, "Adds (9 - 12) to (13 - 16) Chaos Damage if you've dealt a Critical Strike Recently<br>(13 - 14)% increased Global Critical Strike Chance", '2<span class=\"alch\"></span>', ['gl', 'qu'], 'of the Veil'],
[4, "Adds (13 - 16) to (17 - 24) Chaos Damage if you've dealt a Critical Strike Recently<br>(15 - 16)% increased Global Critical Strike Chance", '4<span class=\"chaos\"></span>', ['gl', 'qu'], 'of the Veil'],
],
"#% increased Totem Placement speed":
[
[1, '(10 - 15)% increased Totem Placement speed', '3<span class=\"aug\"></span>', ['am', 'bo'], 'of the Veil'],
[2, '(16 - 23)% increased Totem Placement speed', '2<span class=\"alch\"></span>', ['am', 'bo'], 'of the Veil'],
[3, '(24 - 30)% increased Totem Placement speed', '3<span class=\"chaos\"></span>', ['am', 'bo'], 'of the Veil'],
],
"Your Critical Strike Chance is Lucky while Focussed":
[
[1, 'Your Critical Strike Chance is Lucky while Focussed', '9<span class=\"alt\"></span>', ['be'], 'of the Veil'],
],
"Shock nearby Enemies for # Seconds when you Focus":
[
[1, 'Shock nearby Enemies for 2 Seconds when you Focus', '4<span class=\"alt\"></span>', ['ri'], 'of the Veil'],
[2, 'Shock nearby Enemies for 3 Seconds when you Focus', '2<span class=\"alch\"></span>', ['ri'], 'of the Veil'],
[3, 'Shock nearby Enemies for 4 Seconds when you Focus', '2<span class=\"chaos\"></span>', ['ri'], 'of the Veil'],
],
"#% of Evasion Rating is Regenerated as Life per second while Focussed":
[
[1, '1% of Evasion Rating is Regenerated as Life per second while Focussed', '3<span class=\"chaos\"></span>', ['bd'], 'of the Veil'],
],
"Recover #% of Mana and Energy Shield when you Focus":
[
[1, 'Recover (20 - 22)% of Mana and Energy Shield when you Focus', '3<span class=\"aug\"></span>', ['bd'], 'of the Veil'],
[2, 'Recover (23 - 25)% of Mana and Energy Shield when you Focus', '4<span class=\"alt\"></span>', ['bd'], 'of the Veil'],
[3, 'Recover (26 - 28)% of Mana and Energy Shield when you Focus', '2<span class=\"alch\"></span>', ['bd'], 'of the Veil'],
[4, 'Recover (29 - 31)% of Mana and Energy Shield when you Focus', '4<span class=\"chaos\"></span>', ['bd'], 'of the Veil'],
],
"#% chance to deal Double Damage while Focussed":
[
[1, '(13 - 20)% chance to deal Double Damage while Focussed', '4<span class=\"alt\"></span>', ['tm', 'tr'], 'of the Veil'],
[2, '(21 - 28)% chance to deal Double Damage while Focussed', '2<span class=\"alch\"></span>', ['tm', 'tr'], 'of the Veil'],
[3, '(29 - 36)% chance to deal Double Damage while Focussed', '1<span class=\"divine\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '(7 - 10)% chance to deal Double Damage while Focussed', '4<span class=\"alt\"></span>', ['om', 'or', 'sh'], 'of the Veil'],
[2, '(11 - 14)% chance to deal Double Damage while Focussed', '2<span class=\"alch\"></span>', ['om', 'or', 'sh'], 'of the Veil'],
[3, '(15 - 18)% chance to deal Double Damage while Focussed', '1<span class=\"divine\"></span>', ['om', 'or', 'sh'], 'of the Veil'],
],
"#% increased Attack and Cast Speed while Focussed":
[
[1, '(13 - 20)% increased Attack and Cast Speed while Focussed', '4<span class=\"alt\"></span>', ['gl'], 'of the Veil'],
[2, '(21 - 28)% increased Attack and Cast Speed while Focussed', '2<span class=\"alch\"></span>', ['gl'], 'of the Veil'],
[3, '(29 - 36)% increased Attack and Cast Speed while Focussed', '2<span class=\"chaos\"></span>', ['gl'], 'of the Veil'],
],
"#% increased Duration of Ailments you inflict while Focussed":
[
[1, '(81 - 100)% increased Duration of Ailments you inflict while Focussed', '4<span class=\"alt\"></span>', ['he'], 'of the Veil'],
[2, '(101 - 120)% increased Duration of Ailments you inflict while Focussed', '2<span class=\"alch\"></span>', ['he'], 'of the Veil'],
[3, '(121 - 140)% increased Duration of Ailments you inflict while Focussed', '2<span class=\"chaos\"></span>', ['he'], 'of the Veil'],
],
"You are Immune to Ailments while Focussed":
[
[1, 'You are Immune to Ailments while Focussed', '3<span class=\"chaos\"></span>', ['bo'], 'of the Veil'],
],
"#% of Damage Leeched as Life while Focussed<br>You have Vaal Pact while Focussed":
[
[1, '10% of Damage Leeched as Life while Focussed<br>You have Vaal Pact while Focussed', '3<span class=\"chaos\"></span>', ['am'], 'of the Veil'],
],
"#% increased Effect of Fortify on you while Focussed":
[
[1, '80% increased Effect of Fortify on you while Focussed', '4<span class=\"alt\"></span>', ['bd'], 'of the Veil'],
[2, '100% increased Effect of Fortify on you while Focussed', '2<span class=\"alch\"></span>', ['bd'], 'of the Veil'],
[3, '120% increased Effect of Fortify on you while Focussed', '2<span class=\"chaos\"></span>', ['bd'], 'of the Veil'],
],
"Trigger Socketed Spells when you Focus":
[
[1, 'Trigger Socketed Spells when you Focus', '3<span class=\"chaos\"></span>', ['he'], 'of the Veil'],
],
"#% of Damage is taken from Mana before Life while Focussed":
[
[1, '(21 - 25)% of Damage is taken from Mana before Life while Focussed', '3<span class=\"chaos\"></span>', ['bd'], 'of the Veil'],
],
"Minions Recover #% of their Life when you Focus":
[
[1, 'Minions Recover 100% of their Life when you Focus', '3<span class=\"chaos\"></span>', ['gl'], 'of the Veil'],
],
"Skills Cost no Mana while Focussed":
[
[1, 'Skills Cost no Mana while Focussed', '3<span class=\"chaos\"></span>', ['am'], 'of the Veil'],
],
"#% increased Rarity of Items Dropped by Slain Rare or Unique Enemies":
[
[1, '(26 - 30)% increased Rarity of Items Dropped by Slain Rare or Unique Enemies', '2<span class=\"chance\"></span>', ['he'], "of Janus' Veil"],
[2, '(31 - 35)% increased Rarity of Items Dropped by Slain Rare or Unique Enemies', '4<span class=\"chance\"></span>', ['he'], "of Janus' Veil"],
[3, '(36 - 40)% increased Rarity of Items Dropped by Slain Rare or Unique Enemies', '8<span class=\"chance\"></span>', ['he'], "of Janus' Veil"],
[4, '(41 - 45)% increased Rarity of Items Dropped by Slain Rare or Unique Enemies', '4<span class=\"regal\"></span>', ['he'], "of Janus' Veil"],
],
"#% increased Attributes":
[
[1, '3% increased Attributes', '3<span class=\"aug\"></span>', ['bd'], "of Hillock's Veil"],
[2, '4% increased Attributes', '4<span class=\"alt\"></span>', ['bd'], "of Hillock's Veil"],
[3, '5% increased Attributes', '2<span class=\"alch\"></span>', ['bd'], "of Hillock's Veil"],
[4, '6% increased Attributes', '4<span class=\"chaos\"></span>', ['bd'], "of Hillock's Veil"],
],
"#% Chance to Trigger Level # Summon Spectral Wolf on Kill":
[
[1, '10% Chance to Trigger Level 18 Summon Spectral Wolf on Kill', '3<span class=\"chaos\"></span>', ['am'], "of Jorgin's Veil"],
],
"+#% to Critical Strike Multiplier if you've Shattered an Enemy Recently<br>#% increased Global Critical Strike Chance":
[
[1, "+(12 - 14)% to Critical Strike Multiplier if you've Shattered an Enemy Recently<br>(8 - 9)% increased Global Critical Strike Chance", '3<span class=\"aug\"></span>', ['ri'], "of Cameria's Veil"],
[2, "+(15 - 17)% to Critical Strike Multiplier if you've Shattered an Enemy Recently<br>(10 - 11)% increased Global Critical Strike Chance", '4<span class=\"alt\"></span>', ['ri'], "of Cameria's Veil"],
[3, "+(18 - 20)% to Critical Strike Multiplier if you've Shattered an Enemy Recently<br>(12 - 13)% increased Global Critical Strike Chance", '2<span class=\"alch\"></span>', ['ri'], "of Cameria's Veil"],
[4, "+(21 - 23)% to Critical Strike Multiplier if you've Shattered an Enemy Recently<br>(14 - 16)% increased Global Critical Strike Chance", '4<span class=\"chaos\"></span>', ['ri'], "of Cameria's Veil"],
],
"#% increased Chaos Damage<br>#% increased Global Physical Damage":
[
[1, '(5 - 7)% increased Chaos Damage<br>(5 - 7)% increased Global Physical Damage', '3<span class=\"aug\"></span>', ['ri'], "of Aisling's Veil"],
[2, '(8 - 10)% increased Chaos Damage<br>(8 - 10)% increased Global Physical Damage', '4<span class=\"alt\"></span>', ['ri'], "of Aisling's Veil"],
[3, '(11 - 13)% increased Chaos Damage<br>(11 - 13)% increased Global Physical Damage', '2<span class=\"alch\"></span>', ['ri'], "of Aisling's Veil"],
[4, '(14 - 16)% increased Chaos Damage<br>(14 - 16)% increased Global Physical Damage', '4<span class=\"chaos\"></span>', ['ri'], "of Aisling's Veil"],
],
"#% increased Fire Damage<br>#% increased Lightning Damage":
[
[1, '(5 - 7)% increased Fire Damage<br>(5 - 7)% increased Lightning Damage', '3<span class=\"aug\"></span>', ['ri'], "of Riker's Veil"],
[2, '(8 - 10)% increased Fire Damage<br>(8 - 10)% increased Lightning Damage', '4<span class=\"alt\"></span>', ['ri'], "of Riker's Veil"],
[3, '(11 - 13)% increased Fire Damage<br>(11 - 13)% increased Lightning Damage', '2<span class=\"alch\"></span>', ['ri'], "of Riker's Veil"],
[4, '(14 - 16)% increased Fire Damage<br>(14 - 16)% increased Lightning Damage', '4<span class=\"chaos\"></span>', ['ri'], "of Riker's Veil"],
],
"#% of Life Regenerated per second during Flask Effect":
[
[1, '3% of Life Regenerated per second during Flask Effect', "6<span class=\"bauble\"></span>", ['fl'], 'of the Veil'],
],
"#% Chance to avoid being Stunned during Flask Effect<br>#% increased Movement Speed during Flask effect":
[
[1, '50% Chance to avoid being Stunned during Flask Effect<br>(8 - 12)% increased Movement Speed during Flask effect', "6<span class=\"bauble\"></span>", ['fl'], 'of the Veil'],
],
"#% reduced Mana Cost of Skills during Flask Effect":
[
[1, '(20 - 25)% reduced Mana Cost of Skills during Flask Effect', "6<span class=\"bauble\"></span>", ['fl'], 'of the Veil'],
],
"#% increased Rarity of Items found during Flask Effect":
[
[1, '(20 - 30)% increased Rarity of Items found during Flask Effect', "6<span class=\"bauble\"></span>", ['fl'], 'of the Veil'],
],
"#% increased Critical Strike Chance during Flask Effect":
[
[1, '(60 - 80)% increased Critical Strike Chance during Flask Effect', "6<span class=\"bauble\"></span>", ['fl'], 'of the Veil'],
],
"#% of Damage Taken from Hits is Leeched as Life during Flask Effect":
[
[1, '15% of Damage Taken from Hits is Leeched as Life during Flask Effect', "6<span class=\"bauble\"></span>", ['fl'], 'of the Veil'],
],
"#% increased Attack Speed<br>+#% to Quality":
[
[1, '(8 - 10)% increased Attack Speed<br>+(7 - 12)% to Quality', '1<span class=\"divine\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
[2, '(13 - 16)% increased Attack Speed<br>+(13 - 18)% to Quality', '1<span class=\"exalt\"></span>', ['om', 'tm'], 'of the Veil'],
],
"#% increased Critical Strike Chance<br>+#% to Quality":
[
[1, '(15 - 20)% increased Critical Strike Chance<br>+(7 - 12)% to Quality', '2<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
[2, '(21 - 25)% increased Critical Strike Chance<br>+(13 - 18)% to Quality', '1<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
],
"+# to Accuracy Rating<br>+#% to Quality":
[
[1, '+(30 - 50) to Accuracy Rating<br>+(7 - 9)% to Quality', "5<span class=\"whetstone\"></span>", ['om', 'or', 'tm', 'tr'], 'of the Veil'],
[2, '+(51 - 100) to Accuracy Rating<br>+(10 - 12)% to Quality', '4<span class=\"alt\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
[3, '+(101 - 160) to Accuracy Rating<br>+(13 - 15)% to Quality', '4<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
[4, '+(161 - 250) to Accuracy Rating<br>+(16 - 18)% to Quality', '4<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
],
"#% increased Attack Speed<br>#% chance to Trigger Level # Blood Rage when you Kill an Enemy":
[
[1, '(8 - 10)% increased Attack Speed<br>10% chance to Trigger Level 1 Blood Rage when you Kill an Enemy', '1<span class=\"divine\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
[2, '(13 - 16)% increased Attack Speed<br>10% chance to Trigger Level 1 Blood Rage when you Kill an Enemy', '1<span class=\"exalt\"></span>', ['om', 'tm'], 'of the Veil'],
],
"#% increased Cast Speed<br>#% chance to gain Arcane Surge when you Kill an Enemy":
[
[1, '(10 - 12)% increased Cast Speed<br>10% chance to gain Arcane Surge when you Kill an Enemy', '4<span class=\"alt\"></span>', ['tm', 'tr'], 'of the Veil'],
[2, '(13 - 15)% increased Cast Speed<br>10% chance to gain Arcane Surge when you Kill an Enemy', '4<span class=\"alch\"></span>', ['tm', 'tr'], 'of the Veil'],
[3, '(16 - 19)% increased Cast Speed<br>10% chance to gain Arcane Surge when you Kill an Enemy', '4<span class=\"chaos\"></span>', ['tm', 'tr'], 'of the Veil'],
[1, '(7 - 8)% increased Cast Speed<br>10% chance to gain Arcane Surge when you Kill an Enemy', '4<span class=\"alt\"></span>', ['om', 'or'], 'of the Veil'],
[2, '(9 - 10)% increased Cast Speed<br>10% chance to gain Arcane Surge when you Kill an Enemy', '4<span class=\"alch\"></span>', ['om', 'or'], 'of the Veil'],
[3, '(11 - 13)% increased Cast Speed<br>10% chance to gain Arcane Surge when you Kill an Enemy', '4<span class=\"chaos\"></span>', ['om', 'or'], 'of the Veil'],
],
"Trigger a Socketed Spell when you Use a Skill":
[
[1, 'Trigger a Socketed Spell when you Use a Skill', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr'], 'of the Veil'],
],
"+#% to Fire and Chaos Resistances":
[
[1, '+(8 - 9)% to Fire and Chaos Resistances', '6<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[2, '+(10 - 12)% to Fire and Chaos Resistances', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[3, '+(13 - 15)% to Fire and Chaos Resistances', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
],
"+#% to Lightning and Chaos Resistances":
[
[1, '+(8 - 9)% to Lightning and Chaos Resistances', '6<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[2, '+(10 - 12)% to Lightning and Chaos Resistances', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[3, '+(13 - 15)% to Lightning and Chaos Resistances', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
],
"+#% to Cold and Chaos Resistances":
[
[1, '+(8 - 9)% to Cold and Chaos Resistances', '6<span class=\"transmute\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[2, '+(10 - 12)% to Cold and Chaos Resistances', '1<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
[3, '+(13 - 15)% to Cold and Chaos Resistances', '3<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'of the Veil'],
],
"+# to Strength<br>+#% to Quality":
[
[1, '+(10 - 15) to Strength<br>+(7 - 10)% to Quality', "6<span class=\"scrap\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
[2, '+(16 - 20) to Strength<br>+(11 - 14)% to Quality', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
[3, '+(21 - 25) to Strength<br>+(15 - 18)% to Quality', '4<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
],
"+# to Dexterity<br>+#% to Quality":
[
[1, '+(10 - 15) to Dexterity<br>+(7 - 10)% to Quality', "6<span class=\"scrap\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
[2, '+(16 - 20) to Dexterity<br>+(11 - 14)% to Quality', '2<span class=\"alch\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
[3, '+(21 - 25) to Dexterity<br>+(15 - 18)% to Quality', '4<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
],
"+# to Intelligence<br>+#% to Quality":
[
[1, '+(10 - 15) to Intelligence<br>+(7 - 10)% to Quality', "6<span class=\"scrap\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
[2, '+(16 - 20) to Intelligence<br>+(11 - 14)% to Quality', '2<span class=\"chaos\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
[3, '+(21 - 25) to Intelligence<br>+(15 - 18)% to Quality', '1<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'of the Veil'],
],
"#% of Life Regenerated per second during any Flask Effect":
[
[1, '1% of Life Regenerated per second during any Flask Effect', "2<span class=\"bauble\"></span>", ['be'], 'of the Veil'],
[2, '1.5% of Life Regenerated per second during any Flask Effect', '4<span class=\"alch\"></span>', ['be'], 'of the Veil'],
[3, '2% of Life Regenerated per second during any Flask Effect', '4<span class=\"chaos\"></span>', ['be'], 'of the Veil'],
],
"#% increased Trap Throwing Speed - Veiled":
[
[1, '(7 - 8)% increased Trap Throwing Speed', '3<span class=\"aug\"></span>', ['am', 'be'], 'of the Veil'],
[2, '(9 - 10)% increased Trap Throwing Speed', '2<span class=\"alch\"></span>', ['am', 'be'], 'of the Veil'],
[3, '(11 - 12)% increased Trap Throwing Speed', '4<span class=\"chaos\"></span>', ['am', 'be'], 'of the Veil'],
],
"#% increased Mine Laying Speed - Veiled":
[
[1, '(7 - 8)% increased Mine Laying Speed', '3<span class=\"aug\"></span>', ['am', 'he'], 'of the Veil'],
[2, '(9 - 10)% increased Mine Laying Speed', '2<span class=\"alch\"></span>', ['am', 'he'], 'of the Veil'],
[3, '(11 - 12)% increased Mine Laying Speed', '4<span class=\"chaos\"></span>', ['am', 'he'], 'of the Veil'],
],
"#% increased Brand Attachment range":
[
[1, '(7 - 8)% increased Brand Attachment range', '3<span class=\"aug\"></span>', ['am', 'gl'], 'of the Veil'],
[2, '(9 - 10)% increased Brand Attachment range', '2<span class=\"alch\"></span>', ['am', 'gl'], 'of the Veil'],
[3, '(11 - 12)% increased Brand Attachment range', '4<span class=\"chaos\"></span>', ['am', 'gl'], 'of the Veil'],
],
"Prefixes Cannot Be Changed":
[
[1, 'Prefixes Cannot Be Changed', '2<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Prophecy - Unbearable Whispers V'],
],
"Can have multiple Crafted Modifiers":
[
[1, 'Can have multiple Crafted Modifiers', '2<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Prophecy - The Pale Court'],
],
"Cannot roll Attack Modifiers":
[
[1, 'Cannot roll Attack Modifiers', '1<span class=\"exalt\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Prophecy - The Plaguemaw V'],
],
"Cannot roll Caster Modifiers":
[
[1, 'Cannot roll Caster Modifiers', '5<span class=\"blessed\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Prophecy - The Feral Lord V'],
],
},
"Other":
{
"Sockets":
[
[1, 'Two Sockets', "1<span class=\"jorb\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - First Node (Azurite Cavity)'],
[1, 'Three Sockets', "3<span class=\"jorb\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - First Node (Azurite Cavity)'],
[2, 'Four Sockets', "10<span class=\"jorb\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Vault'],
[3, 'Five Sockets', "70<span class=\"jorb\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Fissure'],
[4, 'Six Sockets', "350<span class=\"jorb\"></span>", ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Abyssal City'],
],
"Linked Sockets":
[
[1, 'Two Linked Sockets', '1<span class=\"fuse\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Frozen Hollow - Howling Den'],
[2, 'Three Linked Sockets', '3<span class=\"fuse\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Magma Fissure - Sweltering Burrow'],
[3, 'Four Linked Sockets', '5<span class=\"fuse\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Fungal Caverns - Beast Burrow'],
[4, 'Five Linked Sockets', '150<span class=\"fuse\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Abyssal Depths - Haunted Remains'],
[5, 'Six Linked Sockets', '1500<span class=\"fuse\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Primeval Ruins'],
],
"At Least One x Socket":
[
[1, 'At Least One Red Socket', '4<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'ri', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
[1, 'At Least One Green Socket', '4<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'ri', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
[1, 'At Least One Blue Socket', '4<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'bo', 'gl', 'he', 'ri', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
],
"At Least Two x Sockets":
[
[1, 'At Least Two Red Sockets', '25<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
[1, 'At Least Two Green Sockets', '25<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
[1, 'At Least Two Blue Sockets', '25<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
],
"At Least Three x Sockets":
[
[1, 'At Least Three Red Sockets', '120<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
[1, 'At Least Three Green Sockets', '120<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
[1, 'At Least Three Blue Sockets', '120<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
],
"At Least One x and One y Socket":
[
[1, 'At Least One Red and One Green Socket', '15<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
[1, 'At Least One Red and One Blue Socket', '15<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
[1, 'At Least One Green and One Blue Socket', '15<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Any Biome - Azurite Cavity'],
],
"At Least Two x and One y Socket":
[
[1, 'At Least Two Red and One Green Socket', '100<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
[1, 'At Least Two Red and One Blue Socket', '100<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
[1, 'At Least Two Green and One Red Socket', '100<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
[1, 'At Least Two Green and One Blue Socket', '100<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
[1, 'At Least Two Blue and One Green Socket', '100<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
[1, 'At Least Two Blue and One Red Socket', '100<span class=\"chrom\"></span>', ['om', 'or', 'tm', 'tr', 'bd', 'bo', 'gl', 'he', 'sh'], 'Mines - Vaal Outpost'],
],
"Remove Crafted Mods":
[
[1, 'Remove Crafted Mods', '1<span class=\"scour\"></span>', ['om', 'or', 'tm', 'tr', 'am', 'bd', 'be', 'bo', 'gl', 'he', 'qu', 'ri', 'sh'], 'Default'],
],
},
}
|
[
"[email protected]"
] | |
699c3359bb35aef01fa3958e089dbe89a112baae
|
8728e817b9f8faac0f2bcf3ee5e9980b148b735a
|
/journal/models.py
|
86ca7270400988150eead6bb34926b0a14289ba2
|
[
"MIT"
] |
permissive
|
anjaliujjainia/lifetab
|
9b9c06879b7ba07ab187bae09cf509222dbf83d5
|
186481b262069db65aa670cfa6a181813ffbb4b0
|
refs/heads/master
| 2021-08-25T07:00:55.938945 | 2015-01-26T18:31:45 | 2015-01-26T18:31:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 488 |
py
|
from django.db import models
from model_utils.models import TimeStampedModel
class Entry(TimeStampedModel):
    """A journal entry with a mood tag.
    NOTE(review): TimeStampedModel presumably adds created/modified
    timestamp fields -- confirm against django-model-utils.
    """
    # (stored 4-char code, human-readable label) pairs for `feeling`.
    FEELING_CHOICES = (
        ('HAPP', "happy"),
        ('SADD', "sad"),
        ('NOST', "nostalgic"),
        ('CURI', "curious"),
        ('WOND', "wonderful"),
    )
    # Flag marking the entry as protected (its semantics are not visible here).
    is_protected = models.BooleanField(default=False)
    title = models.CharField(max_length=2000)
    content = models.TextField(max_length=100000)
    # One of the FEELING_CHOICES codes; defaults to 'HAPP' (happy).
    feeling = models.CharField(max_length=4,
                               choices=FEELING_CHOICES,
                               default='HAPP')
|
[
"[email protected]"
] | |
e39c0eb015805450cfadbbbadee7c3c216b162cf
|
0e1fbfa23989dd5679d272b12c1e3d2080f2feb2
|
/tests/data/config/t.py
|
9f085ae675b33900f73434c3a7d631f7f24fb98d
|
[
"Apache-2.0"
] |
permissive
|
LuGuo1920/mmcv
|
dae1003f63ce23d5cfc58b27c0c620a61cc5323e
|
76d9bf1efb052785fea95cb157288a102976a49e
|
refs/heads/master
| 2023-05-30T08:11:36.148373 | 2021-06-28T09:31:36 | 2021-06-28T09:31:36 | 381,047,895 | 1 | 0 |
Apache-2.0
| 2021-06-28T13:49:28 | 2021-06-28T13:49:27 | null |
UTF-8
|
Python
| false | false | 173 |
py
|
_base_ = ['./l1.py', './l2.yaml', './l3.json', './l4.py']
item3 = False
item4 = 'test'
item8 = '{{fileBasename}}'
item9 = {{ _base_.item2 }}
item10 = {{ _base_.item7.b.c }}
|
[
"[email protected]"
] | |
26cb68b12f6852ef885417963ed3f227dde4232b
|
ad6681ec221fddc78956d45182f22bd8f1aae8e1
|
/基础班/python基础班作业/zuoye5.py
|
d7778e78f4774e78cf7432ba9bdc60433604db33
|
[] |
no_license
|
caoxp930/MyPythonCode
|
cb2428fd7078100df0b118f64713b7db76fe1e23
|
6b7e17b23fbaddcc69812ba7a14a0a5ad548ad4b
|
refs/heads/master
| 2023-03-15T01:22:17.847582 | 2021-03-02T12:37:09 | 2021-03-02T12:37:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 151 |
py
|
# -*- coding: utf-8 -*-
# Print the classic 9x9 multiplication table, one row per multiplier,
# columns separated by tabs, e.g. "1 * 3 = 3\t2 * 3 = 6\t3 * 3 = 9".
for row in range(1, 10):
    for col in range(1, row + 1):
        print(col, '*', row, '=', row * col, end='\t')
    print()  # terminate the row after its last (col == row) entry
|
[
"[email protected]"
] | |
de9a0e66038745dc64eb6405022bb285c909ecff
|
a4ac472bf2da27c008701bec8fd6d127a8d82279
|
/public_html/textmine.py
|
68c899bd21ef41a6738a1a9810bf9be855107e9b
|
[] |
no_license
|
LeviKuperus/MaurisAnabolic
|
358d346428e7cd03f239aea41eca1f90a4350c80
|
ee64ba408e0e8274fc143a641f7a8593eb73545e
|
refs/heads/master
| 2021-01-19T08:55:02.756897 | 2014-06-25T12:20:11 | 2014-06-25T12:20:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,737 |
py
|
#!/usr/bin/env python
# numpy and biopython are required -- pip install numpy biopython
# -*- coding: utf-8 -*-
from Bio import Entrez
from Bio import Medline
MAX_COUNT = 10
TERM = ("mtor")
abstracts = []
titles = []
def fetch_abstract(pmid):
    """Fetch one PubMed record by id and return its abstract text.

    Side effect: appends the article title to the module-level `titles`
    list when an abstract is successfully extracted.
    Returns None on IndexError (empty AbstractText) and the placeholder
    string "No abstract found..." when the record lacks an Abstract key.
    """
    handle = Entrez.efetch(db='pubmed', id=pmid, retmode='xml')
    xml_data = Entrez.read(handle)[0]
    try:
        article = xml_data['MedlineCitation']['Article']
        abstract = article['Abstract']['AbstractText'][0]
        title = article['ArticleTitle']
        titles.append(title)
        return abstract
    except IndexError:
        return None
    except KeyError:
        return "No abstract found..."
def main():
    """Search PubMed for TERM and return abstracts and titles joined into
    one UTF-8 encoded byte string.

    The author list is collected and sorted but only used by the
    commented-out debug print below.
    """
    #print ('Getting {0} publications containing {1}...'.format(MAX_COUNT, TERM))
    Entrez.email = '[email protected]'
    h = Entrez.esearch(db='pubmed', retmax=MAX_COUNT, term=TERM)
    result = Entrez.read(h)
    #print ('Total number of publications containing {0}: {1}'.format(TERM, result['Count']))
    ids = result['IdList']
    h = Entrez.efetch(db='pubmed', id=ids, rettype='medline', retmode='text')
    records = Medline.parse(h)
    authors = []
    # Deduplicate author names across all returned records.
    for record in records:
        au = record.get('AU', '?')
        for a in au:
            if a not in authors:
                authors.append(a)
    authors.sort()
    #print ('Authors: {0}'.format(', '.join(authors)))
    publications = ""
    # fetch_abstract appends to the module-level `abstracts`/`titles` lists.
    for id in ids:
        abstracts.append(fetch_abstract(id))
    publications += '\n-----ABSTRACT-----'.join(abstracts)
    publications += '\n-----TITLE-----'.join(titles)
    #print('\n-----ABSTRACT-----'.join(abstracts))
    #print('\n-----TITLE-----'.join(titles))
    return publications.encode('utf-8')
main()
|
[
"[email protected]"
] | |
50a5d75f3d7217583197bf761b30017f22f0c514
|
0f53f71e67b43223acb0f0159c9ee6f41f64240a
|
/SentimentAnalyzer.py
|
3bdf34e84a5c4472d90d16ca1f3a161ab628ce55
|
[] |
no_license
|
sarmadabbas80/Machine-Learning-Examples
|
ab964457bf3a3cd911eed4c2228b8435d741134d
|
fe7f8dc59d272533f7ea3766743a4fb613d51d10
|
refs/heads/master
| 2020-03-08T01:23:37.732619 | 2018-05-08T01:14:44 | 2018-05-08T01:14:44 | 127,829,089 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
import tweepy
from textblob import TextBlob
# NOTE(review): real-looking Twitter API credentials are hard-coded in source;
# they should be revoked/rotated and loaded from environment variables or a
# config file instead of being committed.
consumer_key = 'mba5W3n1iJ3K2kFw44aV7cLHi'
consumer_secret = 'QeRcuB2xabQpFVPMQEinCMKVfIOuzAU1hQkgBdDwW1J23bzyWV'
access_token = '272531257-MPf9Ag88Zmlih2c4454h8XpgzR7HTQMELQe0UL4o'
access_token_secret = 'ZDQ4xOFLMfiKFqk697amgLSp81YW5U2fFv9JUak6lXf8Q'
# Authenticate and build the tweepy API client.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Print each tweet matching the hashtag together with its TextBlob
# sentiment (polarity, subjectivity).
public_tweets = api.search('#TrumpRussia')
for tweet in public_tweets:
    print(tweet.text)
    analysis = TextBlob(tweet.text)
    print(analysis.sentiment)
|
[
"[email protected]"
] | |
eec978dc429c647715528b35b8aaba6d196cfb81
|
6e1887eaa730ff764e6cff692a07e85cc2713e83
|
/tests/test_run_aster_solver.py
|
42c000e5b732a1ef02cb3f5831f924793c27c829
|
[] |
no_license
|
zhanxiangqian/salome
|
145759ec75698d49ab686ed196f78b815f42414d
|
62592c0f17be823caad8ea71cd52841acbab6185
|
refs/heads/master
| 2021-01-23T07:44:50.832028 | 2017-04-14T09:29:47 | 2017-04-14T09:29:47 | 86,442,922 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,804 |
py
|
"""Run the Aster solver and control the process execution.
"""
import os.path as osp
import unittest as UT
import subprocess as SP
import aster_s.astk as AS
import salome_aster_tests as ST
from aster_tests import build_srv
# Process-status sentinels, compared by identity (`is`) throughout the tests.
RUNNING, ENDED, UNKNOWN = [object() for idx in range(3)]
class Process(object):
    """Imitate an ASTK process by running a Python script as a subprocess.
    """
    def __init__(self, fname):
        # stderr is captured so failing client scripts do not pollute
        # the test output.
        self._popen = SP.Popen("python %s" % fname,
                               shell=True, stderr=SP.PIPE)
    def status(self):
        """Return the current status: RUNNING while the subprocess is
        alive, ENDED on exit code 0, UNKNOWN on any non-zero exit code."""
        prc = self._popen
        if prc.poll() is None:
            return RUNNING
        elif prc.poll() == 0:
            return ENDED
        else:
            return UNKNOWN
    def wait(self):
        """Wait until the process terminates"""
        self._popen.wait()
class TestControlProcessThroughSockets(UT.TestCase):
    """Check that a client subprocess can be paused and resumed through a
    socket server (breakpoint-style control)."""
    def setUp(self):
        # Fresh server socket and scratch directory for every test.
        self.srv = build_srv()
        self.tmp_dir = ST.TmpDir("test_aster_sockets")
    def tearDown(self):
        self.tmp_dir.clean()
        self.srv.close()
    def test_connect_a_client_socket_to_a_server(self):
        # Useful for checking that the Aster command file
        # has been executed
        from aster_tests import Cnt
        srv = self.srv
        self.assert_(not srv.has_cnt())
        cnt = Cnt(srv.port)
        self.assert_(srv.wait_cnt())
        self.assert_(srv.has_cnt())
        cnt.close()
    def _start_clt(self, rep, lines, srv=None):
        """Write a small client script (connect, run `lines`, close) into
        the `rep` scratch subdirectory and start it as a subprocess."""
        srv = srv or self.srv
        comm = [
            "from aster_tests import Cnt",
            "cnt = Cnt(%i)" % srv.port,
        ] + lines + [
            "cnt.close()",
        ]
        fname = ST.write_comm(self.tmp_dir.add(rep), comm, "clt.py")
        return Process(fname)
    def test_control_client_process_by_breakpoints(self):
        # The client blocks twice on the server; each release_cnt() lets
        # it pass one breakpoint.
        lines = [
            "cnt.wait_srv()",
            "cnt.wait_srv()",
        ]
        proc = self._start_clt("control_client", lines)
        self.assert_(self.srv.wait_cnt())
        self.assert_(proc.status() is RUNNING)
        self.srv.release_cnt()
        self.assert_(proc.status() is RUNNING)
        self.srv.release_cnt()
        proc.wait()
        self.assert_(proc.status() is ENDED)
    def test_handle_several_process_status_correctly(self):
        # Two clients on two servers: the first runs the bare name
        # 'create_failure' (presumably undefined, so it exits non-zero),
        # the second terminates cleanly.
        comm = (
            ["cnt.wait_srv()", "create_failure"],
            ["cnt.wait_srv()"],
        )
        srvs = [build_srv() for idx in range(2)]
        procs = [self._start_clt("c%i" % idx, lines, srv)
                 for idx, (lines, srv) in enumerate(zip(comm, srvs))]
        for srv in srvs:
            self.assert_(srv.wait_cnt())
        self.assert_(procs[1].status() is RUNNING)
        srvs[1].release_cnt()
        procs[1].wait()
        self.assert_(procs[1].status() is ENDED)
        self.assert_(procs[0].status() is RUNNING)
        srvs[0].release_cnt()
        procs[0].wait()
        self.assert_(procs[0].status() is UNKNOWN)
class TestRunAsterCase(UT.TestCase):
    """Contract that a JobBehavior must implement
    """
    def setUp(self):
        # One socket server and one scratch directory per test.
        self.srv = build_srv()
        self.tmp_dir = ST.TmpDir("run_aster_solver")
    def tearDown(self):
        self.tmp_dir.clean()
        self.srv.close()
    def check(self, cond, callback):
        """Call the callback if the condition is false"""
        try:
            self.assert_(cond)
        except:
            # Run the diagnostic callback, then re-raise the failure.
            callback()
            raise
    def _write_comm(self, rep, lines, srv=None):
        """Write an Aster command file especially for tests"""
        srv = srv or self.srv
        return ST.write_clt_comm(rep, lines, srv)
    def _bld_case(self, rep, lines, name="c", srv=None):
        """Build an Aster case especially for tests"""
        case_rep = self.tmp_dir.add(rep)
        case = AS.build_case(name)
        case.use(AS.CommFile(self._write_comm(case_rep, lines, srv)))
        case.remove(AS.RRMedFile)
        return case
    def test_run_an_aster_command_file(self):
        """A trivial command file must end in SUCCESS and keep RMedFile
        among the case's result types."""
        case = self._bld_case("run_aster", [])
        job = case.run()
        self.assert_(self.srv.wait_cnt())
        def see():
            # Dump the solver message file when the run did not succeed.
            print case.get_result(AS.MessFile).read()
        self.check(job.wait_result() is AS.SUCCESS, see)
        self.assert_(AS.RMedFile in case._result_types)
    def write_forma01a(self, rep):
        """Write a minimal ASTK .export file for the 'forma01a' job into
        `rep` and return its path."""
        export = osp.join(rep, "forma01a.export")
        data = {
            "version" : AS.give_aster_version(),
            "comm" : self._write_comm(rep, []),
            "results" : osp.join(rep, "forma01a"),
        }
        fid = open(export, "w")
        fid.write("""\
P actions make_etude
P version %(version)s
P nomjob forma01a
P debug nodebug
P mode interactif
P ncpus 1
A memjeveux 64
A tpmax 120
F comm %(comm)s D 1
F mess %(results)s.mess R 6
F resu %(results)s.resu R 8
""" % data)
        fid.close()
        return export
    def test_run_from_astk_export_file(self):
        """A case loaded from a .export file runs to SUCCESS."""
        rep = self.tmp_dir.add("from_astk_export_file")
        export = self.write_forma01a(rep)
        case = AS.build_case("from-export", AS.FromExport)
        case.load_export(export)
        job = case.run()
        self.assert_(self.srv.wait_cnt())
        self.assert_(job.wait_result() is AS.SUCCESS)
    def test_run_from_aster_profil(self):
        """A case built from an AsterProfil object runs to SUCCESS."""
        rep = self.tmp_dir.add("from_astk_profil")
        prof = AS.build_aster_profil(self.write_forma01a(rep))
        export = osp.join(rep, "profil.export")
        case = AS.build_case("from-profil", AS.FromProfil)
        case.use_profil(prof)
        case.use_fname(export)
        job = case.run()
        self.assert_(self.srv.wait_cnt())
        self.assert_(job.wait_result() is AS.SUCCESS)
    def test_simulate_a_failure(self):
        # 'generate_failure' is an undefined name in the command file, so
        # the run must end in FAILURE with the NameError in the mess file.
        case = self._bld_case("simulate_aster_failure", [
            "generate_failure",
        ])
        job = case.run()
        self.assert_(self.srv.wait_cnt())
        self.assert_(job.wait_result() is AS.FAILURE)
        self.assert_("'generate_failure' is not defined" in \
                     case.get_result(AS.MessFile).read())
        self.assert_(AS.RMedFile not in case._result_types)
    def test_query_status_during_process(self):
        """status() reports RUNNING while breakpoints hold the job, then
        ENDED after completion."""
        case = self._bld_case("query_status", [
            # Imitate a long calcul
            "cnt.wait_srv()",
            "cnt.wait_srv()",
        ])
        job = case.run()
        status = job.status()
        self.assert_(status is AS.RUNNING)
        self.assertEqual(status.from_astk, "RUN")
        self.assert_(self.srv.wait_cnt())
        self.assert_(job.status() is AS.RUNNING)
        self.srv.release_cnt()
        self.assert_(job.status() is AS.RUNNING)
        self.srv.release_cnt()
        self.assert_(job.wait_result() is AS.SUCCESS)
        status = job.status()
        self.assert_(status is AS.ENDED)
        self.assertEqual(status.from_astk, "ENDED")
    def test_kill_a_job(self):
        """Killing a blocked job ends it with FAILURE or UNKNOWN."""
        case = self._bld_case("kill_a_job", [
            # A long calcul without end
            "cnt.wait_srv()",
        ])
        job = case.run()
        self.assert_(self.srv.wait_cnt())
        self.assert_(job.status() is AS.RUNNING)
        job.kill()
        self.assert_(job.status() is AS.ENDED)
        status = job.res_status()
        self.assert_(status in (AS.FAILURE, AS.UNKNOWN))
        self.assert_(status.from_astk in ("<F>_ERROR", "_"))
    def test_handle_several_jobs_status_correctly(self):
        """Three concurrent jobs (killed / failing / succeeding) must each
        report their own status independently."""
        comms = (
            # A long calcul that is going to be killed
            ["cnt.wait_srv()"],
            # A failure after some work
            ["cnt.wait_srv()", "generate_failure"],
            # A success after some work
            ["cnt.wait_srv()"],
        )
        srvs = [build_srv() for idx in range(3)]
        cases = [self._bld_case("jstatus%i" % idx, lines, "c%i" % idx, srv)
                 for idx, (lines, srv) in enumerate(zip(comms, srvs))]
        jobs = [case.run() for case in cases]
        for srv in srvs:
            self.assert_(srv.wait_cnt())
        self.assert_(jobs[1].status() is AS.RUNNING)
        srvs[1].release_cnt()
        self.assert_(jobs[2].status() is AS.RUNNING)
        srvs[2].release_cnt()
        jobs[2].wait_result()
        self.assert_(jobs[2].status() is AS.ENDED)
        self.assert_(jobs[2].res_status() is AS.SUCCESS)
        jobs[1].wait_result()
        self.assert_(jobs[1].status() is AS.ENDED)
        self.assert_(jobs[1].res_status() is AS.FAILURE)
        self.assert_(jobs[0].status() is AS.RUNNING)
        jobs[0].kill()
        self.assert_(jobs[0].status() is AS.ENDED)
        self.assert_(jobs[0].res_status() in (AS.FAILURE, AS.UNKNOWN))
if __name__ == "__main__":
UT.main()
|
[
"[email protected]"
] | |
993373ece26b6d2f719365e62ecfd6b8c80ba87e
|
dfd9e07ad1e6a7e7a1d864bde4bb2ec3e391a576
|
/problems/0001-0010/0001-two-sum/brute-force/solution.py
|
eadd84699c683e4f8ab4c98a3f785dc101dcb92c
|
[] |
no_license
|
shc261392/leetcode-solutions
|
4210e83553420f91e8dc2631d10290e84172e93f
|
57052e3c4bf584129c5ef6468051bc622f1259a4
|
refs/heads/master
| 2020-12-31T09:00:16.804893 | 2020-02-14T16:22:55 | 2020-02-14T16:22:55 | 238,964,163 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 351 |
py
|
class Solution:
    def twoSum(self, nums: 'List[int]', target: 'int') -> 'List[int]':
        """Return indices [i, j] (i < j) of two entries summing to target.

        Brute force, but each unordered pair is now examined once: the
        inner scan starts at index_a + 1, halving the work of the original
        full double loop and removing the index_a == index_b guard. The
        first pair found is unchanged. Returns None when no pair matches.
        """
        for index_a, value_a in enumerate(nums):
            for index_b in range(index_a + 1, len(nums)):
                if value_a + nums[index_b] == target:
                    return [index_a, index_b]
        return None  # explicit: no two distinct entries sum to target
|
[
"[email protected]"
] | |
f178b663d0ee93882d7f0f23f79762c86c9a62b3
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/ReverseRepo/YW_NHG_SHHG_019_GC028.py
|
697b950c9b5b4c9f6d0da0feb24a47bcfb16928d
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,026 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_NHG_SHHG_019_GC028(xtp_test_case):
    # YW_NHG_SHHG_019_GC028
    def test_YW_NHG_SHHG_019_GC028(self):
        """Shanghai reverse repo: quantity equal to 1,000,000 (28-day GC028)."""
        title = '上海逆回购--数量(等于100万张)-28天'
        # Expected values for this test case.
        # Possible expected states: initial / not traded / partially traded /
        # fully traded / partial-cancel reported / partially cancelled /
        # reported pending cancel / cancelled / invalid order /
        # cancel-of-invalid / internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B buy / S sell), expected state, Api.
        stkparm = QueryStkPriceQty('204028', '1', '12', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_REPO'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': stkparm['随机中间价'],
                'quantity': 1000000,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 0
|
[
"[email protected]"
] | |
fb4d4b99fef64675afb65af92c4e6b71f2d5ac46
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/M/markbrough/afd_1.py
|
711852c3ee8cd7a769192a8717034ae07c1ec594
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,542 |
py
|
import scraperwiki
from lxml import html
from urllib2 import urlopen, Request, URLError
import re
import string
URL = "http://www.afd.fr/base-projets/listerProjets.action?page=%s"
def cleanURL(data):
    """Strip a ';jsessionid=...' session token out of a URL.

    "a;jsessionid=XYZ?q" -> "a?q".  Returns the URL unchanged when no
    session token is present (the original raised AttributeError on
    ``None.group`` in that case).
    """
    match = re.match(r"(\S*);jsessionid=(\S*)\?(\S*)", data)
    if match is None:
        return data
    return match.group(1) + "?" + match.group(3)
def cleandata(data):
    """Strip surrounding whitespace; map None/empty values to ''.

    Fix: ``string.strip(data)`` is a Python-2-only module function removed
    in Python 3; the equivalent ``data.strip()`` method is used instead.
    """
    return data.strip() if data else ''
def cleanamount(data):
    """Normalise an amount string for storage.

    Removes the euro sign, thousands commas, and the page's fixed
    whitespace run (CR, LF, five tabs).
    """
    for junk in (u"\u20AC", ",", "\r\n\t\t\t\t\t"):
        data = data.replace(junk, "")
    return data
def removeImage(data):
print "Trying to remove image from", data
fixed = re.sub('<img alt="" src="img/pdf.gif">', '', data)
fixed = re.sub("\r", '', data)
fixed = re.sub("\n", '', data)
fixed = re.sub("\t", '', data)
print "Final data after removing image is", data
return fixed
# utf8 : database_field_name
translations = {
u'Libell\xe9 du projet': 'name',
u'Num\xe9ro de projet': 'id',
u'Pays de r\xe9alisation': 'country',
u'B\xe9n\xe9ficiaire': 'beneficiary',
"Secteur d'intervention": 'aim',
'Agence de gestion': 'agency',
'Classement environnemental': 'environmental_impact',
'Classement social': 'social_impact',
u"Commentaire sur l'\xe9x\xe9cution du projet": 'comment',
'Execution': 'in progress',
'Etat du projet': 'status',
'Montant global du projet': 'funding_total_euros',
"Financement de l'AFD": 'funding_from_afd_euros',
'Forme de concours': 'funding_type',
'Cofinancement': 'is_co_financed',
u"Date d'identification valid\xe9e": 'date_validated',
"Date d'octroi du financement": 'date_funded',
'Chef de projet': 'project_manager',
'Responsable agence': 'responsible_agency',
'Structure responsable': 'responsible_structure',
'non': 'no',
'oui': 'yes',
}
def translate(french_str, warn_if_no_translation=False):
    """Map a French page label to its English database field name.

    Falsy input yields ''. Unknown labels are returned unchanged;
    optionally a warning is printed so new labels can be added to the
    module-level `translations` dict.
    """
    if not french_str:
        return ''
    if french_str in translations:
        # Python 2: dict values are byte strings; decode to unicode.
        return translations[french_str].decode('utf8')
    else:
        if warn_if_no_translation:
            print 'Could not translate: %s = %r' % (french_str, french_str)
        return french_str
def scrape_project_page(data, project_url):
    """Scrape one project's detail page into `data` and persist it.

    Each two-column table row becomes a translated key/value pair; the
    first attached document link (if any) is captured; the record is then
    saved to the scraperwiki sqlite store keyed on (country, description).
    """
    req = Request(project_url)
    data['project_details'] = project_url
    doc = html.parse(urlopen(req))
    for tr in doc.findall('//table//tr'):
        field = []
        for cell_type in ('th', 'td'):
            cells = tr.findall(cell_type)
            if not cells:
                # ignore row <th>Commentaire...</th> with no <td>
                # TODO get the pdf links at this point
                continue
            # Only warn about untranslated header (key) cells.
            warn_if_no_translation = cell_type == 'th'
            if cells and cells[0].get('colspan') == '2':
                # ignore section titles (they span both columns)
                break
            cells = [translate(cleanamount(cleandata(cell.text)),
                               warn_if_no_translation) \
                     for cell in cells]
            field.append(' | '.join(cells))
        if len(field) == 2:
            if not field[0]:
                # don't save a blank key
                assert not field[1], 'Throwing away data without key: %r' % field[1]
                continue
            data[field[0]] = field[1]
            #print 'SAVE %s : %s' % tuple(field)
    document_field = doc.find('//tr//td//div/a')
    if document_field is not None:
        data["document_url"] = cleanURL("http://www.afd.fr"+document_field.get("href"))
        data["document_name"] = document_field.text_content()
        print "document name is", cleandata(document_field.text_content())
        print "document url is", cleanURL("http://www.afd.fr"+document_field.get("href"))
    scraperwiki.sqlite.save(unique_keys=["country", "description"],
                            data=data)
# loop over the pages of the "liste des projets"
page_number = 0
while True:
    page_number += 1
    req = Request(URL % (page_number))
    try:
        response = urlopen(req)
    except URLError, e:
        # import pdb; pdb.set_trace()
        # NOTE(review): if urlopen raised, `response` here is stale (from a
        # previous iteration) or undefined on the first page -- this check
        # looks like it was meant to inspect an HTTP error object instead.
        if response.status == 404:
            break
    doc = html.parse(response)
    # An empty listing table means we have run past the last page.
    if not(doc.findall('//tbody//tr')):
        break
    # loop over each project summary
    for tr in doc.findall('//tbody//tr'):
        cells = list(tr.findall('td'))
        if not len(cells):
            continue
        amount = re.sub(',', '', cells[2].text)
        project_url = 'http://www.afd.fr' + cells[1].find('a').get('href')
        data = {
            'country' : cleandata(cells[0].text),
            'description' : cleandata(cells[1].find('a').text),
            'project_url' : cleanURL(project_url),
            'funding_total_euros' : cleanamount(cleandata(amount)),
            'status' : cleandata(cells[3].text),
            'date_updated' : cells[4].text
        }
        # drill down into the project page
        try:
            scrape_project_page(data, project_url)
        except:
            # if that fails, save what we have!
            scraperwiki.sqlite.save(unique_keys=["country", "description"],
                                    data=data)
import scraperwiki
from lxml import html
from urllib2 import urlopen, Request, URLError
import re
import string
URL = "http://www.afd.fr/base-projets/listerProjets.action?page=%s"
def cleanURL(data):
    """Strip a ';jsessionid=...' session token out of a URL.

    "a;jsessionid=XYZ?q" -> "a?q".  Returns the URL unchanged when no
    session token is present (the original raised AttributeError on
    ``None.group`` in that case).
    """
    match = re.match(r"(\S*);jsessionid=(\S*)\?(\S*)", data)
    if match is None:
        return data
    return match.group(1) + "?" + match.group(3)
def cleandata(data):
    """Strip surrounding whitespace; map None/empty values to ''.

    Fix: ``string.strip(data)`` is a Python-2-only module function removed
    in Python 3; the equivalent ``data.strip()`` method is used instead.
    """
    return data.strip() if data else ''
def cleanamount(data):
    """Normalise an amount string for storage.

    Removes the euro sign, thousands commas, and the page's fixed
    whitespace run (CR, LF, five tabs).
    """
    for junk in (u"\u20AC", ",", "\r\n\t\t\t\t\t"):
        data = data.replace(junk, "")
    return data
def removeImage(data):
print "Trying to remove image from", data
fixed = re.sub('<img alt="" src="img/pdf.gif">', '', data)
fixed = re.sub("\r", '', data)
fixed = re.sub("\n", '', data)
fixed = re.sub("\t", '', data)
print "Final data after removing image is", data
return fixed
# utf8 : database_field_name
translations = {
u'Libell\xe9 du projet': 'name',
u'Num\xe9ro de projet': 'id',
u'Pays de r\xe9alisation': 'country',
u'B\xe9n\xe9ficiaire': 'beneficiary',
"Secteur d'intervention": 'aim',
'Agence de gestion': 'agency',
'Classement environnemental': 'environmental_impact',
'Classement social': 'social_impact',
u"Commentaire sur l'\xe9x\xe9cution du projet": 'comment',
'Execution': 'in progress',
'Etat du projet': 'status',
'Montant global du projet': 'funding_total_euros',
"Financement de l'AFD": 'funding_from_afd_euros',
'Forme de concours': 'funding_type',
'Cofinancement': 'is_co_financed',
u"Date d'identification valid\xe9e": 'date_validated',
"Date d'octroi du financement": 'date_funded',
'Chef de projet': 'project_manager',
'Responsable agence': 'responsible_agency',
'Structure responsable': 'responsible_structure',
'non': 'no',
'oui': 'yes',
}
def translate(french_str, warn_if_no_translation=False):
    """Map a French page label to its English database field name.

    Falsy input yields ''. Unknown labels are returned unchanged;
    optionally a warning is printed so new labels can be added to the
    module-level `translations` dict.
    """
    if not french_str:
        return ''
    if french_str in translations:
        # Python 2: dict values are byte strings; decode to unicode.
        return translations[french_str].decode('utf8')
    else:
        if warn_if_no_translation:
            print 'Could not translate: %s = %r' % (french_str, french_str)
        return french_str
def scrape_project_page(data, project_url):
    """Scrape one project's detail page into `data` and persist it.

    Each two-column table row becomes a translated key/value pair; the
    first attached document link (if any) is captured; the record is then
    saved to the scraperwiki sqlite store keyed on the project id.
    """
    req = Request(project_url)
    data['project_details'] = project_url
    doc = html.parse(urlopen(req))
    for tr in doc.findall('//table//tr'):
        field = []
        for cell_type in ('th', 'td'):
            cells = tr.findall(cell_type)
            if not cells:
                # ignore row <th>Commentaire...</th> with no <td>
                # TODO get the pdf links at this point
                continue
            # Only warn about untranslated header (key) cells.
            warn_if_no_translation = cell_type == 'th'
            if cells and cells[0].get('colspan') == '2':
                # ignore section titles (they span both columns)
                break
            cells = [translate(cleanamount(cleandata(cell.text)),
                               warn_if_no_translation) \
                     for cell in cells]
            field.append(' | '.join(cells))
        if len(field) == 2:
            if not field[0]:
                # don't save a blank key
                assert not field[1], 'Throwing away data without key: %r' % field[1]
                continue
            data[field[0]] = field[1]
            #print 'SAVE %s : %s' % tuple(field)
    document_field = doc.find('//tr//td//div/a')
    if document_field is not None:
        data["document_url"] = cleanURL("http://www.afd.fr"+document_field.get("href"))
        data["document_name"] = document_field.text_content()
        print "document name is", cleandata(document_field.text_content())
        print "document url is", cleanURL("http://www.afd.fr"+document_field.get("href"))
    scraperwiki.sqlite.save(unique_keys=["id"],
                            data=data)
# loop over the pages of the "liste des projets"
page_number = 0
while True:
    page_number += 1
    req = Request(URL % (page_number))
    try:
        response = urlopen(req)
    except URLError, e:
        # import pdb; pdb.set_trace()
        # NOTE(review): if urlopen raised, `response` here is stale (from a
        # previous iteration) or undefined on the first page -- this check
        # looks like it was meant to inspect an HTTP error object instead.
        if response.status == 404:
            break
    doc = html.parse(response)
    # An empty listing table means we have run past the last page.
    if not(doc.findall('//tbody//tr')):
        break
    # loop over each project summary
    for tr in doc.findall('//tbody//tr'):
        cells = list(tr.findall('td'))
        if not len(cells):
            continue
        amount = re.sub(',', '', cells[2].text)
        project_url = 'http://www.afd.fr' + cells[1].find('a').get('href')
        data = {
            'country' : cleandata(cells[0].text),
            'description' : cleandata(cells[1].find('a').text),
            'project_url' : cleanURL(project_url),
            'funding_total_euros' : cleanamount(cleandata(amount)),
            'status' : cleandata(cells[3].text),
            'date_updated' : cells[4].text
        }
        # drill down into the project page
        try:
            scrape_project_page(data, project_url)
        except:
            # if that fails, save what we have!
            scraperwiki.sqlite.save(unique_keys=["id"],
                                    data=data)
|
[
"[email protected]"
] | |
188fdf6b0b163a2125f6a2108407d757622cc485
|
2ae26de885b7e64525028cf3d13be6cb43d39b44
|
/xiaoxiang/Python人工智能(小象学院)/4、Python机器学习1/lect04_codes/qhp_Test_proj/main.py
|
d011f1ef095e9aa84f643a4e850da2342a736ff4
|
[] |
no_license
|
cheneyshark/Learn_Myself
|
79c25e8fabc224343937d97bff644b0a6d565ffc
|
df69ac077c52ff01807aa34235569d1205a78c25
|
refs/heads/master
| 2021-03-16T05:48:49.707129 | 2018-03-12T11:06:00 | 2018-03-12T11:06:00 | 112,348,541 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,404 |
py
|
# coding: utf-8
"""
作者: qhp
版本: 1.0
日期: 2018/03
文件名: main.py
功能: 主程序
任务:使用scikit-learn建立不同的机器学习模型进行手机价格等级预测
"""
import os
import config
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from matplotlib.font_manager import FontProperties
np.set_printoptions(suppress=True)
def get_font():
    """Work around missing Chinese glyphs in matplotlib on macOS.

    :return: FontProperties for the system PingFang font.
    """
    return FontProperties(fname='/System/Library/Fonts/PingFang.ttc')
def inspect_dataset(train_data, test_data):
    """Visual sanity check of the dataset.

    Prints record counts and draws side-by-side count plots of the price
    label for the train and test splits.

    :param train_data: training set
    :param test_data: test set
    :return: None
    """
    print('\n===================== 数据查看 =====================')
    print('训练集有{}条记录。'.format(len(train_data)))
    print('测试集有{}条记录。'.format(len(test_data)))
    # Visualise the label (price level) distribution of both splits.
    plt.figure(figsize=(10, 5))
    ax1 = plt.subplot(1, 2, 1)
    sns.countplot(x=config.label_col, data=train_data)
    plt.title('训练集数据', fontproperties=get_font())
    plt.xlabel('价格等级', fontproperties=get_font())
    plt.ylabel('数量', fontproperties=get_font())
    # Share the y axis so the two panels are directly comparable.
    plt.subplot(1, 2, 2, sharey=ax1)
    plt.title('测试集数据', fontproperties=get_font())
    sns.countplot(x=config.label_col, data=test_data)
    plt.xlabel('价格等级', fontproperties=get_font())
    plt.ylabel('数量', fontproperties=get_font())
    plt.tight_layout()
    plt.show()
def main():
    """Entry point: load the CSV dataset, split train/test, build
    feature/label arrays.

    NOTE(review): the arrays are built but never used -- the modelling
    step (the sklearn classifiers imported above) appears unfinished.
    """
    # Load the data.
    undressed_data = pd.read_csv(os.path.join(config.dataset_path, 'data.csv'))
    train_data, test_data = train_test_split(undressed_data, test_size=0.25, random_state=77)
    # Inspect the data (typo in the original call name; should be inspect_dataset).
    # inspect_datasetfeat_cols(train_data, test_data)
    # Build the feature matrices and label vectors.
    X_train = train_data[config.feat_cols].values
    X_test = test_data[config.feat_cols].values
    y_train = train_data[config.label_col].values
    y_test = test_data[config.label_col].values
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
c8214a41a82f875f402de97e2db11c439208e33c
|
cf2ec51dfcb2d6777b5045137d2bcfe62afdec8c
|
/upvcarshare/core/templatetags/core_tags.py
|
9e4b4acedbe5f435252e61b22be188f25d1f1041
|
[] |
no_license
|
morrme/upvcarshare
|
c4b8b1587370e7931d8b5d6c78b948188617795c
|
189c91c608d0b61f6b68ef5c49a2546fdbbe38a2
|
refs/heads/master
| 2021-01-22T22:07:52.611880 | 2017-05-29T14:57:36 | 2017-05-29T14:57:36 | 88,732,669 | 0 | 0 | null | 2017-04-19T10:33:58 | 2017-04-19T10:33:58 | null |
UTF-8
|
Python
| false | false | 1,295 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
from django import template
from django.conf import settings
from django.http import QueryDict
from journeys import DEFAULT_GOOGLE_MAPS_SRID
from journeys.helpers import make_point
register = template.Library()
@register.simple_tag
def google_static_map(point, width=600, height=300, zoom=13):
google_maps_point = make_point(point, origin_coord_srid=point.srid, destiny_coord_srid=DEFAULT_GOOGLE_MAPS_SRID)
base_uri = "https://maps.googleapis.com/maps/api/staticmap"
args = {
"maptype": "roadmap",
"zoom": zoom,
"size": "{}x{}".format(width, height),
"key": settings.GOOGLE_MAPS_API_KEY,
"center": "{},{}".format(google_maps_point.coords[1], google_maps_point.coords[0]),
"markers": "color:red|{},{}".format(google_maps_point.coords[1], google_maps_point.coords[0]),
}
query_dict = QueryDict(mutable=True)
query_dict.update(args)
return "{}?{}".format(base_uri, query_dict.urlencode())
@register.simple_tag(takes_context=True)
def add_active_class(context, names, _class="active"):
request = context["request"]
names = names.split(",")
return _class if request.resolver_match.view_name in names else ""
|
[
"[email protected]"
] | |
698600cd6ef3db2cc2a39fdfc8019f5de78b9103
|
c24c0879b5498e6a4e68da58acfc459900370ea5
|
/Common/Message.py
|
9fc1d9b9114efd2a63552f24a2ade744fa0784a6
|
[
"MIT"
] |
permissive
|
RajivSIyer/Smart-Chat-Application
|
1ddec9eca524b7e5453dea9f6fdabe88da541b5a
|
fbfd8c45397fa27bc37bd6383ff01ca770369396
|
refs/heads/main
| 2023-08-30T23:25:42.273109 | 2021-10-07T05:35:30 | 2021-10-07T05:35:30 | 407,154,153 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 665 |
py
|
import json
import time
class Message(object):
    """A chat message between two users, serializable to and from JSON.

    Fixes: ``encode_Json`` previously ignored its ``msg`` argument and
    always serialized ``self`` (harmless when used as the bound
    ``default=`` hook, but wrong for any other object); ``from_Json``
    named its classmethod first parameter ``self`` and hard-coded the
    class, breaking subclassing.
    """
    def __init__(self, datetime, from_user, to_user, msg_str):
        self.datetime = datetime    # message timestamp (caller-supplied)
        self.from_user = from_user  # sender identifier
        self.to_user = to_user      # recipient identifier
        self.msg_str = msg_str      # message body
    def to_Json(self):
        """Serialize this message to a pretty-printed JSON string."""
        return json.dumps(self, indent=4, default=self.encode_Json)
    def encode_Json(self, msg):
        """json.dumps hook: return a plain-dict representation of `msg`."""
        return {"datetime": msg.datetime, "from_user": msg.from_user,
                "to_user": msg.to_user, "msg_str": msg.msg_str}
    @classmethod
    def from_Json(cls, jsonStr):
        """Rebuild a message from a JSON string produced by to_Json()."""
        return cls(**json.loads(jsonStr))
    def __str__(self):
        return self.to_Json()
|
[
"[email protected]"
] | |
56d6ad634bebea67a2301e8f6388da48db7a0e97
|
64fae387fd7170e1395edbf8467d4dc04a0258f9
|
/event_application/urls.py
|
7334e82640445385ba4cfd3aa16ce70a2bf77e7b
|
[] |
no_license
|
nspilman/personal-django-server
|
79d5dc01a5861e5dfa889b235d1be4fa559b5bce
|
4a7a09012b06880fb2f0d63a01c820bddf604eb6
|
refs/heads/master
| 2022-12-08T06:11:22.459358 | 2021-01-03T06:15:28 | 2021-01-03T06:15:28 | 179,366,344 | 0 | 0 | null | 2022-12-08T03:12:32 | 2019-04-03T20:35:58 |
JavaScript
|
UTF-8
|
Python
| false | false | 850 |
py
|
from django.urls import path
from django.conf.urls import include
from django.conf.urls.static import static
from django.conf import settings
from . import views
# URL routes for the event application; all handlers are class-based views.
urlpatterns = [
    # event listing
    path('',views.AllEvents.as_view()),
    path('users/', views.Users.as_view()),
    path('login/',views.Login_Class.as_view()),
    path('logout/',views.Logout_Class.as_view()),
    #endpoint used to check if a user exists
    path('users/<user>/', views.Users.as_view()),
    path('remove/', views.Remove.as_view()),
    path('mockup/',views.Mockup.as_view()),
    #pulls events that a user is signed up for, based on username
    path('usersignups/<user>/', views.Signups.as_view()),
    # signups for a given event
    path('eventsignups/<event>/', views.eventSignups.as_view()),
    #pulls events that a user created, based on username
    path('createdby/<user>/', views.CreatedBy.as_view()),
]
|
[
"[email protected]"
] | |
a7541c57545e14ba213ebab9391759bb91f25980
|
1165df824267975944decfd83baea842c94429bd
|
/app/app/settings.py
|
f8c8875b543938a1e17b7aac10fddd0a36d41d2c
|
[] |
no_license
|
Salvoid/classroom-assistance-system
|
79721372aa7c93d88aef68bd8c61fdbfd92cb339
|
4212b5348f6384d4892643347b2a6f9d6eff006d
|
refs/heads/main
| 2023-04-20T07:20:52.030856 | 2021-05-20T09:54:06 | 2021-05-20T09:54:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,793 |
py
|
"""
Django settings for pp
Generated by 'django-admin startproject' using Django 2.2.18.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
APP_DIR = os.path.join(os.path.dirname(BASE_DIR), 'app')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-!a1!^&g=w_vqsdl+8j_u^6(3an%o1n^#p$pxar+3q(*x3+a)a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['superepicguysuper.pythonanywhere.com', '127.0.0.1']
# Application definition
AUTH_USER_MODEL = 'account.Account'
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
# 'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
# 'rest_framework',
# 'drf_multiple_model',
# 'django_js_reverse',
# 'webpack_loader',
]
LOCAL_APPS = [
'account',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(APP_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Login Rules
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
LOGIN_URL = 'account_login'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(APP_DIR, 'assets'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
""
] | |
ccb254e11f8e18de6c2d464655ee04f09aeeab04
|
007c1bb62ee70fb387b24bac9387da90745a85db
|
/inelastic/direct_geometry/tests/dgreducetestmerlin.py
|
59fd074e376de7fcda16efd860473b6252660fce
|
[] |
no_license
|
mantidproject/scripts
|
fc14040f0674fda31b28bbc668a923fecc00fe99
|
f5a7b79825c7bdd8977e1409967bce979e4ca690
|
refs/heads/master
| 2021-01-17T11:36:00.426451 | 2017-08-25T13:01:26 | 2017-08-25T13:01:26 | 3,379,430 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,121 |
py
|
from qtiGenie import *
from PySlice2 import *
inst='mer'
iliad_setup(inst)
ext='.raw'
#mapfile='one2one_094'
mapfile='rings_113'
#det_cal_file must be specified if the reduction sends out put to a workpsace
cal_file='6399'
#load vanadium file
wb_run="6399"
LoadRaw(Filename=wb_run,OutputWorkspace="wb_wksp",LoadLogFiles="0")
#---------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------------
ei=18
rebin_params='-10,.2,15'
#runs=[16654]# ,16643,16652]
runs=6398
LoadRaw(Filename=runs,OutputWorkspace="run",LoadLogFiles="0")
mono_van=16654
wb_mono=16637
samp_rmm=1
samp_mass=1
monovan_mapfile='mari_res'
#w1=iliad_abs(wb_run,runs,mono_van,wb_mono,samp_rmm,samp_mass,ei,rebin_params,mapfile,monovan_mapfile,use_sam_msk_on_monovan=True)
w1=iliad('wb_wksp','run',ei,rebin_params,mapfile,det_cal_file=cal_file,norm_method='current',diag_remove_zero=False,detector_van_range=[20,300])
w2=data2D(w1)
w2.rebinproj('0.2,.05,5')
w2.display(10)
w2.CutAlongE(0,3,-10,1,80)
|
[
"[email protected]"
] | |
7912959123d546558f08965faeb681242c792b57
|
7995802110b659d5bea414dee95beaec0ef6a399
|
/blog/migrations/0007_department_project.py
|
27ce9e6402b267ecd846f025da5a917fc72c88ea
|
[] |
no_license
|
jonjeden/jje_blog
|
120c63c414f87356305c0820e9844cf7837a63de
|
f3eef982fa2e7ed10f73c9a3a8abc36dd1469de8
|
refs/heads/master
| 2021-01-18T12:00:14.330366 | 2016-12-15T19:19:34 | 2016-12-15T19:19:34 | 68,279,097 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,167 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-11-10 06:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20161107_2231'),
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('staff_size', models.IntegerField(default=0)),
('website', models.URLField(blank=True)),
('address1', models.CharField(blank=True, max_length=200)),
('address2', models.CharField(blank=True, max_length=200)),
('city', models.CharField(blank=True, max_length=50)),
('state', models.CharField(blank=True, max_length=2)),
('zipcode', models.IntegerField(default=0)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('number', models.IntegerField(default=0)),
('description', models.CharField(blank=True, max_length=400)),
('fiscal_year', models.IntegerField(default=0)),
('total_cost', models.FloatField(default=0, max_length=40)),
('start_date', models.DateField(blank=True)),
('close_date', models.DateField(blank=True)),
('last_update', models.DateTimeField(auto_now=True)),
('department', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='blog.Department')),
],
options={
'ordering': ('name',),
},
),
]
|
[
"[email protected]"
] | |
b7ab56752c0350c54b5d11f7ff3d52afaae01b29
|
1db72cee9afd1d013205466478a67c902a0c3930
|
/firstapp/urls.py
|
445eb9f006f5d76cae88e982e94c91c1a4ba1483
|
[] |
no_license
|
kawiraanitah/Rendering-basic-html-using-django
|
f9bb41ca9f437d4653ee130b75f2a94eb37cce03
|
cc940fbcef8fc49b9626349f52b7add446b3971c
|
refs/heads/master
| 2021-07-21T19:35:25.882325 | 2017-09-18T13:56:15 | 2017-09-18T13:56:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 900 |
py
|
"""anitahkawira_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
from firstapp import views
urlpatterns = [
url(r'^$', views.lists, name='list'),
url('^Student/$', views.Student),
]
|
[
"[email protected]"
] | |
270d5dd6dc7d1ac5dfdf0eeb82eaa30901b3cb1c
|
18dba2f82e17873e5e8161e74bc714ef88b09b36
|
/realestate/estatebase/migrations/0044_auto__add_localitytype__add_field_locality_locality_type.py
|
a5aaef7a8313514f2c8f3e1d85d08d0dbfdeaacf
|
[] |
no_license
|
sanchellius/estate-agent
|
8013573624b62ea3b6362fa0c22edf8371ca6966
|
53c15c2f2c970bd432ae579b5aa6f76ab2fbac49
|
refs/heads/master
| 2021-01-17T21:15:35.988578 | 2016-07-25T21:51:24 | 2016-07-25T21:51:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 45,365 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LocalityType'
db.create_table('estatebase_localitytype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, db_index=True)),
('prep_name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
))
db.send_create_signal('estatebase', ['LocalityType'])
# Adding field 'Locality.locality_type'
db.add_column('estatebase_locality', 'locality_type',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['estatebase.LocalityType'], null=True, on_delete=models.PROTECT, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'LocalityType'
db.delete_table('estatebase_localitytype')
# Deleting field 'Locality.locality_type'
db.delete_column('estatebase_locality', 'locality_type_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'estatebase.appliance': {
'Meta': {'ordering': "['name']", 'object_name': 'Appliance'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.beside': {
'Meta': {'ordering': "['name']", 'object_name': 'Beside'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.bid': {
'Meta': {'ordering': "['-history__created']", 'object_name': 'Bid'},
'agency_price_max': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'agency_price_min': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'bid_status': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.BidStatus']", 'null': 'True', 'blank': 'True'}),
'broker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'broker_list'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']"}),
'brokers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'cleaned_filter': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bids'", 'to': "orm['estatebase.Client']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'estate_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.EstateTypeCategory']", 'null': 'True', 'blank': 'True'}),
'estate_filter': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'estate_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.EstateType']", 'null': 'True', 'blank': 'True'}),
'estates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Estate']", 'null': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'localities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Locality']", 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Region']", 'null': 'True', 'blank': 'True'})
},
'estatebase.bidevent': {
'Meta': {'object_name': 'BidEvent'},
'bid': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bid_events'", 'to': "orm['estatebase.Bid']"}),
'bid_event_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.BidEventCategory']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'estates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Estate']", 'null': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'estatebase.bideventcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'BidEventCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.bidg': {
'Meta': {'ordering': "['id']", 'object_name': 'Bidg'},
'appliances': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Appliance']", 'null': 'True', 'blank': 'True'}),
'basic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ceiling': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Ceiling']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'ceiling_height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Document']", 'null': 'True', 'blank': 'True'}),
'elevator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'estate': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bidgs'", 'to': "orm['estatebase.Estate']"}),
'estate_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.EstateType']", 'on_delete': 'models.PROTECT'}),
'exterior_finish': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.ExteriorFinish']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'floor': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor_count': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
'flooring': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Flooring']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'heating': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Heating']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interior': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Interior']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'roof': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Roof']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'room_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'room_number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_area': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'used_area': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'wall_construcion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.WallConstrucion']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'wall_finish': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.WallFinish']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'window_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.WindowType']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'year_built': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'estatebase.bidstatus': {
'Meta': {'ordering': "['name']", 'object_name': 'BidStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.ceiling': {
'Meta': {'ordering': "['name']", 'object_name': 'Ceiling'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.client': {
'Meta': {'ordering': "['-id']", 'object_name': 'Client'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'client_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.ClientType']", 'on_delete': 'models.PROTECT'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Origin']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'estatebase.clienttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ClientType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.comstatus': {
'Meta': {'ordering': "['name']", 'object_name': 'ComStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {})
},
'estatebase.contact': {
'Meta': {'ordering': "['contact_state__id', 'contact_type__id']", 'object_name': 'Contact'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': "orm['estatebase.Client']"}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'contact_state': ('django.db.models.fields.related.ForeignKey', [], {'default': '5', 'to': "orm['estatebase.ContactState']", 'on_delete': 'models.PROTECT'}),
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.ContactType']", 'on_delete': 'models.PROTECT'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'estatebase.contacthistory': {
'Meta': {'object_name': 'ContactHistory'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Contact']"}),
'contact_state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.ContactState']", 'on_delete': 'models.PROTECT'}),
'event_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 21, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'estatebase.contactstate': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactState'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.contacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.document': {
'Meta': {'ordering': "['name']", 'object_name': 'Document'},
'estate_type_category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['estatebase.EstateTypeCategory']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.driveway': {
'Meta': {'ordering': "['name']", 'object_name': 'Driveway'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.electricity': {
'Meta': {'ordering': "['name']", 'object_name': 'Electricity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.estate': {
'Meta': {'ordering': "['-id']", 'object_name': 'Estate'},
'agency_price': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'beside': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Beside']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'beside_distance': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'broker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'client_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'clients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'estates'", 'symmetrical': 'False', 'through': "orm['estatebase.EstateClient']", 'to': "orm['estatebase.Client']"}),
'com_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.ComStatus']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Contact']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'driveway': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Driveway']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'driveway_distance': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'electricity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Electricity']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'electricity_distance': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'estate_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.EstateTypeCategory']", 'on_delete': 'models.PROTECT'}),
'estate_number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'estate_params': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.EstateParam']", 'null': 'True', 'blank': 'True'}),
'estate_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.EstateStatus']", 'on_delete': 'models.PROTECT'}),
'gassupply': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Gassupply']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'gassupply_distance': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Internet']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Locality']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'microdistrict': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Microdistrict']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Origin']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Region']", 'on_delete': 'models.PROTECT'}),
'saler_price': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sewerage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Sewerage']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'sewerage_distance': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Street']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'telephony': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Telephony']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'validity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Validity']", 'null': 'True', 'blank': 'True'}),
'watersupply': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Watersupply']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'watersupply_distance': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'estatebase.estateclient': {
'Meta': {'unique_together': "(('client', 'estate'),)", 'object_name': 'EstateClient'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Client']"}),
'estate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Estate']"}),
'estate_client_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.EstateClientStatus']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'estatebase.estateclientstatus': {
'Meta': {'ordering': "['name']", 'object_name': 'EstateClientStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.estateparam': {
'Meta': {'ordering': "['order']", 'object_name': 'EstateParam'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'blank': 'True'})
},
'estatebase.estatephoto': {
'Meta': {'ordering': "['order']", 'object_name': 'EstatePhoto'},
'estate': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['estatebase.Estate']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'blank': 'True'})
},
'estatebase.estateregister': {
'Meta': {'ordering': "['-id']", 'object_name': 'EstateRegister'},
'bids': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'estate_registers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['estatebase.Bid']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'estates': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'estate_registers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['estatebase.Estate']"}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'register_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.RegisterCategory']", 'null': 'True', 'blank': 'True'})
},
'estatebase.estatestatus': {
'Meta': {'ordering': "['name']", 'object_name': 'EstateStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.estatetype': {
'Meta': {'ordering': "['estate_type_category__order', 'name']", 'object_name': 'EstateType'},
'estate_type_category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'types'", 'on_delete': 'models.PROTECT', 'to': "orm['estatebase.EstateTypeCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'blank': 'True'}),
'placeable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'template': ('django.db.models.fields.IntegerField', [], {})
},
'estatebase.estatetypecategory': {
'Meta': {'ordering': "['order']", 'object_name': 'EstateTypeCategory'},
'has_bidg': ('django.db.models.fields.IntegerField', [], {}),
'has_stead': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'independent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_commerce': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'blank': 'True'})
},
'estatebase.exteriorfinish': {
'Meta': {'ordering': "['name']", 'object_name': 'ExteriorFinish'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.flooring': {
'Meta': {'ordering': "['name']", 'object_name': 'Flooring'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.furniture': {
'Meta': {'ordering': "['name']", 'object_name': 'Furniture'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.gassupply': {
'Meta': {'ordering': "['name']", 'object_name': 'Gassupply'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.geogroup': {
'Meta': {'ordering': "['name']", 'object_name': 'GeoGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.heating': {
'Meta': {'ordering': "['name']", 'object_name': 'Heating'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.historymeta': {
'Meta': {'object_name': 'HistoryMeta'},
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creators'", 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modificated': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updators'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']"})
},
'estatebase.interior': {
'Meta': {'ordering': "['name']", 'object_name': 'Interior'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.internet': {
'Meta': {'ordering': "['name']", 'object_name': 'Internet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.landtype': {
'Meta': {'ordering': "['name']", 'object_name': 'LandType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.layout': {
'Meta': {'ordering': "['id']", 'object_name': 'Layout'},
'area': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2', 'blank': 'True'}),
'furniture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Furniture']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interior': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Interior']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'layout_feature': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.LayoutFeature']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.LayoutType']", 'on_delete': 'models.PROTECT'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Level']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'estatebase.layoutfeature': {
'Meta': {'ordering': "['name']", 'object_name': 'LayoutFeature'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.layouttype': {
'Meta': {'ordering': "['name']", 'object_name': 'LayoutType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.level': {
'Meta': {'ordering': "['level_name']", 'object_name': 'Level'},
'bidg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'levels'", 'to': "orm['estatebase.Bidg']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.LevelName']"})
},
'estatebase.levelname': {
'Meta': {'ordering': "['name']", 'object_name': 'LevelName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.locality': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'region'),)", 'object_name': 'Locality'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.LocalityType']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Region']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'estatebase.localitytype': {
'Meta': {'ordering': "['name']", 'object_name': 'LocalityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'prep_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'estatebase.microdistrict': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'locality'),)", 'object_name': 'Microdistrict'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Locality']", 'on_delete': 'models.PROTECT'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'estatebase.office': {
'Meta': {'ordering': "['name']", 'object_name': 'Office'},
'address': ('django.db.models.fields.TextField', [], {}),
'address_short': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['estatebase.Region']", 'symmetrical': 'False'})
},
'estatebase.origin': {
'Meta': {'ordering': "['name']", 'object_name': 'Origin'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.purpose': {
'Meta': {'ordering': "['name']", 'object_name': 'Purpose'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
'geo_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.GeoGroup']", 'on_delete': 'models.PROTECT'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.registercategory': {
'Meta': {'ordering': "['name']", 'object_name': 'RegisterCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.roof': {
'Meta': {'ordering': "['name']", 'object_name': 'Roof'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.sewerage': {
'Meta': {'ordering': "['name']", 'object_name': 'Sewerage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.shape': {
'Meta': {'ordering': "['name']", 'object_name': 'Shape'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.stead': {
'Meta': {'object_name': 'Stead'},
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Document']", 'null': 'True', 'blank': 'True'}),
'estate': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'stead'", 'unique': 'True', 'to': "orm['estatebase.Estate']"}),
'estate_type': ('django.db.models.fields.related.ForeignKey', [], {'default': '15', 'to': "orm['estatebase.EstateType']", 'on_delete': 'models.PROTECT'}),
'face_area': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'land_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.LandType']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'purpose': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Purpose']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Shape']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'total_area': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'estatebase.street': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'locality'),)", 'object_name': 'Street'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Locality']", 'on_delete': 'models.PROTECT'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'estatebase.telephony': {
'Meta': {'ordering': "['name']", 'object_name': 'Telephony'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'geo_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['estatebase.GeoGroup']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Office']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'estatebase.validity': {
'Meta': {'ordering': "['name']", 'object_name': 'Validity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.wallconstrucion': {
'Meta': {'ordering': "['name']", 'object_name': 'WallConstrucion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.wallfinish': {
'Meta': {'ordering': "['name']", 'object_name': 'WallFinish'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.watersupply': {
'Meta': {'ordering': "['name']", 'object_name': 'Watersupply'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.windowtype': {
'Meta': {'ordering': "['name']", 'object_name': 'WindowType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
}
}
complete_apps = ['estatebase']
|
[
"[email protected]"
] | |
de1275ebc2f6aa4b9161b36c637abba3cfb8339b
|
055b7c4c2118e6e862cfae344d722e8e90534cb4
|
/config.py
|
5aa53ff4827b52082755f58b81f4fb855ebf1ae7
|
[] |
no_license
|
Omulosi/iReporter
|
745b3194f5a06371ca01c4d790cac763a09cf89f
|
db80d76b84d786330fb389d94c2623cbbad13be9
|
refs/heads/develop
| 2022-12-09T13:42:32.856875 | 2019-04-23T04:14:27 | 2019-04-23T04:14:27 | 158,638,861 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,162 |
py
|
'''
instance.config
------------------
This module provides default configuration values.
'''
import os
from datetime import timedelta
from dotenv import load_dotenv

# Load environment variables from a .env file sitting next to this module,
# so the os.environ lookups in the config classes below can see them.
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config:
    '''
    Base configuration values

    All secrets and connection strings are sourced from the environment
    (populated from .env by load_dotenv above); unset variables yield None.
    '''
    SECRET_KEY = os.environ.get('SECRET_KEY')
    JWT_SECRET_KEY = os.environ.get('JWT_SECRET_KEY')
    # Access tokens expire after one hour; blacklisting is checked for both
    # access and refresh tokens.
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(minutes=60)
    JWT_BLACKLIST_ENABLED = True
    JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
    PROPAGATE_EXCEPTIONS = True
    #: Database url
    DATABASE = os.environ.get('DATABASE_URL')
    #: Mail server configuration values
    MAIL_SERVER=os.environ.get('MAIL_SERVER')
    # NOTE(review): env values are strings — MAIL_PORT is not cast to int and
    # MAIL_USE_TLS='False' would still be truthy; confirm the mail client
    # tolerates string values here.
    MAIL_PORT=os.environ.get('MAIL_PORT')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_USE_TLS=os.environ.get('MAIL_USE_TLS')
    MAIL_USERNAME=os.environ.get('MAIL_USERNAME')
class TestConfig(Config):
    '''
    configuration values for testing

    Inherits everything from Config; enables debug/testing flags and points
    DATABASE at a separate test database.
    '''
    TESTING = True
    DEBUG = True
    PROPAGATE_EXCEPTIONS = True
    DATABASE = os.environ.get('TEST_DB_URL')
|
[
"[email protected]"
] | |
6895eb25c9f06392b002af645b8194900af063e1
|
01d070d9b1df07828cceafe079450186d7e1ad76
|
/npy/lsh_server.py
|
17f30299eddb00aa02a9c5ae9bc7c8b7b3321f62
|
[] |
no_license
|
ivychill/face_rec_server
|
781f05c85d5d205bf9f9b39940ad94d7d4694960
|
2b736a581cb13080ce75f2d9f0f86b696e69fc41
|
refs/heads/master
| 2020-07-01T13:39:27.117998 | 2019-08-08T05:38:03 | 2019-08-08T05:38:03 | 201,185,520 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,996 |
py
|
# -*- coding: utf8 -*-
# ! /usr/bin/python
'''
ChangeLog:
#2018-7-19 Guard the worker threads so that an exception cannot stop them from responding
#2018-08-02 Adjusted according to the Hash Server interface design document V1.0
'''
from __future__ import division
import numpy as np
import falconn as fc
import time
import threading
import zmq
import sys, getopt
import pickle
import collections
import traceback
import msgpack
# import msgpack_numpy as m
import logging
import logging.handlers

# Feature pairs farther apart than this (Euclidean) are treated as "no match".
eDistanceLimit = 2

# Rotating file log: 100 MB per file, 5 backups kept.
LOG_FILE = 'lsh_server.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=100 * 1024 * 1024, backupCount=5)  # instantiate the handler
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)  # instantiate the formatter
handler.setFormatter(formatter)  # attach the formatter to the handler
logger = logging.getLogger('lsh_server')  # get the logger named 'lsh_server'
logger.addHandler(handler)  # attach the handler to the logger
logger.setLevel(logging.INFO)
def getsimi(v1, v2):
    """Distance-gated similarity: 0.0 when v1 and v2 are farther apart
    than eDistanceLimit (Euclidean), otherwise their rescaled cosine
    similarity in [0, 1] via getcosine."""
    distance = np.linalg.norm(v2 - v1)
    if distance >= eDistanceLimit:
        return 0.0
    return getcosine(v1, v2)
def getcosine(v1, v2):
    """Cosine similarity of v1 and v2, rescaled from [-1, 1] to [0, 1]."""
    norm_product = np.linalg.norm(v1, 2) * np.linalg.norm(v2, 2)
    cosine = np.dot(v1, v2) / norm_product
    return (cosine + 1) / 2
class lsh_server:
    """ZeroMQ ROUTER/DEALER service answering face-feature nearest-neighbour
    queries against a FALCONN LSH index built from precomputed .npy files.

    Protocol (msgpack over REQ/REP): requests start with a command word —
    0x8001 for single nearest neighbour, 0x8002 for k nearest neighbours —
    followed by the query matrix (raw bytes + dtype + shape) and two angle
    lists; replies are (errorcode, names, confidences) tuples.
    """

    def __init__(self,feature_file,label_file,port,worker_num=10):
        # In-process endpoint linking the DEALER back-end to worker threads.
        self.url_worker = 'inproc://ping-workers'
        url_router = "tcp://*:%s"%port
        self.worker_num = worker_num
        # Per-worker processed-request counters, reported by stater().
        self.worker_counts=[0L]*worker_num
        self.context = zmq.Context()
        self.router = self.context.socket(zmq.ROUTER)
        self.router.bind(url_router)
        self.workers = self.context.socket(zmq.DEALER)
        self.workers.bind(self.url_worker)
        # label[i] is the identity of feature row i.
        self.label = np.load(label_file)
        logger.info ("start load feature data")
        t1 = time.time()
        self.feature = np.load(feature_file)
        t2 = time.time()
        logger.info ("load cost time:%f" % (t2 - t1))
        # Build the LSH index over the (num_points, dim) feature matrix.
        dp = fc.get_default_parameters(self.feature.shape[0], self.feature.shape[1],
                                       fc.DistanceFunction.EuclideanSquared)
        ds = fc.LSHIndex(dp)
        train_st = time.time()
        ds.setup(self.feature)
        train_et = time.time()
        logger.info ("train cost time:%f" % (train_et - train_st))
        # self.qo = ds.construct_query_object()
        # A query pool can be shared by the worker threads.
        self.qp=ds.construct_query_pool()

    def construct_name_confidence(self,query_array,indexs):
        # Map index hits back to label names plus a cosine-based confidence
        # in [0, 1]; a None index yields a None name.
        # NOTE(review): currently unused — worker() builds its reply inline.
        names=[]
        confidences=[]
        for i in range(len(indexs)):
            if indexs[i] == None:
                names.append(None)
            else:
                names.append(self.label[indexs[i]])
                cos = np.dot(query_array[i],self.feature[indexs[i]])/(np.linalg.norm(query_array[i])*np.linalg.norm(self.feature[indexs[i]]))
                confidences.append((cos+1)*0.5)
        # freq = collections.Counter(names)
        # s=sum(freq.values())
        # for k,v in freq.items():
        #     freq[k]=v/s
        return names,confidences

    def loop(self):
        """Start the stats thread and the worker threads, then block inside
        the ZMQ queue device shuttling messages between ROUTER and DEALER."""
        threading.Thread(target=self.stater).start()
        for i in range(self.worker_num):
            thread = threading.Thread(target=self.worker, args=(i, self.url_worker, self.context,))
            thread.start()
        zmq.device(zmq.QUEUE, self.router, self.workers)
        # Only reached if the device loop terminates.
        self.router.close()
        self.workers.close()
        self.context.term()

    def stater(self):
        """Every few seconds, log the per-worker counters and the overall
        request throughput since the previous report."""
        last=sum(self.worker_counts)
        sleep_base=5
        while True:
            time.sleep(sleep_base)
            logger.info("%s"%self.worker_counts)
            cur=sum(self.worker_counts)
            logger.info("lsh_server Concurrency:%f"%((cur-last)/sleep_base))
            last=cur

    def worker(self,name, url_worker, context):
        """Worker thread: receive a msgpack request, run 1-NN (cmd 0x8001)
        or k-NN (cmd 0x8002) lookups per query row, and reply with an
        (errorcode, names, confidences) tuple. Errors are caught and
        reported to the client instead of killing the thread."""
        logger.info('worker {0} start'.format(name))
        worker = context.socket(zmq.REP)
        worker.connect(url_worker)
        count=0
        while True:
            try:
                errorcode = 0
                message = worker.recv()
                logger.info("##################\nworker %d receive count:%d" %(name,count))
                t1 = time.time()
                #query = pickle.loads(message)
                # 2018-08-02 adjusted per the Hash Server interface design document V1.0
                # query_arr=msgpack.unpackb(message,object_hook=m.decode)
                search_arg=msgpack.unpackb(message,raw=True)
                cmd=search_arg[0]
                map_start=1
                find_k=5
                if cmd == 0x8001:
                    map_start=1
                elif cmd == 0x8002:
                    # k-NN request carries k before the query payload.
                    find_k=search_arg[1]
                    map_start=2
                else:
                    errorcode=-5
                    raise ValueError("Invalid command")
                # Rebuild the query matrix from raw bytes + dtype + shape.
                query_arr=np.fromstring(search_arg[map_start]['data'],dtype=np.dtype(search_arg[map_start]['type'])).reshape(search_arg[map_start]['shape'])
                h_angle=search_arg[map_start+1]
                v_angle=search_arg[map_start+2]
                logger.debug ("cmd:%x, %s---%s" %( cmd, h_angle, v_angle ))
                if len(query_arr.shape) != 2:
                    errorcode = -3
                    raise ValueError('emb_array must be a two-dimensional array')
                if len(query_arr) != len(h_angle) or len(query_arr) != len(v_angle):
                    errorcode=-2
                    raise ValueError('len of all array must be same')
                # print query_arr
                # print query_arr.dtype
                t2 = time.time()
                logger.debug ("unpack cost time:%f"%(t2-t1))
                # print ("this is query:%s"%query)
                # print query
                #indexs=[]
                names=[]
                confidences=[]
                ts=time.time()
                for query in query_arr:
                    result=None
                    t1 = time.time()
                    print('.........................')
                    print('datatype:',query.dtype)
                    print('datasize:',query.shape)
                    # result = self.qo.find_nearest_neighbor(query)
                    if cmd == 0x8001:
                        result = self.qp.find_nearest_neighbor(query)
                        t2 = time.time()
                        logger.debug ("\nresult:[%d],cost time:%f"%(result,t2-t1))
                        names.append(self.label[result] if result != None else None)
                        confidences.append(getsimi(query,self.feature[result]) if result != None else 0.0)
                    elif cmd == 0x8002:
                        result = self.qp.find_k_nearest_neighbors(query,find_k)
                        t2 = time.time()
                        logger.debug ("\nresult:[%s],cost time:%f"%(",".join([str(x) for x in result]),t2-t1))
                        for x in result:
                            #print x
                            names.append(self.label[x])
                            confidences.append(getsimi(query,self.feature[x]))
                te=time.time()
                logger.debug ("Total compute time:%f,mean compute time:%f"%((te-ts),(te-ts)/len(query_arr)))
                #print indexs
                #name_confidence=self.construct_name_confidence(query_arr,indexs)
                name_confidence=(errorcode,names,confidences)
                t1 = time.time()
                #serialized = pickle.dumps(name_confidence, protocol=0)
                serialized=msgpack.packb(name_confidence,use_bin_type=True)
                t2 = time.time()
                logger.debug ("before send,msg pack cost time:%f"%(t2-t1))
                worker.send(serialized)
                logger.debug ("after send")
                logger.info("#########################\nworker %d send count:%d"%(name,count))
                count+=1
                self.worker_counts[name]+=1
            except Exception, e:
                logger.error ("e.message:\t%s"%e.message)
                logger.error ("trace info:\n%s" % traceback.format_exc())
                print ("e.message:\t%s"%e.message)
                print ("trace info:\n%s" % traceback.format_exc())
                # 2018-7-19 keep the worker alive on errors so it can still respond
                # break
                # Map known falconn error messages onto API error codes.
                if errorcode == 0:
                    if e.message.find('query dimension mismatch') != -1:
                        errorcode = -1
                    elif e.message.find('dataset and query must have the same dtype') != -1:
                        errorcode = -4
                    else:
                        errorcode = -1000
                name_confidence=(errorcode,['Fatal Error:'+e.message],[0.0])
                serialized=msgpack.packb(name_confidence,use_bin_type=True)
                worker.send(serialized)
        # Unreachable while the loop above runs forever; kept for symmetry.
        worker.close()
def usage():
    # Print the command-line help line (Python 2 print statement).
    print "python lsh_server -f feature.npy -l labels.npy -k 5 "
def get_arg():
    """Parse command-line options.

    Recognised flags: -f feature file, -l label file, -k neighbour count,
    -p port (string), -t worker-thread count, -h help. Exits via usage()
    when -h is given or a required file argument is missing.

    Returns:
        (feature_file, label_file, k, port, worker_num)
    """
    opts, args = getopt.getopt(sys.argv[1:], "hf:k:l:p:t:")
    feature_file = ""
    label_file = ""
    k = 5
    port = "5555"
    worker_num = 10
    for option, value in opts:
        if option == "-h":
            usage()
            sys.exit()
        elif option == "-f":
            feature_file = value
        elif option == "-l":
            label_file = value
        elif option == "-k":
            k = int(value)
        elif option == "-p":
            port = value
        elif option == "-t":
            worker_num = int(value)
    # Both input files are mandatory.
    if not feature_file or not label_file:
        usage()
        sys.exit()
    return feature_file, label_file, k, port, worker_num
if __name__ == '__main__':
    # Parse CLI arguments, build the LSH server and block in its device loop.
    # NOTE(review): k is parsed but never passed to the server — confirm
    # whether find_k was meant to be configurable from the CLI.
    feature_file,label_file,k,port,worker_num=get_arg()
    server=lsh_server(feature_file,label_file,port,worker_num)
    server.loop()
    # python lsh_server.py -f features.npy -l labels.npy -k 5
|
[
"[email protected]"
] | |
2a12af5857a968fd024dccf5178760d33619d0ea
|
6aa7ff9aaa9be965ac2969684680920782de711b
|
/djangoProject/api/model/guard/studentsGuard.py
|
c1c5a013134f47d59eb87f6536a55c08e564509f
|
[
"MIT"
] |
permissive
|
jorgejimenez98/django-guadria-obrera-uc
|
80d67cb4590622f16c69ea93c5c478ff7f920d8c
|
d0095250613694c969d471cb1bb35635f546d82e
|
refs/heads/master
| 2023-07-26T12:03:49.669730 | 2021-09-03T09:46:25 | 2021-09-03T09:46:25 | 402,413,407 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 844 |
py
|
from django.db import models
from ..person.student import Student
from .guard import Guard
from .dutyShift import DutyShift
class StudentsGuard(models.Model):
    """Join model recording one student's assignment to a guard duty,
    including their shift and whether they attended."""
    guard = models.ForeignKey(Guard, on_delete=models.PROTECT, related_name='studentsGuards')
    # Whether the student actually attended the guard.
    assistance = models.BooleanField(default=False)
    careerId = models.PositiveIntegerField(default=0, blank=True)  # Filled in when a guard is inserted
    yearOfStudy = models.PositiveIntegerField(default=0, blank=True)  # Filled in when a guard is inserted
    dutyShift = models.ForeignKey(DutyShift, on_delete=models.PROTECT)
    student = models.ForeignKey(Student, on_delete=models.PROTECT)

    def __str__(self):
        # "date | full name | seat name" — relies on the related guard/student rows.
        return """{} | {} | {}""".format(self.guard.date, self.student.names + ' ' + self.student.surnames, self.guard.universitySeat.name)
|
[
"[email protected]"
] | |
16011c0ebe4ae0b5330d83fc1d4a9a63f5e4b0a1
|
437a0f81f161438bba3554f440364b965fc3f432
|
/tests/unit/types/test_document.py
|
57d2ae8e6082e82b1cab460573f4c8fb735dc581
|
[
"Apache-2.0"
] |
permissive
|
ApurvaMisra/jina
|
dbbe2873771eafbbdf429c9dd717e26733496d49
|
1ecf2d74179f29f196a964f6d779b1a32bf78e7c
|
refs/heads/master
| 2023-01-24T12:46:27.030417 | 2020-12-03T17:53:41 | 2020-12-03T17:53:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,070 |
py
|
import numpy as np
import pytest
from google.protobuf.json_format import MessageToDict
from jina import NdArray, Request
from jina.proto.jina_pb2 import DocumentProto
from jina.types.document import Document, BadDocID
from tests import random_docs
@pytest.mark.parametrize('field', ['blob', 'embedding'])
def test_ndarray_get_set(field):
    """blob/embedding accept a raw ndarray, an NdArray, or its proto."""
    a = Document()
    # Plain numpy array.
    b = np.random.random([10, 10])
    setattr(a, field, b)
    np.testing.assert_equal(getattr(a, field), b)
    # Wrapped NdArray.
    b = np.random.random([10, 10])
    c = NdArray()
    c.value = b
    setattr(a, field, c)
    np.testing.assert_equal(getattr(a, field), b)
    # Underlying protobuf of an NdArray.
    b = np.random.random([10, 10])
    c = NdArray()
    c.value = b
    setattr(a, field, c.proto)
    np.testing.assert_equal(getattr(a, field), b)
def test_doc_update_fields():
    """set_attrs updates several heterogeneous fields in one call."""
    a = Document()
    b = np.random.random([10, 10])
    c = {'tags': 'string', 'tag-tag': {'tags': 123.45}}
    d = [12, 34, 56]
    e = 'text-mod'
    w = 2.0
    a.set_attrs(embedding=b, tags=c, location=d, modality=e, weight=w)
    # Each field round-trips through its own representation.
    np.testing.assert_equal(a.embedding, b)
    assert list(a.location) == d
    assert a.modality == e
    assert MessageToDict(a.tags) == c
    assert a.weight == w
def test_granularity_get_set():
    """Granularity is a plain readable/writable attribute."""
    doc = Document()
    doc.granularity = 1
    assert doc.granularity == 1
def test_uri_get_set():
    """A valid uri round-trips and infers the mime type; a malformed one raises."""
    doc = Document()
    doc.uri = 'https://abc.com/a.jpg'
    assert doc.uri == 'https://abc.com/a.jpg'
    assert doc.mime_type == 'image/jpeg'
    with pytest.raises(ValueError):
        doc.uri = 'abcdefg'
def test_set_get_mime():
    """Several spellings of jpg all normalize to the canonical mime type."""
    for spelling in ('jpg', 'jpeg', '.jpg'):
        doc = Document()
        doc.mime_type = spelling
        assert doc.mime_type == 'image/jpeg'
def test_no_copy_construct():
    """Wrapping a proto with copy=False shares state in both directions."""
    proto = DocumentProto()
    doc = Document(proto, copy=False)
    # Proto-side write is visible through the wrapper...
    proto.id = '1' * 16
    assert doc.id == '1' * 16
    # ...and wrapper-side write is visible on the proto.
    doc.id = '2' * 16
    assert proto.id == '2' * 16
def test_copy_construct():
    """Wrapping a proto with copy=True detaches the two objects."""
    proto = DocumentProto()
    doc = Document(proto, copy=True)
    # Writes on either side are no longer visible on the other.
    proto.id = '1' * 16
    assert doc.id != '1' * 16
    doc.id = '2' * 16
    assert proto.id == '1' * 16
def test_bad_good_doc_id():
    """Non-hex ids are rejected with BadDocID; 16-char hex ids are accepted."""
    doc = Document()
    with pytest.raises(BadDocID):
        doc.id = 'hello'
    for good_id in ('abcd' * 4, 'de09' * 4, 'af54' * 4, 'abcdef0123456789'):
        doc.id = good_id
def test_id_context():
    """Inside the context manager the id starts empty; it is assigned once
    content is set and the context exits."""
    with Document() as doc:
        assert not doc.id
        doc.buffer = b'123'
    assert doc.id
def test_doc_content():
    """`content` mirrors whichever of text/blob was set last; buffer round-trips."""
    doc = Document()
    assert doc.content is None
    doc.text = 'abc'
    assert doc.content == 'abc'
    blob = np.random.random([10, 10])
    doc.blob = blob
    np.testing.assert_equal(doc.content, blob)
    doc.buffer = b'123'
    assert doc.buffer == b'123'
def test_request_docs_mutable_iterator():
    """To test the weak reference work in docs: mutating Documents yielded
    by r.docs must write through to the underlying request protobuf."""
    r = Request()
    r.request_type = 'index'
    for d in random_docs(10):
        r.docs.append(d)
    # Mutate through the iterator.
    for idx, d in enumerate(r.docs):
        assert isinstance(d, Document)
        d.text = f'look I changed it! {idx}'
    # iterate it again should see the change
    doc_pointers = []
    for idx, d in enumerate(r.docs):
        assert isinstance(d, Document)
        assert d.text == f'look I changed it! {idx}'
        doc_pointers.append(d)
    # pb-lize it should see the change
    rpb = r.as_pb_object
    for idx, d in enumerate(rpb.index.docs):
        assert isinstance(d, DocumentProto)
        assert d.text == f'look I changed it! {idx}'
    # change again by following the pointers
    for d in doc_pointers:
        d.text = 'now i change it back'
    # iterate it again should see the change
    for idx, d in enumerate(rpb.index.docs):
        assert isinstance(d, DocumentProto)
        assert d.text == 'now i change it back'
def test_request_docs_chunks_mutable_iterator():
    """Test if weak reference work in nested docs.

    Same contract as the flat-docs test, but one level down: chunk views
    obtained from `d.chunks` must stay live references into the request's
    underlying proto.
    """
    r = Request()
    r.request_type = 'index'
    for d in random_docs(10):
        r.docs.append(d)
    # mutate every chunk through the nested iterator view
    for d in r.docs:
        assert isinstance(d, Document)
        for idx, c in enumerate(d.chunks):
            assert isinstance(d, Document)
            c.text = f'look I changed it! {idx}'
    # iterate it again should see the change
    doc_pointers = []
    for d in r.docs:
        assert isinstance(d, Document)
        for idx, c in enumerate(d.chunks):
            assert c.text == f'look I changed it! {idx}'
            doc_pointers.append(c)
    # pb-lize it should see the change
    rpb = r.as_pb_object
    for d in rpb.index.docs:
        assert isinstance(d, DocumentProto)
        for idx, c in enumerate(d.chunks):
            assert isinstance(c, DocumentProto)
            assert c.text == f'look I changed it! {idx}'
    # change again by following the pointers
    for d in doc_pointers:
        d.text = 'now i change it back'
    # iterate it again should see the change
    for d in rpb.index.docs:
        assert isinstance(d, DocumentProto)
        for c in d.chunks:
            assert c.text == 'now i change it back'
|
[
"[email protected]"
] | |
774d14b2179139ab271f99c788c217d85202583e
|
f61db5940e29773aba8fc342a21de00e91a5ab2e
|
/base/day9/02python操作文件.py
|
a7f6d0023d40ea4faa5dadda9bbcdb01e1cb4462
|
[] |
no_license
|
liyaozr/project
|
c17a9dcbcda38fe9a15ec4c41a01242a13695991
|
0b0fc10e267ceb19f6792b490fede177035459fe
|
refs/heads/master
| 2020-11-29T18:38:03.297369 | 2020-03-10T01:11:00 | 2020-03-10T01:11:00 | 230,190,916 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,339 |
py
|
"""
============================
Author:柠檬班-木森
Time:2020/1/13 20:28
E-mail:[email protected]
Company:湖南零檬信息技术有限公司
============================
"""
"""
python操作文件
open的常用参数:
第一个:要打开的文件名字或者文件路径
第二个参数:文件打开的模式
r:只读模式
rb:只读模式,以二进制的编码格式去打开文件
第三个参数:
encoding:用来指定打开文件的编码格式(使用rb的时候,不需要加该参数)
"""
# 读取同级目录下的文件,可以直接写文件名
# 打开文件
# f = open("01内置函数的补充.py", "r", encoding="utf8")
# # 读取不在同一个目录下的文件,要写上文件的完整路径
# f = open(r"C:\project\py26_project\py26_01day\02python中的数值.py", "r", encoding="utf8")
#
# # 读取内容
# content = f.read()
#
# # 打印读取出来的内容
# print("文件中读取出来的内容为:", content)
#
# # 关闭文件
# f.close()
# -------------------如何去读取图片、视频等文件----------------
# 读取不在同一个目录下的文件,要写上文件的完整路径
f = open("bj2.png", "rb")
# 读取内容
content = f.read()
# 打印读取出来的内容
print("文件中读取出来的内容为:", content)
# 关闭文件
f.close()
|
[
"[email protected]"
] | |
1af5d6920fedf21d7e84131671e71e2e543d90e3
|
7b09b290a99dd6901991f2b64c73e29011c768ff
|
/FileDeleter/FileDeleter.py
|
89660bba4c34e76c0675185e5fb33532bed35ead
|
[
"Apache-2.0"
] |
permissive
|
zjutkz/UsefulPyScript
|
5e637b99ec352148417cabe5d263c7d9071082e6
|
e0cf077b71ff352e5e48d4b4ab54aa297ffe1d02
|
refs/heads/master
| 2021-01-23T06:26:08.051782 | 2017-06-01T06:55:05 | 2017-06-01T06:55:05 | 93,023,263 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,153 |
py
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
import os
import sys
import logging
def delete(path):
    """Public entry point: forward the argv-style argument list to delFile."""
    delFile(path)
def changeEnv(path):
    """Make `path` the process's current working directory."""
    os.chdir(path)
def delFile(path):
    """Delete files under ``path[1]`` whose names match ``path[3]``.

    path -- argv-style list: [program, root_dir, match_mode, pattern], where
    match_mode is one of:
      'prefix' - file name starts with pattern
      'normal' - file name contains pattern
      'suffix' - file name without its extension ends with pattern
    Exits the process on bad arguments; logs a warning when nothing matched.
    """
    if len(path) != 4:
        logging.error('Please input the right paramters')
        sys.exit(0)
    os.chdir(path[1])
    hitCount = 0
    for parent, dirnames, filenames in os.walk(path[1]):
        for fileName in filenames:
            # os.walk yields names relative to `parent`; join them so matches in
            # sub-directories are removed too (the bare name only resolved for
            # files in the top-level directory).
            fullPath = os.path.join(parent, fileName)
            if path[2] == 'prefix' and fileName.startswith(path[3]):
                os.remove(fullPath)
                hitCount = hitCount + 1
            elif path[2] == 'normal' and fileName.find(path[3]) != -1:
                os.remove(fullPath)
                hitCount = hitCount + 1
            elif path[2] == 'suffix':
                nameWithOutFormat = fileName.split('.')[0]
                if nameWithOutFormat.endswith(path[3]):
                    os.remove(fullPath)
                    hitCount = hitCount + 1
            elif path[2] != 'prefix' and path[2] != 'normal' and path[2] != 'suffix':
                logging.error('Please input the right matches')
                sys.exit(0)
    if hitCount == 0:
        logging.warning('Not find the file with given matches')
if __name__ == "__main__":
delete(sys.argv)
|
[
"[email protected]"
] | |
084e617921030d38fd4eb4eebaf5f5132af4ded3
|
9ee07d45459451d0c68dab820361f45ead5835d5
|
/james_main.py
|
8ed3081d9f97bd5f35049687d092b0742843f73d
|
[] |
no_license
|
LeonChen66/James-docx
|
eae06fe6d883ca6c3f183b5d0cd03e0a73270e38
|
0562018771b7c9f4639ad9220a41b6b8231943da
|
refs/heads/master
| 2020-12-30T15:43:04.691396 | 2019-01-09T21:56:52 | 2019-01-09T21:56:52 | 91,172,243 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,993 |
py
|
# -*- coding: UTF-8 -*-
from PyQt5 import QtWidgets, QtGui
import sys
import docx
from os import mkdir,chdir,system
from james import Ui_Form # import qt designer's gui
from datetime import datetime
from dateutil.relativedelta import relativedelta
class mywindow(QtWidgets.QWidget,Ui_Form):
    """Main window of the echocardiography report generator.

    Reads values from the Qt form (built by ``james.Ui_Form``), replaces the
    placeholder tokens in the Word template ``example2.docx`` and saves the
    filled report as ``<patient-id>_<YYYY_MM_DD>.docx``.
    """

    def __init__(self):
        super(mywindow,self).__init__()
        self.setupUi(self)
        # Word template whose placeholder tokens are replaced on save.
        self.document = docx.Document('example2.docx')
        self.table = self.document.tables

    # click save function
    def hello(self):
        """Slot for the save button: rebuild the report from the form and save it."""
        # Re-open a fresh copy of the template so replacements from a previous
        # save do not leak into this report.
        self.document = docx.Document('example2.docx')
        self.table = self.document.tables
        self.nessesary_information()
        self.basic_information()
        self.mitral()
        self.aortic()
        self.tricuspid()
        self.pulmonary()
        self.wall_motion()
        self.comment_word()
        self.save_docx()
        # self.document.save('test_james.docx')
        # os.system('test_james.docx')

    def save_docx(self):
        """Save the filled document in a per-patient directory and open it."""
        dir_name = self.ID_input.text()
        file_name = self.ID_input.text()+datetime.now().strftime('_%Y_%m_%d') + '.docx'
        try:
            mkdir(dir_name)
            chdir(dir_name)
            self.document.save(file_name)
            system(file_name)
            chdir("..")
        except:
            # Directory already exists (mkdir raised): just save into it.
            chdir(dir_name)
            self.document.save(file_name)
            system(file_name)
            chdir("..")

    def mitral(self):
        """Build the mitral-valve findings text and substitute token 'MM'."""
        content = ''
        if self.M_normal_check.isChecked():
            content += '> ' + self.M_normal_check.text() + '\n'
        if self.M_BMV_check.isChecked():
            content += '> ' + self.M_BMV_check.text() + '\n'
        if self.M_MMV_check.isChecked():
            content += '> '+self.M_MMV_check.text()+'\n'
        if self.M_MMVp_check.isChecked():
            content += '> '+self.M_MMVp_check.text()+'\n'
        # combo index 0 is the "not selected" placeholder
        if self.M_MMVp_combo1.currentIndex() !=0:
            content += ': Anterior leaflet ==>' + self.M_MMVp_combo1.currentText()
        if self.M_MMVp_combo.currentIndex() !=0:
            content += ': Posterior leaflet ==>' + self.M_MMVp_combo.currentText()+'\n'
        if self.M_mitral_check.isChecked():
            content += '> Mitral annulus calcification\n'
        if self.M_scler_check.isChecked():
            content += '> '+self.M_scler_check.text()+'\n'
        if self.M__fibro_check.isChecked():
            content += '> '+self.M__fibro_check.text()+'\n'
        if self.M_myxo_check.isChecked():
            content += '> '+self.M_myxo_check.text()+'\n'
        # NOTE(review): M_scler_check is appended twice (also above) — looks
        # unintentional but is preserved as-is.
        if self.M_scler_check.isChecked():
            content += '> '+self.M_scler_check.text()+'\n'
        if self.M_chordae_check.isChecked():
            content += '> '+self.M_chordae_check.text()+' '+\
                self.M_chordae_check_2.currentText()+'\n'
        if self.M_MVA_check.isChecked():
            content += '> '+self.M_MVA_check.text()+' : '+\
                self.M_MVA_input.text()+' cm^2\n'
        if self.M_meantrans_check.isChecked():
            content += '> '+self.M_meantrans_check.text()+' MV P.G.: '+\
                self.M_meantrans_input.text()+' mmHg\n'
        if self.M_MR_check.isChecked():
            content += ' '+self.M_MR_check.text()+' : '+\
                self.M_MR_combo.currentText()+''
            content += ' >> MR area : ' + self.M_MRarea_input.text() + ' cm^2'
            content += ' >> MR area/LA area : ' + self.M_MRLA_input.text() + ' %'
            content += ' >> Veno contrata: ' + self.M_Veno_input.text() + ' mm'
            content += ' >> ERO : ' + self.M_ERO_input.text() + ' cm^2\n'
        if self.M_others_check.isChecked():
            content +='> Others : ' + self.M_others.toPlainText()
        self.auto_paragraph('MM',content)

    def aortic(self):
        """Build the aortic-valve findings text and substitute token 'AA'."""
        content = ''
        if self.A_normal_check.isChecked():
            content += '> '+self.A_normal_check.text()+'\n'
        if self.A_BAV_check.isChecked():
            content += '> ' + 'Bioprothetic AV' + '\n'
        if self.A_MAV_check.isChecked():
            content += '> '+ self.A_MAV_check.text() +'\n'
        if self.A_scler_check.isChecked():
            content += '> '+self.A_scler_check.text()+'\n'
        if self.A_bicu_check.isChecked():
            content += '> '+self.A_bicu_check.text()+'\n'
        if self.A_tricu_check.isChecked():
            content += '> '+self.A_tricu_check.text()+'\n'
        if self.A_AVA_check.isChecked():
            content += '> '+self.A_AVA_check.text()+' : '+\
                self.A_AVA_input.text()+' cm^2\n'
        if self.A_meantrans_check.isChecked():
            content += ' >>'+self.A_meantrans_check.text()
            content += ' AV PG : '+self.A_AVPG_input.text()+' mmHg'
            content += ' >> peak trans AV PG : ' + self.A_peckPG_input.text() + ' mmHg'
            content += ' >> AV Vmax : ' + self.A_AVVmax_input.text() + ' m/s\n'
        if self.A_AR_check.isChecked():
            content += '> AR : ('+self.A_AR_combo.currentText() +')'
            content += ' >> jet height ratio = ' + self.A_jet_input.text()+' %'
            content += ' >> PHT = ' + self.A_PHT_input.text()+' ms'
            content += ' >> Veno contrata = ' + self.A_veno_input.text()+' mm'
            content += ' >> '+self.A_dias_combo.currentText()+' diastolic reversal flow\n'
        if self.A_others_check.isChecked():
            content += '> Others : ' + self.A_others.toPlainText()
        self.auto_paragraph('AA', content)

    def tricuspid(self):
        """Build the tricuspid-valve findings text and substitute token 'TT'."""
        content = ''
        if self.T_normal_check.isChecked():
            content += '> Normal'
        if self.T_pro_check.isChecked():
            content += '\n> Prolapse : '
        if self.T_anter_check.isChecked():
            content += 'anterior leaflet, '
        if self.T_post_check.isChecked():
            content += 'posterior leaflet, '
        if self.T_septal_check.isChecked():
            content += 'septal leaflet '
        if self.T_TR_check.isChecked():
            content += '\n> TR : ' + self.T_TR_combo.currentText()
            content += '\n >> TR area : '+self.T_TRarea_input.text()+ 'cm^2'
        if self.T_TRV_check.isChecked():
            content += ' > TRV : ' + self.T_TRV_input.text() + ' m/s'
        if self.T_TV_check.isChecked():
            content += ' > Trans-TV PG : ' + self.T_Trans_TV_input.text() + ' mm HG '
        if self.T_others_check.isChecked():
            content += '\n> Others : ' + self.T_others.toPlainText()
        content += '\n'
        self.auto_paragraph('TT',content)

    def pulmonary(self):
        """Build the pulmonary-valve findings text and substitute token 'PP'."""
        content = ''
        if self.P_normal_check.isChecked():
            content += '> '+self.P_normal_check.text()
        if self.P_PR_check.isChecked():
            content += '\n> PR : ' + self.P_PR_combo.currentText()
        if self.P_others_check.isChecked():
            content += '\n> Others : ' + self.P_others.toPlainText()
        self.auto_paragraph('PP',content)

    def wall_motion(self):
        """Build wall-motion findings for the basal / midcavity / apical
        segments and substitute tokens 'WW', 'w_i', 'w_j', 'w_k'."""
        content = ''
        if self.W_normal_check.isChecked():
            content += self.W_normal_check.text()
        if self.W_abnormal_check.isChecked():
            content += self.W_abnormal_check.text() + ':'
        # -------- basal ---------
        content_1=''
        if self.W_basal_check.isChecked():
            content_1 += '' + self.W_basal_check.text() + ' ('
        if self.W_anterior_check.isChecked():
            content_1 += self.W_anterior_check.text() + ', '
        if self.W_septal_check.isChecked():
            content_1 += self.W_septal_check.text() + ', '
        if self.W_inferior_check.isChecked():
            content_1 +=self.W_inferior_check.text()+ ', '
        if self.W_posterior_check.isChecked():
            content_1 += self.W_posterior_check.text() + ', '
        if self.W_lateral_check.isChecked():
            content_1 += self.W_lateral_check.text() + ', '
        if self.W_abnormal_check.isChecked() and self.W_basal_check.isChecked():
            # close the '(' opened for the basal segment list
            content_1 += ') '
        if self.W_hypo_check.isChecked():
            content_1 += self.W_hypo_check.text() + ', '
        if self.W_akinesis_check.isChecked():
            content_1 += self.W_akinesis_check.text() + ', '
        if self.W_dysk_check.isChecked():
            content_1 += self.W_dysk_check.text()
        # ------- midcavity ------
        content_2 = ''
        if self.W_midcavity_check.isChecked():
            content_2 += '' + self.W_midcavity_check.text() + ' ('
        if self.W_m_anterior_check.isChecked():
            content_2 += self.W_anterior_check.text() + ', '
        if self.W_m_septal_check.isChecked():
            content_2 += self.W_septal_check.text() + ', '
        if self.W_m_inferior_check.isChecked():
            content_2 += self.W_inferior_check.text()+ ', '
        if self.W_m_posterior_check.isChecked():
            content_2 += self.W_posterior_check.text() + ', '
        if self.W_m_lateral_check.isChecked():
            content_2 += self.W_lateral_check.text() + ', '
        if self.W_abnormal_check.isChecked() and self.W_midcavity_check.isChecked():
            content_2 += ') '
        if self.W_m_hypokinesis_check.isChecked():
            content_2 += self.W_hypo_check.text() + ', '
        if self.W_m_akinesis_check.isChecked():
            content_2 += self.W_akinesis_check.text() + ', '
        if self.W_m_dyskinesis_check.isChecked():
            content_2 += self.W_dysk_check.text()
        # ------- apical ----------
        content_3 = ''
        if self.W_apical_check.isChecked():
            content_3 += '' + self.W_apical_check.text() + ' ('
        if self.W_a_anterior_check.isChecked():
            content_3 += self.W_anterior_check.text() + ', '
        if self.W_a_septal_check.isChecked():
            content_3 += self.W_septal_check.text() + ', '
        if self.W_a_inferior_check.isChecked():
            content_3 +=self.W_inferior_check.text()+ ', '
        if self.W_a_posterior_check.isChecked():
            content_3 += self.W_posterior_check.text() + ', '
        if self.W_a_lateral_check.isChecked():
            content_3 += self.W_lateral_check.text() + ', '
        if self.W_abnormal_check.isChecked() and self.W_apical_check.isChecked():
            content_3 += ') '
        if self.W_a_hypokinesis_check.isChecked():
            content_3 += self.W_hypo_check.text() + ', '
        if self.W_a_akinesis_check.isChecked():
            content_3 += self.W_akinesis_check.text() + ', '
        if self.W_a_dyskinesis_check.isChecked():
            content_3 += self.W_dysk_check.text()
        self.auto_paragraph('WW',content)
        self.auto_paragraph('w_i',content_1)
        self.auto_paragraph('w_j', content_2)
        self.auto_paragraph('w_k', content_3)

    def comment_word(self):
        """Substitute the free-text comment into token 'CC'."""
        self.auto_paragraph('CC',self.comment.toPlainText())

    def basic_information(self):
        """Fill patient demographics (name, id, gender, age, height, weight)."""
        self.auto_paragraph('exam', datetime.now().strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日'))
        self.auto_paragraph('birth',self.birthdate_edit.date().toPyDate(\
            ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日'))
        self.auto_paragraph('name',self.name_input.text())
        self.auto_paragraph('ID',self.ID_input.text())
        if self.male_input.isChecked():
            self.auto_paragraph('gender','男')
        elif self.female_input.isChecked():
            self.auto_paragraph('gender', '女')
        #calculate the years old
        diff = relativedelta(datetime.now(), self.birthdate_edit.date().toPyDate())
        year_old = str(diff.years) + '歲' + str(diff.months) +'個月'
        self.auto_paragraph('old',year_old)
        self.auto_paragraph('height',self.height_input.text())
        self.auto_paragraph('weight',self.weight_input.text())

    def nessesary_information(self):
        """Fill the measurement table cells (tokens a_i .. z_j) from the form,
        computing the few derived values (respiratory change, mean e', E/e')."""
        self.auto_word_cell('a_i',self.IVDd_input.text())
        self.auto_word_cell('b_i', self.PWDd_input.text())
        self.auto_word_cell('c_i', self.LVDd_input.text())
        self.auto_word_cell('d_i', self.LVDs_input.text())
        self.auto_word_cell('e_i', self.Ao_input.text())
        self.auto_word_cell('f_i', self.LA_input.text())
        self.auto_word_cell('g_i', self.LVEF_input.text())
        self.auto_word_cell('h_i', self.TAPSE_input.text())
        self.auto_word_cell('i_i', self.EPSS_input.text())
        self.auto_word_cell('j_i', self.IVCexp_input.text())
        self.auto_word_cell('k_i', self.IVCixp_input.text())
        try:
            Res_value = '%.2f' %((float(self.IVCexp_input.text())-float(self.IVCixp_input.text()))*100/float(self.IVCexp_input.text()))
        except:
            # non-numeric / empty inputs: leave the cell blank
            Res_value = ''
        self.auto_word_cell('l_i', Res_value) #Respiratory charge = (exp-isp)/esp (%)
        self.auto_word_cell('m_i', self.RVD_input.text())
        self.auto_word_cell('n_i', self.RVAd_input.text())
        self.auto_word_cell('o_i', self.RVAD2_input.text())
        self.auto_word_cell('p_i', self.RVFAC_input.text())
        self.auto_word_cell('q_i', self.RAA_input.text())
        self.auto_word_cell('r_i', self.LVEF_input2.text())
        self.auto_word_cell('s_i', self.EA_input.text())
        self.auto_word_cell('t_i', self.MVEv_input.text())
        # self.auto_word_cell('u_i', self..text())
        self.auto_word_cell('v_i', self.Eneed_input.text())
        self.auto_word_cell('w_i', self.Elat_input.text())
        try:
            mean_e_value = '%.2f' %((float(self.Eneed_input.text())+float(self.Elat_input.text()))/2)
        except:
            mean_e_value = ''
        self.auto_word_cell('x_i', mean_e_value) #mean e'
        try:
            E_e_value = '%.2f' %(float(self.MVEv_input.text())/float(mean_e_value))
        except:
            E_e_value = ''
        self.auto_word_cell('y_i', E_e_value)
        self.auto_word_cell('z_i', self.RVS_input.text())
        self.auto_word_cell('a_j', self.PVSD_combo.currentText())
        self.auto_word_cell('c_j',self.MV_input.text())
        self.auto_word_cell('b_j',self.AsAo_input.text())
        self.auto_word_cell('d_j',self.LVM_index_input.text())
        self.auto_word_cell('z_j',self.MVDt_input.text())
        if self.Per_eff_check.isChecked():
            # pericardial effusion description
            content = ''
            content += self.Per_combo.currentText()
            if self.Per_combo.currentText()=='nil':
                pass
            else:
                content += '-('+self.Per_eff_combo.currentText()+')' + ' '+self.Per_eff_input.text()+' cm '+ self.with_without_combo.currentText()+' echo-tamponade-sign'
            self.auto_word_cell('p_j',content)
        if self.M_mode_others_check.isChecked():
            self.auto_word_cell('input', self.M_mode_others.toPlainText())
        else:
            self.auto_word_cell('input', '')

    def auto_word_cell(self,replaced_word, content):
        """Replace `replaced_word` with `content` in every cell of the first
        table of the document, forcing the 'Normal' paragraph style."""
        style = self.document.styles['Normal']
        font = style.font
        font.name = 'DFKai-SB'
        font.bold=False
        for row in self.table[0].rows:
            for cell in row.cells:
                for paragraph in cell.paragraphs:
                    if replaced_word in paragraph.text:
                        temp = paragraph.text
                        temp = temp.replace(replaced_word,content)
                        paragraph.text = temp
                        paragraph.style = 'Normal'

    def auto_paragraph(self,replaced_word,content):
        """Replace `replaced_word` with `content` in every run of every body
        paragraph of the document."""
        style = self.document.styles['Normal']
        font = style.font
        font.name = 'DFKai-SB'
        for paragraph in self.document.paragraphs:
            inline = paragraph.runs
            for i in range(len(inline)):
                if replaced_word in inline[i].text:
                    temp = inline[i].text
                    temp = temp.replace(replaced_word, content)
                    inline[i].text = temp

    def reset_input(self):
        """Clear every form widget and reload a fresh copy of the template."""
        self.document = docx.Document('example2.docx')
        self.table = self.document.tables
        self.height_input.setText('')
        self.name_input.setText('')
        self.ID_input.setText('')
        self.weight_input.setText('')
        self.old_input.setText('')
        self.IVDd_input.setText('')
        self.PWDd_input.setText('')
        self.LVDd_input.setText('')
        self.LVDs_input.setText('')
        self.Ao_input.setText('')
        self.LA_input.setText('')
        self.LVEF_input.setText('')
        self.TAPSE_input.setText('')
        self.EPSS_input.setText('')
        self.IVCexp_input.setText('')
        self.IVCixp_input.setText('')
        self.Res_input.setText('')
        self.RVD_input.setText('')
        self.RVAd_input.setText('')
        self.RVAD2_input.setText('')
        self.RVFAC_input.setText('')
        self.RAA_input.setText('')
        self.LVEF_input.setText('')
        self.Per_eff_input.setText('')
        self.MV_input.setText('')
        self.EA_input.setText('')
        self.E_input.setText('')
        self.MVEv_input.setText('')
        self.MVDt_input.setText('')
        self.Eneed_input.setText('')
        self.Elat_input.setText('')
        self.mean_e_input.setText('')
        self.E_e_input.setText('')
        self.RVS_input.setText('')
        self.M_MVA_input.setText('')
        self.M_meantrans_input.setText('')
        self.M_MRarea_input.setText('')
        self.M_MRLA_input.setText('')
        self.M_Veno_input.setText('')
        self.M_ERO_input.setText('')
        self.A_AVA_input.setText('')
        self.A_AVPG_input.setText('')
        self.A_peckPG_input.setText('')
        self.A_AVVmax_input.setText('')
        self.A_jet_input.setText('')
        self.A_PHT_input.setText('')
        self.A_veno_input.setText('')
        self.T_TRarea_input.setText('')
        self.T_TRV_input.setText('')
        self.LVEF_input2.setText('')
        # setCheckState(0) == Qt.Unchecked; setCurrentIndex(0) == placeholder entry
        self.M_mode_others_check.setCheckState(0)
        self.M_mode_others.setPlainText('')
        self.Per_eff_check.setCheckState(0)
        self.MV_check.setCheckState(0)
        self.Per_eff_combo.setCurrentIndex(0)
        self.with_without_combo.setCurrentIndex(0)
        self.PVSD_combo.setCurrentIndex(0)
        self.M_normal_check.setCheckState(0)
        self.M_BMV_check.setCheckState(0)
        self.M_MMV_check.setCheckState(0)
        self.M_MMVp_check.setCheckState(0)
        self.M_MMVp_combo.setCurrentIndex(0)
        self.M_MMVp_combo1.setCurrentIndex(0)
        self.M_mitral_check.setCheckState(0)
        self.M_scler_check.setCheckState(0)
        self.M__fibro_check.setCheckState(0)
        self.M_myxo_check.setCheckState(0)
        self.M_chordae_check.setCheckState(0)
        self.M_chordae_check_2.setCurrentIndex(0)
        self.M_MVA_check.setCheckState(0)
        self.M_MR_check.setCheckState(0)
        self.M_MR_combo.setCurrentIndex(0)
        self.M_others_check.setCheckState(0)
        self.M_others.setPlainText('')
        self.A_normal_check.setCheckState(0)
        self.A_BMV_check.setCheckState(0)
        self.A_MMV_check.setCheckState(0)
        self.A_scler_check.setCheckState(0)
        self.A_mitral_check.setCheckState(0)
        self.A_bicu_check.setCheckState(0)
        self.A_tricu_check.setCheckState(0)
        self.A_AVA_check.setCheckState(0)
        self.A_meantrans_check.setCheckState(0)
        self.A_AR_check.setCheckState(0)
        self.A_AR_combo.setCurrentIndex(0)
        self.A_dias_combo.setCurrentIndex(0)
        self.A_others_check.setCheckState(0)
        self.A_others.setPlainText('')
        self.T_normal_check.setCheckState(0)
        self.T_pro_check.setCheckState(0)
        self.T_anter_check.setCheckState(0)
        self.T_post_check.setCheckState(0)
        self.T_septal_check.setCheckState(0)
        self.T_TR_check.setCheckState(0)
        self.T_TR_combo.setCurrentIndex(0)
        self.T_TRV_check.setCheckState(0)
        self.T_TV_check.setCheckState(0)
        self.T_others_check.setCheckState(0)
        self.T_others.setPlainText('')
        self.P_normal_check.setCheckState(0)
        self.P_PR_check.setCheckState(0)
        self.P_others.setPlainText('')
        self.W_normal_check.setCheckState(0)
        self.W_abnormal_check.setCheckState(0)
        self.W_basal_check.setCheckState(0)
        self.W_midcavity_check.setCheckState(0)
        self.W_apical_check.setCheckState(0)
        self.W_anterior_check.setCheckState(0)
        self.W_septal_check.setCheckState(0)
        self.W_inferior_check.setCheckState(0)
        self.W_posterior_check.setCheckState(0)
        self.W_lateral_check.setCheckState(0)
        self.W_hypo_check.setCheckState(0)
        self.W_akinesis_check.setCheckState(0)
        self.W_dysk_check.setCheckState(0)
        self.P_PR_check.setCheckState(0)
        self.comment.setPlainText('')
if __name__=="__main__":
app = QtWidgets.QApplication(sys.argv)
window = mywindow()
window.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
2d6f42b49f96827079ca82d4e83b489775bfc4a8
|
f7c45b28a126ed15e2b5d09a4a657111a2b04582
|
/login.py
|
9c385bcd7b0cae7a1817245048ba3bf36ed549a2
|
[] |
no_license
|
dhivyaganesan14/MEDIPASS
|
a0509560790744bb257edeebf425daf2e69f2f69
|
0c73e17b7b842a769e95eef3a5d33542e37325b2
|
refs/heads/main
| 2023-06-21T04:44:36.593922 | 2021-07-22T04:17:11 | 2021-07-22T04:17:11 | 388,323,268 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,979 |
py
|
from flask import Flask
from flask.ext.login import LoginManager
from pymongo import MongoClient
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
from flask import request, redirect, render_template, url_for, flash
from flask.ext.login import login_user, logout_user, login_required
from werkzeug.security import check_password_hash
class User():
    """Flask-Login user object; the username doubles as the user id.

    Implements the four methods Flask-Login expects on a user class
    (is_authenticated / is_active / is_anonymous / get_id).
    """

    def __init__(self,username):
        self.username = username
        # Never holds the password; hashes are checked via validate_login.
        self.password = None

    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # Value Flask-Login stores in the session cookie.
        return self.username

    @staticmethod
    def validate_login(password_hash, password):
        """Check a plaintext password against the stored werkzeug hash."""
        return check_password_hash(password_hash, password)
#Defining the LoginForm class
class LoginForm(Form):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
# WSGI application instance; the routes below attach to it.
app = Flask(__name__)

#-----All Endpoints
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a POSTed username/password against the Mongo collection.

    Returns '1' on success (after logging the user in), '0' on failure.
    A plain GET returns no body (implicit None), matching the original.
    """
    form = LoginForm()
    if request.method == 'POST':
        # print statements parenthesized: the old `print "..."` form is a
        # SyntaxError on Python 3 (this call form also works on Python 2).
        print("POST REQ")
        user = app.config['USERS_COLLECTION'].find_one({"_id": form.username.data})
        print("USER:-------------- " + form.username.data)
        if user and User.validate_login(user['password'], form.password.data):
            user_obj = User(user['_id'])
            login_user(user_obj)
            flash("Logged in successfully!", category='success')
            return '1'
        flash("Wrong username or password!", category='error')
        return '0'
@app.route('/logout')
def logout():
    # Clear the Flask-Login session; '1' signals success to the client.
    logout_user()
    return '1'
def load_user(username):
    """Flask-Login user loader: fetch the record by id, or None when absent."""
    record = app.config['USERS_COLLECTION'].find_one({"_id": username})
    return User(record['_id']) if record else None
if __name__ == '__main__':
    # Listen on all interfaces, port 9000, with the Flask debugger enabled.
    app.run('0.0.0.0',9000,debug = True)
|
[
"[email protected]"
] | |
122734a9363ec340c61f73c2a29c53e1ee6cf657
|
c204100919a3711059b9155d7b995109055e468a
|
/authorization/serializers.py
|
2659ede4240ab284a855ef6591ee599650e784c4
|
[] |
no_license
|
Mercyplus/Authorization
|
8f439e04f34c2490cec1ee218868816204bfd0ee
|
898a8007f2d98e8c9fd9f094a99c9dc11549d084
|
refs/heads/main
| 2023-04-03T02:11:09.972190 | 2021-04-06T16:07:23 | 2021-04-06T16:07:23 | 355,245,354 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,969 |
py
|
import os
import hashlib
import binascii
from bip32utils import BIP32Key
from eth_keys import keys
def generate_memo(m):
    """Create a random memo: 8 random bytes plus a 2-byte digest checksum,
    returned hex-encoded (20 hex characters as bytes).

    `m` is a hash object (e.g. hashlib.sha256()) that accumulates the
    random bytes; its current digest supplies the checksum.
    """
    random_bytes = os.urandom(8)
    m.update(random_bytes)
    return binascii.hexlify(random_bytes + m.digest()[:2])
def init_profile(user, is_social=False, metamask_address=None, lang='en', swaps=False):
    """Derive per-user BTC and ETH deposit addresses from extended public keys.

    A child key is derived at index ``user.id`` from each root xpub, giving
    every user a deterministic, unique address per key chain.

    NOTE(review): ROOT_PUBLIC_KEY, ROOT_PUBLIC_KEY_SWAPS and
    ROOT_PUBLIC_KEY_PROTECTOR are not defined or imported in this module —
    presumably supplied by settings elsewhere; confirm.
    NOTE(review): the derived addresses and memos are computed but never
    stored or returned in the visible body, and the parameters is_social,
    metamask_address, lang and swaps are unused — the function appears
    incomplete.
    """
    m = hashlib.sha256()
    memo_str1 = generate_memo(m)
    # memo_str2 = generate_memo(m)
    # memo_str3 = generate_memo(m)
    memo_str4 = generate_memo(m)
    memo_str5 = generate_memo(m)
    # memo_str6 = generate_memo(m)
    wish_key = BIP32Key.fromExtendedKey(ROOT_PUBLIC_KEY, public=True)
    # eosish_key = BIP32Key.fromExtendedKey(ROOT_PUBLIC_KEY_EOSISH, public=True)
    # tron_key = BIP32Key.fromExtendedKey(ROOT_PUBLIC_KEY_TRON, public=True)
    swaps_key = BIP32Key.fromExtendedKey(ROOT_PUBLIC_KEY_SWAPS, public=True)
    protector_key = BIP32Key.fromExtendedKey(ROOT_PUBLIC_KEY_PROTECTOR, public=True)
    btc_address1 = wish_key.ChildKey(user.id).Address()
    # btc_address2 = eosish_key.ChildKey(user.id).Address()
    # btc_address3 = tron_key.ChildKey(user.id).Address()
    btc_address4 = swaps_key.ChildKey(user.id).Address()
    btc_address5 = protector_key.ChildKey(user.id).Address()
    # btc_address6 = swaps_key.ChildKey(user.id).Address()
    eth_address1 = keys.PublicKey(wish_key.ChildKey(user.id).K.to_string()).to_checksum_address().lower()
    # eth_address2 = keys.PublicKey(eosish_key.ChildKey(user.id).K.to_string()).to_checksum_address().lower()
    # eth_address3 = keys.PublicKey(tron_key.ChildKey(user.id).K.to_string()).to_checksum_address().lower()
    eth_address4 = keys.PublicKey(swaps_key.ChildKey(user.id).K.to_string()).to_checksum_address().lower()
    eth_address5 = keys.PublicKey(protector_key.ChildKey(user.id).K.to_string()).to_checksum_address().lower()
    # eth_address6 = keys.PublicKey(swaps_key.ChildKey(user.id).K.to_string()).to_checksum_address().lower()
|
[
"[email protected]"
] | |
efdb806d91f39a06a1b0fe783e20d18f226f6f1a
|
b793302e56ab40b82892191a3b3bbd916c9c0f47
|
/searsh_info.py
|
a3beb04645cc954b5921a45d3ec6e85ffd13900b
|
[] |
no_license
|
yuanfeisiyuetian/5gc-fuzz
|
5b06f053d64b5504a1667a5beb695936ce1a1381
|
13f1943a0d9e6cc79e2a2e8089e56ca31c1e5edb
|
refs/heads/master
| 2023-08-11T23:08:38.530392 | 2021-10-06T07:19:59 | 2021-10-06T07:19:59 | 411,973,402 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,323 |
py
|
import json
from urllib.parse import urlparse,parse_qs
from argparse import Namespace
from pyjfuzz.lib import *
file = "traffic.json"
def mergefile():
    """Concatenate the three traffic capture files into `file`, line by line."""
    source_files = ["traffic-new1.json", "traffic-new2.json", "traffic-new3.json"]
    with open(file, "w") as merged:
        for source_name in source_files:
            with open(source_name, "r") as source:
                for line in source:
                    merged.write(line)
'''
# 测试三种类型url的解析结果
test1 = "/nnrf-disc/v1/nf-instances?requester-nf-type=AMF&target-nf-type=AUSF"
test2 = "/nudr-dr/v1/subscription-data/imsi-208930000000019/20893/provisioned-data/am-data?supported-features="
test3 = "/nudr-dr/v1/subscription-data/imsi-208930000000019/20893/provisioned-data/sm-data?single-nssai={\"sst\":1,\"sd\":\"010203\"}"
demo:
test1 = "/nnrf-disc/v1/nf-instances?requester-nf-type=AMF&target-nf-type=AUSF"
parsed_result = urlparse(test1)
print(parsed_result)
r = parse_qs(parsed_result.query)
print(r)
printout:
test1:{'requester-nf-type': ['AMF'], 'target-nf-type': ['AUSF']}
test2:{}
test3:{'single-nssai': ['{"sst":1,"sd":"010203"}']}
'''
def get_data() -> dict:
    """Parse each request URL in the traffic capture.

    Returns a dict mapping URL path -> parsed query string (as produced by
    urllib.parse.parse_qs); URLs whose query string parses to empty are
    dropped.
    """
    with open(file, "r") as capture:
        unique_urls = {json.loads(line)["Request"]["Url"] for line in capture}
    path_to_query = {}
    for url in unique_urls:
        parsed = urlparse(url)
        query = parse_qs(parsed.query)
        if query:
            path_to_query[parsed.path] = query
    return path_to_query
# Driver: for every captured path+query, generate 10 fuzzed query-string
# variants with pyjfuzz and write the resulting URLs to out.json.
data = get_data()
with open("out.json","w") as f:
    for url,qs in data.items():
        print("before:")
        print(url,qs)
        print("fuzzed:")
        # level=6 is the pyjfuzz fuzz aggressiveness; qs is the JSON seed
        config = PJFConfiguration(Namespace(json=qs, nologo=True, level=6))
        fuzzer = PJFFactory(config)
        i = 0
        while i<10:
            fuzz_data = json.loads(fuzzer.fuzzed)
            # rebuild a query string from the fuzzed {key: [values]} mapping
            qsstr = ""
            for k in fuzz_data:
                for v in fuzz_data[k]:
                    qsstr = qsstr+k+'='+str(v)+'&'
            qsstr=qsstr[:-1]
            print(url+"?"+qsstr)
            out = url+"?"+qsstr
            f.writelines(out+'\n')
            i = i+1
|
[
"[email protected]"
] | |
a6a06bdd0516202480eb8748f797f82b9b0b9b49
|
56515c399947a9078a9b6b73ecaa7b455aa61a3a
|
/venv/Scripts/easy_install-script.py
|
b8424e6bb0b74845e7face3b62816515157a8884
|
[] |
no_license
|
ishimtsev/Secret_sharing
|
0a6a52e3b326fb3db4898af88f2503d4e05366ee
|
6a609802e957a326ed1e7ac40e11217ea9725fc0
|
refs/heads/master
| 2020-09-14T19:29:37.478861 | 2019-11-21T17:42:58 | 2019-11-21T17:42:58 | 223,230,588 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 456 |
py
|
#!C:\Users\Ivan\PycharmProjects\Secret_sharing\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated console-script shim: dispatches to the easy_install entry
# point declared by setuptools. Do not edit by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the launcher suffix ('-script.py' / '.exe') from argv[0] so the
    # tool reports its own name correctly.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"[email protected]"
] | |
0c06a17c693143b6e7567310dfe57772ba8c8838
|
0c84329f5d1e4e9070cece982ac827644d8a1893
|
/venv/bin/django-admin.py
|
0b14e2b786d4bbc36959ba6df3a029dc36316065
|
[] |
no_license
|
Tiyas-13/LC
|
955deece0ecf1fc42e6a5d5fcc287ff0fca63679
|
ef45b44198532173c37793b9b0e8905ad7739f57
|
refs/heads/master
| 2020-06-09T16:30:02.176987 | 2019-07-02T22:10:50 | 2019-07-02T22:10:50 | 193,467,920 | 0 | 0 | null | 2019-07-02T22:10:51 | 2019-06-24T08:44:05 |
Python
|
UTF-8
|
Python
| false | false | 137 |
py
|
#!/home/hp/LC/venv/bin/python3
# Thin wrapper around Django's management CLI (equivalent to `django-admin`).
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"[email protected]"
] | |
df2718b59f6ac327ec4384944482bf00a1f784d2
|
1ab57fa76576134e734a26bebd813c2fa1dc23c6
|
/License Plate Recognition/Main.py
|
ae88f150ceefe03001cc0b02cd603f04d91d2dbf
|
[] |
no_license
|
AkbulutSemiha/BusStopViolationDetection
|
03e99b6626e1db6344bbe8a7bc5c5e09309d2863
|
0d847eb31005ab0505cc175fc1b18b85d10cb708
|
refs/heads/master
| 2021-06-05T05:09:05.652140 | 2021-06-03T17:00:59 | 2021-06-03T17:00:59 | 175,631,053 | 0 | 0 | null | 2019-03-14T13:47:22 | 2019-03-14T13:47:22 | null |
UTF-8
|
Python
| false | false | 8,418 |
py
|
# Main.py
import cv2
import numpy as np
import os
import DetectChars
import DetectPlates
import PossiblePlate
import time
import json
from subprocess import call
from datetime import datetime
from firebase import firebase
# Firebase REST client for uploading detection records.
# NOTE(review): 'your_firebase_table_url' is a placeholder — must be replaced
# with the real database URL before deployment.
firebase=firebase.FirebaseApplication('your_firebase_table_url') #Like 'https://tablocezali.firebaseio.com/'

# module level variables ##########################################################################
# BGR colour tuples used when drawing on OpenCV images.
SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)

# When True, intermediate processing images are displayed for debugging.
showSteps = False
###################################################################################################
def main():
blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN() # attempt KNN training
if blnKNNTrainingSuccessful == False: # if KNN training was not successful
print ("\nerror: KNN traning was not successful\n") # show error message
return # and exit program
# end if
#this system using local path. You change path your project Path:/home/pi/Desktop/OpenCVCode/
#İts take photo from webcam
os.system('fswebcam -r 640x480 -S 3 --jpeg 50 --save /home/pi/Desktop/OpenCVCode/res1.jpg') # uses Fswebcam to take picture
imgOriginalScene=cv2.imread("res1.jpg") #open image from file
if imgOriginalScene is None: # if image was not read successfully
print ("\nerror: image not read from file \n\n") # print error message to std out
os.system("pause") # pause so user can see error message
return # and exit program
# end if
listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene) # detect plates
listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates) # detect chars in plates
cv2.imshow("imgOriginalScene", imgOriginalScene) # show scene image
if len(listOfPossiblePlates) == 0: # if no plates were found
print ("\nno license plates were detected\n") # inform user no plates were found
else: # else
# if we get in here list of possible plates has at leat one plate
# sort the list of possible plates in DESCENDING order (most number of chars to least number of chars)
listOfPossiblePlates.sort(key = lambda possiblePlate: len(possiblePlate.strChars), reverse = True)
# suppose the plate with the most recognized chars (the first plate in sorted by string length descending order) is the actual plate
licPlate = listOfPossiblePlates[0]
cv2.imshow("imgPlate", licPlate.imgPlate) # show crop of plate and threshold of plate
cv2.imshow("imgThresh", licPlate.imgThresh)
if len(licPlate.strChars) == 0: # if no chars were found in the plate
print ("\nno characters were detected\n\n") # show message
return # and exit program
# end if
drawRedRectangleAroundPlate(imgOriginalScene, licPlate) # draw red rectangle around plate
print ("\nlicense plate read from image = " + licPlate.strChars + "\n") # write license plate text to std out
print ("----------------------------------------")
##########FireBAse Kontrol
zaman=time.gmtime()
print(zaman.tm_year,zaman.tm_mon,zaman.tm_mday)
tarih={'Plaka':licPlate.strChars,'yıl':zaman.tm_year,'ay':zaman.tm_mon,'gün':zaman.tm_mday}
print(tarih)
arac_tip=firebase.get('tablocezali/-LCB-Ofu6AREmhpttBHS',licPlate.strChars)
print("Plaka:",licPlate.strChars,"Arac tipi",arac_tip)
if(arac_tip=="polis"):
print("Bu araca Ceza Yazılamaz")
elif (arac_tip=="ambulans"):
print("Bu araca Ceza Yazılamaz")
else:
data=firebase.post('/tablocezali/-LCFiJ0HruUzdCA0Jn_B',tarih)
# var aracref=firebase.database().ref("araclar/")
#aracref.orderBychild("plaka").equalTo("34RED27").on("")
writeLicensePlateCharsOnImage(imgOriginalScene, licPlate) # write license plate text on the image
cv2.imshow("imgOriginalScene", imgOriginalScene) # re-show scene image
cv2.imwrite("imgOriginalScene.png", imgOriginalScene) # write image out to file
# end if else
cv2.waitKey(0) # hold windows open until user presses a key
return
# end main
###################################################################################################
def drawRedRectangleAroundPlate(imgOriginalScene, licPlate):
p2fRectPoints = cv2.boxPoints(licPlate.rrLocationOfPlateInScene) # get 4 vertices of rotated rect
cv2.line(imgOriginalScene, tuple(p2fRectPoints[0]), tuple(p2fRectPoints[1]), SCALAR_RED, 2) # draw 4 red lines
cv2.line(imgOriginalScene, tuple(p2fRectPoints[1]), tuple(p2fRectPoints[2]), SCALAR_RED, 2)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[2]), tuple(p2fRectPoints[3]), SCALAR_RED, 2)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[3]), tuple(p2fRectPoints[0]), SCALAR_RED, 2)
# end function
###################################################################################################
def writeLicensePlateCharsOnImage(imgOriginalScene, licPlate):
ptCenterOfTextAreaX = 0 # this will be the center of the area the text will be written to
ptCenterOfTextAreaY = 0
ptLowerLeftTextOriginX = 0 # this will be the bottom left of the area that the text will be written to
ptLowerLeftTextOriginY = 0
sceneHeight, sceneWidth, sceneNumChannels = imgOriginalScene.shape
plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape
intFontFace = cv2.FONT_HERSHEY_SIMPLEX # choose a plain jane font
fltFontScale = float(plateHeight) / 30.0 # base font scale on height of plate area
intFontThickness = int(round(fltFontScale * 1.5)) # base font thickness on font scale
textSize, baseline = cv2.getTextSize(licPlate.strChars, intFontFace, fltFontScale, intFontThickness) # call getTextSize
# unpack roatated rect into center point, width and height, and angle
( (intPlateCenterX, intPlateCenterY), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg ) = licPlate.rrLocationOfPlateInScene
intPlateCenterX = int(intPlateCenterX) # make sure center is an integer
intPlateCenterY = int(intPlateCenterY)
ptCenterOfTextAreaX = int(intPlateCenterX) # the horizontal location of the text area is the same as the plate
if intPlateCenterY < (sceneHeight * 0.75): # if the license plate is in the upper 3/4 of the image
ptCenterOfTextAreaY = int(round(intPlateCenterY)) + int(round(plateHeight * 1.6)) # write the chars in below the plate
else: # else if the license plate is in the lower 1/4 of the image
ptCenterOfTextAreaY = int(round(intPlateCenterY)) - int(round(plateHeight * 1.6)) # write the chars in above the plate
# end if
textSizeWidth, textSizeHeight = textSize # unpack text size width and height
ptLowerLeftTextOriginX = int(ptCenterOfTextAreaX - (textSizeWidth / 2)) # calculate the lower left origin of the text area
ptLowerLeftTextOriginY = int(ptCenterOfTextAreaY + (textSizeHeight / 2)) # based on the text area center, width, and height
# write the text on the image
cv2.putText(imgOriginalScene, licPlate.strChars, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_YELLOW, intFontThickness)
# end function
###################################################################################################
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
042f5e499527272b2d4e800785c81729348eedd8
|
8a049b6dcf9c497a4753488d75e265aed585afd0
|
/tests/seeder.py
|
e3704b4a72b23112b0b6c4c9ef3bf6354b8011b5
|
[
"MIT"
] |
permissive
|
ayeowch/bitnodes
|
d05901098eec6157f9fbd4a1aa2bcbdb1ce4d168
|
c73c3e4a07bfce37267708964de37891cab4850c
|
refs/heads/master
| 2023-07-05T22:24:15.769017 | 2023-05-07T06:41:29 | 2023-05-07T06:41:29 | 9,957,678 | 562 | 295 |
MIT
| 2023-06-22T00:41:26 | 2013-05-09T11:08:52 |
Python
|
UTF-8
|
Python
| false | false | 12 |
py
|
../seeder.py
|
[
"[email protected]"
] | |
2355da4fe0a15ebbd2427a4c7f7b891e2e2ad149
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/pose_estimation/Hourglass_for_PyTorch/mmpose-master/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1x_coco.py
|
4fb90266f00299d6ac45e49f928e81c2c3eb7535
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 6,403 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
total_epochs = 12
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
)
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
|
[
"[email protected]"
] | |
8fac5461e02a1eeb7aab7205d72609f915b89331
|
59a593f54ab281b6d270d18a62ac03a902687a67
|
/DocumentSimilarity.py
|
795b58721f49d3684c1dd12ca1406f097c85f826
|
[] |
no_license
|
ROHITHKUMARN/Document-Similarity
|
7e873543b48146eb1120032fc2f16dda16dcfb00
|
980b07e0528209fd911f828334db60c793736e1b
|
refs/heads/master
| 2021-05-16T09:56:14.287673 | 2017-02-24T06:03:58 | 2017-02-24T06:03:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,357 |
py
|
import sys
import os
import math
from math import*
from decimal import Decimal
from sklearn.decomposition import PCA
import numpy as np
f = open(os.path.abspath(sys.argv[1]))
document = []
vocabulary = []
uniqueWords = []
vector = []
tf = []
dict_idf = {}
idf = []
d = []
manhattan = []
euclidean = []
supremum = []
cos_sim =[]
pca_euclidean_dist = []
for line in f:
document.append(line)
for i in document:
vocabulary.append(i.rstrip().split(" "))
for words in vocabulary:
for w in words:
if w not in uniqueWords:
uniqueWords.append(w)
print(len(uniqueWords))
for words in vocabulary:
dict_words = {}
for w in words:
dict_words[w] = dict_words.get(w, 0) + 1
vector.append(dict_words.copy())
doc_num = 0
for count in vector:
d_tf = {}
for wrd in count:
d_tf[wrd] = float(count.get(wrd))/float(len(vocabulary[doc_num]))
tf.append(d_tf.copy())
doc_num += 1
for wrd_idf in uniqueWords:
c = 0
for doc in vocabulary:
if wrd_idf in doc:
c += 1
dict_idf[wrd_idf] = math.log(len(vocabulary)/c)
for t in tf:
temp = []
for wrd in dict_idf.keys():
temp.append(dict_idf.get(wrd)*t.get(wrd,0))
d.append(temp)
### Minkowski Distance ###
def root(number, h):
root_value = 1/float(h)
return round((number ** root_value), 4)
def minkowski_distance(x, y, h):
sum = 0
for a, b in zip(x,y):
sum += pow(abs(a-b),h)
return float(root(sum,h))
#(a). Manhattan distance, h =1
query = d[len(d)-1]
def manhattan_distance(d):
count = 0
for i in d:
count += 1
manhattan.append((count, minkowski_distance(i, query, 1)))
return(sorted(manhattan, key=lambda x: x[1]))
result = [x[0] for x in manhattan_distance(d)[0:5]]
print(' '.join(map(str, result)))
def euclidean_distance(d):
count = 0
for i in d:
count += 1
euclidean.append((count, minkowski_distance(i, query, 2)))
return(sorted(euclidean, key=lambda x: x[1]))
result = [x[0] for x in euclidean_distance(d)[0:5]]
print(' '.join(map(str, result)))
max_dist = 0
def supremum_distance(d):
doc_num = 0
for i in d:
max_dist = 0
doc_num += 1
for count in range(len(i)):
max_dist = round(max(max_dist, abs(i[count] - query[count])),4)
supremum.append((doc_num, max_dist))
return(sorted(supremum, key=lambda x: x[1]))
result = [x[0] for x in supremum_distance(d)[0:5]]
print(' '.join(map(str, result)))
def cosine_similarity(d):
doc_num = 0
for i in d:
n = []
docmnt = []
q = []
doc_num += 1
for k in range(len(i)):
n.append(i[k]*query[k])
docmnt.append(i[k]*i[k])
q.append(query[k]*query[k])
numerator = sum(n)
denominator = root(sum(docmnt),2)*root(sum(q),2)
c_sim = round(float(numerator/denominator),4)
cos_sim.append((doc_num, c_sim))
return(sorted(cos_sim,key = lambda x: x[1],reverse=True))
result = [x[0] for x in cosine_similarity(d)[0:5]]
print(' '.join(map(str, result)))
### PCA ###
pca = PCA(n_components = 2)
principal_components = pca.fit_transform(d)
#### Euclidean Distance of Two projected Vectors ###
def pca_euclidean_distance(principal_components):
count = 0
for i in principal_components:
count += 1
pca_euclidean_dist.append((count, minkowski_distance(i, principal_components[len(principal_components)-1], 2)))
return(sorted(pca_euclidean_dist, key=lambda x: x[1]))
result = [x[0] for x in pca_euclidean_distance(principal_components)[0:5]]
print(' '.join(map(str, result)))
|
[
"[email protected]"
] | |
dab29d3a9ef3247026fdb1d378f83a86706a7b3c
|
98ad28779c97cd0f2e566e32530c7075b73c4ed7
|
/gen_env.py
|
33f631e0b5a4440ad9aeea62e93b4d1dc21eb034
|
[
"MIT"
] |
permissive
|
jgearheart/f5-azure-saca
|
a45d46862e76ad7f85965a1f5d9a5f7dddcd4f61
|
56c6e01b955a3622800d9293b46977d6d5456b62
|
refs/heads/master
| 2021-04-06T06:34:24.424002 | 2018-03-02T02:28:00 | 2018-03-02T02:28:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,566 |
py
|
#!/usr/bin/env python
import requests
import json
import sys
import os
import re
import random
is_regkey = re.compile("([A-Z]{5}-[A-Z]{5}-[A-Z]{5}-[A-Z]{5}-[A-Z]{7})",re.M)
is_valid_dnsname = re.compile("^[a-z][a-z0-9-]{1,61}[a-z0-9]$")
session = requests.Session()
headers = {'user-agent':'f5-gen-env/0.1','Metadata':'true'}
METADATA_URL="http://169.254.169.254/metadata/instance?api-version=2017-08-01"
output = {}
try:
request = session.get(METADATA_URL,headers=headers)
data = json.loads(request.text)
output['resource_group'] = data['compute']['resourceGroupName']
output['location'] = data['compute']['location']
output['subscription_id'] = data['compute']['subscriptionId']
except requests.exceptions.ConnectionError:
#print "Please run on Azure Linux JumpBox"
#sys.exit(1)
output['resource_group'] = os.environ.get('AZURE_RESOURCE_GROUP','')
output['subscription_id'] = os.environ.get('AZURE_SUBSCRIPTION_ID','')
output['location'] = os.environ.get('location','')
pass
try:
sp = json.load(open('sp.json'))
output['client_id'] = sp["appId"]
output['client_secret'] = sp["password"]
output["tenant_id"] = sp["tenant"]
except:
output['client_id'] = ''
output['client_secret'] = ''
output["tenant_id"] = ''
pass
try:
key_text = open('keys.txt').read()
keys = is_regkey.findall(key_text)
output['key1'] = ''
output['key2'] = ''
output['key3'] = ''
output['key4'] = ''
for x in range(len(keys)):
output['key%s' %(x+1)] = keys[x]
except:
output['key1'] = ''
output['key2'] = ''
output['key3'] = ''
output['key4'] = ''
pass
output['f5_username'] = os.environ.get('USER','')
output['f5_password'] = os.environ.get('f5_password','')
shortname = output['resource_group'].lower()
if shortname.endswith("_rg"):
shortname = shortname[:-3]
if "_" in shortname:
shortname = shortname.replace('_','-')
if not is_valid_dnsname.match(shortname):
shortname = "f5-" + str(int(random.random() * 1000))
output['shortname'] = shortname
output['use_oms'] = 'False'
if os.path.exists('.use_oms'):
output['use_oms'] = 'True'
if os.path.exists('.password.txt'):
output['f5_password'] = "`base64 --decode .password.txt`"
TEMPLATE="""export AZURE_SUBSCRIPTION_ID="%(subscription_id)s"
export AZURE_CLIENT_ID="%(client_id)s"
export AZURE_SECRET="%(client_secret)s"
export AZURE_TENANT="%(tenant_id)s"
export AZURE_RESOURCE_GROUP="%(resource_group)s"
export AZURE_RESOURCE_GROUPS="${AZURE_RESOURCE_GROUP}_F5_External,${AZURE_RESOURCE_GROUP}_F5_Internal"
export location="%(location)s"
export f5_unique_short_name="%(shortname)sext"
export f5_unique_short_name2="%(shortname)sint"
export f5_license_key_1="%(key1)s"
export f5_license_key_2="%(key2)s"
export f5_license_key_3="%(key3)s"
export f5_license_key_4="%(key4)s"
export f5_username="%(f5_username)s"
export f5_password="%(f5_password)s"
export use_oms="%(use_oms)s"
export F5_VALIDATE_CERTS=no
loc=$(curl -H metadata:true "http://169.254.169.254/metadata/instance?api-version=2017-08-01" --stderr /dev/null |jq .compute.location)
echo $loc | grep -i -E "(gov|dod)" > /dev/null;
#echo $?
if [ $? == 0 ]
then
export is_gov=1;
else
export is_gov=0;
fi
if [ $is_gov == 1 ]
then
az cloud set -n AzureUSGovernment;
export AZURE_CLOUD_ENVIRONMENT="AzureUSGovernment";
fi
which az
az login \
--service-principal \
-u "$AZURE_CLIENT_ID" \
-p "$AZURE_SECRET" \
--tenant "$AZURE_TENANT"
az account set -s $AZURE_SUBSCRIPTION_ID
"""
print TEMPLATE %(output)
|
[
"[email protected]"
] | |
dabffd515b7dd2a0abf3bf15380ace94082f2145
|
ed2a234be16e5ac95496cd959b531542a087faf6
|
/Functions Advanced - Exercise/10. Keyword Arguments Length.py
|
1b03e732297da99ed3703c06b09f393e7c4587db
|
[] |
no_license
|
Spas52/Python_Advanced
|
efc73eda5d10707f1f1a7407cc697448a985f014
|
7082c8947abba9b348f8372f68d0fc10ffa57fc1
|
refs/heads/main
| 2023-06-04T13:05:46.394482 | 2021-06-24T00:01:37 | 2021-06-24T00:01:37 | 379,756,494 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 52 |
py
|
def kwargs_length(**kwargs):
return len(kwargs)
|
[
"[email protected]"
] | |
bc3798906716aa41be8beb4ecc2a2c58459a8f86
|
dc767b48d46e2f6b9851ce61914e880fc95fe520
|
/myshop/shop/migrations/0001_initial.py
|
cca6008d38c1c259f458a69c4f61f46f334c2252
|
[] |
no_license
|
EdmilsonSantana/django-by-example
|
c06081a1a3915aaf3996d017fea91c8273cbe2e0
|
7c895b55b8f6fcc05a2d5cd2181bf207dc9256fc
|
refs/heads/master
| 2021-01-12T02:58:49.261515 | 2017-02-28T20:05:29 | 2017-02-28T20:05:29 | 78,144,090 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,044 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-16 23:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200, unique=True)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200)),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('description', models.TextField(blank=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('stock', models.PositiveIntegerField()),
('available', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.Category')),
],
options={
'ordering': ('-created',),
},
),
migrations.AlterIndexTogether(
name='product',
index_together=set([('id', 'slug')]),
),
]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.