blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3356afae23c7ddb7429ccf7487de8234d072d1b8 | 27f7b7d7149c0faab7d8787901dfaa45f58b66d2 | /converter_to_hls/schemas.py | 55de5bd3be29242e648c48eec6720fcd264927c5 | []
| no_license | chistyakov/asyncio_video_converter | b9a183e9d26aae8acd7fa5dab09a18245b0f5348 | 4a0ad8f29cbc2e4e68720865d1184e6492ab3869 | refs/heads/master | 2021-08-30T10:17:18.075686 | 2017-12-17T13:19:05 | 2017-12-17T13:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | import re
from marshmallow import Schema, fields
from marshmallow.validate import Regexp
# Filename whitelist: must start with an alphanumeric character, then may
# contain alphanumerics, underscore, dot, space and hyphen. Using a raw
# string removes the invalid "\ " escape of the original (a
# DeprecationWarning since Python 3.6); inside a character class a plain
# space needs no escaping, so the accepted language is unchanged.
NO_SPECIAL_CHARS_PATTERN = re.compile(r'^[a-zA-Z0-9]+[a-zA-Z0-9_. -]*$')
class LaunchConverterSchema(Schema):
    """Request schema for launching a conversion job.

    Validates the name of the file to convert; the name must match
    NO_SPECIAL_CHARS_PATTERN, which rejects path separators and shell
    metacharacters before the value is used to locate a file on disk.
    """
    # load_only: accepted on input (deserialization) but never serialized back.
    file = fields.Str(
        required=True,
        load_only=True,
        validate=Regexp(NO_SPECIAL_CHARS_PATTERN, error='Invalid filename.')
    )
| [
"[email protected]"
]
| |
f4f88e98234594d9ed15219107a75a3f1124d737 | 3f06d6f3dc106e50968f2c0090c376bbe89a906f | /oa/venv/bin/pip3 | a734ce6186f31a62ab590dfd4cbcf80c64c5002b | []
| no_license | xiaohuwu/Python-camp | b3f1699d35f636290b3aefa833288d4a31068375 | a74ca4f01c7ba5f15a724f3c1bf0877d414d644a | refs/heads/master | 2022-01-25T18:56:12.870092 | 2022-01-13T15:45:43 | 2022-01-13T15:45:43 | 176,092,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/Users/xiaohuge/PycharmProjects/Python-camp/oa/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Standard setuptools console-script shim: strip an optional
    # "-script.py(w)" or ".exe" suffix from argv[0] so pip reports its own
    # name correctly, then delegate to pip's internal main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
]
| ||
9b3ef03ef6d8de217adbc634e63f038ae42d5d52 | 0a3bf0a6f10eb143c9291090125946538ee73279 | /summarize/sumy/summarizers/edmundson_location.py | 406597f5a31d71b8b516c58c710328b273d06797 | [
"Apache-2.0"
]
| permissive | AIPHES/live-blog-summarization | 19ec1c01b7e254f74b2de153ac3972780daa7506 | a5f899ea07a098e1e0b3ab92cd3d430776e6412a | refs/heads/master | 2022-11-24T09:39:25.750313 | 2019-02-12T13:53:12 | 2019-02-12T13:53:12 | 166,268,167 | 2 | 1 | Apache-2.0 | 2022-11-02T20:47:14 | 2019-01-17T17:34:10 | Python | UTF-8 | Python | false | false | 2,516 | py | # -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from itertools import chain
from operator import attrgetter
from .._compat import ffilter
from ._summarizer import AbstractSummarizer
class EdmundsonLocationMethod(AbstractSummarizer):
    """Location method of Edmundson's extractive summarization.

    Sentences are rated by how many (stemmed) heading words they contain,
    weighted by w_h, plus positional bonuses: w_p1/w_p2 for sentences in the
    first/last paragraph and w_s1/w_s2 for the first/last sentence of a
    paragraph.
    """
    def __init__(self, stemmer, null_words):
        # null_words: collection of stop words used when selecting the
        # significant heading words.
        super(EdmundsonLocationMethod, self).__init__(stemmer)
        self._null_words = null_words
    def __call__(self, document, sentences_count, w_h, w_p1, w_p2, w_s1, w_s2):
        # Rate every sentence, then return the `sentences_count` best ones.
        significant_words = self._compute_significant_words(document)
        ratings = self._rate_sentences(document, significant_words, w_h, w_p1,
            w_p2, w_s1, w_s2)
        return self._get_best_sentences(document.sentences, sentences_count, ratings)
    def _compute_significant_words(self, document):
        # Stemmed words taken from all document headings.
        # NOTE(review): ffilter keeps words for which _is_null_word is True,
        # i.e. heading words that ARE null words - looks inverted; confirm
        # against the semantics of _compat.ffilter upstream.
        headings = document.headings
        significant_words = chain(*map(attrgetter("words"), headings))
        significant_words = map(self.stem_word, significant_words)
        significant_words = ffilter(self._is_null_word, significant_words)
        return frozenset(significant_words)
    def _is_null_word(self, word):
        return word in self._null_words
    def _rate_sentences(self, document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2):
        # Returns a mapping 'sentence -> numeric rating'.
        rated_sentences = {}
        paragraphs = document.paragraphs
        for paragraph_order, paragraph in enumerate(paragraphs):
            sentences = paragraph.sentences
            for sentence_order, sentence in enumerate(sentences):
                # Base score: count of significant words, scaled by w_h.
                rating = self._rate_sentence(sentence, significant_words)
                rating *= w_h
                # Paragraph-position bonus (first/last paragraph).
                if paragraph_order == 0:
                    rating += w_p1
                elif paragraph_order == len(paragraphs) - 1:
                    rating += w_p2
                # Sentence-position bonus (first/last sentence of paragraph).
                if sentence_order == 0:
                    rating += w_s1
                elif sentence_order == len(sentences) - 1:
                    rating += w_s2
                rated_sentences[sentence] = rating
        return rated_sentences
    def _rate_sentence(self, sentence, significant_words):
        # Number of (stemmed) words of the sentence that are significant.
        words = map(self.stem_word, sentence.words)
        return sum(w in significant_words for w in words)
    def rate_sentences(self, document, w_h=1, w_p1=1, w_p2=1, w_s1=1, w_s2=1):
        """Public helper: rate all sentences with unit default weights."""
        significant_words = self._compute_significant_words(document)
        return self._rate_sentences(document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2)
| [
"[email protected]"
]
| |
04bbe39817762a6bb17849d380462f527be2635f | 813fc649299282cbf5c2411de7aa99b32baa3dd0 | /vu/paginator.py | 6ec0ee3f60507551596315d9dfa614226743f1a0 | []
| no_license | kravciuk/vu | 1cc6003154f04ec14c08877e80bf0dc6622daaae | c8d0d093ef585c229d7f932ac5a7a8d779f4fc28 | refs/heads/master | 2023-04-21T08:06:43.829204 | 2023-04-09T00:01:50 | 2023-04-09T00:01:50 | 115,817,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py | # -*- coding: utf-8 -*-
__author__ = 'Vadim Kravciuk, [email protected]'
"""
cloned from https://github.com/Flynsarmy/flynsarmy-paginator
"""
from django.core.paginator import Paginator, Page
class FlynsarmyPaginator(Paginator):
    """Django Paginator that produces FlynsarmyPage objects.

    `adjacent_pages` controls how many page numbers are shown on each side
    of the current page in the digg-style page range (0 = full range).
    """
    def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True, adjacent_pages=0):
        self.adjacent_pages = adjacent_pages
        super(FlynsarmyPaginator, self).__init__(object_list, per_page, orphans, allow_empty_first_page)
    #Copied whole parent function returning a FlynsarmyPage instead. Ergh. Better way of doing this?
    def page(self, number):
        """Return a FlynsarmyPage for the given 1-based page number.

        Body duplicates Paginator.page so the adjacent_pages setting can be
        threaded through to the page object.
        """
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        # Absorb trailing orphans into the last page, as Django's Paginator does.
        if top + self.orphans >= self.count:
            top = self.count
        return FlynsarmyPage(self.object_list[bottom:top], number, self, self.adjacent_pages)
class FlynsarmyPage(Page):
    """Page subclass exposing a digg-style page range for templates."""
    def __init__(self, object_list, number, paginator, adjacent_pages=0):
        self.adjacent_pages = adjacent_pages
        super(FlynsarmyPage, self).__init__(object_list, number, paginator)
    def _get_page_range_data(self):
        """
        Returns a floating digg-style or 1-based range of pages for
        iterating through within a template for loop.
        """
        # With no adjacency window configured, fall back to the full range.
        if not self.adjacent_pages:
            return self.paginator.page_range
        startPage = max(1, self.number - self.adjacent_pages)
        #Be a bit smarter about start page
        if startPage <= 3: startPage = 1
        endPage = self.number + self.adjacent_pages + 1
        #Be a bit smarter about end page
        if endPage >= self.paginator.num_pages - 1: endPage = self.paginator.num_pages + 1
        # NOTE(review): the filter uses paginator.count (object count), not
        # num_pages; since num_pages <= count for non-empty querysets the
        # result is the same, but num_pages looks like the intended bound -
        # confirm before changing (count == 0 behaves differently).
        page_range = [n for n in range(startPage, endPage) \
                      if n > 0 and n <= self.paginator.count]
        return {
            'page_range': page_range,
            # Flags telling the template to render "first"/"last" shortcuts
            # when those pages are outside the visible window.
            'show_first': 1 not in page_range,
            'show_last': self.paginator.num_pages not in page_range,
        }
    page_range_data = property(_get_page_range_data)
| [
"[email protected]"
]
| |
9462edece292d3376d609f16332aeec61050205c | 3d7a02b5248ab41806e000dd5c243e205311bfd2 | /sort/tests/test_bubble_sort.py | b0b59ff21463815ed38d1944d79c7033db16e1ac | []
| no_license | mfilipelino/python-notes | 0fd93ce8cbc53d0792b95c4c9d0726c7e7fb08a6 | 98bf5831b42706caa3c7758720aef08097075dc1 | refs/heads/master | 2022-11-05T10:20:17.778910 | 2020-06-24T13:10:49 | 2020-06-24T13:10:49 | 125,086,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | from unittest import TestCase
from sort.bubble import bubble_sort
class TestCaseBubbleSort(TestCase):
    """Unit tests for bubble_sort: trivial inputs, all orderings of small
    lists, and a longer input with duplicates."""

    def test_case_base(self):
        # Zero- and one-element lists are already sorted.
        for trivial in ([], [1], ['a'], [[]]):
            self.assertEqual(trivial, bubble_sort(trivial))

    def test_case_1(self):
        # Both orderings of a two-element list sort to the same result.
        for scrambled in ([1, 2], [2, 1]):
            self.assertEqual([1, 2], bubble_sort(scrambled))
        for scrambled in (['a', 'b'], ['b', 'a']):
            self.assertEqual(['a', 'b'], bubble_sort(scrambled))

    def test_case_2(self):
        # All six permutations of [1, 2, 3] sort to the identity.
        for scrambled in ([1, 2, 3], [1, 3, 2], [2, 1, 3],
                          [2, 3, 1], [3, 2, 1], [3, 1, 2]):
            self.assertEqual([1, 2, 3], bubble_sort(scrambled))

    def test_case_3(self):
        # A longer input with duplicates must agree with the built-in sort.
        letters = "dhausdhuashdfjasjdfkjaskldf"
        self.assertEqual(sorted(list(letters)), bubble_sort(list(letters)))
"[email protected]"
]
| |
79b1d36a208c5fc84008ea5b0d48494e2e503c40 | f9ecf558fd6e8405072d17317df605ea1cefd7f1 | /juvnews/juvnews/pipelines.py | ad10069afa304d75968e1a26661665d9f5afad55 | []
| no_license | nkuflk/scrapy | 279b7f98ba682a8043c01a8bad129251f079554a | d011741958ebd11ee775c367ed9850d89d8664d6 | refs/heads/master | 2020-04-27T12:38:46.546175 | 2015-06-28T06:39:39 | 2015-06-28T06:39:39 | 37,419,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class JuvnewsPipeline(object):
    """Default (pass-through) Scrapy item pipeline for the juvnews project."""
    def process_item(self, item, spider):
        # No processing yet: returning the item unchanged passes it on to
        # any later pipelines in ITEM_PIPELINES.
        return item
| [
"[email protected]"
]
| |
89c1e612bf15f10445b4a1ba4fec726a5987fbe4 | fe8cc40acb2acd23a9acf729f85c170d8cbcb55e | /split.py | 67138a0c2e3e2caee1351fa7069ddfb311d56ecb | [
"MIT"
]
| permissive | MoveOnOrg/merkle | 5c6eaad24958a36e7dc530669e1bdb1999e09a2c | 764351cf6a21bb718343ae7488735f077aee3afe | refs/heads/main | 2021-08-07T04:40:37.302769 | 2020-06-03T16:36:40 | 2020-06-03T16:36:40 | 163,700,745 | 0 | 0 | MIT | 2021-06-21T05:25:48 | 2018-12-31T22:29:33 | Python | UTF-8 | Python | false | false | 5,502 | py | import csv
import os
import sys
from pywell.entry_points import run_from_cli
DESCRIPTION = 'Split a donation import into separate files to avoid erasing data with empty columns.'
ARG_DEFINITIONS = {
'BASE_DIRECTORY': 'Path to where files are located.',
'CSV': 'CSV file to split.'
}
REQUIRED_ARGS = ['BASE_DIRECTORY', 'CSV']
def main(args):
    """Split one donation-import CSV into several per-field CSVs.

    Rows are keyed by 'user_id' when present, otherwise by 'Email', and are
    fanned out into separate files (donations, per-column set-only updates,
    invalid addresses, addresses) so that importing a file never overwrites
    existing data with empty columns. Returns the list of filenames written
    into args.BASE_DIRECTORY.
    """
    # Output files share the input's name minus the '.csv' extension.
    prefix = args.CSV[:-4]
    # Columns exported individually, one file per column, only when non-empty.
    set_only_columns = [
        'user_do_not_mail', 'user_sms_subscribed', 'home_phone',
        'mobile_phone', 'first_name', 'last_name', 'prefix'
    ]
    # NOTE(review): address_columns is never used below; the address dicts
    # repeat the column names inline instead.
    address_columns = [
        'address1', 'address2', 'city', 'state', 'zip'
    ]
    # Bucket name -> list of row dicts destined for that output file.
    files = {
        'donations-user': [],
        'donations-email': [],
        'invalid-user': [],
        'invalid-email': [],
        'address-user': [],
        'address-email': []
    }
    for column in set_only_columns:
        files['%s-user' % column] = []
        files['%s-email' % column] = []
    with open('%s%s' % (args.BASE_DIRECTORY, args.CSV), 'rt') as csvfile:
        csvreader = csv.DictReader(csvfile)
        for row in csvreader:
            user_id = row.get('user_id')
            donation_payment_account = row.get('donation_payment_account')
            source = row.get('source')
            email = row.get('Email')
            # NOTE(review): float('') raises ValueError - this assumes the
            # donation_amount column, when present, is always numeric.
            if float(row.get('donation_amount', 0)) > 0:
                if user_id != '':
                    files['donations-user'].append({
                        'user_id': user_id, 'source': source,
                        'donation_amount': row.get('donation_amount'),
                        'donation_import_id': row.get('donation_import_id'),
                        'donation_date': row.get('donation_date'),
                        'donation_currency': row.get('donation_currency'),
                        'donation_payment_account': donation_payment_account,
                        'action_occupation': row.get('action_occupation'),
                        'action_employer': row.get('action_employer'),
                    })
                elif email != '':
                    files['donations-email'].append({
                        'email': email, 'source': source,
                        'donation_amount': row.get('donation_amount'),
                        'donation_import_id': row.get('donation_import_id'),
                        'donation_date': row.get('donation_date'),
                        'donation_currency': row.get('donation_currency'),
                        'donation_payment_account': donation_payment_account,
                        'action_occupation': row.get('action_occupation'),
                        'action_employer': row.get('action_employer'),
                    })
            # Each non-empty set-only column goes into its own bucket.
            for column in set_only_columns:
                row_column = row.get(column, '')
                if row_column != '':
                    if user_id != '':
                        files['%s-user' % column].append({
                            'user_id': user_id, 'source': source,
                            column: row.get(column),
                        })
                    elif email != '':
                        files['%s-email' % column].append({
                            'email': email, 'source': source,
                            column: row.get(column),
                        })
            # Sentinel value 'Invalid' in address1 means "blank out the
            # address" - exported as '-' placeholders.
            if row.get('address1', False) == 'Invalid':
                if user_id != '':
                    files['invalid-user'].append({
                        'user_id': user_id, 'source': source,
                        'address1': '-', 'address2': '-', 'city': '-',
                        'state': '-', 'zip': '-'
                    })
                elif email != '':
                    files['invalid-email'].append({
                        'email': email, 'source': source,
                        'address1': '-', 'address2': '-', 'city': '-',
                        'state': '-', 'zip': '-'
                    })
            elif row.get('address1', False):
                if user_id != '':
                    files['address-user'].append({
                        'user_id': user_id, 'source': source,
                        'address1': row.get('address1', ''),
                        'address2': row.get('address2', ''),
                        'city': row.get('city', ''),
                        'state': row.get('state', ''),
                        'zip': row.get('zip', '')
                    })
                elif email != '':
                    files['address-email'].append({
                        'email': email, 'source': source,
                        'address1': row.get('address1', ''),
                        'address2': row.get('address2', ''),
                        'city': row.get('city', ''),
                        'state': row.get('state', ''),
                        'zip': row.get('zip', '')
                    })
    # Write one CSV per non-empty bucket; header comes from the first row's keys.
    filenames = []
    for file in files:
        if len(files[file]) > 0:
            filename = prefix + '-' + file + '.csv'
            filenames.append(filename)
            with open('%s%s' % (args.BASE_DIRECTORY, filename), 'w') as csvfile:
                fieldnames = list(files[file][0].keys())
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                for row in files[file]:
                    writer.writerow(row)
    return filenames
if __name__ == '__main__':
    # pywell parses CLI arguments per ARG_DEFINITIONS/REQUIRED_ARGS and
    # invokes main() with them.
    run_from_cli(main, DESCRIPTION, ARG_DEFINITIONS, REQUIRED_ARGS)
| [
"[email protected]"
]
| |
a1db617af0c4b541bbf1394ddc07164641292ac4 | 9401b8dbf4ebda9b9a9fb44b10579e65175263c5 | /play.py | 693e63816f60b5ea85a88283127761264c4456f0 | []
| no_license | maxsun/MusicFinal | 17f3e1f9235a92c1a7308aed57cd35c9d4f90f81 | 235af5ac3a701c8e67d77fa84383215c0cdb5a83 | refs/heads/master | 2022-06-24T17:49:03.470141 | 2020-05-11T07:39:24 | 2020-05-11T07:39:24 | 261,663,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py | # http://bspaans.github.io/python-mingus/
# Make sure to install FluidSynth: https://github.com/FluidSynth/fluidsynth/wiki/Download
# OS X: `brew install fluid-synth`
# Ubuntu/Debian: `sudo apt-get install fluidsynth`
# Also install SoundFonts, I got one from here: https://rkhive.com/piano.html
from os import listdir
from os.path import join
from parse_midi import Midi
from parse_midi import MessageType
import parse_midi
import mingus.core.notes as notes
from mingus.containers import Note
from mingus.midi import fluidsynth
from pathlib import Path
import time
import json
# TODO: try using Alsa, else use default
# fluidsynth.init(str(cwd / 'Steinway Grand Piano 1.2.sf2'))
# Initialise the synthesizer with a SoundFont; 'alsa' selects the ALSA audio
# driver (Linux-specific, per the TODO above).
fluidsynth.init(str(Path.cwd() / "Velocity Grand Piano.sf2"), 'alsa')
midi_dir = './midi_files'
midi_paths = [join(midi_dir, x) for x in listdir(midi_dir)]
# NOTE(review): index 1 hard-codes the second file in directory-listing
# order, which is not deterministic across filesystems.
fp = midi_paths[1]
print(fp)
m = Midi.from_file(fp)
# start_time = time.time()
# for t, evt in m.abs_times():
# dtime = max(t - (time.time() - start_time), 0)
# time.sleep(dtime)
# if evt.status == MessageType.Note_On:
# note = evt.data[0]
# vel = evt.data[1]
# channel = evt.channel
# if vel == 0:
# # TODO: check this is being called
# fluidsynth.stop_Note(Note(note))
# else:
# n = Note(note)
# n.channel = 1
# n.velocity = vel
# fluidsynth.play_Note(n, channel=channel)
# ! NOT NEEDED:
# elif evt.status == MessageType.Note_Off and len(evt.data) > 0:
# note = evt.data[0]
# if note not in active_notes:
# raise Exception('Released non-playing note')
# duration = (active_notes[note], t)
# note_times.append((note, duration))
# fluidsynth.stop_Note(Note(note))
def play_word(word, synth, word_duration=0.01):
    """Play all notes of a "word" together, then hold for word_duration.

    Args:
        word: iterable of note dicts with 'midi' (pitch number) and 'vel'
            (velocity) entries, as produced in sentences.json.
        synth: synthesizer module/object exposing play_Note (e.g. the
            mingus fluidsynth module).
        word_duration: seconds to sleep after starting all notes.
    """
    for note in word:
        n = Note(int(note['midi']))
        n.velocity = int(note['vel'])
        # Bug fix: the original ignored the `synth` argument and called the
        # global fluidsynth module directly; use the injected synth instead
        # (callers already pass fluidsynth, so behavior is unchanged).
        synth.play_Note(n, channel=1)
    time.sleep(word_duration)
# sentences.json maps a midi file path to a list of "words"; each word is a
# list of note dicts with 'midi' (pitch) and 'vel' (velocity) keys.
sentences = json.loads(open('sentences.json', 'r').read())
# w = sentences['./midi_files/mz_545_3.mid'][10]
# play_word(w, fluidsynth, 10)
s = sentences['./midi_files/mz_545_3.mid']
print(len(s))
# Play the whole piece word by word, a quarter second per word.
for word in s:
    play_word(word, fluidsynth, 0.25)
| [
"[email protected]"
]
| |
0c9a4574557bc74a61ab2b0df43f45ef7cf8a2f1 | 45136997aaf3de82d2ba9d3d36f339c664a9d8c4 | /train_models.py | 2da6eee709f47048ed32139e2b017c73acbad479 | []
| no_license | GritcoSorinO/IMDB-SentimentAnalysisProject | 997b4310f4554d86216cd25320fe19959cc30963 | c0eda8a5f9cecc594a5e03bab9ce66bda8112ebf | refs/heads/main | 2023-02-04T00:39:39.842149 | 2020-12-23T04:27:16 | 2020-12-23T04:27:16 | 322,978,919 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import random
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
def train_models(X_train, X_test, y_train, y_test, random_seed):
    """Fit a logistic-regression and a random-forest classifier on the
    training split, print each model's test accuracy, and return both
    fitted models as a (logistic_regression, random_forest) tuple.
    """
    logreg = LogisticRegression(random_state=random_seed)
    print("Train Logistic Regression model.")
    logreg.fit(X_train, y_train)
    accuracy = logreg.score(X_test, y_test)
    print('Accuracy obtained by Logistic Regression: {:.2f}%'.format(accuracy * 100))

    # 200 trees, all CPU cores, seeded for reproducibility.
    forest = RandomForestClassifier(n_estimators=200,
                                    n_jobs=-1,
                                    random_state=random_seed)
    print("\nTrain Random Forest Classifier model.")
    forest.fit(X_train, y_train)
    accuracy = forest.score(X_test, y_test)
    print('Accuracy obtained by Random Forest Classifier: {:.2f}%'.format(accuracy * 100))

    return (logreg, forest)
"[email protected]"
]
| |
da4f3374de30a5a35425c8c730f38545c87211eb | 952fca4bcce467294b2e2ca89e8231b4e0f72f34 | /monitoring/Monitor.py | 238438333337c1c5732c15d59ee33c6400ff7cbb | []
| no_license | ciniks117/cs639-project | a3ac4af455122f9a90793a93e8eff4c2ad508ec7 | f3e82c856b79ac331979d32da253c1a0c11ccc6f | refs/heads/main | 2023-01-18T20:17:26.509139 | 2020-12-01T13:51:29 | 2020-12-01T13:51:29 | 316,430,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,728 | py | import itertools
from copy import deepcopy
from . import *
from utils import *
from abstractions import *
class Monitor(object):
    """
    A monitor consists of layer-abstraction mappings. It can evaluate a given input based on its abstractions.
    The acceptance behavior of a monitor is defined in the class MonitorResult.
    Fields:
    - _layer2abstraction: mapping 'layer → abstraction'
    - _score_fun: score function for training (see Score class)
    The value 'None' means that this monitor is not trained.
    default: AverageScore()
    """
    # Class-level counter handing out unique monitor ids (ids start at 1).
    _id_iter = itertools.count()
    # --- public --- #
    def __init__(self, layer2abstraction: dict, score_fun=AverageScore(), layer2dimensions=None,
                 learn_from_test_data=False, is_novelty_training_active=False):
        # NOTE(review): AverageScore() is a shared mutable default argument;
        # safe only if Score objects are stateless - confirm.
        self._id = next(Monitor._id_iter)
        if self._id == 0:
            self._id = next(Monitor._id_iter) # start with '1'
        self._layer2abstraction = layer2abstraction
        self._score_fun = score_fun
        if layer2dimensions is None:
            # Default: project/plot dimensions 0 and 1 for every watched layer.
            layer2dimensions = {layer: [0, 1] for layer in self._layer2abstraction.keys()}
        self._layer2dimensions = layer2dimensions
        # Filled lazily by initialize_abstractions (per-class dimension maps).
        self._layer2class2dimensions = None
        self._learn_from_test_data = learn_from_test_data
        self._is_novelty_training_active = is_novelty_training_active
    @staticmethod
    def reset_ids():
        # Restart the id counter (e.g. between experiments).
        Monitor._id_iter = itertools.count()
    def __str__(self):
        return "Monitor {:d}".format(self.id())
    def id(self):
        return self._id
    def layers(self):
        # Indices of the network layers this monitor watches.
        return self._layer2abstraction.keys()
    def abstraction(self, layer):
        return self._layer2abstraction[layer]
    def short_str(self):
        # One-line summary: layer index and a short name per abstraction.
        string = ""
        for l, a in self._layer2abstraction.items():
            if string != "":
                string += ", "
            string += "layer {:d}: {}".format(l, a.short_str())
        return string
    def long_str(self):
        # Like short_str but with the abstractions' verbose descriptions.
        string = ""
        for l, a in self._layer2abstraction.items():
            if string != "":
                string += ", "
            string += "layer {:d}: {}".format(l, a.long_str())
        return string
    def dimensions(self, layer, class_id=None):
        # Per-class dimensions take precedence once initialize_abstractions
        # has run; otherwise fall back to the layer-wide default.
        if class_id is not None and self._layer2class2dimensions is not None:
            return self._layer2class2dimensions[layer][class_id]
        return self._layer2dimensions[layer]
    def normalize_and_initialize(self, model, n_classes):
        """Resolve layer indices against the model, wrap abstractions in
        per-class AbstractionVectors, and size them to the layer widths."""
        layer2abstraction_new = dict()
        for layer, abstraction in self._layer2abstraction.items():  # type: int, Abstraction
            # normalize layer index
            layer_normalized = normalize_layer(model, layer)
            # obtain number of neurons
            n_neurons = model.layers[layer_normalized].output_shape[1]
            # normalize abstraction (wrap in AbstractionVectors)
            if isinstance(abstraction, AbstractionVector):
                assert len(abstraction._abstractions) == n_classes, "Detected wrong number of abstractions!"
                abstraction_new = abstraction
            else:
                abstraction_new = AbstractionVector(abstraction, n_classes)
            # NOTE(review): leftover debug prints below - consider removing.
            print(abstraction_new._abstractions[0])
            print(abstraction_new._abstractions[0].sets)
            # initialize abstraction
            abstraction_new.initialize(n_neurons)
            print(" <<<<<<< Monitor >>>>>>>>>")
            # update new mapping
            if layer_normalized in layer2abstraction_new:
                raise(ValueError("Duplicate layer index", layer_normalized, "found. Please use unique indexing."))
            layer2abstraction_new[layer_normalized] = abstraction_new
        self._layer2abstraction = layer2abstraction_new
        # Re-key the dimensions mapping with the normalized layer indices too.
        layer2dimensions_new = dict()
        for layer, dimensions in self._layer2dimensions.items():  # type: int, list
            # normalize layer index
            layer_normalized = normalize_layer(model, layer)
            if layer_normalized in layer2dimensions_new:
                raise(ValueError("Duplicate layer index", layer_normalized, "found. Please use unique indexing."))
            layer2dimensions_new[layer_normalized] = dimensions
        self._layer2dimensions = layer2dimensions_new
    def initialize_abstractions(self, layer2class2nonzero_mask):
        """Size each per-class abstraction to its number of non-zero neurons
        and remap the plotting dimensions into the compacted index space."""
        self._layer2class2dimensions = dict()
        for layer, abstraction_vector in self._layer2abstraction.items():  # type: int, AbstractionVector
            class2dimensions = dict()
            self._layer2class2dimensions[layer] = class2dimensions
            original_dimensions = self._layer2dimensions[layer]
            class2nonzero_mask = layer2class2nonzero_mask[layer]  # type: dict
            for class_id, nonzero_mask in class2nonzero_mask.items():
                abstraction = abstraction_vector._abstractions[class_id]
                abstraction.initialize(sum([1 if nonzero else 0 for nonzero in nonzero_mask]))
                # adapt plotting dimension
                dimensions = []
                for dim in [0, 1]:
                    res = original_dimensions[dim]
                    if not nonzero_mask[res]:
                        # Dimension was filtered out entirely.
                        res = -1
                    else:
                        # Shift index left by the number of removed (zero)
                        # neurons before it.
                        res -= sum(not is_nz for is_nz in nonzero_mask[:res + 1])
                    dimensions.append(res)
                class2dimensions[class_id] = dimensions
    def update_clustering(self, layer: int, class2clusters: dict):
        # Propagate new cluster assignments to the abstraction of `layer`,
        # silently ignoring layers this monitor does not watch.
        abstraction_vector = self._layer2abstraction.get(layer)
        if abstraction_vector is None:
            # this monitor does not watch the given layer
            return
        assert isinstance(abstraction_vector, AbstractionVector)
        for class_index, clusters in class2clusters.items():
            abstraction_vector.update_clustering(class_index, clusters)
    def add_clustered(self, layer2values, ground_truths, layer2class2clusterer):
        """Feed training values into the abstractions, grouped by class and
        assigned to clusters via the per-class clusterers."""
        for layer, abstraction_vector in self._layer2abstraction.items():
            values = layer2values[layer]
            # mapping: class_index -> values from watched layer
            class2values = dict()
            for j, yj in enumerate(ground_truths):
                vj = values[j]
                if yj in class2values:
                    class2values[yj].append(vj)
                else:
                    class2values[yj] = [vj]
            class2clusters = layer2class2clusterer[layer]
            for class_index, values in class2values.items():
                clusterer = class2clusters[class_index]
                values_copy = deepcopy(values)  # for some reason, the list is modified below
                abstraction_vector.add_clustered(class_index, values_copy, clusterer)
    def train_with_novelties(self, predictions: list, layer2values: dict):
        # Run each sample through the abstractions in novelty mode, then let
        # every abstraction derive its credibility from the sample count.
        for layer, abstraction in self._layer2abstraction.items():
            for pj, vj in zip(predictions, layer2values[layer]):
                abstraction.isknown(pj, vj, novelty_mode=True)
        for abstraction_vector in self._layer2abstraction.values():  # type: AbstractionVector
            for abstraction in abstraction_vector.abstractions():
                abstraction.compute_credibility(len(predictions))
    def run(self, layer2values: dict, predictions: list, history: History, zero_filter: list, skip_confidence=False):
        """Evaluate all samples against the abstractions.

        zero_filter is a sorted list of sample indices to skip (marked via
        set_zero_filter instead of being scored). Results are stored in
        `history` under this monitor's id and also returned.
        """
        results = [MonitorResult() for _ in predictions]
        for layer, abstraction in self._layer2abstraction.items():
            # Walk the zero_filter in lockstep with the sample index j.
            if zero_filter:
                zero_filter_index = 0
                zero_filter_value = zero_filter[0]
            else:
                zero_filter_index = -1
                zero_filter_value = -1
            for j, vj in enumerate(layer2values[layer]):
                if j == zero_filter_value:
                    results[j].set_zero_filter()
                    # find next zero index
                    zero_filter_index += 1
                    if zero_filter_index == len(zero_filter):
                        zero_filter_value = -1
                    else:
                        zero_filter_value = zero_filter[zero_filter_index]
                else:
                    c_predicted = predictions[j]
                    accepts, confidence = abstraction.isknown(c_predicted, vj, skip_confidence=skip_confidence)
                    results[j].add_confidence(confidence)
        history.set_monitor_results(m_id=self.id(), results=results)
        return results
    def is_novelty_training_active(self):
        return self._is_novelty_training_active
    def is_test_training_active(self):
        return self._learn_from_test_data
| [
"[email protected]"
]
| |
7fed7cef489aab9759ac75ed34f64b25a9d4617e | 8222cc82e990477f6d279ce7ee212739f4765111 | /Django-jQuery-File-Uploader-Integration-demo-master/settings.py | d836619a601abb8aca3120f5694b7ce3d97238e0 | []
| no_license | varrasivareddy/siva | a7117907f03ba914b3ae1d3ce1f4455e8a8be7c8 | eebcd53045f54b113655678cb8fdbf41d464c0e2 | refs/heads/master | 2020-05-22T13:07:38.836964 | 2014-10-06T10:15:50 | 2014-10-06T10:15:50 | 24,790,858 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,399 | py | # Django settings for djangoUpload project.
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
('Miroslav Shubernetskiy', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'djangoupload.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "static"),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# So this file is on github so I guess the secret is out!!!
# SECURITY NOTE(review): this key is committed to a public repository; any
# real deployment must override SECRET_KEY (e.g. from the environment) and
# rotate it, since a leaked key compromises sessions and CSRF protection.
SECRET_KEY = 'gcs^9v-hbl9qwavnbn&@e794ir@tyrrnz(+0efshm!dzzo_xt+'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'djangoUpload.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "templates")
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"[email protected]"
]
| |
a23451e561311fe8fd81476b96702f9bc83a0710 | aa18fb4bb13b6499ae9b1f669758d0ea326c1921 | /util.py | c901283bc804ccdb170cc460fd329c3edf4b2eef | []
| no_license | kaushikData/DoCAI | 8d144a1a6dc2adf5429389ee83aacae4498ab7a5 | e48ffbf9545f84f2426e5154ff626e94ae33f62f | refs/heads/master | 2020-12-18T12:58:28.280599 | 2020-01-24T11:21:57 | 2020-01-24T11:21:57 | 235,391,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,395 | py |
import logging
import os
import queue
import re
import shutil
import string
import torch
import torch.nn.functional as F
import torch.utils.data as data
import tqdm
import numpy as np
import ujson as json
from collections import Counter
class SQuAD(data.Dataset):
    """Stanford Question Answering Dataset (SQuAD).
    Each item in the dataset is a tuple with the following entries (in order):
    - context_idxs: Indices of the words in the context.
        Shape (context_len,).
    - context_char_idxs: Indices of the characters in the context.
        Shape (context_len, max_word_len).
    - question_idxs: Indices of the words in the question.
        Shape (question_len,).
    - question_char_idxs: Indices of the characters in the question.
        Shape (question_len, max_word_len).
    - y1: Index of word in the context where the answer begins.
        -1 if no answer.
    - y2: Index of word in the context where the answer ends.
        -1 if no answer.
    - id: ID of the example.
    Args:
        data_path (str): Path to .npz file containing pre-processed dataset.
        use_v2 (bool): Whether to use SQuAD 2.0 questions. Otherwise only use SQuAD 1.1.
    """
    def __init__(self, data_path, use_v2=True):
        super(SQuAD, self).__init__()
        # Pre-processed arrays; keys are fixed by the preprocessing script.
        dataset = np.load(data_path)
        self.context_idxs = torch.from_numpy(dataset['context_idxs']).long()
        self.context_char_idxs = torch.from_numpy(dataset['context_char_idxs']).long()
        self.question_idxs = torch.from_numpy(dataset['ques_idxs']).long()
        self.question_char_idxs = torch.from_numpy(dataset['ques_char_idxs']).long()
        self.y1s = torch.from_numpy(dataset['y1s']).long()
        self.y2s = torch.from_numpy(dataset['y2s']).long()
        if use_v2:
            # SQuAD 2.0: Use index 0 for no-answer token (token 1 = OOV)
            # Prepend a column of ones (the no-answer/OOV token) to every
            # word and char index tensor, and shift answer spans by one.
            batch_size, c_len, w_len = self.context_char_idxs.size()
            ones = torch.ones((batch_size, 1), dtype=torch.int64)
            self.context_idxs = torch.cat((ones, self.context_idxs), dim=1)
            self.question_idxs = torch.cat((ones, self.question_idxs), dim=1)
            ones = torch.ones((batch_size, 1, w_len), dtype=torch.int64)
            self.context_char_idxs = torch.cat((ones, self.context_char_idxs), dim=1)
            self.question_char_idxs = torch.cat((ones, self.question_char_idxs), dim=1)
            self.y1s += 1
            self.y2s += 1
        # SQuAD 1.1: Ignore no-answer examples
        self.ids = torch.from_numpy(dataset['ids']).long()
        self.valid_idxs = [idx for idx in range(len(self.ids))
                           if use_v2 or self.y1s[idx].item() >= 0]
    def __getitem__(self, idx):
        # Translate the external index into the filtered (valid) index space.
        idx = self.valid_idxs[idx]
        example = (self.context_idxs[idx],
                   self.context_char_idxs[idx],
                   self.question_idxs[idx],
                   self.question_char_idxs[idx],
                   self.y1s[idx],
                   self.y2s[idx],
                   self.ids[idx])
        return example
    def __len__(self):
        # Only valid (answerable, for v1.1) examples are exposed.
        return len(self.valid_idxs)
def collate_fn(examples):
    """Merge a list of `SQuAD.__getitem__` tuples into padded batch tensors.

    Examples of different lengths are padded (with 0) up to the longest
    example in the batch, per field.

    Args:
        examples (list): Tuples of the form (context_idxs, context_char_idxs,
            question_idxs, question_char_idxs, y1s, y2s, ids).

    Returns:
        tuple: Batch tensors (context_idxs, context_char_idxs, question_idxs,
        question_char_idxs, y1s, y2s, ids), each of shape (batch_size, ...).

    Adapted from:
        https://github.com/yunjey/seq2seq-dataloader
    """
    def batch_scalars(values, dtype=torch.int64):
        # Stack 0-d values into a 1-d tensor.
        return torch.tensor(values, dtype=dtype)

    def batch_sequences(seqs, dtype=torch.int64, pad_value=0):
        # Pad 1-d sequences to the longest non-pad length in the batch.
        seq_lens = [(s != pad_value).sum() for s in seqs]
        out = torch.zeros(len(seqs), max(seq_lens), dtype=dtype)
        for row, (seq, n) in enumerate(zip(seqs, seq_lens)):
            out[row, :n] = seq[:n]
        return out

    def batch_matrices(mats, dtype=torch.int64, pad_value=0):
        # Pad 2-d matrices to the largest non-pad height/width in the batch.
        n_rows = [(m.sum(1) != pad_value).sum() for m in mats]
        n_cols = [(m.sum(0) != pad_value).sum() for m in mats]
        out = torch.zeros(len(mats), max(n_rows), max(n_cols), dtype=dtype)
        for row, (mat, h, w) in enumerate(zip(mats, n_rows, n_cols)):
            out[row, :h, :w] = mat[:h, :w]
        return out

    # Split the per-example tuples into per-field groups.
    (context_idxs, context_char_idxs,
     question_idxs, question_char_idxs,
     y1s, y2s, ids) = zip(*examples)

    return (batch_sequences(context_idxs),
            batch_matrices(context_char_idxs),
            batch_sequences(question_idxs),
            batch_matrices(question_char_idxs),
            batch_scalars(y1s),
            batch_scalars(y2s),
            batch_scalars(ids))
class AverageMeter:
    """Track a running average of scalar values over time.

    Adapted from:
    > https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """
    def __init__(self):
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        """Clear all accumulated statistics."""
        self.__init__()

    def update(self, val, num_samples=1):
        """Fold in `val`, the mean of `num_samples` new samples.

        Args:
            val (float): Average value to update the meter with.
            num_samples (int): Number of samples that were averaged to
                produce `val`.
        """
        self.count = self.count + num_samples
        self.sum = self.sum + val * num_samples
        self.avg = self.sum / self.count
class EMA:
    """Exponential moving average of model parameters.

    Args:
        model (torch.nn.Module): Model with parameters whose EMA will be kept.
        decay (float): Base decay rate for the exponential moving average.
    """
    def __init__(self, model, decay):
        self.decay = decay
        self.shadow = {}
        self.original = {}

        # Snapshot every trainable parameter as the initial EMA value.
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            self.shadow[name] = param.data.clone()

    def __call__(self, model, num_updates):
        # Warm-up: early in training the effective decay is smaller, so the
        # average tracks the raw parameters more closely.
        decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            blended = (1.0 - decay) * param.data + decay * self.shadow[name]
            self.shadow[name] = blended.clone()

    def assign(self, model):
        """Assign EMA values to the model, remembering the originals.

        Args:
            model (torch.nn.Module): Model to assign parameter values.
        """
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            self.original[name] = param.data.clone()
            param.data = self.shadow[name]

    def resume(self, model):
        """Restore the parameter values saved by the last `assign` call.

        Args:
            model (torch.nn.Module): Model to assign parameter values.
        """
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            param.data = self.original[name]
class CheckpointSaver:
    """Class to save and load model checkpoints.
    Save the best checkpoints as measured by a metric value passed into the
    `save` method. Overwrite checkpoints with better checkpoints once
    `max_checkpoints` have been saved.
    Args:
        save_dir (str): Directory to save checkpoints.
        max_checkpoints (int): Maximum number of checkpoints to keep before
            overwriting old ones.
        metric_name (str): Name of metric used to determine best model.
        maximize_metric (bool): If true, best checkpoint is that which maximizes
            the metric value passed in via `save`. Otherwise, best checkpoint
            minimizes the metric.
        log (logging.Logger): Optional logger for printing information.
    """
    def __init__(self, save_dir, max_checkpoints, metric_name,
                 maximize_metric=False, log=None):
        super(CheckpointSaver, self).__init__()
        self.save_dir = save_dir
        self.max_checkpoints = max_checkpoints
        self.metric_name = metric_name
        self.maximize_metric = maximize_metric
        self.best_val = None  # best metric value seen so far (None until first save)
        # Priority queue of (priority, path); lowest priority popped first,
        # so the *worst* checkpoint is removed when over capacity.
        self.ckpt_paths = queue.PriorityQueue()
        self.log = log
        self._print('Saver will {}imize {}...'
                    .format('max' if maximize_metric else 'min', metric_name))
    def is_best(self, metric_val):
        """Check whether `metric_val` is the best seen so far.
        Args:
            metric_val (float): Metric value to compare to prior checkpoints.
        """
        if metric_val is None:
            # No metric reported
            return False
        if self.best_val is None:
            # No checkpoint saved yet
            return True
        return ((self.maximize_metric and self.best_val < metric_val)
                or (not self.maximize_metric and self.best_val > metric_val))
    def _print(self, message):
        """Print a message if logging is enabled."""
        if self.log is not None:
            self.log.info(message)
    def save(self, step, model, metric_val, device):
        """Save model parameters to disk.
        Args:
            step (int): Total number of examples seen during training so far.
            model (torch.nn.DataParallel): Model to save.
            metric_val (float): Determines whether checkpoint is best so far.
            device (torch.device): Device where model resides.
        """
        # Move to CPU before serializing so the checkpoint loads anywhere,
        # then move the live model back to its device.
        ckpt_dict = {
            'model_name': model.__class__.__name__,
            'model_state': model.cpu().state_dict(),
            'step': step
        }
        model.to(device)
        checkpoint_path = os.path.join(self.save_dir,
                                       'step_{}.pth.tar'.format(step))
        torch.save(ckpt_dict, checkpoint_path)
        self._print('Saved checkpoint: {}'.format(checkpoint_path))
        if self.is_best(metric_val):
            # Save the best model
            self.best_val = metric_val
            best_path = os.path.join(self.save_dir, 'best.pth.tar')
            shutil.copy(checkpoint_path, best_path)
            self._print('New best checkpoint at step {}...'.format(step))
        # Add checkpoint path to priority queue (lowest priority removed first)
        if self.maximize_metric:
            priority_order = metric_val
        else:
            priority_order = -metric_val
        self.ckpt_paths.put((priority_order, checkpoint_path))
        # Remove a checkpoint if more than max_checkpoints have been saved
        if self.ckpt_paths.qsize() > self.max_checkpoints:
            _, worst_ckpt = self.ckpt_paths.get()
            try:
                os.remove(worst_ckpt)
                self._print('Removed checkpoint: {}'.format(worst_ckpt))
            except OSError:
                # Avoid crashing if checkpoint has been removed or protected
                pass
def load_model(model, checkpoint_path, gpu_ids, return_step=True):
    """Load model parameters from disk.
    Args:
        model (torch.nn.DataParallel): Load parameters into this model.
        checkpoint_path (str): Path to checkpoint to load.
        gpu_ids (list): GPU IDs for DataParallel.
        return_step (bool): Also return the step at which checkpoint was saved.
    Returns:
        model (torch.nn.DataParallel): Model loaded from checkpoint.
        step (int): Step at which checkpoint was saved. Only if `return_step`.
    """
    # map_location remaps tensors saved on another device onto ours
    # (GPU 0 of `gpu_ids` if any, else CPU).
    device = 'cuda:{}'.format(gpu_ids[0]) if gpu_ids else 'cpu'
    ckpt_dict = torch.load(checkpoint_path, map_location=device)

    # Build model, load parameters
    model.load_state_dict(ckpt_dict['model_state'])

    if return_step:
        step = ckpt_dict['step']
        return model, step

    return model
def get_available_devices():
    """Detect the available compute devices.

    Returns:
        device (torch.device): Main device (GPU 0 if available, else CPU).
        gpu_ids (list): IDs of all available GPUs (empty on CPU-only hosts).
    """
    if not torch.cuda.is_available():
        return torch.device('cpu'), []

    gpu_ids = list(range(torch.cuda.device_count()))
    device = torch.device('cuda:{}'.format(gpu_ids[0]))
    # Make GPU 0 the default device for subsequent allocations.
    torch.cuda.set_device(device)
    return device, gpu_ids
def masked_softmax(logits, mask, dim=-1, log_softmax=False):
    """Softmax over `dim`, assigning ~0 probability wherever `mask` is 0.

    Args:
        logits (torch.Tensor): Inputs to the softmax function.
        mask (torch.Tensor): Same shape as `logits`; 0 marks positions that
            should receive (essentially) zero probability in the output.
        dim (int): Dimension over which to take softmax.
        log_softmax (bool): Take log-softmax rather than regular softmax.
            E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.

    Returns:
        torch.Tensor: Result of the masked (log-)softmax over the logits.
    """
    mask = mask.type(torch.float32)
    # Push masked-out logits to a huge negative value so exp(...) underflows.
    adjusted = logits * mask - 1e30 * (1 - mask)
    if log_softmax:
        return F.log_softmax(adjusted, dim)
    return F.softmax(adjusted, dim)
def visualize(tbx, pred_dict, eval_path, step, split, num_visuals):
    """Visualize text examples to TensorBoard.
    Args:
        tbx (tensorboardX.SummaryWriter): Summary writer.
        pred_dict (dict): dict of predictions of the form id -> pred.
        eval_path (str): Path to eval JSON file.
        step (int): Number of examples seen so far during training.
        split (str): Name of data split being visualized.
        num_visuals (int): Number of visuals to select at random from preds.
    """
    if num_visuals <= 0:
        return
    if num_visuals > len(pred_dict):
        num_visuals = len(pred_dict)
    # Sample example IDs without replacement so each visual is distinct.
    visual_ids = np.random.choice(list(pred_dict), size=num_visuals, replace=False)
    with open(eval_path, 'r') as eval_file:
        eval_dict = json.load(eval_file)
    for i, id_ in enumerate(visual_ids):
        # Empty-string predictions render as 'N/A' (no-answer).
        pred = pred_dict[id_] or 'N/A'
        example = eval_dict[str(id_)]
        question = example['question']
        context = example['context']
        answers = example['answers']
        # Show the first gold answer, or 'N/A' for unanswerable questions.
        gold = answers[0] if answers else 'N/A'
        tbl_fmt = ('- **Question:** {}\n'
                   + '- **Context:** {}\n'
                   + '- **Answer:** {}\n'
                   + '- **Prediction:** {}')
        tbx.add_text(tag='{}/{}_of_{}'.format(split, i + 1, num_visuals),
                     text_string=tbl_fmt.format(question, context, gold, pred),
                     global_step=step)
def save_preds(preds, save_dir, file_name='predictions.csv'):
    """Save predictions `preds` to a CSV file named `file_name` in `save_dir`.

    Args:
        preds (list): List of predictions each of the form (id, start, end),
            where id is an example ID, and start/end are indices in the context.
        save_dir (str): Directory in which to save the predictions file.
        file_name (str): File name for the CSV file.

    Returns:
        save_path (str): Path where CSV file was saved.

    Raises:
        ValueError: If `preds` is not a list of (id, start, end) tuples.
    """
    # Validate format before touching the filesystem.
    if not isinstance(preds, list):
        raise ValueError('preds must be a list of tuples (id, start, end)')
    if any(not isinstance(p, tuple) or len(p) != 3 for p in preds):
        raise ValueError('preds must be a list of tuples (id, start, end)')

    # Rows are written in ascending example-ID order.
    ordered = sorted(preds, key=lambda p: p[0])

    save_path = os.path.join(save_dir, file_name)
    np.savetxt(save_path, np.array(ordered), delimiter=',', fmt='%d')
    return save_path
def get_save_dir(base_dir, name, training, id_max=100):
    """Get a unique save directory by appending the smallest positive integer
    `id < id_max` that is not already taken (i.e., no dir exists with that id).

    Args:
        base_dir (str): Base directory in which to make save directories.
        name (str): Name to identify this training run. Need not be unique.
        training (bool): Save dir. is for training (determines subdirectory).
        id_max (int): Maximum ID number before raising an exception.

    Returns:
        save_dir (str): Path to a new directory with a unique name.
    """
    subdir = 'train' if training else 'test'
    for uid in range(1, id_max):
        candidate = os.path.join(base_dir, subdir,
                                 '{}-{:02d}'.format(name, uid))
        if os.path.exists(candidate):
            continue
        os.makedirs(candidate)
        return candidate

    raise RuntimeError('Too many save directories created with the same name. \
                       Delete old save directories or use another name.')
def get_logger(log_dir, name):
    """Get a `logging.Logger` instance that prints to the console
    and an auxiliary file.
    Args:
        log_dir (str): Directory in which to create the log file.
        name (str): Name to identify the logs.
    Returns:
        logger (logging.Logger): Logger instance for logging events.
    """
    class StreamHandlerWithTQDM(logging.Handler):
        """Let `logging` print without breaking `tqdm` progress bars.
        See Also:
            > https://stackoverflow.com/questions/38543506
        """
        def emit(self, record):
            try:
                msg = self.format(record)
                # tqdm.write() clears the progress bar, prints, then redraws.
                tqdm.tqdm.write(msg)
                self.flush()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    # Create logger
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    # Log everything (i.e., DEBUG level and above) to a file
    log_path = os.path.join(log_dir, 'log.txt')
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.DEBUG)

    # Log everything except DEBUG level (i.e., INFO level and above) to console
    console_handler = StreamHandlerWithTQDM()
    console_handler.setLevel(logging.INFO)

    # Create format for the logs
    file_formatter = logging.Formatter('[%(asctime)s] %(message)s',
                                       datefmt='%m.%d.%y %H:%M:%S')
    file_handler.setFormatter(file_formatter)
    console_formatter = logging.Formatter('[%(asctime)s] %(message)s',
                                          datefmt='%m.%d.%y %H:%M:%S')
    console_handler.setFormatter(console_formatter)

    # add the handlers to the logger
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)

    return logger
def torch_from_json(path, dtype=torch.float32):
    """Load a PyTorch Tensor from a JSON file.
    Args:
        path (str): Path to the JSON file to load.
        dtype (torch.dtype): Data type of loaded array.
    Returns:
        tensor (torch.Tensor): Tensor loaded from JSON file.
    """
    # `json` here is ujson (aliased at the top of this module).
    with open(path, 'r') as fh:
        array = np.array(json.load(fh))

    tensor = torch.from_numpy(array).type(dtype)

    return tensor
def discretize(p_start, p_end, max_len=15, no_answer=False):
    """Discretize soft predictions to get start and end indices.

    Choose the pair `(i, j)` of indices that maximizes `p_start[i] * p_end[j]`
    subject to `i <= j` and `j - i + 1 <= max_len`.

    Args:
        p_start (torch.Tensor): Soft predictions for start index.
            Shape (batch_size, context_len).
        p_end (torch.Tensor): Soft predictions for end index.
            Shape (batch_size, context_len).
        max_len (int): Maximum length of the discretized prediction.
            I.e., enforce that `preds[i, 1] - preds[i, 0] + 1 <= max_len`.
        no_answer (bool): Treat 0-index as the no-answer prediction. Consider
            a prediction no-answer if `preds[0, 0] * preds[0, 1]` is greater
            than the probability assigned to the max-probability span.

    Returns:
        start_idxs (torch.Tensor): Hard predictions for start index.
            Shape (batch_size,)
        end_idxs (torch.Tensor): Hard predictions for end index.
            Shape (batch_size,)

    Raises:
        ValueError: If either input has values outside [0, 1].
    """
    if p_start.min() < 0 or p_start.max() > 1 \
            or p_end.min() < 0 or p_end.max() > 1:
        raise ValueError('Expected p_start and p_end to have values in [0, 1]')

    # Outer product: p_joint[b, i, j] = p_start[b, i] * p_end[b, j].
    p_joint = torch.matmul(p_start.unsqueeze(2),
                           p_end.unsqueeze(1))  # (batch_size, c_len, c_len)

    # Band mask keeping only pairs with i <= j <= i + max_len - 1.
    c_len, device = p_joint.size(1), p_joint.device
    band = torch.triu(torch.ones((c_len, c_len), device=device))
    band = band - torch.triu(torch.ones((c_len, c_len), device=device),
                             diagonal=max_len)
    if no_answer:
        # Index 0 is the no-answer slot; grab its probability, then exclude
        # it from the legal span search.
        p_no_answer = p_joint[:, 0, 0].clone()
        band[0, :] = 0
        band[:, 0] = 0
    else:
        p_no_answer = None
    p_joint = p_joint * band

    # argmax over rows / columns recovers the best (i, j) pair.
    row_max, _ = torch.max(p_joint, dim=2)
    col_max, _ = torch.max(p_joint, dim=1)
    start_idxs = torch.argmax(row_max, dim=-1)
    end_idxs = torch.argmax(col_max, dim=-1)

    if no_answer:
        # Predict no-answer whenever its probability beats the best span.
        best_prob, _ = torch.max(col_max, dim=-1)
        no_ans = p_no_answer > best_prob
        start_idxs[no_ans] = 0
        end_idxs[no_ans] = 0

    return start_idxs, end_idxs
def convert_tokens(eval_dict, qa_id, y_start_list, y_end_list, no_answer):
    """Convert predicted span indices back into context substrings.

    Args:
        eval_dict (dict): Dictionary with eval info for the dataset. This is
            used to perform the mapping from IDs and indices to actual text.
        qa_id (int): List of QA example IDs.
        y_start_list (list): List of start predictions.
        y_end_list (list): List of end predictions.
        no_answer (bool): Questions can have no answer. E.g., SQuAD 2.0.

    Returns:
        pred_dict (dict): Dictionary index IDs -> predicted answer text.
        sub_dict (dict): Dictionary UUIDs -> predicted answer text (submission).
    """
    pred_dict = {}
    sub_dict = {}
    for qid, y_start, y_end in zip(qa_id, y_start_list, y_end_list):
        info = eval_dict[str(qid)]
        context, spans, uuid = info["context"], info["spans"], info["uuid"]
        if no_answer and (y_start == 0 or y_end == 0):
            # Index 0 is the no-answer slot: emit the empty string.
            answer = ''
        else:
            if no_answer:
                # Shift past the prepended no-answer position.
                y_start, y_end = y_start - 1, y_end - 1
            answer = context[spans[y_start][0]: spans[y_end][1]]
        pred_dict[str(qid)] = answer
        sub_dict[uuid] = answer
    return pred_dict, sub_dict
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best `metric_fn(prediction, truth)` over all references."""
    # With no references, score the prediction against the empty string.
    if not ground_truths:
        return metric_fn(prediction, '')
    return max(metric_fn(prediction, truth) for truth in ground_truths)
def eval_dicts(gold_dict, pred_dict, no_answer):
    """Aggregate EM/F1 (and AvNA for SQuAD 2.0) over all predictions.

    Each prediction is scored against the best-matching gold answer;
    percentages are averaged over the number of predictions.
    """
    avna = f1 = em = total = 0
    for key, prediction in pred_dict.items():
        total += 1
        ground_truths = gold_dict[key]['answers']
        em += metric_max_over_ground_truths(compute_em, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(compute_f1, prediction, ground_truths)
        if no_answer:
            avna += compute_avna(prediction, ground_truths)

    results = {'EM': 100. * em / total,
               'F1': 100. * f1 / total}
    if no_answer:
        results['AvNA'] = 100. * avna / total
    return results
def compute_avna(prediction, ground_truths):
    """Answer vs. no-answer accuracy: 1.0 when both sides agree on answerability."""
    has_prediction = bool(prediction)
    has_gold = bool(ground_truths)
    return float(has_prediction == has_gold)
# All methods below this line are from the official SQuAD 2.0 eval script
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
    """Lowercase, strip punctuation and articles, and collapse whitespace."""
    # Pipeline order matters: lowercase first so article matching is
    # case-insensitive, strip punctuation before article removal so
    # e.g. "the," still loses its article.
    text = s.lower()
    punctuation = set(string.punctuation)
    text = ''.join(ch for ch in text if ch not in punctuation)
    text = re.sub(re.compile(r'\b(a|an|the)\b', re.UNICODE), ' ', text)
    return ' '.join(text.split())
def get_tokens(s):
    """Split `s` into normalized tokens; empty/None input yields []."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_em(a_gold, a_pred):
    """Exact-match score: 1 if the normalized answers are identical, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold answer and a predicted answer."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    if not gold_toks or not pred_toks:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    # Multiset intersection counts shared tokens with multiplicity.
    overlap = Counter(gold_toks) & Counter(pred_toks)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return (2 * precision * recall) / (precision + recall)
| [
"[email protected]"
]
| |
d4895d224f9ca53c8d7649654d5cad0186bede06 | 7dc408f2ec031ad647bf16a7b7a21448c1a83c87 | /commandLineCalc_easy.py | 3b1d8cc19a524dc2725998c2b11668a930541732 | []
| no_license | jsim123/lab1 | 920e76469f50377a78a5da4193112b4d7230b825 | 9cf17b04246f5ee16818c2f2ef8f68057b3a1600 | refs/heads/master | 2020-03-24T06:09:18.944243 | 2018-07-28T05:51:46 | 2018-07-28T05:51:46 | 142,518,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | '''
make a command line calculator
DIFFICULTY = MEDIUM
TOPICS = strings, variables, lists
your task is to write a command line calculator
this task is easy since we can use the eval function to do most of the legwork
however, we need to parse possible invalid user input. This is your task
return None if invalid input. Otherwise return the result
'''
def calculate(s):
    '''
    Evaluate a simple arithmetic expression, or return None if it contains
    any character outside the arithmetic whitelist.

    >>> calculate("1+3")
    4
    >>> calculate("1+3*4/3")
    5.0
    >>> calculate("(1+3)*5")
    20
    >>> calculate("-----1")
    -1
    >>> calculate("-+-1")
    1
    >>> calculate(\'print("bad guy coming to hack")\')
    '''
    # Whitelist is the ASCII range 40-57: the digits 0-9 plus ( ) * + , - . /
    # Anything else (letters, spaces, quotes) would make eval() unsafe or
    # invalid, so reject the whole expression.
    for ch in s:
        if not 40 <= ord(ch) <= 57:
            return None
    return eval(s)
if __name__ == '__main__':
    # Run the doctests embedded in calculate()'s docstring.
    import doctest
    doctest.testmod()
| [
"[email protected]"
]
| |
7da0cdbd0ae336d14f7023b24a2e9169e58abf11 | 94fd1381adcfaa5ea64dc13123aef16697b0396a | /covid_dashboard/views/get_districts_daily_report_day_wise/request_response_mocks.py | 540280aed7e3719046e9b95366e4f81bc83ed4df | []
| no_license | bharathi151/covid_dashboard | 30ac9fe4720b8cd42028b33dcc1b620e0f1ebdb1 | 930bf3e46e8d7c56c682ce10f7f6e5fa7f50cab8 | refs/heads/master | 2022-11-14T20:18:24.648922 | 2020-06-27T10:08:53 | 2020-06-27T10:08:53 | 269,612,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py |
RESPONSE_200_JSON = """
[
{
"district_name": "string",
"district_id": 1,
"day_wise_statistics": [
{
"total_confirmed_cases": 1,
"total_deaths": 1,
"total_recovered_cases": 1,
"date": "string"
}
]
}
]
"""
| [
"[email protected]"
]
| |
80e74a1e105c49d5008ba9b83db32fe4f28c63d6 | 6010dc2a2ffdb2015d8d48962b65d18402d69c26 | /migrations/versions/02_add_asset_types.py | 3492916980d8e7a0c81d44def68e568a6cf2d94a | []
| no_license | mdmims/AzureIngesterApi | 2dad202e7417784049c24b20917df57d80bf9a73 | b8e00619d3c2def2941132f9ca439eb26e8fa013 | refs/heads/main | 2023-02-11T01:32:59.885960 | 2021-01-05T18:58:56 | 2021-01-05T18:58:56 | 301,219,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | """empty message
Revision ID: 02_add_asset_types
Revises: 01_healthz_table
"""
import sqlalchemy as sa
from alembic import op
import azure_ingester_api.api.models as m
# Revision identifiers, used by Alembic to order migrations in the chain.
revision = '02_add_asset_types'
down_revision = '01_healthz_table'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `asset_type` lookup table and seed it from CSV."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'asset_type',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(250), nullable=False),
        sa.Column('description', sa.String(500), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    # Seed the freshly created table with the v1 asset-type reference data.
    m.insert_data_from_csv('migrations/data/asset_type_v1.csv', m.AssetType.__table__, op.get_bind())
    # ### end Alembic commands ###
def downgrade():
    """Drop the `asset_type` table, reversing `upgrade` (seeded rows are lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('asset_type')
    # ### end Alembic commands ###
| [
"[email protected]"
]
| |
f307f47c4a7e44be5de0ad1f9ed05f6d75fd81de | 328c05bf9312044626609fb251bf1858beaf80ce | /bwb/pycompile/megaplot.spec | 69a7802e6cdcec3824def5a2497f46a3b08cc175 | []
| no_license | astroclark/osg_tools | a68422869b6d59afee00d1e7be0e95f2a5582f57 | b8dc00b1f3f2cc03eaaa194bcd164074d89aa71c | refs/heads/master | 2021-01-17T02:16:56.460527 | 2018-06-25T15:43:02 | 2018-06-25T15:43:02 | 53,345,744 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 873 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['megaplot.py'],
pathex=['/home/jclark308/src/lscsoft/bayeswave/trunk/postprocess'],
binaries=None,
datas=[('./navigate.js','.'),
('./BWBweb.css','.'),
('./secure_ajax.js','.'),
('./svn_info.txt','.')],
hiddenimports=['scipy.linalg'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='megaplot',
debug=False,
strip=True,
upx=True,
console=True )
| [
"[email protected]"
]
| |
94c7c3aed4ae2b5d29fba2f89c377e198d6a7277 | 908827768c13c1da86d8be4d66635f0e0356750a | /euler5.py | dc5e04493cdba89196c7e5e62284b1371b94371d | []
| no_license | jiema1989/Euler | 3f3a2526fd8f5c74f12736c66104f3dad224a392 | ca03de000634e0f37ac225daa6694806ae15b2a0 | refs/heads/master | 2020-06-29T03:33:32.341033 | 2017-02-22T10:41:04 | 2017-02-22T10:41:04 | 74,451,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | ## Euler Problem 5 ##
N=20;
def factorNum(n):
    """Return the prime factorization of n as a dict {prime: exponent}.

    Trial division up to sqrt(n); whatever remains above 1 is itself prime.
    """
    factors = {}
    i = 2
    while i * i <= n:
        exp = 0
        while n % i == 0:
            exp += 1
            # Floor division keeps n an int; the original `n = n / i` produced
            # float quotients (and float dict keys) under Python 3.
            n //= i
        if exp:
            factors[i] = exp
        i += 1
    if n > 1:
        factors[n] = 1
    return factors
# Project Euler 5 (Python 2): smallest number divisible by all of 1..20.
# Take the max exponent of each prime over the factorizations of 2..19.
print factorNum(20)
dictEntire={};
# NOTE(review): range(2, 20) stops at 19; 20's factors (2^2 * 5) are already
# covered by 16 and 5, so the result is unchanged, but range(2, 21) is clearer.
for i in range(2,20,1):
    dict2 = factorNum(i);
    for factor in dict2.keys():
        if factor not in dictEntire:
            dictEntire[factor]=1;
        if factor in dictEntire:
            # Keep the largest exponent seen for this prime.
            if dict2[factor]>=dictEntire[factor]:
                dictEntire[factor]=dict2[factor];
print dictEntire
# The answer is the product of each prime raised to its max exponent.
pro=1;
for key,value in dictEntire.items():
    pro=pro*(key**value);
print pro;
| [
"jiema1989"
]
| jiema1989 |
06fd170031d6d6565c42dd89088f4689b1a53e92 | c5e92c7d4adb261b891ce0994556c8873e94216f | /kdk.py | a3c973a88566ac804ee140f5a7ae21107f3feaf4 | []
| no_license | kamrudeen007/guvi | b4b8faadfaad381be3bb2c2b8b175cfa2ad1d072 | 8c5abaca6510b996b0a307f1a0d9d366ab314fed | refs/heads/master | 2020-04-21T01:18:55.598550 | 2019-02-05T10:09:21 | 2019-02-05T10:09:21 | 169,220,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | num = int(input("Enter any number: "))
flag = num%2
if flag == 0:
print(num, "is an even number")
elif flag == 1:
print(num, "is an odd number")
| [
"[email protected]"
]
| |
5b603f199898400236d85298f7bd90c9585f7059 | 69636805a67ed244e13d61d838b56791018dee62 | /exercises/0001-hello-world/f.py | 183f7fe2f7e57902bc18475837a32b74921125b1 | []
| no_license | anacrochas1/compciv-2016 | 2176306d774642f7d9a22f02c9d6a599a9942a18 | abd94d0bfcc6c1612ada06f3f563c0764b2fe2b9 | refs/heads/master | 2021-01-18T21:09:33.755755 | 2016-06-02T05:54:18 | 2016-06-02T05:54:18 | 49,533,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | print("goodbye")
count) | [
"[email protected]"
]
| |
453f480b9138ed55e51cfedb6adf43fa64d25dcf | 0675b3632c25bc97f8e7ffcb69577c78e7ac5af7 | /TP2/plotterParticles.py | 2e73f5f8bd1517d2a661e10ba89778016bb13e44 | []
| no_license | ezeqlynch/SS-2019 | 5414dd9146edf5c000b14029570aa1e2b6adf29e | 82ef6153acc79f3bcdaab0b2b0d4e3a0083d3a17 | refs/heads/master | 2021-07-11T22:56:18.084498 | 2020-08-09T21:29:21 | 2020-08-09T21:29:21 | 175,205,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | import argparse
from argparse import RawTextHelpFormatter
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as ticker
# import PyQt5.QtGui
def argumentParser():
    """Build the command-line parser for the particle plotter.

    Returns:
        argparse.ArgumentParser: Parser exposing --staticFile, --name
        and --error options.
    """
    parser = argparse.ArgumentParser(
        description='This program shows data from .\n', formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        '--staticFile',
        help="Path to the static data file.",
        default='data/2333_2d/2333-stats-0.stats'
    )
    parser.add_argument(
        '--name',
        # Fixed: help text was copy-pasted from --staticFile before.
        help="Name of the simulation.",
        default="Simulación 2233"
    )
    parser.add_argument(
        '--error',
        help="Show error in simulation\n\n",
        action='store_true'
    )
    return parser
if __name__ == "__main__":
# get parser
parsedArgs = argumentParser().parse_args()
staticFile = open(parsedArgs.staticFile, "r")
particlesPerFrame = []
# particleNum = int(staticFile.readline())
for line in staticFile:
stepData = [s for s in line.split()]
if (len(particlesPerFrame) > 300):
break
if (len(stepData) == 1):
time = int(stepData[0])
else:
particlesPerFrame.append(int(stepData[0]))
# Plot histogram data
plt.title('Cantidad de celdas vivas a lo largo del tiempo.')
plt.ylabel('Cantidad de celdas vivas')
plt.xlabel('Iteración')
plt.grid(b=True, which='major', linestyle='-')
plt.grid(b=True, which='minor', color="gray", linestyle='--')
plt.axes().yaxis.set_minor_locator(ticker.MultipleLocator(250))
plt.plot(range(len(particlesPerFrame)), particlesPerFrame)
plt.tight_layout()
plt.show()
| [
"[email protected]"
]
| |
66ee649d5a495b77ee74a20dc799dc69a17cb62a | 77da638f1b14f1059d9073fd716893a9cc2d32a6 | /Model/models.py | 1df56da05e92804fa806406b9b8595a32cfe3149 | []
| no_license | suzoosuagr/fNIRS_DeeperLook | 32cf8fc226576bd31b6a47880bbbea75b116a849 | 707069799aa93872a7928ab68f792e450f3f6c89 | refs/heads/main | 2023-08-27T18:31:03.145946 | 2021-10-15T17:43:12 | 2021-10-15T17:43:12 | 375,205,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import dropout
from torch.nn.modules.activation import ReLU
from Model.networks import FingerTapEmbd, Naive_Embedding, Attn
class BiGRU_Attn_Multi_Branch_SLA(nn.Module):
    """Bidirectional-GRU encoder with attention and two classification heads.

    The input sequence is embedded (Naive_Embedding), encoded by a
    single-layer bidirectional GRU, and summarized via attention; two
    independent linear heads (WML and VPL) classify the attended vector.
    """
    def __init__(self, in_ch, emb_ch, hidden_ch, out_ch, norm):
        # in_ch: input channels; emb_ch: embedding width; hidden_ch: GRU
        # hidden size per direction; out_ch: classes per head; norm:
        # normalization choice forwarded to the embedding module.
        super(BiGRU_Attn_Multi_Branch_SLA, self).__init__()
        self.embd = Naive_Embedding(in_ch, emb_ch, kernel_size=3, norm=norm)
        self.bigru = nn.GRU(emb_ch, hidden_ch, batch_first=True, dropout=0, bidirectional=True)
        self.fc_wml = nn.Linear(2*hidden_ch, out_ch)
        self.fc_vpl = nn.Linear(2*hidden_ch, out_ch)
        self.attn = Attn(2*hidden_ch)

    def forward(self, x):
        x = self.embd(x)
        x = F.relu(x)
        output, hidden = self.bigru(x)
        # Concatenate the final forward and backward hidden states.
        hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=-1)
        # Attend over all timesteps using the summary state as the query.
        rescale_hidden = self.attn(query=hidden, key=output, value=output)
        out_wml = self.fc_wml(rescale_hidden)
        out_vpl = self.fc_vpl(rescale_hidden)
        return out_wml, out_vpl
# return out_vpl
class BiGRUFingerTap(BiGRU_Attn_Multi_Branch_SLA):
    """Finger-tapping variant: same encoder/heads, task-specific embedding."""
    def __init__(self, in_ch, emb_ch, hidden_ch, out_ch, norm):
        super(BiGRUFingerTap, self).__init__(in_ch, emb_ch, hidden_ch, out_ch, norm)
        # Replace the parent's embedding with the finger-tapping one.
        self.embd = FingerTapEmbd(in_ch, emb_ch)
class ANN(nn.Module):
    """Three-layer fully connected classifier (two hidden layers, ReLU)."""

    def __init__(self, in_ch, hidden_layer, out_ch):
        # hidden_layer: sequence of two hidden-layer widths.
        super(ANN, self).__init__()
        layers = [
            nn.Linear(in_ch, hidden_layer[0]),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_layer[0], hidden_layer[1]),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_layer[1], out_ch),
        ]
        self.ann = nn.Sequential(*layers)

    def forward(self, x):
        """Map (N, in_ch) features to (N, out_ch) logits."""
        return self.ann(x)
class BaseConvLayer(nn.Module):
    """1-D residual conv block: conv + 1x1 skip, then max-pool and dropout.

    The sequence length is halved by the pooling stage; channels go from
    in_ch to out_ch.
    """

    def __init__(self, in_ch, out_ch):
        super(BaseConvLayer, self).__init__()
        # Main branch: width-3 conv, padding keeps the length unchanged.
        self.conv = nn.Sequential(
            nn.Conv1d(in_ch, out_ch, kernel_size=3, padding=1),  # input (N, C, L) | L = 104
            nn.ReLU(),
        )
        # Halve the sequence length, then regularize.
        self.pool = nn.Sequential(
            nn.MaxPool1d(2),
            nn.Dropout(0.5)
        )
        # 1x1 conv matches channel counts for the residual addition.
        self.skip_connect = nn.Sequential(
            nn.Conv1d(in_ch, out_ch, kernel_size=1),
            nn.ReLU(),
        )

    def forward(self, x):
        residual = self.skip_connect(x)
        out = torch.relu(self.conv(x) + residual)
        return self.pool(out)
class CNN1(nn.Module):
    """One BaseConvLayer followed by a three-layer MLP classifier head.

    The head is sized as ch_list[0] * 25 input features, i.e. the model
    assumes the pooled conv output has length 25 — an input length of 50.
    TODO confirm against the dataset loader.
    """

    # BUG FIX: the default was the mutable list [32]; a tuple avoids the
    # shared-mutable-default pitfall and indexes identically.
    def __init__(self, in_ch, ch_list=(32,), n_class=3):
        super(CNN1, self).__init__()
        self.conv1 = BaseConvLayer(in_ch, ch_list[0])
        self.fc = nn.Sequential(
            nn.Linear(ch_list[0]*25, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, n_class)
        )

    def forward(self, x):
        # Flatten each sample's conv features before the dense head.
        x = self.conv1(x).view(x.size(0), -1)
        return self.fc(x)
| [
"[email protected]"
]
| |
96c733a9b746b27413f837bde6c7dce363b7961c | 168bc919d9f03749d01cb3089a358c2ea7a928ea | /Create_sql.py | 57de72cc90c0ae8420dc86f786431a3543a6230f | []
| no_license | tacha-chang/ce63-46 | 175294f6f7fd6584aec1d1285d73028f0b2ed02e | 8fc0551104f986dd9058bb2e968469b2f1325f82 | refs/heads/master | 2023-03-19T22:27:59.086034 | 2021-03-18T23:54:23 | 2021-03-18T23:54:23 | 296,383,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | import sqlite3
import shutil
from Card_reading import reader_card
data = reader_card()
def create_user_officer(file_id):  # move office
    """Persist one card-reader record into a per-citizen SQLite database.

    Args:
        file_id: sequence produced by reader_card(); fields used here are
            [2]  raw ID string (characters 1..17 form the citizen ID),
            [6]  gender, [7]/[8] first/last name, [15] address.

    Creates/opens '<citizen_id>.db' in the current directory, creates the
    USER table on demand, and inserts one row.
    """
    raw = file_id[2]
    citizen_id = raw[1:18]          # 17 characters of the citizen ID
    full_name = file_id[7] + ' ' + file_id[8]
    gender = file_id[6]
    address = file_id[15]
    office = "KMITL"                # placeholder office name (สมมุติ)
    file_name = citizen_id + '.db'
    print(citizen_id)
    print(file_name)
    conn = sqlite3.connect(file_name)
    try:
        cursor = conn.cursor()
        # Create the table on first use so a fresh .db file works too; the
        # original left table creation commented out and failed on new files.
        cursor.execute(
            """CREATE TABLE IF NOT EXISTS USER
               (ID TEXT PRIMARY KEY NOT NULL,
                GENDER TEXT NOT NULL,
                NAME TEXT NOT NULL,
                ADRESS TEXT NOT NULL,
                OFFICE TEXT NOT NULL);"""
        )
        # BUG FIX: the original bound (id, name, gender, ...) against the
        # column list (ID, GENDER, NAME, ...), swapping gender and name.
        cursor.execute(
            """INSERT INTO USER (ID, GENDER, NAME, ADRESS, OFFICE)
               VALUES (?, ?, ?, ?, ?);""",
            (citizen_id, gender, full_name, address, office),
        )
        conn.commit()
    finally:
        # Always release the connection, even if the insert fails.
        conn.close()
create_user_officer(data)
| [
"[email protected]"
]
| |
1e665582eecd4b2ca4b9810a19d2f176e303fbd3 | c51d81a650b65ef8e8dc6e8f24dc56820c478ce9 | /flaskblog/main/routes.py | cba28e7837f06ca30414747de96a640b30408a5f | []
| no_license | ksh168/FlaskBlog | 956376e2815bcadc395c7b87d48183de5f3b8c0f | fc7e780da087501a191870d73582b0580c715ac9 | refs/heads/master | 2023-03-05T13:48:00.041700 | 2021-02-18T12:57:43 | 2021-02-18T12:57:43 | 328,122,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from flask import render_template, request, Blueprint
from flaskblog.models import Post
main = Blueprint('main', __name__)
@main.route("/")
@main.route("/home")
def home():
    """Render the home page with the five newest posts for the requested page.

    The ``page`` query argument is coerced to int by Flask's request parsing.
    """
    requested_page = request.args.get('page', 1, type=int)
    newest_first = Post.query.order_by(Post.date_posted.desc())
    posts = newest_first.paginate(page=requested_page, per_page=5)
    return render_template('home.html', posts=posts)
@main.route("/about")
def about():
    """Render the static About page."""
    return render_template('about.html', title='About')
| [
"[email protected]"
]
| |
b90fbfd3c2d421fb70c9156499e70a3a7511340d | 4af090efabd08ef73c411a00ce4972a1c6f30a22 | /python_100days/7day/practice11.py | 82eb730e0a554302387bf8dc26b7ee42b67aaddd | []
| no_license | predatory123/byhytest | e52bca664f9461c9309aaa9bf779c02368ed937c | 578206c9ec9253d0d9325e72cdc13dde6eeb2fc1 | refs/heads/master | 2023-04-26T13:33:14.462408 | 2021-05-20T13:33:37 | 2021-05-20T14:26:22 | 369,213,148 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # 综合案例2:约瑟夫环问题
"""
《幸运的基督徒》
有15个基督徒和15个非基督徒在海上遇险,为了能让一部分人活下来不得不将其中15个人扔到海里面去,
有个人想了个办法就是大家围成一个圈,由某个人开始从1报数,报到9的人就扔到海里面,他后面的人接着从1开始报数,
报到9的人继续扔到海里面,直到扔掉15个人。由于上帝的保佑,15个基督徒都幸免于难,问这些人最开始是怎么站的,
哪些位置是基督徒哪些位置是非基督徒。
"""
def main():
    """Josephus-style simulation for the 'lucky Christians' puzzle.

    30 people stand in a circle; every 9th surviving person is thrown
    overboard until 15 are gone. Prints one character per original seat:
    '基' for a survivor (Christian), '非' for a drowned person.
    """
    seats = list(range(30))     # ids of people still aboard, in circle order
    drowned = set()
    cursor = 0
    for _ in range(15):
        # Advance 8 survivors past the cursor; the 9th count lands here.
        cursor = (cursor + 8) % len(seats)
        drowned.add(seats.pop(cursor))
    for seat in range(30):
        print('非' if seat in drowned else '基', end='')


if __name__ == '__main__':
    main()
"[email protected]"
]
| |
124d77348f3d3ea06d36c0de306ca45e624bbd99 | fb1e94f4b51ab342a81be7c38e7c09bf7d4a94fc | /apicode/Pytest0_case/test_07_article.py | d5dbef686fd1b50f7562cf54d137efc605990d1b | []
| no_license | HOHO-00/test_00 | cc1233b0809c171d51c2633fa7d886bea5a657d3 | 21fb066d0c1bac661af54e698e990beb3fbb1a2f | refs/heads/master | 2023-06-22T03:59:43.625128 | 2021-07-23T00:51:50 | 2021-07-23T00:51:50 | 292,587,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | """
文章相关接口测试用例
"""
import pytest
import requests
import os, sys
sys.path.append(os.getcwd())
from utils.dbtools import query
from utils.filetools import read_file
from utils.filetools import write_file
from utils.exceltools import read_excel
datas = read_excel("data/data.xlsx", "文章")
# Fetch an article's details and check the expected status code and payload.
def test_01_arictle_details():
    row = datas[0]
    res = requests.get(url=row[2], headers=eval(row[3]))
    assert res.status_code == row[5]
    assert res.json()["status"] == row[6]
# Fetch an article's comment list, posting the request body from the sheet.
def test_02_arictle_comments():
    row = datas[1]
    res = requests.post(url=row[2], headers=eval(row[3]), json=eval(row[4]))
    assert res.status_code == row[5]
    assert res.json()["status"] == row[6]
# Create a new article; its id is persisted for the update/delete tests below.
def test_03_article_add():
    url = datas[2][2]
    # NOTE(review): eval() on spreadsheet cells executes arbitrary code if the
    # workbook is ever tampered with — consider json.loads/ast.literal_eval.
    header = eval(datas[2][3])
    data = eval(datas[2][4])
    res = requests.post(url=url,headers=header,json=data)
    # print(res.text)
    assert res.status_code == datas[2][5]
    assert res.json()["status"] == datas[2][6]
    articleid = res.json()["data"]["articleid"]
    # Persist the new id so test_04/test_05 can reference the same article.
    write_file('./tmp/article_id.txt',str(articleid))
    # Verify the row actually landed in the backing t_article table.
    sql = "select * from t_article where id = {}".format(read_file("./tmp/article_id.txt"))
    assert len(query(sql)) != 0
# Update the article created by test_03 and verify the title change in the DB.
def test_04_article_update():
    url = datas[3][2]
    # NOTE(review): the bare string below is a leftover multipart-upload
    # example; as an expression statement it is a no-op.
    """
    payload={}
    files=[('upload',('ho.png',open('C:/users/jssy/Pictures/ho.png','rb'),'image/png'))]
    """
    header = eval(datas[3][3])
    data = eval(datas[3][4])
    res = requests.post(url=url,headers=header,json=data) # res = requests.post(url=url, json=data, headers=header,data=payload)
    # print(res.text)
    assert res.status_code == datas[3][5]
    assert res.json()["status"] == datas[3][6]
    # The title the update request asked for, straight from the sheet.
    title = eval(datas[3][4])["title"]
    # sql = "select * from t_article where id = {} and title = '{}'"
    # sql = "select * from t_article where id = {} and title = '为什么要学习测试123'".format(read_file("./tmp/article_id.txt"))
    sql = "select * from t_article where id = {} and title = '{}'".format(read_file("./tmp/article_id.txt"),title)
    # r = query(sql)
    # assert len(r) != 0
    assert len(query(sql)) != 0
# Delete the article and verify it is soft-deleted (status '1') in the DB.
def test_05_article_delete():
    url = datas[4][2]
    header = eval(datas[4][3])
    data = eval(datas[4][4])
    res = requests.post(url=url,headers=header,json=data)
    # BUG FIX: the expected status/code were read from row 2 (the "add" case)
    # via a copy-paste slip; the delete case's expectations live in row 4.
    assert res.status_code == datas[4][5]
    assert res.json()["status"] == datas[4][6]
    # status column: 0 = normal; 1 = deleted; 2 = disabled
    sql = "select * from t_article where id = {} and status = '1'".format(read_file("./tmp/article_id.txt"))
    assert len(query(sql)) != 0
"[email protected]"
]
| |
4cb19198eb478ec273b77373110121638e6859f3 | 2542c624ea59077b2f612c03b5856a9975b03b98 | /python_stack/django/django_fundamentals/DojoProj/DojoProj/settings.py | 1984a1ee003d28b5f8116890b3f36c4772bc668e | []
| no_license | Harryonismyname/CodingDojoProjects | 4aff0272978a3afe4d98acd421813b04c2b5bb66 | 896fa196c1281e4b9cdba9a3fdb819bfc23ba2d0 | refs/heads/master | 2023-01-09T03:14:54.053353 | 2020-11-16T19:59:03 | 2020-11-16T19:59:03 | 285,445,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | """
Django settings for DojoProj project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '07*k(y8@m+eav!p57%x1wnz@5t=sht*dy(*0le&))0u!+_qt_6'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list: only local development hosts are served while DEBUG is True.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'survey',  # project app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'DojoProj.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # search each app's templates/ directory
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'DojoProj.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
e41d6751ff6935e088b2fb2b8b0c6c1ce83a1d6b | eb82d1887df98b3cfdf3b5e7a7e594a97550ec7d | /Generator/stage3a_logExtractor.py | 4b9cefda79411439cbde8f009e773f67e9c667fd | []
| no_license | mohrez86/Denchmark_BRs | 102e32e94e16ab8b157b4098215f461b39986b16 | ae893de4b29761c81ebff52b0a6901d3bbb5dd04 | refs/heads/main | 2023-06-02T11:39:04.546330 | 2021-06-15T07:44:53 | 2021-06-15T07:44:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,967 | py |
import os
import requests
from bs4 import BeautifulSoup
import time
import util_extractor
extractor = util_extractor.extractor()
import subprocess
import sys
import re
def load_projects():
    """Parse the first-stage candidate-bug CSV into per-project bug sets.

    Reads ./data/stage2_bugreports/1st_candidate_bugs.csv (header skipped;
    column 0 = project, column 2 = bug id) and returns a tuple
    (``{project_lowercase: set(bug_ids)}``, total number of unique
    (project, bug) pairs).

    BUG FIX: the original opened the file via ``open(...).readlines()`` and
    never closed the handle; a ``with`` block now guarantees closure.
    """
    project_bugs = {}
    all_bug_num = 0
    with open("./data/stage2_bugreports/1st_candidate_bugs.csv", "r", encoding="utf8") as fh:
        lines = fh.readlines()
    for line in lines[1:]:
        fields = line.replace("\n", "").split(",")
        project = fields[0].lower()
        bugid = fields[2]
        # setdefault collapses the original new-project/existing-project branches.
        bugs = project_bugs.setdefault(project, set())
        if bugid not in bugs:
            bugs.add(bugid)
            all_bug_num += 1
    return project_bugs, all_bug_num
def cmd(command):
    """Run *command* through the Windows shell and return stdout as a list
    of lines; returns [] when the output is empty (or a single character)."""
    shell_command = 'cmd /u /c ' + command
    proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE)
    raw_stdout, _ = proc.communicate()
    decoded = raw_stdout.decode('utf-8', errors='ignore')
    return decoded.split('\n') if len(decoded) > 1 else []
# ---- stage 3a driver -------------------------------------------------------
# For every candidate project: clone its repository, grep the git history for
# each bug id, and append "project,bug_id,commit" rows to the ground-truth CSV.
# NOTE(review): Windows-specific throughout (cmd.exe wrapper, E:\ paths,
# backslash joins).
project_bugs, bug_num = load_projects()
print(len(project_bugs), bug_num)
# Bugs already present in the output CSV are skipped (resume support).
f = open("./data/stage3_groundtruths/1st_candidate_bugs_with_logs.csv", "r", encoding="utf8")
lines = f.readlines()
already_set = set()
for line in lines:
    project = line.split(",")[0].lower()
    bug_id = line.split(",")[1]
    already_set.add(project+"-"+bug_id)
os.environ['PYTHONIOENCODING'] = 'utf-8'
pattern = re.compile(r'^[ 0-9]+$')  # NOTE(review): compiled but never used below
buggy_keywords = ['fix','bug','error','crash','#']
absolute_path = "E:\\Misoo\\Denchmark_GitRepositories\\"
# Reopen the same CSV in append mode for new results.
f = open("./data/stage3_groundtruths/1st_candidate_bugs_with_logs.csv", "a", encoding="utf8")
allbug = 0  # NOTE(review): never incremented — dead counter
for project in project_bugs.keys():
    os.chdir("E:\\Misoo\\Python_workspace\\Denchmark\\")
    path = absolute_path + project.replace("/","+")
    if os.path.isdir(path) is False:
        os.makedirs(path)
    os.chdir(path)
    # Clone is a no-op (git error) if the repo already exists in this folder.
    result = cmd("git clone http://github.com/"+project)
    print("FINISH to clone the repository", project)
    bug_list = project_bugs[project]
    path = path +"\\"+project.split("/")[1]
    os.chdir(path)
    p = subprocess.Popen("cmd /u /c git checkout master", stdout=subprocess.PIPE)
    result = p.communicate()
    # print(result)
    for bug_id in bug_list:
        identifier = project+'-'+bug_id
        if identifier in already_set:
            print(identifier, "ALREADY")
            continue
        # time.sleep(5)
        # Get Commit having the bugID as text
        commit_list = cmd("git log --all --name-status --pretty=format:%h%x09%an%x09%ad%x09%s --grep \""+bug_id+"\"")
        commit_ids = []
        commit_id_files = {}
        commit_id_date = {}
        commit_id_text = {}
        commit_text_id = {}
        commit_messages = {}
        prev_commit = ""
        prev_commit_date = ""
        prev_commit_summary = ""
        fixed_files = []
        # Parse the name-status log: a (near-)blank line terminates one
        # commit's block and flushes the accumulated state.
        # NOTE(review): a final commit without a trailing blank line would
        # never be flushed — confirm git always emits the separator here.
        for commit_text in commit_list:
            if len(commit_text.replace(" ","")) < 2:
                # Get Commit Message
                commit_message= ' '.join(cmd("git show -s --format=%B "+prev_commit))
                commit_messages[prev_commit] = commit_message
                commit_id_files[prev_commit] = fixed_files
                commit_id_date[prev_commit] = prev_commit_date
                commit_id_text[prev_commit] = prev_commit_summary
                commit_text_id[prev_commit_summary] = prev_commit
                commit_ids.append(prev_commit)
                prev_commit = ""
                prev_commit_date = ""
                prev_commit_summary = ""
                fixed_files = []
                continue
            if len(prev_commit) == 0:
                # Header line: "<hash>\t<author>\t<date>\t<subject>"
                prev_commit = commit_text.split("\t")[0]
                prev_commit_date = commit_text.split("\t")[2]
                prev_commit_summary = commit_text.split("\t")[3]
            else:
                # File line: "<status>\t<path>"; keep modified (M) files only.
                file_type = commit_text.split("\t")[0]
                if file_type =="M":
                    fixed_files.append(commit_text.replace("\n","").split("\t")[1])
        # Only selecting the commit having buggy-keywords and exact number of bug id
        # print(bug_id, len(commit_list))
        for commit in commit_ids:
            flag = False
            body = commit_messages[commit]
            for key in buggy_keywords:
                if body.lower().find(key) > -1:
                    numbers = re.findall(r"\d+", body)
                    # NOTE(review): `break` only exits the numbers loop; the
                    # keyword loop keeps scanning after a match (harmless,
                    # since flag is never reset).
                    for result in numbers:
                        if str(result) == bug_id:
                            # print(bug_id, key, numbers, result, body)
                            flag = True
                            break
            if flag is False:
                continue
            print(project+","+bug_id+","+commit)
            f.write(project+","+bug_id+","+commit+"\n")
f.close()
"misookim"
]
| misookim |
f96d397ea492bb7729fe2189e5f3084c51fa1f73 | 20dee5f717e4dd44bd5400150406fed9a0a80fe4 | /clustering/euclid_distance.py | 609b9cf7f869bd88b0c5443037dcce42d20b3f62 | []
| no_license | villank2/Datawarehousing_notes | 512ffc4fea01d633dec90a36b1d14ab5421d38cf | a6bc7d380e6d2ec0ae93b6ba6182c4dbf8ca96e9 | refs/heads/master | 2023-02-11T08:42:23.870465 | 2021-01-11T19:54:11 | 2021-01-11T19:54:11 | 325,084,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | import sys
import math
def euclid_dist(x, y):
    """Return the Euclidean distance between two equal-length vectors.

    Raises:
        ValueError: if the vectors differ in length. (The original used a
        bare ``assert``, which is stripped under ``python -O``.)
    """
    if len(x) != len(y):
        raise ValueError("vectors must have the same length")
    # Generator expression avoids building a throwaway list inside sum().
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
def agglohi_cluster(li):
    '''takes in a list of list of form [id,attr1,attr2,etc]
    check if an item in the main list is a cluster meaning
    it is a list of lists'''
    # Wrap every record, then print each record's id.
    wrapped = [Item(record) for record in li]
    for entry in wrapped:
        print(entry.id)
class Item:
    """Lightweight record wrapper: element 0 is the id, the rest are the
    attribute values (note: attribute name `atrrs` is a historical typo
    kept for compatibility with existing callers)."""

    def __init__(self, li):
        self.atrrs = li[1:]
        self.id = li[0]
if __name__ == "__main__":
    # Ad-hoc smoke check: distance from each point in `li` to three anchors.
    a1 =[1,1,1,1]
    a10 = [3,1,1,2]
    a24 = [3,2,2,2]
    li = [[3,2,2,1]]
    for x in li:
        print(euclid_dist(x,a1),euclid_dist(x,a10),euclid_dist(x,a24))
        print()
| [
"[email protected]"
]
| |
c4b04dc0b583f8776c8ef15d01fe5217c3fa07ee | 7df277d932f2d5de158d05e5bfc91fe2ebbc0f47 | /FirstWeb/make1/settings.py | ec963ec312b4e44adbd498616c83f226ba500d26 | []
| no_license | python-study-ko/django_study | 84eb485f60acf57f922b6fc9dcf3a8411bcae0a6 | 44dff5bd2f197ea5649db5cb83b2fa2d77da508f | refs/heads/master | 2021-01-20T19:59:51.423620 | 2016-07-02T13:50:15 | 2016-07-02T13:50:15 | 62,380,932 | 2 | 0 | null | 2016-07-02T13:50:16 | 2016-07-01T09:18:38 | null | UTF-8 | Python | false | false | 3,255 | py | """
Django settings for make1 project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control — load it from the
# environment before deploying.
SECRET_KEY = '=3w+je$ckvz23+vr1)xcwx-676(@&fafd2ygux2ezhzxb#l1dt'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'polls',  # project app
]

# Django 1.9-era setting name; renamed to MIDDLEWARE in Django 1.10+.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'make1.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'make1.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Seoul'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'

# NOTE(review): legacy setting; superseded by TEMPLATES[0]['DIRS'] above.
TEMPLATE_DIRS = [os.path.join(BASE_DIR,'templates')]
"[email protected]"
]
| |
8edaa67fda3c2d8e1a359fba81e6985b7270aa14 | 8a48adfaca1854854c79b7fbe1e60c67931a2cfb | /Datatype.py | 5393aaceed7bf53887c91a4fc2175a2713bbcdff | []
| no_license | karolcajo/Tarea-5-Ejemplos | 14cb049a402ea572a30b94d916037741eb18e8df | 5b25a00fb4c9532ac1e0040b26e7bdd038f77703 | refs/heads/main | 2022-12-24T20:47:30.941510 | 2020-10-11T15:32:05 | 2020-10-11T15:32:05 | 302,953,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # String
# Data-type demo script, fixed to be valid Python 3:
# - `true` is not a Python literal (NameError at runtime) -> True
# - the dict literal was missing commas between pairs (SyntaxError)
print("Hello World")
print("Hello world")
print("""Hello World""")
print("Bye" + "World")
# Integer
print(30)
# Float
print(30.5)
# Boolean (bare expression statements — evaluated and discarded)
True
False
# List
[10, 20, 30, 55]
["hello","bye","adios"]
[10, "hello", True, 10.1]  # BUG FIX: was lowercase `true`
[]
# Tuples
(10, 20, 30, 55)
()
# Dictionaries
print(type({"nombredelapersona": "Ryan",  # BUG FIX: commas were missing,
            "apellido": "Ray",            # which made this a SyntaxError
            "apodo": "Fazt",
            }))
None
"[email protected]"
]
| |
e58c0bd5a53b77ae5264a15a880f3355616d1f73 | acca191d5ebfc60111539b7de6d69ef68df33c39 | /mainapp/serializers.py | e2cd7d4ee4d4a6688980b6231ab963ca920bd395 | []
| no_license | Alksgest/python-todo | 45e0779f62cf709ed9ae0a3e0306eea67740aa93 | a1e1f82bb3dfaad73c981d089c3599e91b5188a7 | refs/heads/master | 2020-05-04T10:45:24.285289 | 2019-04-05T14:40:09 | 2019-04-05T14:40:09 | 179,094,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import TodoModel
class TodoSerializer(serializers.ModelSerializer):
    """Serializes TodoModel rows; `owner` is exposed read-only as the
    owning user's username."""
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = TodoModel
        fields = ('id', 'date', 'content', 'owner')
class UserSerializer(serializers.ModelSerializer):
    """Serializes auth Users together with the primary keys of their todos."""
    # Writable PK-related field; the queryset bounds which todos may be linked.
    todos = serializers.PrimaryKeyRelatedField(many=True, queryset=TodoModel.objects.all())

    class Meta:
        model = User
        fields = ('id', 'username', 'todos')
"[email protected]"
]
| |
656a4c3375c120a263d8ecfce111ff6e5f902f18 | 3610997a177b286dd39676855284db196a08f87f | /target_volunteers.py | 424e56e6dc3bdc488a74ddbbb7c1892fd49accc3 | []
| no_license | saviaga/IDontEatMeat | 17f14dc73b12e242155eb4f095b8f80692f664a2 | 9d8e3ba32902d63680fd9d9483227df5b9198afc | refs/heads/master | 2021-01-10T04:08:51.841732 | 2016-03-26T18:28:42 | 2016-03-26T18:28:42 | 47,032,651 | 1 | 1 | null | 2016-01-19T17:59:22 | 2015-11-28T17:23:12 | Python | UTF-8 | Python | false | false | 7,417 | py | import retrive_twitter_info
from peewee import *
from create_recruited_database import Recruited, Tweets, Hashtag, SentDate
import datetime, time
class target:
    """Drives one outreach round: pick a hashtag, find recruited users who
    tweeted it and are outside the 48-hour cool-down, build personalised
    tweets and send them (rate-limited, one every 10 minutes).

    NOTE(review): the lowercase class name is kept because module-level code
    instantiates ``target(...)``.
    """

    def __init__(self, twitter, db, user):
        self.twitter = twitter  # wrapper exposing .api plus Twitter helpers
        self.db = db            # open peewee database (models bound globally)
        self.user = user        # sending account's screen name

    def get_hashtags(self):
        """Return the distinct hashtag texts stored in the Hashtag table."""
        rows = Hashtag.select(Hashtag.hashtag_text).distinct()
        return [row.hashtag_text for row in rows]

    def show_menu(self, hashtag_list):
        """Print the hashtags as a 1-based numbered menu."""
        for count, item in enumerate(hashtag_list, start=1):
            print("{}:{}".format(count, item))

    def retrieve_users(self, target_hashtag_idx, the_hashtags):
        """Return ids of users who tweeted the chosen hashtag (1-based menu
        index) and were NOT sent one of our tweets in the last 48 hours.

        BUG FIX: the original rebound ``selected_user`` on every iteration of
        the per-user loop, so only the LAST user's SentDate rows were ever
        inspected and every other user bypassed the cool-down check. Every
        user's rows are examined now.
        """
        now = datetime.datetime.now()
        cutoff = now - datetime.timedelta(hours=48)
        the_target_hashtag = the_hashtags[target_hashtag_idx - 1]
        users = Hashtag.select(Hashtag.user_of_hashtag).where(
            Hashtag.hashtag_text == the_target_hashtag)
        users_list = [item.user_of_hashtag_id for item in users]
        print(users_list)
        pop_user = []
        for user_id in users_list:
            sent_rows = SentDate.select(
                SentDate.user_sent,
                SentDate.date_tweet_sent,
                SentDate.tweet_sent_message,
            ).where(SentDate.user_sent == user_id)
            for row in sent_rows:
                # date_tweet_sent is stored as str(datetime.datetime.now()).
                date_retrieved = datetime.datetime.strptime(
                    row.date_tweet_sent, '%Y-%m-%d %H:%M:%S.%f')
                if cutoff < date_retrieved:
                    print('Not have passed 48 hours, cannot send tweet')
                    if row.user_sent_id not in pop_user:
                        pop_user.append(row.user_sent_id)
        # Keep only users outside the cool-down window.
        users_list = [uid for uid in users_list if uid not in pop_user]
        print("final list", users_list)
        return users_list

    def construct_tweet(self, list_of_users, message):
        """Build '@screen_name message' texts for at most 24 users.

        Returns {user_id: tweet_text}. Uses ``self.twitter`` (the original
        reached for the module-level ``twitter`` global).
        """
        tweets = {}
        print("Constructing tweets")
        for user_id in list_of_users[:24]:
            screen_name = self.twitter.get_screen_name(user_id)
            tweets[user_id] = '@' + screen_name + ' ' + message
            print(tweets[user_id])
        return tweets

    def send_tweet(self, tweets_to_send):
        """Post every constructed tweet, sleeping 10 minutes between sends
        to stay under rate limits, and record each send via save_tweet_data."""
        for user_id, text in tweets_to_send.items():
            print('Mensaje a enviar: ', text)  # "message to send"
            self.twitter.api.update_status(status=text)
            time.sleep(600)  # 10-minute spacing between tweets
            self.save_tweet_data(user_id, text)

    def save_tweet_data(self, k, v):
        """Insert one (user id ``k``, tweet text ``v``, timestamp) row into
        SentDate. (A dead get_user_timeline() call was removed.)"""
        user_sent = int(k)
        print("save tweet ", v)
        SentDate(user_sent=user_sent,
                 tweet_sent_message=v,
                 date_tweet_sent=datetime.datetime.now()).save()

    def send_tweet_to_recruited(self):
        """Interactive driver: menu -> pick hashtag -> confirm -> send."""
        the_hashtags = self.get_hashtags()
        print(the_hashtags)
        self.show_menu(the_hashtags)
        choice = input("Which hashtag do you want to target: ")
        recruited = self.retrieve_users(int(choice), the_hashtags)
        print('recruited', recruited)
        if not recruited:
            print("There are no recruiters")
            exit()
        message = input("Write the tweet you want do send: ")
        constructed = self.construct_tweet(recruited, message)
        print(constructed)
        confirm = input('Are you sure you want to send them? Y=yes, N=no: ')
        if confirm.lower() == 'y':
            self.send_tweet(constructed)
        else:
            exit()
def insert_in_database(user, id_tweet, message, date):
    """Record one sent tweet in the SentDate table.

    NOTE(review): ``id_tweet`` is accepted but never stored; also, peewee's
    ``create()`` already inserts the row, so the extra ``save()`` looks
    redundant — confirm before removing.
    """
    query_user = SentDate.create(user_sent=user, tweet_sent_message=message,
                                 date_tweet_sent=date)
    query_user.save()
# --- script entry: wire up credentials, database, and start the campaign ---
# my_twitter_info.txt holds one value per line: screen name, consumer key,
# consumer secret, access token, access token secret.
user, ck, cs, at, atc = [line.rstrip('\n') for line in open('my_twitter_info.txt', 'r')]
print("the user is", user)
twitter = retrive_twitter_info.GetTwitterInfo(ck, cs, at, atc, user)
print("antes de procesar archivo")  # "before processing the file"
db = SqliteDatabase('recruited.db')
db.connect()
# insert_in_database(44973121,995,"just python5",datetime.datetime.now())
new_target = target(twitter, db, user)
new_target.send_tweet_to_recruited()
| [
"[email protected]"
]
| |
af9e3790b0fec45cc8505e4cdb46559b29699168 | aa6422117b534e4f4eed197b71f9fcf00eb1983a | /build/flexbe_behavior_engine/flexbe_msgs/catkin_generated/pkg.installspace.context.pc.py | 59b4b32b461d5a6370401cf3f2035bab4875b704 | []
| no_license | Sinchiguano/StateMachineFlexBe | c07385b09e1ab15e88e894da8fd021d1cbf0de28 | d637acf2f26a3f0d83ef4f2d34a2636dff2515f6 | refs/heads/master | 2020-09-26T02:49:46.882388 | 2019-12-05T16:44:23 | 2019-12-05T16:44:23 | 226,146,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated by the catkin build (pkg.context.pc.in template); values are
# baked in at configure time. Do not edit by hand — regenerate with catkin.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/casch/catkin_ws/install/include".split(';') if "/home/casch/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "flexbe_msgs"
PROJECT_SPACE_DIR = "/home/casch/catkin_ws/install"
PROJECT_VERSION = "1.2.2"
| [
"[email protected]"
]
| |
5988d7b08d3aaeaec60b1ae8f30583b1ca2ae4bb | e2acf75ce0b24a595d0ae64c5f6f65d3ae6ab56b | /requires.py | 55b0d940bd1a495c21928a80bb2fa24ff6ee6744 | [
"Apache-2.0"
]
| permissive | ktsakalozos/interface-flume-agent | 9fbf4696301ece823534d68198b2f6b6857b1d2b | 20b11be7beec0d701ea1214102908c2a70804e25 | refs/heads/master | 2016-08-10T03:21:31.278419 | 2016-02-23T10:22:43 | 2016-02-23T10:22:43 | 48,242,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
class FlumeRequires(RelationBase):
    """Requires-side handler for the `flume-agent` charm interface.

    Sets '{relation_name}.connected' when a remote unit joins, and
    '{relation_name}.available' once the remote has published its ip, port
    and protocol; both states are cleared when the unit departs.
    """
    scope = scopes.UNIT  # one conversation per remote unit

    @hook('{requires:flume-agent}-relation-joined')
    def joined(self):
        conv = self.conversation()
        conv.set_state('{relation_name}.connected')

    @hook('{requires:flume-agent}-relation-changed')
    def changed(self):
        conv = self.conversation()
        # Only advertise availability once all connection details are present.
        if self.get_flume_ip() and self.get_flume_port() \
                and self.get_flume_protocol():
            conv.set_state('{relation_name}.available')

    @hook('{requires:flume-agent}-relation-departed')
    def departed(self):
        conv = self.conversation()
        conv.remove_state('{relation_name}.connected')
        conv.remove_state('{relation_name}.available')

    def get_flume_ip(self):
        # NOTE(review): reads only the first conversation — assumes a single
        # related flume unit.
        return self.conversations()[0].get_remote('private-address')

    def get_flume_port(self):
        return self.conversations()[0].get_remote('port')

    def get_flume_protocol(self):
        return self.conversations()[0].get_remote('protocol')
| [
"[email protected]"
]
| |
9c802cf94c8ab1e2bcb8fbb3a0bc1c6bf82d537a | 54a08feb6473670578d6274df95468952a923892 | /scripts/create_submission_together.py | 4709040e8cfe876baa46d1591e4ec7d102750e43 | []
| no_license | burness/talking_data | 6fdcd168ee8f6912b8267d809aa8ae2a2a23d72a | d9d3689099da53abf4f6f4434cbc5f7a8138a5f2 | refs/heads/master | 2021-01-17T20:41:39.628470 | 2016-08-03T05:30:22 | 2016-08-03T05:30:22 | 64,817,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | def combine_submission_together(file1,file2):
submission2 = {}
with open(file1,'r') as fread:
for line in fread.readlines():
line_list = line.split(",")
device_id = line_list[0]
info = ','.join(line_list[1:])
submission2[device_id] = info
with open(file2, 'r') as fread:
with open('final_submission2.csv','w') as fwrite:
for line in fread.readlines():
line_list = line.split(",")
device_id = line_list[0]
if submission2.has_key(device_id):
info = submission2[device_id]
else:
info = ','.join(line_list[1:])
line_write = device_id+','+info
fwrite.write(line_write)
if __name__ == '__main__':
combine_submission_together('submission2_2.26726836242_2016-08-02-16-07.csv','submission2.csv')
| [
"[email protected]"
]
| |
d71a404752deb47fcc36b7785a681d5937f7e0e8 | 791c7d354488dfb643f170b669d324aa875f1059 | /FromLeetCode/Simple/License Key Formatting.py | ab85634202c5ebb4e3561cfb1bdcb178a058bac2 | []
| no_license | Bonnieliuliu/LeetCodePlayGround | db5567624f9458557239e3fe0eaef0ee0b59b0e7 | 9910bb7d142db9060b189748ebf7e85ee5f3443d | refs/heads/master | 2020-03-22T07:42:01.629468 | 2020-03-03T15:16:38 | 2020-03-03T15:16:38 | 139,719,046 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | """
author = Bonnieliuliu
email = [email protected]
file = License Key Formatting.py
time = 2018/8/2 20:29
more information
"""
class Solution:
def licenseKeyFormatting(self, S, K):
"""
:type S: str
:type K: int
:rtype: str
"""
S = S.replace("-", "")
n = len(S)
first = n % K
slash = n // K
if first == 0:
output = ""
else:
output = S[0:first]
for i in range(slash):
if i == 0:
if first == 0:
output += S[first + i * K: first + (i + 1) * K]
else:
output += "-" + S[first + i * K: first + (i + 1) * K]
else:
output += "-" + S[first + i * K: first + (i + 1) * K]
return output.upper()
def main():
input = "59F3Z-2e-9-w"
K = 4
s = Solution()
res = s.licenseKeyFormatting(input, K)
print(res)
if __name__ == "__main__":
main() | [
"[email protected]"
]
| |
15f2ce64da1c5f5bee70e930d1331822c81378e9 | 5defa72ddd76d88cd0cafd882f7bf8b71bbfd5e0 | /settings.py | e8bd9603b983267e8e1121c14ddff8b40fc56552 | []
| no_license | SanSanch5/try_seq2seq | 5a5bb64c510e4789a1f5a423e249de5fcdd5f670 | 8c4d82281dded79e6064ce174f5080b5408a4ebb | refs/heads/master | 2021-01-19T13:36:14.361055 | 2017-10-12T04:57:41 | 2017-10-12T04:57:41 | 82,398,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | TRAINING_DATA_KOEF = 0.8
enc_sentence_length = 15
dec_sentence_length = 20
batch_size = 250
n_epoch = 1000
hidden_size = 150
enc_emb_size = 300
dec_emb_size = 300
saved_model_file = 'model/model.ckpt'
log_file = 'model/training.log'
| [
"[email protected]"
]
| |
4be0a9347751505cc966aaaae4aa8a00df3626f7 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/macle_20220825/c/main.py | 02c948ca2212d942ef5f1445c169292d56933fb5 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 5,236 | py | # -*- coding: utf-8 -*-
import math
from bisect import bisect_left, bisect_right, insort
from typing import Generic, Iterable, Iterator, TypeVar, Union, List
T = TypeVar('T')
class SortedMultiset(Generic[T]):
"""Sorted multi set (set) in C++.
See:
https://qiita.com/tatyam/items/492c70ac4c955c055602
https://github.com/tatyam-prime/SortedSet/blob/main/SortedMultiset.py
"""
BUCKET_RATIO = 50
REBUILD_RATIO = 170
def _build(self, a=None) -> None:
"Evenly divide `a` into buckets."
if a is None:
a = list(self)
size = self.size = len(a)
bucket_size = int(math.ceil(math.sqrt(size / self.BUCKET_RATIO)))
self.a = [a[size * i // bucket_size: size * (i + 1) // bucket_size] for i in range(bucket_size)]
def __init__(self, a: Iterable[T] = []) -> None:
"Make a new SortedMultiset from iterable. / O(N) if sorted / O(N log N)"
a = list(a)
if not all(a[i] <= a[i + 1] for i in range(len(a) - 1)): # type: ignore
a = sorted(a) # type: ignore
self._build(a)
def __iter__(self) -> Iterator[T]:
for i in self.a:
for j in i:
yield j # type: ignore
def __reversed__(self) -> Iterator[T]:
for i in reversed(self.a):
for j in reversed(i):
yield j
def __len__(self) -> int:
return self.size
def __repr__(self) -> str:
return "SortedMultiset" + str(self.a)
def __str__(self) -> str:
s = str(list(self))
return "{" + s[1: len(s) - 1] + "}"
def _find_bucket(self, x: T) -> List[T]:
"Find the bucket which should contain x. self must not be empty."
for a in self.a:
if x <= a[-1]: # type: ignore
return a
return a # type: ignore
def __contains__(self, x: T) -> bool:
if self.size == 0:
return False
a = self._find_bucket(x)
i = bisect_left(a, x) # type: ignore
return i != len(a) and a[i] == x
def count(self, x: T) -> int:
"Count the number of x."
return self.index_right(x) - self.index(x)
def add(self, x: T) -> None:
"Add an element. / O(√N)"
if self.size == 0:
self.a = [[x]]
self.size = 1
return
a = self._find_bucket(x)
insort(a, x) # type: ignore
self.size += 1
if len(a) > len(self.a) * self.REBUILD_RATIO:
self._build()
def discard(self, x: T) -> bool:
"Remove an element and return True if removed. / O(√N)"
if self.size == 0:
return False
a = self._find_bucket(x)
i = bisect_left(a, x) # type: ignore
if i == len(a) or a[i] != x:
return False
a.pop(i)
self.size -= 1
if len(a) == 0:
self._build()
return True
def lt(self, x: T) -> Union[T, None]:
"Find the largest element < x, or None if it doesn't exist."
for a in reversed(self.a):
if a[0] < x: # type: ignore
return a[bisect_left(a, x) - 1] # type: ignore
return None
def le(self, x: T) -> Union[T, None]:
"Find the largest element <= x, or None if it doesn't exist."
for a in reversed(self.a):
if a[0] <= x: # type: ignore
return a[bisect_right(a, x) - 1] # type: ignore
return None
def gt(self, x: T) -> Union[T, None]:
"Find the smallest element > x, or None if it doesn't exist."
for a in self.a:
if a[-1] > x: # type: ignore
return a[bisect_right(a, x)] # type: ignore
return None
def ge(self, x: T) -> Union[T, None]:
"Find the smallest element >= x, or None if it doesn't exist."
for a in self.a:
if a[-1] >= x: # type: ignore
return a[bisect_left(a, x)] # type: ignore
return None
def __getitem__(self, x: int) -> T:
"Return the x-th element, or IndexError if it doesn't exist."
if x < 0:
x += self.size
if x < 0:
raise IndexError
for a in self.a:
if x < len(a):
return a[x] # type: ignore
x -= len(a)
raise IndexError
def index(self, x: T) -> int:
"Count the number of elements < x."
ans = 0
for a in self.a:
if a[-1] >= x: # type: ignore
return ans + bisect_left(a, x) # type: ignore
ans += len(a)
return ans
def index_right(self, x: T) -> int:
"Count the number of elements <= x."
ans = 0
for a in self.a:
if a[-1] > x: # type: ignore
return ans + bisect_right(a, x) # type: ignore
ans += len(a)
return ans
def main():
import sys
input = sys.stdin.readline
l, q = map(int, input().split())
s = SortedMultiset([0, l])
for i in range(q):
ci, xi = map(int, input().split())
if ci == 1:
s.add(xi)
else:
print(s.gt(xi) - s.lt(xi))
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
74c3487b1ce6284d456f24c7a822e7f5b042c1b0 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp83_0.py | 55e6471489d774a44032f55978e0c9af8a653f9c | []
| no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,894 | py | ITEM: TIMESTEP
0
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
3.3480991349454570e-01 4.6865190086497961e+01
3.3480991349454570e-01 4.6865190086497961e+01
3.3480991349454570e-01 4.6865190086497961e+01
ITEM: ATOMS id type xs ys zs
8 1 0.130808 0.0685954 0.067749
35 1 0.0615812 0.131941 0.0620756
130 1 0.0673745 0.0640743 0.11748
165 1 0.131142 0.128914 0.121465
2 1 0.0695213 0.0667569 0.00435885
37 1 0.125561 0.133088 0.00372516
1 1 0.00214951 0.00360363 0.00137352
129 1 0.00721661 0.000399364 0.132507
133 1 0.12787 0.0091779 0.125678
3 1 0.0615281 0.00283245 0.0560259
33 1 0.00628241 0.122947 0.00199967
41 1 0.255074 0.120998 0.00247506
12 1 0.256702 0.0621165 0.0635116
39 1 0.18849 0.128713 0.0601789
43 1 0.314491 0.133976 0.0600459
134 1 0.190575 0.0721728 0.127107
138 1 0.312259 0.0636323 0.128498
169 1 0.249922 0.133413 0.117835
7 1 0.186992 0.00728301 0.0645151
137 1 0.250507 0.00351034 0.121993
6 1 0.189619 0.0663561 0.00165912
16 1 0.369832 0.065535 0.0613228
47 1 0.437339 0.134886 0.0575771
142 1 0.43311 0.0693917 0.124919
173 1 0.374931 0.129585 0.123094
145 1 0.49597 0.00509139 0.121231
20 1 0.490534 0.060222 0.0599609
15 1 0.433715 0.00178399 0.059005
14 1 0.429334 0.0650988 0.00372852
49 1 0.494926 0.120136 0.000961326
177 1 0.500933 0.127737 0.120011
24 1 0.618947 0.0656048 0.0659352
51 1 0.564982 0.123398 0.0535858
146 1 0.556652 0.0696211 0.120498
181 1 0.623493 0.127887 0.119896
149 1 0.621413 0.00203271 0.126088
19 1 0.55821 0.00671903 0.065014
28 1 0.744291 0.051325 0.0639339
55 1 0.682399 0.124839 0.0582936
59 1 0.812581 0.119231 0.0638001
150 1 0.688481 0.0611819 0.121978
154 1 0.799091 0.0663457 0.132579
185 1 0.746599 0.127579 0.125456
22 1 0.67589 0.0536424 0.00674597
57 1 0.74755 0.118531 0.00724268
26 1 0.819197 0.0635787 0.00609031
4 1 0.996363 0.0584481 0.0633073
161 1 0.996703 0.121693 0.122247
32 1 0.878943 0.0628986 0.0636405
63 1 0.931276 0.130829 0.0663827
158 1 0.933467 0.0579956 0.122245
189 1 0.877272 0.121874 0.130926
61 1 0.867437 0.135243 0.00464827
30 1 0.938778 0.0567226 0.000580732
40 1 0.125694 0.194609 0.0654106
67 1 0.0543099 0.249068 0.0724686
72 1 0.12366 0.312019 0.0666992
162 1 0.0688229 0.188212 0.127454
194 1 0.0574666 0.306123 0.127401
197 1 0.121783 0.247949 0.123449
36 1 0.000173399 0.183002 0.069021
69 1 0.113044 0.250965 0.00828519
34 1 0.06401 0.190304 0.00124161
44 1 0.242643 0.193217 0.0630084
71 1 0.186982 0.25364 0.0596591
75 1 0.306053 0.243772 0.0614634
76 1 0.254313 0.303096 0.0699722
166 1 0.187876 0.188445 0.125002
170 1 0.316137 0.184086 0.123622
198 1 0.189632 0.311759 0.129538
201 1 0.253681 0.249862 0.130996
202 1 0.321845 0.320619 0.13367
74 1 0.313776 0.313242 0.0055655
73 1 0.248241 0.25639 0.000483841
48 1 0.383878 0.190133 0.0542299
79 1 0.44665 0.247464 0.0529615
80 1 0.372873 0.311057 0.0600798
174 1 0.435719 0.19442 0.120077
205 1 0.374042 0.248904 0.115022
206 1 0.444925 0.299641 0.11928
84 1 0.504138 0.308064 0.0623404
52 1 0.50307 0.182235 0.0613968
209 1 0.5133 0.251198 0.12277
56 1 0.622931 0.183591 0.0613469
83 1 0.561503 0.242679 0.0566626
88 1 0.618381 0.316386 0.0585294
178 1 0.566574 0.184471 0.121372
210 1 0.568165 0.312496 0.125149
213 1 0.626406 0.250788 0.121887
60 1 0.748808 0.183019 0.0567971
87 1 0.685999 0.253104 0.0553365
91 1 0.804547 0.247052 0.0560679
92 1 0.747832 0.318432 0.0616899
182 1 0.69039 0.185775 0.117104
186 1 0.808283 0.182532 0.119399
214 1 0.68067 0.313849 0.121539
217 1 0.746881 0.247696 0.114411
218 1 0.811768 0.314322 0.114754
54 1 0.685855 0.18377 0.00494905
58 1 0.802232 0.184376 0.00288401
62 1 0.9406 0.191793 0.006952
93 1 0.873021 0.243789 0.00528555
193 1 0.999166 0.243232 0.135248
68 1 0.999754 0.30919 0.0644451
64 1 0.870868 0.18797 0.0687111
95 1 0.94118 0.253044 0.0627872
96 1 0.871386 0.31454 0.0528561
190 1 0.941147 0.187395 0.126725
221 1 0.881809 0.247341 0.122331
222 1 0.938404 0.310315 0.132704
94 1 0.94391 0.313816 0.00201044
1153 1 0.000742689 0.497468 0.119691
1027 1 0.0628977 0.491051 0.0670263
101 1 0.130759 0.379155 0.00167892
99 1 0.0673753 0.3724 0.0642353
104 1 0.132739 0.432169 0.06254
226 1 0.0654797 0.435363 0.125966
229 1 0.128378 0.36903 0.125893
1157 1 0.124968 0.497623 0.116302
97 1 1.37735e-05 0.371807 0.00414642
105 1 0.256089 0.371076 0.0117414
103 1 0.190901 0.369792 0.062353
107 1 0.317192 0.372 0.0646675
108 1 0.256967 0.440648 0.0749559
230 1 0.184297 0.431328 0.134146
233 1 0.254244 0.363377 0.125442
234 1 0.316812 0.442074 0.127498
1031 1 0.189927 0.497284 0.0727541
102 1 0.198861 0.437305 0.0100326
106 1 0.305361 0.444269 0.00570288
1169 1 0.495688 0.49476 0.129357
111 1 0.439739 0.370403 0.0596494
112 1 0.382055 0.432033 0.0645835
237 1 0.390137 0.373927 0.124025
238 1 0.445138 0.432259 0.129537
1165 1 0.381304 0.491683 0.12932
1039 1 0.438648 0.493656 0.0706543
116 1 0.503189 0.436567 0.0606158
241 1 0.500144 0.368258 0.121423
115 1 0.559084 0.376872 0.0574744
120 1 0.625103 0.441154 0.0575074
242 1 0.559678 0.442361 0.121917
245 1 0.623913 0.381813 0.116352
1043 1 0.559189 0.499218 0.0563459
117 1 0.622536 0.372631 7.60526e-05
113 1 0.503824 0.366053 0.00101907
114 1 0.556925 0.43937 0.000178973
119 1 0.690029 0.376238 0.0609137
123 1 0.815313 0.378998 0.0616236
124 1 0.759391 0.439723 0.0560861
246 1 0.685807 0.436367 0.126553
249 1 0.750717 0.380718 0.121318
250 1 0.819825 0.435973 0.123049
1047 1 0.694157 0.498194 0.057227
1177 1 0.753685 0.496646 0.118017
126 1 0.93435 0.439977 0.00627683
1053 1 0.882555 0.499621 0.00115897
225 1 0.996166 0.3724 0.125924
100 1 0.997643 0.433141 0.0620464
127 1 0.935941 0.368689 0.0662383
128 1 0.876206 0.441698 0.0601959
253 1 0.873727 0.365926 0.121554
254 1 0.935601 0.434055 0.120289
1055 1 0.939368 0.497967 0.0678297
1181 1 0.878863 0.499827 0.126726
259 1 0.0565572 0.000414809 0.308376
136 1 0.129436 0.064618 0.182635
163 1 0.0640827 0.123877 0.184253
258 1 0.0662541 0.0596461 0.251425
264 1 0.127049 0.0570362 0.306392
291 1 0.0732193 0.116105 0.319356
293 1 0.129167 0.123144 0.2474
289 1 0.00204297 0.125387 0.256579
131 1 0.0711838 0.000999719 0.194282
139 1 0.317293 0.000885977 0.18865
140 1 0.256016 0.055746 0.185537
167 1 0.190176 0.133277 0.185186
171 1 0.309518 0.125062 0.183755
262 1 0.187576 0.0652493 0.240892
266 1 0.317563 0.0575759 0.246528
268 1 0.249912 0.0540318 0.300983
295 1 0.187193 0.125175 0.315931
297 1 0.248099 0.116348 0.244811
299 1 0.310135 0.122105 0.308364
267 1 0.316216 0.00456555 0.306209
273 1 0.499105 0.00997211 0.246188
269 1 0.384073 0.000217788 0.247436
144 1 0.37568 0.0707162 0.182797
175 1 0.438743 0.133769 0.181454
270 1 0.436509 0.0691415 0.235673
272 1 0.380344 0.0652718 0.314133
301 1 0.373224 0.129184 0.243789
303 1 0.437185 0.120059 0.316603
305 1 0.495062 0.122584 0.250722
148 1 0.493762 0.0703723 0.182514
147 1 0.555132 0.00229531 0.185603
276 1 0.503994 0.0584305 0.313844
152 1 0.625503 0.0712222 0.182925
179 1 0.564569 0.123968 0.184759
274 1 0.569237 0.0610354 0.247504
280 1 0.629743 0.0629336 0.309739
307 1 0.56563 0.129756 0.306172
309 1 0.62913 0.123242 0.25323
281 1 0.747218 0.00396904 0.250125
155 1 0.813087 0.00253837 0.196412
283 1 0.812299 0.0109066 0.31619
156 1 0.74391 0.0670888 0.190519
183 1 0.684164 0.126009 0.180956
187 1 0.807814 0.126769 0.189073
278 1 0.690184 0.0685558 0.257577
282 1 0.809024 0.0675577 0.252296
284 1 0.750274 0.0698333 0.3201
311 1 0.685946 0.131985 0.312882
313 1 0.740982 0.135896 0.252441
315 1 0.80625 0.128088 0.31386
151 1 0.682924 0.000817487 0.185213
279 1 0.689308 0.00113855 0.31504
285 1 0.872135 0.000415968 0.257827
132 1 0.999423 0.065671 0.197055
159 1 0.934174 0.00318601 0.188882
260 1 0.997122 0.0699007 0.320016
160 1 0.876468 0.0687117 0.192343
191 1 0.936258 0.131905 0.189741
286 1 0.930376 0.0588509 0.253288
288 1 0.870897 0.0705589 0.314562
317 1 0.875712 0.127353 0.244116
319 1 0.930524 0.129582 0.315306
287 1 0.940206 0.00301296 0.315358
257 1 0.996595 0.00350416 0.250593
168 1 0.127209 0.191558 0.189696
195 1 0.0660123 0.251225 0.196061
200 1 0.125058 0.309582 0.187085
290 1 0.0712299 0.184039 0.258893
296 1 0.127664 0.183216 0.314672
322 1 0.0667037 0.312513 0.256821
323 1 0.0642226 0.250456 0.311865
325 1 0.130138 0.250571 0.257949
328 1 0.119002 0.314136 0.320616
321 1 0.000721527 0.241674 0.253799
172 1 0.251115 0.18914 0.185656
199 1 0.19344 0.247326 0.189114
203 1 0.31503 0.255798 0.181041
204 1 0.249209 0.309581 0.191835
294 1 0.190447 0.177052 0.254652
298 1 0.302164 0.175845 0.252911
300 1 0.248631 0.186913 0.321318
326 1 0.183707 0.313727 0.250178
327 1 0.188983 0.246045 0.312051
329 1 0.249613 0.251089 0.251748
330 1 0.313827 0.317204 0.247669
331 1 0.311272 0.248405 0.308032
332 1 0.243707 0.309753 0.311956
176 1 0.371865 0.192375 0.187245
207 1 0.438288 0.255202 0.19267
208 1 0.37923 0.317742 0.189826
302 1 0.431498 0.186668 0.245193
304 1 0.378788 0.180091 0.312252
333 1 0.370331 0.250343 0.248901
334 1 0.447412 0.311098 0.252938
335 1 0.436785 0.247291 0.30907
336 1 0.378604 0.313373 0.311762
308 1 0.493547 0.183053 0.307382
340 1 0.499381 0.309883 0.316631
212 1 0.505823 0.302948 0.18548
180 1 0.503677 0.188953 0.180575
337 1 0.507168 0.247042 0.249991
184 1 0.633089 0.196016 0.187141
211 1 0.569013 0.248835 0.186554
216 1 0.631089 0.312309 0.184568
306 1 0.558406 0.184284 0.243431
312 1 0.623391 0.19324 0.311526
338 1 0.562591 0.314372 0.260251
339 1 0.562076 0.243118 0.318261
341 1 0.623631 0.256272 0.25304
344 1 0.627891 0.29938 0.323897
188 1 0.745594 0.188846 0.179614
215 1 0.703908 0.259117 0.187684
219 1 0.81558 0.253431 0.179238
220 1 0.764444 0.316499 0.190309
310 1 0.684419 0.195572 0.247556
314 1 0.815778 0.188495 0.250109
316 1 0.748454 0.194287 0.318858
342 1 0.692404 0.314329 0.252913
343 1 0.689102 0.252227 0.309533
345 1 0.760707 0.249191 0.24599
346 1 0.813306 0.324464 0.262143
347 1 0.801085 0.257967 0.31788
348 1 0.743556 0.315792 0.315912
196 1 0.999119 0.306968 0.194388
292 1 0.993512 0.181872 0.32329
164 1 0.998136 0.182228 0.193998
324 1 0.994198 0.31042 0.312565
192 1 0.863492 0.187834 0.183061
223 1 0.931013 0.242174 0.191254
224 1 0.87258 0.312959 0.186686
318 1 0.934218 0.191 0.2583
320 1 0.868164 0.193348 0.310307
349 1 0.862555 0.25335 0.247697
350 1 0.929198 0.310755 0.250597
351 1 0.934351 0.253789 0.313127
352 1 0.869143 0.314194 0.3249
227 1 0.0641682 0.376947 0.19514
232 1 0.121093 0.441191 0.189849
354 1 0.0649897 0.43891 0.249511
355 1 0.0670658 0.376731 0.319004
357 1 0.11907 0.376049 0.250163
360 1 0.125581 0.436376 0.321527
228 1 0.000452675 0.439151 0.183139
356 1 0.00718559 0.434872 0.314016
1285 1 0.126234 0.498334 0.252558
1163 1 0.311303 0.492584 0.19305
231 1 0.184628 0.370412 0.193416
235 1 0.303967 0.383904 0.189288
236 1 0.245148 0.445977 0.192844
358 1 0.186834 0.430916 0.256134
359 1 0.186173 0.373662 0.320578
361 1 0.244455 0.372797 0.255943
362 1 0.315268 0.43998 0.259447
363 1 0.313867 0.373648 0.314304
364 1 0.242189 0.42989 0.317176
1287 1 0.193393 0.498441 0.307844
1289 1 0.248281 0.49779 0.251756
1167 1 0.438262 0.493268 0.191139
239 1 0.4461 0.369281 0.191217
240 1 0.380109 0.430089 0.18568
365 1 0.372631 0.376715 0.254974
366 1 0.43981 0.438263 0.249753
367 1 0.439216 0.376651 0.308024
368 1 0.375136 0.435768 0.315129
244 1 0.499482 0.432883 0.19302
1295 1 0.438602 0.49781 0.302718
372 1 0.502235 0.440118 0.312916
369 1 0.506102 0.376853 0.257459
243 1 0.566916 0.3698 0.190502
248 1 0.625246 0.442098 0.18351
370 1 0.566183 0.440842 0.248823
371 1 0.570289 0.383304 0.315865
373 1 0.63593 0.372603 0.25175
376 1 0.629336 0.443156 0.312789
1171 1 0.562188 0.496071 0.186107
1305 1 0.75248 0.498055 0.251257
247 1 0.698181 0.368625 0.180705
251 1 0.81892 0.372814 0.186165
252 1 0.752613 0.438241 0.176517
374 1 0.684833 0.435744 0.232467
375 1 0.68918 0.371733 0.312853
377 1 0.748556 0.383661 0.2448
378 1 0.812586 0.43996 0.240946
379 1 0.807743 0.384511 0.318592
380 1 0.74848 0.441809 0.307478
1175 1 0.693097 0.498991 0.180175
1179 1 0.814064 0.498886 0.178295
1307 1 0.815758 0.495645 0.314288
1303 1 0.691868 0.498188 0.307526
1183 1 0.939527 0.499632 0.177487
1311 1 0.930977 0.496889 0.313792
353 1 0.993963 0.373152 0.246172
255 1 0.929462 0.375471 0.18908
256 1 0.878582 0.441149 0.187155
381 1 0.877812 0.375232 0.250494
382 1 0.940345 0.441483 0.247686
383 1 0.937163 0.381519 0.316678
384 1 0.877079 0.437654 0.309054
386 1 0.0671931 0.0560101 0.378639
392 1 0.139051 0.0511576 0.434894
419 1 0.0695381 0.118189 0.443757
421 1 0.129883 0.119813 0.377369
417 1 0.00906949 0.117649 0.380875
388 1 0.00158545 0.0617551 0.443217
518 1 0.195112 0.0541287 0.495823
390 1 0.186618 0.0585888 0.370251
394 1 0.317515 0.0620401 0.363414
396 1 0.251887 0.0595268 0.437592
423 1 0.190646 0.118318 0.434899
425 1 0.250026 0.113863 0.372133
427 1 0.308295 0.117554 0.438646
395 1 0.318794 0.00149328 0.434646
398 1 0.442448 0.060856 0.373163
400 1 0.385455 0.0581187 0.432899
429 1 0.368808 0.120165 0.373746
431 1 0.432768 0.130943 0.437007
525 1 0.382653 0.000477807 0.488197
403 1 0.559799 0.00729634 0.438625
405 1 0.624702 0.00834647 0.370963
404 1 0.501251 0.0755657 0.437406
433 1 0.501471 0.12243 0.372744
402 1 0.562954 0.0681385 0.378609
408 1 0.624553 0.0661749 0.43386
435 1 0.568164 0.125738 0.439739
437 1 0.628638 0.117937 0.376083
401 1 0.504656 0.00313212 0.370533
565 1 0.633412 0.122192 0.496744
529 1 0.500518 0.00631837 0.499628
533 1 0.628427 0.00274921 0.499927
409 1 0.75091 0.00172483 0.375486
406 1 0.691995 0.0642952 0.378951
410 1 0.8104 0.0713467 0.375642
412 1 0.748857 0.0616365 0.440691
439 1 0.689425 0.129481 0.440864
441 1 0.7507 0.132929 0.379265
443 1 0.810692 0.129719 0.443751
569 1 0.749629 0.135791 0.492733
415 1 0.938783 0.00664016 0.436108
414 1 0.93898 0.0619269 0.37646
416 1 0.87034 0.0612477 0.432591
445 1 0.870868 0.129988 0.376086
447 1 0.937757 0.121259 0.436445
573 1 0.880569 0.121708 0.495782
418 1 0.0678268 0.185727 0.377017
424 1 0.133697 0.182642 0.442668
450 1 0.0612166 0.316289 0.386841
451 1 0.0629657 0.244691 0.447917
453 1 0.122826 0.249759 0.376897
456 1 0.126958 0.30679 0.441484
578 1 0.0617769 0.314399 0.499922
585 1 0.24715 0.25447 0.495774
422 1 0.185527 0.175578 0.376049
426 1 0.307385 0.177534 0.367406
428 1 0.246243 0.186573 0.437882
454 1 0.181544 0.305131 0.375893
455 1 0.186571 0.250063 0.438609
457 1 0.241276 0.244043 0.374309
458 1 0.319711 0.308124 0.374438
459 1 0.315047 0.252458 0.435654
460 1 0.246682 0.308974 0.434999
554 1 0.309818 0.184274 0.493833
586 1 0.308402 0.309881 0.493538
590 1 0.440778 0.303222 0.491464
430 1 0.434555 0.182258 0.375137
432 1 0.368079 0.17678 0.434922
461 1 0.372862 0.242914 0.372612
462 1 0.434146 0.302745 0.374125
463 1 0.43704 0.24154 0.435763
464 1 0.377722 0.311453 0.431851
468 1 0.497617 0.309256 0.428502
558 1 0.432604 0.188541 0.494337
436 1 0.505639 0.176088 0.44679
465 1 0.500957 0.243346 0.376107
467 1 0.565361 0.244054 0.448013
434 1 0.569407 0.184443 0.382102
469 1 0.627587 0.246635 0.383947
472 1 0.621607 0.313744 0.444152
466 1 0.567878 0.309661 0.377673
440 1 0.624173 0.19177 0.44737
594 1 0.563425 0.311455 0.496941
473 1 0.746324 0.255658 0.381248
475 1 0.806953 0.258056 0.449302
470 1 0.688583 0.318958 0.382257
442 1 0.808859 0.186448 0.379792
438 1 0.684901 0.192229 0.378514
476 1 0.753549 0.311547 0.442639
471 1 0.687759 0.256555 0.439059
444 1 0.754227 0.201552 0.442306
474 1 0.804879 0.315776 0.377931
449 1 0.993778 0.250126 0.381954
420 1 0.993694 0.185156 0.442256
452 1 0.994702 0.317014 0.439571
446 1 0.928157 0.197524 0.373125
448 1 0.877711 0.1887 0.432458
478 1 0.931088 0.315133 0.377649
480 1 0.874909 0.315179 0.446565
477 1 0.867167 0.253173 0.381455
479 1 0.933798 0.253872 0.439222
574 1 0.935019 0.1899 0.496134
613 1 0.126726 0.379082 0.495474
482 1 0.0603128 0.443197 0.375878
483 1 0.0626419 0.381061 0.442962
488 1 0.116745 0.445854 0.435294
485 1 0.121161 0.374819 0.379627
489 1 0.25395 0.380146 0.382088
486 1 0.187077 0.434374 0.379135
491 1 0.318201 0.371148 0.426344
490 1 0.311875 0.437118 0.367354
492 1 0.254021 0.444172 0.430995
1419 1 0.318808 0.498721 0.430363
617 1 0.254469 0.382272 0.488775
487 1 0.190331 0.371155 0.439157
614 1 0.183885 0.438609 0.498256
618 1 0.316014 0.433347 0.484262
621 1 0.372627 0.3724 0.497651
493 1 0.383378 0.375253 0.372097
497 1 0.495735 0.373691 0.38187
495 1 0.44011 0.36954 0.433977
494 1 0.442609 0.436531 0.37838
496 1 0.383347 0.442892 0.436895
625 1 0.497939 0.373702 0.499305
622 1 0.43304 0.429084 0.496445
500 1 0.504724 0.436732 0.434012
498 1 0.564148 0.445574 0.374327
501 1 0.628375 0.373421 0.375721
499 1 0.560876 0.375579 0.4411
504 1 0.629138 0.435543 0.437651
1427 1 0.567187 0.495566 0.441552
626 1 0.570155 0.434641 0.496437
503 1 0.68564 0.37307 0.441296
502 1 0.692414 0.428278 0.375312
505 1 0.747178 0.372143 0.382875
508 1 0.755072 0.438569 0.441064
506 1 0.814096 0.446775 0.371958
507 1 0.812354 0.383609 0.435667
1431 1 0.687781 0.491296 0.435966
634 1 0.815141 0.444945 0.4955
1433 1 0.744385 0.498243 0.373916
481 1 0.997047 0.374586 0.376238
1409 1 0.99439 0.493779 0.382113
512 1 0.879944 0.436258 0.44217
511 1 0.934131 0.376557 0.442888
510 1 0.939314 0.43681 0.377536
509 1 0.872807 0.380066 0.3705
484 1 0.990854 0.434178 0.445828
520 1 0.132482 0.056368 0.567599
547 1 0.071341 0.118291 0.557594
642 1 0.0639421 0.0590157 0.618476
677 1 0.120767 0.120947 0.620026
673 1 0.00928493 0.118228 0.618831
549 1 0.129357 0.108455 0.501716
514 1 0.0606581 0.0576103 0.502378
641 1 0.00390032 0.000386603 0.618468
524 1 0.25138 0.0595999 0.563012
551 1 0.189389 0.128261 0.558067
555 1 0.311577 0.120246 0.563421
646 1 0.199596 0.0615075 0.62415
650 1 0.30853 0.0569248 0.616628
681 1 0.242778 0.128274 0.619754
522 1 0.308078 0.0641344 0.50413
553 1 0.250453 0.122139 0.504919
528 1 0.373923 0.0580183 0.562554
559 1 0.436285 0.128057 0.567539
654 1 0.434464 0.0581554 0.628362
685 1 0.376234 0.12335 0.624459
532 1 0.492372 0.0576537 0.567989
557 1 0.375399 0.123509 0.507263
526 1 0.434199 0.0647741 0.505771
689 1 0.505234 0.119591 0.624169
536 1 0.625574 0.0654157 0.564643
563 1 0.560882 0.124958 0.566258
658 1 0.564378 0.0526167 0.6352
693 1 0.620881 0.125838 0.625667
661 1 0.630164 0.00432539 0.618959
531 1 0.564625 0.000641042 0.566246
530 1 0.562806 0.0624388 0.504075
561 1 0.503837 0.115487 0.507806
540 1 0.764578 0.0697048 0.558381
567 1 0.697633 0.12637 0.56686
571 1 0.818794 0.128966 0.561506
662 1 0.692404 0.0604863 0.618962
666 1 0.811552 0.0650363 0.62531
697 1 0.755682 0.130289 0.626598
539 1 0.811378 0.00198473 0.564049
534 1 0.696217 0.0690999 0.509214
538 1 0.815356 0.0628043 0.503181
516 1 0.996026 0.0635124 0.561896
544 1 0.87277 0.0569664 0.560937
575 1 0.942122 0.122931 0.557163
670 1 0.937802 0.0649534 0.627623
701 1 0.877259 0.122896 0.623291
669 1 0.87724 0.00595232 0.632447
542 1 0.933977 0.0610287 0.50089
545 1 0.99922 0.12351 0.502583
552 1 0.123382 0.179836 0.560693
579 1 0.0549391 0.249495 0.564604
584 1 0.119182 0.309567 0.562076
674 1 0.0630856 0.188664 0.62423
706 1 0.0587391 0.307644 0.632963
709 1 0.118698 0.250905 0.632382
546 1 0.0583156 0.188455 0.505324
548 1 0.00196756 0.186885 0.562425
581 1 0.125719 0.243896 0.503333
577 1 0.00211722 0.252193 0.501316
556 1 0.252907 0.184089 0.561511
583 1 0.178328 0.243964 0.564208
587 1 0.307677 0.244391 0.564534
588 1 0.248035 0.316658 0.558609
678 1 0.177605 0.187232 0.621663
682 1 0.3083 0.183021 0.620478
710 1 0.179472 0.319183 0.620958
713 1 0.247173 0.247915 0.61462
714 1 0.315179 0.313143 0.618755
550 1 0.188996 0.187693 0.503548
582 1 0.178631 0.319056 0.502421
560 1 0.372241 0.183126 0.562002
591 1 0.439857 0.247456 0.559834
592 1 0.368014 0.314231 0.555691
686 1 0.438044 0.194053 0.621975
717 1 0.366778 0.242384 0.617639
718 1 0.429128 0.308821 0.616883
564 1 0.498269 0.187286 0.561255
589 1 0.366949 0.244998 0.502101
597 1 0.632653 0.258114 0.503969
596 1 0.50216 0.316501 0.557816
721 1 0.501131 0.251153 0.621794
568 1 0.62651 0.187484 0.566356
595 1 0.568106 0.25667 0.558005
600 1 0.630321 0.314333 0.567664
690 1 0.562565 0.192933 0.63089
722 1 0.555978 0.316641 0.626321
725 1 0.622161 0.250843 0.626002
562 1 0.566745 0.186899 0.505129
593 1 0.504731 0.246932 0.500378
566 1 0.68617 0.194159 0.507507
598 1 0.69136 0.318972 0.505415
572 1 0.754183 0.19045 0.563445
599 1 0.68817 0.254251 0.568889
603 1 0.810959 0.258746 0.567872
604 1 0.747655 0.313936 0.565451
694 1 0.687222 0.179485 0.624728
698 1 0.811415 0.197592 0.626582
726 1 0.693989 0.316558 0.632612
729 1 0.746441 0.252035 0.628461
730 1 0.811543 0.31721 0.629391
570 1 0.810517 0.195477 0.503345
602 1 0.816186 0.313732 0.502867
601 1 0.747997 0.257038 0.506109
705 1 0.995392 0.248578 0.62238
580 1 0.999885 0.313103 0.561486
576 1 0.877128 0.190332 0.56383
607 1 0.945047 0.253742 0.564145
608 1 0.887607 0.316513 0.565135
702 1 0.943052 0.179133 0.619518
733 1 0.88536 0.257348 0.621749
734 1 0.936526 0.315971 0.629526
605 1 0.872407 0.251276 0.503077
606 1 0.942238 0.306094 0.503111
1541 1 0.124039 0.496917 0.501833
611 1 0.0583257 0.370699 0.564345
616 1 0.126191 0.441022 0.557561
738 1 0.0622368 0.442388 0.630611
741 1 0.116889 0.375147 0.627585
610 1 0.0646782 0.436537 0.507656
1547 1 0.316033 0.49751 0.562502
1545 1 0.254697 0.497237 0.502756
615 1 0.188206 0.379853 0.564208
619 1 0.310142 0.376213 0.567693
620 1 0.255298 0.434379 0.561729
742 1 0.179733 0.43427 0.624703
745 1 0.250687 0.381134 0.621385
746 1 0.309682 0.441325 0.633992
1551 1 0.440367 0.488348 0.558494
623 1 0.44146 0.369674 0.558723
624 1 0.369632 0.430302 0.567591
749 1 0.379423 0.366315 0.62758
750 1 0.440848 0.435179 0.626056
753 1 0.497386 0.374858 0.619552
1549 1 0.381859 0.499121 0.501535
1677 1 0.376847 0.489747 0.626038
1685 1 0.613752 0.499158 0.632723
628 1 0.500509 0.436401 0.567535
627 1 0.563826 0.375209 0.561513
632 1 0.620634 0.433046 0.565807
754 1 0.557046 0.434693 0.633606
757 1 0.618946 0.37912 0.629962
1555 1 0.553314 0.498264 0.575869
1553 1 0.507024 0.493564 0.506305
1557 1 0.62575 0.495738 0.513247
629 1 0.629331 0.371671 0.506681
631 1 0.689504 0.379675 0.572694
635 1 0.810213 0.373842 0.565441
636 1 0.752874 0.437738 0.562063
758 1 0.674584 0.442161 0.626577
761 1 0.745941 0.38189 0.62817
762 1 0.810054 0.430806 0.622838
1559 1 0.690099 0.499345 0.56465
633 1 0.751519 0.377224 0.504097
1561 1 0.750893 0.498819 0.505447
630 1 0.684609 0.436676 0.501766
609 1 0.995034 0.376015 0.50706
737 1 0.997707 0.379018 0.634066
612 1 0.993757 0.438554 0.571105
639 1 0.935103 0.381943 0.569422
640 1 0.867752 0.439414 0.563665
765 1 0.874936 0.378194 0.623276
766 1 0.93425 0.440362 0.631145
637 1 0.87002 0.380834 0.507667
638 1 0.930018 0.436579 0.502554
643 1 0.0654935 0.00510844 0.678605
773 1 0.123847 0.00868498 0.752024
648 1 0.132781 0.0622276 0.677449
675 1 0.0651296 0.115879 0.686133
770 1 0.0607574 0.0600819 0.746825
776 1 0.121566 0.0606807 0.815977
803 1 0.0650735 0.130197 0.806
805 1 0.126864 0.119568 0.745938
772 1 0.00531154 0.0645393 0.811635
652 1 0.250868 0.0651833 0.694998
679 1 0.193725 0.116039 0.68423
683 1 0.312087 0.118134 0.676875
774 1 0.192439 0.0592989 0.750766
778 1 0.31397 0.0664772 0.749962
780 1 0.245111 0.0557268 0.810522
807 1 0.185434 0.121681 0.812034
809 1 0.24968 0.12676 0.746731
811 1 0.299526 0.115861 0.811181
775 1 0.180097 0.0053913 0.820043
656 1 0.368644 0.055332 0.684148
687 1 0.438836 0.129767 0.686854
782 1 0.436124 0.0726336 0.748047
784 1 0.364022 0.057688 0.806643
813 1 0.36654 0.117435 0.745043
815 1 0.436212 0.135468 0.801063
660 1 0.494369 0.0615541 0.681535
788 1 0.495001 0.069678 0.798511
783 1 0.434702 0.00210318 0.807104
789 1 0.627961 0.008086 0.754316
787 1 0.560549 0.00540985 0.815863
817 1 0.500518 0.130181 0.746166
664 1 0.630862 0.0610571 0.689599
691 1 0.563308 0.126436 0.685856
786 1 0.561355 0.0702487 0.749482
792 1 0.623475 0.0667451 0.814124
819 1 0.554524 0.129899 0.813432
821 1 0.627088 0.126611 0.753559
793 1 0.755324 0.00146496 0.750426
791 1 0.690745 0.0032831 0.814177
668 1 0.743744 0.0692725 0.68451
695 1 0.674656 0.130546 0.690206
699 1 0.807763 0.125826 0.684891
790 1 0.695393 0.06685 0.751011
794 1 0.813101 0.0721961 0.760031
796 1 0.755597 0.0601523 0.810878
823 1 0.687631 0.123056 0.806698
825 1 0.747246 0.129825 0.75067
827 1 0.814041 0.12924 0.821644
667 1 0.813049 0.00455409 0.690862
663 1 0.691036 0.0017942 0.683721
799 1 0.938433 0.00199386 0.811744
644 1 0.999889 0.0651257 0.680902
797 1 0.876195 0.0027061 0.746412
801 1 0.999624 0.125968 0.745551
672 1 0.866917 0.0704702 0.695819
703 1 0.939225 0.133525 0.6865
798 1 0.941107 0.0636447 0.750567
800 1 0.878959 0.0612945 0.810759
829 1 0.878543 0.133138 0.750409
831 1 0.947115 0.128634 0.817372
671 1 0.936513 0.00195513 0.686559
707 1 0.0567452 0.248598 0.689017
680 1 0.122937 0.185624 0.689622
712 1 0.123162 0.311729 0.691704
802 1 0.0695885 0.19251 0.750192
808 1 0.133884 0.184693 0.805449
834 1 0.0641591 0.31576 0.757684
835 1 0.0635511 0.251371 0.814009
837 1 0.126435 0.253905 0.74794
840 1 0.128036 0.316702 0.814438
836 1 0.000639547 0.309102 0.813216
676 1 0.00268381 0.18402 0.690894
804 1 0.00164552 0.184861 0.808198
684 1 0.243197 0.191789 0.6813
711 1 0.182258 0.253506 0.6772
715 1 0.30871 0.260312 0.679244
716 1 0.235495 0.317081 0.674815
806 1 0.189299 0.186615 0.745976
810 1 0.302744 0.186836 0.74181
812 1 0.246652 0.18206 0.808054
838 1 0.190376 0.311499 0.755955
839 1 0.190261 0.246346 0.806219
841 1 0.244945 0.250858 0.742392
842 1 0.301467 0.310569 0.745331
843 1 0.302684 0.250335 0.812656
844 1 0.251458 0.311519 0.814651
688 1 0.373627 0.185534 0.683486
719 1 0.424117 0.256062 0.682183
720 1 0.366802 0.316031 0.693674
814 1 0.42883 0.185742 0.740604
816 1 0.361828 0.184847 0.81001
845 1 0.364855 0.247258 0.749175
846 1 0.427591 0.307711 0.754493
847 1 0.427621 0.246682 0.809567
848 1 0.362402 0.313782 0.807214
692 1 0.492697 0.199556 0.680468
820 1 0.497495 0.184286 0.812129
724 1 0.486976 0.305186 0.683221
849 1 0.501108 0.248168 0.752954
852 1 0.500979 0.309542 0.815048
696 1 0.617485 0.200218 0.68941
723 1 0.552288 0.256883 0.694884
728 1 0.625372 0.319453 0.685955
818 1 0.569516 0.185711 0.751344
824 1 0.627585 0.184865 0.822775
850 1 0.563434 0.30781 0.758464
851 1 0.557167 0.241495 0.822429
853 1 0.623929 0.255181 0.759316
856 1 0.628589 0.323673 0.81331
700 1 0.744682 0.187628 0.684949
727 1 0.68346 0.252739 0.692315
731 1 0.815175 0.255491 0.69226
732 1 0.751456 0.319781 0.693443
822 1 0.687066 0.190573 0.745839
826 1 0.803111 0.189799 0.743831
828 1 0.743736 0.189489 0.812922
854 1 0.695406 0.319635 0.755151
855 1 0.687462 0.248887 0.815172
857 1 0.746936 0.254446 0.753419
858 1 0.807485 0.316472 0.744502
859 1 0.821163 0.247643 0.811978
860 1 0.751906 0.317285 0.814434
708 1 0.998732 0.317619 0.696324
833 1 0.994911 0.248358 0.744842
704 1 0.869949 0.186584 0.682823
735 1 0.936199 0.246848 0.68608
736 1 0.878651 0.320177 0.685767
830 1 0.932131 0.190985 0.756269
832 1 0.878361 0.191364 0.821104
861 1 0.873528 0.24938 0.752941
862 1 0.935368 0.317579 0.757322
863 1 0.933071 0.251271 0.818414
864 1 0.872532 0.323961 0.815274
1797 1 0.119819 0.495765 0.749979
739 1 0.0557871 0.376562 0.696521
744 1 0.117949 0.441948 0.686819
866 1 0.0613815 0.438576 0.756331
867 1 0.0628723 0.370227 0.82298
869 1 0.132396 0.375497 0.75301
872 1 0.117499 0.440129 0.815094
1795 1 0.0590278 0.495924 0.821515
1801 1 0.254104 0.498527 0.763374
1671 1 0.186614 0.497783 0.687088
1803 1 0.315553 0.491356 0.818512
743 1 0.180321 0.37717 0.680092
747 1 0.304786 0.375852 0.69046
748 1 0.2383 0.433113 0.68472
870 1 0.181253 0.43863 0.750149
871 1 0.177135 0.382205 0.817079
873 1 0.237605 0.375871 0.754876
874 1 0.302595 0.432506 0.75101
875 1 0.303816 0.381996 0.81484
876 1 0.244151 0.436813 0.818081
1675 1 0.302741 0.493843 0.69488
1805 1 0.374363 0.499774 0.753094
751 1 0.434987 0.368721 0.695488
752 1 0.364937 0.436383 0.689937
877 1 0.367733 0.37635 0.754262
878 1 0.433026 0.434524 0.750872
879 1 0.441409 0.374049 0.810592
880 1 0.375868 0.441449 0.814873
756 1 0.497775 0.438367 0.69702
1807 1 0.441531 0.498731 0.811072
1809 1 0.498251 0.499382 0.749514
1679 1 0.438488 0.499836 0.696106
1683 1 0.561115 0.499278 0.697419
881 1 0.500171 0.363111 0.756621
884 1 0.500866 0.433355 0.807968
755 1 0.558252 0.369765 0.698138
760 1 0.621461 0.433296 0.692645
882 1 0.562465 0.438846 0.7568
883 1 0.567935 0.374283 0.816354
885 1 0.621531 0.373604 0.749323
888 1 0.629088 0.434371 0.815309
1813 1 0.626462 0.495017 0.758068
1815 1 0.688872 0.499893 0.81742
759 1 0.684132 0.380503 0.686143
763 1 0.816036 0.375035 0.69015
764 1 0.754122 0.446274 0.686556
886 1 0.695129 0.43908 0.748738
887 1 0.692105 0.376315 0.810285
889 1 0.756756 0.381363 0.747136
890 1 0.815592 0.444683 0.750149
891 1 0.814312 0.377616 0.812338
892 1 0.75675 0.439796 0.817781
1823 1 0.926352 0.498269 0.813468
865 1 0.998336 0.373242 0.757999
868 1 0.994865 0.436142 0.810721
740 1 0.996874 0.431382 0.698683
1695 1 0.937526 0.49618 0.691932
767 1 0.941544 0.37358 0.697815
768 1 0.868801 0.448728 0.68437
893 1 0.877606 0.382124 0.748619
894 1 0.927437 0.442764 0.750656
895 1 0.928803 0.382487 0.817631
896 1 0.865849 0.442888 0.817803
1793 1 0.992715 0.49394 0.750044
899 1 0.0612795 0.0050437 0.93913
901 1 0.122834 0.00619409 0.881508
898 1 0.0555217 0.0627826 0.88011
904 1 0.127045 0.0695294 0.942327
931 1 0.0675366 0.130796 0.943739
933 1 0.121583 0.121467 0.885682
900 1 0.000239848 0.0672502 0.943459
929 1 0.00657832 0.129185 0.874615
5 1 0.124175 0.00932353 0.999299
903 1 0.185749 0.00762248 0.944161
10 1 0.318646 0.0613694 0.996619
902 1 0.182632 0.072281 0.874397
906 1 0.306477 0.0550451 0.865734
908 1 0.255825 0.056344 0.939216
935 1 0.191646 0.128303 0.937916
937 1 0.252892 0.127557 0.873589
939 1 0.316603 0.121751 0.930763
913 1 0.49337 0.00747771 0.87778
909 1 0.371146 0.00202364 0.876048
910 1 0.425591 0.0642587 0.868861
912 1 0.37055 0.0631489 0.933092
941 1 0.370474 0.130808 0.866748
943 1 0.436461 0.12817 0.931482
916 1 0.493539 0.0632454 0.941896
945 1 0.494719 0.123936 0.879313
911 1 0.436892 0.00249549 0.937036
45 1 0.372443 0.132167 0.986635
915 1 0.555217 0.000179435 0.936282
18 1 0.566157 0.0574414 0.999473
914 1 0.554772 0.0631903 0.879396
920 1 0.622796 0.0520015 0.936866
947 1 0.557753 0.112002 0.942524
949 1 0.618704 0.121559 0.881963
53 1 0.621488 0.12342 0.999314
923 1 0.81448 0.00346725 0.943919
918 1 0.689557 0.067372 0.873443
922 1 0.806945 0.0559852 0.881171
924 1 0.750383 0.0641593 0.938431
951 1 0.684907 0.111254 0.948361
953 1 0.748408 0.120975 0.869739
955 1 0.818732 0.115409 0.941632
897 1 0.99564 0.000667212 0.874335
925 1 0.882378 0.0030247 0.877454
926 1 0.945754 0.0665551 0.876951
928 1 0.881867 0.0599224 0.938075
957 1 0.880393 0.12767 0.875955
959 1 0.934487 0.127987 0.94782
930 1 0.0691058 0.193806 0.876275
936 1 0.12738 0.193739 0.941599
962 1 0.0612751 0.309793 0.877416
963 1 0.0657896 0.251527 0.940751
965 1 0.135118 0.250128 0.872485
968 1 0.133897 0.309368 0.934351
961 1 0.000968458 0.246137 0.880344
964 1 0.0005994 0.310432 0.941728
65 1 0.00625446 0.249008 0.999912
66 1 0.060373 0.311345 0.998664
42 1 0.312157 0.187783 0.995361
934 1 0.188828 0.183102 0.869915
938 1 0.305607 0.19726 0.876845
940 1 0.250092 0.19753 0.93661
966 1 0.187973 0.311414 0.868865
967 1 0.19433 0.255331 0.933841
969 1 0.243571 0.250497 0.871267
970 1 0.307712 0.312554 0.876334
971 1 0.312475 0.25396 0.946099
972 1 0.248611 0.312668 0.938412
70 1 0.184843 0.31044 0.997095
38 1 0.19642 0.181047 0.999136
78 1 0.437051 0.309789 0.997555
973 1 0.364853 0.25026 0.871449
944 1 0.375552 0.190308 0.927104
942 1 0.434757 0.185244 0.87061
976 1 0.368445 0.313169 0.933939
975 1 0.431901 0.249903 0.930762
974 1 0.428396 0.309201 0.876241
980 1 0.496028 0.30875 0.92984
948 1 0.499708 0.182457 0.939856
977 1 0.490724 0.246779 0.871567
77 1 0.372748 0.247825 0.997365
46 1 0.437888 0.193998 0.993139
81 1 0.503062 0.251655 0.997471
50 1 0.575688 0.189328 0.995369
85 1 0.632944 0.249769 0.995987
979 1 0.55524 0.246817 0.930807
981 1 0.62727 0.245657 0.880664
978 1 0.567346 0.30684 0.867227
984 1 0.624846 0.30791 0.936352
946 1 0.556056 0.183486 0.878612
952 1 0.637268 0.185947 0.935206
82 1 0.566391 0.305975 0.995522
86 1 0.684082 0.315988 0.995014
983 1 0.690242 0.246219 0.93294
954 1 0.80881 0.179819 0.88401
982 1 0.68289 0.313519 0.881353
986 1 0.816286 0.319758 0.881813
988 1 0.748055 0.311906 0.934952
950 1 0.689995 0.183515 0.87065
985 1 0.750251 0.245582 0.877467
987 1 0.812794 0.253733 0.939021
956 1 0.741299 0.175413 0.945861
89 1 0.745369 0.256281 0.99555
90 1 0.807598 0.315304 0.996842
932 1 0.999088 0.184568 0.937798
960 1 0.875951 0.189843 0.930117
991 1 0.940705 0.254942 0.941136
958 1 0.939258 0.194667 0.877338
992 1 0.879362 0.308598 0.946532
989 1 0.874271 0.26029 0.878486
990 1 0.944903 0.319625 0.877512
1923 1 0.0629493 0.498388 0.949693
995 1 0.0643745 0.367894 0.937658
1000 1 0.123494 0.435176 0.944016
997 1 0.123709 0.37618 0.885466
994 1 0.0522784 0.437886 0.882706
98 1 0.060169 0.431699 0.999102
1931 1 0.310373 0.499541 0.942233
1002 1 0.319497 0.435279 0.880218
1003 1 0.307242 0.377449 0.942893
1001 1 0.246309 0.382931 0.876187
998 1 0.173894 0.437356 0.877726
1004 1 0.249458 0.440587 0.941329
999 1 0.191593 0.378553 0.941222
1935 1 0.44196 0.491107 0.931039
1008 1 0.376621 0.437559 0.942794
1005 1 0.376253 0.376141 0.868145
110 1 0.435379 0.439735 0.999144
1006 1 0.434894 0.43876 0.866698
1007 1 0.432921 0.376281 0.928174
109 1 0.379448 0.370589 0.993874
1013 1 0.625886 0.377723 0.875556
1009 1 0.504922 0.373442 0.876519
1012 1 0.504505 0.438053 0.929542
1010 1 0.562949 0.441458 0.872225
1011 1 0.564553 0.37875 0.940062
1016 1 0.617203 0.438811 0.931463
1045 1 0.620271 0.498017 0.992266
121 1 0.740964 0.378555 0.996591
118 1 0.676781 0.433312 0.998123
122 1 0.812088 0.440898 0.998616
1017 1 0.747302 0.377329 0.877122
1015 1 0.681871 0.380474 0.937883
1018 1 0.806437 0.437115 0.87789
1020 1 0.744073 0.444109 0.947695
1014 1 0.688398 0.450115 0.88317
1019 1 0.810499 0.379521 0.947365
1947 1 0.820099 0.499061 0.935386
1024 1 0.88645 0.444743 0.940644
1023 1 0.939616 0.379793 0.945378
1022 1 0.936705 0.440042 0.874711
1021 1 0.877186 0.376375 0.889309
1921 1 0.994599 0.498257 0.874248
996 1 0.996757 0.445371 0.942683
993 1 0.996594 0.380287 0.878188
125 1 0.872365 0.379008 0.999838
1032 1 0.123337 0.560573 0.0693952
1059 1 0.0548818 0.624202 0.0654376
1154 1 0.0585238 0.558605 0.12532
1189 1 0.119351 0.629147 0.117175
1061 1 0.123379 0.624243 0.00540681
1029 1 0.134463 0.501693 0.00449949
1026 1 0.0559648 0.565729 0.009638
1036 1 0.250205 0.563731 0.0630249
1063 1 0.185957 0.619091 0.0628658
1067 1 0.307039 0.629187 0.0689701
1158 1 0.191133 0.558362 0.123051
1162 1 0.313135 0.565336 0.122048
1193 1 0.24994 0.627089 0.123658
1161 1 0.256975 0.505698 0.129468
1065 1 0.253662 0.624555 0.00137982
1030 1 0.18577 0.567409 0.00300649
1035 1 0.317182 0.500085 0.066397
1034 1 0.31109 0.554253 0.00710569
1033 1 0.242567 0.504977 0.0047797
1040 1 0.381745 0.555144 0.0614741
1071 1 0.435204 0.624965 0.0560759
1166 1 0.437069 0.558318 0.120723
1197 1 0.379136 0.627755 0.118923
1044 1 0.497001 0.558807 0.061636
1069 1 0.368525 0.617787 0.00365692
1038 1 0.4418 0.561101 0.00529595
1201 1 0.500693 0.624859 0.123562
1048 1 0.626564 0.565937 0.0585019
1075 1 0.563995 0.627535 0.0597615
1170 1 0.558846 0.561432 0.121288
1205 1 0.622577 0.635273 0.115532
1173 1 0.620589 0.503645 0.118793
1046 1 0.691165 0.559063 0.00376413
1052 1 0.754942 0.568985 0.0640089
1079 1 0.693955 0.625543 0.0581288
1083 1 0.816361 0.628913 0.0642424
1174 1 0.682121 0.560094 0.118394
1178 1 0.816008 0.57044 0.125643
1209 1 0.755295 0.630581 0.124361
1050 1 0.822138 0.567503 0.00767455
1049 1 0.757514 0.506516 0.000234039
1051 1 0.81748 0.503434 0.0642058
1028 1 0.988746 0.565196 0.0660308
1185 1 0.99394 0.620864 0.124848
1056 1 0.877103 0.559194 0.0600475
1087 1 0.934302 0.631304 0.0579823
1182 1 0.930649 0.562194 0.125349
1213 1 0.875025 0.620688 0.114715
1085 1 0.880228 0.629998 0.00311791
1025 1 0.992823 0.503793 0.00186612
1054 1 0.937423 0.565909 0.00549838
1057 1 0.994527 0.625972 0.00277323
1090 1 0.0647806 0.817089 0.00684543
1064 1 0.125945 0.688864 0.0629464
1091 1 0.0629295 0.752662 0.0743194
1096 1 0.118986 0.816714 0.0675398
1186 1 0.0604977 0.685292 0.122167
1218 1 0.0586507 0.816509 0.1247
1221 1 0.126137 0.742291 0.124969
1217 1 0.00392153 0.755838 0.127758
1092 1 0.00128865 0.818743 0.0616656
1093 1 0.122084 0.748164 0.00498665
1089 1 0.00847242 0.75096 0.00647201
1068 1 0.250774 0.68441 0.0592011
1095 1 0.187939 0.74992 0.0618228
1099 1 0.317774 0.750041 0.0519665
1100 1 0.255952 0.807776 0.0604752
1190 1 0.190856 0.690892 0.119849
1194 1 0.314123 0.684161 0.130358
1222 1 0.188209 0.812017 0.12775
1225 1 0.249998 0.750498 0.120827
1226 1 0.321948 0.817074 0.128608
1062 1 0.184251 0.687674 0.00291969
1072 1 0.36816 0.683176 0.0606539
1103 1 0.439643 0.743757 0.0644488
1104 1 0.383221 0.810068 0.061197
1198 1 0.443413 0.682231 0.118436
1229 1 0.375144 0.749471 0.118775
1230 1 0.447237 0.809305 0.122678
1108 1 0.496283 0.808364 0.0590627
1102 1 0.436705 0.809285 0.00175194
1233 1 0.502945 0.746539 0.119987
1076 1 0.502189 0.68263 0.0557975
1080 1 0.623132 0.694751 0.060107
1107 1 0.562579 0.750535 0.0553849
1112 1 0.63305 0.810515 0.0524618
1202 1 0.565281 0.695815 0.118993
1234 1 0.560725 0.815786 0.119899
1237 1 0.625272 0.762301 0.119921
1084 1 0.755246 0.685803 0.0590482
1111 1 0.694456 0.753428 0.0593082
1115 1 0.815872 0.7506 0.066954
1116 1 0.753734 0.810691 0.0594453
1206 1 0.690584 0.685912 0.117782
1210 1 0.818246 0.689925 0.121913
1238 1 0.688487 0.814942 0.123439
1241 1 0.748962 0.75308 0.121402
1242 1 0.820529 0.819188 0.119622
1082 1 0.817592 0.690674 0.00643256
1113 1 0.760993 0.748965 0.000950661
1078 1 0.684794 0.682301 0.00119638
1086 1 0.93881 0.694332 0.00638842
1060 1 0.997063 0.690469 0.0618444
1117 1 0.878951 0.750338 0.00249407
1088 1 0.879684 0.688094 0.0655788
1119 1 0.943897 0.749302 0.0646652
1120 1 0.879587 0.814392 0.0644169
1214 1 0.936081 0.676247 0.129615
1245 1 0.882225 0.751399 0.121672
1246 1 0.947709 0.816244 0.117453
1118 1 0.940068 0.812634 0.00438242
1123 1 0.0677413 0.878388 0.0655694
1128 1 0.125563 0.955178 0.057296
1250 1 0.0679797 0.940717 0.12179
1253 1 0.127527 0.876221 0.124827
1249 1 0.00150694 0.875059 0.127521
1124 1 0.00854869 0.941952 0.0641185
1125 1 0.129905 0.875415 0.00513504
1127 1 0.18624 0.875684 0.069364
1131 1 0.309683 0.871316 0.0538618
1132 1 0.250094 0.936736 0.0573411
1254 1 0.188725 0.940444 0.117676
1257 1 0.255306 0.869266 0.128162
1258 1 0.314723 0.937474 0.118079
11 1 0.316806 0.999613 0.0581834
1126 1 0.186984 0.942793 0.008099
1130 1 0.316275 0.93743 0.000744087
1135 1 0.438513 0.881371 0.0595115
1136 1 0.376366 0.935899 0.06036
1261 1 0.381772 0.875393 0.116078
1262 1 0.438994 0.937151 0.11687
1140 1 0.49879 0.938957 0.0653509
141 1 0.377434 0.993892 0.123186
1137 1 0.496306 0.87282 0.00644141
1265 1 0.505732 0.883797 0.129153
1139 1 0.567206 0.878607 0.0609333
1144 1 0.622589 0.935601 0.0677653
1266 1 0.563319 0.946433 0.125065
1269 1 0.625858 0.870015 0.128669
21 1 0.614138 0.995617 0.00961819
1141 1 0.622847 0.877817 0.0020733
23 1 0.676844 0.998508 0.0640805
153 1 0.748202 0.9955 0.129274
1143 1 0.695188 0.875476 0.0573997
1147 1 0.817315 0.880493 0.0615676
1148 1 0.746437 0.942606 0.0583016
1270 1 0.683664 0.937222 0.129664
1273 1 0.753847 0.872635 0.118503
1274 1 0.809046 0.936992 0.125758
27 1 0.819132 0.997511 0.0685613
31 1 0.940555 0.996699 0.0665835
1151 1 0.947481 0.88064 0.0694714
1152 1 0.881046 0.933152 0.068424
1277 1 0.877047 0.880449 0.13084
1278 1 0.937095 0.942219 0.128505
157 1 0.877427 0.99894 0.126564
1283 1 0.062645 0.507792 0.312086
1281 1 0.00119332 0.500831 0.252934
1160 1 0.125633 0.571902 0.177126
1187 1 0.062069 0.620007 0.178533
1282 1 0.067425 0.564089 0.247112
1288 1 0.127493 0.56189 0.310566
1315 1 0.0628223 0.625265 0.309546
1317 1 0.121509 0.625784 0.248347
1156 1 0.00129715 0.562967 0.189192
1155 1 0.0727329 0.507684 0.189997
1291 1 0.310965 0.505053 0.314607
1159 1 0.18801 0.504046 0.190245
1164 1 0.248402 0.566876 0.191218
1191 1 0.184548 0.626177 0.176827
1195 1 0.310226 0.621098 0.188592
1286 1 0.18882 0.56143 0.247181
1290 1 0.306301 0.555207 0.248351
1292 1 0.250118 0.569428 0.315245
1319 1 0.182921 0.623097 0.31689
1321 1 0.243073 0.628021 0.245438
1323 1 0.310669 0.631173 0.312782
1293 1 0.376398 0.501478 0.250292
1168 1 0.374456 0.557056 0.183423
1199 1 0.431745 0.619663 0.180828
1294 1 0.429403 0.565352 0.246034
1296 1 0.378594 0.566707 0.311529
1325 1 0.368455 0.618721 0.246048
1327 1 0.442721 0.623158 0.315511
1300 1 0.496183 0.556978 0.308737
1329 1 0.500988 0.618486 0.249717
1172 1 0.501155 0.566548 0.182292
1176 1 0.631566 0.567055 0.17925
1203 1 0.567414 0.622867 0.183454
1298 1 0.565781 0.561374 0.243871
1304 1 0.623582 0.562956 0.313042
1331 1 0.5585 0.627329 0.304127
1333 1 0.630399 0.624742 0.244752
1297 1 0.504477 0.500712 0.243518
1299 1 0.564996 0.504687 0.312796
1301 1 0.63012 0.503788 0.249582
1180 1 0.748115 0.561939 0.174537
1207 1 0.695007 0.629948 0.180143
1211 1 0.808476 0.625184 0.188917
1302 1 0.694001 0.559756 0.24344
1306 1 0.804969 0.559161 0.24614
1308 1 0.749068 0.566021 0.305594
1335 1 0.6888 0.629895 0.300414
1337 1 0.750272 0.624721 0.243703
1339 1 0.809607 0.624482 0.310355
1309 1 0.874782 0.500318 0.247434
1284 1 0.999815 0.558539 0.316464
1313 1 0.999068 0.623475 0.253647
1184 1 0.872906 0.562584 0.188929
1215 1 0.934197 0.621704 0.191057
1310 1 0.937004 0.557836 0.246734
1312 1 0.869935 0.558739 0.306619
1341 1 0.866283 0.625832 0.248973
1343 1 0.930625 0.623874 0.303976
1192 1 0.121315 0.682144 0.182397
1219 1 0.0658642 0.74876 0.181109
1224 1 0.123441 0.806822 0.185771
1314 1 0.067567 0.680242 0.244918
1320 1 0.121387 0.684503 0.315661
1346 1 0.0657647 0.811514 0.251259
1347 1 0.0682318 0.749577 0.310185
1349 1 0.128515 0.748702 0.245367
1352 1 0.129667 0.803321 0.30593
1316 1 0.00889064 0.691355 0.309281
1348 1 0.00553849 0.805411 0.308502
1188 1 0.00555038 0.683911 0.189657
1220 1 0.00562829 0.815705 0.187104
1345 1 0.00252701 0.749641 0.251122
1196 1 0.250136 0.680218 0.18203
1223 1 0.187319 0.742312 0.186541
1227 1 0.303428 0.746647 0.181104
1228 1 0.256744 0.817229 0.194669
1318 1 0.182045 0.684368 0.246889
1322 1 0.309717 0.690109 0.247699
1324 1 0.24817 0.679983 0.311438
1350 1 0.187132 0.808167 0.242536
1351 1 0.194924 0.749163 0.308212
1353 1 0.254505 0.757832 0.252958
1354 1 0.32317 0.817407 0.248386
1355 1 0.309241 0.749002 0.320474
1356 1 0.253366 0.812762 0.31526
1200 1 0.375827 0.686961 0.188402
1231 1 0.434284 0.740784 0.178755
1232 1 0.378772 0.809647 0.18903
1326 1 0.429806 0.676622 0.246093
1328 1 0.373572 0.681097 0.306275
1357 1 0.376078 0.745004 0.247799
1358 1 0.44257 0.804676 0.235684
1359 1 0.438002 0.742709 0.305655
1360 1 0.381418 0.803988 0.316611
1364 1 0.498394 0.808406 0.309765
1332 1 0.497658 0.686486 0.301762
1204 1 0.496164 0.688917 0.188828
1361 1 0.502942 0.750365 0.247656
1236 1 0.503074 0.814209 0.181765
1208 1 0.626205 0.69269 0.17999
1235 1 0.556636 0.749701 0.186022
1330 1 0.561246 0.691831 0.245483
1336 1 0.622818 0.689496 0.311197
1363 1 0.566052 0.7559 0.305096
1365 1 0.624614 0.74734 0.247743
1368 1 0.632085 0.809917 0.308462
1240 1 0.636376 0.805363 0.195089
1362 1 0.567843 0.812755 0.249564
1212 1 0.743782 0.698521 0.184747
1239 1 0.688726 0.750976 0.186648
1243 1 0.813274 0.749533 0.181944
1244 1 0.748021 0.812946 0.181916
1334 1 0.687421 0.696684 0.25018
1338 1 0.805068 0.682694 0.250297
1340 1 0.745689 0.687716 0.30379
1366 1 0.698684 0.807087 0.252646
1367 1 0.685216 0.747834 0.311093
1369 1 0.762617 0.754917 0.250067
1370 1 0.813176 0.813915 0.254681
1371 1 0.814587 0.750045 0.318213
1372 1 0.746236 0.816894 0.313889
1216 1 0.873006 0.688916 0.185072
1247 1 0.938975 0.753552 0.182833
1248 1 0.874625 0.812647 0.184601
1342 1 0.936253 0.688833 0.242172
1344 1 0.872415 0.689927 0.295685
1373 1 0.871555 0.752639 0.251582
1374 1 0.945465 0.809778 0.243781
1375 1 0.939825 0.740297 0.30529
1376 1 0.880744 0.815444 0.316206
1251 1 0.0629316 0.883502 0.182283
1256 1 0.129947 0.943632 0.18093
1378 1 0.0680711 0.935206 0.249306
1379 1 0.0621226 0.872506 0.310709
1381 1 0.12377 0.875542 0.243287
1384 1 0.130905 0.933322 0.313781
1377 1 0.00271139 0.879014 0.245199
261 1 0.129025 0.991171 0.256219
135 1 0.192878 0.996585 0.185668
1255 1 0.190273 0.877806 0.17825
1259 1 0.318247 0.877422 0.184444
1260 1 0.264307 0.939578 0.17786
1382 1 0.188791 0.933783 0.239815
1383 1 0.190964 0.870725 0.306764
1385 1 0.251604 0.875399 0.248981
1386 1 0.310184 0.938797 0.248039
1387 1 0.313521 0.873651 0.311153
1388 1 0.252573 0.932599 0.312081
265 1 0.246237 0.986803 0.249918
263 1 0.197394 0.996624 0.309041
1263 1 0.443416 0.879912 0.182599
1264 1 0.374795 0.936869 0.181855
1389 1 0.381421 0.872497 0.249555
1390 1 0.436095 0.931075 0.245667
1391 1 0.433585 0.866596 0.314556
1392 1 0.383266 0.933989 0.306872
1268 1 0.499071 0.938318 0.183986
1393 1 0.499808 0.869319 0.251289
271 1 0.441281 0.999322 0.315895
143 1 0.443972 0.997772 0.187541
275 1 0.569112 0.998609 0.312686
277 1 0.630301 0.997724 0.255982
1396 1 0.500789 0.94428 0.304847
1267 1 0.563561 0.869175 0.192149
1272 1 0.623925 0.935229 0.185471
1394 1 0.571528 0.940555 0.248058
1395 1 0.565749 0.878778 0.308217
1397 1 0.628691 0.86774 0.247956
1400 1 0.630768 0.932615 0.316241
1271 1 0.685567 0.874085 0.188878
1275 1 0.81288 0.880242 0.190026
1276 1 0.742468 0.935408 0.190044
1398 1 0.69 0.936625 0.261557
1399 1 0.686085 0.872537 0.312539
1401 1 0.755178 0.875745 0.253877
1402 1 0.804275 0.937611 0.258994
1403 1 0.815481 0.87646 0.322073
1404 1 0.748483 0.941403 0.321674
1380 1 0.992871 0.935847 0.309239
1252 1 0.99976 0.938809 0.191236
1279 1 0.932064 0.869675 0.192263
1280 1 0.866105 0.936473 0.196878
1405 1 0.879528 0.871521 0.252609
1406 1 0.931259 0.937522 0.252014
1407 1 0.945321 0.871211 0.310519
1408 1 0.870071 0.940235 0.311838
1411 1 0.0554653 0.5025 0.443998
1413 1 0.12576 0.505361 0.374534
1410 1 0.057914 0.565158 0.384669
1416 1 0.124348 0.556287 0.440408
1443 1 0.0596079 0.630921 0.439129
1445 1 0.121513 0.624217 0.378413
1412 1 0.00306583 0.564244 0.442132
1538 1 0.0622683 0.572828 0.494632
1441 1 0.00255037 0.623518 0.370752
1573 1 0.131707 0.618626 0.496666
1417 1 0.255329 0.502375 0.371064
1449 1 0.250743 0.622499 0.373701
1451 1 0.306974 0.62328 0.441374
1420 1 0.245607 0.55697 0.433311
1447 1 0.192644 0.621207 0.437126
1414 1 0.189204 0.558252 0.373546
1418 1 0.312037 0.561892 0.378326
1415 1 0.187586 0.500534 0.438394
1546 1 0.310912 0.563565 0.497547
1423 1 0.442588 0.503032 0.441072
1421 1 0.378256 0.503607 0.369805
1581 1 0.372608 0.619 0.490808
1550 1 0.437702 0.559319 0.498891
1422 1 0.44423 0.559474 0.377783
1455 1 0.440558 0.623515 0.440004
1424 1 0.374968 0.561825 0.436399
1453 1 0.37836 0.622076 0.377875
1585 1 0.494005 0.623079 0.49968
1425 1 0.503399 0.502466 0.373554
1428 1 0.505261 0.562077 0.444596
1457 1 0.502611 0.617591 0.378046
1461 1 0.626716 0.625587 0.365975
1426 1 0.56657 0.555145 0.383209
1432 1 0.627727 0.561702 0.443909
1459 1 0.569385 0.62493 0.432676
1429 1 0.630111 0.501032 0.376533
1435 1 0.811004 0.507789 0.441036
1590 1 0.680059 0.664201 0.492166
1430 1 0.687126 0.566108 0.367509
1463 1 0.685381 0.623455 0.426036
1436 1 0.746217 0.555463 0.439463
1434 1 0.811435 0.565633 0.369177
1465 1 0.745865 0.624908 0.369557
1467 1 0.808891 0.622248 0.433172
1562 1 0.809755 0.567073 0.4958
1593 1 0.748606 0.62637 0.496593
1437 1 0.881821 0.508052 0.378138
1439 1 0.933556 0.505516 0.446432
1440 1 0.877457 0.572314 0.440259
1469 1 0.870947 0.635785 0.363719
1471 1 0.942682 0.622367 0.437492
1438 1 0.932776 0.576708 0.369848
1602 1 0.0665041 0.814699 0.497904
1480 1 0.121618 0.820358 0.438743
1477 1 0.128411 0.749664 0.374391
1442 1 0.0615653 0.68764 0.377119
1475 1 0.0642824 0.744337 0.438189
1448 1 0.128259 0.686715 0.443631
1474 1 0.0615422 0.823333 0.371228
1476 1 0.0057104 0.815158 0.442266
1473 1 0.00909688 0.751311 0.368636
1609 1 0.239927 0.748911 0.497697
1482 1 0.309355 0.807271 0.373507
1478 1 0.187462 0.819582 0.365618
1484 1 0.250812 0.817016 0.43772
1479 1 0.187238 0.752099 0.439291
1452 1 0.243464 0.688668 0.439954
1481 1 0.241248 0.75322 0.38629
1450 1 0.302461 0.691466 0.379904
1446 1 0.186081 0.683287 0.374191
1483 1 0.310059 0.748857 0.441749
1578 1 0.314095 0.681443 0.496152
1613 1 0.369102 0.744865 0.496484
1614 1 0.434518 0.813058 0.493987
1488 1 0.373143 0.812348 0.430071
1487 1 0.436061 0.737935 0.44268
1454 1 0.432242 0.684247 0.375743
1456 1 0.36648 0.682421 0.431675
1486 1 0.441088 0.808763 0.377365
1485 1 0.374263 0.74516 0.373837
1489 1 0.497436 0.745385 0.373701
1617 1 0.498596 0.748594 0.497043
1621 1 0.640303 0.746537 0.496996
1492 1 0.502427 0.807847 0.436257
1460 1 0.502814 0.677287 0.440215
1490 1 0.554542 0.810929 0.371185
1458 1 0.556962 0.68768 0.374916
1464 1 0.623086 0.689771 0.435457
1493 1 0.625082 0.750963 0.377121
1496 1 0.629568 0.809101 0.441104
1491 1 0.576186 0.748488 0.439619
1626 1 0.806915 0.819438 0.493199
1466 1 0.805691 0.681642 0.372171
1497 1 0.748231 0.747585 0.373
1498 1 0.82431 0.808521 0.377481
1495 1 0.688266 0.736482 0.434952
1499 1 0.809361 0.752132 0.439567
1462 1 0.679322 0.687345 0.368053
1468 1 0.748332 0.686705 0.436848
1500 1 0.746469 0.81479 0.431738
1494 1 0.683871 0.805598 0.381416
1622 1 0.693895 0.8108 0.498953
1444 1 0.996368 0.687442 0.438677
1629 1 0.868257 0.754915 0.496523
1501 1 0.884665 0.745234 0.370901
1504 1 0.875833 0.813402 0.440608
1502 1 0.94833 0.803593 0.373288
1470 1 0.943407 0.681572 0.370515
1472 1 0.882858 0.687956 0.441179
1503 1 0.940278 0.755686 0.438066
1637 1 0.121278 0.876871 0.49736
1634 1 0.0611565 0.939256 0.493272
385 1 0.00104268 0.9999 0.373703
389 1 0.126037 0.998054 0.371823
1506 1 0.0637327 0.942364 0.365029
1512 1 0.125257 0.941605 0.435469
1509 1 0.121765 0.876383 0.366645
1507 1 0.066598 0.884364 0.431216
1505 1 0.00115062 0.882692 0.372172
387 1 0.0689596 0.999496 0.438848
393 1 0.252246 0.999615 0.376489
391 1 0.196008 0.990443 0.43458
1516 1 0.263853 0.942546 0.436543
1511 1 0.187267 0.87515 0.428823
1513 1 0.252081 0.87568 0.372816
1515 1 0.318 0.88283 0.432063
1510 1 0.182287 0.931079 0.370886
1514 1 0.313257 0.942809 0.364878
521 1 0.26367 0.999715 0.494697
1641 1 0.254124 0.875067 0.496374
1638 1 0.185991 0.927032 0.497565
397 1 0.374869 0.998446 0.374766
1645 1 0.376261 0.873734 0.498182
1520 1 0.381843 0.934223 0.429925
1517 1 0.375263 0.867918 0.370586
1518 1 0.439026 0.937452 0.372089
1519 1 0.43683 0.872705 0.430627
399 1 0.446287 0.99637 0.437713
1646 1 0.43779 0.935337 0.494418
1649 1 0.503611 0.871541 0.49853
1523 1 0.56444 0.867779 0.435626
1525 1 0.623305 0.873466 0.376082
1528 1 0.622426 0.951184 0.439184
1650 1 0.566145 0.9279 0.497763
1521 1 0.500136 0.877009 0.36735
1522 1 0.557975 0.94399 0.378393
1524 1 0.501299 0.933146 0.439355
1653 1 0.620158 0.870916 0.495947
1654 1 0.679652 0.936545 0.49318
1529 1 0.747505 0.873558 0.374525
1532 1 0.749341 0.938255 0.436435
407 1 0.691863 0.999959 0.440626
1527 1 0.682407 0.879056 0.434397
1526 1 0.688684 0.938746 0.373565
1530 1 0.817367 0.940357 0.373027
1531 1 0.811776 0.875856 0.431446
411 1 0.80639 0.994811 0.442933
1657 1 0.750036 0.875606 0.497199
413 1 0.875476 0.998306 0.373839
1533 1 0.880145 0.87657 0.376295
1534 1 0.93864 0.942672 0.376262
1535 1 0.938028 0.867609 0.441721
1536 1 0.877031 0.928474 0.442575
1508 1 0.996913 0.939686 0.438
541 1 0.867608 0.998251 0.498125
1544 1 0.119034 0.558835 0.558959
1571 1 0.0622497 0.624514 0.555996
1666 1 0.0623058 0.561177 0.623441
1701 1 0.119241 0.626076 0.624157
1669 1 0.118194 0.50082 0.621859
1539 1 0.0564996 0.501978 0.563569
1569 1 0.00100256 0.628324 0.500892
1548 1 0.249173 0.556247 0.563001
1575 1 0.187547 0.618584 0.57098
1579 1 0.325862 0.622661 0.556394
1670 1 0.176815 0.562281 0.631317
1674 1 0.310627 0.566355 0.620664
1705 1 0.25216 0.623421 0.635799
1577 1 0.246053 0.618743 0.503303
1673 1 0.251988 0.5052 0.629215
1543 1 0.184152 0.500821 0.574027
1542 1 0.189115 0.559321 0.501209
1552 1 0.381908 0.558366 0.570188
1583 1 0.439386 0.632372 0.568168
1678 1 0.437038 0.558483 0.632116
1709 1 0.373042 0.629 0.626758
1556 1 0.496682 0.558214 0.557535
1713 1 0.498145 0.627534 0.633548
1681 1 0.499543 0.50003 0.635116
1560 1 0.614556 0.556281 0.566657
1587 1 0.559893 0.631758 0.560061
1682 1 0.555905 0.567391 0.621158
1717 1 0.62741 0.617741 0.626296
1589 1 0.617261 0.616516 0.50318
1554 1 0.562074 0.556583 0.501442
1563 1 0.811225 0.508185 0.560117
1564 1 0.747734 0.561416 0.568511
1591 1 0.678531 0.617738 0.560444
1595 1 0.810336 0.625436 0.566117
1686 1 0.688642 0.55909 0.637881
1690 1 0.813954 0.561745 0.618414
1721 1 0.747103 0.624709 0.631495
1689 1 0.755999 0.5052 0.629425
1558 1 0.689747 0.555419 0.505059
1693 1 0.872997 0.501304 0.61738
1697 1 0.992815 0.625613 0.634273
1540 1 0.997993 0.566601 0.558286
1568 1 0.87479 0.566887 0.560951
1599 1 0.93999 0.629393 0.566069
1694 1 0.932354 0.56247 0.624228
1725 1 0.88093 0.626316 0.620311
1597 1 0.875068 0.632727 0.50623
1567 1 0.930587 0.504879 0.555144
1537 1 0.996044 0.503128 0.502183
1665 1 0.995877 0.503905 0.6222
1565 1 0.872761 0.506204 0.501038
1566 1 0.93836 0.568262 0.504545
1570 1 0.0641096 0.689474 0.504569
1605 1 0.128333 0.755729 0.504609
1576 1 0.123313 0.688005 0.561624
1603 1 0.0637295 0.754081 0.565347
1608 1 0.119373 0.817291 0.560459
1698 1 0.0558616 0.687858 0.621818
1730 1 0.0653288 0.815964 0.630712
1733 1 0.123736 0.754145 0.624311
1729 1 0.00517334 0.753756 0.630001
1572 1 0.000624174 0.686709 0.563886
1601 1 0.000998443 0.743621 0.500798
1580 1 0.252475 0.679551 0.566297
1607 1 0.186525 0.753887 0.572127
1611 1 0.311721 0.744349 0.560013
1612 1 0.249538 0.811428 0.557614
1702 1 0.190241 0.684821 0.62826
1706 1 0.310456 0.680721 0.627007
1734 1 0.183218 0.820738 0.627
1737 1 0.256088 0.74138 0.624233
1738 1 0.308336 0.813387 0.621994
1574 1 0.185285 0.683019 0.503614
1606 1 0.182582 0.817984 0.500264
1610 1 0.312026 0.810989 0.501657
1584 1 0.374085 0.689632 0.558912
1615 1 0.437456 0.746392 0.564988
1616 1 0.374605 0.80291 0.556984
1710 1 0.433157 0.68523 0.630239
1741 1 0.373031 0.745513 0.626219
1742 1 0.430077 0.808817 0.626754
1620 1 0.499628 0.813417 0.56158
1745 1 0.495118 0.748997 0.623059
1582 1 0.436609 0.686037 0.505048
1586 1 0.560263 0.689836 0.500605
1588 1 0.502896 0.689405 0.560717
1592 1 0.623943 0.680723 0.556968
1619 1 0.562727 0.753175 0.56814
1624 1 0.627815 0.805504 0.561308
1714 1 0.561984 0.687005 0.617026
1746 1 0.579373 0.813032 0.629986
1749 1 0.622096 0.747815 0.624301
1618 1 0.566936 0.805463 0.5003
1596 1 0.752166 0.67996 0.562281
1623 1 0.693012 0.747008 0.57195
1627 1 0.81554 0.751219 0.562735
1628 1 0.750553 0.807019 0.559189
1718 1 0.684203 0.682665 0.624207
1722 1 0.819246 0.686438 0.621419
1750 1 0.673784 0.812397 0.625214
1753 1 0.753367 0.7511 0.626049
1754 1 0.813362 0.815413 0.627734
1594 1 0.806022 0.68406 0.500437
1625 1 0.75078 0.750653 0.506146
1604 1 0.995347 0.811066 0.569382
1600 1 0.880317 0.694439 0.562016
1631 1 0.938456 0.750143 0.563102
1632 1 0.881275 0.808861 0.564992
1726 1 0.941066 0.697449 0.62793
1757 1 0.87879 0.752309 0.634008
1758 1 0.937595 0.806078 0.628623
1630 1 0.934778 0.80325 0.502442
1598 1 0.939184 0.69032 0.50195
1635 1 0.0541817 0.870841 0.56846
1640 1 0.12635 0.93388 0.562746
1762 1 0.0681476 0.932663 0.620367
1765 1 0.122891 0.87666 0.622708
1636 1 0.00157425 0.938534 0.562022
515 1 0.0703673 0.999518 0.560968
645 1 0.131743 0.995555 0.625593
1633 1 0.00532228 0.873063 0.504375
517 1 0.130482 0.993892 0.503383
1639 1 0.188148 0.877234 0.56338
1643 1 0.31236 0.869711 0.563016
1644 1 0.252374 0.933924 0.559707
1766 1 0.18728 0.937412 0.622797
1769 1 0.25338 0.879961 0.621401
1770 1 0.313217 0.932796 0.620364
649 1 0.247137 0.992232 0.621698
523 1 0.310493 0.996307 0.565896
519 1 0.192141 0.995749 0.55773
1642 1 0.315905 0.938842 0.500896
527 1 0.442207 0.990902 0.558618
1647 1 0.435686 0.866802 0.56337
1648 1 0.376841 0.940462 0.567177
1773 1 0.374848 0.875037 0.623439
1774 1 0.449204 0.93334 0.620878
653 1 0.373752 0.995633 0.623835
657 1 0.501091 0.995658 0.625647
1652 1 0.508506 0.927614 0.557993
1777 1 0.514153 0.876984 0.622685
1651 1 0.565307 0.863759 0.563472
1656 1 0.623787 0.9373 0.557918
1778 1 0.568042 0.940677 0.633984
1781 1 0.627751 0.879667 0.623737
535 1 0.690702 0.99469 0.556191
1658 1 0.811953 0.936467 0.50156
1655 1 0.687288 0.868958 0.558591
1659 1 0.815921 0.865259 0.56601
1660 1 0.748102 0.941139 0.559396
1782 1 0.68932 0.937411 0.623969
1785 1 0.749303 0.871937 0.624402
1786 1 0.809934 0.92365 0.625135
537 1 0.749148 0.999248 0.502024
665 1 0.748864 0.997697 0.628126
1761 1 0.994217 0.883749 0.623713
1662 1 0.937157 0.937996 0.50414
1663 1 0.940811 0.87562 0.559837
1664 1 0.870262 0.930471 0.564493
1789 1 0.881728 0.876805 0.630772
1790 1 0.937116 0.94171 0.618211
543 1 0.932132 0.999901 0.557159
1661 1 0.874568 0.873138 0.507702
513 1 0.993735 0.996835 0.503275
1672 1 0.128661 0.556564 0.69278
1699 1 0.0626459 0.622614 0.684618
1794 1 0.0584561 0.565807 0.754422
1800 1 0.117255 0.560376 0.816762
1827 1 0.0581086 0.621906 0.815313
1829 1 0.122176 0.626201 0.746265
1825 1 0.000927646 0.623167 0.748832
1667 1 0.0644101 0.510196 0.691298
1676 1 0.250939 0.565924 0.693475
1703 1 0.186532 0.618494 0.688592
1707 1 0.316606 0.623236 0.693135
1798 1 0.184798 0.555719 0.753055
1802 1 0.308803 0.55985 0.753777
1804 1 0.242829 0.555955 0.812048
1831 1 0.178174 0.61642 0.808433
1833 1 0.246819 0.623384 0.757083
1835 1 0.314235 0.617847 0.808405
1799 1 0.176944 0.502566 0.818962
1680 1 0.366963 0.553971 0.691225
1711 1 0.440018 0.622556 0.686064
1806 1 0.434579 0.55732 0.74967
1808 1 0.378216 0.556348 0.807915
1837 1 0.380236 0.620338 0.748389
1839 1 0.429927 0.624142 0.810753
1841 1 0.496434 0.618979 0.738948
1812 1 0.497855 0.569108 0.812532
1811 1 0.555183 0.505299 0.81179
1684 1 0.500338 0.56212 0.687628
1688 1 0.619556 0.560397 0.689018
1715 1 0.561802 0.624637 0.684268
1810 1 0.560588 0.569353 0.754978
1816 1 0.627843 0.564667 0.812796
1843 1 0.560764 0.627658 0.815531
1845 1 0.626142 0.624854 0.75372
1691 1 0.812886 0.510429 0.68981
1819 1 0.813908 0.5072 0.807933
1687 1 0.682539 0.500192 0.694734
1692 1 0.745767 0.570186 0.687849
1719 1 0.691758 0.638048 0.691398
1723 1 0.815 0.629478 0.688214
1814 1 0.685206 0.566238 0.749605
1818 1 0.807231 0.573731 0.746857
1820 1 0.750576 0.561896 0.810396
1847 1 0.691083 0.628666 0.821795
1849 1 0.741565 0.625369 0.750561
1851 1 0.814918 0.633615 0.808603
1817 1 0.749462 0.50167 0.749182
1668 1 0.994123 0.564479 0.689767
1796 1 0.992341 0.556906 0.819162
1821 1 0.875112 0.519107 0.750626
1696 1 0.868303 0.566617 0.686306
1727 1 0.931006 0.624428 0.687636
1822 1 0.938765 0.565836 0.751024
1824 1 0.873004 0.570549 0.81468
1853 1 0.878055 0.627869 0.749892
1855 1 0.937233 0.629258 0.815238
1704 1 0.127357 0.688403 0.685922
1731 1 0.0656157 0.739667 0.689659
1736 1 0.126642 0.81402 0.68583
1826 1 0.0630989 0.681175 0.751552
1832 1 0.126434 0.683295 0.812578
1858 1 0.0685548 0.813385 0.748242
1859 1 0.0654836 0.742769 0.804829
1861 1 0.123546 0.743764 0.749121
1864 1 0.123814 0.814739 0.809601
1732 1 0.00410423 0.811858 0.686413
1857 1 0.00546487 0.75727 0.743508
1700 1 0.00337364 0.685107 0.685149
1708 1 0.246202 0.684647 0.694699
1735 1 0.18533 0.748488 0.687897
1739 1 0.314409 0.745686 0.683164
1740 1 0.248577 0.815179 0.684342
1830 1 0.186578 0.687984 0.747689
1834 1 0.308037 0.680154 0.758826
1836 1 0.244019 0.691593 0.808686
1862 1 0.185986 0.814852 0.752746
1863 1 0.189409 0.755641 0.808953
1865 1 0.25004 0.752794 0.749262
1866 1 0.314838 0.823347 0.749681
1867 1 0.31622 0.757218 0.796315
1868 1 0.255785 0.820876 0.809593
1712 1 0.373635 0.686638 0.692328
1743 1 0.441849 0.749204 0.6858
1744 1 0.375673 0.816271 0.692405
1838 1 0.439246 0.684541 0.743689
1840 1 0.373856 0.685276 0.805048
1869 1 0.379728 0.744766 0.751222
1870 1 0.44066 0.803871 0.747369
1871 1 0.442798 0.75053 0.808357
1872 1 0.376326 0.820104 0.806345
1844 1 0.495254 0.682664 0.803896
1873 1 0.501716 0.750363 0.747141
1716 1 0.504841 0.69062 0.688428
1876 1 0.501742 0.812484 0.809302
1748 1 0.500809 0.815223 0.68336
1720 1 0.616985 0.684974 0.685445
1747 1 0.566337 0.751423 0.680909
1752 1 0.627526 0.82043 0.694109
1842 1 0.555983 0.681574 0.753152
1848 1 0.623222 0.688778 0.812994
1874 1 0.564661 0.803745 0.748038
1875 1 0.56412 0.748311 0.808607
1877 1 0.623871 0.746396 0.74477
1880 1 0.621131 0.807044 0.807666
1724 1 0.752983 0.689397 0.691297
1751 1 0.692449 0.752581 0.682638
1755 1 0.810602 0.753414 0.686071
1756 1 0.751069 0.812528 0.68141
1846 1 0.692438 0.694763 0.75718
1850 1 0.812349 0.694788 0.752263
1852 1 0.751713 0.686402 0.810863
1878 1 0.68747 0.809029 0.745115
1879 1 0.690596 0.75519 0.819292
1881 1 0.746244 0.754285 0.754112
1882 1 0.809917 0.812979 0.752302
1883 1 0.812113 0.748382 0.809449
1884 1 0.749354 0.812123 0.812536
1860 1 0.995338 0.81524 0.811748
1828 1 0.998973 0.683127 0.812445
1728 1 0.872015 0.688281 0.693198
1759 1 0.937343 0.750166 0.69745
1760 1 0.874753 0.810894 0.694431
1854 1 0.939853 0.688355 0.749811
1856 1 0.880896 0.695499 0.813197
1885 1 0.874497 0.750009 0.753822
1886 1 0.93311 0.819712 0.74887
1887 1 0.941366 0.748159 0.811771
1888 1 0.868601 0.815179 0.809952
771 1 0.0652551 0.998573 0.817833
1763 1 0.0595545 0.875037 0.679436
1768 1 0.122077 0.933827 0.686508
1890 1 0.0754607 0.939243 0.757212
1891 1 0.0665838 0.879346 0.812965
1893 1 0.129874 0.87298 0.753244
1896 1 0.127682 0.943416 0.823455
1764 1 0.000737483 0.948535 0.688658
1889 1 0.00152018 0.870852 0.740848
1892 1 0.00956521 0.939134 0.816562
769 1 0.00646791 0.996925 0.751376
651 1 0.312254 0.995286 0.684397
777 1 0.248955 0.995976 0.745769
1767 1 0.186497 0.873788 0.688215
1771 1 0.315509 0.873705 0.686776
1772 1 0.246137 0.938839 0.691968
1894 1 0.178034 0.943885 0.760966
1895 1 0.188015 0.870944 0.820605
1897 1 0.249049 0.872571 0.748926
1898 1 0.314196 0.934689 0.74448
1899 1 0.312011 0.879898 0.813035
1900 1 0.244108 0.933743 0.812697
779 1 0.302316 0.996075 0.813684
647 1 0.182582 0.993628 0.693338
781 1 0.368634 0.999751 0.748338
655 1 0.433668 0.998089 0.682406
785 1 0.494596 0.993296 0.746944
1775 1 0.439646 0.868516 0.679036
1776 1 0.379029 0.934447 0.679424
1901 1 0.376592 0.87721 0.745309
1902 1 0.432611 0.942566 0.750461
1903 1 0.431779 0.87466 0.821221
1904 1 0.369699 0.944457 0.815061
1908 1 0.489023 0.937407 0.816359
1905 1 0.498196 0.875171 0.744337
1780 1 0.496238 0.936127 0.689293
659 1 0.561137 0.993709 0.695018
1779 1 0.566371 0.877202 0.689495
1784 1 0.630843 0.940965 0.685754
1906 1 0.554945 0.937221 0.750168
1907 1 0.557115 0.880611 0.813823
1909 1 0.615933 0.874299 0.749116
1912 1 0.618265 0.940706 0.813014
795 1 0.812359 0.99992 0.809992
1783 1 0.691266 0.873545 0.68353
1787 1 0.816168 0.872625 0.688535
1788 1 0.74734 0.935007 0.690563
1910 1 0.685529 0.932213 0.742827
1911 1 0.686367 0.864264 0.810475
1913 1 0.74911 0.877539 0.743126
1914 1 0.814855 0.937031 0.747482
1915 1 0.808865 0.877879 0.807432
1916 1 0.744332 0.940419 0.808937
1791 1 0.938941 0.871601 0.685237
1792 1 0.877685 0.933984 0.691598
1917 1 0.872895 0.876231 0.754815
1918 1 0.938216 0.938037 0.754049
1919 1 0.93273 0.879161 0.814623
1920 1 0.875082 0.945752 0.811891
1925 1 0.115442 0.50013 0.882325
1922 1 0.0624937 0.56336 0.881055
1928 1 0.124154 0.560157 0.945431
1955 1 0.0643197 0.624432 0.942539
1957 1 0.120807 0.619835 0.882251
1924 1 0.00691918 0.564055 0.943627
1927 1 0.189645 0.505098 0.940493
1929 1 0.249734 0.501064 0.878832
1930 1 0.309503 0.5591 0.875755
1926 1 0.184794 0.561382 0.881998
1963 1 0.310629 0.615005 0.942467
1961 1 0.248489 0.616534 0.874031
1932 1 0.248518 0.562332 0.938656
1959 1 0.187392 0.625902 0.939319
1041 1 0.497269 0.50332 0.99081
1933 1 0.381089 0.504555 0.876314
1073 1 0.498667 0.620253 0.997603
1037 1 0.382452 0.500998 0.997237
1965 1 0.370931 0.617818 0.878549
1934 1 0.438366 0.566883 0.869529
1936 1 0.370007 0.559655 0.940722
1967 1 0.429809 0.620207 0.945876
1969 1 0.496138 0.63353 0.872352
1940 1 0.495981 0.568499 0.927648
1042 1 0.553235 0.559515 0.991594
1939 1 0.558882 0.500234 0.934215
1077 1 0.622034 0.619021 0.999373
1937 1 0.501454 0.500098 0.870899
1938 1 0.559521 0.566101 0.871358
1944 1 0.62387 0.563557 0.933492
1971 1 0.558954 0.625778 0.937839
1973 1 0.628654 0.624055 0.876901
1941 1 0.61971 0.503522 0.869527
1943 1 0.687763 0.512576 0.937344
1945 1 0.751291 0.501339 0.883137
1942 1 0.697516 0.562156 0.87082
1946 1 0.811446 0.558847 0.86926
1977 1 0.768596 0.630878 0.87158
1979 1 0.817754 0.629393 0.941844
1948 1 0.750465 0.561053 0.932377
1975 1 0.69477 0.626106 0.931374
1081 1 0.754896 0.618411 0.996646
1949 1 0.875218 0.50395 0.87256
1953 1 0.992481 0.625938 0.87819
1950 1 0.939078 0.56467 0.874298
1981 1 0.873885 0.631321 0.877664
1983 1 0.937413 0.622494 0.939802
1952 1 0.872688 0.561735 0.935327
1951 1 0.93788 0.506483 0.937786
1986 1 0.0698715 0.806325 0.870003
1987 1 0.0632812 0.74858 0.938523
1954 1 0.0556766 0.683892 0.87875
1992 1 0.126331 0.813577 0.938953
1989 1 0.129027 0.743516 0.867178
1960 1 0.121693 0.693166 0.928875
1985 1 0.00475754 0.752623 0.869966
1058 1 0.0649677 0.684551 0.997604
1094 1 0.187194 0.805347 0.995163
1098 1 0.317555 0.808865 0.995948
1097 1 0.250086 0.744663 0.997966
1996 1 0.241803 0.815243 0.934471
1990 1 0.184851 0.810403 0.879641
1994 1 0.316597 0.808941 0.876513
1958 1 0.191184 0.679829 0.871486
1993 1 0.247906 0.74988 0.874902
1964 1 0.245311 0.676424 0.93429
1995 1 0.312599 0.746914 0.936445
1962 1 0.311306 0.688372 0.874594
1991 1 0.190793 0.742122 0.933117
1066 1 0.312434 0.681681 0.996786
2000 1 0.374037 0.816024 0.936316
1966 1 0.425765 0.68362 0.868846
1997 1 0.372853 0.74833 0.876708
1999 1 0.435871 0.744228 0.926123
1998 1 0.434721 0.809243 0.873897
1968 1 0.36785 0.685098 0.939323
2001 1 0.498688 0.737869 0.865196
1972 1 0.497389 0.687418 0.934671
2004 1 0.494638 0.806411 0.937733
1070 1 0.435601 0.684571 0.99748
1101 1 0.378405 0.752876 0.990688
1106 1 0.565719 0.807593 0.993464
1074 1 0.564017 0.683227 0.994829
1105 1 0.501508 0.750519 0.996474
2003 1 0.559888 0.747135 0.93119
1976 1 0.626957 0.691464 0.936545
2005 1 0.627255 0.751546 0.873488
2002 1 0.560135 0.804248 0.870269
2008 1 0.621512 0.811488 0.93675
1970 1 0.561633 0.688494 0.867445
1109 1 0.628932 0.751041 0.99706
2006 1 0.689639 0.810962 0.877862
2012 1 0.760923 0.817648 0.93498
2009 1 0.750874 0.752182 0.872264
2011 1 0.811676 0.743974 0.936279
2010 1 0.812295 0.81434 0.870188
1980 1 0.751407 0.686871 0.933274
1974 1 0.69116 0.689354 0.874778
2007 1 0.700375 0.750969 0.941062
1978 1 0.820917 0.693206 0.877639
1110 1 0.699962 0.818602 0.99564
1114 1 0.824946 0.815628 0.9991
1956 1 0.999436 0.686185 0.937307
1988 1 0.995585 0.81517 0.940553
1982 1 0.94266 0.692717 0.876111
1984 1 0.883811 0.691165 0.939776
2016 1 0.871872 0.80527 0.931034
2015 1 0.936095 0.757894 0.936308
2013 1 0.87302 0.754588 0.8684
2014 1 0.938512 0.818933 0.868913
2017 1 0.0104741 0.872219 0.875095
2018 1 0.0641637 0.940272 0.879881
2021 1 0.122058 0.875625 0.876234
2024 1 0.121978 0.943024 0.939036
2020 1 0.0130986 0.93323 0.942253
2019 1 0.0689774 0.870519 0.94127
1122 1 0.0692996 0.941594 0.993751
905 1 0.243662 0.997242 0.87654
907 1 0.312456 0.989022 0.93389
2023 1 0.181154 0.877381 0.936318
2028 1 0.240474 0.938732 0.947242
2025 1 0.250772 0.869018 0.872725
2022 1 0.184045 0.938206 0.881224
2027 1 0.305709 0.884342 0.939571
2026 1 0.298802 0.93523 0.873704
1129 1 0.243674 0.871934 0.998796
9 1 0.257176 0.998053 0.998817
17 1 0.49963 0.993594 0.996769
2036 1 0.497658 0.935594 0.932123
2031 1 0.433783 0.873561 0.937563
2032 1 0.371095 0.927622 0.93481
2030 1 0.434524 0.944204 0.880385
1133 1 0.373849 0.877495 0.99641
2033 1 0.495125 0.873016 0.877088
2029 1 0.36751 0.871011 0.877305
13 1 0.376928 0.996454 0.991166
1134 1 0.438165 0.934005 0.99125
2035 1 0.561437 0.866497 0.934565
2034 1 0.556522 0.938878 0.877989
2040 1 0.620703 0.93928 0.93445
2037 1 0.627134 0.869473 0.868351
1138 1 0.560755 0.930604 0.993915
917 1 0.629312 0.998096 0.871117
1146 1 0.812314 0.944641 0.994347
2038 1 0.681985 0.934376 0.865693
921 1 0.749994 0.996311 0.874173
2042 1 0.815743 0.936553 0.872318
2044 1 0.751111 0.936747 0.93767
2043 1 0.8185 0.876889 0.938709
1142 1 0.685408 0.940446 0.996802
2041 1 0.747362 0.878802 0.874908
2039 1 0.679463 0.878265 0.9325
919 1 0.683039 0.996018 0.933229
1145 1 0.759272 0.877068 0.998892
25 1 0.741738 0.995281 0.994121
1149 1 0.882261 0.883536 0.997016
927 1 0.942771 0.995405 0.940353
2045 1 0.875798 0.875155 0.866489
2046 1 0.940973 0.942026 0.873173
2047 1 0.934804 0.876119 0.931346
2048 1 0.879052 0.944519 0.934353
1121 1 0.992805 0.873387 0.996621
1150 1 0.943477 0.935311 0.993898
29 1 0.88387 0.98976 0.999217
| [
"[email protected]"
]
| |
c2f18cbb824c384e8769a74318cae6f4045561a3 | 4ddd555d485354221085daebcb6f09a71dbb34e7 | /container_balls.py | fb561c5b30bada58d475071792b49392eba2b458 | []
| no_license | nizarhmain/hackerrank | 6aa4849c73c59e0c7f6e7508438c8ba28c8b7c22 | 885bb876f93d1b29299adab551cc1ac0076f1743 | refs/heads/master | 2020-11-23T23:32:35.572825 | 2020-02-27T11:53:03 | 2020-02-27T11:53:03 | 227,865,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py |
"""
2
3
1 3 1
2 1 2
3 3 3
3
0 2 1
1 1 1
2 0 0
"""
"""
Impossible
Possible
"""
# Sample inputs for manual testing (rows = containers, cols = ball types).
# q number of queries
# n number of containers and ball types
first = [[999336263, 998799923], [998799923, 999763019]]
third = [[1, 3, 1], [2, 1, 2], [3, 3, 3]]
fourth = [[0, 2, 1], [1, 1, 1], [2, 0, 0]]
# check this one later
query = 2
def read_from_txt():
    """Read all queries from 'container_balls2.txt' and evaluate each one.

    File format: the first line holds the number of queries q; each query
    consists of a line with the matrix size n followed by n lines of n
    space-separated integers (rows = containers, cols = ball types).
    Each parsed matrix is passed to organizingContainers().
    """
    # 'with' guarantees the file handle is closed even if parsing raises;
    # the original opened the file and never closed it.
    with open("container_balls2.txt", "r") as f:
        q = int(f.readline())  # number of queries
        for _ in range(q):
            n = int(f.readline())  # matrix dimension for this query
            container = [list(map(int, f.readline().split())) for _ in range(n)]
            organizingContainers(container)
def organizingContainers(container):
    """Decide whether balls can be swapped so each container holds one type.

    container is an n x n matrix where container[i][j] is the number of
    type-j balls currently in container i. A swap-only rearrangement exists
    iff the multiset of container capacities (row sums) equals the multiset
    of per-type totals (column sums).

    Prints and returns 'Possible' or 'Impossible'.

    Fixes over the original: it called print(list.sort()), which always
    prints None (sort() sorts in place and returns None), emitted debug
    output, and returned nothing.
    """
    containersum = sorted(sum(row) for row in container)       # balls per container
    typesum = sorted(sum(col) for col in zip(*container))      # balls per type
    verdict = 'Possible' if typesum == containersum else 'Impossible'
    print(verdict)
    return verdict
# Entry point: evaluate every query found in the input file.
# organizingContainers(first)
read_from_txt()
| [
"[email protected]"
]
| |
3fce41e05b897b1b5f9cb8483bc9db41b2f751a0 | 3c8701e04900389adb40a46daedb5205d479016c | /oldboy-python18/day02-列表-字典/home-work-stu/购物车.py | 63b937b4063f23e586269f417564b2537968ebdd | []
| no_license | huboa/xuexi | 681300653b834eaf506f49987dcca83df48e8db7 | 91287721f188b5e24fbb4ccd63b60a80ed7b9426 | refs/heads/master | 2020-07-29T16:39:12.770272 | 2018-09-02T05:39:45 | 2018-09-02T05:39:45 | 73,660,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,584 | py | #coding:utf-8
# Interactive console shopping cart (all user-facing prompts are Chinese):
#   1. ask for a budget until a valid non-negative integer is entered,
#   2. repeatedly show the goods and let the user add one item to the cart,
#   3. offer checkout; on success the cart is cleared and the budget reduced.
goods = [
    {"name": "电脑", "price": 1999},
    {"name": "鼠标", "price": 10},
    {"name": "游艇", "price": 20},
    {"name": "美女", "price": 998},
]
shopping_car=[]
while True:
    # Ask for the user's total budget; loop until the input is a digit string.
    total_assets = input('请输入你的总资产:').strip()
    if len(total_assets) == 0:
        continue
    else:
        if total_assets.isdigit():
            total_assets = int(total_assets)
            print('您的总资产:%d' % total_assets)
            break
        else:
            print('您输入的不符合标准:')
            continue
while True:
    # Show the product list, (re)assigning sequential ids starting at 1.
    n=1
    print('-----------商品信息-----------')
    for good in goods:
        good['id']=n
        print('商品编号:%d ,商品名称:%s ,商品价格:%d' %(n,good['name'],good['price']))
        n+=1
    print('-----------------------------')
    #
    #
    while True:
        # Keep asking until a valid, existing product id has been chosen.
        choice = input('请选择商品:').strip()
        if len(choice) == 0:
            continue
        else:
            if choice.isdigit():
                # n is reused here as a found-flag: 1 if the id matched a product.
                n=0
                for good in goods:
                    if int(choice) == good['id']:
                        # Add the chosen item to the cart.
                        shopping_car.append((good['name'],good['price']))
                        n=1
                if n == 0:
                    print('你选择的商品不存在:')
                else:
                    # Show the cart contents, then leave the selection loop.
                    print('-----------购物车信息-----------')
                    if len(shopping_car) == 0:
                        print('购物车为空')
                    else:
                        for value in shopping_car:
                            print('商品名称:%s ,商品价格:%d' % (value[0], value[1]))
                    print('-----------------------------')
                    break
    # Checkout: 'y' settles the bill, anything else returns to shopping.
    while True:
        is_buy=input('结算请输入y,继续选择商品按任意键').strip()
        if len(is_buy) != 0 and is_buy == 'y':
            total_price=0
            for i in shopping_car:
                total_price+=i[1]
            print('您购买的商品总价格为:%d' %total_price)
            if total_price > total_assets:
                # Not enough money: the cart is kept as-is.
                print('余额不足。您的余额为%d' %total_assets)
                break
            else:
                total_assets=total_assets-total_price
                print('购买成功,余额为%d' %total_assets)
                shopping_car.clear()
                break
        else:
            break
| [
"[email protected]"
]
| |
d260c463d7443c4a515d2e19e29b33c7081dce1f | 5783be589f9f6ab590ea097eb9b84fa3786617e4 | /Misc/fileCollection/main.py | 881cc8b3eafa07bb6a66470eb5cf2c497c11eb1a | [
"Apache-2.0"
]
| permissive | suyash248/ds_algo | 5751e46ba4b959f0dd3f6843800f3e21d52100ac | 1ca9470c33236016cbb88a38b2f19db41535e457 | refs/heads/master | 2022-12-10T10:39:16.135888 | 2022-12-06T16:45:25 | 2022-12-06T16:45:25 | 58,738,512 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | from Misc.fileCollection.file import File
from Misc.fileCollection.file_util import FileUtility
if __name__ == '__main__':
    # Demo driver for the file/collection utilities: create seven files,
    # group them into four named collections, then report totals.
    file_utility: FileUtility = FileUtility()
    f1: File = File(name="f1", size=100)
    f2: File = File(name="f2", size=50)
    f3: File = File(name="f3", size=500)
    f4: File = File(name="f4", size=200)
    f5: File = File(name="f5", size=120)
    f6: File = File(name="f6", size=180)
    f7: File = File(name="f7", size=170)
    # Note: f1, f2 and f3 are each saved into two different collections.
    file_utility.save_files_to_collection("col1", f1, f2, f3)
    file_utility.save_files_to_collection("col2", f4, f5, f2)
    file_utility.save_files_to_collection("col3", f6, f1)
    file_utility.save_files_to_collection("col4", f3, f7)
    print(file_utility.get_total_size_processed())
    # NOTE(review): the ranking criterion lives in FileUtility; presumably
    # "top" means largest combined size -- confirm there.
    print(file_utility.get_top_k_collections(3))
| [
"[email protected]"
]
| |
6aafd67487c0bd93b6877eceb974ad1a5b907767 | ec7ecc5abbdd03fb55f24e89dbbdfa23ebd7b60f | /evaluate postfix expression.py | 0287083b4698fdbb7abd669aeabc7e66044a9f3e | []
| no_license | poojithayadavalli/codekata | cd290e009cf3e2f504c99dd4f6de9171f217c6be | 1885c45a277cf1023e483bd77edf0c6edf8d95f3 | refs/heads/master | 2020-07-18T14:06:17.190229 | 2020-05-30T09:00:29 | 2020-05-30T09:00:29 | 206,259,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | class Evaluate:
# Constructor to initialize the class variables
def __init__(self, capacity):
self.top = -1
self.capacity = capacity
# This array is used a stack
self.array = []
# check if the stack is empty
def isEmpty(self):
return True if self.top == -1 else False
# Return the value of the top of the stack
def peek(self):
return self.array[-1]
# Pop the element from the stack
def pop(self):
if not self.isEmpty():
self.top -= 1
return self.array.pop()
else:
return "$"
# Push the element to the stack
def push(self, op):
self.top += 1
self.array.append(op)
# The main function that converts given infix expression
# to postfix expression
def evaluatePostfix(self, exp):
# Iterate over the expression for conversion
for i in exp:
# If the scanned character is an operand
# (number here) push it to the stack
if i.isdigit():
self.push(i)
# If the scanned character is an operator,
# pop two elements from stack and apply it.
else:
val1 = self.pop()
val2 = self.pop()
self.push(str(eval(val2 + i + val1)))
return int(self.pop())
# Script driver: read one postfix expression from stdin and print its value.
exp =input()
obj = Evaluate(len(exp))
print(obj.evaluatePostfix(exp))
| [
"[email protected]"
]
| |
2372a02f129a67fbf7970e593aecdaeb2bdb38b5 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/messenger/doc_loaders/colors_schemes.py | 5d932c37ceee7ccf7724d9394a83e08eff0f0204 | []
| no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,484 | py | # 2016.11.19 19:53:40 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/messenger/doc_loaders/colors_schemes.py
from messenger.doc_loaders import _xml_helpers
def _readColors(xmlCtx, section, colorsNames, defName):
    """Parse every <color> entry under section into a name -> RGB mapping.

    Colors listed in colorsNames but absent from the section are filled in
    with the color named defName (or 0 when defName itself was not read).
    """
    colors = {}
    missing = list(colorsNames)
    for tagName, subSec in section.items():
        if tagName != 'color':
            raise _xml_helpers.XMLError(xmlCtx, 'Tag "{0:>s}" is invalid'.format(tagName))
        ctx = xmlCtx.next(subSec)
        name = _xml_helpers.readNoEmptyStr(ctx, subSec, 'name', 'Section "name" is not defined')
        if name not in colorsNames:
            raise _xml_helpers.XMLError(ctx, 'Name of color {0:>s} is invalid'.format(name))
        colors[name] = _xml_helpers.readRGB(ctx, subSec, 'rgb', 'Color is invalid.')
        missing.remove(name)
    if missing:
        # Back-fill every unread color with the default color's value (0 if
        # the default itself was never read).
        fallback = colors.get(defName, 0)
        for name in missing:
            colors[name] = fallback
    return colors
def _readColorScheme(xmlCtx, section, colorScheme):
    """Populate colorScheme with the color table of every <item> entry."""
    knownNames = colorScheme.getColorsNames()
    defaultName = colorScheme.getDefColorName()
    for tag, itemSec in section.items():
        # The scheme's own <name> element is metadata, not an item.
        if tag == 'name':
            continue
        if tag != 'item':
            raise _xml_helpers.XMLError(xmlCtx, 'Tag "{0:>s}" is invalid'.format(tag))
        itemCtx = xmlCtx.next(itemSec)
        itemName = _xml_helpers.readNoEmptyStr(itemCtx, itemSec, 'name', 'Section "name" is not defined')
        colorsSec = itemSec['colors']
        if not colorsSec:
            raise _xml_helpers.XMLError(itemCtx, 'Section "colors" is not defined')
        colorScheme[itemName] = _readColors(itemCtx.next(colorsSec), colorsSec, knownNames, defaultName)
def load(xmlCtx, section, messengerSettings):
    """Read every <colorScheme> entry and merge it into messengerSettings.

    Schemes whose name is unknown to messengerSettings are skipped.
    """
    for tag, schemeSec in section.items():
        if tag != 'colorScheme':
            raise _xml_helpers.XMLError(xmlCtx, 'Tag {0:>s} is invalid'.format(tag))
        schemeCtx = xmlCtx.next(schemeSec)
        schemeName = _xml_helpers.readNoEmptyStr(schemeCtx, schemeSec, 'name', 'Color scheme name is not defined')
        scheme = messengerSettings.getColorScheme(schemeName)
        if scheme is None:
            continue
        _readColorScheme(schemeCtx, schemeSec, scheme)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\doc_loaders\colors_schemes.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:53:40 Střední Evropa (běžný čas)
| [
"[email protected]"
]
| |
4afbaa853bd42ac76b84fe0c43d7682cbd589fc9 | e3a6f67e9d75353dee73e2856a5cc50761fe0f52 | /.vscode/chi.py/dictionary.py | 7b11ae5c09658e77d9669c59cba484c6c5c3a794 | []
| no_license | Tchalz/delete_now | f9973f176b6667c4c0d36c0d533896c6e51c149a | 9f8788046c1bd7eb2906eaa1488617477938f3c7 | refs/heads/master | 2023-08-07T19:48:21.745810 | 2022-06-19T18:55:58 | 2022-06-19T18:55:58 | 242,335,515 | 0 | 0 | null | 2023-07-20T14:53:56 | 2020-02-22T12:11:20 | Python | UTF-8 | Python | false | false | 248 | py |
scores = {"bayode": 10, "chibuzor": 20, "philip": 10, "goke": 90}
print(scores["bayode"])
# d = {"chibuzor": 20, "goke": 90}
# d["chibuzor"] = 20
# d["goke"] = 90
# print(d)
league = {"english": 100, "spanish":80, "french": 60, "danish": 50}
| [
"[email protected]"
]
| |
23621a57dc183fa40c3fc2a1470f752f3e36f339 | d0f0e99360baa4abe68e1cbd2b7b265238f3568e | /wordcount/wordcount/urls.py | 5471804a85d82df1c9f5885c3be455d52543456c | []
| no_license | alfianinda/Learning-Django-1 | facf3859a00e3afb1248da8e97d0b86f2b4b25f5 | 3391095f27a5806669e481480b4a00e49c85ba6d | refs/heads/master | 2022-12-28T14:10:40.280573 | 2020-10-16T16:03:00 | 2020-10-16T16:03:00 | 304,676,495 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | """wordcount URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
from django.urls import path
from . import views
# URL routes for the wordcount app: home, count results, and about pages.
urlpatterns = [
    # path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('count/', views.count, name='count'),
    path('about/', views.about, name='about'),
]
| [
"[email protected]"
]
| |
1514e431abe87087c79b3300a5fd9372aaab32c5 | a29e2dfb97caa5eb4e4ad21585778cde993df3bb | /stochastic/private_robust/funcdef_util_privacy.py | 782ec06e2932563e6f1e10cefefdd5bcf2b0a7c0 | []
| no_license | proroka/diversity | 6cb3fa86eb2a57c1a5bc45d538089f8991810709 | 2e9b42f9ed9b847e3ee28cebf34c9d873ee51606 | refs/heads/master | 2021-01-21T04:36:26.943641 | 2016-04-17T14:30:36 | 2016-04-17T14:30:36 | 37,940,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 6 09:12:53 2016
@author: amandaprorok
"""
import numpy as np
import scipy as sp
import pylab as pl
import matplotlib.pyplot as plt
import networkx as nx
import sys
import time
import pickle
# my modules
sys.path.append('../plotting')
sys.path.append('../utilities')
sys.path.append('..')
from optimize_transition_matrix_hetero import *
from funcdef_macro_heterogeneous import *
from funcdef_micro_heterogeneous import *
from funcdef_util_heterogeneous import *
import funcdef_draw_network as nxmod
# -----------------------------------------------------------------------------#
# utilities
# returns time of success; if no success, return num_time_steps
def get_convergence_time(ratio, min_ratio):
    """Return the first time step t at which ratio[t] <= min_ratio.

    If the threshold is never reached, return len(ratio) (the number of
    time steps), as the original comment promised. The old code returned
    the *last index* (len(ratio) - 1) in that case, making the no-success
    outcome indistinguishable from success at the final step, and raised
    NameError on an empty sequence.
    """
    for t, value in enumerate(ratio):
        if value <= min_ratio:
            return t
    return len(ratio)
# defines relationship between alpha and beta
def relation_ab(b, range_alpha):
    """Map beta index b to its mirrored alpha index within range_alpha."""
    return len(range_alpha) - 1 - b
"[email protected]"
]
| |
3a55653869604aef5ea9a379f8015741a146140e | 89ccc41a81e00404f620f15b926471418412cb2b | /cloudbaseinit/tests/utils/windows/test_vfat.py | ebfd8786f8e90fb1c971c9bdef7905f5fb3425eb | [
"Apache-2.0"
]
| permissive | pellaeon/bsd-cloudinit | bf6bf73aa4f22cd15bb34ccca06cb8fdfcd6dbc3 | d2fabf40119267164b9e765e59e3f99cd61fdcef | refs/heads/master | 2021-06-11T17:02:36.812016 | 2021-02-25T06:51:02 | 2021-02-25T06:51:02 | 13,364,507 | 75 | 40 | Apache-2.0 | 2021-02-25T06:51:03 | 2013-10-06T15:52:33 | Python | UTF-8 | Python | false | false | 5,250 | py | # Copyright 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import exception
from cloudbaseinit.tests import testutils
from cloudbaseinit.utils.windows import vfat
CONF = vfat.CONF
class TestVfat(unittest.TestCase):
    """Tests for the VFAT helpers in cloudbaseinit.utils.windows.vfat."""

    def _test_is_vfat_drive(self, execute_process_value,
                            expected_logging,
                            expected_response):
        """Run is_vfat_drive() against a canned mlabel.exe result.

        execute_process_value is the (stdout, stderr, returncode) triple
        returned by the mocked osutils.execute_process; the helper then
        checks both the captured log lines and the boolean verdict.
        """
        mock_osutils = mock.Mock()
        mock_osutils.execute_process.return_value = execute_process_value
        with testutils.LogSnatcher('cloudbaseinit.utils.windows.'
                                   'vfat') as snatcher:
            with testutils.ConfPatcher('mtools_path', 'mtools_path'):
                response = vfat.is_vfat_drive(mock_osutils,
                                              mock.sentinel.drive)

                # mlabel.exe must be invoked exactly once, without a shell.
                mdir = os.path.join(CONF.mtools_path, "mlabel.exe")
                mock_osutils.execute_process.assert_called_once_with(
                    [mdir, "-i", mock.sentinel.drive, "-s"],
                    shell=False)
        self.assertEqual(expected_logging, snatcher.output)
        self.assertEqual(expected_response, response)

    def test_is_vfat_drive_fails(self):
        """A non-zero mlabel exit code means the drive is reported not VFAT."""
        test_stderr = b"test stderr"
        expected_logging = [
            "Could not retrieve label for VFAT drive path %r"
            % (mock.sentinel.drive),
            "mlabel failed with error %r" % test_stderr,
        ]
        execute_process_value = (None, test_stderr, 1)
        expected_response = False
        self._test_is_vfat_drive(execute_process_value=execute_process_value,
                                 expected_logging=expected_logging,
                                 expected_response=expected_response)

    def test_is_vfat_drive_different_label(self):
        """A volume label other than 'config-2' yields False."""
        mock_out = b"Volume label is config"
        expected_logging = [
            "Obtained label information for drive %r: %r"
            % (mock.sentinel.drive, mock_out)
        ]
        execute_process_value = (mock_out, None, 0)
        expected_response = False
        self._test_is_vfat_drive(execute_process_value=execute_process_value,
                                 expected_logging=expected_logging,
                                 expected_response=expected_response)

    def test_is_vfat_drive_works(self):
        """The 'config-2' label (even with trailing whitespace) yields True."""
        mock_out = b"Volume label is config-2 \r\n"
        expected_logging = [
            "Obtained label information for drive %r: %r"
            % (mock.sentinel.drive, mock_out)
        ]
        execute_process_value = (mock_out, None, 0)
        expected_response = True
        self._test_is_vfat_drive(execute_process_value=execute_process_value,
                                 expected_logging=expected_logging,
                                 expected_response=expected_response)

    @testutils.ConfPatcher('mtools_path', 'mtools_path')
    @mock.patch('os.chdir')
    def test_copy(self, mock_os_chdir):
        """copy_from_vfat_drive chdirs to the target, runs mcopy.exe once,
        and restores the original working directory afterwards."""
        cwd = os.getcwd()
        mock_osutils = mock.Mock()
        vfat.copy_from_vfat_drive(mock_osutils,
                                  mock.sentinel.drive,
                                  mock.sentinel.target_path)

        mock_os_chdir_calls = [
            mock.call(mock.sentinel.target_path),
            mock.call(cwd),
        ]
        self.assertEqual(mock_os_chdir_calls, mock_os_chdir.mock_calls)
        self.assertEqual(os.getcwd(), cwd)

        mcopy = os.path.join(CONF.mtools_path, "mcopy.exe")
        mock_osutils.execute_process.assert_called_once_with(
            [mcopy, "-s", "-n", "-i", mock.sentinel.drive, "::/", "."],
            shell=False)

    def test_is_vfat_drive_mtools_not_given(self):
        """is_vfat_drive raises when CONF.mtools_path is unset."""
        with self.assertRaises(exception.CloudbaseInitException) as cm:
            vfat.is_vfat_drive(mock.sentinel.osutils,
                               mock.sentinel.target_path)
        expected_message = ('"mtools_path" needs to be provided in order '
                            'to access VFAT drives')
        self.assertEqual(expected_message, str(cm.exception.args[0]))

    def test_copy_from_vfat_drive_mtools_not_given(self):
        """copy_from_vfat_drive raises when CONF.mtools_path is unset."""
        with self.assertRaises(exception.CloudbaseInitException) as cm:
            vfat.copy_from_vfat_drive(mock.sentinel.osutils,
                                      mock.sentinel.drive_path,
                                      mock.sentinel.target_path)
        expected_message = ('"mtools_path" needs to be provided in order '
                            'to access VFAT drives')
        self.assertEqual(expected_message, str(cm.exception.args[0]))
| [
"[email protected]"
]
| |
5a8562ac50a851a7512ab77b535f0040415aacc5 | a89e90c05f5804590643cf5d6d23c8e3d800b089 | /lab4/src/PathPublisher.py | d4f57d5e49d6394c5c27a4cc66ef805484f29c73 | []
| no_license | rajatsc/autonomous-rc-car | edb8cf04fad7fcc8dc53885b5be93a492c0148dc | 6f75bba7d0723fe367c6d23ec8a3874e896e7c82 | refs/heads/master | 2020-04-23T17:59:51.956926 | 2020-01-15T03:36:22 | 2020-01-15T03:36:22 | 171,351,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,445 | py | #!/usr/bin/env python
from threading import Lock
from lab4.msg import *
from nav_msgs.srv import GetMap
from geometry_msgs.msg import PoseArray, Pose, PoseStamped
from std_msgs.msg import Float64
import numpy as np
import rospy
import Utils
class PathPublisher(object):
    """Feeds an MPPI-planned multi-segment path to the controller one goal
    at a time; presumably caps velocity near known obstacles (the obstacle
    handling lives in callbacks beyond this excerpt -- confirm there)."""
    # NOTE(review): units presumed meters / radians / m-s^-1 -- confirm
    # against the controller that consumes these topics.
    XY_THRESHOLD = 1
    THETA_THRESHOLD = np.pi # TODO decide if we want theta threshold
    XY_GOAL_THRESHOLD = 0.35
    THETA_GOAL_THRESHOLD = np.pi # TODO decide if we want theta threshold
    XY_OBS_THRESHOLD = 1
    OBSTACLE_VEL = 0.95
    MAX_VEL = 1.5
def __init__(self):
pub_topic_goal = "/pp/path_goal"
pub_topic_max_vel = "/pp/max_vel"
sub_topic_path = "/multi_planner/mppi_path"
sub_topic_cur_loc = "/pf/ta/viz/inferred_pose"
self.state_lock = Lock()
self.cur_path_idx = 0 # Index of current path in self.paths
self.cur_dest_idx = 0 # Index of current destination in self.paths[self.cur_path_idx]
self.paths = [] # List of paths to process. First element is source, last element is source
self.non_permissible_region = np.load('/home/nvidia/catkin_ws/src/lab4/maps/permissible_region.npy')[::-1,:]
map_service_name = rospy.get_param("~static_map", "/planning/static_map")
print("Getting map from service: ", map_service_name)
rospy.wait_for_service(map_service_name)
self.map_info = rospy.ServiceProxy(map_service_name, GetMap)().map.info
bad_waypoints_csv = rospy.get_param("~bad_waypoints_csv", "/home/nvidia/catkin_ws/src/lab4/final/bad_waypoints.csv")
mode = rospy.get_param("~mode", "pixel")
self.obstacles = map(np.array, Utils.load_csv_to_configs(bad_waypoints_csv, mode, self.map_info))
self.goal_pub = rospy.Publisher(pub_topic_goal, PoseStamped, queue_size=10)
self.max_vel_pub = rospy.Publisher(pub_topic_max_vel, Float64, queue_size=10)
path_sub = rospy.Subscriber(sub_topic_path, MPPIPath, self.path_cb)
cur_loc_sub = rospy.Subscriber(sub_topic_cur_loc, PoseStamped, self.location_cb)
print "Ready to receive path!"
def process_mppi_path(self, msg):
path = msg.path
paths = []
for pose_arr in path:
sub_path = []
for pose in pose_arr.poses:
sub_path.append(np.array(Utils.pose_to_config(pose)))
paths.append(sub_path)
return paths
def path_cb(self, msg):
rospy.logerr("Received path!")
self.state_lock.acquire()
self.paths = self.process_mppi_path(msg)
self.cur_path_idx = 0
self.cur_dest_idx = 0
self.state_lock.release()
goal = self.get_next_dest()
rospy.logerr("Publishing new goal")
self.goal_pub.publish(Utils.config_to_posestamped(goal))
def location_cb(self, msg):
# gets current location
# checks to see
curr_pose = np.array(Utils.posestamped_to_config(msg))
if len(self.paths) == 0:
return
if self.near_cur_dest(curr_pose):
goal = self.get_next_dest()
if goal == None:
return
rospy.logerr("Publishing new goal")
self.goal_pub.publish(Utils.config_to_posestamped(goal))
if self.near_obstacle(curr_pose):
self.max_vel_pub.publish(Float64(self.OBSTACLE_VEL))
print "near obstacle"
else:
self.max_vel_pub.publish(Float64(self.MAX_VEL))
def near_cur_dest(self, curr_pose):
if len(self.paths) == 0:
return False
dest = self.paths[self.cur_path_idx][self.cur_dest_idx]
difference_from_dest = np.abs(curr_pose - dest)
xy_distance_to_dest = np.linalg.norm(difference_from_dest[:2])
theta_distance_to_dest = difference_from_dest[2] % (2 * np.pi)
if self.dest_is_goal():
return xy_distance_to_dest < self.XY_GOAL_THRESHOLD and theta_distance_to_dest < self.THETA_GOAL_THRESHOLD
else:
return xy_distance_to_dest < self.XY_THRESHOLD# and theta_distance_to_dest < self.THETA_THRESHOLD
def near_obstacle(self, curr_pose):
if len(self.paths) == 0:
return False
for obstacle in self.obstacles:
difference_from_obs = np.abs(curr_pose - obstacle)
xy_distance_to_obs = np.linalg.norm(difference_from_obs[:2])
if xy_distance_to_obs < self.XY_OBS_THRESHOLD:
return True
return False
def get_next_dest(self):
self.state_lock.acquire()
while True:
# Try to advance dest_idx within path
if self.cur_dest_idx < len(self.paths[self.cur_path_idx])-1:
self.cur_dest_idx += 1
if self.dest_is_goal():
self.state_lock.release()
return self.paths[self.cur_path_idx][self.cur_dest_idx]
# Otherwise advance to beginning of next path (if possible)
elif self.cur_path_idx < len(self.paths)-1:
self.cur_path_idx += 1
self.cur_dest_idx = 0
if self.dest_is_goal():
self.state_lock.release()
return self.paths[self.cur_path_idx][self.cur_dest_idx]
else:
rospy.logerr("Route completed! No more paths to publish!")
self.state_lock.release()
return None
config = self.paths[self.cur_path_idx][self.cur_dest_idx]
map_config = Utils.our_world_to_map(config, self.map_info)
if self.non_permissible_region[int(map_config[1]),int(map_config[0])]:
continue
else:
self.state_lock.release()
return config
def dest_is_goal(self):
return self.cur_dest_idx == len(self.paths[self.cur_path_idx]) - 1
# Standalone entry point: start the ROS node and hand control to the callbacks.
if __name__ == "__main__":
    rospy.init_node("path_publisher", anonymous=True)
    pp = PathPublisher()
    rospy.spin()
| [
"[email protected]"
]
| |
ed215f23e89ce11cc33f6bae5bb84b135ebc61c1 | 1ba4527e21ee1bf52a2603fc7f2ea2458bb88544 | /venv/Scripts/pip-script.py | 60f316b8241a67f76b1054219a824e21c05d02d6 | []
| no_license | random-Amanda/Cerebrex-1 | de5b903cbef94c0f536c253c28f3929781d3a2c3 | d48249a0b38a22b59e23dff9f5a22565b4901d7f | refs/heads/master | 2020-05-16T23:24:44.599944 | 2019-04-24T06:47:22 | 2019-04-24T06:47:22 | 183,363,269 | 0 | 0 | null | 2019-04-25T05:37:01 | 2019-04-25T05:37:01 | null | UTF-8 | Python | false | false | 409 | py | #!C:\Users\Dell\PycharmProjects\Trial3\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools wrapper: looks up pip's console-script entry point
# and executes it, forwarding its return code as the process exit status.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"[email protected]"
]
| |
c994ba0a911d0bf5726934a74e94cc5b6ea8197c | da878a03674024f290775b2c10d745edf091a4dc | /Global Fires/venv/Scripts/pip3-script.py | d05b0aa101ecf28df5c3555bf979ec367071f105 | [
"MIT"
]
| permissive | EnriqueGambra/Global-Fires | 1b3aa5670dbb69804c733b865c7906f6e9698995 | 652606ccd573e7bfd7a232876f0b59fcefc15f9b | refs/heads/master | 2020-08-03T00:44:38.156931 | 2019-09-28T23:30:43 | 2019-09-28T23:30:43 | 211,568,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!"C:\Users\Owner\github-Repos\Global-Fires\Global Fires\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools wrapper: looks up the 'pip3' console-script entry
# point and executes it, forwarding its return code as the process exit status.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"[email protected]"
]
| |
8f9b8fb3dfdb8b5128d7300ed65cfae9c99243b2 | 0acedcc629061f76c942192be0047876e6307db8 | /Bittorrent/bittorrent_tests.py | 4e2096217582816a5f30ebd2bc6a01abd68ff36c | []
| no_license | MFAshby/pybittorrent | 9d58280c0c72874e68f319b864dbc477fb29c5cf | 0a679e06d7696be1813c74c071920a9fcfac88b3 | refs/heads/master | 2021-01-20T08:44:11.917987 | 2014-08-12T22:07:59 | 2014-08-12T22:07:59 | 22,546,520 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,670 | py | from unittest import TestCase, main
from io import BytesIO
import bittorrent
from bitarray import bitarray
from struct import unpack
def do_test_fn_return_output_socket(function, *args):
    """Run `function` against a fresh in-memory socket and rewind it for reading."""
    fake_socket = BytesIO()
    function(fake_socket, *args)
    fake_socket.seek(0)
    return fake_socket
def read_int(socket):
    """Consume 4 bytes from `socket` and return them as a big-endian unsigned int."""
    (value,) = unpack(">I", socket.read(4))
    return value
class BittorrentTest(TestCase):
    """Unit tests for the wire-message encoders and helpers in `bittorrent`.

    Each do_* encoder is exercised through an in-memory BytesIO "socket"
    (see do_test_fn_return_output_socket) and the emitted bytes are checked
    field by field against the BitTorrent peer protocol layout.
    """

    def test_handshake(self):
        info_hash = "info4567890123456789"
        peer_id = "peer4567890123456789"
        output_socket = do_test_fn_return_output_socket(bittorrent.do_handshake, info_hash, peer_id)
        self.assertTrue(output_socket.read(1)[0] == 19) #first byte is protocol string length, should be 19
        self.assertTrue(output_socket.read(19).decode("UTF-8") == "BitTorrent protocol") #next 19 bytes are the protocol string
        self.assertTrue(output_socket.read(8) == b'\x00'*8) #8 empty bytes for protocol extensions
        self.assertTrue(output_socket.read(20).decode("UTF-8") == info_hash) #20 byte info hash
        self.assertTrue(output_socket.read(20).decode("UTF-8") == peer_id) #20 byte peer id

    def test_peer_id(self):
        peer_id = bittorrent.get_peer_id()
        self.assertTrue(len(peer_id) == 20)
        self.assertTrue(peer_id.startswith("-PY0001-"))
        #subsequent call returns a different peer_id
        peer_id_2 = bittorrent.get_peer_id()
        self.assertTrue(len(peer_id_2) == 20)
        self.assertTrue(peer_id_2.startswith("-PY0001-"))
        self.assertTrue(peer_id != peer_id_2)

    def test_keep_alive(self):
        output_socket = do_test_fn_return_output_socket(bittorrent.do_keep_alive)
        #4 byte unsigned int, representing 0 length
        self.assertTrue(read_int(output_socket) == 0)

    def test_choke(self):
        output_socket = do_test_fn_return_output_socket(bittorrent.do_choke)
        self.assertTrue(read_int(output_socket) == 1) #length 1
        self.assertTrue(output_socket.read(1)[0] == 0) #message ID 0

    def test_unchoke(self):
        output_socket = do_test_fn_return_output_socket(bittorrent.do_unchoke)
        self.assertTrue(read_int(output_socket) == 1) #length 1
        self.assertTrue(output_socket.read(1)[0] == 1) #message ID 1

    def test_interested(self):
        output_socket = do_test_fn_return_output_socket(bittorrent.do_interested)
        self.assertTrue(read_int(output_socket) == 1) #length 1
        self.assertTrue(output_socket.read(1)[0] == 2) #message ID 2

    def test_not_interested(self):
        output_socket = do_test_fn_return_output_socket(bittorrent.do_not_interested)
        self.assertTrue(read_int(output_socket) == 1) #length 1
        self.assertTrue(output_socket.read(1)[0] == 3) #message ID 3

    def test_have(self):
        output_socket = do_test_fn_return_output_socket(bittorrent.do_have, 1)
        self.assertTrue(read_int(output_socket) == 5) #length 5
        self.assertTrue(output_socket.read(1)[0] == 4) #message ID 4
        self.assertTrue(read_int(output_socket) == 1) #int piece index, 1
        output_socket = do_test_fn_return_output_socket(bittorrent.do_have, 9999)
        self.assertTrue(read_int(output_socket) == 5) #length 5
        self.assertTrue(output_socket.read(1)[0] == 4) #message ID 4
        self.assertTrue(read_int(output_socket) == 9999) #int piece index, 9999

    def test_bitfield(self):
        pieces_bitarray = bitarray("00000000000001")
        output_socket = do_test_fn_return_output_socket(bittorrent.do_bitfield, pieces_bitarray)
        self.assertTrue(read_int(output_socket) == 3) #length 3
        self.assertTrue(output_socket.read(1)[0] == 5) #message ID 5
        self.assertTrue(output_socket.read() == pieces_bitarray.tobytes()) #14 bits with the last bit set, padded to 2 bytes with 0s.

    def test_request(self):
        # NOTE(review): '2^14' is XOR (== 12), almost certainly 2**14 was meant;
        # the test stays self-consistent (same value on both sides) so it still
        # passes -- confirm the intended block length.
        output_socket = do_test_fn_return_output_socket(bittorrent.do_request, 15, 100, 2^14)
        self.assertTrue(read_int(output_socket) == 13) #length 13
        self.assertTrue(output_socket.read(1)[0] == 6) #message ID 6
        self.assertTrue(read_int(output_socket) == 15) #piece index (0 in this case)
        self.assertTrue(read_int(output_socket) == 100) #begin index (0 in this case)
        self.assertTrue(read_int(output_socket) == 2^14) #length (2^14 in this case)

    def test_piece(self):
        test_block = b"TESTINGBLOCKOFDATA"
        output_socket = do_test_fn_return_output_socket(bittorrent.do_piece, 0, 100, test_block)
        self.assertTrue(read_int(output_socket) == 9+len(test_block))
        self.assertTrue(output_socket.read(1)[0] == 7) #message ID 7
        self.assertTrue(read_int(output_socket) == 0) #piece index
        self.assertTrue(read_int(output_socket) == 100) #begin
        self.assertTrue(output_socket.read() == test_block)

    def test_cancel(self):
        # NOTE(review): same '2^14' XOR caveat as test_request.
        output_socket = do_test_fn_return_output_socket(bittorrent.do_cancel, 15, 100, 2^14)
        self.assertTrue(read_int(output_socket) == 13) #length 13
        self.assertTrue(output_socket.read(1)[0] == 8) #message ID 8
        self.assertTrue(read_int(output_socket) == 15) #piece index (0 in this case)
        self.assertTrue(read_int(output_socket) == 100) #begin index (0 in this case)
        self.assertTrue(read_int(output_socket) == 2^14) #length (2^14 in this case)

    def test_piece_check(self):
        from hashlib import sha1
        self.assertFalse(bittorrent.check_piece(b"TEST_PIECE_DATA", b"01234567890123456789"))
        piece_hash = sha1()
        piece_hash.update(b"TEST_PIECE_DATA")
        # NOTE(review): this passes the sha1 *object*, not its .digest(); if the
        # intent was to verify a matching hash it should likely be
        # assertTrue(bittorrent.check_piece(..., piece_hash.digest())) -- confirm.
        self.assertFalse(bittorrent.check_piece(b"TEST_PIECE_DATA", piece_hash))

    def test_should_poll_tracker(self):
        peers = [] # poll if no peers.
        last_poll_time = 0
        current_time = 10
        interval = 30
        self.assertTrue(bittorrent.should_poll_tracker(peers, last_poll_time, current_time, interval))
        peers = ["Here is a peer"] # poll if time over interval time.
        last_poll_time = 0
        current_time = 40
        interval = 30
        self.assertTrue(bittorrent.should_poll_tracker(peers, last_poll_time, current_time, interval))
        peers = ["Here is a peer"] # don't poll if peers, and under time
        last_poll_time = 0
        current_time = 10
        interval = 30
        self.assertFalse(bittorrent.should_poll_tracker(peers, last_poll_time, current_time, interval))

    def test_tracker_request(self):
        from urllib.parse import parse_qsl, urlparse
        from hashlib import sha1
        from urllib.parse import quote
        from bencode import bencode
        #generate parameters
        info_dict = {"piece length": 512000,
                     "pieces": "0123456789012345678901234567890123456789",
                     "name":"myfile.txt",
                     "length": 1024000}
        s = sha1()
        s.update(bytes(bencode(info_dict), "UTF-8"))
        test_info_hash = quote(s.digest())
        metainfo_file = {"info": info_dict,
                         "announce": "http://localhost:8000"}
        peer_id = "01234567890123456789"
        port = 8001
        uploaded = 512000
        downloaded = 511999
        left = 1
        event = "started"
        #generate the request URL
        request_url = bittorrent.tracker_request_url(metainfo_file, peer_id, port, uploaded, downloaded, left, event)
        #check the request URL can be parsed as a URL...
        request_dict = dict(parse_qsl(urlparse(request_url).query, keep_blank_values=1))
        #check the request contains the appropriate info.
        self.assertTrue(request_dict["info_hash"] == test_info_hash)
        self.assertTrue(request_dict["peer_id"] == peer_id)
        self.assertTrue(request_dict["port"] == "8001")
        self.assertTrue(request_dict["uploaded"] == "512000")
        self.assertTrue(request_dict["downloaded"] == "511999")
        self.assertTrue(request_dict["left"] == "1")
        self.assertTrue(request_dict["event"] == "started")

    def test_check_pieces(self):
        from hashlib import sha1
        input_file = BytesIO(b"0123456789012345678901234567890123456789012345678901234567890123456789")
        piece_length = 20
        #generate pieces as if we had every piece of the file.
        pieces = b""
        file_piece = input_file.read(piece_length)
        while file_piece:
            s = sha1()
            s.update(file_piece)
            pieces += s.digest()
            file_piece = input_file.read(piece_length)
        input_file.seek(0) #seek the file back to 0...
        have_pieces = bittorrent.check_pieces(input_file, piece_length, pieces)
        self.assertTrue(have_pieces == bitarray("1111"))
        #same thing again, but change one of the piece hashes
        input_file.seek(0) #seek the file back to 0...
        pieces = pieces[0:20] + b"00000000000000000000" + pieces[40:]
        have_pieces = bittorrent.check_pieces(input_file, piece_length, pieces)
        #should now be "missing" the second piece of the file.
        self.assertTrue(have_pieces == bitarray("1011"))

    def test_chunks(self):
        index = 0
        for chunk in bittorrent.chunks("TESTING", 2):
            if index == 0:
                self.assertTrue(chunk == "TE")
            elif index == 2:
                self.assertTrue(chunk == "ST")
            elif index == 4:
                self.assertTrue(chunk == "IN")
            elif index == 6:
                self.assertTrue(chunk == "G")
            self.assertTrue(index <= 6)
            index = index+2
# Run the whole suite with unittest's CLI when executed directly.
if __name__ == "__main__":
    main()
| [
"[email protected]"
]
| |
e84eb65f2492848d6267cfd4c0c92be71ce43095 | b2ddfebe4a2414741a9d1c6ecb6a236640f80063 | /SomePractice/tensorflow机器学习/CNN实现/StanfordDogs.py | a759561e0ba95a329e570ffa1167b8f582ddda15 | []
| no_license | BBHNation/PracticeProjects | e4753ad0c753996925e7d0b70235f823d85e2513 | 1c89c6122a045b784a7773848355b2c4dbc80e25 | refs/heads/master | 2021-01-20T14:23:07.385766 | 2018-08-26T14:13:32 | 2018-08-26T14:13:32 | 90,598,401 | 7 | 10 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | # coding:utf-8
import glob

# Gather every dog image; one sub-folder per breed (ImageNet "n02*" ids).
image_file_names = glob.glob("./Images-dogs/n02*/*.jpg")
print(image_file_names[0:2])

from itertools import groupby
from collections import defaultdict

# Training / testing filename sets, keyed by breed.
training_data_set = defaultdict(list)
testing_data_set = defaultdict(list)

# Pair each filename with its breed; the breed is the folder name (path component 2).
image_file_name_with_breed = map(lambda filename:(filename.split("/")[2], filename), image_file_names)
print(image_file_name_with_breed[0:2])

# Group the images by breed (component 0 of each (breed, filename) pair).
for dog_breed, breed_images in groupby(image_file_name_with_breed, lambda x: x[0]):
    # Enumerate this breed's images and put roughly 20% into the testing set.
    for i, breed_image in enumerate(breed_images):
        if i % 5 == 0:
            testing_data_set[dog_breed].append(breed_image[1])
        else:
            training_data_set[dog_breed].append(breed_image[1])
    # Check this breed's testing share is at least ~18% of its images.
    breed_training_count = len(training_data_set[dog_breed])
    breed_testing_count = len(testing_data_set[dog_breed])
    print(breed_testing_count)
    print(breed_training_count)
    # BUG FIX: the original compared '== 0' after an integer division (which is
    # always 0 under Python 2), so the check could never fail. Use float
    # division and enforce the 18% minimum the comment describes.
    assert round(breed_testing_count / float(breed_testing_count + breed_training_count), 2) > 0.18, "Not enough testing images"
| [
"[email protected]"
]
| |
969a5a3d3bdc82c7be29ab7942e91a3bae7f295f | 7f382ec3228f1f41e7ec305a4322c6f72d28ea78 | /src/mapLoader.py | 22de4f52b66fe941b5bc013df2966f9ba47f5f70 | []
| no_license | NPIPHI/Python-Platformer | 76e03fc24e179c42bdb1a25c815f47e6ffd67b84 | 1133c16ad96f7288e6f76455dc9bc17d1eb17055 | refs/heads/master | 2022-01-05T19:56:30.429949 | 2019-06-28T15:19:12 | 2019-06-28T15:19:12 | 192,562,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from mapElements import *
def load(map_name):
    """Read a map file and evaluate each line into a map element.

    Each line of the file is a Python expression (typically calling the
    constructors exposed by mapElements). Returns the evaluated objects
    as a list, one per line.

    NOTE: eval() executes arbitrary code -- only load trusted map files.
    """
    # BUG FIX: the original never closed the file handle; 'with' guarantees it.
    with open(map_name, 'r') as map_file:
        lines = map_file.read().splitlines()
    return list(map(eval, lines))
"[email protected]"
]
| |
b0059167390bda100df2b9fb1dfdfd3c359fe18c | 4b2f0dae781d91baec5b94055e23720838c0feda | /Fleet Simulation/archive/simFunctionsVer8.py | 8b6810e2f51188af4585a31306f5e394dbfc12ca | []
| no_license | tiff413/EV-Technology-2019 | ec88eb411a3ce5f39387b682cd853da86e364ec3 | 7fe8a3bda28a5ac9b6d0b29fa27621e9ceca4fe5 | refs/heads/master | 2021-01-03T22:28:16.873563 | 2020-03-13T11:01:46 | 2020-03-13T11:01:46 | 240,260,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,093 | py | import pandas as pd
import numpy as np
import datetime as dt
import time
# CHOOSE NUMBER OF CHUNKS IN AN HOUR
# e.g. 3 chunks would divide the hour into 20-min shifts
chunks = 2
##############################
# TIME FUNCTIONS
##############################
# CONVERTS TIME INTO DATETIME
def readTime(ti):
if len(ti) == 5: read = (dt.datetime.strptime(ti, "%H:%M")).time()
elif len(ti) == 8: read = (dt.datetime.strptime(ti, "%H:%M:%S")).time()
elif len(ti) == 10: read = (dt.datetime.strptime(ti, "%Y-%m-%d")).date()
else: read = dt.datetime.strptime(ti, "%Y-%m-%d %H:%M:%S")
return read
# READS IN A DATETIME AND REFORMATS IT
def rereadTime(ti):
reread = str(ti)
read = dt.datetime.strptime(reread, "%Y-%m-%d %H:%M:%S")
return read
# INCREMENTS TIME BY THE HOUR TO EXECUTE SIMULATION
def incrementTime(ti):
return (rereadTime(ti) + dt.timedelta(hours=1/chunks))
##############################
# MISC FUNCTIONS
##############################
# SELECT FLEET DATA IN EXECUTION FILE BASED ON:
# number of cars
# battery size
# number of fast charge points
def selectCase(df, params):
    """Filter df down to the rows matching every column/value pair in params."""
    for column, wanted in params.items():
        df = df.loc[df[column] == wanted]
    return df
# RETRIEVES COLUMN DATA FROM DATAFRAME
def getData(df, col):
    """Return the first entry of column `col` (by position, not by label)."""
    column_values = df[col].values
    return column_values[0]
# GENERATE CAR DATA AND CHARGE POINT DATA
def getLists(df):
    """Build the initial car list and charge-point list for one simulation case.

    Each charge point entry is [chargeRate_kW, inUse]. Each car entry holds
    [size, 1, size, chargePt, -1, nan, nan] -- presumably battery size/level,
    depot flag, assigned charge point and shift bookkeeping; confirm the exact
    column order against the carDataDF construction in the execution file.
    Returns (car_data, chargePt_data).
    """
    # initialise charge points data (rapid first, so they get the lowest ids)
    slow_cps = getData(df, 'slowChargePts')
    fast_cps = getData(df, 'fastChargePts')
    rapid_cps = getData(df, 'rapidChargePts')
    chargePts = slow_cps + fast_cps + rapid_cps
    chargePt_data = ([[22,1]]*rapid_cps + [[7,1]]*fast_cps + [[3,1]]*slow_cps)
    # initialise car data -- build each row independently so rows never alias
    # one shared inner list (the original's [[...]]*n repeated one object).
    smallCars = getData(df, 'smallCars')
    mediumCars = getData(df, 'mediumCars')
    largeCars = getData(df, 'largeCars')
    car_data = ([[30, 1, 30, np.nan, -1, np.nan, np.nan] for _ in range(smallCars)]
                + [[40, 1, 40, np.nan, -1, np.nan, np.nan] for _ in range(mediumCars)]
                + [[70, 1, 70, np.nan, -1, np.nan, np.nan] for _ in range(largeCars)])
    # assign available charge points to cars
    # BUG FIX: cap at the fleet size -- with more charge points than cars the
    # original indexed past the end of car_data and raised IndexError.
    for cp_id in range(min(chargePts, len(car_data))):
        size = car_data[cp_id][0]
        car_data[cp_id] = [size,1,size,cp_id,-1,np.nan,np.nan]
    return car_data, chargePt_data
# ORGANISE DATAFRAME FOR VIEWING
def dfFunction(df, col):
    """Reshape the simulation frame for viewing: index by (time, totalCost, col)."""
    indexed = df.set_index(['time', 'totalCost', col])
    return indexed.T.stack().T
######################################
# FOR COLOURING CELLS IN SIMULATION DF
######################################
def crColour(val):
    """CSS text colour for a charge-diff cell: green for >= 0, red otherwise."""
    shade = 'green' if val >= 0 else 'red'
    return 'color: %s' % shade
def crBackground(val):
    """CSS background for a charge-diff cell: green tint (+), yellow (0), red tint (-)."""
    if val > 0:
        shade = '#adfc83'
    elif val == 0:
        shade = '#daed0c'
    else:
        shade = '#fab9b9'
    return 'background-color: %s' % shade
def eventBackground(val):
    """CSS background for an event cell; unknown events get no colour (None)."""
    palette = {
        'full': '#00b200',
        'charge': '#adfc83',
        'drive': '#fab9b9',
        'wait': '#daed0c',
        'RC': 'red',
    }
    return 'background-color: %s' % palette.get(val, None)
def styleDF(df):
    """Apply the three colour maps to a simulation DataFrame for notebook display."""
    styled = df.style.applymap(crColour, subset=['chargeDiff'])
    styled = styled.applymap(crBackground, subset=['chargeDiff'])
    styled = styled.applymap(eventBackground, subset=['event'])
    return styled
################################################################
# UNPACK SHIFT DATA FROM DATA FRAME INTO LIBRARY (SHIFTS BY CAR)
################################################################
def unpackShifts(carData, allShiftsDF):
    """Expand per-day shift strings into one chronologically sorted DataFrame per car.

    allShiftsDF rows carry a 'car' id, a 'day' date and a 'shift' column whose
    text is a Python list literal of "HH:MM-HH:MM" strings. A shift whose end
    time precedes its start time is assumed to run overnight into the next day.
    Returns {str(car_id): DataFrame with 'startShift'/'endShift' datetime strings}.

    NOTE(review): 'shift' is parsed with eval(), which executes arbitrary code;
    safe only for trusted shift files -- consider ast.literal_eval.
    """
    # INITIALISE LIBRARY
    shiftsByCar = {}
    # FOR ALL CARS:
    for cars in range(0, len(carData)):
        # SELECT DATA FOR CAR
        shiftsDFcar = allShiftsDF.loc[allShiftsDF['car']==cars]
        # CREATE NEW DATAFRAME FOR UNPACKED SHIFTS
        shiftsDF = pd.DataFrame(columns=["startShift","endShift"])
        # FOR EVERY DAY, UNPACK SHIFTS INTO DATA FRAME:
        for day in range(len(shiftsDFcar)):
            # READ IN THE DATE AS A STRING AND LIST OF SHIFTS
            dayStr = str(shiftsDFcar.loc[(shiftsDFcar.index[day]), 'day'])
            shiftsLi = eval(shiftsDFcar.loc[(shiftsDFcar.index[day]), 'shift'])
            # ***** UNPACK AND REFORMAT SHIFTS INTO NEW DATAFRAME *****
            # FOR EVERY SHIFT:
            for shift in range(0, len(shiftsLi)):
                # SPLIT SHIFT INTO START SHIFT AND END SHIFT
                splitShift = shiftsLi[shift].split("-")
                # IF START SHIFT < END SHIFT, ASSUME SHIFT DOESN'T RUN OVERNIGHT
                if readTime(splitShift[0]) < readTime(splitShift[1]):
                    # FORMAT DATE AND TIME TO START AND END SHIFT
                    startS = dayStr + " " + splitShift[0]
                    endS = dayStr + " " + splitShift[1]
                # IF START SHIFT > END SHIFT, ASSUME SHIFT RUNS OVERNIGHT
                else:
                    # FOR START SHIFT, FORMAT USING CURRENT DATE
                    startS = dayStr + " " + splitShift[0]
                    # FOR END SHIFT, FORMAT USING DATE OF THE NEXT DAY
                    nextDay = readTime(dayStr) + dt.timedelta(days=1)
                    endS = str(nextDay) + " " + splitShift[1]
                # APPEND START AND END SHIFT AS A ROW IN SHIFTS DF
                newRow = {"startShift" : startS,
                          "endShift" : endS}
                shiftsDF = shiftsDF.append(newRow, ignore_index=True)
        # SORT SHIFTS DF AND ASSIGN TO LIBRARY
        shiftsDF = shiftsDF.sort_values(by=['startShift'])
        shiftsDF = shiftsDF.reset_index(drop=True)
        shiftsByCar['%s' % cars] = shiftsDF
    return shiftsByCar
##############################################
# IMPLEMENT CHANGES AT START AND END OF SHIFTS
##############################################
# WHEN SHIFT STARTS:
# Remove from depot
# Let inDepot = 0 in carDataDF
# If connected to chargePt, remove chargePt
# WHEN SHIFT ENDS:
# Enter depot
# Let inDepot = 1 in carDataDF
def inOutDepot(carDataDF, shiftsByCar, time, depot, chargePtDF, toChargeDF, eventChange):
    """Move cars in/out of the depot at shift boundaries.

    Times are compared as strings, so `time` must render exactly like the
    'latestEndShift'/'startShift' strings produced by unpackShifts. A car
    re-enters the depot at the end of its current shift; at the start of its
    next shift it leaves, its charge point is released and its shift
    bookkeeping advances. eventChange is set True whenever anything happened.
    Returns (carDataDF, depot, chargePtDF, toChargeDF, eventChange).
    """
    # FOR EVERY CAR:
    for car in range(0, len(carDataDF)):
        # ***** CHECK IF CAR IS AT THE END OF A SHIFT *****
        # IF TIME == END TIME OF CURRENT SHIFT:
        if str(time) == carDataDF.loc[car, 'latestEndShift']:
            # ENTER DEPOT
            carDataDF.loc[car,'inDepot'] = 1
            depot.append(car)
            # RECOGNISE AN EVENT HAS HAPPENED
            eventChange = True
        # ***** CHECK IF CAR IS AT THE START OF A SHIFT *****
        # READ INDEX OF CURRENT SHIFT AND LENGTH OF SHIFTS BY CAR
        shiftIndex = carDataDF.loc[car, 'shiftIndex']
        lastShiftIndex = len(shiftsByCar[str(car)])
        # IF NEXT SHIFT EXISTS:
        if (shiftIndex + 1) < lastShiftIndex:
            # READ START TIME AND END TIME OF THE NEXT SHIFT
            nextStartShift = shiftsByCar[str(car)].loc[shiftIndex+1, 'startShift']
            nextEndShift = shiftsByCar[str(car)].loc[shiftIndex+1, 'endShift']
            # IF TIME == START TIME OF THE NEXT SHIFT:
            if str(time) == nextStartShift:
                # EXIT DEPOT
                carDataDF.loc[car,'inDepot'] = 0
                depot.remove(car)
                # REMOVE CHARGE PT IN CHARGE PT DF (free the point for others)
                pt = carDataDF.loc[car,'chargePt']
                if not np.isnan(pt):
                    chargePtDF.loc[pt,'inUse'] = np.nan
                    # print("remove charge point "+str(pt))
                # REMOVE CHARGE PT IN CAR DATA DF
                carDataDF.loc[car,'chargePt'] = np.nan
                # LET CHARGE RATE = 0 IN TO-CHARGE DF
                toChargeDF.loc[car,'chargeRate'] = 0
                # UPDATE SHIFT DATA IN CAR DATA DF
                carDataDF.loc[car, 'shiftIndex'] = shiftIndex + 1
                carDataDF.loc[car, 'latestStartShift'] = nextStartShift
                carDataDF.loc[car, 'latestEndShift'] = nextEndShift
                # RECOGNISE AN EVENT HAS HAPPENED
                eventChange = True
    return carDataDF, depot, chargePtDF, toChargeDF, eventChange
################################################
# READ CARS WITH FULL BATTERY INTO SIMULATION DF
################################################
def readFullBattCars(carDataDF, simulationDF, toChargeDF, time, totalCost, eventChange):
    """Zero the charge rate of fully charged depot cars and flag new 'full' events.

    The current set of full cars is compared against the cars whose event was
    "full" in the previous step (the last len(carDataDF) rows of simulationDF);
    a difference with at least as many full cars marks an event change.
    `time` and `totalCost` are accepted for signature symmetry but unused here.
    Returns (toChargeDF, eventChange).
    """
    # SELECT VEHICLES IN THE DEPOT WITH FULL BATTERY
    chargeDF = carDataDF.loc[carDataDF['inDepot'] == 1]
    fullBattDF = chargeDF.loc[chargeDF['battkW'] == chargeDF['battSize']]
    # IF CAR IS FULLY CHARGED, LET CHARGE RATE = 0 IN TO-CHARGE DF
    for row in range(len(fullBattDF)):
        car = fullBattDF.index[row]
        toChargeDF.loc[car, 'chargeRate'] = 0
    # ***** IF NEW CARS REACH FULL BATT, RECOGNISE EVENT *****
    # CREATE A SET FOR CARS THAT HAD FULL BATT IN PREVIOUS TIME
    prevSimData = simulationDF.iloc[-len(carDataDF):]
    prevFullBatt = prevSimData.loc[prevSimData['event']=="full"]
    prevFullBattCars = set(prevFullBatt['car'].values.tolist())
    # CREATE A SET FOR CARS THAT CURRENTLY HAVE FULL BATT
    fullBattCars = set(fullBattDF.index.tolist())
    # IF NO. OF FULL BATT CARS >= PREVIOUS NO. OF FULL BATT CARS:
    if len(fullBattCars) >= len(prevFullBattCars):
        # AND IF INDEX OF FULL BATT CARS ARE DIFFERENT FROM PREVIOUS FULL BATT CARS:
        if fullBattCars != prevFullBattCars:
            # RECOGNISE AN EVENT HAS HAPPENED
            eventChange = True
    return toChargeDF, eventChange
################################################
# READ TARIFF CHANGES
################################################
def readTariffChanges(time, pricesDF, company, eventChange):
    """Flag an event when `time` is exactly the start or end of the green tariff zone."""
    companyRows = pricesDF['company'] == company
    zoneStart = pricesDF.loc[companyRows, 'startGreenZone'].to_string(index=False)
    zoneEnd = pricesDF.loc[companyRows, 'endGreenZone'].to_string(index=False)
    # Compare the wall-clock part only (drop the date).
    currentHr = readTime(str(time.time()))
    if currentHr in (readTime(zoneStart), readTime(zoneEnd)):
        eventChange = True
    return eventChange
###############################
# LOOK AT CARS OUTSIDE THE DEPOT
# FOR CARS THAT NEED RAPID CHARGING: RAPID CHARGE
# FOR CARS THAT DON'T NEED RAPID CHARGING: DECREASE BATT
###############################
def driving(carDataDF, time, rcCount, RCduration, RCperc, simulationDF, driveDataByCar, ind, totalCost):
    """Advance all out-of-depot cars by one chunk: drive them or rapid charge them.

    A car rapid charges when it is part-way through a rapid-charge session
    (consecutive 'RC' events in simulationDF) or when its battery is below
    RCperc% of capacity; otherwise its battery is drained using the
    mileage/mpkw values in driveDataByCar at row `ind`. Rapid charging adds
    50 kW per hour at 0.30 per kWh to totalCost.
    Returns (carDataDF, rcCount, simulationDF, totalCost).

    NOTE(review): the event lookup relies on Series.to_string(index=False),
    whose exact formatting (leading whitespace) varies between pandas
    versions, so the equality test against "RC" may silently miss -- verify
    on the pandas version in use.
    """
    # FIND CARS OUTSIDE OF DEPOT
    drivingCarsDF = carDataDF.loc[carDataDF["inDepot"]==0]
    # ***** DIVIDE CARS THAT NEED RAPID CHARGING AND CARS THAT DONT INTO 2 LISTS *****
    # FIND CARS TO RAPID CHARGE AND APPEND TO LIST
    toRapidCharge = []
    # IF NO NEED TO RAPID CHARGE, APPEND TO ANOTHER LIST
    dontRapidCharge = []
    # FOR CARS OUTSIDE OF DEPOT:
    #   * CHECK FOR CARS CURRENTLY RAPID CHARGING
    #   * THEN CHECK FOR CARS THAT NEED RAPID CHARGING
    for row in range(len(drivingCarsDF)):
        car = drivingCarsDF.index[row]
        # FIND DURATION OF RAPID CHARGE IN CHUNKS
        RCchunks = np.ceil(chunks/(60/RCduration))
        # PREPARE BASE CASE FOR WHILE LOOP
        chunkCount = 1
        checkTime = str(time - ((dt.timedelta(hours=1/chunks))*chunkCount))
        prevSimChunk = simulationDF.loc[simulationDF['time']==checkTime]
        checkEvent = prevSimChunk.loc[prevSimChunk['car']==car, 'event'].to_string(index=False)
        # CHECK IF CAR HAS BEEN RAPID CHARGING (walk back through prior chunks)
        while checkEvent == "RC":
            chunkCount += 1
            checkTime = str(time - ((dt.timedelta(hours=1/chunks))*chunkCount))
            prevSimChunk = simulationDF.loc[simulationDF['time']==checkTime]
            checkEvent = prevSimChunk.loc[prevSimChunk['car']==car, 'event'].to_string(index=False)
        # IF CAR IS RAPID CHARGING AND REQUIRES MORE RAPID CHARGING:
        if 1 < chunkCount <= RCchunks:
            # APPEND TO RAPID CHARGE LIST
            toRapidCharge.append(car)
        # ELSE (CAR HAS NOT BEEN RAPID CHARGING), CHECK IF CAR NEEDS RAPID CHARGING
        else:
            # IF BATTERY < RC PERCENTAGE (INPUT), CAR NEEDS RAPID CHARGING
            batt = carDataDF.loc[car, 'battkW']
            battSize = carDataDF.loc[car, 'battSize']
            if batt < (battSize*(RCperc/100)):
                # APPEND TO RAPID CHARGE LIST
                toRapidCharge.append(car)
                # INCREASE RAPID CHARGE COUNT
                rcCount += 1
            # OTHERWISE, ADD TO DON'T RAPID CHARGE LIST
            else: dontRapidCharge.append(car)
    # ***** FOR CARS THAT DON'T NEED RAPID CHARGING, DECREASE BATT (DRIVE) *****
    for carsDontRC in range(len(dontRapidCharge)):
        car = dontRapidCharge[carsDontRC]
        # READ BATTERY
        batt = carDataDF.loc[car, 'battkW']
        # GET RANDOMISED VALUE FOR MILEAGE AND MPKW
        mileage = driveDataByCar[str(car)].loc[ind, 'mileage']
        mpkw = driveDataByCar[str(car)].loc[ind, 'mpkw']
        # CALCULATE RATE OF BATT DECREASE (kW consumed per hour of driving)
        kwphr = mileage/mpkw
        # UPDATE SIMULATION ACCORDINGLY
        simulationDF = simulationDF.append({
            'time': time,
            'car': car,
            'chargeDiff': round(-kwphr/chunks, 1),
            'batt': round(batt, 1),
            'event': 'drive',
            'costPerCharge': 0,
            'totalCost': round(totalCost, 2)
        }, ignore_index=True)
        # DECREASE BATTERY
        batt -= kwphr/chunks
        # ASSIGN BATTERY
        carDataDF.loc[car,'battkW'] = batt
    # ***** FOR CARS THAT NEED RAPID CHARGING, RAPID CHARGE *****
    for carsToRC in range(len(toRapidCharge)):
        car = toRapidCharge[carsToRC]
        # READ BATTERY AND BATTERY SIZE
        batt = carDataDF.loc[car, 'battkW']
        battSize = carDataDF.loc[car, 'battSize']
        # CALCULATE BATTERY INCREASE (rapid chargers deliver 50 kW per hour)
        RCbattIncrease = 50/chunks
        # UPDATE RAPID CHARGE COUNT AND TOTAL COST (0.30 per kWh)
        RCcost = 0.3*(50/chunks)
        totalCost += RCcost
        # UPDATE SIMULATION ACCORDINGLY
        simulationDF = simulationDF.append({
            'time': time,
            'car': car,
            'chargeDiff': round(RCbattIncrease, 1),
            'batt': round(batt, 1),
            'event': 'RC',
            'costPerCharge': RCcost,
            'totalCost': round(totalCost, 2)
        }, ignore_index=True)
        # RAPID CHARGE (capped at battery capacity)
        batt += RCbattIncrease
        if batt > battSize: batt = battSize
        # ASSIGN BATTERY
        carDataDF.loc[car,'battkW'] = batt
    return carDataDF, rcCount, simulationDF, totalCost
#############################################################
# ALLOCATE AN AVAILABLE CHARGE PT OR SELECT CURRENT CHARGE PT
#############################################################
def findChargePt(carDataDF, car, chargePtDF):
    """Return the charge point for `car`.

    Keeps the car's current point if it has one; otherwise plugs it into the
    first free point, marking both frames in place. With no free point the
    car's existing value (possibly NaN) is returned unchanged.
    """
    currentPt = carDataDF.loc[car, 'chargePt']
    freePts = chargePtDF.loc[chargePtDF['inUse'] != 1]
    # Already plugged in, or nothing available: keep whatever the car has.
    if not np.isnan(currentPt) or len(freePts) == 0:
        return currentPt, carDataDF, chargePtDF
    # Plug into the first available point and record it in both frames.
    pt = freePts.index[0]
    chargePtDF.loc[pt, 'inUse'] = 1
    carDataDF.loc[car, 'chargePt'] = pt
    return pt, carDataDF, chargePtDF
###################################
# CHARGE VEHICLE FOR ONE HOUR
###################################
def charge(carDataDF, depot, simulationDF, time, chargePtDF, toChargeDF, pricesDF, company, totalCost):
    """Apply one time chunk of charging to every car in the depot.

    For each car the charge rate previously chosen by the algorithm
    (``toChargeDF``) is priced against the company tariff (green or red
    zone), a row is logged to ``simulationDF`` with the *pre-charge*
    battery level, and the battery is then topped up (capped at its
    size).  Returns the updated carDataDF, simulationDF, chargePtDF and
    running totalCost.
    """
    # The tariff is the same for every car at this time step, so look it
    # up once rather than per car.
    companyRows = pricesDF['company'] == company
    greenStart = pricesDF.loc[companyRows, 'startGreenZone'].to_string(index=False)
    greenEnd = pricesDF.loc[companyRows, 'endGreenZone'].to_string(index=False)
    # Time-of-day only (strip the date) for the zone comparison
    timeHr = readTime(str(time.time()))
    if readTime(greenStart) <= timeHr < readTime(greenEnd):
        price = float(pricesDF.loc[companyRows, 'priceGreenZone'])
    else:
        price = float(pricesDF.loc[companyRows, 'priceRedZone'])
    for car in depot:
        batt = carDataDF.loc[car, 'battkW']
        battSize = carDataDF.loc[car, 'battSize']
        chargeRate = toChargeDF.loc[car, 'chargeRate']
        # Cost of this chunk of charging
        costOfCharge = (chargeRate * price) / chunks
        totalCost += costOfCharge
        # Label the event for the log
        if chargeRate > 0:
            event = "charge"
        elif batt == battSize:
            event = "full"
        else:
            event = "wait"
        simulationDF = simulationDF.append({
            'time': time,
            'car': car,
            'chargeDiff': round(chargeRate/chunks, 1),
            'batt': round(batt, 1),
            'event': event,
            'costPerCharge': round(costOfCharge, 1) if chargeRate > 0 else 0,
            'totalCost': round(totalCost, 2)
        }, ignore_index=True)
        # Top up the battery, never beyond its capacity
        newBatt = batt + chargeRate / chunks
        carDataDF.loc[car, 'battkW'] = battSize if newBatt >= battSize else newBatt
    return carDataDF, simulationDF, chargePtDF, totalCost
############################################
# SYNCHRONISE THE RUNNING TOTAL COST ACROSS ALL ROWS LOGGED AT THE SAME TIME
############################################
def adjustTotalCost(time, simulationDF):
    """Equalise the 'totalCost' column for every row logged at *time*.

    Each car appends its own row with a running total, so rows from the
    same time step can disagree; overwrite them all with the maximum so
    the column reads as a single running total.
    """
    sameTime = simulationDF['time'] == time
    simulationDF.loc[sameTime, 'totalCost'] = simulationDF.loc[sameTime, 'totalCost'].max()
    return simulationDF
#################################################################################################################################
# CORE FUNCTIONS
#################################
# INCREASE BATT DURING CHARGE
#################################
def dumbCharge(carDataDF, depot, shiftsByCar, time,
               availablePower, simulationDF, chargePtDF, toChargeDF,
               pricesDF, company, totalCost):
    """Baseline charging algorithm: split the available power equally.

    Every not-full car in the depot tries to claim a free charge point;
    the cars that hold one then share ``availablePower`` equally, each
    capped at its point's maximum rate.  Chosen rates are written to
    ``toChargeDF``; batteries themselves are updated later by `charge`.
    """
    # Cars in the depot that still need charge
    needCharge = carDataDF.loc[(carDataDF['inDepot'] == 1) &
                               (carDataDF['battkW'] < carDataDF['battSize'])]
    # Give each such car a chance to claim a free charge point
    for car in needCharge.index:
        _, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
    # Re-select: only cars that actually hold a charge point can draw power
    plugged = carDataDF.loc[(carDataDF['inDepot'] == 1) &
                            (carDataDF['battkW'] < carDataDF['battSize']) &
                            (~carDataDF['chargePt'].isna())]
    if len(plugged) > 0:
        # Equal share per charging car (never split across more points
        # than physically exist)
        if len(plugged) <= len(chargePtDF):
            splitRate = availablePower / len(plugged)
        else:
            splitRate = availablePower / len(chargePtDF)
        for car in plugged.index:
            pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
            if np.isnan(pt):
                # No charge point -> cannot draw any power
                rate = 0
            else:
                # Cap the equal share at the point's physical maximum
                maxRatePt = chargePtDF.loc[pt, 'maxRate']
                rate = maxRatePt if maxRatePt < splitRate else splitRate
            toChargeDF.loc[car, 'chargeRate'] = rate
    return carDataDF, chargePtDF, toChargeDF, totalCost
#########################################
# INCREASE BATT DURING CHARGE (LEAVETIME)
#########################################
def smartCharge_leavetime(carDataDF, depot, shiftsByCar, time,
                          availablePower, simulationDF, chargePtDF, toChargeDF,
                          pricesDF, company, totalCost):
    """Charging algorithm: the car leaving soonest charges first.

    Cars in the depot are sorted by hours remaining until their next
    shift starts.  In that order, each not-full car is given its charge
    point's max rate while enough of ``availablePower`` remains, then
    whatever power is left, then nothing.  Rates are written into
    ``toChargeDF``; batteries are only changed later by `charge`.
    Returns the (possibly updated) carDataDF, chargePtDF, toChargeDF
    and the unchanged totalCost.
    """
    # IF THERE ARE CARS IN THE DEPOT
    if len(depot) > 0:
        # CREATE A LIST FOR CARS AND THEIR LEAVETIMES (TIME UNTIL CAR LEAVES DEPOT)
        leaveTList = []
        # ***** FIND LEAVETIMES AND APPEND TO A LIST *****
        for cars in range(0, len(depot)):
            car = depot[cars]
            # READ INDEX OF LATEST SHIFT AND INDEX OF THE LAST SHIFT
            shiftIndex = carDataDF.loc[car, 'shiftIndex']
            lastShiftIndex = len(shiftsByCar[str(car)])
            # IF NEXT SHIFT EXISTS, TAKE START TIME OF NEXT SHIFT
            if (shiftIndex + 1) < lastShiftIndex:
                nextStart = shiftsByCar[str(car)].loc[shiftIndex+1, 'startShift']
            # IF SHIFT INDEX GOES BEYOND LAST SHIFT, TAKE ARBITRARY LEAVETIME BEYOND RUN TIME
            else:
                lastStart = shiftsByCar[str(car)].loc[lastShiftIndex-1, 'startShift']
                lastDay = readTime(lastStart).date() + dt.timedelta(days=1)
                nextStart = readTime(str(lastDay) + " 23:59:59")
            # CALCULATE TIME LEFT UNTIL CAR LEAVES AND APPEND TO LIST
            hrsLeft = ((rereadTime(nextStart) - rereadTime(time)).total_seconds())/(60*60)
            leaveTList.append([car, hrsLeft])
        # ***** CONVERT LIST INTO DATAFRAME AND SORT *****
        # Ascending sort: the smallest hrsLeft (soonest departure) is served first
        leaveTimes = pd.DataFrame.from_records(leaveTList, columns=['car','hrsLeft'])
        leaveTimes = leaveTimes.sort_values(by=['hrsLeft'])
        leaveTimes = leaveTimes.reset_index(drop=True)
        # ***** CHARGE CARS IN SORTED ORDER *****
        for row in range(0, len(leaveTimes)):
            # READ IN DATA FOR SELECTED CAR
            car = leaveTimes.loc[row, 'car']
            batt = carDataDF.loc[car, 'battkW']
            battSize = carDataDF.loc[car, 'battSize']
            chargePt = carDataDF.loc[car, 'chargePt']
            # IF CAR BATT IS NOT 100%, CHARGE CAR
            if batt < battSize:
                # ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
                pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
                chargeRate = 0
                # IF CAR HAS A VALID CHARGE PT:
                if not np.isnan(pt):
                    # READ MAX RATE
                    maxRate = chargePtDF.loc[pt, 'maxRate']
                    # CALCULATE THE ENERGY LEFT IF CAR WAS CHARGED AT MAX
                    energyLeft = availablePower - maxRate
                    # IF THERE IS ENOUGH ENERGY FOR MAX RATE, CHARGE CAR AT MAX
                    if energyLeft >= 0:
                        chargeRate = maxRate
                    # IF THERE ISN'T ENOUGH FOR MAX RATE, CHARGE USING REMAINING POWER
                    elif energyLeft < 0 and energyLeft > -maxRate:
                        chargeRate = availablePower
                    # IF VEHICLE IS PLUGGED IN BUT NOT ALLOCATED CHARGE
                    else:
                        chargeRate = 0
                # ADJUST TO-CHARGE DF WITH CHARGE RATE
                toChargeDF.loc[car, 'chargeRate'] = chargeRate
                # ADJUST AVAILABLE POWER
                availablePower -= chargeRate
    return carDataDF, chargePtDF, toChargeDF, totalCost
######################################
# INCREASE BATT DURING CHARGE (BATT)
######################################
def smartCharge_batt(carDataDF, depot, shiftsByCar, time,
                     availablePower, simulationDF, chargePtDF, toChargeDF,
                     pricesDF, company, totalCost):
    """Charging algorithm: emptiest battery first, each at max rate.

    Cars in the depot are ranked by how much charge they still need
    (descending).  In that order each car gets its charge point's max
    rate while enough ``availablePower`` remains, then the leftover
    power, then nothing.  Rates go into ``toChargeDF``; batteries are
    updated later by `charge`.
    """
    if len(depot) >= 1:
        # Rank every depot car by outstanding charge (largest deficit first)
        rankingRows = []
        for c in range(0, len(depot)):
            carNum = depot[c]
            deficit = abs(carDataDF.loc[carNum, 'battSize'] - carDataDF.loc[carNum, 'battkW'])
            rankingRows.append([carNum, deficit])
        ranking = pd.DataFrame.from_records(rankingRows, columns=['car', 'battLeft'])
        ranking = ranking.sort_values(by=['battLeft'], ascending=False)
        ranking = ranking.reset_index(drop=True)
        # Allocate power greedily down the ranking
        for row in range(0, len(ranking)):
            car = ranking.loc[row, 'car']
            batt = carDataDF.loc[car, 'battkW']
            battSize = carDataDF.loc[car, 'battSize']
            if batt >= battSize:
                # Already full -> leave its previous charge rate untouched
                continue
            # Make sure the car has a charge point if one is free
            pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
            chargeRate = 0
            if not np.isnan(pt):
                maxRate = chargePtDF.loc[pt, 'maxRate']
                remaining = availablePower - maxRate
                if remaining >= 0:
                    # Full max-rate charge fits in the budget
                    chargeRate = maxRate
                elif remaining > -maxRate:
                    # Partial power left -> give the car all of it
                    chargeRate = availablePower
                # else: plugged in but no power left -> rate stays 0
            toChargeDF.loc[car, 'chargeRate'] = chargeRate
            availablePower -= chargeRate
    return carDataDF, chargePtDF, toChargeDF, totalCost
###########################################
# INCREASE BATT DURING CHARGE (SUPER SMART)
###########################################
# PRIORITY = BATT NEEDED/TIME LEFT IN DEPOT
# CHARGE RATE = (PRIORITY/SUM OF ALL PRIORITIES)*AVAILABLE POWER
def smartCharge_battOverLeavetime(carDataDF, depot, shiftsByCar, time,
                                  availablePower, simulationDF, chargePtDF, toChargeDF,
                                  pricesDF, company, totalCost):
    """Charging algorithm: power shared in proportion to urgency.

    Each depot car gets priority = (battery needed)/(hours until it
    leaves).  Cars are processed in descending priority; each receives
    (priority / remaining priority sum) * remaining available power,
    capped at its charge point's max rate and at the charge it actually
    needs this chunk.  Rates go into ``toChargeDF``; batteries are
    updated later by `charge`.
    """
    # IF THERE ARE CARS IN THE DEPOT
    if len(depot) >= 1:
        # CREATE A LIST FOR CARS AND THEIR LEAVETIMES AND BATT NEEDED
        priorityRows = []
        # ***** FIND LEAVETIMES AND BATT NEEDED AND APPEND TO A LIST *****
        for cars in range(0, len(depot)):
            car = depot[cars]
            # READ INDEX OF LATEST SHIFT AND INDEX OF THE LAST SHIFT
            shiftIndex = carDataDF.loc[car, 'shiftIndex']
            lastShiftIndex = len(shiftsByCar[str(car)])
            # IF NEXT SHIFT EXISTS, TAKE START TIME OF NEXT SHIFT
            if (shiftIndex + 1) < lastShiftIndex:
                nextStart = shiftsByCar[str(car)].loc[shiftIndex+1, 'startShift']
            # IF SHIFT INDEX GOES BEYOND LAST SHIFT, TAKE ARBITRARY LEAVETIME
            else:
                lastStart = shiftsByCar[str(car)].loc[lastShiftIndex-1, 'startShift']
                lastDay = readTime(lastStart).date() + dt.timedelta(days=1)
                nextStart = readTime(str(lastDay) + " 23:59:59")
            # CALCULATE TIME LEFT AND BATT NEEDED
            # NOTE(review): if nextStart == time this divides by zero below — confirm
            # shifts are bookkept so a car is never in the depot at its own start time.
            hrsLeft = ((rereadTime(nextStart) - rereadTime(time)).total_seconds())/(60*60)
            battLeft = carDataDF.loc[car,'battSize']-carDataDF.loc[car,'battkW']
            # LET PRIORITY = BATT LEFT/TIME LEFT, APPEND TO LIST
            priorityRows.append([car, battLeft/hrsLeft, battLeft])
        # ***** CONVERT LIST INTO DATAFRAME AND SORT BY PRIORITY *****
        leaveTimes = pd.DataFrame.from_records(priorityRows, columns=['car','priority','battLeft'])
        leaveTimes = leaveTimes.sort_values(by=['priority'], ascending=False)
        leaveTimes = leaveTimes.reset_index(drop=True)
        # ***** IN SORTED ORDER, CALCULATE PRIORITY RATIO AND CHARGE *****
        # CALCULATE THE SUM OF PRIORITY VALUES
        prioritySum = sum(leaveTimes.priority)
        # FOR EVERY CAR:
        for row in range(0, len(leaveTimes)):
            # READ IN DATA FOR SELECTED CAR
            car = leaveTimes.loc[row, 'car']
            batt = carDataDF.loc[car, 'battkW']
            battSize = carDataDF.loc[car, 'battSize']
            battLeft = leaveTimes.loc[row, 'battLeft']
            priority = leaveTimes.loc[row, 'priority']
            # IF CAR BATT IS NOT 100%, CHARGE CAR
            if batt < battSize:
                # ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
                pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
                chargeRate = 0
                # IF CAR HAS A VALID CHARGE PT
                if not np.isnan(pt):
                    # READ MAX RATE
                    maxRate = chargePtDF.loc[pt, 'maxRate']
                    # CALCULATE CHARGE RATE USING PRIORITY/SUM OF PRIORITIES
                    chargeRate = (priority/prioritySum)*availablePower
                    # IF CHARGE RATE EXCEEDS MAX RATE:
                    if chargeRate > maxRate: chargeRate = maxRate
                    # IF CHARGE RATE EXCEEDS CHARGE NEEDED:
                    if chargeRate > battLeft*chunks: chargeRate = battLeft*chunks
                # ADJUST REMAINING AVAILABLE POWER AND PRIORITY SUM
                # (so the power freed by rate caps is redistributed to later cars)
                availablePower -= chargeRate
                prioritySum -= priority
                # ADJUST TO-CHARGE DF WITH CHARGE RATE
                toChargeDF.loc[car, 'chargeRate'] = chargeRate
    return carDataDF, chargePtDF, toChargeDF, totalCost
##############################################
# INCREASE BATT DURING CHARGE (COST SENSITIVE)
##############################################
# PRIORITY = BATT NEEDED/TIME LEFT IN DEPOT
# IF CAR WILL CHARGE OVER GREEN ZONE:
# DELAY CHARGING UNTIL START GREEN ZONE STARTS (PRIORITY = 0)
# CHARGE RATE = (PRIORITY/SUM OF ALL PRIORITIES)*AVAILABLE POWER
def costSensitiveCharge(carDataDF, depot, shiftsByCar, time,
                        availablePower, simulationDF, chargePtDF, toChargeDF,
                        pricesDF, company, totalCost):
    """Charging algorithm: urgency-weighted sharing, tariff-aware.

    Like `smartCharge_battOverLeavetime`, priority = (battery needed) /
    (hours until the car leaves), and each car receives a proportional
    share of the available power.  In addition, a car that will still be
    in the depot for the *whole* of the upcoming cheap ("green") tariff
    zone has its priority forced to 0 so its charging is delayed until
    the green zone.  Rates go into ``toChargeDF``; batteries are updated
    later by `charge`.
    """
    # IF THERE ARE CARS IN THE DEPOT
    if len(depot) >= 1:
        # CREATE A LIST FOR CARS AND THEIR LEAVETIME AND BATT NEEDED
        priorityRows = []
        # ***** CALCULATE PRIORITY FOR EACH CAR AND APPEND TO A LIST *****
        for cars in range(0, len(depot)):
            carNum = depot[cars]
            # READ INDEX OF LATEST SHIFT AND INDEX OF THE LAST SHIFT
            shiftIndex = carDataDF.loc[carNum, 'shiftIndex']
            lastShiftIndex = len(shiftsByCar[str(carNum)])
            # IF NEXT SHIFT EXISTS, TAKE START TIME OF NEXT SHIFT
            if (shiftIndex + 1) < lastShiftIndex:
                nextStart = readTime(shiftsByCar[str(carNum)].loc[shiftIndex+1, 'startShift'])
            # IF SHIFT INDEX GOES BEYOND LAST SHIFT, TAKE ARBITRARY LEAVETIME
            else:
                lastStart = shiftsByCar[str(carNum)].loc[lastShiftIndex-1, 'startShift']
                lastDay = readTime(lastStart).date() + dt.timedelta(days=1)
                nextStart = readTime(str(lastDay) + " 23:59:59")
            # CALCULATE TIME LEFT AND BATT NEEDED
            # NOTE(review): divides by zero if nextStart == time — confirm shift
            # bookkeeping keeps departing cars out of the depot list.
            hrsLeft = ((rereadTime(nextStart) - rereadTime(time)).total_seconds())/(60*60)
            battLeft = carDataDF.loc[carNum,'battSize']-carDataDF.loc[carNum,'battkW']
            prior = battLeft/hrsLeft
            # ***** DELAY CHARGING FOR CARS THAT ARE IN DEPOT DURING THE GREEN ZONE *****
            # READ IN START AND END TIMES OF GREEN ZONE
            greenStartHr = pricesDF.loc[pricesDF['company']==company, 'startGreenZone'].to_string(index=False)
            greenEndHr = pricesDF.loc[pricesDF['company']==company, 'endGreenZone'].to_string(index=False)
            # IF GREEN ZONE RUNS OVERNIGHT:
            if (readTime(greenStartHr) > readTime(greenEndHr)):
                # GREEN START = CURRENT DAY + GREEN ZONE START TIME
                greenStart = readTime(str(time.date()) + " " + greenStartHr)
                # GREEN END = NEXT DAY + GREEN END TIME
                greenEnd = readTime(str(time.date() + dt.timedelta(days=1)) + " " + greenEndHr)
            # IF GREEN ZONE DOESN'T RUN OVERNIGHT, CONSIDER CASE WHERE TIME IS PAST MIDNIGHT
            else:
                # CALCULATE DIFFERENCE BETWEEN GREEN ZONE START TIME AND MIDNIGHT
                arbGreenStart = dt.datetime.combine(dt.date.today(), readTime(greenStartHr))
                arbMidnight = dt.datetime.combine(dt.date.today(), readTime("00:00:00"))
                gap = arbGreenStart - arbMidnight
                # GREEN START = (TIME-GAP) + 1 DAY + GREEN ZONE START TIME
                greenStart = readTime(str((time-gap).date() + dt.timedelta(days=1)) + " " + greenStartHr)
                # GREEN END = (TIME-GAP) + 1 DAY + GREEN ZONE END TIME
                greenEnd = readTime(str((time-gap).date() + dt.timedelta(days=1)) + " " + greenEndHr)
            # IF GREEN ZONE HASN'T STARTED YET,
            # AND IF CAR WILL BE CHARGING THROUGHOUT WHOLE OF GREEN ZONE:
            if (time < greenStart) and (nextStart >= greenEnd):
                # DELAY CHARGING UNTIL GREEN ZONE
                prior = 0.0
            # LET PRIORITY = BATTLEFT/TIME LEFT, APPEND TO LIST
            priorityRows.append([carNum, prior, battLeft])
        # ***** CONVERT LIST INTO DATAFRAME AND SORT BY PRIORITY *****
        leaveTimes = pd.DataFrame.from_records(priorityRows, columns=['car','priority','battLeft'])
        leaveTimes = leaveTimes.sort_values(by=['priority'], ascending=False)
        leaveTimes = leaveTimes.reset_index(drop=True)
        # ***** IN SORTED ORDER, CALCULATE PRIORITY RATIO AND CHARGE *****
        # CALCULATE THE SUM OF PRIORITY VALUES
        prioritySum = sum(leaveTimes.priority)
        # FOR EVERY CAR:
        for row in range(0, len(leaveTimes)):
            # READ IN DATA FOR SELECTED CAR
            car = leaveTimes.loc[row, 'car']
            batt = carDataDF.loc[car, 'battkW']
            battSize = carDataDF.loc[car, 'battSize']
            battLeft = leaveTimes.loc[row, 'battLeft']
            priority = leaveTimes.loc[row, 'priority']
            # IF CAR BATT IS NOT 100%, CHARGE CAR
            if batt < battSize:
                # ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
                pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
                chargeRate = 0
                # IF CAR HAS A VALID CHARGE PT
                if not np.isnan(pt):
                    # READ MAX RATE
                    maxRate = chargePtDF.loc[pt, 'maxRate']
                    # CALCULATE CHARGE RATE USING PRIORITY/SUM OF PRIORITIES
                    # (all-delayed case: prioritySum can be exactly 0 -> no charging)
                    if prioritySum == 0.0: chargeRate = 0
                    else: chargeRate = (priority/prioritySum)*availablePower
                    # IF CHARGE RATE EXCEEDS MAX RATE:
                    if chargeRate > maxRate: chargeRate = maxRate
                    # IF CHARGE RATE EXCEEDS CHARGE NEEDED:
                    if chargeRate > battLeft*chunks: chargeRate = battLeft*chunks
                # ADJUST REMAINING AVAILABLE POWER AND PRIORITY SUM
                availablePower -= chargeRate
                prioritySum -= priority
                # ADJUST TO-CHARGE DF WITH CHARGE RATE
                toChargeDF.loc[car, 'chargeRate'] = chargeRate
    return carDataDF, chargePtDF, toChargeDF, totalCost
#################################################################################################################################
############################################
# RUN SIMULATION FROM SEPARATE FILE
############################################
def runSimulation(startTime, runTime, RCduration, RCperc,
                  fleetData, driveDataDF, allShiftsDF, pricesDF, company,
                  algo):
    """Run the fleet-charging simulation and return the results.

    Parameters:
        startTime: datetime at which the simulation starts.
        runTime: hours to simulate; each hour is split into `chunks` steps.
        RCduration, RCperc: rapid-charge settings forwarded to `driving`.
        fleetData: raw fleet input used to build the car and charge-point
            dataframes and the available power budget.
        driveDataDF: per-car driving (consumption) data.
        allShiftsDF: shift timetable for every car.
        pricesDF, company: tariff table and the company row to price against.
        algo: charging-algorithm function (e.g. `dumbCharge`,
            `smartCharge_*`, `costSensitiveCharge`) invoked whenever an
            event change is detected.

    Returns:
        (styled dataframe for viewing, raw simulation dataframe for animation).
    """
    # INITIALISE MAIN DATAFRAMES WITH DATA AT START TIME
    # Get data from csv inputs
    carData, chargePtData = getLists(fleetData)
    # Choose column names
    carCols = ["battkW","inDepot","battSize","chargePt","shiftIndex","latestStartShift","latestEndShift"]
    cpCols = ["maxRate","inUse"]
    simCols = ["time","car","chargeDiff","batt","event","costPerCharge","totalCost"]
    tcCols = ["car","chargeRate"] # Columns for cars that need to charge and the
                                  # rate at which they will charge given by the algorithm
    # Initialise dataframes
    carDataDF = pd.DataFrame.from_records(carData, columns=carCols)
    chargePtDF = pd.DataFrame.from_records(chargePtData, columns=cpCols)
    simulationDF = pd.DataFrame(columns=simCols)
    # Create rows for every car in toChargeDF
    toChargeDFrows = []
    for i in range(len(carDataDF)):
        toChargeDFrows.append([i, 0])
    # Initialise toChargeDF
    toChargeDF = pd.DataFrame(toChargeDFrows, columns=tcCols)
    # APPEND CARS INTO DEPOT AT START TIME
    depot = []
    for car in range(0, len(carDataDF)):
        if carDataDF.loc[car,'inDepot']: depot.append(car)
    # CREATE LIBRARY FOR SHIFTS BY CAR
    shiftsByCar = unpackShifts(carDataDF, allShiftsDF)
    # CREATE LIBRARY FOR DRIVING DATA (one dataframe per car, keyed by str(car))
    driveDataByCar = {}
    for car in range(0, len(carDataDF)):
        findData = driveDataDF.loc[driveDataDF['car']==car]
        dataNoIndex = findData.reset_index(drop=True)
        driveDataByCar['%s' % car] = dataNoIndex
    # RETRIEVE AVAILABLE POWER FROM FLEET DATA
    availablePower = getData(fleetData, 'availablePower')
    rcCount = 0  # INITIALISE A COUNTER FOR RAPID CHARGES
    totalCost = 0  # INITIALISE A COUNTER FOR TOTAL COST
    time = startTime  # CHOOSE START TIME
    # RUN SIMULATION FOR ALL OF RUN TIME
    for i in range(0, runTime*chunks):
        # print("*****" + str(time))
        # INITIALISE A VARIABLE TO CHECK FOR EVENT CHANGES
        eventChange = False
        # *** RUN FUNCTIONS THAT WILL RECOGNISE CHANGES IN EVENTS ***
        carDataDF, depot, chargePtDF, toChargeDF, eventChange = inOutDepot(carDataDF, shiftsByCar, time, depot, chargePtDF, toChargeDF, eventChange)
        toChargeDF, eventChange = readFullBattCars(carDataDF, simulationDF, toChargeDF, time, totalCost, eventChange)
        eventChange = readTariffChanges(time, pricesDF, company, eventChange)
        # *** RUN FUNCTIONS AFFECTING CARS OUTSIDE THE DEPOT ***
        # DECREASE BATT/RAPID CHARGE CARS OUTSIDE THE DEPOT
        carDataDF, rcCount, simulationDF, totalCost = driving(carDataDF, time, rcCount, RCduration, RCperc, simulationDF, driveDataByCar, i, totalCost)
        # *** RUN FUNCTIONS AFFECTING CARS IN THE DEPOT ***
        # IF THERE IS AN EVENT, RUN CHARGING ALGORITHM
        if eventChange == True:
            carDataDF, chargePtDF, toChargeDF, totalCost = algo(carDataDF, depot, shiftsByCar, time, availablePower, simulationDF, chargePtDF, toChargeDF, pricesDF, company, totalCost)
        # CHARGE/READ WAITING CARS IN THE DEPOT
        carDataDF, simulationDF, chargePtDF, totalCost = charge(carDataDF, depot, simulationDF, time, chargePtDF, toChargeDF, pricesDF, company, totalCost)
        # FORMAT TOTAL COST COLUMN IN SIMULATION DF
        simulationDF = adjustTotalCost(time, simulationDF)
        # INCREMENT TIME OF SIMULATION
        time = incrementTime(time)
        # print("\n")
    # print("No. of rapid charges: " + str(rcCount))
    # FORMAT FINAL SIMULATION DF FOR VIEWING OR ANIMATION
    sim = dfFunction(simulationDF, 'car')
    return styleDF(sim), simulationDF  # second dataframe, 'sim', is for animation purposes
| [
"[email protected]"
]
| |
c1f7f5a8fdcb8e87bf303027ecd2d3053561bdfd | abb64b652cf908aaa17257464a12395b014b6093 | /test/test_quantized_nn_mods.py | 7203fb371c6255be2b47c7441de524a677698d85 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
]
| permissive | beifangfazhanlu/pytorch | 8a1c5a4a11b29da26af4d3839aff0ca22e4a298a | b7d992eb46a1e085d2b8b7f0df9817bf569616d3 | refs/heads/master | 2020-07-13T15:43:26.647301 | 2019-08-29T05:18:56 | 2019-08-29T05:20:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,424 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn._intrinsic.quantized as nnq_fused
import torch.nn.quantized.functional as qF
from torch.nn.quantized.modules import Conv2d
from torch.nn._intrinsic.quantized import ConvReLU2d
import torch.quantization
from common_utils import run_tests, tempfile
from common_quantization import QuantizationTestCase, no_deadline, prepare_dynamic
from common_quantized import _calculate_dynamic_qparams
from hypothesis import given
from hypothesis import strategies as st
import unittest
'''
Note that the tests in this file are just API tests: they make sure we wrapped
the quantized operator implementations correctly in the user-facing APIs. They
are not correctness tests for the underlying quantized operators. For
correctness tests please see `caffe2/test/test_quantized.py`.
'''
class FunctionalAPITest(QuantizationTestCase):
    """API tests for ``torch.nn.quantized.functional``.

    Each test checks that the functional wrapper produces the same result
    as calling the underlying quantized op / tensor-level API directly.
    """

    def test_relu_api(self):
        # Functional relu on a quantized tensor must match torch.relu on
        # the same input.
        X = torch.arange(-5, 5, dtype=torch.float)
        scale = 2.0
        zero_point = 1
        qX = torch.quantize_linear(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
        qY = torch.relu(qX)
        qY_hat = qF.relu(qX)
        self.assertEqual(qY, qY_hat)

    @no_deadline
    @unittest.skipIf(
        not torch.fbgemm_is_cpu_supported(),
        " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
        " with instruction set support avx2 or newer.",
    )
    @given(
        use_bias=st.booleans(),
    )
    def test_conv_api(self, use_bias):
        """Tests the correctness of the conv module.

        The correctness is defined against the functional implementation:
        the result of `qF.conv2d` must match calling the fbgemm prepack +
        conv2d ops directly (with the same quantization parameters).
        """
        N, iC, H, W = 10, 10, 10, 3
        oC, g, kH, kW = 16, 1, 3, 3
        scale, zero_point = 1.0 / 255, 128
        stride = (1, 1)
        i_padding = (0, 0)
        dilation = (1, 1)
        # fbgemm ops operate on NHWC layout, hence the permutes below
        X = torch.randn(N, iC, H, W, dtype=torch.float32)
        X = X.permute([0, 2, 3, 1]).contiguous()
        qX = torch.quantize_linear(X, scale=scale, zero_point=128, dtype=torch.quint8)
        w = torch.randn(oC, iC // g, kH, kW, dtype=torch.float32)
        qw = torch.quantize_linear(w, scale=scale, zero_point=0, dtype=torch.qint8)
        b = torch.randn(oC, dtype=torch.float32) if use_bias else None
        q_bias = torch.quantize_linear(b, scale=1.0 / 1024, zero_point=0, dtype=torch.qint32) if use_bias else None
        # Reference result from the raw fbgemm ops
        q_filters_ref = torch.ops.quantized.fbgemm_conv_prepack(qw.permute([0, 2, 3, 1]),
                                                                stride,
                                                                i_padding,
                                                                dilation,
                                                                g)
        requantized_bias = torch.quantize_linear(q_bias.dequantize(), scale * scale, 0 , torch.qint32) if use_bias else None
        ref_result = torch.ops.quantized.fbgemm_conv2d(qX.permute([0, 2, 3, 1]), q_filters_ref,
                                                       requantized_bias, stride,
                                                       i_padding, dilation,
                                                       g, scale, zero_point).permute([0, 3, 1, 2])
        # Result from the functional API under test
        q_result = torch.nn.quantized.functional.conv2d(qX,
                                                        qw,
                                                        bias=q_bias, scale=scale,
                                                        zero_point=zero_point,
                                                        stride=stride, padding=i_padding,
                                                        dilation=dilation, groups=g,
                                                        dtype=torch.quint8)
        self.assertEqual(ref_result, q_result)
class DynamicModuleAPITest(QuantizationTestCase):
    """API tests for dynamically-quantized modules (``torch.nn.quantized.dynamic``)."""

    @no_deadline
    @unittest.skipIf(
        not torch.fbgemm_is_cpu_supported(),
        " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
        " with instruction set support avx2 or newer.",
    )
    @given(
        batch_size=st.integers(1, 5),
        in_features=st.integers(16, 32),
        out_features=st.integers(4, 8),
        use_bias=st.booleans(),
        use_default_observer=st.booleans(),
    )
    def test_linear_api(self, batch_size, in_features, out_features, use_bias, use_default_observer):
        """test API functionality for nn.quantized.dynamic.Linear

        Covers: forward vs the raw fbgemm dynamic op, the
        weight()/set_weight() round trip, state_dict and whole-module
        serialization, scripting, and from_float conversion.
        """
        W = torch.rand(out_features, in_features).float()
        W_scale, W_zp = _calculate_dynamic_qparams(W, torch.qint8)
        W_q = torch.quantize_linear(W, W_scale, W_zp, torch.qint8)
        X = torch.rand(batch_size, in_features).float()
        B = torch.rand(out_features).float() if use_bias else None
        qlinear = nnqd.Linear(in_features, out_features)
        # Run module with default-initialized parameters.
        # This tests that the constructor is correct.
        qlinear(X)
        qlinear.set_weight(W_q)
        # Simple round-trip test to ensure weight()/set_weight() API
        self.assertEqual(qlinear.weight(), W_q)
        W_pack = qlinear._packed_weight
        qlinear.bias = B if use_bias else None
        Z_dq = qlinear(X)
        # Check if the module implementation matches calling the
        # ops directly
        Z_ref = torch.ops.quantized.fbgemm_linear_dynamic(X, W_pack, B)
        self.assertEqual(Z_ref, Z_dq)
        # Test serialization of dynamic quantized Linear Module using state_dict
        model_dict = qlinear.state_dict()
        self.assertEqual(model_dict['weight'], W_q)
        if use_bias:
            self.assertEqual(model_dict['bias'], B)
        with tempfile.TemporaryFile() as f:
            torch.save(model_dict, f)
            f.seek(0)
            loaded_dict = torch.load(f)
        for key in model_dict:
            self.assertEqual(model_dict[key], loaded_dict[key])
        loaded_qlinear = nnqd.Linear(in_features, out_features)
        loaded_qlinear.load_state_dict(loaded_dict)
        linear_unpack = torch.ops.quantized.fbgemm_linear_unpack
        self.assertEqual(linear_unpack(qlinear._packed_weight),
                         linear_unpack(loaded_qlinear._packed_weight))
        if use_bias:
            self.assertEqual(qlinear.bias, loaded_qlinear.bias)
        self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
        self.assertTrue(hasattr(qlinear, '_packed_weight'))
        self.assertTrue(hasattr(loaded_qlinear, '_packed_weight'))
        self.assertTrue(hasattr(qlinear, 'weight'))
        self.assertTrue(hasattr(loaded_qlinear, 'weight'))
        self.assertEqual(qlinear.weight(), loaded_qlinear.weight())
        self.assertEqual(qlinear.weight(), torch.ops.quantized.fbgemm_linear_unpack(qlinear._packed_weight))
        Z_dq2 = qlinear(X)
        self.assertEqual(Z_dq, Z_dq2)
        # test serialization of module directly
        with tempfile.TemporaryFile() as f:
            torch.save(qlinear, f)
            f.seek(0)
            loaded = torch.load(f)
        # This check is disabled pending an issue in PyTorch serialization:
        # https://github.com/pytorch/pytorch/issues/24045
        # self.assertEqual(qlinear.weight(), loaded.weight())
        self.assertEqual(qlinear.zero_point, loaded.zero_point)
        # Test JIT
        self.checkScriptable(qlinear, list(zip([X], [Z_ref])), check_save_load=True)
        # Test from_float
        float_linear = torch.nn.Linear(in_features, out_features).float()
        if use_default_observer:
            float_linear.qconfig = torch.quantization.default_dynamic_qconfig
        prepare_dynamic(float_linear)
        float_linear(X.float())
        quantized_float_linear = nnqd.Linear.from_float(float_linear)
        # Smoke test to make sure the module actually runs
        quantized_float_linear(X)
        # Smoke test extra_repr
        str(quantized_float_linear)
class ModuleAPITest(QuantizationTestCase):
def test_relu(self):
relu_module = nnq.ReLU()
relu6_module = nnq.ReLU6()
x = torch.arange(-10, 10, dtype=torch.float)
y_ref = torch.relu(x)
y6_ref = torch.nn.modules.ReLU6()(x)
qx = torch.quantize_linear(x, 1.0, 0, dtype=torch.qint32)
qy = relu_module(qx)
qy6 = relu6_module(qx)
self.assertEqual(y_ref, qy.dequantize(),
message="ReLU module API failed")
self.assertEqual(y6_ref, qy6.dequantize(),
message="ReLU6 module API failed")
    @no_deadline
    @unittest.skipIf(
        not torch.fbgemm_is_cpu_supported(),
        " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
        " with instruction set support avx2 or newer.",
    )
    @given(
        batch_size=st.integers(1, 5),
        in_features=st.integers(16, 32),
        out_features=st.integers(4, 8),
        use_bias=st.booleans(),
        use_fused=st.booleans(),
    )
    def test_linear_api(self, batch_size, in_features, out_features, use_bias, use_fused):
        """test API functionality for nn.quantized.linear and nn._intrinsic.quantized.linear_relu

        Covers: forward vs the raw fbgemm op (fused linear_relu when
        ``use_fused``), weight()/set_weight() round trip, state_dict and
        whole-module serialization, scripting, and float-to-quantized
        conversion via torch.quantization.
        """
        W = torch.rand(out_features, in_features).float()
        W_q = torch.quantize_linear(W, 0.1, 4, torch.qint8)
        X = torch.rand(batch_size, in_features).float()
        X_q = torch.quantize_linear(X, 0.2, 10, torch.quint8)
        B = torch.rand(out_features).float() if use_bias else None
        B_q = torch.quantize_linear(B, W_q.q_scale() * X_q.q_scale(), 0, torch.qint32) if use_bias else None
        scale = 0.5
        zero_point = 3
        if use_fused:
            qlinear = nnq_fused.LinearReLU(in_features, out_features)
        else:
            qlinear = nnq.Linear(in_features, out_features)
        # Run module with default-initialized parameters.
        # This tests that the constructor is correct.
        qlinear(X_q)
        qlinear.set_weight(W_q)
        # Simple round-trip test to ensure weight()/set_weight() API
        self.assertEqual(qlinear.weight(), W_q)
        W_pack = qlinear._packed_weight
        qlinear.bias = B_q if use_bias else None
        qlinear.scale = float(scale)
        qlinear.zero_point = int(zero_point)
        Z_q = qlinear(X_q)
        # Check if the module implementation matches calling the
        # ops directly
        if use_fused:
            Z_ref = torch.ops.quantized.fbgemm_linear_relu(X_q, W_pack, B_q, scale, zero_point)
        else:
            Z_ref = torch.ops.quantized.fbgemm_linear(X_q, W_pack, B_q, scale, zero_point)
        self.assertEqual(Z_ref, Z_q)
        # Test serialization of quantized Linear Module using state_dict
        model_dict = qlinear.state_dict()
        self.assertEqual(model_dict['weight'], W_q)
        if use_bias:
            self.assertEqual(model_dict['bias'], B_q)
        with tempfile.TemporaryFile() as f:
            torch.save(model_dict, f)
            f.seek(0)
            loaded_dict = torch.load(f)
        for key in model_dict:
            self.assertEqual(model_dict[key], loaded_dict[key])
        if use_fused:
            loaded_qlinear = nnq_fused.LinearReLU(in_features, out_features)
        else:
            loaded_qlinear = nnq.Linear(in_features, out_features)
        loaded_qlinear.load_state_dict(loaded_dict)
        linear_unpack = torch.ops.quantized.fbgemm_linear_unpack
        self.assertEqual(linear_unpack(qlinear._packed_weight),
                         linear_unpack(loaded_qlinear._packed_weight))
        if use_bias:
            self.assertEqual(qlinear.bias, loaded_qlinear.bias)
        self.assertEqual(qlinear.scale, loaded_qlinear.scale)
        self.assertEqual(qlinear.zero_point, loaded_qlinear.zero_point)
        self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
        self.assertTrue(hasattr(qlinear, '_packed_weight'))
        self.assertTrue(hasattr(loaded_qlinear, '_packed_weight'))
        self.assertTrue(hasattr(qlinear, 'weight'))
        self.assertTrue(hasattr(loaded_qlinear, 'weight'))
        self.assertEqual(qlinear.weight(), loaded_qlinear.weight())
        self.assertEqual(qlinear.weight(), torch.ops.quantized.fbgemm_linear_unpack(qlinear._packed_weight))
        Z_q2 = loaded_qlinear(X_q)
        self.assertEqual(Z_q, Z_q2)
        # test serialization of module directly
        with tempfile.TemporaryFile() as f:
            torch.save(qlinear, f)
            f.seek(0)
            loaded = torch.load(f)
        # This check is disabled pending an issue in PyTorch serialization:
        # https://github.com/pytorch/pytorch/issues/24045
        # self.assertEqual(qlinear.weight(), loaded.weight())
        self.assertEqual(qlinear.bias, loaded.bias)
        self.assertEqual(qlinear.scale, loaded.scale)
        self.assertEqual(qlinear.zero_point, loaded.zero_point)
        # Test JIT
        self.checkScriptable(qlinear, list(zip([X_q], [Z_ref])), check_save_load=True)
        # Test from_float
        float_linear = torch.nn.Linear(in_features, out_features).float()
        float_linear.qconfig = torch.quantization.default_qconfig
        torch.quantization.prepare(float_linear)
        float_linear(X.float())
        quantized_float_linear = torch.quantization.convert(float_linear)
        # Smoke test to make sure the module actually runs
        quantized_float_linear(X_q)
        # Smoke test extra_repr
        str(quantized_float_linear)
def test_quant_dequant_api(self):
    """Check that the nnq.Quantize/DeQuantize modules match the tensor-level API."""
    scale, zero_point, dtype = 1.0, 2, torch.qint8
    float_input = torch.tensor([[1., -1.], [1., -1.]], dtype=torch.float)
    # Quantize: module output must equal torch.quantize_linear on the same input.
    reference_q = torch.quantize_linear(float_input, scale, zero_point, dtype)
    module_q = nnq.Quantize(scale, zero_point, dtype)(float_input)
    self.assertEqual(reference_q, module_q)
    # Dequantize: module output must equal Tensor.dequantize on the same input.
    reference_dq = reference_q.dequantize()
    module_dq = nnq.DeQuantize()(module_q)
    self.assertEqual(reference_dq, module_dq)
@no_deadline
@unittest.skipIf(
    not torch.fbgemm_is_cpu_supported(),
    " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
    " with instruction set support avx2 or newer.",
)
@given(
    use_bias=st.booleans(),
    use_fused=st.booleans(),
)
def test_conv_api(self, use_bias, use_fused):
    """Tests the correctness of the conv module.
    The correctness is defined against the functional implementation.
    """
    # N x iC x H x W input; oC output channels, g groups, kH x kW kernel.
    N, iC, H, W = 10, 10, 10, 3
    oC, g, kH, kW = 16, 1, 3, 3
    scale, zero_point = 1.0 / 255, 128
    X = torch.randn(N, iC, H, W, dtype=torch.float32)
    # Permute to NHWC memory order -- presumably the layout the quantized
    # conv kernels expect; TODO confirm.
    X = X.permute([0, 2, 3, 1]).contiguous()
    qX = torch.quantize_linear(X, scale=scale, zero_point=128, dtype=torch.quint8)
    w = torch.randn(oC, iC // g, kH, kW, dtype=torch.float32)
    qw = torch.quantize_linear(w, scale=scale, zero_point=0, dtype=torch.qint8)
    # Bias is optional; when used, it is quantized to qint32.
    b = torch.randn(oC, dtype=torch.float32) if use_bias else None
    qb = torch.quantize_linear(b, scale=1.0 / 1024, zero_point=0, dtype=torch.qint32) if use_bias else None
    if use_fused:
        # Fused Conv2d + ReLU module.
        conv_under_test = ConvReLU2d(in_channels=iC,
                                     out_channels=oC,
                                     kernel_size=(kH, kW),
                                     stride=1,
                                     padding=0,
                                     dilation=1,
                                     groups=g,
                                     bias=use_bias,
                                     padding_mode='zeros')
    else:
        conv_under_test = Conv2d(in_channels=iC,
                                 out_channels=oC,
                                 kernel_size=(kH, kW),
                                 stride=1,
                                 padding=0,
                                 dilation=1,
                                 groups=g,
                                 bias=use_bias,
                                 padding_mode='zeros')
    # Run module with default-initialized parameters.
    # This tests that the constructor is correct.
    conv_under_test(qX)
    conv_under_test.set_weight(qw)
    conv_under_test.bias = qb
    conv_under_test.scale = scale
    conv_under_test.zero_point = zero_point
    # Test members
    self.assertTrue(hasattr(conv_under_test, '_packed_weight'))
    self.assertTrue(hasattr(conv_under_test, 'scale'))
    self.assertTrue(hasattr(conv_under_test, 'zero_point'))
    # Test properties
    self.assertEqual(qw, conv_under_test.weight())
    self.assertEqual(qb, conv_under_test.bias)
    self.assertEqual(scale, conv_under_test.scale)
    self.assertEqual(zero_point, conv_under_test.zero_point)
    # Test forward
    result_under_test = conv_under_test(qX)
    result_reference = qF.conv2d(qX, qw, bias=qb,
                                 scale=scale, zero_point=zero_point,
                                 stride=1, padding=0,
                                 dilation=1, groups=g, dtype=torch.quint8
                                 )
    if use_fused:
        # Emulate the fused ReLU on the reference result: clamp every value
        # below zero_point up to zero_point, element by element.
        # result_reference < zero_point doesn't work for qtensor yet
        # result_reference[result_reference < zero_point] = zero_point
        MB, OC, OH, OW = result_reference.size()
        for i in range(MB):
            for j in range(OC):
                for h in range(OH):
                    # NOTE(review): this loop variable `w` shadows the float
                    # weight tensor `w` above (unused afterwards, so harmless).
                    for w in range(OW):
                        if result_reference[i][j][h][w].int_repr() < zero_point:
                            # assign 0. that gets converted to zero_point
                            result_reference[i][j][h][w] = 0.
    self.assertEqual(result_reference, result_under_test,
                     message="Tensors are not equal.")
    # Test serialization of quantized Conv Module using state_dict
    model_dict = conv_under_test.state_dict()
    self.assertEqual(model_dict['weight'], qw)
    if use_bias:
        self.assertEqual(model_dict['bias'], qb)
    with tempfile.NamedTemporaryFile() as f:
        torch.save(model_dict, f)
        f.seek(0)
        loaded_dict = torch.load(f)
    for key in model_dict:
        self.assertEqual(loaded_dict[key], model_dict[key])
    # Rebuild an identically-configured module and load the saved state.
    if use_fused:
        loaded_conv_under_test = ConvReLU2d(in_channels=iC,
                                            out_channels=oC,
                                            kernel_size=(kH, kW),
                                            stride=1,
                                            padding=0,
                                            dilation=1,
                                            groups=g,
                                            bias=use_bias,
                                            padding_mode='zeros')
    else:
        loaded_conv_under_test = Conv2d(in_channels=iC,
                                        out_channels=oC,
                                        kernel_size=(kH, kW),
                                        stride=1,
                                        padding=0,
                                        dilation=1,
                                        groups=g,
                                        bias=use_bias,
                                        padding_mode='zeros')
    loaded_conv_under_test.load_state_dict(loaded_dict)
    self.assertEqual(loaded_conv_under_test.weight(), conv_under_test.weight())
    if use_bias:
        self.assertEqual(loaded_conv_under_test.bias, conv_under_test.bias)
    self.assertEqual(loaded_conv_under_test.scale, conv_under_test.scale)
    self.assertEqual(loaded_conv_under_test.zero_point, conv_under_test.zero_point)
    self.assertTrue(dir(loaded_conv_under_test) == dir(conv_under_test))
    self.assertTrue(hasattr(conv_under_test, '_packed_weight'))
    self.assertTrue(hasattr(loaded_conv_under_test, '_packed_weight'))
    self.assertTrue(hasattr(conv_under_test, 'weight'))
    self.assertTrue(hasattr(loaded_conv_under_test, 'weight'))
    self.assertEqual(loaded_conv_under_test.weight(), conv_under_test.weight())
    self.assertEqual(loaded_conv_under_test.weight(), qw)
    loaded_result = loaded_conv_under_test(qX)
    self.assertEqual(loaded_result, result_reference)
    # Serialization of the whole module object (not just its state_dict).
    with tempfile.NamedTemporaryFile() as f:
        torch.save(conv_under_test, f)
        f.seek(0)
        loaded_conv = torch.load(f)
    self.assertEqual(conv_under_test.bias, loaded_conv.bias)
    self.assertEqual(conv_under_test.scale, loaded_conv.scale)
    self.assertEqual(conv_under_test.zero_point, loaded_conv.zero_point)
    # JIT testing
    self.checkScriptable(conv_under_test, list(zip([qX], [result_reference])), check_save_load=True)
    # Test from_float: quantize a float Conv2d through the standard
    # prepare/convert workflow and smoke-test the result.
    float_conv = torch.nn.Conv2d(in_channels=iC,
                                 out_channels=oC,
                                 kernel_size=(kH, kW),
                                 stride=1,
                                 padding=0,
                                 dilation=1,
                                 groups=g,
                                 bias=use_bias,
                                 padding_mode='zeros').float()
    float_conv.qconfig = torch.quantization.default_qconfig
    torch.quantization.prepare(float_conv)
    float_conv(X.float())
    quantized_float_conv = torch.quantization.convert(float_conv)
    # Smoke test to make sure the module actually runs
    quantized_float_conv(qX)
    # Check that bias is quantized based on output scale
    if use_bias:
        qbias = torch.quantize_linear(float_conv.bias, quantized_float_conv.scale / 2**16, 0, torch.qint32)
        self.assertEqual(quantized_float_conv.bias.dequantize(), qbias.dequantize())
    # Smoke test extra_repr
    str(quantized_float_conv)
def test_pool_api(self):
    """Tests the correctness of the pool module.

    The quantized MaxPool2d module must produce the same output as the
    functional max_pool2d applied to the same quantized input.
    """
    batch, channels, height, width = 10, 10, 10, 3
    pool_args = {
        'kernel_size': 2,
        'stride': None,
        'padding': 0,
        'dilation': 1
    }
    scale, zero_point = 1.0 / 255, 128
    X = torch.randn(batch, channels, height, width, dtype=torch.float32)
    qX = torch.quantize_linear(X, scale=scale, zero_point=zero_point,
                               dtype=torch.quint8)
    qX_expect = torch.nn.functional.max_pool2d(qX, **pool_args)
    module_under_test = torch.nn.quantized.MaxPool2d(**pool_args)
    self.assertEqual(qX_expect, module_under_test(qX))
    # JIT Testing
    # NOTE(review): the scriptability check feeds the *float* tensor X,
    # unlike the forward check above which feeds the quantized qX --
    # confirm this is intentional.
    self.checkScriptable(module_under_test, list(zip([X], [qX_expect])))
# Run this file's tests via the shared PyTorch test runner.
if __name__ == '__main__':
    run_tests()
| [
"[email protected]"
]
| |
ec574c2f9acf0be8ef12d257b98b21de90949488 | 8192bef768e3fad6b858e17797eb6d8d615617b6 | /bowling/bowling.py | 24ec39882cd7549da9c3a991f1aa1f68cb16cf41 | []
| no_license | pabllo87/python_poznan_2014_08_05 | 6a8ca0b9fe1753bf5df228d94b444448c6e32f9d | 50b8b2dfd3193c56ed0b424e9fd4cb335fb29859 | refs/heads/master | 2020-12-25T10:59:17.099519 | 2014-08-07T14:15:35 | 2014-08-07T14:15:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | #print("I'm imported right now")
class PlayerScore:
    """Tracks one bowling player's rolls and scores a ten-frame game."""

    def __init__(self):
        # Pins knocked down per roll, in throw order.
        self.__rolls = []

    def roll(self, pins):
        """Record one roll; *pins* must be an int, otherwise TypeError is raised."""
        if not isinstance(pins, int):
            raise TypeError
        self.__rolls.append(pins)

    def score(self):
        """Score ten frames with standard strike/spare bonus rules."""
        total = 0
        pos = 0
        rolls = self.__rolls
        for _frame in range(10):
            if rolls[pos] == 10:
                # Strike: 10 plus the next two rolls; frame used one roll.
                total += 10 + rolls[pos + 1] + rolls[pos + 2]
                pos += 1
            elif rolls[pos] + rolls[pos + 1] == 10:
                # Spare: 10 plus the next roll.
                total += 10 + rolls[pos + 2]
                pos += 2
            else:
                # Open frame: just the two rolls.
                total += rolls[pos] + rolls[pos + 1]
                pos += 2
        return total
| [
"[email protected]"
]
| |
f56c458caf70a998bdeaa60b7858948b14420bec | a551f2e9671059e57b0533bfa5f740fb81ac6435 | /test_uloha_den_4.py | 1ef0ff67e6a91097b987b7fabb7ae12d4259efaa | []
| no_license | Natasa66/PythonAcademy | a372beadba7f2676aba2842050bb369d5b2c29c9 | d39eb1ab4c88575eb7b2cca49659e48c1971b39f | refs/heads/master | 2022-12-09T18:36:31.547790 | 2020-09-10T11:44:34 | 2020-09-10T11:44:34 | 279,904,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | from unittest import TestCase
from uloha_den_4 import zacina_samohlaskou
class TestZacinaSamohlaskou(TestCase):
    """Unit tests for zacina_samohlaskou ("starts with a vowel")."""

    def test_zacina_samohlaskou_male_pismena_true(self):
        # Lower-case word starting with a vowel.
        self.assertTrue(zacina_samohlaskou("alena"))

    def test_zacina_samohlaskou_velke_pismena_true(self):
        # Upper-case initial vowel.
        self.assertTrue(zacina_samohlaskou("Alena"))

    def test_zacina_samohlaskou_male_pismena_false(self):
        # Lower-case word starting with a consonant.
        self.assertFalse(zacina_samohlaskou("lenka"))

    def test_zacina_samohlaskou_velke_pismena_false(self):
        # Upper-case initial consonant.
        self.assertFalse(zacina_samohlaskou("Lenka"))

    def test_zacina_samohlaskou_nie_je_retazec_false(self):
        # Non-string input must be rejected.
        self.assertFalse(zacina_samohlaskou(45))
"[email protected]"
]
| |
6142e7a74039e267ec08477e21952b9991b89888 | 4ee5affb8b16ff7d26df9b19ffee8d675df11e4e | /nested-loops/train_the_trainers.py | ce76aebb5569e2ac15837eb95cccaa5edc35603a | []
| no_license | ayk-dev/python-basics | f60849f6502d64445105a0d27272d9910ea1d509 | af6d04f9001d9a45e8474f9bd4fa2b3ebe380c97 | refs/heads/main | 2023-01-12T11:56:12.210880 | 2020-11-17T20:06:40 | 2020-11-17T20:06:40 | 311,747,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | n = int(input()) # number of people in jury
# The juror count `n` is read from stdin on the preceding line (fused out of
# this span).
presentation_counter = 0
presentaion = input()  # first presentation name (sic: variable name typo)
all_presentations_grades = 0
# Read presentations until the sentinel word "Finish" is entered.
while presentaion != 'Finish':
    total = 0
    # One grade per juror.
    for pres in range(1, n + 1):
        grade = float(input())
        total += grade
    average_grade = total / n
    all_presentations_grades += average_grade
    print(f'{presentaion} - {average_grade:.2f}.')
    presentaion = input()
    presentation_counter += 1
# NOTE(review): if the very first input is "Finish", presentation_counter is 0
# and this division raises ZeroDivisionError.
final_average = all_presentations_grades / presentation_counter
print(f"Student's final assessment is {final_average:.2f}.")
| [
"[email protected]"
]
| |
5e72b33c4b8237aa5a528020daab42fd030adac0 | def7dd81a7f95025c2f4689ce9779152291f0297 | /io_equalizerviz.py | f7f59d1f6caa35429df9a2262629db0c7253b18c | [
"MIT"
]
| permissive | PROPHESSOR/Blender-Equalizer-Audio-Visualizer | a255ec038be9cc1611ad0decfef74fd260202227 | 0405f2c47a76b47f22948a65561db5efe66f5bd8 | refs/heads/master | 2020-12-04T07:41:14.203339 | 2020-01-03T23:47:45 | 2020-01-03T23:47:45 | 231,681,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,078 | py | # EqualizerViz - Audio visualization plugin
# Created by PROPHESSOR for Blender 2.80 (04.01.2020)
#
# Based on sirrandalot's "Audio visualisation script" for Blender 2.71
import bpy
from bpy_extras.io_utils import ImportHelper
from bpy.props import IntProperty, StringProperty
from bpy.types import Operator
from bpy_extras.wm_utils.progress_report import ProgressReport
# Add-on metadata shown in Blender's preferences; "url" and "tracker_url"
# point at the project repository.
bl_info = {
    "name": "Import Equalizer Audio",
    "author": "PROPHESSOR",
    "description": "Imports the audio file to create equalizer visualization. Wav import is more faster.",
    "version": (1, 0, 1),
    "blender": (2, 80, 0),
    "location": "File > Import > Equalizer",
    "url": "https://github.com/PROPHESSOR/Blender-Equalizer-Audio-Visualizer",
    "tracker_url": "https://github.com/Blender/Blender-Equalizer-Audio-Visualizer/issues",
    "category": "Import-Export"
}
def menu_func_import(self, context):
    # Menu entry appended to File > Import; invokes the import operator below.
    self.layout.operator(ImportEqualizerAudioFile.bl_idname, text="Audio for EqualizerViz")
def register():
    """Register the operator and hook it into Blender's File > Import menu."""
    bpy.utils.register_class(ImportEqualizerAudioFile)
    # Add import menu item
    if hasattr(bpy.types, 'TOPBAR_MT_file_import'):
        #2.8+
        bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
    else:
        # Pre-2.8 menu location.
        bpy.types.INFO_MT_file_import.append(menu_func_import)
class ImportEqualizerAudioFile(Operator, ImportHelper):
    """Imports the audio file to visualize using equalizer simulator.

    One plane ("bar") is created per frequency band and the band's amplitude
    is baked onto the bar's Y scale via Blender's sound-bake operator.
    """
    bl_idname = "equalizerviz_blender.import_audio"
    bl_label = "Import audio file to visualize. Wav is more faster."
    filename_ext = ".wav"  # Wav import is more faster
    # Blender 2.80+ (which bl_info targets) requires bpy.props declared as
    # class *annotations* (name: Prop(...)); the old assignment form is
    # deprecated and warns at registration time.
    #filter_glob: StringProperty(
    #    default = "*.wav",
    #    options = { 'HIDDEN' },
    #    maxlen= 255
    #)
    numbars: IntProperty(
        name="Number of equalizer bars",
        description=(
            "Number of bars and frequency ranges."
        ),
        default=64
    )

    def execute(self, context):
        """Create ``numbars`` bar planes and bake one frequency band to each."""
        with ProgressReport(context.window_manager) as progress:
            progress.enter_substeps(self.numbars, "Importing frequency %d ranges as bars %r..." % (self.numbars, self.filepath))
            for i in range(0, self.numbars):
                # Add a plane and set it's origin to one of its edges
                bpy.ops.mesh.primitive_plane_add(location=((i + (i * 0.5)), 0, 0))
                bpy.context.scene.cursor.location = bpy.context.active_object.location
                bpy.context.scene.cursor.location.y -= 1
                bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
                # Scale the plane on the x and y axis, then apply the transformation
                bpy.context.active_object.scale.x = 0.5
                bpy.context.active_object.scale.y = 20
                bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
                # Insert a scaling keyframe and lock the x and z axis so only
                # the baked Y scale animates.
                bpy.ops.anim.keyframe_insert_menu(type='Scaling')
                bpy.context.active_object.animation_data.action.fcurves[0].lock = True
                bpy.context.active_object.animation_data.action.fcurves[2].lock = True
                # Set the window context to the graph editor
                bpy.context.area.type = 'GRAPH_EDITOR'
                # Quadratic spacing: bar i covers [i^2 + 20, (i+1)^2 + 20) Hz.
                low = i**2 + 20
                high = (i + 1)**2 + 20
                progress.step("Bar %d of %d: %d Hz - %d Hz. Baking..." % (i, self.numbars, low, high))
                # Bake that range of frequencies to the current plane (along the y axis)
                bpy.ops.graph.sound_bake(filepath=self.filepath, low=(low), high=(high))
                # Lock the y axis
                bpy.context.active_object.animation_data.action.fcurves[1].lock = True
            progress.leave_substeps("Done.")
        return { "FINISHED" }
# Allow running the add-on directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| [
"[email protected]"
]
| |
f3e2452d08102097b71299f1835a5000ecc6f07d | e4f8b14cead542586a96bcaa75993b0a29b3c3d0 | /pyNastran/f06/test/test_f06.py | 1bd6ea7db2cd64bd4ae4a058a7e38f9e763c9e81 | []
| no_license | afcarl/cyNastran | f1d1ef5f1f7cb05f435eac53b05ff6a0cc95c19b | 356ee55dd08fdc9880c5ffba47265125cba855c4 | refs/heads/master | 2020-03-26T02:09:00.350237 | 2014-08-07T00:00:29 | 2014-08-07T00:00:29 | 144,398,645 | 1 | 0 | null | 2018-08-11T15:56:50 | 2018-08-11T15:56:50 | null | UTF-8 | Python | false | false | 5,968 | py | import os
import sys
import time
from traceback import print_exc
import pyNastran
from pyNastran.f06.f06 import F06
#from pyNastran.op2.test.test_op2 import parseTableNamesFromF06, getFailedFiles
def run_lots_of_files(files, debug=True, saveCases=True, skipFiles=None,
                      stopOnFailure=False, nStart=0, nStop=1000000000):
    """Run run_f06 on a batch of F06 files and report pass/fail statistics.

    :param files: list of F06 file paths to test
    :param debug: passed through to run_f06
    :param saveCases: if True, write the failing file names to failedCases.in
    :param skipFiles: base names to skip (default: none)
    :param stopOnFailure: passed through to run_f06
    :param nStart: index of the first file to run
    :param nStop: index past the last file to run
    """
    # BUG FIX: the default used to be a mutable `skipFiles=[]`, which is
    # shared across calls; use None as the sentinel instead.
    if skipFiles is None:
        skipFiles = []
    n = ''
    iSubcases = []
    failedCases = []
    nFailed = 0
    nTotal = 0
    nPassed = 0
    t0 = time.time()
    for i, f06file in enumerate(files[nStart:nStop], nStart):  # 149
        baseName = os.path.basename(f06file)
        #if baseName not in skipFiles and not baseName.startswith('acms') and i not in nSkip:
        if baseName not in skipFiles:
            print("%" * 80)
            print('file=%s\n' % f06file)
            n = '%s ' % (i)
            sys.stderr.write('%sfile=%s\n' % (n, f06file))
            nTotal += 1
            isPassed = run_f06(f06file, iSubcases=iSubcases, debug=debug,
                               stopOnFailure=stopOnFailure)  # True/False
            if not isPassed:
                sys.stderr.write('**file=%s\n' % (f06file))
                failedCases.append(f06file)
                nFailed += 1
            else:
                nPassed += 1
    #sys.exit('end of test...test_f06.py')
    if saveCases:
        # Use a context manager so the file is closed even if a write fails.
        # NOTE: 'wb' kept for fidelity with the original (Python 2 era code).
        with open('failedCases.in', 'wb') as f:
            for f06file in failedCases:
                f.write('%s\n' % (f06file))
    print("dt = %s seconds" % (time.time() - t0))
    #f06 = F06('test_tet10_subcase_1.f06')
    #f06.readF06()
    # Guard against division by zero when every file was skipped.
    pct = 100. * nPassed / float(nTotal) if nTotal else 0.
    sys.exit('-----done with all models %s/%s=%.2f%% nFailed=%s-----' % (
        nPassed, nTotal, pct, nTotal - nPassed))
def run_f06(f06_filename, iSubcases=None, write_f06=True, debug=False,
            stopOnFailure=True):
    """Read a single F06 file, optionally rewrite it, and report success.

    :param f06_filename: path to the F06 file to read
    :param iSubcases: subcase IDs to restrict to (currently unused; the
        set_subcases call is commented out below)
    :param write_f06: if True, write the parsed model to <model>.test_f06.f06
    :param debug: passed through to the F06 reader
    :param stopOnFailure: if False, unexpected exceptions are swallowed and
        reported as a failure instead of propagating
    :returns: True if the file was read (and rewritten) without error
    """
    # Avoid a shared mutable default argument.
    if iSubcases is None:
        iSubcases = []
    isPassed = False
    #stopOnFailure = False
    #debug = True
    try:
        f06 = F06(debug=debug)
        #f06.set_subcases(iSubcases)  # TODO not supported
        #f06.readBDF(f06.bdf_filename,includeDir=None,xref=False)
        f06.read_f06(f06_filename)
        # NOTE(review): this assert makes write_f06=False impossible; it
        # predates this change and is kept as-is.
        assert write_f06 == True, write_f06
        if write_f06:
            (model, ext) = os.path.splitext(f06_filename)
            f06.write_f06(model + '.test_f06.f06')
        isPassed = True
    except KeyboardInterrupt:
        sys.stdout.flush()
        print_exc(file=sys.stdout)
        # BUG FIX: this used to reference the undefined name `f06file`,
        # raising a NameError instead of reporting the offending file.
        sys.stderr.write('**file=%r\n' % f06_filename)
        sys.exit('keyboard stop...')
    except SystemExit:
        #print_exc(file=sys.stdout)
        #sys.exit('stopping on sys.exit')
        raise
    # A long list of specific exception types (IOError, AssertionError,
    # SyntaxError, NotImplementedError, ...) was historically special-cased
    # here; see version control history for the commented-out handlers.
    except:
        print_exc(file=sys.stdout)
        if stopOnFailure:
            raise
        else:
            isPassed = False
    # Write instead of Python-2 `print x, y` so the module also parses on
    # Python 3; the emitted text is identical.
    sys.stdout.write("isPassed = %s\n" % isPassed)
    return isPassed
def main():
    """Command-line entry point: parse arguments with docopt, run one F06 file.

    NOTE: Python 2 only (uses dict.iteritems below).
    """
    from docopt import docopt
    msg = 'Tests to see if an F06 will work with pyNastran.\n'
    msg += 'Usage:\n'
    # NOTE(review): this usage line is missing a trailing \n, so the next
    # usage line is concatenated onto it -- confirm against docopt parsing.
    msg += ' f06.py [-f] [-p] [-q] F06_FILENAME'
    msg += ' f06.py -h | --help\n'
    msg += ' f06.py -v | --version\n'
    msg += '\n'
    msg += 'Positional Arguments:\n'
    msg += ' F06_FILENAME path to F06 file\n'
    msg += '\n'
    msg += 'Options:\n'
    msg += ' -q, --quiet prints debug messages (default=False)\n'
    msg += ' -f, --write_f06 writes the f06 to fem.f06.out (default=True)\n'
    msg += ' -h, --help show this help message and exit\n'
    msg += " -v, --version show program's version number and exit\n"
    # disabled b/c the F06 doesn't support complex well
    #msg += ' -z, --is_mag_phase F06 Writer writes Magnitude/Phase instead of\n'
    #msg += ' Real/Imaginary (still stores Real/Imag)\n'
    # With no arguments, print the usage text and exit.
    if len(sys.argv) == 1:
        sys.exit(msg)
    ver = str(pyNastran.__version__)
    data = docopt(msg, version=ver)
    # Echo the parsed options for the user.
    for key, value in sorted(data.iteritems()):
        print("%-12s = %r" % (key.strip('--'), value))
    # Remove the skipped-cards log from a previous run, if any.
    if os.path.exists('skippedCards.out'):
        os.remove('skippedCards.out')
    run_f06(data['F06_FILENAME'],
            write_f06 = data['--write_f06'],
            debug = not(data['--quiet']),
            stopOnFailure = True
            )
# Script entry point.
if __name__ == '__main__':  # f06
    main()
| [
"mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b"
]
| mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b |
ba50261f4095195e91f34f82c65ee1d79a2c97aa | 5e87661f1ddba14b750b374eff4a15bcda6c4ce1 | /ex1.py | b3d17b6c2117daba7a4625d607bfaf77c1d601e8 | []
| no_license | gabe32130/AST4320-A2 | cf894a9c798e15d6076ee7170a878d83593a656c | 7a17d2c491e8d5818de45180b2849b4abd865211 | refs/heads/master | 2021-07-16T04:00:16.787186 | 2017-10-20T16:16:04 | 2017-10-20T16:16:04 | 107,699,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import pylab as pl
import numpy as np
import cmath as m
from scipy.fftpack import fft, ifft
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy.interpolate import UnivariateSpline
import pylab as pl
#plot the step function
step=1000
x=np.linspace(-10, 10, step)
xn=np.zeros(len(x))
xp=np.zeros(len(x))
Wx=np.zeros(len(x))
Wxn=np.zeros(len(x))
Wxp=np.zeros(len(x))
R=6.5
for i in range (len(x)):
if x[i] <0:
xn[i]=x[i]
if abs(xn[i]) < R:
Wxn[i]=1
else:
Wxn[i]=0
else:
xn[i]=0
for i in range (len(x)):
if x[i] >0:
xp[i]=x[i]
if abs(xp[i]) < R:
Wxp[i]=1
else:
Wxp[i]=0
else:
xp[i]=0
x= xn+xp
Wx=Wxn+Wxp
plt.plot(x,Wx, label=r'linewidth')
plt.xlabel(r'x', size=14)
plt.ylabel(r'W(x)', size=14)
plt.ylim([0,2])
plt.legend(fontsize=14)
plt.savefig("fig1.pdf",bbox_inches='tight')
plt.show()
################################################################################
#Fourier Transform
W_f=np.zeros(len(x))
k=x
W_f = np.sin(2.0*R*k)/(2.0*np.pi*k)
plt.plot(x,W_f, label=r'linewidth')
plt.xlabel(r'x', size=14)
plt.ylabel(r'W(f)', size=14)
plt.ylim([-0.5,2.5])
plt.legend(fontsize=14)
plt.savefig("fig2.pdf",bbox_inches='tight')
plt.show()
################################################################################
#FWHM
half_max=np.max(W_f)/2
print (half_max)
#max_x = x[W_f.index(half_max)]
#print (max_x)
#indx=x.index(-0.14695)
#print (indx)
x_curve = UnivariateSpline(x, W_f, s=0)
r=x_curve.roots()
L=len(r)
#print (L)
max= (L/2)-2
min= (L/2)-1
r1=r[40]
r2=r[41]
FWHM=abs(r1-r2)
print(FWHM)
pl.plot(x, W_f)
pl.axvspan(r1, r2, facecolor='g', alpha=0.5)
plt.savefig("fig3.pdf",bbox_inches='tight')
pl.show()
#-0.14695
| [
"[email protected]"
]
| |
eb66fd2975ad3f094b37a4fdcc40596102ed3e36 | 5226953da8873d944327497e4f37864c38f68de0 | /ratioICAtest.py | 44af988ca0b4f4ac2d7b458fad5635760a38ea3b | []
| no_license | tailintalent/PredictionCode | 64a9ada4abddb053928d74825aab4fde7d5d2097 | dfe59941568cf43b835c59e8d7d3d1e34ca581b4 | refs/heads/master | 2020-04-17T05:42:59.659203 | 2019-01-18T07:27:02 | 2019-01-18T07:29:00 | 166,293,616 | 0 | 0 | null | 2019-01-17T20:40:51 | 2019-01-17T20:40:50 | null | UTF-8 | Python | false | false | 4,312 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 6 13:11:50 2018
test ratiometric versus ICA for some toy data.
@author: monika
"""
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import FastICA
from stylesheet import *
import matplotlib.gridspec as gridspec
#####################################
fig = plt.figure('FigICA', figsize=(9.5, 7.5))
letters = ['A', 'B', 'C', 'D']
x0 = 0
locations = [(x0,0.95), (0.5,0.95), (0,0.45), (0.5,0.45)]
for letter, loc in zip(letters, locations):
plt.figtext(loc[0], loc[1], letter, weight='semibold', size=18,\
horizontalalignment='left',verticalalignment='baseline',)
gs1 = gridspec.GridSpec(4, 2, height_ratios=[1,4,1,4])
gs1.update(left=0.1, right=0.99, wspace=0.25, bottom = 0.1, top=0.95, hspace=0.2)
ax1 = plt.subplot(gs1[0,0])
ax2 = plt.subplot(gs1[1,0])
ax3 = plt.subplot(gs1[0:2,1])
ax4 = plt.subplot(gs1[3:,0])
ax5 = plt.subplot(gs1[3:,1])
######################################
x = np.arange(100)
#R = 100-np.arange(100)+np.random.rand(100)
#G = 1-0.1*np.arange(100)+1*np.random.rand(100)
R = 2*np.exp(-x/50)+np.random.rand(100)
G = 2*np.exp(-x/50)+np.random.rand(100)
A = 0.25
R = 1+A*np.random.normal(loc=0, scale=1, size=100) #+ 5*np.exp(-x/10)
G = 1+ A*np.random.normal(loc=0, scale=1, size=100)#+ 5*np.exp(-x/50)
######################################
S = np.ones(len(x))
Bg = np.ones(len(x))
S[15:20] +=2.5
S[55:60] +=4.5
S[75:80] +=8.5
# add artefacts
a = 1.5
Bg[30:40] -=a
S[30:40] -=a
#Bg[80:90] -=1.5
#S[80:90] -=1.5
# add signal and background
R += Bg
G += S
# true signal
S[30:40] +=a
#S[80:90] +=1.5
S0 = np.percentile(S, [20])
S = np.divide(S-S0,np.abs(S0))
# ICA signal
ica = FastICA(n_components=2)
signal = np.vstack([R,G])
comp = ica.fit_transform(signal.T)
index = np.argmax([np.abs(np.corrcoef(s, G/R)[0][1]) for s in comp.T])
factor = np.sign(np.corrcoef(comp[:,index], G)[0][1])
comp[:,index]*=factor
factor = np.sign(np.corrcoef(comp[:,1-index], R)[0][1])
comp[:,1-index]*=-factor
I0 = np.percentile(comp, [20], axis=0)
ICA = np.divide(comp-I0,np.abs(I0))
# ratiometric signal
G0, R0 = np.percentile(G, [20]), np.percentile(R, [20])
#tmpRatio = (G/G0)/(R/R0)
tmpRatio=G/R
Ratio0 = np.percentile(tmpRatio, [20])
Ratio = np.divide(tmpRatio-Ratio0,np.abs(Ratio0))
ax1.plot(S, color='k')
ax1.set_title('True signal')
cleanAxes(ax1)
ax2.set_ylabel('Raw intensity')
ax2.plot(R, R1, label="RFP")
ax2.plot(G, 'g', label="GCaMP")
ax2.set_ylim(-1,12)
lh = 0.2
h0=0.6
ax2.annotate('', xy=(0.37, h0), xytext=(0.37, h0+lh),ha="center", va="center",
arrowprops=dict(facecolor='black', shrink=0.05),xycoords='axes fraction'
)
ax3.annotate('', xy=(0.37, h0*0.75), xytext=(0.37, (h0+lh)*0.75),ha="center", va="center",
arrowprops=dict(facecolor='black', shrink=0.05),xycoords='axes fraction'
)
ax4.annotate('', xy=(0.37, h0), xytext=(0.37, h0+lh),ha="center", va="center",
arrowprops=dict(facecolor='black', shrink=0.05),xycoords='axes fraction'
)
ax5.annotate('', xy=(0.37, h0), xytext=(0.37, h0+lh),ha="center", va="center",
arrowprops=dict(facecolor='black', shrink=0.05),xycoords='axes fraction'
)
#ax3.annotate('', xy=(35, 5), xytext=(35, 7.5),ha="center", va="center",
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
#ax4.annotate('', xy=(35, 5), xytext=(35, 7.5),ha="center", va="center",
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
#ax5.annotate('', xy=(35, 5), xytext=(35, 7.5),ha="center", va="center",
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
#ax2.set_xlabel("Time (a.u.)")
ax2.legend()
ax3.set_title('Ratiometric')
ax3.set_ylabel(r"$\Delta R/R_0$")
ax3.plot(Ratio, L2)
ax3.plot(S, 'k--',zorder=-1, lw=1.5, label='True signal')
ax3.set_ylim(-2,10)
ax3.set_yticks([0,5,10])
#ax3.set_xlabel("Time (a.u.)")
ax3.legend()
ax4.set_title('ICA background')
ax4.set_ylabel(r"$\Delta I/I_0$")
ax4.plot(ICA[:,1-index])
ax4.set_ylim(-2,10)
ax4.set_xlabel("Time (a.u.)")
ax5.set_title('ICA signal')
ax5.set_ylabel(r"$\Delta I/I_0$")
ax5.plot(ICA[:,index], L0)
ax5.plot(S, 'k--', zorder=-1, lw=1.5)
ax5.set_ylim(-2,10)
ax5.set_xlabel("Time (a.u.)")
ax5.set_yticks([0,5,10])
plt.show()
| [
"[email protected]"
]
| |
9cfc6c28763ed3a3ef9c48c4afd6a23290c5d1ff | 000b796eeeb47c9e4df6e940fabda9eefb262feb | /k_neighbors.py | 0309ff278543d333192d486819757fafde50c296 | []
| no_license | udupashreyas/Machine-Learning | 448189f4320c625e0b3100456a8c04c4ce2c5d49 | 5e1b5946c52fc4bf31400bca4d2a08747df918e9 | refs/heads/master | 2021-01-18T22:16:18.993015 | 2016-10-31T04:07:15 | 2016-10-31T04:07:15 | 72,398,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
# Load the UCI breast-cancer-wisconsin data set from a local file.
# NOTE(review): this script targets Python 2 (the trailing `print accuracy`
# statement); on Python 3 the '\U' in this non-raw Windows path is even a
# SyntaxError -- use a raw string r'C:\...' when porting.
df = pd.read_csv('C:\Users\udupa\Documents\\breast-cancer-wisconsin.data.txt')
# The data set marks missing values with '?'; replace them with an outlier
# value and drop the non-predictive id column.
df.replace('?',-99999, inplace=True)
df.drop(['id'], 1, inplace=True)
# Features are every column except the class label.
X = np.array(df.drop(['class'], 1))
y = np.array(df['class'])
# 80/20 train/test split, then fit a default k-nearest-neighbours classifier.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
# Mean accuracy on the held-out test set (printed on the following line).
accuracy = clf.score(X_test, y_test)
print accuracy | [
"[email protected]"
]
| |
e5a0abd35749ee7bf4574fc3a0f4af6c92bc6254 | ee8cccde9139b8bfb661cafefa1db12d03dc4898 | /products/models.py | a2cc98192d8c648d833f95e98f54533e19498bfe | []
| no_license | GhattiM/producthunt-project | 5876f75feb824a5a43d0b54ed8024916420c873c | 462dadc44dc8c4e435bcd6bed791a5bfa27bc6cc | refs/heads/master | 2020-03-19T22:23:03.837984 | 2018-06-11T19:49:04 | 2018-06-11T19:49:04 | 136,968,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | from django.db import models
from django.contrib.auth.models import User
class Product(models.Model):
title = models.CharField(max_length=255)
url = models.TextField()
pub_date = models.DateTimeField()
votes_total = models.IntegerField(default=1)
image = models.ImageField(upload_to='images/')
icon = models.ImageField(upload_to='images/')
body = models.TextField()
hunter = models.ForeignKey(User,on_delete=models.CASCADE)
def __str__(self):
return self.title
def summary(self):
return self.body[:100]
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
| [
"[email protected]"
]
| |
bbb7bae1eb34c8aa75f685c887250a7cb89db904 | ce63ca8f6e7ed3df709347bba88273e82be233dc | /lib/__init__.py | 2bdb2b20e489b383dd5b0646aff092f0aaf8cd28 | []
| no_license | dha9011/tag_agent | d990a15af880bce22752acbc75565ab5be9011a7 | 0c7490addd683e3c8c2a499a4584c0ce170c64b2 | refs/heads/master | 2021-01-17T13:29:41.192469 | 2016-07-26T06:53:04 | 2016-07-26T06:53:04 | 59,182,108 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Date : 17/2/16 PM4:13
# Copyright: TradeShift.com
__author__ = 'liming'
| [
"[email protected]"
]
| |
e16b715804c18a6384e3118d83a4956a07d4ee1e | f7fd13ec25600ceeda7423aa0066619d13adf08c | /乙/1011.py | 30431a36deb4656bf0671c11e9df54db8a4c3bce | []
| no_license | YWithT/PAT | b6c75fe0e075acf5a871969176010f56b733b44d | 1db79913cd9b6e0f54da33d2696a3c1e4dd4daaf | refs/heads/master | 2021-09-09T12:44:42.881154 | 2018-03-16T07:28:25 | 2018-03-16T07:28:25 | 114,212,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | a = int(input())
# PAT Basic 1011-style judge: `a` (read from stdin on the line above) is the
# number of test cases; each case holds three integers A B C, and the verdict
# is whether A + B > C.
result = []
for _ in range(0, a):
    numbers = [int(token) for token in input().split()]
    result.append("true" if numbers[0] + numbers[1] > numbers[2] else "false")
for case_number, verdict in enumerate(result, start=1):
    print("Case #" + str(case_number) + ": " + verdict)
| [
"[email protected]"
]
| |
1423e61fcaddb8a485741d88cc9d6c4ab6f7b527 | 12c6f045f9e9c2886ddb2482f6ec161d02287ddd | /chapter1/ssd1.py | 868a0f8a95d6423230a6d1d572c15f39f72eca17 | [
"MIT"
]
| permissive | alicengh/multi-analysis | fc45d6f136b7a085e7dcc0afeef91d1f76d1526a | cdcab67ca05fd6b8e591dd3a6ecb87dd7e72be53 | refs/heads/master | 2020-07-02T23:58:20.631051 | 2016-11-20T10:34:34 | 2016-11-20T10:34:34 | 74,212,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # coding: utf-8
"""Sum of Squared Deviation Example1
@author: Liz
@modified: 11-20-2016
"""
import numpy as np
# Sample data.
data = np.array([2, 5, 7, 12, 15])
# Deviation of each sample from the mean (mean broadcast via a ones vector).
dev = data - np.ones_like(data) * data.mean()
# Sum of squared deviations.
ssd = sum(d ** 2 for d in dev)
# Report the intermediate quantities; the deviations always sum to ~0.
for label, value in [
    ("data: {}", data),
    ("ave : {}", data.mean()),
    ("dev : {}", dev),
    ("sum of dev: {}", sum(dev)),
    ("sum of sqd dev: {}", ssd),
]:
    print(label.format(value))
| [
"[email protected]"
]
| |
1491c941a137b8757857f6a599b96563f99bb0ba | e9597319306a89d477d49e34215dd079b000d41c | /project/settings/apps.py | eaf88f539c4cb0368ff4e067ece49d64777d8a08 | []
| no_license | LucasBerbesson/ottaviano | 75e4c897cfc34d0e5aaaa643b8e137ee6466cf06 | c03fc95e1441f5ecca7167e19fa30ac682c2fb4b | refs/heads/master | 2023-04-28T01:55:15.382859 | 2019-09-16T13:02:37 | 2019-09-16T13:02:37 | 208,800,365 | 0 | 0 | null | 2022-11-22T04:14:59 | 2019-09-16T12:59:13 | CSS | UTF-8 | Python | false | false | 350 | py | # Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
CONTRIB_APPS = [
]
PROJECT_APPS = [
'reservations',
]
INSTALLED_APPS = DJANGO_APPS + CONTRIB_APPS + PROJECT_APPS
| [
"[email protected]"
]
| |
505e6466c3ca23f5decd5203746f71c43373cbe0 | 794bb12645c8f502cde6506afa684bc435d85ae7 | /VideoChat_server.py | 759b26f0c8ea72106c5c813b6973ba68b200d497 | []
| no_license | natasha012/Live-Video-Streaming-Chat-App | 4cefa0321a21b9d8e46a765dab47ac013b67e64f | 8ac8af829e3fae9a5dd7dcb6e04735910dcbaccf | refs/heads/main | 2023-05-25T20:41:41.121269 | 2021-06-10T12:35:04 | 2021-06-10T12:35:04 | 375,691,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | import os
import cv2
import numpy as np
import socket
# Capture from camera index 1 (secondary camera on this machine).
cap=cv2.VideoCapture(1)
# Create Socket
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip="192.168.56.1"
port=8888
# Socket Binding
s.bind((ip,port))
s.listen(5)
# Listening and waiting for connection; blocks until one client connects.
conn,addr = s.accept()
# Main relay loop: receive a JPEG frame from the client, display it,
# then capture and send back one local frame.
while True:
    # NOTE(review): recv() may return a partial frame — this assumes each
    # recv delivers one whole JPEG; confirm against the client's send size.
    data = conn.recv(90456)
    # Decode the image
    # NOTE(review): np.fromstring is deprecated for binary data; the
    # modern equivalent is np.frombuffer(data, np.uint8).
    arry = np.fromstring(data, np.uint8)
    photo = cv2.imdecode(arry, cv2.IMREAD_COLOR)
    # imdecode returns None when the buffer is not a decodable image;
    # skip display in that case.
    if type(photo) is type(None):
        pass
    else:
        cv2.imshow("SERVER-SCREEN",photo)
    # Exit the loop when the Enter key (code 13) is pressed in the window.
    if cv2.waitKey(10)==13:
        break
    stat,photo=cap.read()
    # Encode image and send via network
    photo_data = cv2.imencode('.jpg', photo)[1].tobytes()
    conn.sendall(photo_data)
# Tear down the preview window and release the camera on exit.
cv2.destroyAllWindows()
cap.release()
os.system("cls") | [
"[email protected]"
]
| |
aff2e38898c5fd8fae9c7aae527d7627d11dd228 | 2c2dafc1a9d8febfd107971c5afc1376b458e8dc | /181009 - 4673.py | 33c5ca73f86a3607e5b832d5a9cc094ddf2e26b7 | []
| no_license | to-besomeone/algorithm | 2e1e0f720a4d1762a35c9573175f3080b14db00a | 6055061e33f93c4e776a7fa4f87d85b1dc97a732 | refs/heads/master | 2020-03-31T16:09:04.266251 | 2018-11-18T12:29:50 | 2018-11-18T12:29:50 | 152,364,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | def d(n):
result = n
while n != 0:
result += n % 10
n = n // 10
return result
# arr[k] != 0 marks k as reachable via d(j) for some smaller j; sized
# 20000 because d(i) can exceed 10000 for i near the upper bound.
arr = [0] * 20000
for i in range(1, 10001):
    arr[d(i)] = i
    # d(i) > i always, so every generator of i (all < i) has already been
    # processed by the time we test arr[i]: zero means i is a self number.
    if arr[i] == 0:
print(i) | [
"[email protected]"
]
| |
c7e22f0bf7b01228370e01f7a828269910eb4af3 | a4b2af9318c47b084193dbf63ebc0f2dfd6861b5 | /VUsbTools/Decoders/iPhone.py | 982b39032def6bd372294a4c48b9369c0be6e56f | [
"MIT"
]
| permissive | vpelletier/vusb-analyzer | 2ac55a01173fd71fcc9025991b9d7a8788b5763e | 30905f38ffc625bb76a35532e60ba3dd109f8f1c | refs/heads/master | 2020-03-29T12:54:10.849734 | 2012-07-08T14:16:52 | 2012-07-08T14:16:52 | 3,121,557 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,665 | py | #
# VUsbTools.Decoders.iPhone
# Micah Elizabeth Scott <[email protected]>
#
# Decodes the usbmuxd protocol used by iPhone and iPod Touch devices.
# Based on protocol information from marcan's open source usbmuxd
# implementation at http://marcansoft.com/blog/iphonelinux/usbmuxd/
#
# Copyright (C) 2010 VMware, Inc. Licensed under the MIT
# License, please see the README.txt. All rights reserved.
#
import plistlib
import struct
from VUsbTools import Decode, Struct, Types
def isascii(s):
    """Return True when every character of *s* is printable ASCII
    (code points 32-126 inclusive); control characters and anything
    above the 7-bit range disqualify the string."""
    return all(32 <= ord(ch) <= 126 for ch in s)
class USBMuxDecoder:
    """Decodes incoming or outgoing usbmuxd bulk packets."""
    # IP protocol numbers that may appear in the usbmuxd header.
    ipProto = Struct.EnumDict({
        0: 'VERSION',
        1: 'ICMP',
        6: 'TCP',
        17: 'UDP',
        41: 'IPv6',
    })
    # Well-known device-side TCP ports, used to pick a port_* handler.
    portNumbers = Struct.EnumDict({
        62078: 'lockdownd',
    })
    # Bytes still expected for the current multi-transaction mux packet.
    remainingLength = 0
    # Reassembly buffer for the lockdownd byte stream.
    lockdownBuffer = ""

    def handleEvent(self, event):
        # Entry point: dispatch one USB transaction, either starting a new
        # usbmuxd packet or consuming the continuation of the previous one.
        if not event.isDataTransaction():
            return
        if self.remainingLength == 0:
            # Beginning a new packet
            self.handleGenericPacket(event)
        elif self.remainingLength >= event.datalen:
            # Continuing a packet
            self.remainingLength -= event.datalen
            event.pushDecoded("[usbmuxd continuation, %d bytes left]" %
                              self.remainingLength)
        else:
            # Got more data than the header promised; resynchronize.
            event.pushDecoded("[usbmuxd ERROR, only expected %d bytes]" %
                              self.remainingLength)
            self.remainingLength = 0

    def handleGenericPacket(self, event):
        """Decode the usbmuxd header."""
        # Header is two big-endian 32-bit words: protocol, total length.
        muxHeader = Struct.Group(None,
                                 Struct.UInt32BE("protocol"),
                                 Struct.UInt32BE("length"))
        data = muxHeader.decode(event.data)
        description = "iPhone usbmuxd: "
        if muxHeader.length is None:
            description += "ERROR"
        else:
            # Remember how much of this packet spills into later transactions.
            self.remainingLength = muxHeader.length - event.datalen
            description += "proto=%s len=0x%04x" % (self.ipProto[muxHeader.protocol],
                                                    muxHeader.length)
            if self.remainingLength:
                description += " (0x%04x remaining)" % self.remainingLength
        event.pushDecoded(description)
        if self.ipProto[muxHeader.protocol] == 'TCP':
            # 0x08 = size of the mux header itself.
            self.handleTCP(event, data, muxHeader.length - 0x08)

    def handleTCP(self, event, data, datalen):
        """Decode an IPPROTO_TCP packet header, and log the payload."""
        # 0x14 = 20 bytes of standard TCP header.
        datalen -= 0x14
        tcpHeader = Struct.Group(None,
                                 Struct.UInt16BEHex("source"),
                                 Struct.UInt16BEHex("dest"),
                                 Struct.UInt32BE("seq"),
                                 Struct.UInt32BE("ack_seq"),
                                 Struct.UInt16BEHex("flags"),
                                 Struct.UInt16BE("window"),
                                 Struct.UInt16BEHex("checksum"),
                                 Struct.UInt16BEHex("urg_ptr"))
        data = tcpHeader.decode(data)
        event.pushDecoded("iPhone TCP [%s -> %s] len=0x%04x" % (
            self.portNumbers[tcpHeader.source],
            self.portNumbers[tcpHeader.dest],
            datalen,
        ))
        event.appendDecoded("\nTCP Header:\n%s" % str(tcpHeader))
        event.appendDecoded("\nTCP Payload:\n%s" % Types.hexDump(data))
        # Look for a protocol-specific handler
        for port in tcpHeader.source, tcpHeader.dest:
            fn = getattr(self, "port_%s" % self.portNumbers[port], None)
            if fn:
                fn(event, data, datalen)

    def port_lockdownd(self, event, data, datalen):
        """Handle lockdownd packets. These form a stream, which may or
        may not line up with the underlying USB packets. Each
        lockdownd packet is an XML plist, prefixed with a 32-bit
        length.
        """
        summary = []
        self.lockdownBuffer += data
        if datalen == 0:
            # Leave the TCP decoder at the top of the stac
            return
        elif datalen != len(data):
            # Nothing we can reliably do without the whole log.
            self.lockdownBuffer = ""
            summary.append("ERROR, incomplete log!")
        elif (len(self.lockdownBuffer) >= 10 and
              self.lockdownBuffer[0] == '\0' and
              isascii(self.lockdownBuffer[1:])):
            # I haven't seen this documented, but sometimes lockdownd sends
            # ASCII error messages that are prefixed with one NUL byte.
            summary.append("Message, %r" % self.lockdownBuffer[1:])
        elif len(self.lockdownBuffer) >= 10 and self.lockdownBuffer[4:9] != "<?xml":
            # Something else that isn't a plist?
            self.lockdownBuffer = ""
            summary.append("UNRECOGNIZED (SSL encrypted?)")
        else:
            # Decode all the packets we can
            while len(self.lockdownBuffer) >= 4:
                # Big-endian 32-bit length prefix, then the plist payload.
                length = struct.unpack(">I", self.lockdownBuffer[:4])[0]
                if len(self.lockdownBuffer) < length + 4:
                    break
                packet = self.lockdownBuffer[4:length + 4]
                self.lockdownBuffer = self.lockdownBuffer[length + 4:]
                event.appendDecoded("\nComplete lockdownd packet:\n%s" %
                                    Types.hexDump(packet))
                # Build both a full key=value listing and an abbreviated
                # one-line summary (large values collapsed to placeholders).
                kvFull = []
                kvAbbrev = []
                for k, v in plistlib.readPlistFromString(packet).items():
                    kvFull.append("   %s = %s" % (k, v))
                    if isinstance(v, plistlib.Data):
                        v = "(data)"
                    elif isinstance(v, dict):
                        v = "(dict)"
                    kvAbbrev.append("%s=%s" % (k, v))
                event.appendDecoded("\nDecoded plist:\n%s" % "\n".join(kvFull))
                summary.append("{%s}" % " ".join(kvAbbrev))
        event.pushDecoded("lockdownd: %s" % (" ".join(summary) or "fragment"))
def detector(context):
    """Return a USBMuxDecoder when *context* describes the usbmuxd bulk
    endpoint of an Apple mobile device, otherwise None.

    Matches Apple's vendor ID (0x05ac), the iPhone/iPod Touch product ID
    range 0x1290-0x12A0, the vendor-specific usbmux interface
    (class 0xFF, subclass 0xFE, protocol 2) and a bulk endpoint
    (bmAttributes == 2).
    """
    if not (context.interface and context.endpoint):
        return None
    device = context.device
    iface = context.interface
    is_apple_mux = (
        device.idVendor == 0x05ac
        and 0x1290 <= device.idProduct <= 0x12A0
        and iface.bInterfaceClass == 0xFF
        and iface.bInterfaceSubClass == 0xFE
        and iface.bInterfaceProtocol == 2
        and context.endpoint.bmAttributes == 2
    )
    return USBMuxDecoder() if is_apple_mux else None
| [
"[email protected]"
]
| |
d68cb20998e97b817a3738f409f6bad813653bd7 | c7c4b4b5b182b277cf499e775e04bf02e8081149 | /jwt_auth/migrations/0001_initial.py | 32e435faf25f2c5b783b1751db1f4704094c5a2c | []
| no_license | annamonkman/sei-project-four | e568a7d46bbdb1ab3107bb7cad28e396dc8f27e5 | 84528f4f11c9784edacfaab579c4d16b1586939f | refs/heads/main | 2023-08-15T10:20:09.504538 | 2021-10-18T15:16:20 | 2021-10-18T15:16:20 | 358,233,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,901 | py | # Generated by Django 3.2 on 2021-04-16 13:29
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema migration (auto-generated by ``makemigrations``):
    creates the project's custom ``User`` model, including a many-to-many
    ``wishlist`` relation to ``items.Item``."""

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('items', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.CharField(max_length=50, unique=True)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
                ('wishlist', models.ManyToManyField(blank=True, related_name='items', to='items.Item')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
0953b70940408ed4aba544cf03f8f8481aec4171 | f98e56e05787eba8a0705395676837f8efa99b6a | /application.py | 5f9f80352ec27ab30bbbdd5c8099a2fb85964142 | [
"MIT"
]
| permissive | Darshansingh11/Essay-Analyzer | 6378be9756fee96c9f36e5161bb8ec2fd7653f55 | 4d64c43bc4783dfc38c0a1e924a3d5a15e41d6f0 | refs/heads/master | 2021-07-19T07:03:51.465954 | 2020-05-25T07:08:55 | 2020-05-25T07:08:55 | 158,946,369 | 0 | 0 | null | 2020-05-25T07:07:49 | 2018-11-24T14:55:40 | Python | UTF-8 | Python | false | false | 6,238 | py | from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session, url_for
from flask_session import Session
from passlib.apps import custom_app_context as pwd_context
from tempfile import mkdtemp
import nltk
from helpers import *
import re
import sys
from dictionary import Dictionary
# configure application
app = Flask(__name__)
# ensure responses aren't cached (debug builds only, so edits show up
# immediately in the browser)
if app.config["DEBUG"]:
    @app.after_request
    def after_request(response):
        response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
        response.headers["Expires"] = 0
        response.headers["Pragma"] = "no-cache"
        return response
# custom filter: `usd` (from helpers) formats numbers as dollars in templates
app.jinja_env.filters["usd"] = usd
# configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# configure CS50 Library to use SQLite database
db = SQL("sqlite:///words.db")
@app.route("/")
@login_required
def index():
return render_template("index.html")
@app.route("/write",methods=["GET","POST"])
@login_required
def write():
if request.method=="GET":
return render_template("write.html")
elif request.method=="POST":
LENGTH = 20
misspelling=[]
# default dictionary
dictionary = "large.txt"
# load dictionary
d = Dictionary()
loaded = d.load(dictionary)
file_a=request.form["text_area"]
fp_a=open("t1.txt","w") #essay entered by the user is stored in t1.txt
fp_a.write(file_a)
fp_a.close()
# try to open file
file = "t1.txt"
fp = open(file, "r", encoding="latin_1")
if not fp:
print("Could not open {}.".format(file))
exit(1)
# prepare to spell-check
word = ""
index, misspellings, words = 0, 0, 0
# spell-check word
while True:
c=fp.read(1)
if not c:
break
if re.match(r"[A-Za-z]", c) or (c == "'" and index > 0):
word += c
index += 1
if index > LENGTH:
while True:
c=fp.read(1)
if not c or not re.match(r"[A-Za-z]", c):
break
# prepare for new word
index, word = 0, ""
elif c.isdigit():
# consume remainder of alphabetical string
while True:
c=fp.read(1)
if not c or (not c.isalpha() and not c.isdigit()):
break
index, word = 0, ""
elif index > 0:
# update counter
words += 1
# check word's spelling
misspelled = not d.check(word)
if misspelled:
print(word)
misspelling.append(word)
misspellings += 1
# prepare for next word
index, word = 0, ""
# close file
fp.close()
# unload dictionary
unloaded = d.unload()
str1=' '.join(misspelling)
db.execute("INSERT INTO spell (title,essay,mispell,words_e,misspelling) VALUES (:title,:essay,:mispell,:words_e,:misspelling)",title=request.form["tile"],essay=file_a,mispell=misspellings,words_e=words,misspelling=str1)
rows_e=db.execute("SELECT * FROM spell WHERE title = :title",title=request.form["tile"])
session["essay_id"]=rows_e[0]["id"]
return redirect(url_for("write"))
@app.route("/result")
@login_required
def result():
rows_c=db.execute("SELECT * FROM spell WHERE id = :id",id=session["essay_id"])
a=rows_c[0]["misspelling"]
b=rows_c[0]["words_e"]
x=len(a)
tokenizer=nltk.tokenize.TweetTokenizer()
tokens=tokenizer.tokenize(a)
return render_template("result.html",mispell_c=tokens,words_g=b)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in."""
# forget any user_id
session.clear()
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))
# ensure username exists and password is correct
if len(rows) != 1 or not pwd_context.verify(request.form.get("password"), rows[0]["hash"]):
return apology("invalid username and/or password")
# remember which user has logged in
session["user_id"] = rows[0]["id"]
# redirect user to home page
return redirect("/")
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out."""
# forget any user_id
session.clear()
# redirect user to login form
return redirect(url_for("login"))
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user."""
if request.method=="POST":
a=request.form.get("password")
b=request.form.get("password_again")
c=request.form.get("username")
if not c:
return apology("Please provide your username")
elif not a:
return apology("Please provide your password")
elif a!=b:
return apology("The passwords entered does not match")
d=pwd_context.encrypt(a)
session["user_id"]=db.execute("INSERT INTO users (username,hash) VALUES (:username, :hash)",username=c,hash=d)
return redirect(url_for("index"))
else:
return render_template("register.html")
| [
"[email protected]"
]
| |
70e545ac170d761713d00f1976876cde10b88210 | c6a69fb99ecf609404201a63d33d339162eb400b | /脚本/压缩包解压套娃.py | 36c0226152a077e4cfd8fc4eb3e96995d15a4946 | []
| no_license | npfs06/CTF-Tools | 519e7f51c3fde64027519f370bf9204a34abfb86 | 2334b715ad849bdf2f48d2b6225990062c8e2aa3 | refs/heads/main | 2023-07-05T02:37:19.265394 | 2021-08-25T01:13:58 | 2021-08-25T01:13:58 | 399,643,092 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | import zipfile
def lalala(zipname):
    """Repeatedly extract nested zip archives into the current directory.

    Each archive's base filename (everything before the first ".") is used
    as the extraction password for its contents, and the archive's first
    member becomes the next archive to open.  The original version looped
    ``while True`` and only stopped via an unhandled exception once the
    innermost entry was no longer a zip file, leaking the last ZipFile
    handle; this version stops cleanly and closes every archive.

    Returns the name of the innermost (non-zip) file reached.
    """
    while zipfile.is_zipfile(zipname):
        passwd = zipname.split(".")[0]
        with zipfile.ZipFile(zipname, 'r') as zf:
            zf.extractall(pwd=passwd.encode())
            zipname = zf.namelist()[0]
    return zipname
lalala("hW1ES89jF.tar.gz") | [
"[email protected]"
]
| |
340124038ec0a382226157ab2e053dae131e5599 | e4bc426159c5622f42f53fb26f4aa627f337ef5c | /snippets/serializers.py | e9c6078ddfd0e2426fcfc451a288f4a1f6470e7c | []
| no_license | gtzago/django-rest-tutorial | e3f02593ce7f06ea16f8cc3a7100d276deeede98 | ad37b09fc8b724ef37ec7b0e39c88e43ccf202cd | refs/heads/master | 2021-01-01T05:26:01.983047 | 2016-04-14T14:18:42 | 2016-04-14T14:18:42 | 56,093,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class SnippetSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked REST representation of a code Snippet."""
    # Read-only: the owner is set from the request user, never from input.
    owner = serializers.ReadOnlyField(source='owner.username')
    # Link to the HTML-highlighted rendering of this snippet.
    highlight = serializers.HyperlinkedIdentityField(view_name='snippet-highlight', format='html')
    class Meta:
        model = Snippet
        fields = ('url', 'highlight', 'owner',
                  'title', 'code', 'linenos', 'language', 'style')
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked REST representation of a user plus links to their snippets."""
    # Reverse relation rendered as a list of snippet-detail URLs.
    snippets = serializers.HyperlinkedRelatedField(many=True, view_name='snippet-detail', read_only=True)
    class Meta:
        model = User
        fields = ('url', 'username', 'snippets')
| [
"[email protected]"
]
| |
36dcee7028b02b4850f2c21b50d78950e0dc44ef | 95a842b1ca06bef01669a2f862cd34cdec8594fb | /api/app.py | cb924e46787507df1a36c1b724c881d62a687faf | []
| no_license | pyalwin/brainwaves | 608c37031d2170b9635aab2432d7521b4e2dd7c8 | 518f0501d2291d20f6b8a3edda33adab454a32b7 | refs/heads/master | 2020-04-18T08:45:22.966522 | 2019-01-24T17:35:41 | 2019-01-24T17:35:41 | 167,407,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | from flask import Flask
from flask_graphql import GraphQLView
from schema import schema
from mongoengine import connect
from flask import jsonify
from models import Stocks as StocksModel
from flask_cors import CORS
# Connect mongoengine's default alias to the "brainwaves" database.
# SECURITY NOTE(review): the connection URI embeds credentials in source;
# move them to environment variables / a secrets store.
connect('brainwaves', host='mongodb+srv://user:password@host/db', alias='default')
app = Flask(__name__)
CORS(app)
app.debug = True
# Example query pre-loaded into the GraphiQL editor.
default_query = '''
{
allStocks{
edges{
node{
id,
date,
symbol,
open,
close,
low,
high,
volume
}
}
}
}'''.strip()
# /graphql serves the interactive GraphiQL UI; /api is the bare endpoint.
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema,graphiql=True))
app.add_url_rule('/api', view_func=GraphQLView.as_view('api', schema=schema,graphiql=False))
@app.route('/api/ticker-list')
def list_tickers():
    """Return the distinct stock symbols present in the Stocks collection."""
    tickers = StocksModel.objects.distinct(field='symbol')
    return jsonify(tickers)
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5001)
| [
"[email protected]"
]
| |
9088bf41a81af3370a353cf42c33dfa04f4db997 | 2093ba85aafb2b91b98d4cc334eb390c6cd7d3c5 | /3a-model-scikit/model.py | e2aa6de6680d0f7a2d0c93f6c0a0d3476a8ab641 | []
| no_license | rakesh283343/covid-kubeflow | 068c2a56d02c405aa03d0b5b893d8ed40bb65229 | adc14ecaa9b84eedfaf649adcf6fe493f8bb0e1e | refs/heads/main | 2023-02-24T02:15:45.579063 | 2021-01-26T21:10:42 | 2021-01-26T21:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | import pandas as pd
from sklearn.linear_model import LinearRegression, Ridge
import numpy as np
from scipy import stats
data = pd.read_csv('flat_file.csv')
# Unique states/provinces present in the flat file.
geos = list(set(data['Province_State'].to_list()))
# Skip cruise ships, territories etc. by substring match.
dropGeos = ['Princess', "Islands", "Guam", "Puerto"]
for g in geos:
    if any([p in g for p in dropGeos]):
        continue
    state_data = data[data['Province_State'] == g]
    # One dummy column per county FIPS code (first dropped as baseline).
    X = pd.get_dummies(state_data['FIPS'], drop_first=True, sparse= True)
    X['y'] = state_data['newCases']
    X['pActive'] = state_data['pActive']
    X['herdImmune'] = state_data['herdImmune']
    X['population'] = state_data['population']
    X = X.dropna()
    y = X['y']
    X = X.drop('y', 1)
    model = LinearRegression(normalize=False, n_jobs=4).fit(X,y)
    # NOTE: `break` means only the first (arbitrary, since geos comes
    # from a set) state is fitted — presumably a debugging shortcut.
    break
#model = Ridge(normalize=False, solver='lsqr').fit(X,y)
### Get stats:
def getStats(lm, X, y):
    """Print (and return) an OLS-style coefficient table for a fitted
    linear model.

    Parameters
    ----------
    lm : fitted sklearn-style linear model exposing ``intercept_``,
        ``coef_`` and ``predict``.
    X : pandas.DataFrame of the regressors used to fit ``lm`` (not mutated).
    y : array-like of observed targets.

    Returns
    -------
    pandas.DataFrame with columns Coefficients, Standard Errors,
    t values and Probabilities (two-sided p-values); also printed.
    """
    # Intercept first, then the slope coefficients.
    params = np.append(lm.intercept_, lm.coef_)
    predictions = lm.predict(X)
    # Work on a copy so the caller's DataFrame is not mutated (the
    # original did `newX = X` and then added a column in place), and put
    # the constant column FIRST so its position matches the intercept's
    # position in `params` (the original appended it last, misaligning
    # each coefficient with its variance).
    newX = X.copy()
    newX.insert(0, 'Constant', 1.0)
    # Residual degrees of freedom: observations minus estimated params.
    dof = len(newX) - len(newX.columns)
    design = newX.to_numpy(dtype=float)
    MSE = float(np.sum((y - predictions) ** 2)) / dof
    # Diagonal of MSE * (X'X)^-1 gives the coefficient variances.
    var_b = MSE * (np.linalg.inv(design.T @ design).diagonal())
    sd_b = np.sqrt(var_b)
    ts_b = params / sd_b
    # Two-sided p-values from the t distribution.  The original computed
    # the dof here as `len(newX)-len(newX[0])`, which raises KeyError on
    # a DataFrame (no column named 0); `dof` above is the intended value.
    p_values = [2 * (1 - stats.t.cdf(np.abs(t), dof)) for t in ts_b]
    sd_b = np.round(sd_b, 3)
    ts_b = np.round(ts_b, 3)
    p_values = np.round(p_values, 3)
    params = np.round(params, 4)
    myDF3 = pd.DataFrame()
    myDF3["Coefficients"], myDF3["Standard Errors"], myDF3["t values"], myDF3["Probabilities"] = [params, sd_b, ts_b, p_values]
    print(myDF3)
    return myDF3
| [
"[email protected]"
]
| |
5eaf78a2b9a510e79ce66541431b5ab2510701ea | 85ae021daca623a690a5dc84515378eb8dfc6f5c | /XMLparsing.py | 6d96c6dc657bc4af7df782d5af9e38612332e971 | []
| no_license | Kharianne/AccesingDataPython | 0b1b1074ba30fdf95543066bff04150afde00006 | f041da4602943535fc49bab1829e2b62c7dee368 | refs/heads/master | 2021-05-04T10:12:26.543081 | 2017-07-10T13:16:23 | 2017-07-10T13:16:23 | 54,231,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import urllib.request
import xml.etree.ElementTree as ET
# Course exercise data file (Dr. Chuck's Python for Everybody).
go_to_url = "http://python-data.dr-chuck.net/comments_235575.xml"
# Fetch the whole XML document over HTTP at import time.
xml_file = urllib.request.urlopen(go_to_url).read()
# NOTE(review): `sum` shadows the built-in sum() for the rest of the module.
sum = 0
tree = ET.fromstring(xml_file)
comment = tree.findall('.//comment')
# Accumulate the integer <count> child of every <comment> element.
for child in tree.findall('.//comment'):
    count = child.find('count').text
    number = int(count)
    sum += number
print(sum)
#counts = [x.text for x in tree.findall('.//count')]
#print(counts)
| [
"[email protected]"
]
| |
52564c55ce188af128e41cc3810567e62b0cb71c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_wisecracked.py | df762aa40f8d90ebf7ab0b38869d1bab6c31eb7e | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
#calss header
class _WISECRACKED():
def __init__(self,):
self.name = "WISECRACKED"
self.definitions = wisecrack
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['wisecrack']
| [
"[email protected]"
]
| |
c674c6e16654a00ab60bf8e87c02f64ebac92101 | bb59bafc83ede90f7e99e504abebac4a324557f8 | /Assignment2/receive.py | 5acf86928126751c39dd212e466c0bb7f0d3def4 | []
| no_license | mlevy94/ECE4564 | 17e823c950a83888bfc8f0f280c71bf2c2e2788e | efd812856250a4c55438419102daca3ecf9fec63 | refs/heads/master | 2021-06-14T07:00:06.021759 | 2017-05-09T20:58:14 | 2017-05-09T20:58:14 | 81,128,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | #!/usr/bin/env python
import pika
import argparse
import time
import sys
import json
# Command-line options: -b broker host, -p vhost, -c "login:password"
# credentials, -k routing key (required).
parser = argparse.ArgumentParser()
parser.add_argument("-b", action="store", default="localhost")
parser.add_argument("-p", action="store", default="/")
parser.add_argument("-c", action="store", default=None)
parser.add_argument("-k", action="store", required=True)
fields = parser.parse_args(sys.argv[1:])
if fields.c is not None:
    # Split "login:password" on the first colon by manual scan.
    i = 0
    while fields.c[i] != ':': #parse login credentials
        i+=1
    login = fields.c[:i]
    password = fields.c[i+1:]
    credentials = pika.PlainCredentials(login, password)
    parameters = pika.ConnectionParameters(fields.b,
                                           5672,
                                           fields.p,
                                           credentials)
else: #attempt to login as guest
    parameters = pika.ConnectionParameters('localhost')
connection = pika.BlockingConnection(parameters) #need error handling
channel = connection.channel()
# Direct exchange: messages are routed by exact routing-key match.
channel.exchange_declare(exchange='pi_utilization',
                         type='direct')
# Exclusive anonymous queue, deleted when this consumer disconnects.
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='pi_utilization',
                   queue=queue_name,
                   routing_key=fields.k)
def callback(ch, method, properties, body):
    # Print each received message, decoded from its JSON body.
    #import pdb; pdb.set_trace()
    print(" [{}] Received: {} ".format(method.routing_key, json.loads(body.decode())))
# no_ack=True: messages are not re-queued if this consumer dies mid-print.
channel.basic_consume(callback,
                      queue=queue_name,
                      no_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
| [
"[email protected]"
]
| |
ff22176a2b050a193f1882462e0d36e591e42784 | cb0e7d6493b23e870aa625eb362384a10f5ee657 | /solutions/python3/0567.py | 65478b7cc2fb087117f7698fe743cdccb13f091a | []
| no_license | sweetpand/LeetCode-1 | 0acfa603af254a3350d457803449a91322f2d1a7 | 65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94 | refs/heads/master | 2022-11-14T07:01:42.502172 | 2020-07-12T12:25:56 | 2020-07-12T12:25:56 | 279,088,171 | 1 | 0 | null | 2020-07-12T15:03:20 | 2020-07-12T15:03:19 | null | UTF-8 | Python | false | false | 500 | py | class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
    """Return True if some permutation of ``s1`` appears as a contiguous
    substring of ``s2``.

    Sliding-window counting: ``need`` tracks how many of each character
    the current window still owes to ``s1``; ``missing`` is the number of
    characters of ``s1`` not yet matched inside the window.  When
    ``missing`` reaches zero the window is an anagram of ``s1``.
    O(len(s2)) time, O(alphabet) space.
    """
    window = len(s1)
    need = collections.Counter(s1)
    missing = window
    for idx, ch in enumerate(s2):
        # Bring the new character into the window.
        need[ch] -= 1
        if need[ch] >= 0:
            missing -= 1
        # Evict the character that slid out of the window.
        if idx >= window:
            out = s2[idx - window]
            need[out] += 1
            if need[out] > 0:
                missing += 1
        if missing == 0:
            return True
    return False
| [
"[email protected]"
]
| |
28d7853629e519d31e6615eabe002706b6b08b38 | 4cb2bbd929ba3722d78cd6bd9feb2c5c0dd57025 | /olympic/forms.py | d27d0f59c4a933f98e12550b23203e2966edaad2 | [
"BSD-2-Clause"
]
| permissive | mjtamlyn/tamlynscore | ebeebdc73feeab86995a2cb888e1bea203854553 | c6ac4e9a5e37dc3778b1f754b3143e44fa8dc0bc | refs/heads/master | 2023-08-24T11:00:16.153489 | 2023-08-08T11:30:24 | 2023-08-08T11:30:24 | 17,013,657 | 7 | 2 | BSD-3-Clause | 2023-08-05T19:52:51 | 2014-02-20T08:28:08 | Python | UTF-8 | Python | false | false | 3,102 | py | from django import forms
from .models import Result, SessionRound
class ResultForm(forms.ModelForm):
    """Model form for entering a match Result; the match and seed are
    fixed by context, so they are excluded from user input."""
    class Meta:
        model = Result
        exclude = ('match', 'seed')
class SetupForm(forms.Form):
    """Form for allocating (or removing) head-to-head matches for one
    session round: which round/level, starting target, pass, target
    spread and which subset of the bracket's matches to lay out."""
    SPREAD_CHOICES = (
        ('', 'No special options'),
        ('expanded', 'One target per archer'),
    )
    MATCH_CHOICES = (
        ('', 'All matches'),
        ('half', 'Only allocate half of the matches'),
        ('quarter', 'Only allocate 1/4 of the matches'),
        ('eighth', 'Only allocate 1/8 of the matches'),
        ('three-quarter', 'Only allocate 3/4 of the matches'),
        ('first-half', 'Only allocate first half of the matches / Final only'),
        ('second-half', 'Only allocate second half of the matches / Bronze only'),
    )
    # Bracket depth: 1 = finals, 8 = round of 128.
    LEVEL_CHOICES = (
        (1, 'Finals'),
        (2, 'Semis'),
        (3, 'Quarters'),
        (4, '1/8'),
        (5, '1/16'),
        (6, '1/32'),
        (7, '1/64'),
        (8, '1/128'),
    )
    TIMING_CHOICES = (
        (1, 'Pass A'),
        (2, 'Pass B'),
        (3, 'Pass C'),
        (4, 'Pass D'),
        (5, 'Pass E'),
        (6, 'Pass F'),
        (7, 'Pass G'),
        (8, 'Pass H'),
        (9, 'Pass I'),
        (10, 'Pass J'),
    )
    session_round = forms.ModelChoiceField(SessionRound.objects)
    start = forms.IntegerField(label='Start target')
    level = forms.TypedChoiceField(coerce=int, choices=LEVEL_CHOICES)
    timing = forms.TypedChoiceField(label='Pass', coerce=int, choices=TIMING_CHOICES)
    spread = forms.ChoiceField(label='Target spread', choices=SPREAD_CHOICES, required=False)
    matches = forms.ChoiceField(label='Matches', choices=MATCH_CHOICES, required=False)
    delete = forms.BooleanField(required=False)
    def __init__(self, session_rounds, **kwargs):
        # Restrict the session_round choices to the rounds of the
        # competition being edited.
        self.session_rounds = session_rounds
        super(SetupForm, self).__init__(**kwargs)
        self.fields['session_round'].queryset = session_rounds
    def save(self):
        """Create (or delete) the matches described by the cleaned data.

        Translates the form's choice fields into keyword flags for
        ``SessionRound.make_matches``; ``delete`` removes the level's
        matches instead.
        """
        sr = self.cleaned_data['session_round']
        kwargs = {
            'level': self.cleaned_data['level'],
            'start': self.cleaned_data['start'],
            'timing': self.cleaned_data['timing'],
        }
        # Team rounds always get one target per archer.
        if sr.shot_round.team_type:
            kwargs['expanded'] = True
        if self.cleaned_data['spread'] == 'expanded':
            kwargs['expanded'] = True
        if self.cleaned_data['matches'] == 'half':
            kwargs['half_only'] = True
        if self.cleaned_data['matches'] == 'quarter':
            kwargs['quarter_only'] = True
        if self.cleaned_data['matches'] == 'eighth':
            kwargs['eighth_only'] = True
        if self.cleaned_data['matches'] == 'three-quarter':
            kwargs['three_quarters'] = True
        if self.cleaned_data['matches'] == 'first-half':
            kwargs['first_half_only'] = True
        if self.cleaned_data['matches'] == 'second-half':
            kwargs['second_half_only'] = True
        if self.cleaned_data['delete']:
            sr.remove_matches(self.cleaned_data['level'])
        else:
            sr.make_matches(**kwargs)
| [
"[email protected]"
]
| |
61752c74ffa9dac746a5a342dfb9225aaf000fdb | 38f1665166a899eb65dbc35348d81ecaf305b197 | /src/python/upload_to_big_query.py | 63101da6cfb2b34ce3c4f16fee210e8f5cb702a2 | []
| no_license | surbhikkabra/Hate-Speech-Analysis | 23df82344f5de0d8337895d236f23f96d87d77bd | e2a0b84d39d761874026abc5dd958b9207000a68 | refs/heads/master | 2022-11-11T02:00:59.118280 | 2020-07-05T02:24:32 | 2020-07-05T02:24:32 | 274,814,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | from google.cloud import bigquery
from google.cloud import bigquery_storage_v1beta1
from google.oauth2 import service_account
from google_auth_oauthlib import flow
import os
def get_credentials():
    """Load Google Cloud credentials from the local service-account key.

    The commented-out alternative below used an interactive OAuth
    installed-app flow instead of a service account.
    """
    # appflow = flow.InstalledAppFlow.from_client_secrets_file(
    #     'client_stores.json',
    #     scopes=['https://www.googleapis.com/auth/bigquery'])
    #
    # appflow.run_console()
    # return appflow.credentials
    credentials = service_account.Credentials.from_service_account_file(
        'service-account.json',
        scopes=["https://www.googleapis.com/auth/cloud-platform"],
    )
    return credentials
def create_client():
    """Return a BigQuery client authenticated for the hatespeech-2019 project."""
    credentials = get_credentials()
    print("Authentication Successful")
    return bigquery.Client(project='hatespeech-2019', credentials=credentials)
def create_storage_client():
    """Return a BigQuery Storage client (fast bulk row reads) using the
    same service-account credentials."""
    credentials = get_credentials()
    print("Authentication Successful")
    return bigquery_storage_v1beta1.BigQueryStorageClient(
        credentials=credentials
    )
def get_job_config():
    """Build a load-job config for newline-delimited JSON with schema
    auto-detection."""
    job_config = bigquery.LoadJobConfig()
    job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
    job_config.autodetect = True
    return job_config
def get_table_ref(client, dataset_id, table_id):
    """Return a TableReference for ``dataset_id.table_id`` via *client*."""
    dataset_ref = client.dataset(dataset_id)
    return dataset_ref.table(table_id)
def execute_job(client, source_file, comment_type):
    """Load *source_file* (NDJSON) into the dataset/table that matches
    *comment_type* ("parent" or "child"), blocking until the load ends.

    NOTE(review): any other comment_type leaves dataset_id/table_id
    unbound and raises UnboundLocalError below — confirm callers only
    ever pass "parent" or "child".
    """
    if comment_type == "parent":
        dataset_id = 'Parent_Comments'
        table_id = 'Comments'
    elif comment_type == "child":
        dataset_id = 'Child_Comments'
        table_id = 'Replies'
    table_ref = get_table_ref(client, dataset_id, table_id)
    print("Uploading the data....from {}".format(source_file))
    job = client.load_table_from_file(source_file, table_ref, location="us", job_config=get_job_config())
    job.result()  # Waits for table load to complete.
    print("Loaded {} rows into {}:{}.".format(job.output_rows, dataset_id, table_id))
def run_job(source_file_name, client, comment_type):
    """Upload one newline-delimited JSON file to BigQuery, then delete it.

    Opens *source_file_name* in binary mode, hands it to ``execute_job``
    for the load, and removes the file afterwards.  Uses a ``with``
    block so the file handle is closed even if the upload raises (the
    original opened/closed manually and leaked the handle on error).
    """
    with open(source_file_name, "rb") as source_file:
        execute_job(client, source_file, comment_type)
    print("Processed Delimited file {} ".format(source_file_name))
    os.remove(source_file_name)
    print("Deleted Delimited file {} ".format(source_file_name))
def get_rows_from_table():
    """Read the whole merged comments table into a pandas DataFrame,
    using the BigQuery Storage API for the (faster) row download."""
    client = create_client()
    bqstorageclient = create_storage_client()
    dataset_id = 'Final_DataSet'
    table_id = 'Channel_Videos_Comments_Merged'
    table_ref = get_table_ref(client, dataset_id, table_id)
    rows = client.list_rows(
        table_ref
    )
    dataframe = rows.to_dataframe(bqstorage_client=bqstorageclient)
    return dataframe
| [
"[email protected]"
]
| |
d19d550f037f8ddc3caafe06ec1e87db7d68a59b | 5988c420e41652eba3f2c510c18f3eb10ef1127d | /lagou/pipelines.py | 0e273ca61aa40cd207a0287660f123c4fb35e10c | []
| no_license | lpnsjl/Lagou-scrapy | 55bf30b4691105260bdf388da8f84499937df77f | 241e807653e7f5b64068b6ffc6ec9d9c6ff22bf3 | refs/heads/master | 2021-04-26T23:46:28.744606 | 2018-03-04T18:53:00 | 2018-03-04T18:53:00 | 123,854,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import pymongo
class LagouPipeline(object):
    """Scrapy pipeline that stores scraped Lagou job-position items in a
    local MongoDB collection (database ``lagou``, collection
    ``position``).

    An earlier implementation — previously parked in this docstring as
    dead code — appended each item as a JSON line to ``lagou.json``; it
    was superseded by the MongoDB version below.
    """
    def __init__(self):
        # mongod host
        host = '127.0.0.1'
        # mongod port
        port = 27017
        # database name
        dbname = 'lagou'
        # collection ("table") name
        sheetname = 'position'
        # create a mongod client
        mongoclient = pymongo.MongoClient(host=host, port=port)
        # open the database
        mydb = mongoclient[dbname]
        # open the collection
        self.sheet = mydb[sheetname]
    def process_item(self, item, spider):
        position_info = dict(item)
        # insert the scraped position into MongoDB
        self.sheet.insert(position_info)
        return item
| [
"[email protected]"
]
| |
11e34cc411e5ccc24b9c4a13af103857fd8ab486 | 0c4bb8541efc25343a58a9df9717e20ccbb6dc5c | /polls/models.py | 9c182e229b6f5b28a8fa9a2ed7612ab94e71b111 | []
| no_license | MarcinKruzewski/mydjango | eb970c5fd6fb9bf0702c94aaaf0a591bb8e87c00 | 8327934b5db0ed4048129d2522320162bbda3055 | refs/heads/master | 2021-01-10T16:18:59.142559 | 2016-01-08T20:16:08 | 2016-01-08T20:16:08 | 49,285,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
    """A poll question with its publication timestamp."""
    # Text shown to voters.
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __str__(self):
        return self.question_text
    def was_published_recently(self):
        """True if published within the last 24 hours and not in the future."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    # Admin list-display metadata for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, with its vote tally."""
    # Deleting a Question cascades to its choices.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
| [
"[email protected]"
]
| |
f564385e2b1523b166f2fa4e8d7d7697af9a8f8f | b86ac85d4aad9974b70619eaaea993d7722c5443 | /ecommerce/api/serializers.py | 2d24d81d64dd9a87c3ced749c7da9817bbec16fb | []
| no_license | abn93/fpftech-api | 493892a7dbe23c683dafb09d16621516219649a0 | 4dbc0a9abff6352472b729203f64d2a19b16742a | refs/heads/master | 2023-06-19T18:41:16.745437 | 2021-07-08T19:01:28 | 2021-07-08T19:01:28 | 382,482,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from rest_framework import serializers
from produtos import models
class ProdutoSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the basic Produto fields over the API."""
    class Meta:
        model = models.Produto
        fields = ['id', 'nome', 'marca', 'categoria', 'preco']
| [
"[email protected]"
]
| |
290f3bc6b220e4c8877fcf47617d2039ce61cba1 | beeded6f6df10462c09ad0681ae311407269389b | /testGoodsTrade/python/x64/Ice_Current_ice.py | 32801971831ce97b58a73db1a853bd5e4789ee50 | []
| no_license | cash2one/python-1 | 12687097b9c1f8dcc28aa3ae261d7f29dee9856a | a978fa3f611273620d7eb4b45df872c7fe3de6d0 | refs/heads/master | 2020-02-26T17:03:06.869925 | 2013-11-22T11:02:14 | 2013-11-22T11:02:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,911 | py | # **********************************************************************
#
# Copyright (c) 2003-2010 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
# Ice version 3.4.1
# <auto-generated>
#
# Generated from file `Current.ice'
#
# Warning: do not edit this file.
#
# </auto-generated>
import Ice, IcePy, __builtin__
import Ice_ObjectAdapterF_ice
import Ice_ConnectionF_ice
import Ice_Identity_ice
# Included module Ice
_M_Ice = Ice.openModule('Ice')
# Start of module Ice
__name__ = 'Ice'
if not _M_Ice.__dict__.has_key('_t_Context'):
_M_Ice._t_Context = IcePy.defineDictionary('::Ice::Context', (), IcePy._t_string, IcePy._t_string)
if not _M_Ice.__dict__.has_key('OperationMode'):
_M_Ice.OperationMode = Ice.createTempClass()
class OperationMode(object):
'''The OperationMode determines the retry behavior an
invocation in case of a (potentially) recoverable error.'''
def __init__(self, val):
assert(val >= 0 and val < 3)
self.value = val
def __str__(self):
return self._names[self.value]
__repr__ = __str__
def __hash__(self):
return self.value
def __lt__(self, other):
if isinstance(other, _M_Ice.OperationMode):
return self.value < other.value;
elif other == None:
return False
return NotImplemented
def __le__(self, other):
if isinstance(other, _M_Ice.OperationMode):
return self.value <= other.value;
elif other == None:
return False
return NotImplemented
def __eq__(self, other):
if isinstance(other, _M_Ice.OperationMode):
return self.value == other.value;
elif other == None:
return False
return NotImplemented
def __ne__(self, other):
if isinstance(other, _M_Ice.OperationMode):
return self.value != other.value;
elif other == None:
return False
return NotImplemented
def __gt__(self, other):
if isinstance(other, _M_Ice.OperationMode):
return self.value > other.value;
elif other == None:
return False
return NotImplemented
def __ge__(self, other):
if isinstance(other, _M_Ice.OperationMode):
return self.value >= other.value;
elif other == None:
return False
return NotImplemented
_names = ('Normal', 'Nonmutating', 'Idempotent')
OperationMode.Normal = OperationMode(0)
OperationMode.Nonmutating = OperationMode(1)
OperationMode.Idempotent = OperationMode(2)
_M_Ice._t_OperationMode = IcePy.defineEnum('::Ice::OperationMode', OperationMode, (), (OperationMode.Normal, OperationMode.Nonmutating, OperationMode.Idempotent))
_M_Ice.OperationMode = OperationMode
del OperationMode
if not _M_Ice.__dict__.has_key('Current'):
_M_Ice.Current = Ice.createTempClass()
class Current(object):
'''Information about the current method invocation for servers. Each
operation on the server has a Current as its implicit final
parameter. Current is mostly used for Ice services. Most
applications ignore this parameter.'''
def __init__(self, adapter=None, con=None, id=Ice._struct_marker, facet='', operation='', mode=_M_Ice.OperationMode.Normal, ctx=None, requestId=0):
self.adapter = adapter
self.con = con
if id is Ice._struct_marker:
self.id = _M_Ice.Identity()
else:
self.id = id
self.facet = facet
self.operation = operation
self.mode = mode
self.ctx = ctx
self.requestId = requestId
def __hash__(self):
_h = 0
_h = 5 * _h + __builtin__.hash(self.adapter)
_h = 5 * _h + __builtin__.hash(self.con)
_h = 5 * _h + __builtin__.hash(self.id)
_h = 5 * _h + __builtin__.hash(self.facet)
_h = 5 * _h + __builtin__.hash(self.operation)
_h = 5 * _h + __builtin__.hash(self.mode)
if self.ctx:
for _i0 in self.ctx:
_h = 5 * _h + __builtin__.hash(_i0)
_h = 5 * _h + __builtin__.hash(self.ctx[_i0])
_h = 5 * _h + __builtin__.hash(self.requestId)
return _h % 0x7fffffff
def __lt__(self, other):
if isinstance(other, _M_Ice.Current):
return self.adapter < other.adapter or self.con < other.con or self.id < other.id or self.facet < other.facet or self.operation < other.operation or self.mode < other.mode or self.ctx < other.ctx or self.requestId < other.requestId
elif other == None:
return False
return NotImplemented
def __le__(self, other):
if isinstance(other, _M_Ice.Current):
return self.adapter <= other.adapter or self.con <= other.con or self.id <= other.id or self.facet <= other.facet or self.operation <= other.operation or self.mode <= other.mode or self.ctx <= other.ctx or self.requestId <= other.requestId
elif other == None:
return False
return NotImplemented
def __eq__(self, other):
if isinstance(other, _M_Ice.Current):
return self.adapter == other.adapter and self.con == other.con and self.id == other.id and self.facet == other.facet and self.operation == other.operation and self.mode == other.mode and self.ctx == other.ctx and self.requestId == other.requestId
elif other == None:
return False
return NotImplemented
def __ne__(self, other):
if isinstance(other, _M_Ice.Current):
return self.adapter != other.adapter or self.con != other.con or self.id != other.id or self.facet != other.facet or self.operation != other.operation or self.mode != other.mode or self.ctx != other.ctx or self.requestId != other.requestId
elif other == None:
return True
return NotImplemented
def __gt__(self, other):
if isinstance(other, _M_Ice.Current):
return self.adapter > other.adapter or self.con > other.con or self.id > other.id or self.facet > other.facet or self.operation > other.operation or self.mode > other.mode or self.ctx > other.ctx or self.requestId > other.requestId
elif other == None:
return False
return NotImplemented
def __ge__(self, other):
if isinstance(other, _M_Ice.Current):
return self.adapter >= other.adapter or self.con >= other.con or self.id >= other.id or self.facet >= other.facet or self.operation >= other.operation or self.mode >= other.mode or self.ctx >= other.ctx or self.requestId >= other.requestId
elif other == None:
return False
return NotImplemented
def __str__(self):
return IcePy.stringify(self, _M_Ice._t_Current)
__repr__ = __str__
_M_Ice._t_Current = IcePy.defineStruct('::Ice::Current', Current, (), (
('adapter', (), _M_Ice._t_ObjectAdapter),
('con', (), _M_Ice._t_Connection),
('id', (), _M_Ice._t_Identity),
('facet', (), IcePy._t_string),
('operation', (), IcePy._t_string),
('mode', (), _M_Ice._t_OperationMode),
('ctx', (), _M_Ice._t_Context),
('requestId', (), IcePy._t_int)
))
_M_Ice.Current = Current
del Current
# End of module Ice
| [
"[email protected]"
]
| |
641aacc8b6854764e829d6932d4d0627ea980786 | 19d03d646fcee318cca8078af27636732290d77b | /parlai/utils/flake8.py | 1170b4bbb4a717b201637e00678bf96a87614026 | [
"MIT"
]
| permissive | yongkyung-oh/CMU-Studio-Project | 2d6fe6ef6fa30fda1a4f2d1fc45c5b85d6143775 | 448492f342e8157df2e736aa52825b66b1d66fd7 | refs/heads/master | 2022-10-24T16:56:46.763865 | 2020-07-01T10:03:00 | 2020-07-01T10:03:00 | 252,878,283 | 2 | 5 | MIT | 2021-03-25T23:50:27 | 2020-04-04T01:02:44 | Python | UTF-8 | Python | false | false | 3,424 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Custom flake8 rules for ParlAI.
Includes:
- Checks for python3 shebang
- Check for copyright message
- Docformatter issues (TODO)
"""
import docformatter
import difflib
# Shebang every checked file must start with (enforced by PAI100 below).
PYTHON_SHEBANG = '#!/usr/bin/env python3'
# Files containing any of these phrases are exempt from the copyright check.
WHITELIST_PHRASES = ['Moscow Institute of Physics and Technology.']
# Filenames matching these substrings may omit the first two copyright lines.
WHITELIST_FNS = ["mlb_vqa"]
# Required copyright header, one entry per mandatory line.
COPYRIGHT = [
    "Copyright (c) Facebook, Inc. and its affiliates.",
    "This source code is licensed under the MIT license found in the",
    "LICENSE file in the root directory of this source tree.",
]
class ParlAIChecker:
    """
    Custom flake8 checker for some special ParlAI requirements.

    Emits PAI100 (missing python3 shebang), PAI101 (docstring would be
    reformatted by docformatter) and PAI201-PAI203 (missing copyright lines).
    """
    name = 'flake8-parlai'
    version = '0.1'
    def __init__(self, tree=None, filename=None, lines=None):
        # `tree` is part of the flake8 plugin signature but unused here.
        self.filename = filename
        self.lines = lines
    def run(self):
        """Yield flake8-style (line, col, message, type) tuples."""
        if self.lines is None:
            with open(self.filename) as f:
                self.lines = f.readlines()
        if self.lines and PYTHON_SHEBANG not in self.lines[0]:
            yield (
                1,
                0,
                'PAI100 Missing python3 shebang. (`#!/usr/bin/env python3`)',
                '',
            )
        # check doc formatting
        source = "".join(self.lines)
        formatted_source = docformatter.format_code(
            source,
            pre_summary_newline=True,
            description_wrap_length=88,
            summary_wrap_length=88,
            make_summary_multi_line=True,
            force_wrap=False,
        )
        if source != formatted_source:
            # Diff with zero context lines so each hunk header pinpoints one
            # reformatted docstring.
            diff = difflib.unified_diff(
                source.split('\n'), # have to strip newlines
                formatted_source.split('\n'),
                f'before/{self.filename}',
                f'after/{self.filename}',
                n=0,
                lineterm='',
            )
            for line in diff:
                if line.startswith('@@'):
                    fields = line.split()
                    # find out the beginning line of the docstring reformat. Example:
                    # --- /path/to/original timestamp
                    # +++ /path/to/new timestamp
                    # @@ -1,3 +1,9 @@
                    # that -1 says the first line changed, and 3 lines were removed
                    # with a new offset belonging at the first line, and 9
                    # inserted lines.
                    line_no, *_ = fields[1].split(',')
                    line_no = -int(line_no)
                    yield (
                        line_no,
                        1,
                        f'PAI101 autoformat.sh would reformat the docstring',
                        '',
                    )
        # the rest is checking copyright, but there are some exceptions
        # copyright must appear in the first 16 lines of the file.
        source = "".join(self.lines[:16])
        if any(wl in source for wl in WHITELIST_PHRASES):
            return
        for i, msg in enumerate(COPYRIGHT, 1):
            # Whitelisted filenames may skip the first two copyright lines.
            if any(wl in self.filename for wl in WHITELIST_FNS) and i < 3:
                continue
            if source and msg not in source:
                yield (i, 0, f'PAI20{i} Missing copyright `{msg}`', '')
| [
"[email protected]"
]
| |
5011edd540e59266a4dab322eeabf74f75d8df9e | 76db1e651dba6c639e61a5c251583390ffd60b93 | /kConcatenationMaxSum.py | 8098b05eaf3abe2b55ad64af0efd551a19fee2d5 | []
| no_license | keenouter/leetcode | 58127ece6d4f76a4e80a00ec564ee46143d72232 | ebb485d7fdb9c3df9669ecf94315ebc0a836977f | refs/heads/master | 2022-04-24T06:56:27.982291 | 2020-04-30T06:34:10 | 2020-04-30T06:34:10 | 260,130,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | class Solution:
def kConcatenationMaxSum(self, arr, k):
# max_index=0
# max_child_sum=0
# arr_sum=0
# temp=0
# min_sum=0
# max_sum=0
# for i in range(len(arr)):
# arr_sum+=arr[i]
# if temp>0:
# if temp>max_child_sum:
# max_child_sum=temp
# max_index=i
# temp+=arr[i]
# elif temp<=0:
# if arr[i]<=0:
# temp=0
# else:
# temp=arr[i]
# if arr_sum<min_sum:
# min_sum=arr_sum
# if arr_sum>max_sum:
# max_sum=arr_sum
# if temp>max_child_sum:
# max_child_sum=temp
# max_index= len(arr)
# print(arr_sum,max_child_sum,temp)
# return max([arr_sum*k-min_sum,arr_sum*(k-1)+sum(arr[:max_index])-min_sum,max_child_sum,temp+max_sum,0])
arr_sum_list=[0]
temp=0
max_index=0
max_sum=0
for i in range(len(arr)):
temp+=arr[i]
if temp>max_sum:
max_sum=temp
max_index=i+1
arr_sum_list.append(temp)
left_min=min(arr_sum_list[:max_index-1])
right_min=min(arr_sum_list[max_index+1:]+[0])
return max([arr_sum_list[-1]*(k-1)+max_sum - left_min*2,])
print(Solution().kConcatenationMaxSum([1,2,3],3))
| [
"[email protected]"
]
| |
e5aabdd20579220da7313ebbf46eb97315da75b0 | 3825126997a40c42f56765d9c0dae2047e8b7297 | /energy/tests/test_period.py | e889e34520937a338db106b829bab7885acf41a5 | []
| no_license | kirill1990/kalugaenergo | 34a87aec7104b8caef7b6b803ffa78f72e2f33f6 | 393e3cbcd6dcb78fb489408c8f94365e52606b89 | refs/heads/master | 2021-01-10T08:25:26.100346 | 2016-02-26T12:52:18 | 2016-02-26T12:52:18 | 47,502,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,426 | py | # coding: utf8
from django.test import TestCase
from energy.models import Period
__author__ = 'Demyanov Kirill'
class PeriodTest(TestCase):
    """Tests for the Period model: hour counts and timeline membership."""
    fixtures = ['energy_fixtures.json']
    def setUp(self):
        # Cache periods 1..12 (one calendar year) keyed by primary key.
        self.periods = {x: Period.objects.get(pk=x) for x in range(1, 13)}
    def test_count(self):
        """ Periods exist in the database """
        self.assertNotEqual(Period.objects.all().count(), 0)
    def test_get_hour(self):
        """ Check the hour count reported for each period """
        # Months with 31 days
        for index in [1, 3, 5, 7, 8, 10, 12]:
            self.assertEqual(self.periods[index].get_hour(), 744)
        # Months with 30 days
        for index in [4, 6, 9, 11]:
            self.assertEqual(self.periods[index].get_hour(), 720)
        # February with 28 days
        self.assertEqual(self.periods[2].get_hour(), 672)
        # February with 29 days (leap year; pk=26)
        self.assertEqual(Period.objects.get(pk=26).get_hour(), 696)
    def test_between_left(self):
        """ The tested period lies to the left of the checked time line """
        self.assertFalse(self.periods[3].is_between(self.periods[4], self.periods[7]))
        self.assertFalse(self.periods[2].is_between(self.periods[3], None))
    def test_between_right(self):
        """ The tested period lies to the right of the checked time line """
        self.assertFalse(self.periods[4].is_between(self.periods[2], self.periods[3]))
        self.assertFalse(self.periods[3].is_between(self.periods[2], self.periods[3]))
    def test_between_into(self):
        """ The tested period lies inside the checked time line """
        self.assertTrue(self.periods[3].is_between(self.periods[1], self.periods[5]))
        self.assertTrue(self.periods[3].is_between(self.periods[3], self.periods[4]))
        self.assertTrue(self.periods[4].is_between(self.periods[2], None))
        self.assertTrue(self.periods[2].is_between(self.periods[2], None))
    def test_between_none(self):
        """ The checked time line is empty (start equals end) """
        self.assertFalse(self.periods[3].is_between(self.periods[3], self.periods[3]))
| [
"[email protected]"
]
| |
51277d3ffd256cbd4cdca9c05caf2490c0becfec | 34c32858ddcaef504aa7495eb716ebf8e98d4ebe | /optical_flow/MF_Tracker.py | b9eb03adc2204c831d59b082240032d55812a9a7 | [
"Apache-2.0"
]
| permissive | whjzsy/SiamTrackers | 7a6a0c7be36386614395dbbc7dcdc7235cd41d51 | 88e0f801405ca9b36fcc3e23d296455872ba37d2 | refs/heads/master | 2022-11-05T03:56:06.361932 | 2020-06-16T07:53:40 | 2020-06-16T07:53:40 | 272,852,303 | 3 | 0 | Apache-2.0 | 2020-06-17T01:44:51 | 2020-06-17T01:44:50 | null | UTF-8 | Python | false | false | 5,244 | py | #!/home/ubuntu/anaconda3/bin python
# -*- coding:utf-8 -*-
# -*- coding: utf-8 -*-
""" MedianFlow sandbox
Usage:
medianflow.py SOURCE
Options:
SOURCE INT: camera, STR: video file
"""
from __future__ import print_function
from __future__ import division
from docopt import docopt
from os.path import abspath, exists
import numpy as np
import cv2
from optical_flow.rect_selector import RectSelector
class MedianFlowTracker(object):
    """Median-Flow style bounding-box tracker built on pyramidal Lucas-Kanade.

    track() advances a box from one grayscale frame to the next by sampling
    points inside it, tracking them forward and backward, keeping the most
    reliable half, and taking median displacement/scale estimates.
    """
    def __init__(self):
        # Parameters for cv2.calcOpticalFlowPyrLK: search window, pyramid
        # depth, and termination criteria.
        self.lk_params = dict(winSize  = (11, 11),
                              maxLevel = 3,
                              criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.1))
        # NOTE(review): this vectorized atan2 is prepared but never used in
        # the visible code; possibly a leftover from a rotation estimate.
        self._atan2 = np.vectorize(np.math.atan2)
    def track(self, bb, prev, curr):
        """Track box `bb` (x0, y0, x1, y1) from frame `prev` to `curr`.

        Returns the updated, image-clamped box, or None when tracking is
        deemed unreliable.
        """
        self._n_samples = 100       # points sampled inside the box
        self._fb_max_dist = 1       # max forward-backward error (pixels)
        self._ds_factor = 0.95      # blend factor for the scale update
        self._min_n_points = 10     # minimum surviving points to trust
        # sample points inside the bounding box
        p0 = np.empty((self._n_samples, 2))
        p0[:, 0] = np.random.randint(bb[0], bb[2] + 1, self._n_samples)
        p0[:, 1] = np.random.randint(bb[1], bb[3] + 1, self._n_samples)
        p0 = p0.astype(np.float32)
        # forward-backward tracking
        p1, st, err = cv2.calcOpticalFlowPyrLK(prev, curr, p0, None, **self.lk_params)
        indx = np.where(st == 1)[0]
        p0 = p0[indx, :]
        p1 = p1[indx, :]
        p0r, st, err = cv2.calcOpticalFlowPyrLK(curr, prev, p1, None, **self.lk_params)
        if err is None:
            return None
        # check forward-backward error and min number of points
        fb_dist = np.abs(p0 - p0r).max(axis=1)
        good = fb_dist < self._fb_max_dist
        # keep half of the points
        err = err[good].flatten()
        if len(err) < self._min_n_points:
            return None
        indx = np.argsort(err)
        half_indx = indx[:len(indx) // 2]
        p0 = (p0[good])[half_indx]
        p1 = (p1[good])[half_indx]
        # estimate displacement
        dx = np.median(p1[:, 0] - p0[:, 0])
        dy = np.median(p1[:, 1] - p0[:, 1])
        # all pairs in prev and curr
        i, j = np.triu_indices(len(p0), k=1)
        pdiff0 = p0[i] - p0[j]
        pdiff1 = p1[i] - p1[j]
        # estimate change in scale: median ratio of pairwise (squared)
        # distances; the tiny epsilon avoids division by zero
        p0_dist = np.sum(pdiff0 ** 2, axis=1)
        p1_dist = np.sum(pdiff1 ** 2, axis=1)
        ds = np.sqrt(np.median(p1_dist / (p0_dist + 2**-23)))
        ds = (1.0 - self._ds_factor) + self._ds_factor * ds;
        # update bounding box
        # NOTE(review): dx_scale uses the box height and dy_scale the width;
        # verify this is intended and not a swapped-axes bug.
        dx_scale = (ds - 1.0) * 0.5 * (bb[3] - bb[1] + 1)
        dy_scale = (ds - 1.0) * 0.5 * (bb[2] - bb[0] + 1)
        bb_curr = (int(bb[0] + dx - dx_scale + 0.5),
                   int(bb[1] + dy - dy_scale + 0.5),
                   int(bb[2] + dx + dx_scale + 0.5),
                   int(bb[3] + dy + dy_scale + 0.5))
        if bb_curr[0] >= bb_curr[2] or bb_curr[1] >= bb_curr[3]:
            return None
        # clamp the box to the image bounds
        bb_curr = (min(max(0, bb_curr[0]), curr.shape[1]),
                   min(max(0, bb_curr[1]), curr.shape[0]),
                   min(max(0, bb_curr[2]), curr.shape[1]),
                   min(max(0, bb_curr[3]), curr.shape[0]))
        return bb_curr
class API(object):
    """Interactive demo: select a rectangle on a video stream and track it."""
    def __init__(self, win, source):
        self._device = cv2.VideoCapture(source)
        # File sources start paused; non-string (camera index) sources run.
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False
        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._bounding_box = None
        self._tracker = MedianFlowTracker()
    def on_rect(self, rect):
        # Callback from RectSelector: the user finished drawing a box.
        self._bounding_box = rect
    def run(self):
        """Main loop: grab frames, track the box, draw, and handle keys.

        Keys: ESC/q quit, p toggles pause.
        """
        prev, curr = None, None
        ret, frame = self._device.read()
        if not ret:
            raise IOError('can\'t reade frame')
        while True:
            if not self.rect_selector.dragging and not self.paused:
                ret, grabbed_frame = self._device.read()
                if not ret:
                    break
                frame = grabbed_frame.copy()
                prev, curr = curr, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if prev is not None and self._bounding_box is not None:
                bb = self._tracker.track(self._bounding_box, prev, curr)
                if bb is not None:
                    self._bounding_box = bb
                    # green box: the tracker produced an update this frame
                    cv2.rectangle(frame, self._bounding_box[:2], self._bounding_box[2:], (0, 255, 0), 2)
                else:
                    # red box: the tracker considered this frame unreliable
                    cv2.rectangle(frame, self._bounding_box[:2], self._bounding_box[2:], (0, 0, 255), 2)
            self.rect_selector.draw(frame)
            cv2.imshow(self.win, frame)
            ch = cv2.waitKey(1)
            if ch == 27 or ch in (ord('q'), ord('Q')):
                break
            elif ch in (ord('p'), ord('P')):
                self.paused = not self.paused
if __name__ == "__main__":
    # Command-line parsing via docopt is currently disabled; the video
    # source below is hard-coded instead.
    # args = docopt(__doc__)
    #
    # try:
    #     source = int(args['SOURCE'])
    # except:
    #     source = abspath(str(args['SOURCE']))
    #     if not exists(source):
    #         raise IOError('file does not exists')
    source = '/home/ubuntu/Desktop/Object_Track/SiamTrackers/demo/bag.avi'
    API("Median Flow Tracker", source).run()
| [
"[email protected]"
]
| |
372c0a5bf62df64d0962abad6fd583321f129e17 | ebf9a2a442dc3710129fb73ae432d1050098b01c | /joint_motion_server.py | 56243d5d9ff8e8010bcfdb730e649e67e904a8ad | []
| no_license | iskandersauma/stomp-chomp | f8f4bcf55946287e029ddfbe2186b637d6dd56df | 22b228350e80dcba2baa438e9c1199fb2a6ac44d | refs/heads/master | 2022-07-31T20:18:46.100687 | 2020-05-24T17:40:59 | 2020-05-24T17:40:59 | 266,592,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | #! /usr/bin/env python
import rospy
import actionlib
import asp_tools.msg
from asp_tools.srv import MoveJoints
from abstract_motion_server import AbstractMotionServer
class JointMotionAction(AbstractMotionServer):
    """ROS action server that forwards joint-motion goals to /asp/move_joints."""
    def __init__(self, name):
        super(JointMotionAction, self).__init__(name)
    def _init_server(self):
        """Create the feedback/result messages and the SimpleActionServer."""
        self._feedback = asp_tools.msg.JointMotionFeedback()
        self._result = asp_tools.msg.JointMotionResult()
        self._as = actionlib.SimpleActionServer(self._action_name, asp_tools.msg.JointMotionAction, execute_cb=self.execute_cb, auto_start = False)
    def call_service(self, goal):
        """
        Calls the motion service. Returns (executed, planned); both are
        False when the service call fails.
        """
        # publish info to the console for the user
        rospy.loginfo('%s: Executing the joint motion action' % (self._action_name))
        rospy.wait_for_service('/asp/move_joints')
        try:
            self.plan_executed = False
            move_joints = rospy.ServiceProxy('/asp/move_joints', MoveJoints)
            # NOTE: the `async` keyword argument and the comma-style except
            # clause below are Python 2 only ('async' is reserved in 3.7+).
            resp = move_joints(x=goal.x, y=goal.y, b=goal.b, z=goal.z, a=goal.a, async=True)
            executed, planned = resp.executed, resp.planned
        except rospy.ServiceException, e:
            print "move_joints service call failed: %s"%e
            executed, planned = False, False
        return executed, planned
if __name__ == '__main__':
    # Register the ROS node, start the action server, block until shutdown.
    rospy.init_node('joint_motion')
    server = JointMotionAction(rospy.get_name())
    rospy.spin()
| [
"[email protected]"
]
| |
73c31b39270f334f0ce304cf2fe7d9b0dde94306 | b87b602775b04c6bd018cbac63f6c032276949c9 | /Python/Self Studies/WebPenetrationPython/http-codes.py | f27273c26f472254cc374b299bce023c7e258417 | []
| no_license | smiroshnikov/Java-Python-Studies-2016 | 098430ce6b158af7dbbdbe7ff42482036db3f9de | 334205279b4c3979ec252d4c65ee85c7ce79d71f | refs/heads/master | 2021-01-11T05:43:12.236304 | 2016-11-21T15:26:00 | 2016-11-21T15:26:00 | 71,373,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # Will be useful for any interview
# 1XX - Informational
# 2XX - Success
# 3XX - Redirection
# 4XX - Client-side error
# 5XX - Server-side error
import requests
# Python 2 demo script: `print` is used as a statement throughout.
url_invalid = 'http://httpbin.org/failhtml'
url_valid = 'http://httpbin.org/html'
url_redirect = 'http://httpbin.org/redirect-to'
payload = {'url': "http://bing.com"}
# An endpoint that exists on httpbin.
req = requests.get(url_valid)
print "For this url {} ".format(req.url) + "Response code is : " + str(req.status_code) + "\n"
# An endpoint that presumably does not exist (client-side error expected).
req = requests.get(url_invalid)
print "For this url {}".format(req.url) + "Response code is: " + str(req.status_code) + "\n"
# /redirect-to sends us to the URL given in `payload`; requests follows it.
req = requests.get(url_redirect, params=payload)
print "For this url {}".format(req.url) + "Response code is: " + str(req.status_code) + "\n"
print "We have used redirection to bing.com instead of HackYourBank.com, that can be reflected in the request"
# req.history holds the intermediate redirect responses of the chain.
for element in req.history:
    print ("History code: " + str(element.status_code)+ ' : ' + element.url)
| [
"[email protected]"
]
| |
57e19bf0eacc2c9dc6bfd1452ebf6c427e698494 | 311ce6fbe1b264f2b656ba235371e756695dca53 | /forcing/dot_in/aestus1_A1_ae1/make_dot_in.py | 3e296f6d56c6863053a5285cd0f5d84cb28cdf8f | [
"MIT"
]
| permissive | parkermac/LiveOcean | 94bc9cb9fba1bdc2e206488e0e2afadfafeabb34 | 4bd2776cf95780a7965a18addac3c5e395703ce5 | refs/heads/master | 2022-11-30T10:21:50.568014 | 2022-11-21T16:32:55 | 2022-11-21T16:32:55 | 35,834,637 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,180 | py | """
This creates and poulates directories for ROMS runs on gaggle. It is
designed to work with the "BLANK" version of the .in file,
replacing things like $whatever$ with meaningful values.
"""
import os
import sys
fpth = os.path.abspath('../../')
if fpth not in sys.path:
sys.path.append(fpth)
import forcing_functions as ffun
Ldir, Lfun = ffun.intro()
from datetime import datetime, timedelta
# Forecast day being processed, and the day before (used for restart files).
fdt = datetime.strptime(Ldir['date_string'], '%Y.%m.%d')
fdt_yesterday = fdt - timedelta(1)
print('- dot_in.py creating files for LiveOcean for ' + Ldir['date_string'])
#### USER DEFINED VALUES ####
gtag = Ldir['gtag']
gtagex = gtag + '_' + Ldir['ex_name']
EX_NAME = Ldir['ex_name'].upper()
multi_core = True # use more than one core
# NOTE(review): both branches currently run for 1.0 day; kept separate so
# backfill and forecast lengths can diverge later.
if Ldir['run_type'] == 'backfill':
    days_to_run = 1.0
else:
    days_to_run = 1.0
dtsec = 30 # time step in seconds INTEGER (should fit evenly into 3600 sec)
restart_nrrec = '-1' # '-1' for a non-crash restart file, otherwise '1' or '2'
his_interval = 3600 # seconds to define and write to history files
rst_interval = 1 # days between writing to the restart file (e.g. 5)
# Reference heights written into the .in file as Fortran double literals
# (presumably meters -- confirm against the ROMS input spec).
zqt_height = '2.0d0'
zw_height = '10.0d0'
#### END USER DEFINED VALUES ####
# DERIVED VALUES
# Tile counts control the ROMS domain decomposition (ntilei * ntilej cores).
if multi_core:
    ntilei = '12' # number of tiles in I-direction (6)
    ntilej = '6' # number of tiles in J-direction (12)
else:
    ntilei = '1'
    ntilej = '1'
# BUG FIX: the old test `float(3600/dtsec) != 3600.0/dtsec` relied on
# Python 2 integer division and is always False under Python 3's true
# division; a modulo check detects a non-divisor time step in both.
if 3600 % dtsec != 0:
    print('** WARNING: dtsec does not fit evenly into 1 hour **')
dt = str(dtsec) + '.0d0' # a string version of dtsec, for the .in file
ninfo = int(his_interval/dtsec) # how often to write info to the log file (# of time steps)
nhis = int(his_interval/dtsec) # how often to write to the history files
ndefhis = int(nhis) # how often to create new history files
nrst = int(rst_interval*86400/dtsec)
ntimes = int(days_to_run*86400/dtsec)
# file location stuff
date_string = Ldir['date_string']
date_string_yesterday = fdt_yesterday.strftime('%Y.%m.%d')
# dstart: whole days since the model time origin (see Lfun.datetime_to_modtime).
dstart = str(int(Lfun.datetime_to_modtime(fdt) / 86400.))
f_string = 'f' + date_string
f_string_yesterday = 'f'+ date_string_yesterday
# where forcing files live (fjord, as seen from gaggle)
lo_dir = '/fjdata1/parker/LiveOcean/'
loo_dir = '/fjdata1/parker/LiveOcean_output/'
grid_dir = '/fjdata1/parker/LiveOcean_data/grids/' + Ldir['gridname'] + '/'
force_dir = loo_dir + gtag + '/' + f_string + '/'
roms_dir = '/pmr1/parker/LiveOcean_roms/'
roms_name = 'ROMS_820'
# the .in file
dot_in_name = 'liveocean.in' # name of the .in file
dot_in_dir0 = Ldir['roms'] + 'output/' + gtagex + '/'
Lfun.make_dir(dot_in_dir0) # make sure it exists
dot_in_dir = dot_in_dir0 + f_string +'/'
Lfun.make_dir(dot_in_dir, clean=True) # make sure it exists and is empty
# where to put the output files according to the .in file
out_dir0 = roms_dir + 'output/' + gtagex + '/'
out_dir = out_dir0 + f_string + '/'
atm_dir = 'atm/' # which atm forcing files to use
ocn_dir = 'ocnA/' # which ocn forcing files to use
riv_dir = 'riv1/' # which riv forcing files to use
tide_dir = 'tideA/' # which tide forcing files to use
# Initial condition: restart from yesterday's run, or start fresh from the
# ocean forcing's initial file.
if Ldir['start_type'] == 'continuation':
    nrrec = '0' # '-1' for a hot restart
    ininame = 'ocean_rst.nc' # for a hot perfect restart
    #ininame = 'ocean_his_0025.nc' # for a hot restart
    ini_fullname = out_dir0 + f_string_yesterday + '/' + ininame
elif Ldir['start_type'] == 'new':
    nrrec = '0' # '0' for a history or ini file
    ininame = 'ocean_ini.nc' # could be an ini or history file
    ini_fullname = force_dir + ocn_dir + ininame
# END DERIVED VALUES
## create .in ##########################
# Copy BLANK.in to the run directory, substituting every $name$ placeholder
# with the value of the like-named variable defined above.
f = open('BLANK.in','r')
f2 = open(dot_in_dir + dot_in_name,'w')
in_varlist = ['base_dir','ntilei','ntilej','ntimes','dt','nrrec','ninfo',
    'nhis','dstart','ndefhis','nrst','force_dir','grid_dir','roms_dir',
    'atm_dir','ocn_dir','riv_dir','tide_dir','dot_in_dir',
    'zqt_height','zw_height','ini_fullname','out_dir','EX_NAME','roms_name']
for line in f:
    for var in in_varlist:
        if '$'+var+'$' in line:
            # eval() only ever sees the fixed names in in_varlist above, so
            # no external input can reach it.
            line2 = line.replace('$'+var+'$', str(eval(var)))
            line = line2
        else:
            line2 = line
    f2.write(line2)
f.close()
f2.close()
| [
"[email protected]"
]
| |
2cfa60fa7eb34f071242927549751cfed75fc724 | b4c789816d9aa0c0068cd4458b3faf6d52143901 | /blackfynn/cli/bf_upload.py | d3633250c1da97ed1967d775ead28251215054d4 | [
"Apache-2.0"
]
| permissive | intrepidlemon/blackfynn-python | 21a7c2a84e6d19720dd766259c2c1e159f60d77d | 05501f818276474e0fa08b283bc3312101ef8d4e | refs/heads/master | 2021-05-05T11:16:52.504076 | 2018-02-01T16:40:23 | 2018-02-01T16:40:23 | 118,154,254 | 0 | 0 | null | 2018-01-19T17:09:31 | 2018-01-19T17:09:31 | null | UTF-8 | Python | false | false | 1,029 | py | '''
usage:
bf upload [options] [--to=destination] <file>...
global options:
-h --help Show help
--dataset=<dataset> Use specified dataset (instead of your current working dataset)
--profile=<name> Use specified profile (instead of default)
'''
from docopt import docopt
from cli_utils import recursively_upload, get_client, settings
import os
def main():
    """Entry point for ``bf upload``: push files to a destination or the
    current working dataset."""
    args = docopt(__doc__)
    bf = get_client()
    files = args['<file>']
    destination = args['--to']
    if destination:
        # An explicit --to destination takes precedence.
        target = bf.get(destination)
        recursively_upload(bf, target, files)
        return
    ds = settings.working_dataset
    if not ds:
        exit("ERROR: Must specify destination when uploading. Options:\n" \
             "\n 1. Set destination explicitly using --to command line argument" \
             "\n 2. Set default dataset using 'bf use <dataset>' before running upload command" \
             "\n")
    recursively_upload(bf, bf.get_dataset(ds), files)
| [
"[email protected]"
]
| |
1bd7ecb873e652e34954eea2f0ad69785d748f1e | 2db0a61beb3f7b1c2d65f8e3694985cbd8d2de3a | /tarch.py | 2df0ddea4b142998a10c149b326dce6f11235031 | []
| no_license | kamou/poseidon | 093323cee464ec7f158d973e8060535fb85ee71b | 094f2ac4f8c12de6e4658e4723d943706ca2d34c | refs/heads/master | 2022-04-10T18:54:02.906446 | 2020-03-06T00:37:49 | 2020-03-06T00:37:49 | 223,223,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,229 | py | import triton
from texceptions import *
from tcc import *
import keystone
class X86Cdecl(CallingConvention):
    """cdecl calling convention: arguments live on the stack.

    Argument n sits at [sp + (n + 1) * psize]; the slot at sp itself holds
    the return address pushed by ``call``.
    """
    def __init__(self, arch):
        self.arch = arch
    def get_func_arg(self, n):
        """Read the n-th (0-based) stack argument of the current frame."""
        offset = n*self.arch.psize + self.arch.psize
        # FIX: previously read self.sp / self.psize, attributes this class
        # never defines; use the owning arch's, mirroring set_func_arg().
        sp = self.arch.tc.getConcreteRegisterValue(self.arch.sp)
        value = self.arch.tc.getConcreteMemoryValue(triton.MemoryAccess(sp + offset, self.arch.psize))
        return value
    def set_func_arg(self, n, value):
        """Write the n-th (0-based) stack argument of the current frame."""
        sp = self.arch.tc.getConcreteRegisterValue(self.arch.sp)
        offset = n*self.arch.psize + self.arch.psize
        self.arch.tc.setConcreteMemoryValue(triton.MemoryAccess(sp + offset, self.arch.psize), value)
        return value
class ArchCommon(object):
MODE_FD = 0 # full decending
MODE_FA = 1 # full ascending
MODE_ED = 2 # empty decending
MODE_EA = 3 # empty ascending
    def symbolize(self, addr, size):
        """Mark `size` bytes at `addr` as symbolic; returns Triton's symbol."""
        return self.tc.symbolizeMemory(triton.MemoryAccess(addr, size))
    def read_reg(self, reg):
        """Return the concrete value currently held in register `reg`."""
        return self.tc.getConcreteRegisterValue(reg)
    def write_reg(self, reg, value):
        """Set the concrete value of register `reg`."""
        return self.tc.setConcreteRegisterValue(reg, value)
    def set_memory_feed(self, cb):
        """Register `cb` to run whenever a concrete memory value is read."""
        self.tc.addCallback(cb, triton.CALLBACK.GET_CONCRETE_MEMORY_VALUE)
    def func_ret(self, value=None):
        """Emulate a function return: optionally set the return register,
        then pop the return address from the stack and jump to it."""
        if value is not None:
            self.tc.setConcreteRegisterValue(self.ret, value)
        sp = self.tc.getConcreteRegisterValue(self.sp)
        # The return address sits at the top of the stack.
        ret_addr = self.tc.getConcreteMemoryValue(triton.MemoryAccess(self.tc.getConcreteRegisterValue(self.sp), self.psize))
        self.tc.setConcreteRegisterValue(self.pc, ret_addr)
        # Pop: advance the stack pointer past the saved return address.
        self.tc.setConcreteRegisterValue(self.sp, sp+self.psize)
def get_area(self, address, size):
return self.tc.getConcreteMemoryAreaValue(address, size)
def get_memory_value(self, addr, size):
return self.tc.getConcreteMemoryValue(triton.MemoryAccess(addr, size))
def set_memory_value(self, addr, value, size):
return self.tc.setConcreteMemoryValue(triton.MemoryAccess(addr, size), value)
def disassemble(self, addr=None):
if not addr is None:
pc = addr
else:
pc = self.read_reg(self.pc)
inst = triton.Instruction()
inst_code = self.get_area(pc, 16)
inst.setOpcode(inst_code)
inst.setAddress(pc)
self.tc.disassembly(inst)
return inst
def process(self, addr=None):
if not addr is None:
pc = addr
else:
pc = self.read_reg(self.pc)
inst = triton.Instruction()
inst_code = self.get_area(pc, 16)
inst.setOpcode(inst_code)
inst.setAddress(pc)
if not self.tc.processing(inst):
raise UnmanagedInstruction(inst)
for se in inst.getSymbolicExpressions():
se.setComment(str(inst))
return inst
def get_syscall_func_arg(self, n):
if n >= len(self.regs):
raise SyscallTooManyArgs()
value = self.tc.getConcreteRegisterValue(self.syscall_regs[n])
return value
def is_call(self, inst):
if inst.getType() in self.call_types:
return True
return False
def is_ret(self, inst):
if inst.getType() in self.ret_types:
return True
return False
def is_branch(self, inst):
if inst.getType() in self.branch_types:
return True
return False
def is_conditional_branch(self, inst):
if inst.getType() in self.conditional_branch_types:
return True
return False
def only_on_tainted(self, en):
self.tc.setMode(triton.MODE.ONLY_ON_TAINTED, en)
def taint_through_pointers(self, en):
self.tc.setMode(triton.MODE.TAINT_THROUGH_POINTERS, en)
def only_on_symbolized(self, en):
self.tc.setMode(triton.MODE.ONLY_ON_SYMBOLIZED, en)
def add_simplification(self, symplification):
self.tc.removeCallback(self.simplify, triton.CALLBACK.SYMBOLIC_SIMPLIFICATION)
self.tc.addCallback(self.simplify, triton.CALLBACK.SYMBOLIC_SIMPLIFICATION)
self.simplifications.add(symplification)
def clear_simplifications(self):
self.tc.removeCallback(self.simplify, triton.CALLBACK.SYMBOLIC_SIMPLIFICATION)
self.simplifications = set()
def simplify(self, tc, node):
for simplification in self.simplifications:
node = simplification(self, tc, node)
return node
def assemble(self, code):
return self.ks.asm(code)
def push(self, value, size=None, stack_mode=MODE_FD):
if size == None:
size = self.psize
if mode == MODE_FD:
sp = self.tc.getConcreteRegisterValue(self.sp) - size
self.write_reg(self.sp, sp)
self.set_memory_value(sp, value, size)
elif mode == MODE_ED:
sp = self.tc.getConcreteRegisterValue(self.sp)
self.set_memory_value(sp, value, size)
self.write_reg(self.sp, sp - size)
elif mode == MODE_FA:
sp = self.tc.getConcreteRegisterValue(self.sp) + size
self.write_reg(self.sp, sp)
self.set_memory_value(sp, value, size)
elif mode == MODE_EA:
sp = self.tc.getConcreteRegisterValue(self.sp)
self.set_memory_value(sp, value, size)
self.write_reg(self.sp, sp + size)
def pop(self, size=None, mode=MODE_FD):
if size == None:
size = self.psize
if mode == MODE_FD:
sp = self.tc.getConcreteRegisterValue(self.sp)
value = self.get_memory_value(sp, size)
self.write_reg(self.sp, sp + size)
elif mode == MODE_ED:
sp = self.tc.getConcreteRegisterValue(self.sp) + size
self.write_reg(self.sp, sp)
value = self.get_memory_value(sp, size)
elif mode == MODE_FA:
sp = self.tc.getConcreteRegisterValue(self.sp)
value = self.get_memory_value(sp, size)
self.write_reg(self.sp, sp-size)
elif mode == MODE_EA:
sp = self.tc.getConcreteRegisterValue(self.sp) - size
self.write_reg(self.sp, sp)
value = self.get_memory_value(sp, size)
return value
class ArchX86(ArchCommon):
    """32-bit x86 front-end: stack-passed (cdecl-style) arguments, EAX return."""

    def __init__(self):
        self.simplifications = set()
        self.tc = triton.TritonContext()
        self.tc.setArchitecture(triton.ARCH.X86)
        self.tc.setMode(triton.MODE.ALIGNED_MEMORY, True)
        self.tc.setMode(triton.MODE.SYMBOLIZE_INDEX_ROTATION, True)
        self.ks = keystone.Ks(keystone.KS_ARCH_X86, keystone.KS_MODE_32)
        self.pc = self.tc.registers.eip
        self.sp = self.tc.registers.esp
        self.psize = triton.CPUSIZE.DWORD
        self.ret = self.tc.registers.eax
        self.tc.setAstRepresentationMode(triton.AST_REPRESENTATION.PYTHON)
        # Syscall argument registers, in argument order (eax = number).
        self.syscall_regs = [
            self.tc.registers.eax,
            self.tc.registers.ebx,
            self.tc.registers.ecx,
            self.tc.registers.edx,
            self.tc.registers.esi,
            self.tc.registers.edi,
        ]
        self.ret_types = set([triton.OPCODE.X86.RET])
        self.call_types = set([triton.OPCODE.X86.CALL, triton.OPCODE.X86.LCALL])
        self.conditional_branch_types = set([
            triton.OPCODE.X86.JA,
            triton.OPCODE.X86.JBE,
            triton.OPCODE.X86.JECXZ,
            triton.OPCODE.X86.JL,
            triton.OPCODE.X86.JNE,
            triton.OPCODE.X86.JNS,
            triton.OPCODE.X86.JRCXZ,
            triton.OPCODE.X86.JAE,
            triton.OPCODE.X86.JCXZ,
            triton.OPCODE.X86.JG,
            triton.OPCODE.X86.JLE,
            triton.OPCODE.X86.JNO,
            triton.OPCODE.X86.JO,
            triton.OPCODE.X86.JS,
            triton.OPCODE.X86.JB,
            triton.OPCODE.X86.JE,
            triton.OPCODE.X86.JGE,
            triton.OPCODE.X86.JNP,
            triton.OPCODE.X86.JP
        ])
        self.branch_types = set()
        self.branch_types.update(self.conditional_branch_types)
        self.branch_types.add(triton.OPCODE.X86.JMP)

    def get_func_arg(self, n):
        """Read the n-th (0-based) stack argument; +psize skips the
        pushed return address."""
        offset = n * self.psize + self.psize
        sp = self.tc.getConcreteRegisterValue(self.sp)
        value = self.tc.getConcreteMemoryValue(
            triton.MemoryAccess(sp + offset, self.psize))
        return value

    def set_func_arg(self, n, value):
        """Write the n-th (0-based) stack argument; returns `value`."""
        sp = self.tc.getConcreteRegisterValue(self.sp)
        offset = n * self.psize + self.psize
        self.tc.setConcreteMemoryValue(
            triton.MemoryAccess(sp + offset, self.psize), value)
        return value

    def resolve_branch(self, inst):
        """Resolve the destination of a branch instruction.

        Only immediate operands are handled; memory operands are still
        TODO and fall through, returning None.
        """
        assert (self.is_branch(inst))
        # Bug fix: `dst` was used without ever being assigned.
        dst = inst.getOperands()[0]
        if dst.getType() == triton.OPERAND.IMM:
            return dst.getValue()
        elif dst.getType() == triton.OPERAND.MEM:
            # NOTE(review): incomplete — the components are fetched but the
            # effective address is never computed; returns None.
            disp = dst.getDisplacement()
            scale = dst.getScale()
            br = dst.getBaseRegister()
            sr = dst.getSegmentRegister()

    def push(self, value, size=None):
        """Push on the x86 (full-descending) stack."""
        # Bug fix: `super.push(...)` and bare MODE_FD raised at runtime.
        super().push(value, size, self.MODE_FD)

    def pop(self, size=None):
        """Pop from the x86 (full-descending) stack."""
        # Bug fix: original signature was pop(self, value, size=None) and
        # forwarded `value` where the parent expects `size`.
        return super().pop(size, self.MODE_FD)
class ArchX8664(ArchCommon):
    """64-bit x86 front-end: System V register arguments, RAX return."""

    def __init__(self):
        self.simplifications = set()
        self.tc = triton.TritonContext()
        self.tc.setArchitecture(triton.ARCH.X86_64)
        self.tc.setMode(triton.MODE.ALIGNED_MEMORY, True)
        self.ks = keystone.Ks(keystone.KS_ARCH_X86, keystone.KS_MODE_64)
        self.tc.addCallback(self.simplify, triton.CALLBACK.SYMBOLIC_SIMPLIFICATION)
        self.tc.setMode(triton.MODE.SYMBOLIZE_INDEX_ROTATION, True)
        self.pc = self.tc.registers.rip
        self.sp = self.tc.registers.rsp
        self.psize = triton.CPUSIZE.QWORD
        self.ret = self.tc.registers.rax
        self.tc.setAstRepresentationMode(triton.AST_REPRESENTATION.PYTHON)
        # System V AMD64 integer argument registers, in order.
        self.regs = [
            self.tc.registers.rdi,
            self.tc.registers.rsi,
            self.tc.registers.rdx,
            self.tc.registers.rcx,
            self.tc.registers.r8,
            self.tc.registers.r9
        ]
        self.syscall_regs = [
            self.tc.registers.rax,
            self.tc.registers.rbx,
            self.tc.registers.rcx,
            self.tc.registers.rdx,
            self.tc.registers.rsi,
            self.tc.registers.rdi,
        ]
        self.ret_types = set([triton.OPCODE.X86.RET])
        self.call_types = set([triton.OPCODE.X86.CALL, triton.OPCODE.X86.LCALL])
        self.conditional_branch_types = set([
            triton.OPCODE.X86.JA,
            triton.OPCODE.X86.JBE,
            triton.OPCODE.X86.JECXZ,
            triton.OPCODE.X86.JL,
            triton.OPCODE.X86.JNE,
            triton.OPCODE.X86.JNS,
            triton.OPCODE.X86.JRCXZ,
            triton.OPCODE.X86.JAE,
            triton.OPCODE.X86.JCXZ,
            triton.OPCODE.X86.JG,
            triton.OPCODE.X86.JLE,
            triton.OPCODE.X86.JNO,
            triton.OPCODE.X86.JO,
            triton.OPCODE.X86.JS,
            triton.OPCODE.X86.JB,
            triton.OPCODE.X86.JE,
            triton.OPCODE.X86.JGE,
            triton.OPCODE.X86.JNP,
            triton.OPCODE.X86.JP
        ])
        self.branch_types = set()
        self.branch_types.update(self.conditional_branch_types)
        self.branch_types.add(triton.OPCODE.X86.JMP)

    def get_func_arg(self, n):
        """Read the n-th (0-based) call argument: first six from registers,
        the rest from the stack."""
        if n < len(self.regs):
            value = self.tc.getConcreteRegisterValue(self.regs[n])
        else:
            offset = (n - len(self.regs)) * self.psize
            # NOTE(review): unlike set_func_arg below, no +psize is added to
            # skip the return address — confirm which offset is intended.
            sp = self.tc.getConcreteRegisterValue(self.sp)
            value = self.tc.getConcreteMemoryValue(
                triton.MemoryAccess(sp + offset, self.psize))
        return value

    def set_func_arg(self, n, value):
        """Write the n-th (0-based) call argument; returns `value`."""
        if n < len(self.regs):
            self.tc.setConcreteRegisterValue(self.regs[n], value)
        else:
            offset = (n - len(self.regs)) * self.psize + self.psize
            # Bug fix: `MemoryAccess` was unqualified (NameError) and the
            # write targeted absolute address `offset` instead of being
            # stack-relative.
            sp = self.tc.getConcreteRegisterValue(self.sp)
            self.tc.setConcreteMemoryValue(
                triton.MemoryAccess(sp + offset, self.psize), value)
        return value

    def push(self, value, size=None):
        """Push on the x86-64 (full-descending) stack."""
        # Bug fix: `super.push(...)` and bare MODE_FD raised at runtime.
        super().push(value, size, self.MODE_FD)

    def pop(self, size=None):
        """Pop from the x86-64 (full-descending) stack."""
        # Bug fix: original signature was pop(self, value, size=None) and
        # forwarded `value` where the parent expects `size`.
        return super().pop(size, self.MODE_FD)
| [
"[email protected]"
]
| |
e089d72ea17ae3c6b7cb2cde2f8f3c0371686fa6 | fe88aaeba451b4ff38e23fbc04f7f7a339e54aff | /usb.py | 867d1f4dca9cb6f7ead1451c9af93f3df6b9d2fc | []
| no_license | sudocn/python | d5a6903f2deb09974b96f6b476e5ccb753aa15ef | c2313f19e630ebc1bb7729cbd6b248723ca3dd11 | refs/heads/master | 2021-07-09T13:13:34.312731 | 2021-04-19T09:07:09 | 2021-04-19T09:07:09 | 5,881,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,947 | py | from __future__ import print_function
import os, sys
# Duration of one USB bit cell, in the time base produced by the parsers
# below (parse_csv_line / parse_hp_line normalize everything to ns).
# NOTE(review): the original comment says "83us", but the parsers emit
# nanoseconds and full-speed USB has a ~83 ns bit time — confirm the unit.
UNIT = 83 # 83us, one bit
# Half a bit cell; used to round pulse widths to the nearest bit count.
HALF_UNIT = 42

# PID bit patterns (as produced by nrzi_decode) -> human-readable names.
USB_PID = [
    [[0,0,1,1,1,1,0,0], "PRE "],
    [[0,1,0,0,1,0,1,1], "ACK "],
    [[0,1,0,1,1,0,1,0], "NAK "],
    [[0,1,1,1,1,0,0,0], "STALL "],
    [[1,0,0,0,0,1,1,1], "OUT "],
    [[1,0,0,1,0,1,1,0], "IN "],
    [[1,0,1,0,0,1,0,1], "SOF "],
    [[1,0,1,1,0,1,0,0], "SETUP "],
    [[1,1,0,0,0,0,1,1], "DATA0 "],
    [[1,1,0,1,0,0,1,0], "DATA1 "]]

# Capture file to decode; earlier inputs kept commented for reference.
#filename=r'C:\Documents and Settings\cpeng\Desktop\python\usb_capture.csv'
#filename=r'C:\Documents and Settings\cpeng\Desktop\python\usb.csv'
filename = r'C:\Documents and Settings\cpeng\Desktop\USB\LA Caputre\enumerate_fail_1'
#filename = r'/home/cpeng/prog/enum'

# Output file handle, opened by main(). (A module-level `global` statement
# is a no-op; kept for documentation of intent.)
global out
class ParseError(Exception):
    """Raised when a line of capture data cannot be interpreted."""

    def __init__(self, value):
        # Keep the offending payload so __str__ can show it verbatim.
        self.value = value

    def __str__(self):
        return repr(self.value)
def nrzi_decode(pkt):
    """Decode a list of (time, level, duration) pulses into a list of bits.

    In NRZI, a level held for N bit cells encodes one 0 (the transition)
    followed by N-1 ones. After six consecutive ones USB inserts a
    stuffing bit; a 7-cell pulse therefore marks the following pulse's
    leading 0 as a stuffing bit to be stripped.
    """
    NRZI_SEQUNCE = [0,1,1,1,1,1,1]
    decoded = []
    dec_dbg = []
    dbg = False
    stuffing_bit = 0
    #for pulse in pkt[1:]: #zhiyuan
    for pulse in pkt:
        # Round the pulse width to a whole number of bit cells.
        # Bug fix: `/` is float division on Python 3 and a float cannot be
        # used as a slice index below; `//` keeps the original (Python 2)
        # integer semantics on both versions.
        count = pulse[2] // UNIT
        if pulse[2] % UNIT > HALF_UNIT:
            count += 1
        if dbg: print ("nrzi:", count, pulse)
        if count > 7: continue
        #raise ParseError("Wrong NRZI code: core than 7 continous 1s")
        if count == 7: stuffing_bit = 2 # next loop's first zero should remove
        # append decoded bits
        if stuffing_bit == 1:
            print ("stuffing bit removed")
            stuffing_bit = 0
            decoded.extend(NRZI_SEQUNCE[1:count])
            if dbg: dec_dbg.append(NRZI_SEQUNCE[1:count])
        else:
            decoded.extend(NRZI_SEQUNCE[:count])
            if dbg: dec_dbg.append(NRZI_SEQUNCE[:count])
        if stuffing_bit:
            stuffing_bit -= 1
    if dbg: print (dec_dbg)
    return decoded
# print a packet that in csv mode
def print_csv_pkt(pkt):
    """Dump each raw (time, level, duration) pulse of *pkt* to stdout."""
    for pulse in pkt:
        print(pulse)
# parse USB PID to string
def parse_pid(pid):
    """Map an 8-bit PID pattern (list of bits) to its packet-type name."""
    for pattern, label in USB_PID:
        if pattern == pid:
            return label
    return "UNDEF "
# print a NRZI decoded usb packet
def print_usb_pkt(pkt, output):
    """Pretty-print one decoded USB packet (a list of bits) to `output`.

    Locates the SYNC byte (00000001), reports any skipped leading bits,
    then prints the PID name followed by the bits from the PID onward,
    grouped eight per space.
    """
    # A packet needs at least SYNC (8 bits) + PID (8 bits).
    if len(pkt) < 16:
        print ("incomplete usb packet:", pkt, file=output)
        return
    # find SYNC BYTE
    if pkt[:8] != [0,0,0,0,0,0,0,1]:
        # Scan forward for SYNC. `i` deliberately survives the loop: if no
        # match is found it is left at the last scanned index and the
        # packet is truncated to that point anyway.
        for i in range(1,len(pkt)-8):
            if pkt[i:i+8] == [0,0,0,0,0,0,0,1]:
                break
        print ("! SKIP ", pkt[:i], file=output)
        pkt = pkt[i:]
        #print ("! SYNC BYTE ERROR", end='', file=output)
    # Re-check the length after trimming the skipped prefix.
    if (len(pkt)) < 16:
        print ("incomplete usb packet:", pkt, file=output)
        return
    print (len(pkt), ": ", end='', file=output)
    print (parse_pid(pkt[8:16]), end='', file=output)
    # Emit every bit from the PID onward, a space after each 8 bits.
    for i in range(8,len(pkt)):
        print(pkt[i],end='', file=output)
        if i%8==7:
            print(" ", end='', file=output)
    print(file=output)
# parse a cvs format packet
# input: pkt in csv raw data format
# return: NRZI decoded, stuffing bit removed packet data
def parse_csv_packet(pkt):
    """Decode one pulse-list packet and print it to the module-level
    `out` file.

    NOTE(review): despite the header comment above, nothing is returned;
    the decoded packet is only printed.
    """
    global out
    #print_csv_pkt(pkt)
    dec = nrzi_decode(pkt)
    print_usb_pkt(dec, out)#sys.stdout)
#
# Zhiyuan LA capture file
#
#list all files in the same dir, and sort it
#for zhiyuan LA multiple csv files
def list_all_files(fname):
    """Return every capture file in fname's directory, sorted by the
    numeric suffix of its "<prefix>_<number>.csv" name.

    The prefix is derived from an arbitrary directory entry; all files in
    the directory are assumed to follow the same naming scheme.
    """
    path = os.path.dirname(fname)
    files = os.listdir(path)
    prefix = files[0]
    rindex = prefix.rindex('_') + 1
    prefix = prefix[:rindex]
    # Bug fix: on Python 3 `map()` returns a lazy iterator which has no
    # .sort(); build a sorted list explicitly (same result on Python 2).
    numbers = sorted(int(x[rindex:-4]) for x in files)
    files_sorted = []
    for num in numbers:
        files_sorted.append(os.path.join(path, prefix + str(num) + '.csv'))
    return files_sorted
def filter_jitters(pkt):
    """Drop glitch pulses, keeping only entries whose level field (x[1])
    is non-zero.

    Bug fix (portability): `filter(...)` returns a lazy iterator on
    Python 3; return a real list so callers can reuse the result, matching
    the original Python 2 behavior.
    """
    return [pulse for pulse in pkt if pulse[1] != 0]
# split and parse one cvs line
def parse_csv_line(line):
    """Parse one "<duration><unit>,<level>" CSV record.

    Returns (nanoseconds, level) with the duration normalized to ns.
    Raises ParseError for records that are not two fields or whose first
    field does not end in a time unit.
    """
    fields = line.split(",")
    if len(fields) != 2:
        raise ParseError("length > 2")
    stamp, level = fields
    if stamp[-1] != 's':
        raise ParseError("Not time value")
    unit = stamp[-2:]
    if unit == 'ns':
        nanoseconds = int(stamp[:-2])
    elif unit == 'us':
        nanoseconds = int(float(stamp[:-2]) * 1000)
    elif unit == 'ms':
        nanoseconds = int(float(stamp[:-2]) * 1000 * 1000)
    else:
        # Plain seconds: only the trailing 's' is stripped.
        nanoseconds = int(float(stamp[:-1]) * 1000 * 1000 * 1000)
    return (nanoseconds, int(level))
def parse_csv_file(fname):
    """Stream a Zhiyuan LA CSV export and decode each USB packet in it.

    Pulses are accumulated until a low level lasting longer than 1.5 bit
    cells (end-of-packet idle) is seen; the accumulated list is then
    de-jittered and handed to parse_csv_packet().
    """
    pt,pv = 0,0 # previous_time & previous_value
    packet = []
    with open(fname, "r") as f:
        f.readline() # skip the first line (CSV header)
        for line in f:
            try:
                t,v = parse_csv_line(line)
                #print pt,pv,t-pt
                packet.append((pt,pv,t-pt))
                # A long low period terminates the current packet.
                if pv == 0 and t-pt > UNIT + HALF_UNIT:
                    packet = filter_jitters(packet)
                    parse_csv_packet(packet)
                    print (line)
                    #print "> EOP <"
                    packet = []
                pt,pv = t,v
            except ParseError as e:
                # Bug fix: the original printed `e.__str__` (the bound
                # method object) instead of the message; match
                # parse_hp_file's `print(e, line)` style.
                print (e, line)
#
# HP LA capture files
#
def parse_hp_line(line):
    """Parse one HP LA export row into (col0, col1, duration).

    The duration (third column) is converted to ns when the fourth column
    is 'us'. Rows with fewer than four whitespace-separated fields yield
    (0, 0, 0).
    """
    #print (line)
    fields = line.split()
    if len(fields) < 4:
        return (0, 0, 0)
    duration = float(fields[2])
    if fields[3] == 'us':
        duration *= 1000  # microseconds -> nanoseconds
    return (fields[0], fields[1], int(duration))
def parse_hp_file(fname):
    """Read an HP LA export and decode every USB packet found in it.

    A pulse longer than 10 bit cells ends the current packet; that gap
    pulse itself is excluded from the decoded packet.
    """
    pulses = []
    with open(fname, "r") as f:
        for row in f:
            # Sample rows begin with '-' or '0'; skip everything else.
            if row[0] not in {'-', '0'}:
                continue
            try:
                stamp, level, duration = parse_hp_line(row)
                pulses.append((stamp, level, duration))
                #print(t,v,d)
                if duration > 10 * UNIT:
                    # Long gap = end of packet; drop the gap pulse itself.
                    parse_csv_packet(pulses[:-1])
                    pulses = []
            except ParseError as e:
                print(e, row)
def main(fname):
    """Entry point: decode *fname* and write the result to c:\\usb.txt."""
    global out
    # `out` is consumed by parse_csv_packet via print_usb_pkt.
    out = open(r'c:\usb.txt', 'w')
    #parse_csv_file(fname)
    # Currently wired for HP LA exports; the CSV variant is kept above.
    parse_hp_file(fname)
    out.close()

if __name__ == "__main__":
    main(filename)
| [
"[email protected]"
]
| |
93a1f05c08ea21f959912453a64c9da033ab05d2 | ffb3ecaa2c56ac87f2bedb762d0e385305a8c722 | /pkmn/damage_range.py | 7026a994ff28cd921a8047fed606cfbaf2cae468 | []
| no_license | MoneyHypeMike/Pokemon-SpeedrunTools | 1bc79444ae8f6955c605f2272e03919cf2c98077 | 268e7bc77a3d5ddd7fc6111002a65c8cf5c37156 | refs/heads/master | 2020-05-18T15:27:22.762665 | 2014-12-27T02:24:22 | 2014-12-27T02:24:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,563 | py | from math import ceil
import damage5
import formulas
import movedex
import pokemon
import speciedex
def atk_damage_range(filename, gen, name, gender, level, nature, ability, hp, atk, Def, spatk, spdef, spd):
    """Sweep every battle row in *filename* and write a CSV of the damage
    ranges the tracked pokemon (the attacker) can deal to each opponent.

    For every attacking move the cartesian product of crit flag, pinch-
    ability HP, defense/attack stat stages, screens and weathers is
    enumerated and each surviving combination's damage range (from
    damage5.damage5) is written as one output row. Experience, EVs and
    level of the tracked pokemon are updated after every opponent row.
    """
    output_file = "DamageRange_Atk_{}_{}_{}_{}_{}_{}_{}_{}.csv".format(name, nature, hp, atk, Def, spatk, spdef, spd)
    with open(filename, "r") as f, open(output_file, "w") as i:
        f.readline()  # skip the input CSV header
        i.write("BATTLE,DEF_NAME,MOVE_NAME,DEF_STAGE,ATK_STAGE,DAMAGE_RANGE,DEF_HP,MIN_TURNS,MAX_TURNS,CRIT,"\
                "HP_BOOST,ITEM,SCREEN,WEATHER,EVS,ATK_STAT,DEF_STAT,TOTAL_EXP,LEVEL,POKERUS\n")
        last_battle = -1
        battle_num = 0
        # The tracked (attacking) pokemon, reused across all rows.
        atk_pkmn = pokemon.Pokemon(gen, name, gender, level, nature, ability, "", "", "", "", "", hp, atk, Def, spatk, spdef, spd)
        for lines in f:
            info = lines.strip().split(",")
            battle_num = info[0]
            battle_type = info[1].upper()
            num_poke = int(info[2])
            trainer = True if battle_type != "WILD" else False
            n = 0  # opponent move counter within this row
            #Information which changes for each battle
            if last_battle != battle_num:
                last_battle = battle_num
                screens = [""]
                weathers = list(set([info[3].upper()] + update_weather(atk_pkmn, [])))
                location = info[4]
                # NOTE(review): bool() of any non-empty string is True, so a
                # literal "False" in this column still enables pokerus —
                # confirm the CSV encodes this column as empty/non-empty.
                atk_pkmn.pokerus = bool(info[33])
                #Change the user pokemon specie if it has evolved or was switched
                if atk_pkmn.species.name != info[21].upper():
                    atk_pkmn.species = speciedex.all.dex[int(gen)][info[21].upper()]
                    atk_pkmn.update_stats()
                atk_stage, def_stage, spatk_stage, spdef_stage, spd_stage = 0, 0, 0, 0, 0
                atk_pkmn.boost = [0, 0, 0, 0, 0, 0]
                max_atk, min_atk, max_def, min_def = 0, 0, 0, 0
                # Tracks Intimidate users seen this battle (one Attack drop each).
                name = []
            #Updates user pokémon item, moves, happiness value and rare candy usage
            atk_pkmn.item = info[22].upper()
            atk_pkmn.moves = [movedex.all.dex[gen][x.upper()] for x in [info[23], info[24], info[25], info[26]] if x != ""]
            for x in range(int(info[32])):
                atk_pkmn.rare_candy()
            atk_pkmn.happiness = int(info[34])
            #Creates defending pokemon
            def_pkmn = pokemon.Pokemon(gen, info[5], info[6], int(info[7]), info[8], info[9], info[10], info[11].upper(), info[12].upper(), info[13].upper(), info[14].upper(),
                                       int(info[15]), int(info[16]), int(info[17]), int(info[18]), int(info[19]), int(info[20]))
            #Updates weather and screen based on defending moves
            weathers = update_weather(def_pkmn, weathers)
            screens = update_screen(def_pkmn, screens)
            #updates stages
            atk_stage += int(info[27])
            def_stage += int(info[28])
            spatk_stage += int(info[29])
            spdef_stage += int(info[30])
            # NOTE(review): reuses column 30 (same as spdef); probably meant
            # info[31] — confirm against the input CSV layout.
            spd_stage += int(info[30])
            #Loop to calculate damage ranges
            for move in atk_pkmn.moves:
                ability = atk_pkmn.ability
                category = move.category
                hps = [atk_pkmn.stat[0]]
                hp_boost = False
                # shadows the builtin 'type' within this loop body
                type = move.type.name
                n += 1
                if def_pkmn.ability == "INTIMIDATE" and n == 1:
                    name.append(def_pkmn.species.name)
                if category == "STATUS":
                    continue
                elif category == "PHYSICAL":
                    max_atk = 2 if check_mod_atk(atk_pkmn, True) else atk_stage
                    min_atk = min(len(name) * -1, -2 if check_mod_atk(def_pkmn, False) else 0)
                    max_def = 0
                    min_def = -2 if check_mod_def(atk_pkmn, False) else 0
                else:
                    max_atk = spatk_stage
                    min_atk = 0
                    max_def = 0
                    min_def = 0
                # Pinch abilities: also evaluate the 1-HP (boosted) case.
                if (ability == "BLAZE" and type == "FIRE") or (ability == "OVERGROW" and type == "GRASS") or (ability == "SWARM" and type == "BUG") or (ability == "TORRENT" and type == "WATER"):
                    # NOTE(review): list.append returns None, so `boost` is
                    # always None and is never used afterwards.
                    boost = hps.append(1)
                    hp_boost = True
                for crit in (False, True):
                    for hp in hps:
                        atk_pkmn.hp = hp
                        for def_mod in range(min_def, max_def + 1):
                            if category == "PHYSICAL":
                                def_pkmn.boost[2] = def_mod
                            else:
                                def_pkmn.boost[4] = def_mod
                            # Crits ignore favorable defensive boosts.
                            if crit and def_mod > 0:
                                continue
                            for atk_mod in range(min_atk, max_atk + 1):
                                if category == "PHYSICAL":
                                    atk_pkmn.boost[1] = atk_mod
                                else:
                                    atk_pkmn.boost[3] = atk_mod
                                # Crits ignore unfavorable offensive drops.
                                if crit and atk_mod < 0:
                                    continue
                                for screen in screens:
                                    # Screens only matter for their category
                                    # and are bypassed by crits/Brick Break.
                                    if screen == "LIGHT SCREEN" and (category == "PHYSICAL" or crit or move.name == "BRICK BREAK"):
                                        continue
                                    elif screen == "REFLECT" and (category == "SPECIAL" or crit or move.name == "BRICK BREAK"):
                                        continue
                                    for weather in weathers:
                                        # Skip weathers irrelevant to this move.
                                        if weather in ("SUNNY DAY", "RAIN DANCE") and type not in ("WATER", "FIRE"):
                                            continue
                                        elif weather == "SANDSTORM" and (category == "PHYSICAL" or type != "ROCK"):
                                            continue
                                        elif weather == "HAIL" and move.name != "SOLARBEAM":
                                            continue
                                        for damage_range in damage5.damage5(atk_pkmn, def_pkmn, move, battle_type, screen, crit, weather):
                                            if damage_range != [0]:
                                                dmg = "-".join([str(value) for value in damage_range])
                                                min_turns = ceil(def_pkmn.hp / max(damage_range))
                                                max_turns = ceil(def_pkmn.hp / min(damage_range))
                                                i.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n"\
                                                        .format(battle_num, def_pkmn.species.name, move.name, def_mod, atk_mod,
                                                                dmg, def_pkmn.hp, min_turns, max_turns, crit, True if hp == 1 else False, atk_pkmn.item, screen, weather,
                                                                "-".join([str(x) for x in atk_pkmn.ev]),
                                                                "-".join([str(x) for x in atk_pkmn.stat]), "-".join([str(x) for x in def_pkmn.stat]),
                                                                atk_pkmn.exp, atk_pkmn.level, atk_pkmn.pokerus))
            # Post-battle-row bookkeeping for the tracked pokemon.
            atk_pkmn.update_exp(formulas.calc_exp(gen, def_pkmn.level, def_pkmn.species.base_exp, trainer, num_poke, atk_pkmn.level, atk_pkmn.item))
            atk_pkmn.update_evs(def_pkmn.species.ev_yield)
            atk_pkmn.update_level()
def def_damage_range(filename, gen, name, gender, level, nature, ability, hp, atk, Def, spatk, spdef, spd):
    """Mirror of atk_damage_range: write a CSV of the damage ranges each
    opponent's moves can deal TO the tracked pokemon (the defender).

    Same enumeration of crit/HP/stage/screen/weather combinations as
    atk_damage_range, but the opponent built from each CSV row attacks and
    the tracked pokemon defends; EV/exp/level bookkeeping still applies to
    the tracked pokemon.
    """
    output_file = "DamageRange_Def_{}_{}_{}_{}_{}_{}_{}_{}.csv".format(name, nature, hp, atk, Def, spatk, spdef, spd)
    with open(filename, "r") as f, open(output_file, "w") as i:
        f.readline()  # skip the input CSV header
        i.write("BATTLE,ATK_NAME,MOVE_NAME,DEF_STAGE,ATK_STAGE,DAMAGE_RANGE,DEF_HP,MIN_TURNS,MAX_TURNS,CRIT,"\
                "HP_BOOST,ITEM,SCREEN,WEATHER,EVS,ATK_STAT,DEF_STAT,TOTAL_EXP,LEVEL,POKERUS\n")
        last_battle = -1
        battle_num = 0
        # The tracked (defending) pokemon, reused across all rows.
        def_pkmn = pokemon.Pokemon(gen, name, gender, level, nature, ability, "", "", "", "", "", hp, atk, Def, spatk, spdef, spd)
        for lines in f:
            info = lines.strip().split(",")
            battle_num = info[0]
            battle_type = info[1].upper()
            num_poke = int(info[2])
            trainer = True if battle_type != "WILD" else False
            n = 0  # opponent move counter within this row
            #Information which changes for each battle
            if last_battle != battle_num:
                last_battle = battle_num
                screens = [""]
                weathers = list(set([info[3].upper()] + update_weather(def_pkmn, [])))
                location = info[4]
                # NOTE(review): bool() of any non-empty string is True —
                # same caveat as in atk_damage_range.
                def_pkmn.pokerus = bool(info[33])
                #Change the user pokemon specie if it has evolved or was switched
                if def_pkmn.species.name != info[21].upper():
                    def_pkmn.species = speciedex.all.dex[int(gen)][info[21].upper()]
                    def_pkmn.update_stats()
                atk_stage, def_stage, spatk_stage, spdef_stage, spd_stage = 0, 0, 0, 0, 0
                max_atk, min_atk, max_def, min_def = 0, 0, 0, 0
                # Tracks Intimidate users seen this battle.
                name = []
            #Updates user pokémon item, moves, happiness value and rare candy usage
            def_pkmn.item = info[22].upper()
            def_pkmn.moves = [movedex.all.dex[gen][x.upper()] for x in [info[23], info[24], info[25], info[26]] if x != ""]
            for x in range(int(info[32])):
                def_pkmn.rare_candy()
            def_pkmn.happiness = int(info[34])
            #Creates defending pokemon (here: the attacking opponent)
            atk_pkmn = pokemon.Pokemon(gen, info[5], info[6], int(info[7]), info[8], info[9], info[10], info[11].upper(), info[12].upper(), info[13].upper(), info[14].upper(),
                                       int(info[15]), int(info[16]), int(info[17]), int(info[18]), int(info[19]), int(info[20]))
            #Updates weather and screen based on defending moves
            weathers = update_weather(atk_pkmn, weathers)
            screens = update_screen(atk_pkmn, screens)
            #Loop to calculate damage ranges
            for move in atk_pkmn.moves:
                ability = atk_pkmn.ability
                category = move.category
                hps = [atk_pkmn.stat[0]]
                hp_boost = False
                # shadows the builtin 'type' within this loop body
                type = move.type.name
                n += 1
                if def_pkmn.ability == "INTIMIDATE" and n == 1:
                    name.append(def_pkmn.species.name)
                if category == "STATUS":
                    continue
                elif category == "PHYSICAL":
                    max_atk = 2 if check_mod_atk(atk_pkmn, True) else atk_stage
                    min_atk = min(len(name) * -1, -2 if check_mod_atk(def_pkmn, False) else 0)
                    max_def = 0
                    min_def = -2 if check_mod_def(atk_pkmn, False) else 0
                else:
                    max_atk = spatk_stage
                    min_atk = 0
                    max_def = 0
                    min_def = 0
                # Pinch abilities: also evaluate the attacker's 1-HP case.
                if (ability == "BLAZE" and type == "FIRE") or (ability == "OVERGROW" and type == "GRASS") or (ability == "SWARM" and type == "BUG") or (ability == "TORRENT" and type == "WATER"):
                    # NOTE(review): list.append returns None; `boost` unused.
                    boost = hps.append(1)
                    hp_boost = True
                for crit in (False, True):
                    for hp in hps:
                        atk_pkmn.hp = hp
                        for def_mod in range(min_def, max_def + 1):
                            if category == "PHYSICAL":
                                def_pkmn.boost[2] = def_mod
                            else:
                                def_pkmn.boost[4] = def_mod
                            # Crits ignore favorable defensive boosts.
                            if crit and def_mod > 0:
                                continue
                            for atk_mod in range(min_atk, max_atk + 1):
                                if category == "PHYSICAL":
                                    atk_pkmn.boost[1] = atk_mod
                                else:
                                    atk_pkmn.boost[3] = atk_mod
                                # Crits ignore unfavorable offensive drops.
                                if crit and atk_mod < 0:
                                    continue
                                for screen in screens:
                                    # Screens only matter for their category
                                    # and are bypassed by crits/Brick Break.
                                    if screen == "LIGHT SCREEN" and (category == "PHYSICAL" or crit or move.name == "BRICK BREAK"):
                                        continue
                                    elif screen == "REFLECT" and (category == "SPECIAL" or crit or move.name == "BRICK BREAK"):
                                        continue
                                    for weather in weathers:
                                        # Skip weathers irrelevant to this move.
                                        if weather in ("SUNNY DAY", "RAIN DANCE") and type not in ("WATER", "FIRE"):
                                            continue
                                        elif weather == "SANDSTORM" and (category == "PHYSICAL" or type != "ROCK"):
                                            continue
                                        elif weather == "HAIL" and move.name != "SOLARBEAM":
                                            continue
                                        for damage_range in damage5.damage5(atk_pkmn, def_pkmn, move, battle_type, screen, crit, weather):
                                            if damage_range != [0]:
                                                dmg = "-".join([str(value) for value in damage_range])
                                                min_turns = ceil(def_pkmn.hp / max(damage_range))
                                                max_turns = ceil(def_pkmn.hp / min(damage_range))
                                                i.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n"\
                                                        .format(battle_num, atk_pkmn.species.name, move.name, def_mod, atk_mod,
                                                                dmg, def_pkmn.hp, min_turns, max_turns, crit, True if hp == 1 else False, atk_pkmn.item, screen, weather,
                                                                "-".join([str(x) for x in def_pkmn.ev]),
                                                                "-".join([str(x) for x in atk_pkmn.stat]), "-".join([str(x) for x in def_pkmn.stat]),
                                                                def_pkmn.exp, def_pkmn.level, def_pkmn.pokerus))
            # Post-battle-row bookkeeping for the tracked pokemon.
            def_pkmn.update_exp(formulas.calc_exp(gen, atk_pkmn.level, atk_pkmn.species.base_exp, trainer, num_poke, def_pkmn.level, def_pkmn.item))
            def_pkmn.update_evs(atk_pkmn.species.ev_yield)
            def_pkmn.update_level()
def update_screen(pkmn, screen):
    """Append any screen-setting moves the pokemon knows to `screen` and
    return the deduplicated result."""
    known = [move.name for move in pkmn.moves]
    for candidate in ("LIGHT SCREEN", "REFLECT"):
        if candidate in known:
            screen.append(candidate)
    return list(set(screen))
def update_weather(pkmn, weather):
    """Append any weather-setting moves the pokemon knows to `weather` and
    return the deduplicated result."""
    known = [move.name for move in pkmn.moves]
    # Same append order as before: sun, rain, hail, sandstorm.
    for candidate in ("SUNNY DAY", "RAIN DANCE", "HAIL", "SANDSTORM"):
        if candidate in known:
            weather.append(candidate)
    return list(set(weather))
#To do: Swagger boost target
#       Superpower drops user
def check_mod_atk(pkmn, boost):
    """Return True if the pokemon knows a move that raises (boost=True) or
    lowers (boost=False) Attack; otherwise None."""
    if boost:
        relevant = {"ANCIENTPOWER", "ACUPRESSURE", "BELLY DRUM", "BULK UP", "CURSE",
                    "DRAGON DANCE", "HOWL", "MEDITATE", "METAL CLAW", "METEOR MASH",
                    "OMINOUS WIND", "RAGE", "SHARPEN", "SILVER WIND", "SWORDS DANCE",
                    "WORK UP"}
    else:
        relevant = {"AURORA BEAM", "CHARM", "FEATHERDANCE", "GROWL", "MEMENTO",
                    "SECRET POWER", "SUPERPOWER", "TICKLE"}
    if any(move.name in relevant for move in pkmn.moves):
        return True
    return None
#To do: Close Combat/Superpower drops user
def check_mod_def(pkmn, boost):
    """Return True if the pokemon knows a move that raises (boost=True) or
    lowers (boost=False) Defense; otherwise None."""
    if boost:
        relevant = {"ACID ARMOR", "ANCIENTPOWER", "ACUPRESSURE", "BARRIER", "BULK UP",
                    "COSMIC POWER", "CURSE", "DEFEND ORDER", "DEFENSE CURL", "HARDEN",
                    "IRON DEFENSE", "OMINOUS WIND", "SILVER WIND", "SKULL BASH",
                    "STEEL WING", "STOCKPILE", "WITHDRAW"}
    else:
        relevant = {"CRUNCH", "CRUSH CLAW", "IRON TAIL", "LEER", "ROCK SMASH",
                    "SCREECH", "TAIL WHIP", "TICKLE"}
    if any(move.name in relevant for move in pkmn.moves):
        return True
    return None
#To do: Flatter boost target
#       Draco Meteor/Leaf Storm/Overheat/Psycho Boost drops user
def check_mod_spatk(pkmn, boost):
    """Return True if the pokemon knows a move that raises (boost=True) or
    lowers (boost=False) Sp. Attack; otherwise None.

    Bug fix: the move names were in Title Case while every move name in
    this module is stored upper-cased (movedex lookups via .upper() and
    the upper-case sets in check_mod_atk / check_mod_def), so nothing
    could ever match. Upper-cased for consistency.
    """
    if boost:
        info = {"ANCIENTPOWER", "ACUPRESSURE", "CALM MIND", "CHARGE BEAM",
                "GROWTH", "NASTY PLOT", "OMINOUS WIND", "SILVER WIND",
                "TAIL GLOW"}
    else:
        info = {"CAPTIVATE", "MEMENTO", "MIST BALL"}
    for moves in [x.name for x in pkmn.moves]:
        if moves in info:
            return True
#To do: Close Combat drops user
def check_mod_spdef(pkmn, boost):
    """Return True if the pokemon knows a move that raises (boost=True) or
    lowers (boost=False) Sp. Defense; otherwise None.

    Bug fix: move names upper-cased for consistency with the rest of the
    module (movedex keys and the other check_mod_* sets are upper-case),
    so matches are actually possible.
    """
    if boost:
        info = {"AMNESIA", "ANCIENTPOWER", "ACUPRESSURE", "CALM MIND", "CHARGE",
                "COSMIC POWER", "DEFEND ORDER", "OMINOUS WIND", "SILVER WIND",
                "STOCKPILE"}
    else:
        info = {"ACID", "BUG BUZZ", "EARTH POWER", "ENERGY BALL", "FAKE TEARS",
                "FLASH CANNON", "FOCUS BLAST", "LUSTER PURGE", "METAL SOUND",
                "PSYCHIC", "SHADOW BALL", "SEED FLARE"}
    for moves in [x.name for x in pkmn.moves]:
        if moves in info:
            return True
atk_damage_range("Battles_B_Tepig.csv", 5, "Tepig", "Male", 5, "Adamant", "Blaze", 30, 30, 30, 30, 30, 30)
def_damage_range("Battles_B_Tepig.csv", 5, "Tepig", "Male", 5, "Adamant", "Blaze", 30, 30, 30, 30, 30, 30) | [
"[email protected]"
]
| |
9cd50b1e4b2cea56a81f0057f7a4137c0153622c | 39a8bb0bcbca9a9e25705decead580602dbcfd2b | /meraki_cisco_parser.py | 9b945c602780e21342c7524f27ddcc68efaa9c2d | []
| no_license | NickVK9/Cisco-Meraki-Selenium-project | 1e1fa829d763564d7c60cc2830bcef9392946a30 | 59fe8ed7d69293d2df3d709dbc13efbed4a84c98 | refs/heads/master | 2020-11-28T08:42:16.639710 | 2019-12-25T06:41:37 | 2019-12-25T06:41:37 | 229,759,425 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,012 | py | from selenium import webdriver
import csv
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
# Dashboard credentials and login URL.
# NOTE(review): credentials are hard-coded in source — consider moving
# them to environment variables or a local config file.
LOGIN = "[email protected]"
PASSWORD = "Plussix@88"
LINK = "https://account.meraki.com/secure/login/dashboard_login"
# PLEASE, PUT YOUR PATH TO CHROMEDRIVER
PATH_TO_CHROMEDRIVER = "C:\\Users\\Nick\\Desktop\\Cisco-Meraki-Selenium-project-master\\chromedriver.exe"
# PLEASE, WRITE HERE FILE'S NAME
FILE = 'Network.csv'
# PLEASE, WRITE HERE PATH TO CSV FILE
PATH_TO_CSV_FILE = "C:\\Users\\Nick\\Desktop\\Cisco-Meraki-Selenium-project-master\\"
COLUMN_NAME = 'Network Name' #Name of head column, to drop it
ORGANIZATION = 'Boyd Hyperconverged Inc'
# THIS DICT MADE TO FOLLOW WHICH NETWORKS ALREADY DONE
# Maps network name -> '' (pending) or 'Done' (processed).
CHECK = {}
# Shared browser session, reused by every open_link() call.
browser = webdriver.Chrome(executable_path=PATH_TO_CHROMEDRIVER)
with open(PATH_TO_CSV_FILE + FILE) as f:
    #HERE PROGRAM TAKES ALL NETWORK NAMES AND TAKE THEM TO DICTIONARY
    reader = csv.reader(f)
    for row in reader:
        # Skip the header row; every other first column is a network name.
        if row[0] != COLUMN_NAME:
            CHECK[row[0]] = ''
def take_network_from_csv():
    """Process every network in CHECK that is not yet marked 'Done'.

    Each pending network gets one open_link() run and is then marked
    'Done', so re-invoking this function after a crash resumes with the
    remaining networks.

    NOTE(review): open_link() ends with browser.quit(), so a second
    pending network would reuse a closed session — this appears to rely
    on the retry loop in __main__; confirm.
    """
    # The `global` declarations are read-only here (only CHECK is needed).
    global FILE
    global PATH_TO_CSV_FILE
    global COLUMN_NAME
    global CHECK
    global PATH_TO_CHROMEDRIVER
    for i in CHECK:
        if CHECK[i] != 'Done':
            network_name = i
            open_link(browser, network_name)
            # Mark processed so a retry skips this network.
            CHECK[network_name] = 'Done'
        else:
            continue
def open_link(browser, network_name):
    """Log into the Meraki dashboard, open *network_name*, and set both
    per-client and per-SSID bandwidth limits to unlimited, saving if a
    change was made. Quits the browser when finished.
    """
    # THIS IS MAIN FUNCTION
    global LINK
    global LOGIN
    global PASSWORD
    browser.get(LINK)
    #LOG IN
    email = browser.find_element_by_id('email')
    password = browser.find_element_by_id('password')
    email.send_keys(LOGIN)
    password.send_keys(PASSWORD)
    submit_button = browser.find_element_by_id('commit')
    submit_button.click()
    # CHOOSE NEEDED ORGANISATION
    organization = browser.find_element_by_link_text('Boyd Hyperconverged Inc')
    organization.click()
    #WAITING FOR PAGE LOADING
    time.sleep(3)
    # FIND AND CHOOSE NEEDED NETWORK
    select_arrow_zone = browser.find_element_by_class_name('Select-arrow-zone')
    select_arrow_zone.click()
    input_network = browser.find_element_by_xpath('//*[@id="react-select-2--value"]/div[2]/input')
    input_network.send_keys(network_name)
    input_network.send_keys(Keys.ENTER)
    #GOING TO Firewall & traffic shaping
    tables = browser.find_elements_by_class_name('menu-item-container')
    # NOTE(review): if no 'Wireless' entry exists, needed_table is never
    # assigned and the click below raises NameError — confirm intended.
    for i in tables:
        if i.text == 'Wireless':
            needed_table = i
    needed_table.click()
    time.sleep(3)
    organization = browser.find_elements_by_tag_name('a')
    # Same caveat: `firewall` is unbound if neither link text matches.
    for i in organization:
        if i.text == 'Firewall & traffic shaping' or i.text == 'Firewall':
            firewall = i
    firewall.click()
    # SWITCHES SLIDERS
    # client_slider[0] = per-client limit, client_slider[1] = per-SSID limit;
    # drag each slider handle onto the "unlimited" toggle when needed.
    client_slider = browser.find_elements_by_class_name('simple')
    if client_slider[0].text != 'unlimited':
        source_element = browser.find_element_by_xpath('//*[@id="per_client_limit"]/table/tbody/tr/td[1]/div/div[2]/a')
        dest_element = browser.find_element_by_class_name('bandwidth_widget_toggle')
        ActionChains(browser).drag_and_drop(source_element, dest_element).perform()
    if client_slider[1].text != 'unlimited':
        source_element = browser.find_element_by_xpath('//*[@id="per_ssid_limit"]/table/tbody/tr/td[1]/div/div[2]/a')
        dest_element = browser.find_element_by_class_name('bandwidth_widget_toggle')
        ActionChains(browser).drag_and_drop(source_element, dest_element).perform()
    time.sleep(5)
    # SAVING
    # NOTE(review): bare except also hides unrelated failures, not just the
    # "nothing to save" case the message suggests.
    try:
        save_changes = browser.find_element_by_id('floating_submit')
        save_changes.click()
    except:
        print('Already Unlimited')
    browser.quit()
if __name__ == '__main__':
    # Keep retrying until one full pass over CHECK completes without an
    # exception; networks already marked 'Done' are skipped on retry.
    # NOTE(review): the except arm calls take_network_from_csv() again
    # outside any try, so a second failure aborts the loop — confirm.
    while True:
        try:
            take_network_from_csv()
            break
        except:
            browser.quit()
            take_network_from_csv()
    print('DONE')
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.