hexsha stringlengths 40-40 | size int64 5-2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3-248 | max_stars_repo_name stringlengths 5-125 | max_stars_repo_head_hexsha stringlengths 40-78 | max_stars_repo_licenses listlengths 1-10 | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 3-248 | max_issues_repo_name stringlengths 5-125 | max_issues_repo_head_hexsha stringlengths 40-78 | max_issues_repo_licenses listlengths 1-10 | max_issues_count int64 1-67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 3-248 | max_forks_repo_name stringlengths 5-125 | max_forks_repo_head_hexsha stringlengths 40-78 | max_forks_repo_licenses listlengths 1-10 | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | content stringlengths 5-2.06M | avg_line_length float64 1-1.02M | max_line_length int64 3-1.03M | alphanum_fraction float64 0-1 | count_classes int64 0-1.6M | score_classes float64 0-1 | count_generators int64 0-651k | score_generators float64 0-1 | count_decorators int64 0-990k | score_decorators float64 0-1 | count_async_functions int64 0-235k | score_async_functions float64 0-1 | count_documentation int64 0-1.04M | score_documentation float64 0-1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d25b3d6bc31f3ca7960ee1d2b2edc46e92e9ff1d | 6,142 | py | Python | skills/eliza/test_eliza.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | ["Apache-2.0"] | 34 | 2021-08-18T14:51:44.000Z | 2022-03-10T14:14:48.000Z | skills/eliza/test_eliza.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | ["Apache-2.0"] | 27 | 2021-08-30T14:42:09.000Z | 2022-03-17T22:11:45.000Z | skills/eliza/test_eliza.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | ["Apache-2.0"] | 40 | 2021-08-22T07:13:32.000Z | 2022-03-29T11:45:32.000Z |
import unittest
import eliza
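# These tests exercise Eliza._match_decomp, which matches a tokenized input against a
# decomposition pattern: '*' captures any (possibly empty) span, '@word' matches a synonym
# set loaded from doctor.txt, and the result is the list of captured groups (None on no match).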
class ElizaTest(unittest.TestCase):
def test_decomp_1(self):
el = eliza.Eliza()
self.assertEqual([], el._match_decomp(["a"], ["a"]))
self.assertEqual([], el._match_decomp(["a", "b"], ["a", "b"]))
def test_decomp_2(self):
el = eliza.Eliza()
self.assertIsNone(el._match_decomp(["a"], ["b"]))
self.assertIsNone(el._match_decomp(["a"], ["a", "b"]))
self.assertIsNone(el._match_decomp(["a", "b"], ["a"]))
self.assertIsNone(el._match_decomp(["a", "b"], ["b", "a"]))
def test_decomp_3(self):
el = eliza.Eliza()
self.assertEqual([["a"]], el._match_decomp(["*"], ["a"]))
self.assertEqual([["a", "b"]], el._match_decomp(["*"], ["a", "b"]))
self.assertEqual([["a", "b", "c"]], el._match_decomp(["*"], ["a", "b", "c"]))
def test_decomp_4(self):
el = eliza.Eliza()
self.assertEqual([], el._match_decomp([], []))
self.assertEqual([[]], el._match_decomp(["*"], []))
def test_decomp_5(self):
el = eliza.Eliza()
self.assertIsNone(el._match_decomp(["a"], []))
self.assertIsNone(el._match_decomp([], ["a"]))
def test_decomp_6(self):
el = eliza.Eliza()
self.assertEqual([["0"]], el._match_decomp(["*", "a"], ["0", "a"]))
self.assertEqual([["0", "a"]], el._match_decomp(["*", "a"], ["0", "a", "a"]))
self.assertEqual([["0", "a", "b"]], el._match_decomp(["*", "a"], ["0", "a", "b", "a"]))
self.assertEqual([["0", "1"]], el._match_decomp(["*", "a"], ["0", "1", "a"]))
def test_decomp_7(self):
el = eliza.Eliza()
self.assertEqual([[]], el._match_decomp(["*", "a"], ["a"]))
def test_decomp_8(self):
el = eliza.Eliza()
self.assertIsNone(el._match_decomp(["*", "a"], ["a", "b"]))
self.assertIsNone(el._match_decomp(["*", "a"], ["0", "a", "b"]))
self.assertIsNone(el._match_decomp(["*", "a"], ["0", "1", "a", "b"]))
def test_decomp_9(self):
el = eliza.Eliza()
self.assertEqual([["0"], ["b"]], el._match_decomp(["*", "a", "*"], ["0", "a", "b"]))
self.assertEqual([["0"], ["b", "c"]], el._match_decomp(["*", "a", "*"], ["0", "a", "b", "c"]))
def test_decomp_10(self):
el = eliza.Eliza()
self.assertEqual([["0"], []], el._match_decomp(["*", "a", "*"], ["0", "a"]))
self.assertEqual([[], []], el._match_decomp(["*", "a", "*"], ["a"]))
self.assertEqual([[], ["b"]], el._match_decomp(["*", "a", "*"], ["a", "b"]))
def test_syn_1(self):
el = eliza.Eliza()
el.load("doctor.txt")
self.assertEqual([["am"]], el._match_decomp(["@be"], ["am"]))
self.assertEqual([["am"]], el._match_decomp(["@be", "a"], ["am", "a"]))
self.assertEqual([["am"]], el._match_decomp(["a", "@be", "b"], ["a", "am", "b"]))
def test_syn_2(self):
el = eliza.Eliza()
el.load("doctor.txt")
self.assertIsNone(el._match_decomp(["@be"], ["a"]))
def test_syn_3(self):
el = eliza.Eliza()
el.load("doctor.txt")
self.assertIsNotNone(el._match_decomp(["*", "i", "am", "@sad", "*"], ["its", "true", "i", "am", "unhappy"]))
def test_response_1(self):
el = eliza.Eliza()
el.load("doctor.txt")
self.assertEqual("In what way ?", el.respond("Men are all alike."))
self.assertEqual(
"Can you think of a specific example ?", el.respond("They're always bugging us about something or other.")
)
self.assertEqual("Your boyfriend made you come here ?", el.respond("Well, my boyfriend made me come here."))
self.assertEqual(
"I am sorry to hear that you are depressed .", el.respond("He says I'm depressed much of the time.")
)
self.assertEqual(
"Do you think that coming here will help you not to be unhappy ?", el.respond("It's true. I am unhappy.")
)
self.assertEqual(
"What would it mean to you if you got some help ?", el.respond("I need some help, that much seems certain.")
)
self.assertEqual(
"Tell me more about your family.", el.respond("Perhaps I could learn to get along with my mother.")
)
self.assertEqual("Who else in your family takes care of you ?", el.respond("My mother takes care of me."))
self.assertEqual("Your father ?", el.respond("My father."))
self.assertEqual("What resemblence do you see ?", el.respond("You are like my father in some ways."))
self.assertEqual(
"What makes you think I am not very aggressive ?",
el.respond("You are not very aggressive, but I think you don't want me to notice that."),
)
self.assertEqual("Why do you think I don't argue with you ?", el.respond("You don't argue with me."))
self.assertEqual("Does it please you to believe I am afraid of you ?", el.respond("You are afraid of me."))
self.assertEqual(
"What else comes to mind when you think of your father ?", el.respond("My father is afraid of everybody.")
)
self.assertIn(
el.respond("Bullies."),
[
"Lets discuss further why your boyfriend made you come here .",
"Earlier you said your mother .",
"But your mother takes care of you .",
"Does that have anything to do with the fact that your boyfriend made you come here ?",
"Does that have anything to do with the fact that your father ?",
"Lets discuss further why your father is afraid of everybody .",
],
)
def test_response_2(self):
el = eliza.Eliza()
el.load("doctor.txt")
self.assertEqual(el.initial(), "How do you do. Please tell me your problem.")
self.assertIn(
el.respond("Hello"), ["How do you do. Please state your problem.", "Hi. What seems to be your problem ?"]
)
self.assertEqual(el.final(), "Goodbye. Thank you for talking to me.")
if __name__ == "__main__":
unittest.main()
| 45.496296 | 120 | 0.545262 | 6,061 | 0.986812 | 0 | 0 | 0 | 0 | 0 | 0 | 2,146 | 0.349398 |
d25b9fd6524f688abaf7d222a5e27a028065bdf6 | 5,387 | py | Python | examples/tasks/dtw-energy-plus-models-data/code.py | dburian/ivis-core | 7789821c3750ccab68396aa2bfdd405fd4d21520 | ["MIT"] | 2 | 2021-05-17T13:20:56.000Z | 2021-11-04T16:36:29.000Z | examples/tasks/dtw-energy-plus-models-data/code.py | dburian/ivis-core | 7789821c3750ccab68396aa2bfdd405fd4d21520 | ["MIT"] | 37 | 2019-05-08T04:53:58.000Z | 2022-03-02T03:50:42.000Z | examples/tasks/dtw-energy-plus-models-data/code.py | dburian/ivis-core | 7789821c3750ccab68396aa2bfdd405fd4d21520 | ["MIT"] | 12 | 2019-04-06T15:00:32.000Z | 2021-11-06T08:56:07.000Z |
import sys
import os
import json
from elasticsearch import Elasticsearch, helpers
from datetime import datetime, timezone
import numpy as np
from dtw import dtw
# Get parameters and set up elasticsearch
data = json.loads(sys.stdin.readline())
es = Elasticsearch([{'host': data['es']['host'], 'port': int(data['es']['port'])}])
state = data.get('state')
params= data['params']
entities= data['entities']
# Get ES index and fields
sensor_set = entities['signalSets'][params['sensors']]
sensor_ts = entities['signals'][params['sensors']][params['ts']]
sensor_co2 = entities['signals'][params['sensors']][params['co2']]
limit_val = float(params['limitValue'])
limit = limit_val
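# On the first run there is no stored state: ask the host runtime (via file descriptor 3)
# to create the output signal set defined below, then read the resulting state back from stdin.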
if state is None or state.get('index') is None:
ns = sensor_set['namespace']
msg = {}
msg['type'] = 'sets'
# Request new signal set creation
msg['sigSet'] = {
"cid" : "e_plus_mod",
"name" : "E+ comparison" ,
"namespace": ns,
"description" : "Comparison of Energy+ models" ,
"aggs" : "0"
}
signals= []
signals.append({
"cid": "ts",
"name": "ts",
"description": "timestamp",
"namespace": ns,
"type": "date",
"indexed": False,
"settings": {}
})
signals.append({
"cid": "mod",
"name": "mod",
"description": "mod",
"namespace": ns,
"type": "keyword",
"indexed": False,
"settings": {}
})
signals.append({
"cid": "model",
"name": "model",
"description": "Closest model's cid",
"namespace": ns,
"type": "keyword",
"indexed": False,
"settings": {}
})
msg['sigSet']['signals'] = signals
ret = os.write(3,(json.dumps(msg) + '\n').encode())
state = json.loads(sys.stdin.readline())
error = state.get('error')
if error:
sys.stderr.write(error+"\n")
sys.exit(1)
else:
store_msg = {}
store_msg["type"] = "store"
store_msg["state"] = state
ret = os.write(3,(json.dumps(store_msg) + '\n').encode())
def get_co2_values(index,ts_field, co2_field):
# sensor data query
query = {
'_source': [co2_field, ts_field],
'sort': [{ts_field: 'asc'}],
'query': {
"range" : {
ts_field : {
"gt" : "now-180m/m",
"lt" : "now/m"
}
}
}
}
results = es.search(index=index, body=query)
sensor_data = []
for item in results['hits']['hits']:
val = item["_source"][co2_field]
if val is not None:
sensor_data.append(val)
else:
continue
return sensor_data
sensor_data = get_co2_values(sensor_set['index'], sensor_ts['field'], sensor_co2['field'])
if not sensor_data:
print('No sensor data to measure on')
exit()
sensor_np = np.array(sensor_data, dtype=float).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
min_model={}
min_distance=float("inf")
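# Compare the recent sensor CO2 series against each model's series with dynamic time
# warping (dtw); keep the model with the smallest accumulated distance as the closest one.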
for model in params['models']:
ts =entities['signals'][model['sigSet']][model['ts']]['field']
co2 =entities['signals'][model['sigSet']][model['co2']]['field']
sig_set = entities['signalSets'][model['sigSet']]['index']
model_data = get_co2_values(sig_set, ts,co2)
if not model_data:
print(f'No data for signal set {sig_set}')
continue
# Calculate for all models
model_np = np.array(model_data, dtype=float).reshape(-1, 1)
# Calculate for all models
d, cost_matrix, acc_cost_matrix, path = dtw(sensor_np, model_np, dist=euclidean_norm)
if d<min_distance:
min_distance = d
min_model['name'] = entities["signalSets"][model["sigSet"]]["name"]
min_model['cid'] = model["sigSet"]
min_model['ts'] = ts
min_model['co2'] = co2
min_model['index'] = sig_set
# Do something with closest model
if not min_model:
print('No model found')
exit()
print(f'Closest model is: {min_model["name"]}')
# Query prediction
query = {
'_source': [min_model['co2'], min_model['ts']],
'sort': [{min_model['ts']: 'asc'}],
"aggs" : {
"max_co2" : { "max" : { "field" : min_model['co2'] } }
},
'query': {
"range" : {
min_model['ts'] : {
"gt" : "now/m",
"lt" : "now+60m/m"
}
}
}
}
results = es.search(index=min_model['index'], body=query)
max_co2 = results['aggregations']['max_co2']['value']
# Get current mode
# TODO this will probably change later on to take data from the actual system
query = {
'size': 1,
'_source': [state['fields']['mod']],
'sort': [{state['fields']['ts']: 'desc'}],
'query': {
"match_all": {}
}
}
results = es.search(index=state['index'], body=query)
mod = results['hits']['hits'][0]['_source'][state['fields']['mod']] if results['hits']['total'] > 0 else 'mod1'
# If currently over limit or going to be according to models data, open more
if sensor_data[-1] > limit or max_co2 > limit:
mod = 'mod2'
elif sensor_data[-1] < limit - 200:
mod = 'mod1'
print(f'Chosen: {mod}')
ts = datetime.now(timezone.utc).astimezone()
doc = {
state['fields']['ts']: ts,
state['fields']['model']: min_model['cid'],
state['fields']['mod']: mod
}
res = es.index(index=state['index'], doc_type='_doc', id=ts, body=doc)
#prediction_data = []
#for item in results['hits']['hits']:
# val = item["_source"][min_model['co2']]
# if val is not None:
# prediction_data.append(val)
# else:
# continue
#print (prediction_data)
| 25.530806 | 111 | 0.593837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,940 | 0.360126 |
d25d20d8eebe1fa8e5a33aad978f268f206cda23 | 602 | py | Python | xmrswap/interface_part.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | ["MIT"] | 2 | 2020-09-21T17:33:23.000Z | 2020-10-03T08:54:01.000Z | xmrswap/interface_part.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | ["MIT"] | 2 | 2020-10-03T09:18:48.000Z | 2020-10-13T19:58:34.000Z | xmrswap/interface_part.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
from .contrib.test_framework.messages import (
CTxOutPart,
)
from .interface_btc import BTCInterface
class PARTInterface(BTCInterface):
@staticmethod
def witnessScaleFactor():
return 2
@staticmethod
def txVersion():
return 0xa0
def __init__(self, rpc_callback):
self.rpc_callback = rpc_callback
self.txoType = CTxOutPart
| 22.296296 | 73 | 0.704319 | 273 | 0.453488 | 0 | 0 | 114 | 0.189369 | 0 | 0 | 213 | 0.353821 |
d25df58bed9f8be63b8c4a15d08e86c300ade0fd | 2,511 | py | Python | pelican_resume/resume.py | cmenguy/pelican-resume | 57105e72c24ef04ad96857f51e5e9060e6aff1f6 | ["MIT"] | 12 | 2016-02-07T05:16:44.000Z | 2019-11-20T08:46:10.000Z | pelican_resume/resume.py | cmenguy/pelican-resume | 57105e72c24ef04ad96857f51e5e9060e6aff1f6 | ["MIT"] | 1 | 2019-01-20T20:57:35.000Z | 2019-01-20T20:59:59.000Z | pelican_resume/resume.py | cmenguy/pelican-resume | 57105e72c24ef04ad96857f51e5e9060e6aff1f6 | ["MIT"] | 5 | 2016-06-07T23:34:36.000Z | 2020-07-13T18:01:23.000Z |
'''
resume
==============================================================================
This plugin generates a PDF resume from a Markdown file using customizable CSS
'''
import os
import logging
import tempfile
from subprocess import Popen
from pelican import signals
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
CSS_DIR = os.path.join(CURRENT_DIR, "static", "css")
logger = logging.getLogger(__name__)
def set_default_settings(settings):
settings.setdefault("RESUME_SRC", "pages/resume.md")
settings.setdefault("RESUME_PDF", "pdfs/resume.pdf")
settings.setdefault("RESUME_CSS_DIR", CSS_DIR)
settings.setdefault("RESUME_TYPE", "moderncv")
settings.setdefault("RESUME_PANDOC", "pandoc")
settings.setdefault("RESUME_WKHTMLTOPDF", "wkhtmltopdf")
def init_default_config(pelican):
from pelican.settings import DEFAULT_CONFIG
set_default_settings(DEFAULT_CONFIG)
if (pelican):
set_default_settings(pelican.settings)
def generate_pdf_resume(generator):
path = generator.path
output_path = generator.settings.get("OUTPUT_PATH")
markdown = os.path.join(path, generator.settings.get("RESUME_SRC"))
css_type = generator.settings.get("RESUME_TYPE")
css = os.path.join(generator.settings.get("RESUME_CSS_DIR"), "%s.css" % css_type)
if not os.path.exists(markdown):
logging.critical("Markdown resume not found under %s" % markdown)
return
if css and not os.path.exists(os.path.join(path, css)):
logging.warn("Resume CSS not found under %s, CSS will be ignored" % css)
css = os.path.join(path, css) if css else css
with tempfile.NamedTemporaryFile(suffix=".html") as html_output:
pdf_output = os.path.join(output_path, generator.settings.get("RESUME_PDF"))
pdf_dir = os.path.dirname(pdf_output)
if not os.path.exists(pdf_dir):
os.makedirs(pdf_dir)
pandoc = generator.settings.get("RESUME_PANDOC")
wkhtmltopdf = generator.settings.get("RESUME_WKHTMLTOPDF")
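# Two-step conversion: pandoc renders the Markdown resume to standalone HTML (optionally
# styled with the chosen CSS), then wkhtmltopdf renders that HTML to the final PDF.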
html_cmd = "%s --standalone " % pandoc
if css:
html_cmd += "-c %s " % css
html_cmd += "--from markdown --to html -o %s %s" % (html_output.name, markdown)
Popen(html_cmd, shell=True).wait()
pdf_cmd = "%s %s %s" % (wkhtmltopdf, html_output.name, pdf_output)
Popen(pdf_cmd, shell=True).wait()
def register():
signals.initialized.connect(init_default_config)
signals.article_generator_finalized.connect(generate_pdf_resume)
| 38.630769 | 87 | 0.682597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.244524 |
d25e34eee54e20d2dc920f68d0031efffaa533b3 | 331 | py | Python | app/machine_learning.py | ludthor/CovidVisualizer | 721015e8f9f0b1c0fb2e5ba985884341d22046e2 | ["MIT"] | null | null | null | app/machine_learning.py | ludthor/CovidVisualizer | 721015e8f9f0b1c0fb2e5ba985884341d22046e2 | ["MIT"] | null | null | null | app/machine_learning.py | ludthor/CovidVisualizer | 721015e8f9f0b1c0fb2e5ba985884341d22046e2 | ["MIT"] | null | null | null |
from sklearn.linear_model import Ridge
class MachineLearning():
def __init__(self):
self.model = None
def train_model(self, X,y):
lr = Ridge(alpha=0.5)
lr.fit(X,y)
print(lr)
self.model = lr
def predict(self, X):
preds = self.model.predict(X)
return preds
| 15.761905 | 38 | 0.570997 | 286 | 0.864048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d25f4bca7ddaa3e56f525ab91b8973856914e4df | 6,246 | py | Python | connection/connection_handler.py | valsoares/td | 4856604c71ff7d996f4e2580e0cdd9b904805225 | ["MIT"] | null | null | null | connection/connection_handler.py | valsoares/td | 4856604c71ff7d996f4e2580e0cdd9b904805225 | ["MIT"] | null | null | null | connection/connection_handler.py | valsoares/td | 4856604c71ff7d996f4e2580e0cdd9b904805225 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Marcos F. Caetano ([email protected]) 11/03/2020
@description: PyDash Project
The ConnectionHandler is a Singleton class implementation.
It is the class responsible for retrieving segments from the web server.
It also implements a traffic shaping approach.
"""
from base.simple_module import SimpleModule
from base.message import Message, MessageKind, SSMessage
from base.configuration_parser import ConfigurationParser
from player.parser import *
import http.client
import time
from scipy.stats import expon
from base.timer import Timer
import seaborn as sns
import matplotlib.pyplot as plt
class ConnectionHandler(SimpleModule):
def __init__(self, id):
SimpleModule.__init__(self, id)
self.initial_time = 0
self.qi = []
# for traffic shaping
config_parser = ConfigurationParser.get_instance()
self.traffic_shaping_interval = int(config_parser.get_parameter('traffic_shaping_profile_interval'))
self.traffic_shaping_seed = int(config_parser.get_parameter('traffic_shaping_seed'))
self.traffic_shaping_values = []
# mark the current traffic shaping interval
self.current_traffic_shaping_interval = 0
self.traffic_shaping_sequence = []
# traffic shaping sequence position
self.tss_position = 0
# traffic shaping values position
self.tsv_position = 0
token = config_parser.get_parameter('traffic_shaping_profile_sequence')
for i in range(len(token)):
if token[i] == 'L':
self.traffic_shaping_sequence.append(0)
elif token[i] == 'M':
self.traffic_shaping_sequence.append(1)
elif token[i] == 'H':
self.traffic_shaping_sequence.append(2)
self.timer = Timer.get_instance()
def get_traffic_shaping_positions(self):
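# Advance to the next entry of the shaping sequence/profile whenever the simulation time
# crosses into a new traffic-shaping interval, then return the current positions.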
current_tsi = self.timer.get_current_time() // self.traffic_shaping_interval
if current_tsi > self.current_traffic_shaping_interval:
self.current_traffic_shaping_interval = current_tsi
self.tss_position = (self.tss_position + 1) % len(self.traffic_shaping_sequence)
self.tsv_position = (self.tsv_position + 1) % len(self.traffic_shaping_values[0])
return (self.tss_position, self.tsv_position)
def initialize(self):
# self.send_down(Message(MessageKind.SEGMENT_REQUEST, 'Olá Mundo'))
pass
def bandwidth_limitation(self, package_size=0):
if package_size == 0:
return
tsp = self.get_traffic_shaping_positions()
target_throughput = self.traffic_shaping_values[self.traffic_shaping_sequence[tsp[0]]][tsp[1]]
print(f'Execution Time {self.timer.get_current_time()} > target throughput: {target_throughput} - profile: ({self.traffic_shaping_sequence[tsp[0]]}, {tsp[1]})')
rtt = time.perf_counter() - self.initial_time
throughput = package_size / rtt
# we have not exceeded the target throughput, so no shaping delay is needed
if target_throughput >= throughput:
return
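# Sleep just long enough that package_size / (rtt + waiting_time) equals the target
# throughput, i.e. waiting_time = package_size / target_throughput - rtt.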
waiting_time = (package_size - (target_throughput * rtt)) / target_throughput
time.sleep(waiting_time)
def finalization(self):
pass
def handle_xml_request(self, msg):
if not 'http://' in msg.get_payload():
raise ValueError('url_mpd parameter should starts with http://')
self.initial_time = time.perf_counter()
url_tokens = msg.get_payload().split('/')[2:]
port = '80'
host_name = url_tokens[0]
path_name = '/' + '/'.join(url_tokens[1:])
mdp_file = ''
try:
connection = http.client.HTTPConnection(host_name, port)
connection.request('GET', path_name)
mdp_file = connection.getresponse().read().decode()
connection.close()
except Exception as err:
print('> Houston, we have a problem!')
print(f'> trying to connect to: {msg.get_payload()}')
print(err)
exit(-1)
msg = Message(MessageKind.XML_RESPONSE, mdp_file)
msg.add_bit_length(8 * len(mdp_file))
parsed_mpd = parse_mpd(msg.get_payload())
self.qi = parsed_mpd.get_qi()
increase_factor = 1
low = round(self.qi[len(self.qi) - 1] * increase_factor)
medium = round(self.qi[(len(self.qi) // 2) - 1] * increase_factor)
high = round(self.qi[0] * increase_factor)
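# Pre-generate three exponential throughput profiles (low / medium / high) anchored at
# bitrates taken from the MPD quality levels; the shaping sequence indexes into them.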
self.traffic_shaping_values.append(
expon.rvs(scale=1, loc=low, size=1000, random_state=self.traffic_shaping_seed))
self.traffic_shaping_values.append(
expon.rvs(scale=1, loc=medium, size=1000, random_state=self.traffic_shaping_seed))
self.traffic_shaping_values.append(
expon.rvs(scale=1, loc=high, size=1000, random_state=self.traffic_shaping_seed))
self.send_up(msg)
def handle_segment_size_request(self, msg):
port = '80'
host_name = msg.get_host_name()
path_name = msg.get_url()
ss_file = ''
self.initial_time = time.perf_counter()
print(f'Execution Time {self.timer.get_current_time()} > selected QI: {self.qi.index(msg.get_quality_id())}')
try:
connection = http.client.HTTPConnection(host_name, port)
connection.request('GET', path_name)
ss_file = connection.getresponse().read()
connection.close()
except Exception as err:
print('> Houston, we have a problem!')
print(f'> trying to connect to: {msg.get_payload()}')
print(err)
exit(-1)
msg.set_kind(MessageKind.SEGMENT_RESPONSE)
decoded = False
try:
ss_file = ss_file.decode()
except UnicodeDecodeError:
# if decode() fails, the payload is a binary segment
msg.add_bit_length(8 * len(ss_file))
self.bandwidth_limitation(msg.get_bit_length())
decoded = True
if not decoded and '404 Not Found' in ss_file:
msg.set_found(False)
self.send_up(msg)
def handle_segment_size_response(self, msg):
pass
def handle_xml_response(self, msg):
pass
| 34.131148 | 168 | 0.646013 | 5,624 | 0.900272 | 0 | 0 | 0 | 0 | 0 | 0 | 1,172 | 0.18761 |
d260f409a5bee0d6f1b71a1546aadce5730647ca | 37,634 | py | Python | latexpp/fixes/pkg/phfqit.py | psg-mit/latexpp | 0b7b523c9ce147c2d34cc430b1abd39972e33fa9 | ["MIT"] | 4 | 2020-08-28T18:41:48.000Z | 2021-11-11T11:23:58.000Z | latexpp/fixes/pkg/phfqit.py | psg-mit/latexpp | 0b7b523c9ce147c2d34cc430b1abd39972e33fa9 | ["MIT"] | 4 | 2020-07-31T07:34:38.000Z | 2021-11-23T19:05:00.000Z | latexpp/fixes/pkg/phfqit.py | psg-mit/latexpp | 0b7b523c9ce147c2d34cc430b1abd39972e33fa9 | ["MIT"] | 1 | 2020-07-22T02:44:48.000Z | 2020-07-22T02:44:48.000Z |
import re
import yaml
import logging
logger = logging.getLogger(__name__)
from pylatexenc.macrospec import MacroSpec, ParsedMacroArgs, MacroStandardArgsParser
from pylatexenc import latexwalker
from latexpp.macro_subst_helper import MacroSubstHelper
from latexpp.fix import BaseFix
# parse entropy macros etc.
_qitobjdefs = yaml.safe_load(r"""
stdset:
HH:
type: Hbase
Hzero:
type: Hbase
sub: '\mathrm{max},0'
Hmin:
type: Hbase
sub: '\mathrm{min}'
Hmaxf:
type: Hbase
sub: '\mathrm{max}'
Hfn:
type: Hfnbase
Dmax:
type: Dbase
sub: '\mathrm{max}'
Dminz:
type: Dbase
sub: '0'
Dminf:
type: Dbase
sub: '\mathrm{min}'
Dr:
type: Dbase
sub: '\mathrm{Rob}'
DHyp:
type: Dbase
sub: '\mathrm{H}'
Dhyp:
type: Dbase
sub: '\mathrm{h}'
DCoh:
type: DCohbase
DCohx:
type: DCohbase
DD:
type: DD
""")
baseqitobjs = yaml.safe_load("""
IdentProc:
type: IdentProc
ee:
type: ee
""")
_fixed_repl = {
'DSym': lambda self: self.DSym,
'HSym': lambda self: self.HSym,
}
class ExpandQitObjects(BaseFix):
r"""
Expand the definitions for the "QIT Objects" that are defined via the
{phfqit} package.
If applied along with :py:class:`latexpp.fixes.pkg.phfqit.ExpandMacros`, the
dependency on package {phfqit} should be removed.
Arguments:
- `qitobjs`: a dictionary of custom "QIT Objects" to expand. The dictionary
has the structure ``{macroname: qitobjspec, ...}``, where:
- `macroname` is the name of the macro representing this QIT object (no
leading backslash);
- `qitobjspec` is a dictionary with the following structure::
{
'type': <type>,
'sym': <sym>
<...>
}
The `<type>` is a string that must be one of the following QIT object
types: 'Hbase', 'Hfnbase', 'DD', 'Dbase', 'DCohbase', 'IdentProc', 'ee'.
This determines on one hand how the arguments to the macro are parsed
and on the other hand the template latex code that will serve as a
replacement for the QIT object invocation.
The `<sym>` is any string that will be used to override the default
symbol for this qit object type. The 'sym' key can be left out to use
the default symbol for the qit object.
Depending on `<type>`, you can specify further keys that specify how the
qit object is rendered (specified alongside `type: <type>` above, where
`<...>` stands):
- `<type>='Hbase'`: You may further specify ``'sub': <sub>`` which
specifies the subscript to add to the entropy object. This can be any
LaTeX code.
- `<type>='Hfnbase'`: You may further specify ``'sub': <sub>`` and
``'sup': <sup>`` which specifies the subscript and superscript to add
to the entropy object. Both can be any LaTeX code.
- `<type>='Dbase'`: You may further specify ``'sub': <sub>`` which
specifies the subscript to add to the relative entropy object. This
can be any LaTeX code. You can also specify 'default_epsilon' to give
a default value of the epsilon argument (any LaTeX code).
- `<type>='Dalpha'`: You can also specify 'default_alpha' and
'default_epsilon' to give a default value for these arguments (any
LaTeX code).
- `<type>='DD'`: There are no further keys you can specify.
- `<type>='DCohbase'`: There are no further keys you can specify.
- `<type>='IdentProc'`: There are no further keys you can specify.
- `<type>='ee'`: There are no further keys you can specify.
- `qitobjdef`: a list of built-in QIT object sets to use, designated by
builtin set name. Currently only the set named "stdset" is available,
i.e., you may use ``qitobjdef=[]`` (don't use built-in QIT objects) or
``qitobjdef=['stdset']`` (use built-in QIT objects).
- `HSym`: the default symbol to use for entropy-like QIT objects. Defaults
to 'H'
- `DSym`: the default symbol to use for relative-entropy-like QIT objects.
Defaults to 'D'
- `DCSym`: the default symbol to use for coherent-relative-entropy-like QIT
objects. Defaults to '\\hat{D}'
"""
def __init__(self, qitobjs=dict(), qitobjdef=['stdset'],
HSym='H', DSym='D', DCSym=r'\hat{D}'):
super().__init__()
self.qitobjs = dict(baseqitobjs)
for qitobjname in qitobjdef:
self.qitobjs.update(_qitobjdefs[qitobjname])
self.qitobjs.update(qitobjs)
self.HSym = HSym
self.DSym = DSym
self.DCSym = DCSym
def specs(self, **kwargs):
return dict(
macros= (
MacroSpec(mname, args_parser=PhfQitObjectArgsParser(self.qitargspec(m['type'])))
for mname, m in self.qitobjs.items()
)
)
def qitargspec(self, t):
return {
"IdentProc": "`[[{",
"ee": "^",
"Hbase": "`[[{[",
"Hfnbase": "`(",
"DD": "_^`{{",
"Dbase": "[`{{",
"Dalpha": "[[`{{",
"DCohbase": "[`{{{{{",
}.get(t)
def fix_node(self, n, **kwargs):
if n.isNodeType(latexwalker.LatexMacroNode) and n.macroname in _fixed_repl:
return _fixed_repl[n.macroname](self)
if not n.isNodeType(latexwalker.LatexMacroNode) or n.macroname not in self.qitobjs:
return None
m = self.qitobjs[n.macroname]
fixs = self.fix_qitobj(m, n)
#logger.debug(" --> %r", fixs)
return fixs
def fix_qitobj(self, m, n):
#logger.debug("fix_qitobj: m=%r, n=%r", m, n)
if m['type'] == 'IdentProc':
nsizespec, nsysA, nsysB, narg = n.nodeargd.argnlist
sym = m.get('sym', r'\mathrm{id}')
subscript = ''
A, B = '', ''
if nsysA is not None:
A = self.preprocess_contents_latex(nsysA)
if nsysB is not None:
B = self.preprocess_contents_latex(nsysB)
if A:
if B:
subscript = A + r'\to ' + B
else:
subscript = A
text = '{' + sym + '}'
if subscript:
text += '_{' + subscript + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'ee':
narg, = n.nodeargd.argnlist
sym = m.get('sym', r'e')
return '{'+sym+'}^{' + self.preprocess_contents_latex(narg) + '}'
if m['type'] == 'Hbase':
nsizespec, nstate, nepsilon, ntargetsys, ncondsys = n.nodeargd.argnlist
sym = m.get('sym', self.HSym)
sub = m.get('sub', None)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od
text += self.preprocess_contents_latex(ntargetsys)
if ncondsys is not None:
text += r'\,' + md + r'\,' + self.preprocess_contents_latex(ncondsys)
text += cd
if nstate is not None:
text += r'_{' + self.preprocess_contents_latex(nstate) + '}'
return text
if m['type'] == 'Hfnbase':
nsizespec, narg = n.nodeargd.argnlist
sub = m.get('sub', None)
sup = m.get('sup', None)
sym = m.get('sym', self.HSym)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if sup:
text += '^{' + sup + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'Hfnbase':
nsub, nsup, nsizespec, narg = n.nodeargd.argnlist
sub = m.get('sub', None)
sup = m.get('sup', None)
sym = m.get('sym', self.HSym)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if sup:
text += '^{' + sup + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'Dbase':
nepsilon, nsizespec, nstate, nrel = n.nodeargd.argnlist
sub = m.get('sub', None)
sym = m.get('sym', self.DSym)
default_epsilon = m.get('default_epsilon', None)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
elif default_epsilon:
text += '^{' + default_epsilon + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'Dalpha':
nalpha, nepsilon, nsizespec, nstate, nrel = n.nodeargd.argnlist
sym = m.get('sym', self.DSym)
default_alpha = m.get('default_alpha', None)
default_epsilon = m.get('default_epsilon', None)
text = '{' + sym + '}'
if nalpha is not None:
text += '_{' + self.preprocess_contents_latex(nalpha) + '}'
elif default_alpha:
text += '_{' + default_alpha + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
elif default_epsilon:
text += '^{' + default_epsilon + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'DD':
nsub, nsup, nsizespec, nstate, nrel = n.nodeargd.argnlist
sym = m.get('sym', self.DSym)
text = '{' + sym + '}'
if nsub is not None:
text += '_{' + self.preprocess_contents_latex(nsub) + '}'
if nsup is not None:
text += '^{' + self.preprocess_contents_latex(nsup) + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'DCohbase':
nepsilon, nsizespec, nstate, nX, nXp, nGX, nGXp = n.nodeargd.argnlist
sym = m.get('sym', self.DCSym)
process_arg_subscripts = m.get('process_arg_subscripts', False)
text = '{' + sym + '}'
tX = self.preprocess_contents_latex(nX)
tXp = self.preprocess_contents_latex(nXp)
if tX and tXp:
text += '_{' + tX + r'\to ' + tXp + '}'
elif tX:
text += '_{' + tX + '}'
elif tXp:
text += '_{' + tXp + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
if nstate.isNodeType(latexwalker.LatexGroupNode) \
and len(nstate.nodelist) \
and nstate.nodelist[0].isNodeType(latexwalker.LatexCharsNode) \
and nstate.nodelist[0].chars.lstrip().startswith('*'):
# remove '*'
statelatex = self.preprocess_contents_latex(nstate).lstrip(' \t*')
else:
if process_arg_subscripts:
statelatex = self.preprocess_contents_latex(nstate) + '_{' \
+ tX + r'\to ' + tXp + '}'
else:
statelatex = self.preprocess_contents_latex(nstate) + '_{' + tXp \
+ 'R_{' + tX + '}}'
text += od + statelatex + r'\,' + md + r'\,' + \
self.preprocess_contents_latex(nGX) + r',\,' \
+ self.preprocess_contents_latex(nGXp) + cd
return text
raise ValueError("Unknown phfqit macro type: {!r}".format(m))
def _delims(sizenode, opendelim, middelim, closedelim):
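# Build the (open, middle, close) delimiter LaTeX according to the optional sizing node:
# '*' expands to \left/\middle/\right, a macro like \big expands to \bigl/\big/\bigr.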
if sizenode is None:
return (opendelim, middelim, closedelim)
if sizenode.isNodeType(latexwalker.LatexGroupNode):
assert( len(sizenode.nodelist) == 1 )
sizenode = sizenode.nodelist[0]
if sizenode.isNodeType(latexwalker.LatexCharsNode) and sizenode.chars == '*':
return (r'\mathopen{}\left'+opendelim,
r'\mathclose{}\middle'+middelim+r'\mathopen{}',
r'\right'+closedelim+r'\mathclose{}')
if sizenode.isNodeType(latexwalker.LatexMacroNode):
mname = sizenode.macroname
return (r'\mathopen{}'+'\\'+mname+'l '+opendelim, # \bigl(
r'\mathopen{}'+'\\'+mname+' '+middelim, # \big|
r'\mathopen{}'+'\\'+mname+'r '+closedelim) # \bigr)
raise ValueError("unexpected optional sizing node : "+repr(sizenode))
def _delimtype(sizenode):
if sizenode is None:
return None
if sizenode.isNodeType(latexwalker.LatexGroupNode):
assert( len(sizenode.nodelist) == 1 )
sizenode = sizenode.nodelist[0]
if sizenode.isNodeType(latexwalker.LatexCharsNode) and sizenode.chars == '*':
return '*'
if sizenode.isNodeType(latexwalker.LatexMacroNode):
return '\\'+sizenode.macroname
mathtools_delims_macros = {
'abs': (r'\lvert', r'\rvert'),
'norm': (r'\lVert', r'\rVert'),
'avg': (r'\langle', r'\rangle'),
'ket': (r'\lvert', r'{%(1)s}', r'\rangle'),
'bra': (r'\langle', r'{%(1)s}', r'\rvert'),
'braket': (r'\langle', r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert\phfqitKetsBarSpace{%(2)s}',
r'\rangle'),
'ketbra': (r'\lvert', r'{%(1)s}%(delimsize)s\rangle %(phfqitKetsRLAngleSpace)s%(delimsize)s\langle{%(2)s}',
r'\rvert'),
'proj': (r'\lvert', r'{%(1)s}%(delimsize)s\rangle %(phfqitKetsRLAngleSpace)s%(delimsize)s\langle{%(1)s}',
r'\rvert'),
'matrixel': (r'\langle',
r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(2)s}'
+r'%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(3)s}',
r'\rangle'),
'dmatrixel': (r'\langle',
r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(2)s}'
+r'%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(1)s}',
r'\rangle'),
'innerprod': (r'\langle',
r'{%(1)s}%(phfqitBeforeCommaSpace)s,%(phfqitAfterCommaSpace)s{%(2)s}',
r'\rangle'),
'oket': (r'\lvert', r'{%(1)s}', r'\rrangle'),
'obra': (r'\llangle', r'{%(1)s}', r'\rvert'),
'obraket': (r'\llangle', r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}',
r'\rrangle'),
'oketbra': (r'\lvert', r'{%(1)s}%(delimsize)s\rrangle %(phfqitOKetsRLAngleSpace)s%(delimsize)s\llangle{%(2)s}',
r'\rvert'),
'oproj': (r'\lvert', r'{%(1)s}%(delimsize)s\rrangle %(phfqitOKetsRLAngleSpace)s%(delimsize)s\llangle{%(1)s}',
r'\rvert'),
'omatrixel': (r'\llangle',
r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}'
+r'%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(3)s}',
r'\rrangle'),
'odmatrixel': (r'\llangle',
r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}'
+r'%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(1)s}',
r'\rrangle'),
'intervalc': (r'[', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r']'),
'intervalo': (r']', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r'['),
'intervalco': (r'[', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r'['),
'intervaloc': (r']', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r']'),
}
def gate(x):
return r'\ifmmode\textsc{\lowercase{'+x+r'}}\else{\rmfamily\textsc{\lowercase{'+x+r'}}}\fi'
simple_substitution_macros = {
r'Hs': r'\mathscr{H}',
r'Ident': r'\mathds{1}',
# bits and gates
r'bit': {'qitargspec': '{', 'repl': r'\texttt{%(1)s}'},
r'bitstring': {'qitargspec': '{', 'repl': r'\ensuremath{\underline{\overline{\texttt{%(1)s}}}}'},
r'gate': {'qitargspec': '{',
'repl': gate("%(1)s") },
r'AND': gate('And'),
r'XOR': gate('Xor'),
r'CNOT': gate('C-Not'),
r'NOT': gate('Not'),
r'NOOP': gate('No-Op'),
# math groups
'uu': dict(qitargspec='(', repl=r'\mathrm{u}({%(1)s})'),
'UU': dict(qitargspec='(', repl=r'\mathrm{U}({%(1)s})'),
'su': dict(qitargspec='(', repl=r'\mathrm{su}({%(1)s})'),
'SU': dict(qitargspec='(', repl=r'\mathrm{SU}({%(1)s})'),
'so': dict(qitargspec='(', repl=r'\mathrm{so}({%(1)s})'),
'SO': dict(qitargspec='(', repl=r'\mathrm{SO}({%(1)s})'),
#'sl': dict(qitargspec='(', repl=r'\mathrm{sl}({%(1)s})'), # not in phfqit -- why? should add it there
#'SL': dict(qitargspec='(', repl=r'\mathrm{SL}({%(1)s})'),
'GL': dict(qitargspec='(', repl=r'\mathrm{GL}({%(1)s})'),
'SN': dict(qitargspec='(', repl=r'\mathrm{S}_{%(1)s}'),
}
math_operators = {
'tr': 'tr',
'supp': 'supp',
'rank': 'rank',
'linspan': 'span',
'spec': 'spec',
'diag': 'diag',
'Re': 'Re',
'Im': 'Im',
'poly': 'poly',
}
rx_hspace = re.compile(r'\\hspace\*?\{[^}]+\}')
def _delempties(d):
delkeys = [k for k, v in d.items() if v is None]
for k in delkeys:
del d[k]
class ExpandMacros(BaseFix):
r"""
Expand various macros defined by the {phfqit} package.
If applied along with :py:class:`latexpp.fixes.pkg.phfqit.ExpandQitObjects`,
the dependency on package {phfqit} should be removed.
Arguments:
- `subst`: a dictionary of substitutions to perform. The dictionary keys
are macro names without leading backslash, and values are dictionaries of
the form ``{'qitargspec': <qitargspec>, 'repl': <repl>}``. This has a
similar syntax to the :py:class:`latexpp.fixes.macro_subst.Subst` fix
class, but argument parsing allows an extended syntax. Instead of
specifying an `'argspec': <argspec>`, you specify `'qitargspec':
<qitargspec>` which provides argument parsing extensions to the usual
`argspec`.
Each character in `<qitargspec>` is one of:
- '*', '[', '{' represent the same kind of arguments as for 'argspec' in
:py:class:`latexpp.fixes.macro_subst.Subst`;
- '(' represents a mandatory argument in parentheses;
- '`' represents an optional argument introduced by ```<token or group>``;
- '_' represents an optional argument introduced by ``_<token or group>``;
- or '^' which represents an optional argument introduced by ``^<token or
group>``.
As for :py:class:`latexpp.fixes.macro_subst.Subst`, arguments are
available in the replacement string `<repl>` via the syntax ``%(n)s``
where `n` is the argument number.
A default set of substitutions is provided according to the macros
defined in the {phfqit} package; arguments here override the defaults.
You can disable individual default substitutions by providing the value
`None` (`null` in the YAML file) for the given macro name in the `subst`
dictionary.
- `ops`: a dictionary of "operator names" to substitute for. This is a
dictionary ``{<opname>: <opstring>, ...}`` where `<opname>` is the macro
name of the operator without leading backslash (e.g., ``tr`` for "trace"),
and `<opstring>` is the replacement LaTeX string that will be formatted as
an operator name. See `math_operator_fmt=` for how operators are
formatted.
A default set of operator names is provided according to the macros
defined in the {phfqit} package; arguments here override the defaults.
You can disable individual default operator names by providing the value
`None` (`null` in the YAML file) for the given operator name in the `ops`
dictionary.
- `math_operator_fmt`: The template string to use to format an operator. By
default, we use `\\operatorname{...}` to format the operator. The
template should contain the string `%(opname)s` which will be replaced by
the actual operator name. The default value is
``\operatorname{%(opname)s}``; if you prefer to use ``\mbox`` for
operators, you could set this to ``\mbox{%(opname)s}``.
- `delims`: A dictionary specifying macros that format delimited expressions
(such as `\\abs`, `\\ket`, `\\norm`, etc.). These macros take an optional
star (which indicates that the delimiters should be latex-dynamically
sized with ``\left`` and ``\right``), or an optional sizing macro in
square braces (such as ``\norm[\big]{...}``). After the optional star and
optional argument, the macro must take a fixed number of mandatory
arguments (e.g., one for ``\norm`` but two for ``\ketbra`` and three for
``\matrixel``).
The `delims` argument is a dictionary ``{<delim-macro-name>: <delim-spec>,
...}`` where `<delim-macro-name>` is the name of the macro without leading
backslash (e.g., 'ket' or 'abs'). The `<delim-spec>` is either:
- `<delim-spec>=(<left-delim>, <right-delim>)`, i.e., a two-item tuple or
list specifying the left and right delimiter. The macro must take a
single mandatory argument, which will be typeset between the two
delimiters. One must be able to size the delimiters using sizing
commands such as ``\big`` or ``\left``/``\right``.
- `<delim-spec>=(<left-delim>, <contents-repl>, <right-delim>)`, i.e., a
three-item tuple or list. The `<left-delim>` and `<right-delim>` are as
above. The `<contents-repl>` specifies how to format the contents
between the two delimiters, and should contain replacement strings of
the form ``%(n)s`` that expand into the `n`-th mandatory argument of the
macro. The number of mandatory arguments that the macro accepts is
inferred by inspecting the replacement string and looking for the
highest `n` in these replacement placeholders. Furthermore, you can use
the replacement placeholder ``%(delimsize)s``, which expands to the
relevant sizing command (e.g., ``\big``, ``\middle`` to match
``\left``/``\right``, or nothing if no sizing options are given) and
which can be placed immediately before a delimiter.
- `subst_use_hspace`: In all the above substitutions (including delimiters),
there are some custom sizing corrections in the form of ``\hspace*{XXex}``
that adjust the spacing between the different symbols in the expansion of
those macros. By default, they are kept in the replacement latex code so
that the document looks the same when compiled. If instead, you would
like simple substitutions without these fine-tuning spacing commands, set
`subst_use_hspace=False`.
"""
def __init__(self, *,
subst=None, ops=None, delims=None,
math_operator_fmt=r'\operatorname{%(opname)s}',
subst_use_hspace=True,
subst_space=None,
):
super().__init__()
if subst is None:
subst = {}
if ops is None:
ops = {}
if delims is None:
delims = {}
the_simple_substitution_macros = {}
the_simple_substitution_macros.update(simple_substitution_macros)
the_simple_substitution_macros.update(subst)
# remove any items which have a None value (used to indicate a default
# key should be removed from the YAML config)
the_math_operators = {}
the_math_operators.update(math_operators)
the_math_operators.update(ops)
the_simple_substitution_macros.update(**{
opname: math_operator_fmt%dict(opname=opv)
for opname, opv in the_math_operators.items()
})
# delimiter macros --> substitution rules
self.mathtools_delims_macros = dict(mathtools_delims_macros)
self.mathtools_delims_macros.update(delims)
_delempties(self.mathtools_delims_macros)
def delim_cfg(delimtuple):
if len(delimtuple) == 2:
return dict(qitargspec='`*[{',
repl=r'%(open_delim)s{%(1)s}%(close_delim)s')
numargs = max( int(m.group(1)) for m in re.finditer(r'\%\((\d)\)s', delimtuple[1]) )
return dict(qitargspec='`*[' + '{'*numargs,
repl='%(open_delim)s' + delimtuple[1] + '%(close_delim)s')
the_simple_substitution_macros.update(**{
mname: delim_cfg(delimtuple)
for mname, delimtuple in self.mathtools_delims_macros.items()
})
_delempties(the_simple_substitution_macros)
self.subst_space = dict(
phfqitKetsBarSpace=r'\mkern 1.5mu\relax ',
phfqitKetsRLAngleSpace=r'\mkern -1.8mu\relax ',
phfqitOKetsBarSpace=r'\mkern 1.5mu\relax ',
phfqitOKetsRLAngleSpace=r'\mkern -1.8mu\relax ',
phfqitKetsBeforeCommaSpace=r'',
phfqitKetsAfterCommaSpace=r'\mkern 1.5mu\relax ',
)
if subst_space is not None:
self.subst_space.update(subst_space)
# remove \hspace...'s if we don't want them.
if not subst_use_hspace:
self.subst_space = {k: '' for k in self.subst_space.keys()}
self.substitution_helper = MacroSubstHelper(
macros=the_simple_substitution_macros,
argspecfldname='qitargspec',
args_parser_class=PhfQitObjectArgsParser,
)
def specs(self, **kwargs):
# get specs from substitution helper
return dict(**self.substitution_helper.get_specs())
def fix_node(self, n, **kwargs):
# we treat all via the substitution helper
c = self.substitution_helper.get_node_cfg(n)
if c is not None:
# got a substitution. Check if it is a delimiter, which warrants
# further processing
if n.isNodeType(latexwalker.LatexMacroNode) and \
n.macroname in self.mathtools_delims_macros:
#
# it's a delimiter macro!
#
# check for `backtick argument after checking for * and/or [,
# because the latter have precedence
delimtype = None
if n.nodeargd.argnlist[1] is not None:
# with star
delimtype = '*'
elif n.nodeargd.argnlist[2] is not None \
and n.nodeargd.argnlist[2].nodelist:
delimtype = '\\'+n.nodeargd.argnlist[2].nodelist[0].macroname
elif n.nodeargd.argnlist[0] is not None:
# we have a backtick size
delimtype = _delimtype(n.nodeargd.argnlist[0])
if delimtype is None:
delims_pc = ('%s', '%s')
delimsize = ''
elif delimtype == '*':
# with star
delims_pc = (r'\mathopen{}\left%s', r'\right%s\mathclose{}')
delimsize = r'\middle'
else:
sizemacro = delimtype
delimsize = sizemacro+r' '
delims_pc = (sizemacro+r'l %s', sizemacro+r'r %s')
# get delim specification for this macro
delimchars = list(self.mathtools_delims_macros[n.macroname])
if len(delimchars) == 3:
# replacement string is already stored in substitution helper
delimchars = [delimchars[0], delimchars[2]]
# ensure we protect bare delimiter macros with a trailing space
for j in (0, 1):
if re.match(r'^\\[a-zA-Z]+$', delimchars[j]): # bare macro, protect with space
delimchars[j] = delimchars[j] + ' '
context = dict(open_delim=delims_pc[0]%delimchars[0],
delimsize=delimsize,
close_delim=delims_pc[1]%delimchars[1],
**self.subst_space)
return self.substitution_helper.eval_subst(
c,
n,
node_contents_latex=self.preprocess_contents_latex,
argoffset=3,
context=context
)
return self.substitution_helper.eval_subst(
c,
n,
node_contents_latex=self.preprocess_contents_latex
)
return None
# qitargspec: extension of argspec with:
# *, [, { -- as in latexwalker
# ` -- optional size arg
# ( -- mandatory arg in (...)
# _ -- optional arg (subscript) that is marked by '_', e.g. \DD_{min}{...}{...}
# ^ -- optional arg (superscript) that is marked by '^', e.g. \DD^{\epsilon}{...}{...}
def qitargspec_to_argspec(qitargspec):
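# Map the extended qitargspec to a plain pylatexenc argspec: '*', '[' and '{' pass through
# unchanged, every extended marker becomes an optional '[' slot.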
return "".join( x if x in ('*', '[', '{') else '[' for x in qitargspec )
class PhfQitObjectParsedArgs(ParsedMacroArgs):
def __init__(self, qitargspec, argnlist, **kwargs):
self.qitargspec = qitargspec
argspec = qitargspec_to_argspec(self.qitargspec)
super().__init__(argspec=argspec,
argnlist=argnlist,
**kwargs)
def __repr__(self):
return "{}(qitargspec={!r}, argnlist={!r})".format(
self.__class__.__name__, self.qitargspec, self.argnlist
)
def args_to_latex(self, recomposer):
return "".join(self._arg_to_latex(at, an, recomposer=recomposer)
for at, an in zip(self.qitargspec, self.argnlist))
def _arg_to_latex(self, argt, argn, recomposer):
if argn is None:
return ''
if argt == '{':
return recomposer.node_to_latex(argn)
elif argt == '[':
return recomposer.node_to_latex(argn)
elif argt == '*':
return recomposer.node_to_latex(argn)
elif argt == '`':
return '`' + recomposer.node_to_latex(argn)
elif argt == '(':
return recomposer.node_to_latex(argn)
elif argt in ('_', '^'):
return argt + recomposer.node_to_latex(argn)
raise RuntimeError("Invalid argt={!r} (argn={!r})".format(argt, argn))
class PhfQitObjectArgsParser(MacroStandardArgsParser):
def __init__(self, qitargspec):
self.qitargspec = qitargspec
argspec = qitargspec_to_argspec(self.qitargspec)
super().__init__(argspec=argspec)
def parse_args(self, w, pos, parsing_state=None):
if parsing_state is None:
parsing_state = w.make_parsing_state()
argnlist = []
p = pos
for argt in self.qitargspec:
#
# copied from MacroStandardArgsParser
#
if argt == '{':
(node, np, nl) = w.get_latex_expression(p, strict_braces=False,
parsing_state=parsing_state)
p = np + nl
argnlist.append(node)
elif argt == '[':
if self.optional_arg_no_space and w.s[p].isspace():
# don't try to read optional arg, we don't allow space
argnlist.append(None)
continue
optarginfotuple = w.get_latex_maybe_optional_arg(p, parsing_state=parsing_state)
if optarginfotuple is None:
argnlist.append(None)
continue
(node, np, nl) = optarginfotuple
p = np + nl
argnlist.append(node)
elif argt == '*':
# possible star.
tok = w.get_token(p)
if tok.tok == 'char' and tok.arg == '*':
# has star
node = w.make_node(latexwalker.LatexCharsNode,
parsing_state=parsing_state,
chars='*', pos=tok.pos, len=tok.len)
argnlist.append(node)
p = tok.pos + 1
else:
argnlist.append(None)
elif argt == '`':
# optional size arg introduced by "`"
tok = w.get_token(p)
if tok.tok in ('char', 'specials') and \
(tok.arg == '`' or getattr(tok.arg, 'specials_chars', None) == '`'):
# we have an optional size arg
p = tok.pos+1
tok = w.get_token(p)
# check for star
if tok.tok == 'char' and tok.arg == '*':
# has star
thenode = w.make_node(latexwalker.LatexCharsNode,
parsing_state=parsing_state,
chars='*', pos=tok.pos, len=tok.len)
argnlist.append(thenode)
p = tok.pos + 1
elif tok.tok == 'macro':
thenode = w.make_node(latexwalker.LatexMacroNode,
parsing_state=parsing_state,
macroname=tok.arg,
nodeargd=None,
pos=tok.pos, len=tok.len)
argnlist.append(thenode)
p = tok.pos+tok.len
else:
raise latexwalker.LatexWalkerParseError(
msg="Expected '*' or macro after `",
s=w.s,
pos=p
)
else:
# optional size arg not present
argnlist.append(None)
elif argt == '(':
(argnode, ppos, plen) = w.get_latex_braced_group(p, brace_type='(',
parsing_state=parsing_state)
argnlist.append( argnode )
p = ppos+plen
elif argt in ('_', '^'):
# optional argument introduced by "_" or "^"
tok = w.get_token(p)
# check for intro char "_"/"^"
if tok.tok == 'char' and tok.arg == argt:
# has this argument, read expression:
#optpos = tok.pos
p = tok.pos+tok.len
(node, np, nl) = w.get_latex_expression(p, strict_braces=False,
parsing_state=parsing_state)
p = np + nl
argnlist.append( node )
# argnlist.append(
# w.make_node(
# latexwalker.LatexGroupNode,
# parsing_state=parsing_state,
# nodelist=[ node ],
# delimiters=(argt, ''),
# pos=optpos,
# len=np+nl-optpos
# )
# )
else:
argnlist.append(None)
else:
raise latexwalker.LatexWalkerError(
"Unknown macro argument kind for macro: {!r}".format(argt)
)
parsed = PhfQitObjectParsedArgs(
qitargspec=self.qitargspec,
argnlist=argnlist,
)
return (parsed, pos, p-pos)
| 37.041339 | 115 | 0.533693 | 30,524 | 0.811075 | 0 | 0 | 0 | 0 | 0 | 0 | 15,708 | 0.417389 |
d26193d8f95b87350b91fd8517bcdb1ccfde7d7b | 3,936 | py | Python | Ch_5/linear_alg5.py | Skyblueballykid/linalg | 515eea984856ad39c823314178929876b21f8014 | ["MIT"] | null | null | null | Ch_5/linear_alg5.py | Skyblueballykid/linalg | 515eea984856ad39c823314178929876b21f8014 | ["MIT"] | null | null | null | Ch_5/linear_alg5.py | Skyblueballykid/linalg | 515eea984856ad39c823314178929876b21f8014 | ["MIT"] | null | null | null |
import numpy as np
import scipy
import sympy
from numpy import linalg as lg
from numpy.linalg import solve
from numpy.linalg import eig
from scipy.integrate import quad
# Question 1
'''
A. Determinant = -21
B. Determinant = -21
'''
m1 = np.array([[3, 0, 3], [2, 3, 3], [0, 4, -1]])
print(m1)
det1 = np.linalg.det(m1)
print(det1) # correct
# Question 2
# Det = -159
# Question 3
'''
A.
Replace row 3 with k times row 3.
B.
The determinant is multiplied by k.
'''
# Question 4
m2 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
det2 = np.linalg.det(m2)
print(det2) # correct
# Question 5
'''
A.
False, because the determinant of A can be computed by cofactor expansion across any row or down any column. Since the determinant of A is well defined, both of these cofactor expansions will be equal.
B.
False, because the determinant of a triangular matrix is the product of the entries along the main diagonal.
'''
# Question 6
'''
If two rows of A are interchanged to produce B, then det Upper B equals negative det A.
'''
# Question 7
'''
If a multiple of one row of A is added to another row to produce matrix B, then det Upper B equals det Upper A.
'''
# Question 8
m3 = sympy.Matrix([[1, 5, -6], [-1, -4, -5], [1, 4, 7]])
print(m3)
rref1 = m3.rref()
print(rref1)
m4 = np.array([[1, 5, -6], [-1, -4, -5], [1, 4, 7]])
det3 = np.linalg.det(m4)
print(det3) # correct, det = 2
# Question 9
# Switch the rows, det of original matrix = -10, det of changed matrix = 10
# Question 10
m5 = np.array([[-25, -4, -2], [-5, 12, -4], [0, -20, 6]])
det4 = np.linalg.det(m5)
print(det4)
# The matrix is invertible because the determinant of the matrix is not zero.
# Question 11
# formula
# Question 12
mat = np.array([[1,1,0], [3, 0, 5], [0, 1, -5]])
print(mat)
det8 = np.linalg.det(mat)
print(det8)
#Cramer's Rule
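# Cramer's rule: x_i = det(A_i(b)) / det(A), where A_i(b) is A with its i-th column
# replaced by the right-hand-side vector b.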
# Find A1b by replacing the first column with column b
mat2 = np.array([[2,1,0], [0, 0, 5], [3, 1, -5]])
print(mat2)
det9 = np.linalg.det(mat2)
print(det9)
print(det9/det8)
#Find A2b by replacing the second column with b
mat3 = np.array([[1, 2, 0], [3, 0, 5], [0, 3, -5]])
print(mat3)
det10 = np.linalg.det(mat3)
print(det10)
print(det10/det8)
#Find A3b by replacing the third column with b
mat4 = np.array([[1, 1, 2], [3, 0, 0], [0, 1, 3]])
print(mat4)
det11 = np.linalg.det(mat4)
print(det11)
print(det11/det8)
# Answers above are correct, but try again because I misread the print output
matr = np.array([[1,1,0], [5, 0, 4], [0, 1, -4]])
print(matr)
deter = np.linalg.det(matr)
print(deter)
# Find A1b by replacing first column with b
matr1 = np.array([[5, 1, 0], [0, 0, 4], [6, 1, -4]])
print(matr1)
deter1 = np.linalg.det(matr1)
print(deter1/deter)
# Find A2b by replacing second column with b
matr2 = np.array([[1, 5, 0], [5, 0, 4], [0, 6, -4]])
print(matr2)
deter2 = np.linalg.det(matr2)
print(deter2/deter)
# Find A3b by replacing third column with b
matr3 = np.array([[1, 1, 5], [5, 0, 0], [0, 1, 6]])
print(matr3)
deter3 = np.linalg.det(matr3)
print(deter3/deter)
# Question 13
# Compute the adjugate of the given matrix
matri = np.matrix([[2, 5, 4], [1, 0, 1], [3, 2, 2]])
print(matri)
# Hermitian transpose (not correct)
print(matri.getH())
# Det of matrix
determ = np.linalg.det(matri)
print(determ)
adj_matr = np.array([[-2, -2, 5], [1, -8, 2], [2, 11, -5]])
print(adj_matr * 1/determ) # Correct
# Question 14
m6 = np.array([[3, 7], [6, 2]])
print(m6)
det5 = np.linalg.det(m6)
print(det5) # correct
# The area of the parallelogram is the absolute value of the det. In this case = 36
# Question 15
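# A linear map x -> Ax scales areas by |det A|, so the area of the image of the
# parallelogram is |det A| times the area of the original parallelogram.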
# First find the area of the parallelogram
m7 = np.array([[-5, -5], [5, 10]])
det6 = np.linalg.det(m7)
print(det6) # -25
# next find the det of matrix A
m8 = np.array([[7, -8], [-2, 8]])
print(m8)
det7 = np.linalg.det(m8)
print(det7) # 40
# Finally, multiply the absolute value of the det of the first matrix (the area of the parallelogram) by the absolute value of the det of the second matrix
# Answer = 1000
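# Numerical check (added for illustration) of the area-scaling rule used above:
# area of T(S) = |det A| * area of S.
print(abs(det7) * abs(det6))  # expected: approximately 1000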
| 23.152941 | 202 | 0.653201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,872 | 0.474645 |
d26272e6f74f04e9cc5cdc1f6a997a3ad8bdee52
| 2,620 |
py
|
Python
|
builder/tasks_bullet/standimitation_task_bullet.py
|
FrankTianTT/laikago_robot
|
a5d54f10ea6a5620762c2210893ae8abe2f9ac05
|
[
"MIT"
] | 6 |
2020-12-02T07:49:36.000Z
|
2021-12-24T01:36:07.000Z
|
builder/tasks_bullet/standimitation_task_bullet.py
|
FrankTianTT/laikago_robot
|
a5d54f10ea6a5620762c2210893ae8abe2f9ac05
|
[
"MIT"
] | null | null | null |
builder/tasks_bullet/standimitation_task_bullet.py
|
FrankTianTT/laikago_robot
|
a5d54f10ea6a5620762c2210893ae8abe2f9ac05
|
[
"MIT"
] | 3 |
2021-01-12T14:09:40.000Z
|
2021-12-24T01:36:17.000Z
|
from builder.laikago_task_bullet import LaikagoTaskBullet
from builder.laikago_task import InitPose
import math
import numpy as np
ABDUCTION_P_GAIN = 220.0
ABDUCTION_D_GAIN = 0.3
HIP_P_GAIN = 220.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 220.0
KNEE_D_GAIN = 2.0
class LaikagoStandImitationBulletBase(LaikagoTaskBullet):
def __init__(self,
reward_mode='without_shaping',
run_mode='train'):
super(LaikagoStandImitationBulletBase, self).__init__(run_mode=run_mode,
reward_mode=reward_mode,
init_pose=InitPose.LIE)
self.imitation_action = np.array([-10, 30, -75,
10, 30, -75,
-10, 50, -75,
10, 50, -75]) * np.pi / 180
self._kp = [ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN]
self._kd = [ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN]
self._torque_limits = np.ones(12) * 40
class LaikagoStandImitationBullet0(LaikagoStandImitationBulletBase):
def __init__(self, run_mode='train', reward_mode='with_shaping',):
super(LaikagoStandImitationBullet0, self).__init__(run_mode=run_mode,
reward_mode=reward_mode)
@property
def is_healthy(self):
return not (self.done_r_bullet(threshold=30) or
self.done_p_bullet(threshold=30) or
self.done_y_bullet(threshold=30) or
self.done_height_bullet(threshold=0.25) or
self.done_region_bullet(threshold=3) or
self.done_toe_contact_long(threshold=30) or
self.done_toe_distance(threshold=0.2))
def cal_phi_function(self):
pos = np.array(self._env.get_history_angle()[0])
vel = np.array(self._env.get_history_velocity()[0])
target_pos = self.imitation_action
target_vel = np.zeros(12)
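        # Note (added): joint-space PD control law, tau = -Kp * (q - q_ref) - Kd * (qd - qd_ref);
        # the returned value is larger when the torques needed to track the imitation pose are small.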
motor_torques = -1 * (self._kp * (pos - target_pos)) - self._kd * (vel - target_vel)
return 10 / np.sum(np.abs(motor_torques))
def update_reward(self):
if self.is_healthy:
self.add_reward(1, 1)
| 42.95082 | 92 | 0.596183 | 2,363 | 0.901908 | 0 | 0 | 449 | 0.171374 | 0 | 0 | 45 | 0.017176 |
d26509e1b720c708ef4c28d0e261a51f29110955
| 425 |
py
|
Python
|
build.py
|
chrahunt/conan-protobuf
|
c49350d1c69d2e5b40305803f3184561f433554c
|
[
"MIT"
] | null | null | null |
build.py
|
chrahunt/conan-protobuf
|
c49350d1c69d2e5b40305803f3184561f433554c
|
[
"MIT"
] | null | null | null |
build.py
|
chrahunt/conan-protobuf
|
c49350d1c69d2e5b40305803f3184561f433554c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bincrafters import build_template_default
if __name__ == "__main__":
builder = build_template_default.get_builder()
# Todo: re-enable shared builds when issue resolved
# github issue: https://github.com/google/protobuf/issues/2502
    builder.items = [build for build in builder.items if build.options["protobuf:shared"] == False]
builder.run()
| 26.5625 | 98 | 0.701176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.432941 |
d2657376a64e94e969ed1edf71ca0efd3af9b3de
| 2,046 |
py
|
Python
|
pytrek/settings/LimitsSettings.py
|
hasii2011/PyArcadeStarTrek
|
370edbb62f15f69322aa7f109d6d36ebf20cbe4a
|
[
"MIT"
] | 1 |
2021-06-13T00:56:24.000Z
|
2021-06-13T00:56:24.000Z
|
pytrek/settings/LimitsSettings.py
|
hasii2011/PyArcadeStarTrek
|
370edbb62f15f69322aa7f109d6d36ebf20cbe4a
|
[
"MIT"
] | 94 |
2021-04-16T20:34:10.000Z
|
2022-01-13T19:58:20.000Z
|
pytrek/settings/LimitsSettings.py
|
hasii2011/PyArcadeStarTrek
|
370edbb62f15f69322aa7f109d6d36ebf20cbe4a
|
[
"MIT"
] | null | null | null |
from logging import Logger
from logging import getLogger
from pytrek.settings.BaseSubSetting import BaseSubSetting
from pytrek.settings.SettingsCommon import SettingsCommon
from pytrek.settings.SettingsCommon import SettingsNameValues
class LimitsSettings(BaseSubSetting):
LIMITS_SECTION: str = 'Limits'
MAXIMUM_STARS: str = 'maximum_stars'
MINIMUM_STAR_BASES: str = 'minimum_star_bases'
MAXIMUM_STAR_BASES: str = 'maximum_star_bases'
MAXIMUM_PLANETS: str = 'maximum_planets'
DEFAULT_FULL_SHIELDS: str = 'default_full_shields'
LIMITS_SETTINGS: SettingsNameValues = SettingsNameValues({
MAXIMUM_STARS: '4',
MINIMUM_STAR_BASES: '2',
MAXIMUM_STAR_BASES: '5',
MAXIMUM_PLANETS: '10',
DEFAULT_FULL_SHIELDS: '2500'
})
"""
This is a singleton based on the inheritance hierarchy
"""
def init(self, *args, **kwds):
self.logger: Logger = getLogger(__name__)
BaseSubSetting.init(self, *args, **kwds)
self._settingsCommon: SettingsCommon = SettingsCommon(self._config)
def addMissingSettings(self):
self._settingsCommon.addMissingSettings(sectionName=LimitsSettings.LIMITS_SECTION, nameValues=LimitsSettings.LIMITS_SETTINGS)
@property
def maximumStars(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MAXIMUM_STARS)
@property
def minimumStarBases(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MINIMUM_STAR_BASES)
@property
def maximumStarBases(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MAXIMUM_STAR_BASES)
@property
def maximumPlanets(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MAXIMUM_PLANETS)
@property
def defaultFullShields(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.DEFAULT_FULL_SHIELDS)
| 33.540984 | 133 | 0.729717 | 1,806 | 0.882698 | 0 | 0 | 735 | 0.359238 | 0 | 0 | 191 | 0.093353 |
d26592ea9cca4872fa15f4c5aedeb743d022345c
| 2,366 |
py
|
Python
|
tests/integration_tests/testYieldCurve.py
|
neoyung/IrLib
|
942793c49a477c9f5747410be74daf868391f289
|
[
"MIT"
] | 1 |
2021-10-04T03:15:50.000Z
|
2021-10-04T03:15:50.000Z
|
tests/integration_tests/testYieldCurve.py
|
neoyung/IrLib
|
942793c49a477c9f5747410be74daf868391f289
|
[
"MIT"
] | null | null | null |
tests/integration_tests/testYieldCurve.py
|
neoyung/IrLib
|
942793c49a477c9f5747410be74daf868391f289
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import date
from irLib.marketConvention.dayCount import ACT_ACT
from irLib.marketConvention.compounding import annually_k_Spot
from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve
import numpy as np
alias_disC = 'disC'
alias_forC = 'forC'
referenceDate = date(2020, 6, 26)
dayCount = ACT_ACT()
compounding = annually_k_Spot()
allowExtrapolation = False
# set synthetic data
timeIndex = [1, 2, 3, 4, 5]
flatR = 0.03
dF = ((flatR + 1) ** -np.arange(1, 6)).tolist()
forwardRates = (flatR * np.ones(5)).tolist()
spots = (flatR * np.ones(5)).tolist()
yearFrac = np.arange(1, 6).tolist()
par = (flatR * np.ones(5)).tolist()
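# Note (added for clarity): with a flat annually-compounded rate r, the synthetic data
# above are internally consistent: dF_t = (1 + r) ** -t, and every spot, forward and par
# rate equals r, which is what the conversion tests below rely on.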
t = date(2021, 6, 30) # using date(2021, 6, 26) instead would trigger the extrapolation warning
t1 = date(2022, 6, 26)
t2 = date(2023, 6, 26)
class testYieldCurveGetRate(unittest.TestCase):
def testDiscountCurve(self):
disC = discountCurve(alias_disC, referenceDate,
dayCount, compounding, allowExtrapolation)
disC.values = dF
disC.timeIndex = timeIndex
self.assertAlmostEqual(disC.getRate(t1, t2), (1 + flatR) ** -1) # almostEqual auto rounds to 7 decimals
def testForwardCurve(self):
forwardC = forwardCurve(alias_forC, referenceDate,
dayCount, compounding, allowExtrapolation)
forwardC.values = forwardRates
forwardC.timeIndex = timeIndex
self.assertAlmostEqual(forwardC.getRate(t, t1, t2), flatR)
def testSpot2Df(self):
self.assertCountEqual(np.round(yieldCurve.spot2Df(
spots, yearFrac, compounding), 8), np.round(dF, 8))
self.assertCountEqual(np.round(yieldCurve.spot2Df(
dF, yearFrac, compounding, reverse=True), 8), np.round(spots, 8))
def testDf2Forward(self):
self.assertCountEqual(np.round(yieldCurve.dF2Forward(
dF, yearFrac), 8), np.round(forwardRates, 8))
def testForward2Spot(self):
self.assertCountEqual(np.round(yieldCurve.forward2Spot(
forwardRates, yearFrac, compounding), 8), np.round(spots, 8))
def testPar2Df(self):
self.assertCountEqual(
np.round(yieldCurve.par2Df(par, yearFrac), 8), np.round(dF, 8))
self.assertCountEqual(np.round(yieldCurve.par2Df(
dF, yearFrac, reverse=True), 8), np.round(par, 8))
| 37.555556 | 111 | 0.674979 | 1,557 | 0.658073 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.056213 |
d26666e751893b180e8e39534e0d885f31d24a15
| 1,876 |
py
|
Python
|
src/python/setup.py
|
blaine141/NVISII
|
1675bb9bb74a1fe441bbb10ca98ea5cc4b0e4e24
|
[
"Apache-2.0"
] | 149 |
2021-02-09T11:35:23.000Z
|
2022-03-29T10:06:22.000Z
|
src/python/setup.py
|
blaine141/NVISII
|
1675bb9bb74a1fe441bbb10ca98ea5cc4b0e4e24
|
[
"Apache-2.0"
] | 66 |
2020-05-28T18:53:21.000Z
|
2021-02-07T05:34:14.000Z
|
src/python/setup.py
|
blaine141/NVISII
|
1675bb9bb74a1fe441bbb10ca98ea5cc4b0e4e24
|
[
"Apache-2.0"
] | 14 |
2021-02-09T08:51:44.000Z
|
2022-03-11T00:39:21.000Z
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
from setuptools import setup, dist
import wheel
import os
# required to generate a platlib folder required by audit tools
from setuptools.command.install import install
# for generating a wheel version from git tag
from setuptools_scm import get_version
class InstallPlatlib(install):
def finalize_options(self):
install.finalize_options(self)
if self.distribution.has_ext_modules():
self.install_lib = self.install_platlib
# force setuptools to recognize that this is
# actually a binary distribution
class BinaryDistribution(dist.Distribution):
def is_pure(self):
return False
    def has_ext_modules(self):
        return True
# This gets the version from the most recent git tag, potentially concatenating
# a commit hash at the end.
current_version = get_version(
root = "..",
relative_to = __file__,
fallback_version='0.0.0-dev0'
)
optix_version = os.environ.get("OPTIX_VERSION", None)
if optix_version:
current_version = current_version + "." + optix_version
print(current_version)
setup(
# This package is called nvisii
name='nvisii',
install_requires = ['numpy>=1.19.5'],
packages = ['nvisii', "nvisii.importers"], # include the package "nvisii"
# make sure the shared library is included
package_data = {'': ("*.dll", "*.pyd", "*.so")},
include_package_data=True,
description='',
# See class BinaryDistribution that was defined earlier
distclass=BinaryDistribution,
version = current_version,
author='Nate Morrical',
author_email='',
maintainer='',
maintainer_email='',
python_requires = ">=3.6",
cmdclass={'install': InstallPlatlib},
)
| 28 | 83 | 0.710554 | 339 | 0.180704 | 0 | 0 | 0 | 0 | 0 | 0 | 771 | 0.410981 |
d26a2cfd9b0c9f91c37793b0017bd2b85c25f09b
| 1,060 |
py
|
Python
|
portal_gun/configuration/schemas/compute_aws.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 69 |
2018-05-03T18:25:43.000Z
|
2021-02-10T11:37:28.000Z
|
portal_gun/configuration/schemas/compute_aws.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 7 |
2018-09-19T06:39:11.000Z
|
2022-03-29T21:55:08.000Z
|
portal_gun/configuration/schemas/compute_aws.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 11 |
2018-07-30T18:09:12.000Z
|
2019-10-03T15:36:13.000Z
|
from marshmallow import fields, Schema
from .provision import ProvisionActionSchema
class InstanceSchema(Schema):
type = fields.String(required=True)
image_id = fields.String(required=True)
availability_zone = fields.String(required=True)
ebs_optimized = fields.Boolean()
iam_fleet_role = fields.String(required=True)
class Meta:
ordered = True
class AuthSchema(Schema):
key_pair_name = fields.String(required=True)
identity_file = fields.String(required=True)
user = fields.String(required=True)
group = fields.String(required=True)
class Meta:
ordered = True
class NetworkSchema(Schema):
security_group_id = fields.String(required=True)
subnet_id = fields.String()
class Meta:
ordered = True
class ComputeAwsSchema(Schema):
provider = fields.String(required=True)
instance = fields.Nested(InstanceSchema, required=True)
auth = fields.Nested(AuthSchema, required=True)
network = fields.Nested(NetworkSchema, required=True)
provision_actions = fields.Nested(ProvisionActionSchema, many=True)
class Meta:
ordered = True
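# Minimal usage sketch (added for illustration; assumes marshmallow 3.x, where load()
# returns the deserialized dict or raises ValidationError; every value below is a
# placeholder, not a real resource):
if __name__ == "__main__":
    example = {
        "provider": "aws",
        "instance": {"type": "p2.xlarge", "image_id": "ami-12345678",
                     "availability_zone": "us-east-1a",
                     "iam_fleet_role": "arn:aws:iam::000000000000:role/spot-fleet"},
        "auth": {"key_pair_name": "my-key", "identity_file": "~/.ssh/my-key.pem",
                 "user": "ubuntu", "group": "ubuntu"},
        "network": {"security_group_id": "sg-12345678"},
    }
    print(ComputeAwsSchema().load(example))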
| 24.090909 | 68 | 0.779245 | 963 | 0.908491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d26a6bee5f324041d60e07e49f5e1f8b0a925d37
| 1,099 |
py
|
Python
|
extras/unbundle.py
|
mstriemer/amo-validator
|
35b502204183d783634207e7c2e7766ea1070ce8
|
[
"BSD-3-Clause"
] | 1 |
2015-07-15T20:06:09.000Z
|
2015-07-15T20:06:09.000Z
|
extras/unbundle.py
|
mstriemer/amo-validator
|
35b502204183d783634207e7c2e7766ea1070ce8
|
[
"BSD-3-Clause"
] | null | null | null |
extras/unbundle.py
|
mstriemer/amo-validator
|
35b502204183d783634207e7c2e7766ea1070ce8
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import os
import zipfile
from zipfile import ZipFile
from StringIO import StringIO
source = sys.argv[1]
target = sys.argv[2]
if not target.endswith("/"):
target = "%s/" % target
def _unbundle(path, target):
zf = ZipFile(path, 'r')
contents = zf.namelist()
for item in contents:
sp = item.split("/")
if not sp[-1]:
continue
if "__MACOSX" in item:
continue
print item, ">", target + item
cpath = target + "/".join(sp[:-1])
if not os.path.exists(cpath):
os.makedirs(cpath)
if item.endswith((".jar", ".xpi", ".zip")):
now = target + item
path_item = item.split("/")
path_item[-1] = "_" + path_item[-1]
path = target + "/".join(path_item)
buff = StringIO(zf.read(item))
_unbundle(buff, path + "/")
else:
f = open(target + item, 'w')
f.write(zf.read(item))
f.close()
zf.close()
if not os.path.exists(target):
os.mkdir(target)
_unbundle(source, target)
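# Example invocation (added): python unbundle.py my_addon.xpi extracted
# Contents of nested .jar/.xpi/.zip members are extracted into a sibling "_<member>" directory.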
| 22.895833 | 51 | 0.526843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.057325 |
d26afa5cb9899f00bda32076f95a8a1292054119
| 81,920 |
py
|
Python
|
linuxOperation/app/domain/forms.py
|
zhouli121018/core
|
f9700204349ecb22d45e700e9e27e79412829199
|
[
"MIT"
] | null | null | null |
linuxOperation/app/domain/forms.py
|
zhouli121018/core
|
f9700204349ecb22d45e700e9e27e79412829199
|
[
"MIT"
] | 1 |
2021-06-10T20:45:55.000Z
|
2021-06-10T20:45:55.000Z
|
linuxOperation/app/domain/forms.py
|
zhouli121018/core
|
f9700204349ecb22d45e700e9e27e79412829199
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
import time
import os
import math
import json
from lib.forms import BaseFied, BaseFieldFormatExt, DotDict, BaseCfilterActionFied, BaseCfilterOptionFied
from app.core.models import Mailbox, MailboxUserAttr, Domain, CoCompany, CoreAlias, DomainAttr, \
Department, CoreConfig, CoreMonitor, CoreWhitelist
from app.domain.models import Signature, SecretMail, WmCustomerInfo, WmCustomerCate, WmTemplate
from app.utils.MailboxLimitChecker import MailboxLimitChecker
from django import forms
from django.db.models import Sum,Count
from lib import validators
from lib.formats import dict_compatibility
from lib.tools import clear_redis_cache, download_excel, GenerateRsaKeys, generate_rsa, get_unicode, get_string,\
get_system_user_id, get_system_group_id, recursion_make_dir, get_random_string, \
phpLoads, phpDumps, get_client_request
from lib.validators import check_domain, check_email_ordomain
from django_redis import get_redis_connection
from django.utils.translation import ugettext as _
import base64
import time
import copy
import constants
import chardet
from auditlog.api import api_create_admin_log
from app.core.constants import MAILBOX_SEND_PERMIT, MAILBOX_RECV_PERMIT
def getSavePath(saveName):
    # Save location for the new webmail version
saveDir = u"/usr/local/u-mail/data/www/webmail/netdisk/media"
if not os.path.exists(saveDir):
recursion_make_dir(saveDir)
user_name = "umail_apache"
os.chown(saveDir, get_system_user_id(user_name), get_system_group_id(user_name) )
    # Save location for the old webmail version
#saveDir2 = u"/usr/local/u-mail/data/www/webmail/attachment"
savePath = u"%s/%s"%(saveDir, saveName)
return savePath
def saveLogoToPath(filedata):
filedata = base64.decodestring(filedata.encode("utf-8","ignore").strip())
user_name = "umail_apache"
now = time.strftime("%Y%m%d%H%M%S")
decimal,_= math.modf(time.time())
saveName = u"logo_%s_%s_%03d.jpg"%(get_random_string(5), now, int(decimal*1000))
savePath = getSavePath(saveName)
with open(savePath, "wb+") as f:
f.write(filedata)
os.chown(savePath, get_system_user_id(user_name), get_system_group_id(user_name) )
return saveName
def deleteLogoFromPath(saveName):
if not saveName:
return
savePath = getSavePath(saveName)
try:
if os.path.exists(savePath):
os.unlink(savePath)
except:
pass
# Base class for domain configuration forms
class DomainForm(DotDict):
PARAM_NAME = {}
PARAM_LIST = {}
PARAM_TYPE = {}
def __init__(self, domain_id, get=None, post=None, request={}):
self.request = request
self.domain_id = BaseFied(value=domain_id, error=None)
self.get = get or {}
self.post = post or {}
self.valid = True
self.initialize()
def initialize(self):
self.initBasicParams()
self.initPostParams()
def formatOptionValue(self, key, value):
if value.lower() == u"on":
return u"1"
return value
def initBasicParams(self):
for key, default in self.PARAM_LIST.items():
sys_type = self.PARAM_TYPE[ key ]
instance = DomainAttr.objects.filter(domain_id=self.domain_id.value,type=sys_type,item=key).first()
setattr(self,"instance_%s"%key,instance)
value = instance.value if instance else default
obj = BaseFied(value=value, error=None)
setattr(self,key,obj)
def initPostParams(self):
self.initPostParamsDefaultNone()
def initPostParamsDefaultNone(self):
data = self.post if self.post else self.get
if "domain_id" in data:
self.domain_id = BaseFied(value=data["domain_id"], error=None)
for key,default in self.PARAM_LIST.items():
if not key in data:
continue
value = self.formatOptionValue(key, data[key])
obj = BaseFied(value=value, error=None)
setattr(self,key,obj)
def initPostParamsDefaultDisable(self):
data = self.post if self.post else self.get
if "domain_id" in data:
self.domain_id = BaseFied(value=data["domain_id"], error=None)
for key,default in self.PARAM_LIST.items():
value = self.formatOptionValue(key, data.get(key, u"-1"))
obj = BaseFied(value=value, error=None)
setattr(self,key,obj)
def is_valid(self):
if not self.domain_id.value:
self.valid = False
self.domain_id.set_error(_(u"无效的域名"))
return self.valid
self.check()
return self.valid
def check(self):
return self.valid
def checkSave(self):
if self.is_valid():
self.save()
def paramSave(self):
for key in self.PARAM_LIST.keys():
obj = getattr(self,"instance_%s"%key,None)
value = getattr(self,key).value
if obj:
sys_type = self.PARAM_TYPE[ key ]
obj.domain_id = u"{}".format(self.domain_id.value)
obj.type = u"{}".format(sys_type)
obj.item = u"{}".format(key)
obj.value = u"{}".format(value)
obj.save()
else:
sys_type = self.PARAM_TYPE[ key ]
obj = DomainAttr.objects.create(
domain_id=u"{}".format(self.domain_id.value),
type=u"{}".format(sys_type),
item=u"{}".format(key),
value=u"{}".format(value)
)
value = obj.value
if len(value) > 100:
value = u"..."
param = u"{}({})".format(self.PARAM_NAME.get(obj.item,u''),u"{}-{}".format(obj.type,obj.item))
msg = _(u"域名参数:'{}' 值:{}").format(param,value)
api_create_admin_log(self.request, obj, 'domainconfig', msg)
clear_redis_cache()
def save(self):
self.paramSave()
class DomainBasicForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_BASIC_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_BASIC_PARAMS_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_BASIC_PARAMS_TYPE)
STATUS_LIST = dict(constants.DOMAIN_BASIC_STATUS)
def initialize(self):
self.initBasicParams()
self.initPostParams()
self.initStatus()
def initStatus(self):
checker = MailboxLimitChecker()
statMailbox = checker._stat_domain_mailbox_info(domain_id=self.domain_id.value)
mailboxUsed = statMailbox["mailbox_count"]
spaceUsed = statMailbox["mailbox_size"]
netdiskUsed = statMailbox["netdisk_size"]
aliasUsed = CoreAlias.objects.filter(domain_id=self.domain_id.value).count()
self.mailboxUsed = BaseFied(value=mailboxUsed, error=None)
self.aliasUsed = BaseFied(value=aliasUsed, error=None)
self.spaceUsed = BaseFied(value=spaceUsed, error=None)
self.netdiskUsed = BaseFied(value=netdiskUsed, error=None)
def check(self):
return self.valid
def save(self):
self.paramSave()
class DomainRegLoginWelcomeForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_REG_LOGIN_WELCOME_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_REG_LOGIN_WELCOME_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_REG_LOGIN_WELCOME_TYPE)
def initialize(self):
self.subject = u""
self.content = u""
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldData = json.loads(self.cf_welcome_letter.value)
self.subject = oldData.get(u"subject",u"")
self.content = oldData.get(u"content",u"")
except:
oldData = {}
if newData:
self.subject = newData.get(u"subject",u"")
self.content = newData.get(u"content",u"")
saveData = json.dumps( {"subject" : self.subject, "content": self.content } )
self.cf_welcome_letter = BaseFied(value=saveData, error=None)
class DomainRegLoginAgreeForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_REG_LOGIN_AGREE_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_REG_LOGIN_AGREE_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_REG_LOGIN_AGREE_TYPE)
# Send/receive restrictions
class DomainSysRecvLimitForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_RECV_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_RECV_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_RECV_TYPE)
SEND_LIMIT_RANGE = dict(MAILBOX_SEND_PERMIT)
RECV_LIMIT_RANGE = dict(MAILBOX_RECV_PERMIT)
def initialize(self):
self.initBasicParams()
self.initPostParams()
data = self.post if self.post else self.get
self.modify_all_limit_send = data.get("modify_all_limit_send", u"-1")
self.modify_all_limit_recv = data.get("modify_all_limit_recv", u"-1")
def initPostParams(self):
self.initPostParamsDefaultDisable()
        # The logic here is a bit roundabout: the parameter means "limit", so when it is disabled the checkbox is unchecked
data = self.post if self.post else self.get
if data:
self.limit_pop = BaseFied(value=data.get("limit_pop", "1"), error=None)
self.limit_imap = BaseFied(value=data.get("limit_imap", "1"), error=None)
self.limit_smtp = BaseFied(value=data.get("limit_smtp", "1"), error=None)
def check(self):
if not self.limit_send.value in self.SEND_LIMIT_RANGE:
self.limit_send.set_error(_(u"无效的发信权限"))
self.valid = False
return self.valid
if not self.limit_recv.value in self.RECV_LIMIT_RANGE:
self.limit_recv.set_error(_(u"无效的收信权限"))
self.valid = False
return self.valid
return self.valid
def save(self):
self.paramSave()
if self.modify_all_limit_send == u"1":
Mailbox.objects.filter(domain_id=self.domain_id.value).update(limit_send=self.limit_send.value)
if self.modify_all_limit_recv == u"1":
Mailbox.objects.filter(domain_id=self.domain_id.value).update(limit_recv=self.limit_recv.value)
@property
def getLimitSendParams(self):
return MAILBOX_SEND_PERMIT
@property
def getLimitRecvParams(self):
return MAILBOX_RECV_PERMIT
class DomainSysRecvWhiteListForm(DotDict):
def __init__(self, domain_id, type=u"send", get=None, post=None, request={}):
self.request = request
self.type = type
self.domain_id = BaseFied(value=domain_id, error=None)
self.get = get or {}
self.post = post or {}
self.valid = True
self.initialize()
@property
def getSendLimitWhiteList(self):
lists = CoreWhitelist.objects.filter(type=u"fix_send", operator=u"sys", domain_id=self.domain_id.value, mailbox_id=0).all()
num = 1
for d in lists:
yield num, d.id, d.email, str(d.disabled)
num += 1
@property
def getRecvLimitWhiteList(self):
lists = CoreWhitelist.objects.filter(type=u"fix_recv", operator=u"sys", domain_id=self.domain_id.value, mailbox_id=0).all()
num = 1
for d in lists:
yield num, d.id, d.email, str(d.disabled)
num += 1
def initialize(self):
def getPostMailbox(key):
            # Extract the mailbox from a key of the form entry_{{ mailbox }}_id
l = key.split("_")
l.pop(0)
flag = l.pop(-1)
mailbox = "_".join(l)
return mailbox
def setPostMailboxData(mailbox, key, value):
self.mailboxDict.setdefault(mailbox, {})
self.mailboxDict[mailbox][key] = value
#enddef
self.newMailbox = u""
self.mailboxDict = {}
self.newMailboxList = []
data = self.post if self.post else self.get
if not data:
return
newMailbox = data.get("new_mailbox", u"")
newMailboxList = data.get("new_mailbox_list", u"")
if newMailbox:
self.newMailbox = newMailbox
boxList = newMailboxList.split("|")
boxList = [box for box in boxList if box.strip()]
if boxList:
self.newMailboxList = boxList
for k,v in data.items():
if k.startswith("{}_".format(self.type)):
if k.endswith("_id"):
mailbox = getPostMailbox(k)
setPostMailboxData(mailbox, "id", v)
elif k.endswith("_delete"):
mailbox = getPostMailbox(k)
setPostMailboxData(mailbox, "delete", v)
for mailbox in self.mailboxDict.keys():
isDisabled = data.get(u"{}_{}_disabled".format(self.type, mailbox), u"1")
setPostMailboxData(mailbox, "disabled", isDisabled)
def is_valid(self):
if not self.domain_id.value:
self.valid = False
self.domain_id.set_error(_(u"无效的域名"))
return self.valid
self.check()
return self.valid
def check(self):
return self.valid
def checkSave(self):
if self.is_valid():
self.save()
def saveNewEmail(self, mailbox):
if mailbox in self.mailboxDict:
return
obj = CoreWhitelist.objects.create(type=u"fix_{}".format(self.type), operator=u"sys", domain_id=self.domain_id.value, mailbox_id=0, email=mailbox)
obj.save()
def saveOldEmail(self):
for mailbox, data in self.mailboxDict.items():
data = self.mailboxDict[mailbox]
entry_id = data.get("id", "")
if not entry_id:
continue
obj = CoreWhitelist.objects.filter(id=entry_id).first()
if not obj:
continue
if data.get("delete", u"-1") == u"1":
obj.delete()
else:
obj.operator=u"sys"
obj.type=u"fix_{}".format(self.type)
obj.disabled = data.get("disabled", "-1")
obj.save()
def save(self):
        # Add the newly submitted mailboxes first
if self.newMailbox:
self.saveNewEmail( self.newMailbox )
for mailbox in self.newMailboxList:
self.saveNewEmail( mailbox )
self.saveOldEmail()
# Security settings
class DomainSysSecurityForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_SECURITY_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_SECURITY_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_SECURITY_TYPE)
def initialize(self):
self.count = u"0"
self.timespan = u"0"
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldData = json.loads(self.cf_def_safe_login.value)
self.count = oldData.get(u"count",u"0")
self.timespan = oldData.get(u"timespan",u"0")
except:
oldData = {}
if newData:
for key,default in self.PARAM_LIST.items():
value = self.formatOptionValue(key, newData.get(key, u"-1"))
obj = BaseFied(value=value, error=None)
setattr(self,key,obj)
self.count = newData.get(u"count",u"0")
self.timespan = newData.get(u"timespan",u"0")
saveData = json.dumps( { "count": self.count, "timespan": self.timespan } )
self.cf_def_safe_login = BaseFied(value=saveData, error=None)
class DomainSysSecurityPasswordForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_SECURITY_PWD_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_SECURITY_PWD_VALUES)
PARAM_TYPE = dict(constants.DOMAIN_SYS_SECURITY_PWD_TYPE)
def initialize(self):
self.subject = u""
self.content = u""
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldData = json.loads(self.cf_def_login_limit_mail.value)
self.subject = oldData.get(u"subject",u"")
self.content = oldData.get(u"content",u"")
except:
oldData = {}
if newData:
self.subject = newData.get(u"subject",u"")
self.content = newData.get(u"content",u"")
saveData = json.dumps( {"subject" : self.subject, "content": self.content } )
self.cf_def_login_limit_mail = BaseFied(value=saveData, error=None)
# Password rules
class DomainSysPasswordForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_PASSWORD_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_PASSWORD_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_PASSWORD_TYPE)
PARAM_TYPE_LIMIT = constants.DOMAIN_SYS_PASSWORD_TYPE_LIMIT
PARAM_LEN_LIMIT = constants.DOMAIN_SYS_PASSWORD_LEN_LIMIT
PRAAM_RULE_VALUE = dict(constants.DOMAIN_SYS_PASSWORD_RULE_VALUE)
PARAM_RULE_LIMIT = dict(constants.DOMAIN_SYS_PASSWORD_RULE_LIMIT)
PARAM_FORBID_RULE = dict(constants.DOMAIN_SYS_PASSWORD_FORBID_RULE)
PARAM_FORBID_RULE_DEFAULT = dict(constants.DOMAIN_SYS_PASSWORD_FORBID_RULE_DEFAULT)
def initialize(self):
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldData = json.loads(self.cf_pwd_rule.value)
except:
oldData = {}
oldData = {} if not isinstance(oldData, dict) else oldData
for name, param in self.PRAAM_RULE_VALUE.items():
default = self.PARAM_RULE_LIMIT[param]
setattr(self, name, oldData.get(param, default))
if newData:
for key,default in self.PARAM_LIST.items():
value = self.formatOptionValue(key, newData.get(key, u"-1"))
obj = BaseFied(value=value, error=None)
setattr(self,key,obj)
for name, param in self.PRAAM_RULE_VALUE.items():
setattr(self, name, newData.get(param, u"-1"))
saveData = {}
for name, param in self.PRAAM_RULE_VALUE.items():
saveData[param] = getattr(self, name)
        # Since 2.2.59, password length validation is always enforced
saveData["passwd_size"] = "1"
self.cf_pwd_rule = BaseFied(value=json.dumps(saveData), error=None)
try:
oldData = json.loads(self.cf_pwd_forbid.value)
except:
oldData = {}
saveData = {}
for name, param in self.PARAM_FORBID_RULE.items():
default = self.PARAM_FORBID_RULE_DEFAULT[param]
if newData:
setattr(self, name, newData.get(param, '-1'))
else:
setattr(self, name, oldData.get(param, default))
saveData[param] = getattr(self, name)
self.cf_pwd_forbid = BaseFied(value=json.dumps(saveData), error=None)
def save(self):
self.paramSave()
        # Compatibility with the legacy strong-password rule switch on the PHP side
        # Turn off the PHP-side switch
DomainAttr.saveAttrObjValue(
domain_id=self.domain_id.value,
type=u"webmail",
item="sw_pass_severe",
value="-1"
)
        # Use the switch managed on the super-admin side
DomainAttr.saveAttrObjValue(
domain_id=self.domain_id.value,
type=u"webmail",
item="sw_pass_severe_new",
value="1"
)
# Third-party integration
class DomainSysInterfaceForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_INTERFACE_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_INTERFACE_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_INTERFACE_TYPE)
def initPostParams(self):
self.initPostParamsDefaultDisable()
class DomainSysInterfaceAuthApiForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_INTERFACE_AUTH_API_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_INTERFACE_AUTH_API_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_INTERFACE_AUTH_API_TYPE)
class DomainSysInterfaceIMApiForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_INTERFACE_IM_API_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_INTERFACE_IM_API_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_INTERFACE_IM_API_TYPE)
# Miscellaneous settings
class DomainSysOthersForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_OTHERS_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_OTHERS_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_OTHERS_TYPE)
SMSServiceList = (
(u'jiutian', _(u'短信通道一(九天)')),
(u'zhutong', _(u'短信通道二(助通)')),
)
@property
def get_sms_list(self):
return self.SMSServiceList
def initPostParams(self):
self.initPostParamsDefaultDisable()
data = self.post if self.post else self.get
        # SMS server configuration
confSms = DomainAttr.objects.filter(domain_id=self.domain_id.value,type="system",item="cf_sms_conf").first()
dataSms = "{}" if not confSms else confSms.value
try:
jsonSms = json.loads(dataSms)
jsonSms = {} if not isinstance(jsonSms, dict) else jsonSms
except:
jsonSms = {}
self.sms_type = jsonSms.get(u"type", u"")
self.sms_account = jsonSms.get(u"account", u"")
self.sms_password = jsonSms.get(u"password", u"")
self.sms_sign = jsonSms.get(u"sign", u"")
if "sms_type" in data:
self.sms_type = data["sms_type"]
if "sms_account" in data:
self.sms_account = data["sms_account"]
if "sms_password" in data:
self.sms_password = data["sms_password"]
if "sms_sign" in data:
self.sms_sign = data["sms_sign"]
jsonSms["type"] = self.sms_type
jsonSms["account"] = self.sms_account
jsonSms["password"] = self.sms_password
jsonSms["sign"] = self.sms_sign
self.cf_sms_conf = BaseFied(value=json.dumps(jsonSms), error=None)
self.sms_cost = None
try:
if self.request.user.licence_validsms and (self.sms_account and self.sms_password):
from lib import sms_interface
self.sms_cost = sms_interface.query_sms_cost(self.sms_type, self.sms_account, self.sms_password)
except Exception,err:
print err
def save(self):
super(DomainSysOthersForm, self).save()
        # In older versions the SMS switches are stored on the domain record
Domain.objects.filter(id=self.domain_id.value).update(
recvsms=self.sw_recvsms.value,
sendsms=self.sw_sendsms.value,
)
class DomainSysOthersCleanForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_OTHERS_SPACE_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_OTHERS_SPACE_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_OTHERS_SPACE_TYPE)
def initialize(self):
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldCleanData = json.loads(self.cf_spaceclean.value)
except:
oldCleanData = {}
try:
oldMailData = json.loads(self.cf_spacemail.value)
except:
oldMailData = {}
oldCleanData = {} if not isinstance(oldCleanData, dict) else oldCleanData
oldMailData = {} if not isinstance(oldMailData, dict) else oldMailData
self.general_keep_time = get_unicode(oldCleanData.get(u"general_keep_time", u"0"))
self.sent_keep_time = get_unicode(oldCleanData.get(u"sent_keep_time", u"0"))
self.spam_keep_time = get_unicode(oldCleanData.get(u"spam_keep_time", u"0"))
self.trash_keep_time = get_unicode(oldCleanData.get(u"trash_keep_time", u"0"))
self.subject = oldMailData.get(u"subject", u"").strip()
self.content = oldMailData.get(u"content", u"")
self.warn_rate=get_unicode(oldMailData.get(u"warn_rate", u"85"))
if newData:
self.general_keep_time = get_unicode(newData.get(u"general_keep_time", u"0"))
self.sent_keep_time = get_unicode(newData.get(u"sent_keep_time", u"0"))
self.spam_keep_time = get_unicode(newData.get(u"spam_keep_time", u"0"))
self.trash_keep_time = get_unicode(newData.get(u"trash_keep_time", u"0"))
self.subject = newData.get(u"subject", u"").strip()
self.content = newData.get(u"content", u"")
self.warn_rate=get_unicode(newData.get(u"warn_rate", u"85"))
saveCleanData = {
u"general_keep_time" : self.general_keep_time,
u"sent_keep_time" : self.sent_keep_time,
u"spam_keep_time" : self.spam_keep_time,
u"trash_keep_time" : self.trash_keep_time,
}
saveMailData = {
u"subject" : self.subject,
u"content" : self.content,
u"warn_rate" : self.warn_rate,
}
self.cf_spaceclean = BaseFied(value=json.dumps(saveCleanData), error=None)
self.cf_spacemail = BaseFied(value=json.dumps(saveMailData), error=None)
class DomainSysOthersAttachForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_OTHERS_ATTACH_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_OTHERS_ATTACH_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_OTHERS_ATTACH_TYPE)
def initialize(self):
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldData = json.loads(self.cf_online_attach.value)
except:
oldData = {}
        # These settings must be initialized in the database when no record exists yet, otherwise the app side reads the wrong values
autoSave = False
        # "size" is no longer used as of 2.2.58; before that version it was the save-as size applied to all mail types.
        # Since 2.2.58 saved mail is distinguished by type, so this value is kept only as the default.
self.client_size_default = oldData.get("size", "50")
self.client_url = oldData.get("url", "")
self.client_public = oldData.get("public", "-1")
self.client_size_list = oldData.get("size_list", self.client_size_default)
self.client_size_in = oldData.get("size_in", self.client_size_default)
self.client_size_out = oldData.get("size_out", self.client_size_default)
        # Read the default download URL from the system settings
if not self.client_url.strip():
obj = DomainAttr.objects.filter(domain_id=0,type=u'system',item=u'view_webmail_url').first()
self.client_url = obj.value if obj else ""
        # If the system settings are not configured either, use a default value derived from the request
if not self.client_url.strip() and self.request:
self.client_url = get_client_request(self.request)
autoSave = True
if newData:
self.client_size_list = newData.get("client_size_list", self.client_size_default)
self.client_size_in = newData.get("client_size_in", self.client_size_default)
self.client_size_out = newData.get("client_size_out", self.client_size_default)
self.client_url = newData.get("client_url", "")
self.client_public = newData.get("client_public", "-1")
saveData = {
u"url" : self.client_url,
u"size" : self.client_size_default,
u"size_list" : self.client_size_list,
u"size_in" : self.client_size_in,
u"size_out" : self.client_size_out,
u"public" : self.client_public,
}
self.cf_online_attach = BaseFied(value=json.dumps(saveData), error=None)
if autoSave:
self.paramSave()
class DomainSignDomainForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SIGN_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SIGN_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SIGN_TYPE)
def initialize(self):
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldData = json.loads(self.cf_domain_signature.value)
except:
oldData = {}
oldData = {} if not isinstance(oldData, dict) else oldData
self.content_html = oldData.get(u"html",u"")
if self.content_html and u"new" in oldData:
self.content_html = base64.decodestring(self.content_html)
self.content_text = oldData.get(u"text",u"")
if newData:
self.content_html = newData.get(u"content_html", u"")
self.content_text = newData.get(u"content_text", u"-1")
sw_domain_signature = newData.get("sw_domain_signature", "-1")
self.sw_domain_signature = BaseFied(value=sw_domain_signature, error=None)
saveData = {
u"html" : get_unicode(base64.encodestring(get_string(self.content_html))),
u"text" : self.content_text,
u"new" : u"1", #针对老版本的兼容标记
}
self.cf_domain_signature = BaseFied(value=json.dumps(saveData), error=None)
class DomainSignPersonalForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SIGN_PERSONAL_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SIGN_PERSONAL_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SIGN_PERSONAL_TYPE)
PARAM_LIST_DEFAULT = dict(constants.DOMAIN_SIGN_PERSONAL_VALUE_DEFAULT)
def initialize(self):
self.initBasicParams()
newData = self.post if self.post else self.get
if "domain_id" in newData:
self.domain_id = BaseFied(value=newData["domain_id"], error=None)
try:
oldData = json.loads(self.cf_personal_sign.value)
except:
oldData = {}
oldData = {} if not isinstance(oldData, dict) else oldData
for name, default in self.PARAM_LIST_DEFAULT.items():
setattr(self, name, oldData.get(name, default) )
if self.personal_sign_templ:
self.personal_sign_templ = get_unicode(base64.decodestring(get_string(self.personal_sign_templ)))
if newData:
self.personal_sign_new = get_unicode(newData.get(u"personal_sign_new", u"-1"))
self.personal_sign_forward = get_unicode(newData.get(u"personal_sign_forward", u"-1"))
self.personal_sign_auto = get_unicode(newData.get(u"personal_sign_auto", u"-1"))
self.personal_sign_templ = get_unicode(newData.get(u"content_html", u""))
saveData = {
u"personal_sign_new" : self.personal_sign_new,
u"personal_sign_forward" : self.personal_sign_forward,
u"personal_sign_auto" : self.personal_sign_auto,
u"personal_sign_templ" : get_unicode(base64.encodestring(get_string(self.personal_sign_templ))),
}
self.cf_personal_sign = BaseFied(value=json.dumps(saveData), error=None)
try:
import HTMLParser
html_parser = HTMLParser.HTMLParser()
            # Unescape so the HTML displays correctly
self.personal_sign_templ2 = html_parser.unescape(self.personal_sign_templ)
except Exception,err:
print str(err)
self.personal_sign_templ2 = self.personal_sign_templ
def applyAll(self):
import cgi
caption = _(u"系统默认签名")
content = self.personal_sign_templ
content = cgi.escape(content)
content = get_unicode(content)
is_default = "1" if self.personal_sign_new == "1" else "-1"
is_fwd_default = "1" if self.personal_sign_forward == "1" else "-1"
obj_list = Mailbox.objects.filter(domain_id=self.domain_id.value)
for mailbox in obj_list:
mailbox_id = mailbox.id
obj_sign = Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").first()
if obj_sign:
obj_sign.content = u"{}".format(content)
obj_sign.default = u"{}".format(is_default)
obj_sign.refw_default = u"{}".format(is_fwd_default)
obj_sign.save()
else:
obj_sign = Signature.objects.create(
domain_id=u"{}".format(self.domain_id.value),
mailbox_id=u"{}".format(mailbox_id),
type=u"domain",
caption=u"{}".format(caption),
content=u"{}".format(content),
default=u"{}".format(is_default),
refw_default=u"{}".format(is_fwd_default),
)
if is_default == "1":
Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id).update(default='-1')
Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(default='1')
else:
Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(default='-1')
if is_fwd_default == "1":
Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id).update(refw_default='-1')
Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(refw_default='1')
else:
Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(refw_default='-1')
class DomainModuleHomeForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_MODULE_HOME_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_MODULE_HOME_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_MODULE_HOME_TYPE)
def initPostParams(self):
self.initPostParamsDefaultDisable()
class DomainModuleMailForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_MODULE_MAIL_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_MODULE_MAIL_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_MODULE_MAIL_TYPE)
def initialize(self):
self.initBasicParams()
self.sw_save_client_sent_email_old = self.sw_save_client_sent_email.value
self.initPostParamsDefaultDisable()
def save(self):
super(DomainModuleMailForm, self).save()
        # If the value differs from the previous one, update the switch for every mailbox user
if self.sw_save_client_sent_email_old != self.sw_save_client_sent_email.value:
for obj in Mailbox.objects.filter(domain_id=self.domain_id.value).all():
obj_attr = MailboxUserAttr.objects.filter(mailbox_id=obj.id, item=u'save_client_sent').first()
if not obj_attr:
obj_attr = MailboxUserAttr.objects.create(
domain_id=self.domain_id.value,
mailbox_id=obj.id,
item=u'save_client_sent',
)
obj_attr.type = u"user"
obj_attr.value = self.sw_save_client_sent_email.value
obj_attr.save()
class DomainModuleSetForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_MODULE_SET_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_MODULE_SET_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_MODULE_SET_TYPE)
def initialize(self):
self.initBasicParams()
self.initPostParamsDefaultDisable()
data = self.post if self.post else self.get
        # sw_userbwlist corresponds to the userbwlist column of core_domain, so it is handled specially
if not data:
domainObj = Domain.objects.filter(id=self.domain_id.value).first()
sw_userbwlist = "-1" if not domainObj else domainObj.userbwlist
self.sw_userbwlist = BaseFied(value=get_unicode(sw_userbwlist), error=None)
else:
self.sw_userbwlist = BaseFied(value=get_unicode(data.get("sw_userbwlist", "-1")), error=None)
def check(self):
return self.valid
def save(self):
domainObj = Domain.objects.filter(id=self.domain_id.value).first()
domainObj.userbwlist = u"{}".format(self.sw_userbwlist.value)
domainObj.save()
self.paramSave()
class DomainModuleOtherForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_MODULE_OTHER_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_MODULE_OTHER_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_MODULE_OTHER_TYPE)
def initPostParams(self):
self.initPostParamsDefaultDisable()
# Security classification (secret level) management
class DomainSecretForm(DotDict):
def __init__(self, get=None, post=None, request={}):
self.request = request
self.get = get or {}
self.post = post or {}
self.error = u""
self.action = u""
self.grade = constants.DOMAIN_SECRET_GRADE_1
self.addList = []
self.delList = []
self.valid = True
self.initialize()
def initialize(self):
data = self.post if self.post else self.get
if data:
self.action = data.get(u"action", u"")
self.grade = data.get(u"grade", constants.DOMAIN_SECRET_GRADE_1)
if self.action == u"new":
boxList = data.get(u"mailbox", "")
boxList = [box.strip() for box in boxList.split("|") if box.strip()]
self.addList = boxList
if self.action == u"del":
idList = data.get(u"idlist", "")
idList = [box.strip() for box in idList.split("|") if box.strip()]
self.delList = idList
for grade, name in constants.DOMAIN_SECRET_GRADE_ALL:
grade_num = len(SecretMail.objects.filter(secret_grade=grade))
setattr(self, "gradeNum_{}".format( int(grade)+1 ), grade_num)
@staticmethod
def getBoxListByGrade(grade):
dataList = []
lists = SecretMail.objects.filter(secret_grade=grade)
for d in lists:
mailbox_id = d.mailbox_id
boxObj = Mailbox.objects.filter(id=mailbox_id).first()
mailbox = _(u"已删除帐号") if not boxObj else boxObj.username
dataList.append( {
"id" : d.id,
"mailbox" : mailbox,
}
)
return dataList
def is_valid(self):
self.check()
return self.valid
def check(self):
if self.action == u"new":
for mailbox in self.addList:
boxObj = Mailbox.objects.filter(username=mailbox).first()
if not boxObj:
self.error = _(u"邮箱帐号不存在")
self.valid = False
return self.valid
return self.valid
def save(self):
if self.action == u"new":
for mailbox in self.addList:
boxObj = Mailbox.objects.filter(username=mailbox).first()
if not boxObj:
continue
obj = SecretMail.objects.filter(secret_grade=self.grade, mailbox_id=boxObj.id).first()
if not obj:
SecretMail.objects.create(secret_grade=self.grade, mailbox_id=boxObj.id)
if self.action == u"del":
for entry_id in self.delList:
SecretMail.objects.filter(id=entry_id).delete()
# Add a public address book entry
class DomainPublicInputForm(DotDict):
def __init__(self, domain_id, instance=None, post=None, get=None, request={}):
self.request = request
self.post = post or {}
self.get = get or {}
self.error = u""
self.domain_id = int(domain_id)
self.instance = instance
self.valid = True
self.initialize()
def initialize(self):
self.fullname = BaseFied(value=u"", error=None)
self.cate_id = BaseFied(value=0, error=None)
self.gender = BaseFied(value=u"F", error=None)
self.birthday = BaseFied(value=u"", error=None)
self.pref_email = BaseFied(value=u"", error=None)
self.pref_tel = BaseFied(value=u"", error=None)
self.home_tel = BaseFied(value=u"", error=None)
self.work_tel = BaseFied(value=u"", error=None)
self.im_qq = BaseFied(value=u"", error=None)
self.im_msn = BaseFied(value=u"", error=None)
self.remark = BaseFied(value=u"", error=None)
data = self.post if self.post else self.get
if self.instance:
self.fullname = BaseFied(value=self.instance.fullname, error=None)
self.cate_id = BaseFied(value=self.instance.cate_id, error=None)
self.gender = BaseFied(value=self.instance.gender, error=None)
self.birthday = BaseFied(value=self.instance.birthday, error=None)
self.pref_email = BaseFied(value=self.instance.pref_email, error=None)
self.pref_tel = BaseFied(value=self.instance.pref_tel, error=None)
self.home_tel = BaseFied(value=self.instance.home_tel, error=None)
self.work_tel = BaseFied(value=self.instance.work_tel, error=None)
self.im_qq = BaseFied(value=self.instance.im_qq, error=None)
self.im_msn = BaseFied(value=self.instance.im_msn, error=None)
self.remark = BaseFied(value=self.instance.remark, error=None)
if data:
self.fullname = BaseFied(value=data[u"fullname"], error=None)
self.cate_id = BaseFied(value=data.get(u"cate_id",0), error=None)
self.gender = BaseFied(value=data.get(u"gender",u"F"), error=None)
self.birthday = BaseFied(value=data[u"birthday"], error=None)
self.pref_email = BaseFied(value=data[u"pref_email"], error=None)
self.pref_tel = BaseFied(value=data[u"pref_tel"], error=None)
self.home_tel = BaseFied(value=data[u"home_tel"], error=None)
self.work_tel = BaseFied(value=data[u"work_tel"], error=None)
self.im_qq = BaseFied(value=data[u"im_qq"], error=None)
self.im_msn = BaseFied(value=data[u"im_msn"], error=None)
self.remark = BaseFied(value=data[u"remark"], error=None)
def is_valid(self):
self.check()
return self.valid
def check(self):
fullname = u"" if not self.fullname.value.strip() else self.fullname.value.strip()
if not fullname:
self.fullname.set_error(_(u"请填写姓名"))
self.valid = False
return self.valid
pref_email = u"" if not self.pref_email.value.strip() else self.pref_email.value.strip()
if not pref_email:
self.pref_email.set_error(_(u"请填写邮箱地址"))
self.valid = False
return self.valid
if not check_email_ordomain(pref_email):
self.pref_email.set_error(_(u"不合法的邮箱地址格式"))
self.valid = False
return self.valid
        # Birthday should not be a required field; fill it with a default value
birthday = u"" if not self.birthday.value.strip() else self.birthday.value.strip()
if not birthday:
self.birthday = BaseFied(value="1970-01-01", error=None)
return self.valid
def save(self):
if self.instance:
obj = self.instance
obj.domain_id = u"{}".format(self.domain_id)
obj.fullname = u"{}".format(self.fullname.value)
obj.cate_id = u"{}".format(self.cate_id.value)
obj.gender = u"{}".format(self.gender.value)
obj.birthday = u"{}".format(self.birthday.value)
obj.pref_email = u"{}".format(self.pref_email.value)
obj.pref_tel = u"{}".format(self.pref_tel.value)
obj.home_tel = u"{}".format(self.home_tel.value)
obj.work_tel = u"{}".format(self.work_tel.value)
obj.im_qq = u"{}".format(self.im_qq.value)
obj.im_msn = u"{}".format(self.im_msn.value)
obj.remark = u"{}".format(self.remark.value)
obj.save()
else:
WmCustomerInfo.objects.create(
domain_id=u"{}".format(self.domain_id),
fullname=u"{}".format(self.fullname.value),
cate_id=u"{}".format(self.cate_id.value),
gender=u"{}".format(self.gender.value),
birthday=u"{}".format(self.birthday.value),
pref_email=u"{}".format(self.pref_email.value),
pref_tel=u"{}".format(self.pref_tel.value),
home_tel=u"{}".format(self.home_tel.value),
work_tel=u"{}".format(self.work_tel.value),
im_qq=u"{}".format(self.im_qq.value),
im_msn=u"{}".format(self.im_msn.value),
remark=u"{}".format(self.remark.value),
)
@property
def get_cate_list(self):
return WmCustomerCate.objects.filter(domain_id=self.domain_id).all()
# Bulk import / delete address book entries
class DomainPublicImportForm(DotDict):
COL_ADD_LIST = [
"fullname", "pref_email", "pref_tel", "cate_type", "remark",
"birthday", "gender", "work_tel", "home_tel", "im_qq", "im_msn"
]
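    # Note (added): each import line supplies these columns in this order, separated by
    # tabs or semicolons; use "${EMPTY}" to leave a column blank, e.g. (illustrative):
    # fullname;user@example.com;13800000000;VIP;remark;1980-01-01;F;${EMPTY};${EMPTY};qq;msn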
def __init__(self, domain_id, action=u"import_add", instance=None, post=None, get=None, request={}):
self.request = request
self.post = post or {}
self.get = get or {}
self.action = action
self.error = u""
self.domain_id = int(domain_id)
self.instance = instance
self.valid = True
self.data_list = []
self.insert_list = []
self.fail_list = []
self.import_error = []
self.initialize()
def initialize(self):
data = self.post if self.post else self.get
import_data = ""
if "import_data" in data:
import_data = data["import_data"]
import_data = import_data.replace("\r\n","\n")
import_data = import_data.replace("\r","\n")
if self.action == "import_del":
for line in import_data.split("\n"):
fullname = self.joinString(line)
if not fullname:
continue
self.data_list.append( (line,fullname) )
else:
for line in import_data.split("\n"):
line = self.joinString(line)
if not line:
continue
data = {}
for idx,col in enumerate(line.split("\t")):
if idx >= len(self.COL_ADD_LIST):
break
col_name = self.COL_ADD_LIST[idx]
if col.upper() in ("${EMPTY}","EMPTY"):
col = ""
data[ col_name ] = col.strip()
if not data:
continue
self.data_list.append( (line,data) )
def joinString(self, line):
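        # Note (added): normalizes a raw import line. ';' is treated like '\t' as a
        # field separator, and consecutive or empty separators are collapsed so the
        # caller can split the result on '\t' and see only non-empty columns.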
code_1 = []
code_2 = []
line = line.replace(";","\t")
for s in line:
if s == "\t":
if code_1:
code_2.append( "".join(code_1) )
code_1 = []
continue
code_1.append( s )
if code_1:
code_2.append( "".join(code_1) )
return "\t".join(code_2)
def checkSave(self):
if self.action == "import_add":
self.checkImportAdd()
elif self.action == "import_del":
self.checkImportDel()
self.save()
return False if self.import_error else True
def checkImportAdd(self):
for line,data in self.data_list:
if len(data) < len(self.COL_ADD_LIST):
self.import_error.append( _(u"数据列不足: {}").format(line) )
continue
self.insert_list.append( data )
def checkImportDel(self):
for line,fullname in self.data_list:
self.insert_list.append( fullname )
def save(self):
cate_id_map = {}
if self.action == "import_add":
for data in self.insert_list:
try:
fullname = data["fullname"].strip()
pref_email = data["pref_email"].strip()
pref_tel = data["pref_tel"].strip()
cate_type = data["cate_type"].strip()
remark = data["remark"].strip()
birthday = data["birthday"].strip()
gender = data["gender"].strip()
work_tel = data["work_tel"].strip()
home_tel = data["home_tel"].strip()
im_qq = data["im_qq"].strip()
im_msn = data["im_msn"].strip()
except Exception,err:
self.import_error.append( _(u"数据格式错误: {} : {}").format(line,get_unicode(err)) )
continue
if not pref_email or not check_email_ordomain(pref_email):
self.import_error.append( _(u"不合法的邮箱地址: {} : '{}'").format(line,pref_email) )
continue
if not fullname:
self.import_error.append( _(u"未填写姓名: {} : '{}'").format(line,fullname) )
continue
if not birthday:
birthday = u"1970-01-01"
cate_id = 0
if cate_type:
cate_id = cate_id_map.get(cate_type, 0)
if not cate_id:
cate_obj = WmCustomerCate.objects.filter(domain_id=self.domain_id, name=cate_type).first()
if not cate_obj:
cate_obj = WmCustomerCate.objects.create(
domain_id=u"{}".format(self.domain_id),
name=u"{}".format(cate_type),
parent_id=-1,
order=0,
)
cate_id = cate_obj.id
cate_id_map[cate_type] = cate_id
try:
obj = WmCustomerInfo.objects.filter(domain_id=self.domain_id, fullname=fullname, pref_email=pref_email).first()
if obj:
obj.domain_id = u"{}".format(self.domain_id)
obj.fullname = u"{}".format(fullname)
obj.cate_id = u"{}".format(cate_id)
obj.gender = u"{}".format(gender)
obj.birthday = u"{}".format(birthday)
obj.pref_email = u"{}".format(pref_email)
obj.pref_tel = u"{}".format(pref_tel)
obj.home_tel = u"{}".format(home_tel)
obj.work_tel = u"{}".format(work_tel)
obj.im_qq = u"{}".format(im_qq)
obj.im_msn = u"{}".format(im_msn)
obj.remark = u"{}".format(remark)
obj.save()
else:
WmCustomerInfo.objects.create(
domain_id=u"{}".format(self.domain_id),
fullname=u"{}".format(fullname),
cate_id=u"{}".format(cate_id),
gender=u"{}".format(gender),
birthday=u"{}".format(birthday),
pref_email=u"{}".format(pref_email),
pref_tel=u"{}".format(pref_tel),
home_tel=u"{}".format(home_tel),
work_tel=u"{}".format(work_tel),
im_qq=u"{}".format(im_qq),
im_msn=u"{}".format(im_msn),
remark=u"{}".format(remark),
)
except Exception,err:
self.import_error.append( _(u"数据保存失败: {} : {}").format(line,get_unicode(err)) )
continue
elif self.action == "import_del":
for fullname in self.insert_list:
WmCustomerInfo.objects.filter(fullname=fullname, domain_id=self.domain_id).delete()
def export(self):
import xlwt,StringIO,os
        # Create the workbook object and set its encoding
ws = xlwt.Workbook(encoding='utf-8')
w = ws.add_sheet(_(u'公共通讯录'),cell_overwrite_ok=True)
w.write(0, 0, _(u"客户名称"))
w.write(0, 1, _(u"邮件地址"))
w.write(0, 2, _(u"联系电话"))
w.write(0, 3, _(u"客户分组"))
w.write(0, 4, _(u"备注"))
w.write(0, 5, _(u"生日"))
w.write(0, 6, _(u"性别"))
w.write(0, 7, _(u"工作电话"))
w.write(0, 8, _(u"家庭电话"))
w.write(0, 9, u"QQ")
w.write(0, 10, u"MSN")
excel_row = 1
cate_id_map = {}
lists = WmCustomerInfo.objects.filter(domain_id=self.domain_id).all()
for d in lists:
fullname = d.fullname.strip()
pref_email = d.pref_email.strip()
pref_tel = d.pref_tel.strip()
cate_id = d.cate_id
if not cate_id in cate_id_map:
obj_cate = WmCustomerCate.objects.filter(domain_id=self.domain_id, id=cate_id).first()
if obj_cate:
cate_type = obj_cate.name.strip()
else:
cate_type = u""
cate_id_map[cate_id] = cate_type
cate_type = cate_id_map[cate_id]
remark = d.remark.strip()
birthday = get_unicode(d.birthday).strip()
gender = d.gender.strip()
work_tel = d.work_tel.strip()
home_tel = d.home_tel.strip()
im_qq = d.im_qq.strip()
im_msn = d.im_msn.strip()
w.write(excel_row, 0, fullname)
w.write(excel_row, 1, pref_email)
w.write(excel_row, 2, pref_tel)
w.write(excel_row, 3, cate_type)
w.write(excel_row, 4, remark)
w.write(excel_row, 5, birthday)
w.write(excel_row, 6, gender)
w.write(excel_row, 7, work_tel)
w.write(excel_row, 8, home_tel)
w.write(excel_row, 9, im_qq)
w.write(excel_row, 10, im_msn)
excel_row += 1
return download_excel(ws,"public_list.xls")
# Customer category list
class DomainPublicTypeForm(DotDict):
def __init__(self, domain_id, instance=None, post=None, get=None, request={}):
self.request = request
self.post = post or {}
self.get = get or {}
self.error = u""
self.domain_id = int(domain_id)
self.instance = instance
self.valid = True
self.initialize()
def initialize(self):
self.name = BaseFied(value=u"", error=None)
self.parent_id = BaseFied(value=-1, error=None)
self.order = BaseFied(value=u"0", error=None)
data = self.post if self.post else self.get
if self.instance:
self.name = BaseFied(value=self.instance.name, error=None)
self.parent_id = BaseFied(value=self.instance.parent_id, error=None)
self.order = BaseFied(value=self.instance.order, error=None)
if data:
parent_id = -1 if int(data[u"parent_id"])<=0 else int(data[u"parent_id"])
self.domain_id = int(data[u"domain_id"])
self.name = BaseFied(value=data[u"name"], error=None)
self.parent_id = BaseFied(value=parent_id, error=None)
self.order = BaseFied(value=data.get(u"order",u"0"), error=None)
def is_valid(self):
self.check()
return self.valid
def check(self):
name = u"" if not self.name.value.strip() else self.name.value.strip()
if not name:
self.name.set_error(_(u"请填写分类名称"))
self.valid = False
return self.valid
return self.valid
def save(self):
if self.instance:
obj = self.instance
obj.domain_id = u"{}".format(self.domain_id)
obj.name = u"{}".format(self.name.value)
obj.parent_id = u"{}".format(self.parent_id.value)
obj.order = u"{}".format(self.order.value)
obj.save()
else:
WmCustomerCate.objects.create(
domain_id=u"{}".format(self.domain_id),
name=u"{}".format(self.name.value),
parent_id=u"{}".format(self.parent_id.value),
order=u"{}".format(self.order.value),
)
@property
def get_cate_list(self):
return WmCustomerCate.objects.filter(domain_id=self.domain_id).all()
# Domain list management
class DomainListForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_LIST_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_LIST_PARAMS_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_LIST_PARAMS_TYPE)
def initialize(self):
self.error = ""
self.initBasicParams()
self.initPostParamsDefaultDisable()
data = self.post if self.post else self.get
if not data:
domainObj = Domain.objects.filter(id=self.domain_id.value).first()
domainDisabled = u"-1" if not domainObj else domainObj.disabled
domainWechatHost = u"-1" if not domainObj else domainObj.is_wx_host
domainName = u"" if not domainObj else domainObj.domain
self.domainName = BaseFied(value=domainName, error=None)
self.domainDisabled = BaseFied(value=str(domainDisabled), error=None)
self.domainWechatHost = BaseFied(value=str(domainWechatHost), error=None)
else:
self.domainDisabled = BaseFied(value=str(data.get("domainDisabled", u"1")), error=None)
self.domainWechatHost = BaseFied(value=str(data.get("domainWechatHost", u"-1")), error=None)
self.domainName = BaseFied(value=data.get("domainName", u""), error=None)
self.operate = data.get(u"operate",u"add")
def checkSave(self):
if not self.domainName.value.strip():
self.error = _(u"请设置域名名称")
return False
if not check_email_ordomain('test@'+self.domainName.value):
self.error = _(u"错误的域名格式")
return False
if self.operate == u"add":
obj = Domain.objects.filter(domain=self.domainName.value).first()
if obj:
self.error = _(u"域名已存在")
return False
obj = CoreAlias.objects.filter(source=u'@%s'%self.domainName.value).first()
if obj:
self.error = _(u"域名已存在于域别名中的虚拟地址中")
return False
if self.domainName.value in ("comingchina.com","fenbu.comingchina.com") \
and unicode(self.request.user).startswith(u"demo_admin@"):
self.error = _(u"演示版本主域名不可修改")
return False
self.save()
return True
def save(self):
        # Only one WeChat host domain may exist at a time
if self.domainWechatHost.value == "1":
Domain.objects.all().update(is_wx_host=u"0")
if str(self.domain_id.value) != "0":
domainObj = Domain.objects.filter(id=self.domain_id.value).first()
            # Changing the domain name itself is not allowed
#domainObj.domain = u"{}".format(self.domainName.value)
domainObj.disabled = u"{}".format(self.domainDisabled.value)
domainObj.is_wx_host = u"{}".format(self.domainWechatHost.value)
domainObj.save()
else:
domainObj = Domain.objects.create(
domain = u"{}".format(self.domainName.value),
disabled = u"{}".format(self.domainDisabled.value),
is_wx_host = u"{}".format(self.domainWechatHost.value),
)
self.domain_id = BaseFied(value=domainObj.id, error=None)
            # Record the domain creation date
DomainAttr.objects.create(
domain_id = self.domain_id.value,
type = u'system',
item = u'created',
value = time.strftime('%Y-%m-%d %H:%M:%S')
)
self.paramSave()
@property
def getLimitSendParams(self):
return MAILBOX_SEND_PERMIT
@property
def getLimitRecvParams(self):
return MAILBOX_RECV_PERMIT
class DomainDkimForm(DotDict):
ItemKey = 'dkim_privatekey'
ItemType = 'system'
def __init__(self, domain_id, request={}):
super(DomainDkimForm, self).__init__()
self.request = request
self.domain_id = domain_id
self.initialize()
def initialize(self):
self.error = u""
self.private_key = u""
self.public_key = u""
self.verify_success = False
self.verify_failure = False
attrs = DomainAttr.objects.filter(item=self.ItemKey, type=self.ItemType, domain_id=self.domain_id)
attr = attrs.first() if attrs else None
if attr:
try:
_, public_key = generate_rsa(pkey=attr.value)
self.private_key = attr.value
self.public_key = self.makePublicKey(self.private_key)
except:
self.autoSet()
                #self.error = u'Your key format is invalid; please clear it and regenerate it.'
else:
self.autoSet()
def makePublicKey(self, private_key):
_, public_key = generate_rsa(pkey=private_key)
public_key = "".join(public_key.split("\n")[1:-1])
public_key = u"v=DKIM1;k=rsa;p={}".format(public_key)
return public_key
def autoSet(self):
private_key, _ = generate_rsa()
attr, _ = DomainAttr.objects.get_or_create(item=self.ItemKey, type=self.ItemType, domain_id=self.domain_id)
attr.value = private_key
attr.save()
self.private_key = private_key
self.public_key = self.makePublicKey(self.private_key)
clear_redis_cache()
return self.checkVerify()
def importFile(self, request):
private_key = request.POST.get('certfile', '').replace('\r', '').strip()
if not private_key:
self.error = _(u'请选择密钥文件导入')
return False
else:
try:
private_key, public_key = generate_rsa(pkey=private_key)
            except Exception as err:
self.error = _(u'您导入的密钥格式不正确,请重新生成: %s')%str(err)
self.verify_failure = True
else:
attr, _ = DomainAttr.objects.get_or_create(item=self.ItemKey, type=self.ItemType, domain_id=self.domain_id)
attr.value = private_key
attr.save()
self.private_key = private_key
self.public_key = self.makePublicKey(self.private_key)
clear_redis_cache()
return self.checkVerify()
return False
def export(self):
from django.http import HttpResponse
try:
attr = DomainAttr.objects.get(item=self.ItemKey, type=self.ItemType, domain_id=self.domain_id)
except DomainAttr.DoesNotExist:
self.error = _(u'密钥数据不存在')
return None
response = HttpResponse(content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=dkim.key'
response.write(attr.value)
return response
def delete(self):
DomainAttr.objects.filter(item=self.ItemKey, type=self.ItemType, domain_id=self.domain_id).delete()
Domain.objects.filter(id=self.domain_id).update(dkim=u'-1')
self.initialize()
clear_redis_cache()
return True
def checkVerify(self):
from lib import dkim_tools
domain_name = Domain.objects.filter(id=self.domain_id).first().domain
if not dkim_tools.valid_domain(domain=domain_name, rdtype='dkim', record=self.public_key):
self.error = _(u"验证DKIM记录不通过,请确认SPF、MX记录已经配置正确!")
self.verify_failure = True
return False
try:
if not self.private_key:
self.error = _(u"未设置加密私钥")
return False
import dkim
from email.header import make_header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
            # Build a test message
mail = MIMEMultipart()
part = MIMEText(_(u"测试邮件"), 'plain', 'utf-8')
mail['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S %z")
mail["From"] = "[email protected]"
mail["To"] = "[email protected]"
mail['Subject'] = make_header(((_(u"测试DKIM邮件"), 'utf-8'),))
mail.attach(part)
maildata = mail.as_string()
            # Sign the message with DKIM
signature = dkim.sign(maildata, 'umail', domain_name, self.private_key)
signature = signature.replace('\r', '').lstrip()
self.verify_success = True
Domain.objects.filter(id=self.domain_id).update(dkim=u'1')
return True
        except Exception as err:
self.error = _(u"测试签名邮件时发生错误: {}").format(str(err))
self.verify_failure = True
            # Turn the DKIM switch off after a failed verification
Domain.objects.filter(id=self.domain_id).update(dkim=u'-1')
return False
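# Illustrative note (not part of the original module): the value returned by
# DomainDkimForm.makePublicKey is intended to be published as a DNS TXT record
# for the selector used in checkVerify ("umail"), e.g. for a hypothetical
# domain example.com:
#
#   umail._domainkey.example.com.  IN  TXT  "v=DKIM1;k=rsa;p=<base64 public key>"
#
# The record name depends on the selector the MTA is configured with; "umail"
# here simply mirrors the selector passed to dkim.sign() above.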
# Webmail page customization
class DomainWebBasicForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_WEB_BASIC_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_WEB_BASIC_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_WEB_BASIC_TYPE)
def initPostParams(self):
self.initPostParamsDefaultDisable()
def initialize(self):
self.initBasicParams()
self.initPostParams()
obj = CoCompany.objects.filter(domain_id=self.domain_id.value).first()
self.company = BaseFied(value=u"" if not obj else obj.company, error=None)
if u"company" in self.post:
self.company = BaseFied(value=self.post[u"company"], error=None)
def save(self):
self.paramSave()
obj, create = CoCompany.objects.get_or_create(domain_id=self.domain_id.value)
obj.company = self.company.value
obj.save()
# Webmail page customization - system announcement
class DomainWebAnounceForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_WEB_ANOUNCE_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_WEB_ANOUNCE_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_WEB_ANOUNCE_YPE)
def initPostParams(self):
self.initPostParamsDefaultDisable()
def initialize(self):
self.initBasicParams()
self.initPostParams()
self.content = self.cf_announce.value
try:
data = json.loads(self.cf_announce_set.value)
data = {} if not isinstance(data, dict) else data
except:
data = {}
self.title = data.get(u"title", u"")
self.title_color = data.get(u"title_color", u"")
self.height = data.get(u"height", u"")
if self.post:
self.title = self.post.get(u"title", u"")
self.title_color = self.post.get(u"title_color", u"")
self.height = self.post.get(u"height", u"")
self.content = self.post.get(u"content", u"")
data = {
u"title" : self.title,
u"title_color" : self.title_color,
u"height" : self.height,
}
self.cf_announce_set = BaseFied(value=json.dumps(data), error=None)
self.cf_announce = BaseFied(value=self.content, error=None)
# Logo settings
class DomainWebLogoForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_LOGO_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_LOGO_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_LOGO_TYPE)
def getData(self,item):
cache = u"cache_%s"%item
if hasattr(self, cache):
return getattr(self, cache)
value = DomainAttr.getAttrObjValue(self.domain_id.value, type=u"webmail", item=item)
if value and value.strip():
savePath = getSavePath(value)
if os.path.exists(savePath):
with open(savePath,"rb") as f:
data = f.read()
data = base64.encodestring(data)
setattr(self, cache, data)
return data
setattr(self, cache, u"")
return u""
def getWebmailLogoData(self):
return self.getData(u"cf_webmail_logo")
def getLoginLogoData(self):
return self.getData(u"cf_login_logo")
def saveLogo(self, filedata, item):
saveName = saveLogoToPath(filedata)
DomainAttr.saveAttrObjValue(self.domain_id.value, type=u"webmail", item=item, value=saveName)
return True
def importLogoLogin(self):
item = u"cf_login_logo"
filedata = self.post.get("logofile", u"")
if not filedata:
return False
return self.saveLogo(filedata, item)
def deleteLogoLogin(self):
saveName = self.cf_login_logo.value
if saveName:
deleteLogoFromPath(saveName)
self.cf_login_logo = BaseFied(value=u"", error=None)
DomainAttr.saveAttrObjValue(self.domain_id.value, type=u"webmail", item=u"cf_login_logo", value=u"")
def importLogoWebmail(self):
item = u"cf_webmail_logo"
filedata = self.post.get("logofile", u"")
if not filedata:
return False
return self.saveLogo(filedata, item)
def deleteLogoWebmail(self):
saveName = self.cf_webmail_logo.value
if saveName:
deleteLogoFromPath(saveName)
self.cf_webmail_logo = BaseFied(value=u"", error=None)
DomainAttr.saveAttrObjValue(self.domain_id.value, type=u"webmail", item=u"cf_webmail_logo", value=u"")
# Login template settings
class DomainWebLoginTempForm(DotDict):
PARAM_LIST = dict(constants.DOMAIN_LOGIN_TEMP_LIST)
def __init__(self, domain_id, post={}, request={}):
self.post = post
self.request = request
self.domain_id = domain_id
self.initialize()
def initialize(self):
v = DomainAttr.objects.filter(domain_id=self.domain_id, type=u"webmail", item=u"cf_login_page").first()
v = u"default" if not v else v.value
self.cf_login_page = BaseFied(value=v, error=None)
def clickLoginTemplImg(self, domain_id, name):
item = u"cf_login_page"
if not name in self.PARAM_LIST:
return False
DomainAttr.saveAttrObjValue(domain_id=domain_id, type=u"webmail", item=item, value=name)
return True
# Page advertisement settings
class DomainWebAdForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_WEB_AD_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_WEB_AD_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_WEB_AD_TYPE)
def initialize(self):
self.initBasicParams()
try:
data = json.loads(self.cf_adsetting2.value)
except:
data = {}
self.login_1 = data.get(u"login_1", {})
self.login_2 = data.get(u"login_2", {})
self.webmail = data.get(u"webmail", {})
self.image_name_1 = self.login_1.get(u"image", u"")
self.advert_link_1 = self.login_1.get(u"link", u"")
self.image_name_2 = self.login_2.get(u"image", u"")
self.advert_link_2 = self.login_2.get(u"link", u"")
self.webmail_name = self.webmail.get(u"image", u"")
self.webmail_link = self.webmail.get(u"link", u"")
def getImgData(self, name, data):
cache = u"cache_%s"%name
if hasattr(self, cache):
return getattr(self, cache)
if not data or data==u"-1":
return u""
value = data.get(name,{}).get(u"image","")
if value and value.strip():
savePath = getSavePath(value)
if os.path.exists(savePath):
with open(savePath,"rb") as f:
data = f.read()
data = base64.encodestring(data)
setattr(self, cache, data)
return data
setattr(self, cache, u"")
return u""
def getData(self):
item = u"cf_adsetting2"
cache = u"cache_%s"%item
if hasattr(self, cache):
return getattr(self, cache)
data = DomainAttr.getAttrObjValue(domain_id=self.domain_id.value, type=u"webmail", item=item)
try:
data = json.loads(data)
except:
data = {}
setattr(self, cache, data)
return data
def getAdvertData_1(self):
data = self.getData()
return self.getImgData(u"login_1", data)
def getAdvertData_2(self):
data = self.getData()
return self.getImgData(u"login_2", data)
def getAdvertData_3(self):
data = self.getData()
return self.getImgData(u"webmail", data)
def importAdvertData(self, action):
filedata = self.post.get("logofile", u"")
if not filedata:
return
name = saveLogoToPath(filedata)
if action == "login_advert_1":
deleteLogoFromPath(self.image_name_1)
self.image_name_1 = name
self.advert_link_1 = self.post.get(u"advert_link_1", u"")
elif action == "login_advert_2":
deleteLogoFromPath(self.image_name_2)
self.image_name_2 = name
self.advert_link_2 = self.post.get(u"advert_link_2", u"")
elif action == "login_advert_3":
deleteLogoFromPath(self.webmail_name)
self.webmail_name = name
self.webmail_link = self.post.get(u"webmail_link", u"")
self.saveData()
def deleteAdvertData(self, action):
if action == "login_advert_1_del":
deleteLogoFromPath(self.image_name_1)
self.image_name_1 = u""
self.advert_link_1 = u""
elif action == "login_advert_2_del":
deleteLogoFromPath(self.image_name_2)
self.image_name_2 = u""
self.advert_link_2 = u""
elif action == "login_advert_3_del":
deleteLogoFromPath(self.webmail_name)
self.webmail_name = u""
self.webmail_link = u""
self.saveData()
def saveData(self):
data = {
"login_1" : {"image":self.image_name_1,"link":self.advert_link_1},
"login_2" : {"image":self.image_name_2,"link":self.advert_link_2},
"webmail" : {"image":self.webmail_name,"link":self.webmail_link},
}
data = json.dumps(data)
DomainAttr.saveAttrObjValue(domain_id=self.domain_id.value, type=u"webmail", item=u"cf_adsetting2", value=data)
# Home page link settings
class DomainWebLinkForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_WEB_LINK_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_WEB_LINK_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_WEB_LINK_TYPE)
def initialize(self):
"""
{'0':
{'order': '',
'links': [
{'url': 'http://', 'desc': '', 'icon': None, 'title': ''},
{'url': 'http://', 'desc': '', 'icon': '', 'title': ''},
{'url': 'http://', 'desc': '', 'icon': '', 'title': ''},
{'url': 'http://', 'desc': '', 'icon': '', 'title': ''}
],
'title': ''
}
}
"""
self.initBasicParams()
try:
data = json.loads(self.cf_webmail_link2.value)
except:
data = {}
self.data = data
if not isinstance(self.data, dict):
self.data = {}
def getLinkList(self):
for i in self.data.keys():
dd = self.getLinkIndex(i)
yield i, dd
def getLinkIndex(self, idx):
dd = {
u"order" : u"1",
u"title" : u"",
u"links" : [],
}
for j in xrange(4):
dd["url_%s"%j] = u""
dd["desc_%s"%j] = u""
dd["icon_%s"%j] = u""
dd["title_%s"%j] = u""
dd["img_%s"%j] = u""
if not str(idx).isdigit():
return dd
idx = str(idx)
if not idx in self.data:
return dd
dd = {
u"order" : self.data[idx][u"order"],
u"title" : self.data[idx][u"title"],
}
d_link = self.data[idx][u"links"]
for j,v in enumerate(d_link):
icon = v[u"icon"]
dd["url_%s"%j] = v[u"url"]
dd["desc_%s"%j] = v[u"desc"]
dd["icon_%s"%j] = v[u"icon"]
dd["title_%s"%j] = v[u"title"]
imgData = self.getImgData(v[u"icon"])
dd["img_%s"%j] = imgData
return dd
def getImgData(self, value):
if value.strip():
savePath = getSavePath(value)
if os.path.exists(savePath):
with open(savePath,"rb") as f:
data = f.read()
data = base64.encodestring(data)
return data
return u""
def checkSaveNew(self, idx=""):
title = self.post.get(u"title", "")
order = self.post.get(u"order", "1")
data_link_1 = {
u"url" : self.post.get(u"url_0", ""),
u"desc" : self.post.get(u"desc_0", ""),
u"title" : self.post.get(u"title_0", ""),
}
data_link_2 = {
u"url" : self.post.get(u"url_1", ""),
u"desc" : self.post.get(u"desc_1", ""),
u"title" : self.post.get(u"title_1", ""),
}
data_link_3 = {
u"url" : self.post.get(u"url_2", ""),
u"desc" : self.post.get(u"desc_2", ""),
u"title" : self.post.get(u"title_2", ""),
}
data_link_4 = {
u"url" : self.post.get(u"url_3", ""),
u"desc" : self.post.get(u"desc_3", ""),
u"title" : self.post.get(u"title_3", ""),
}
icon_1, icon_2, icon_3, icon_4 = self.setLogoData(idx)
data_link_1["icon"] = icon_1
data_link_2["icon"] = icon_2
data_link_3["icon"] = icon_3
data_link_4["icon"] = icon_4
data = {
u'order' : order,
u'title' : title,
u'links' : [data_link_1, data_link_2, data_link_3, data_link_4],
}
if str(idx).isdigit():
idx = int(idx)
self.checkDelete(idx)
else:
idx = 0 if not self.data else max( [int(i) for i in self.data.keys()] )+1
self.data[str(idx)] = data
self.saveData()
return True
def setLogoData(self, idx):
def setLogoData2(default, logofile):
if default:
deleteLogoFromPath(default)
return saveLogoToPath(logofile)
#end def
icon_1_default = u""
icon_2_default = u""
icon_3_default = u""
icon_4_default = u""
idx = str(idx)
if idx in self.data:
icon_1_default = self.data[idx]["links"][0]["icon"]
icon_2_default = self.data[idx]["links"][1]["icon"]
icon_3_default = self.data[idx]["links"][2]["icon"]
icon_4_default = self.data[idx]["links"][3]["icon"]
icon_1 = setLogoData2(icon_1_default, self.post.get(u"logofile_1", "").strip())
icon_2 = setLogoData2(icon_2_default, self.post.get(u"logofile_2", "").strip())
icon_3 = setLogoData2(icon_3_default, self.post.get(u"logofile_3", "").strip())
icon_4 = setLogoData2(icon_4_default, self.post.get(u"logofile_4", "").strip())
return icon_1, icon_2, icon_3, icon_4
def checkDelete(self, idx):
if not str(idx).isdigit():
return False
idx = str(idx)
if not idx in self.data:
return False
data = self.data.pop(idx)
self.saveData()
for d in data["links"]:
deleteLogoFromPath(d["icon"])
def saveData(self):
data = json.dumps(self.data)
self.cf_webmail_link2 = BaseFied(value=data, error=None)
self.save()
# Stationery (letter template) settings
class DomainWebLetterForm(DotDict):
def __init__(self, domain_id, instance=None, get=None, post=None, request={}):
self.request = request
self.domain_id = BaseFied(value=domain_id, error=None)
self.get = get or {}
self.post = post or {}
self.instance = instance
self.valid = True
self.initialize()
def initialize(self):
self.name = u""
self.image = u""
self.content = u""
self.filedata = u""
if self.instance:
self.name = self.instance.name
self.image = self.instance.image
self.content = self.instance.content
if self.post:
self.name = self.post.get(u"name",u"")
self.content = self.post.get(u"content",u"")
self.filedata = self.post.get(u"logofile",u"")
def getImgData(self):
value = self.image
if value and value.strip():
savePath = getSavePath(value)
if os.path.exists(savePath):
with open(savePath,"rb") as f:
data = f.read()
data = base64.encodestring(data)
return data
return u""
def checkSave(self):
self.save()
return True
def save(self):
saveName = saveLogoToPath(self.filedata)
if self.instance:
obj = self.instance
obj.name = u"{}".format(self.name)
obj.image = u"{}".format(saveName)
obj.content = u"{}".format(self.content)
obj.save()
else:
obj = WmTemplate.objects.create(
domain_id=u"{}".format(self.domain_id.value),
name=u"{}".format(self.name),
image=u"{}".format(saveName),
content=u"{}".format(self.content)
)
self.instance = obj
| 39.083969 | 154 | 0.585278 | 80,556 | 0.965136 | 677 | 0.008111 | 1,700 | 0.020368 | 0 | 0 | 9,086 | 0.108859 |
d26b10ff6669fa3fb71b08771c9e2a65a51f7bb3
| 9,074 |
py
|
Python
|
deep_coach.py
|
jendelel/rhl-algs
|
d5b8779d7e271265d4f0bfcb3602bc56958e3eb3
|
[
"Apache-2.0"
] | 2 |
2019-03-30T23:29:10.000Z
|
2019-04-05T21:54:21.000Z
|
deep_coach.py
|
jendelel/rhl-algs
|
d5b8779d7e271265d4f0bfcb3602bc56958e3eb3
|
[
"Apache-2.0"
] | 3 |
2019-03-29T11:23:17.000Z
|
2020-12-28T02:00:17.000Z
|
deep_coach.py
|
jendelel/rhl-algs
|
d5b8779d7e271265d4f0bfcb3602bc56958e3eb3
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5 import QtGui, QtCore, QtWidgets
from collections import namedtuple
import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import utils
HumanFeedback = namedtuple('HumanFeedback', ['feedback_value'])
SavedAction = namedtuple('SavedAction', ['state', 'action', 'logprob'])
SavedActionsWithFeedback = namedtuple('SavedActionsWithFeedback', ['saved_actions', 'final_feedback'])
def parse_args(parser):
parser.add_argument('--batch_size', type=int, default=16, help='batch_size (default: 16)')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate (default:0.00025)')
parser.add_argument('--eligibility_decay', type=float, default=0.35, help='Learning rate (default:0.01)')
parser.add_argument("--coach_window_size", type=int, default=10, help="Number of transitions in a window.")
parser.add_argument('--entropy_reg', type=float, default=1.5, help='Entropy regularization beta')
parser.add_argument('--feedback_delay_factor', type=int, default=1, help='COACH Feedback delay factor.')
parser.add_argument(
'--ppo_eps',
type=float,
default=0.2,
help='PPO-like clipping of the loss. Negative value turns the ppo clipping off.')
parser.add_argument('--no_cuda', action='store_true', default=True, help='disables CUDA training')
class DeepCoach():
def __init__(self, window, args, env):
self.window = window
self.args = args
self.env = env
torch.manual_seed(args.seed)
self.device = torch.device("cuda" if not args.no_cuda else "cpu")
if window is not None:
self.setup_ui(window)
PolicyNet = CategoricalPolicyNet if hasattr(self.env.action_space, 'n') else GaussianPolicyNet
self.policy_net = PolicyNet(env.observation_space.shape[0], env.action_space).to(device=self.device)
self.optimizer = torch.optim.RMSprop(self.policy_net.parameters(), lr=args.learning_rate)
self.feedback = None
def setup_ui(self, window):
@QtCore.pyqtSlot(QtGui.QKeyEvent)
def keyPressed(event):
numpad_mod = int(event.modifiers()) & QtCore.Qt.KeypadModifier
if (event.key() == QtCore.Qt.Key_Minus and numpad_mod) or event.key() == QtCore.Qt.Key_M:
self.buttonClicked(-1)
elif (event.key() == QtCore.Qt.Key_Plus and numpad_mod) or event.key() == QtCore.Qt.Key_P:
self.buttonClicked(1)
else:
print("ERROR: Unknown key: ", event)
hor = QtWidgets.QHBoxLayout()
for i in range(-1, 2):
if i == 0:
continue
but = QtWidgets.QPushButton()
but.setText(str(i))
but.clicked.connect(lambda bla, def_arg=i: self.buttonClicked(def_arg))
hor.addWidget(but)
window.feedback_widget.setLayout(hor)
window.keyPressedSignal.connect(keyPressed)
def buttonClicked(self, value):
self.feedback = HumanFeedback(feedback_value=value)
def to_tensor(self, value):
return torch.tensor(value).float().to(device=self.device)
def select_action(self, state):
state = torch.from_numpy(state).to(device=self.device).float()
action, logprob, entropy = self.policy_net(state)
return logprob, action.detach().cpu().numpy(), entropy
def update_net(self, savedActionsWithFeedback, current_entropy):
if not savedActionsWithFeedback:
return
print("training")
e_losses = []
for saf in savedActionsWithFeedback:
final_feedback = saf.final_feedback
for n, sa in enumerate(saf.saved_actions[::-1]):
log_p_old = torch.tensor(sa.logprob).to(self.device)
log_prob, _, _ = self.select_action(sa.state)
probs_ratio = (log_prob - log_p_old).exp()
if self.args.ppo_eps > 0:
surr1 = final_feedback * probs_ratio
surr2 = torch.clamp(probs_ratio, 1.0 - self.args.ppo_eps, 1.0 + self.args.ppo_eps) * final_feedback
loss_term = torch.min(surr1, surr2)
else:
loss_term = probs_ratio * final_feedback
e_loss = (self.args.eligibility_decay**(n)) * loss_term
e_loss = torch.sum(e_loss, dim=0) # Sum the loss across all actions.
e_losses.append(e_loss)
loss = -(self.to_tensor(1 /
(len(savedActionsWithFeedback))) * torch.stack(e_losses).to(device=self.device).sum() +
torch.sum(self.args.entropy_reg * current_entropy, dim=0))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
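    # Worked example (illustrative, with the default --eligibility_decay of 0.35):
    # inside one feedback window the most recent transition (n=0) is weighted
    # 0.35**0 = 1.0, the previous one 0.35**1 = 0.35, then 0.35**2 = 0.1225, ...
    # so human feedback credits recent actions far more strongly than older ones.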
def processFeedback(self, savedActions, buffer):
feedback = self.feedback.feedback_value
if feedback is not None and len(savedActions) > 0:
print("Feedback: ", feedback)
if feedback > 0:
self.window.viewer.num_pos_feedback += 1
elif feedback < 0:
self.window.viewer.num_neg_feedback += 1
window_size = min(len(savedActions), self.args.coach_window_size)
del savedActions[:-(window_size + self.args.feedback_delay_factor)]
window = savedActions[:-self.args.feedback_delay_factor] # Copy the list
savedActionsWithFeedback = SavedActionsWithFeedback(saved_actions=window, final_feedback=feedback)
buffer.append(savedActionsWithFeedback)
self.feedback = None
def train(self):
buffer = []
running_reward = 10
for i_episode in range(1, 10000):
state, ep_reward = self.env.reset(), 0
savedActions = []
for t in range(1, 10000): # Don't infinite loop while learning
logprob, action, entropy = self.select_action(state)
old_state = state
state, reward, done, _ = self.env.step(action)
ep_reward += reward
                # Pair the action with the state it was selected in (old_state),
                # so the log-prob ratio in update_net is recomputed on that same state.
                savedActions.append(SavedAction(state=old_state, action=action, logprob=logprob.detach().cpu().numpy()))
self.window.render(self.env)
if not self.window.isVisible():
break
if self.feedback:
self.processFeedback(savedActions, buffer)
if len(buffer[-1].saved_actions) > 0 and self.window.trainCheck.isChecked():
self.update_net([buffer[-1]], self.select_action(old_state)[2])
time.sleep(self.window.renderSpin.value())
if len(buffer) > 50:
del buffer[:10]
if len(buffer) >= self.args.batch_size and self.window.trainCheck.isChecked():
indicies = random.sample(range(len(buffer)), self.args.batch_size)
mini_batch = [buffer[i] for i in indicies]
self.update_net(mini_batch, entropy)
print("Action: {}, Reward: {:.2f}, ep_reward: {:.2f}".format(action, reward, ep_reward))
if done:
break
if not self.window.isVisible():
break
running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
print("Running reward %d" % running_reward)
def start(window, args, env):
alg = DeepCoach(window, args, env)
print("Number of trainable parameters:", utils.count_parameters(alg.policy_net))
alg.train()
env.close()
class CategoricalPolicyNet(nn.Module):
def __init__(self, observation_space_shape, action_space):
super(CategoricalPolicyNet, self).__init__()
action_dim = action_space.n
self.hidden1 = nn.Linear(observation_space_shape, 16)
# self.hidden2 = nn.Linear(30, 30)
self.action_probs = nn.Linear(16, action_dim)
def forward(self, x):
x = F.tanh(self.hidden1(x))
# x = F.relu(self.hidden2(x))
logits = self.action_probs(x)
action = torch.argmax(logits, dim=-1)
distribution = torch.distributions.Categorical(logits=logits)
return action, distribution.log_prob(action), distribution.entropy()
class GaussianPolicyNet(nn.Module):
def __init__(self, observation_space_shape, action_space):
super(GaussianPolicyNet, self).__init__()
action_dim = action_space.shape[-1]
self.hidden1 = nn.Linear(observation_space_shape, 16)
# self.hidden2 = nn.Linear(30, 30)
self.mu_head = nn.Linear(16, action_dim)
self.log_std = torch.nn.parameter.Parameter(-0.5 * torch.ones(action_dim))
def forward(self, x):
x = F.tanh(self.hidden1(x))
# x = F.relu(self.hidden2(x))
mean = self.mu_head(x)
std = self.log_std.expand_as(mean).exp()
distribution = torch.distributions.Normal(mean, std)
action = torch.normal(mean, std)
return action, distribution.log_prob(action), distribution.entropy()
| 45.144279 | 119 | 0.622548 | 7,482 | 0.824554 | 0 | 0 | 492 | 0.054221 | 0 | 0 | 923 | 0.101719 |
d26dbcccea877eec0764524f32244d3a230c796d
| 434 |
py
|
Python
|
model/DB Automation/add_db.py
|
chrisdcao/Covid_Map_Hanoi
|
07d18cad8c1b4988795d9ec2aca5ae1fefdff892
|
[
"MIT"
] | 1 |
2021-09-09T07:55:00.000Z
|
2021-09-09T07:55:00.000Z
|
model/DB Automation/add_db.py
|
chrisdcao/Covid_Map_Hanoi
|
07d18cad8c1b4988795d9ec2aca5ae1fefdff892
|
[
"MIT"
] | null | null | null |
model/DB Automation/add_db.py
|
chrisdcao/Covid_Map_Hanoi
|
07d18cad8c1b4988795d9ec2aca5ae1fefdff892
|
[
"MIT"
] | null | null | null |
import pyodbc
import mysql.connector
conn = mysql.connector.connect(user='root', password='', port='3307', host='localhost', database='coviddb')
cursor = conn.cursor(buffered=True)
cursor.execute('SELECT * FROM coviddb.markers')
cursor.execute('''
INSERT INTO coviddb.markers(id, name, address, subject, lat, lng, type)
VALUES
('0','0','0','0','0','0','None')
''')
conn.commit()
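# Parameterized variant of the insert above (sketch only; the values below are
# placeholders, not real data). Letting mysql-connector bind the values avoids
# manual quoting:
#
# cursor.execute(
#     "INSERT INTO coviddb.markers (id, name, address, subject, lat, lng, type) "
#     "VALUES (%s, %s, %s, %s, %s, %s, %s)",
#     ('1', 'sample name', 'sample address', 'sample subject', '21.03', '105.85', 'None'),
# )
# conn.commit()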
| 22.842105 | 107 | 0.615207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.534562 |
d26eb1d2c5907c778452828f508b0d406c3d409a
| 605 |
py
|
Python
|
conduit_tests/fixtu.py
|
ArtuZi2/conduit
|
804fc2b69dda7e244fc91025eb30ad1847b81f6a
|
[
"MIT"
] | null | null | null |
conduit_tests/fixtu.py
|
ArtuZi2/conduit
|
804fc2b69dda7e244fc91025eb30ad1847b81f6a
|
[
"MIT"
] | null | null | null |
conduit_tests/fixtu.py
|
ArtuZi2/conduit
|
804fc2b69dda7e244fc91025eb30ad1847b81f6a
|
[
"MIT"
] | null | null | null |
import time
import pytest
# preparing selenium and chrome web driver manager
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
# importing os for environmental variable, and docker-compose up
import os
@pytest.fixture(scope="session")
def browser():
    # Build the Chrome options before creating the driver; the original order
    # referenced `options` before it was assigned, which raises a NameError.
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    driver.get("http://localhost:1667")
    return driver
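# Example usage in a test module (sketch; the test name and the expected title
# are assumptions, not taken from this repository):
#
# def test_home_page_loads(browser):
#     assert "Conduit" in browser.title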
| 28.809524 | 79 | 0.77686 | 0 | 0 | 0 | 0 | 306 | 0.505785 | 0 | 0 | 173 | 0.28595 |
d26eb2453ea9164766469c382d7b579b2c3779e5
| 22,390 |
py
|
Python
|
scope/utils.py
|
jasonmsetiadi/scope
|
e718998d0a7ac64e5f86554383030341dbe940f9
|
[
"MIT"
] | 3 |
2021-03-05T01:32:34.000Z
|
2022-01-19T03:13:44.000Z
|
scope/utils.py
|
jasonmsetiadi/scope
|
e718998d0a7ac64e5f86554383030341dbe940f9
|
[
"MIT"
] | 57 |
2021-01-14T19:49:44.000Z
|
2022-03-25T22:32:03.000Z
|
scope/utils.py
|
jasonmsetiadi/scope
|
e718998d0a7ac64e5f86554383030341dbe940f9
|
[
"MIT"
] | 10 |
2021-01-08T19:59:24.000Z
|
2022-02-16T10:54:44.000Z
|
__all__ = [
"Dataset",
"forgiving_true",
"load_config",
"log",
"make_tdtax_taxonomy",
"plot_gaia_density",
"plot_gaia_hr",
"plot_light_curve_data",
"plot_periods",
]
from astropy.io import fits
import datetime
import json
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pathlib
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tqdm.auto import tqdm
from typing import Mapping, Optional, Union
import yaml
def load_config(config_path: Union[str, pathlib.Path]):
"""
Load config and secrets
"""
with open(config_path) as config_yaml:
config = yaml.load(config_yaml, Loader=yaml.FullLoader)
return config
def time_stamp():
"""
:return: UTC time as a formatted string
"""
return datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")
def log(message: str):
print(f"{time_stamp()}: {message}")
def forgiving_true(expression):
return True if expression in ("t", "True", "true", "1", 1, True) else False
def make_tdtax_taxonomy(taxonomy: Mapping):
"""Recursively convert taxonomy definition from config["taxonomy"]
into tdtax-parsable dictionary
:param taxonomy: config["taxonomy"] section
:return:
"""
tdtax_taxonomy = dict()
if taxonomy["class"] not in ("tds", "phenomenological", "ontological"):
tdtax_taxonomy["name"] = f"{taxonomy['class']}: {taxonomy['name']}"
else:
tdtax_taxonomy["name"] = taxonomy["name"]
if "subclasses" in taxonomy:
tdtax_taxonomy["children"] = []
for cls in taxonomy["subclasses"]:
tdtax_taxonomy["children"].append(make_tdtax_taxonomy(cls))
return tdtax_taxonomy
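# Illustrative example (hypothetical taxonomy entry, not taken from the config):
#
#   make_tdtax_taxonomy({
#       "class": "phenomenological",
#       "name": "variable",
#       "subclasses": [{"class": "vnv", "name": "periodic"}],
#   })
#   == {"name": "variable", "children": [{"name": "vnv: periodic"}]}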
def plot_light_curve_data(
light_curve_data: pd.DataFrame,
period: Optional[float] = None,
title: Optional[str] = None,
save: Optional[str] = None,
):
"""Plot and save to file light curve data
:param light_curve_data:
:param period: float [days] if set, a phase-folded light curve will be displayed
:param title: plot title
:param save: path to save the plot
:return:
"""
plt.close("all")
# Official start of ZTF MSIP survey, March 17, 2018
jd_start = 2458194.5
colors = {
1: "#28a745",
2: "#dc3545",
3: "#00415a",
"default": "#f3dc11",
}
mask_good_data = light_curve_data["catflags"] == 0
df = light_curve_data.loc[mask_good_data]
if period is not None:
fig = plt.figure(figsize=(16, 9), dpi=200)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else:
fig = plt.figure(figsize=(16, 5), dpi=200)
ax1 = fig.add_subplot(111)
if title is not None:
fig.suptitle(title, fontsize=24)
# plot different ZTF bands/filters
for band in df["filter"].unique():
mask_filter = df["filter"] == band
ax1.errorbar(
df.loc[mask_filter, "hjd"] - jd_start,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
if period is not None:
for n in [0, -1]:
ax2.errorbar(
(df.loc[mask_filter, "hjd"] - jd_start) / period % 1 + n,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
# invert y axes since we are displaying magnitudes
ax1.invert_yaxis()
if period is not None:
ax2.invert_yaxis()
ax1.set_xlabel("Time")
ax1.grid(lw=0.3)
if period is not None:
ax2.set_xlabel(f"phase [period={period:4.4g} days]")
ax2.set_xlim(-1, 1)
ax2.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_periods(
features: pd.DataFrame,
limits: Optional[list] = None,
loglimits: Optional[bool] = False,
number_of_bins: Optional[int] = 20,
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot a histogram of periods for the sample"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# make figure
fig, ax = plt.subplots(figsize=(6, 6))
if title is not None:
fig.suptitle(title, fontsize=24)
if limits is not None:
if loglimits:
edges = np.logspace(
np.log10(limits[0]), np.log10(limits[1]), number_of_bins
)
else:
edges = np.linspace(limits[0], limits[1], number_of_bins)
else:
if loglimits:
edges = np.linspace(
np.log10(0.9 * np.min(features["period"])),
np.log10(1.1 * np.max(features["period"])),
number_of_bins,
)
else:
edges = np.linspace(
0.9 * np.min(features["period"]),
1.1 * np.max(features["period"]),
number_of_bins,
)
hist, bin_edges = np.histogram(features["period"], bins=edges)
hist = hist / np.sum(hist)
bins = (bin_edges[1:] + bin_edges[:-1]) / 2.0
ax.plot(bins, hist, linestyle="-", drawstyle="steps")
ax.set_xlabel("Period [day]")
ax.set_ylabel("Probability Density Function")
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if loglimits:
ax.set_xscale("log")
ax.set_xlim([0.9 * bins[0], 1.1 * bins[-1]])
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_hr(
gaia_data: pd.DataFrame,
path_gaia_hr_histogram: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the Gaia HR diagram with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# load background histogram
histogram = np.loadtxt(path_gaia_hr_histogram)
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
x_edges = np.arange(-0.681896, 5.04454978, 0.02848978)
y_edges = np.arange(-2.90934, 16.5665952, 0.0968952)
ax.pcolormesh(x_edges, y_edges, histogram.T, antialiased=False)
ax.set_xlim(x_edges[0], x_edges[-1])
ax.set_ylim(y_edges[0], y_edges[-1])
ax.invert_yaxis()
ax.set_xlabel(r"$G_{BP} - G_{RP}$")
ax.set_ylabel(r"$M_G$")
# plot sample data
ax.errorbar(
gaia_data["BP-RP"],
gaia_data["M"],
gaia_data["M"] - gaia_data["Ml"],
marker=".",
color="#e68a00",
alpha=0.75,
ls="",
lw=0.5,
)
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_density(
positions: pd.DataFrame,
path_gaia_density: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the RA/DEC Gaia density plot with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# load the data
hdulist = fits.open(path_gaia_density)
hist = hdulist[1].data["srcdens"][np.argsort(hdulist[1].data["hpx8"])]
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
# background setup
coordsys = ["C", "C"]
nest = True
# colormap
cm = plt.cm.get_cmap("viridis") # colorscale
cm.set_under("w")
cm.set_bad("w")
# plot the data in healpy
norm = "log"
hp.mollview(
hist,
norm=norm,
unit="Stars per sq. arcmin.",
cbar=False,
nest=nest,
title="",
coord=coordsys,
notext=True,
cmap=cm,
flip="astro",
nlocs=4,
min=0.1,
max=300,
)
ax = plt.gca()
image = ax.get_images()[0]
cbar = fig.colorbar(
image,
ax=ax,
ticks=[0.1, 1, 10, 100],
fraction=0.15,
pad=0.05,
location="bottom",
)
cbar.set_label("Stars per sq. arcmin.", size=12)
cbar.ax.tick_params(labelsize=12)
ax.tick_params(axis="both", which="major", labelsize=24)
# borders
lw = 3
pi = np.pi
dtor = pi / 180.0
theta = np.arange(0, 181) * dtor
hp.projplot(theta, theta * 0 - pi, "-k", lw=lw, direct=True)
hp.projplot(theta, theta * 0 + 0.9999 * pi, "-k", lw=lw, direct=True)
phi = np.arange(-180, 180) * dtor
hp.projplot(phi * 0 + 1.0e-10, phi, "-k", lw=lw, direct=True)
hp.projplot(phi * 0 + pi - 1.0e-10, phi, "-k", lw=lw, direct=True)
# ZTF
theta = np.arange(0.0, 360, 0.036)
phi = -30.0 * np.ones_like(theta)
hp.projplot(theta, phi, "k--", coord=["C"], lonlat=True, lw=2)
hp.projtext(170.0, -24.0, r"ZTF Limit", lonlat=True)
theta = np.arange(0.0, 360, 0.036)
# galaxy
for gallat in [15, 0, -15]:
phi = gallat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["G"], lonlat=True, lw=2)
# ecliptic
for ecllat in [0, -30, 30]:
phi = ecllat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["E"], lonlat=True, lw=2, ls=":")
# graticule
hp.graticule(ls="-", alpha=0.1, lw=0.5)
# labels
for lat in [60, 30, 0, -30, -60]:
hp.projtext(360.0, lat, str(lat), lonlat=True)
for lon in [0, 60, 120, 240, 300]:
hp.projtext(lon, 0.0, str(lon), lonlat=True)
# NWES
plt.text(0.0, 0.5, r"E", ha="right", transform=ax.transAxes, weight="bold")
plt.text(1.0, 0.5, r"W", ha="left", transform=ax.transAxes, weight="bold")
plt.text(
0.5,
0.992,
r"N",
va="bottom",
ha="center",
transform=ax.transAxes,
weight="bold",
)
plt.text(
0.5, 0.0, r"S", va="top", ha="center", transform=ax.transAxes, weight="bold"
)
color = "k"
lw = 10
alpha = 0.75
for pos in positions:
hp.projplot(
pos[0],
pos[1],
color=color,
markersize=5,
marker="o",
coord=coordsys,
lonlat=True,
lw=lw,
alpha=alpha,
zorder=10,
)
if save is not None:
fig.tight_layout()
plt.savefig(save)
""" Datasets """
class Dataset(object):
def __init__(
self,
tag: str,
path_dataset: str,
features: tuple,
verbose: bool = False,
**kwargs,
):
"""Load csv file with the dataset containing both data and labels
As of 20210317, it is produced by labels*.ipynb - this will likely change in a future PR
:param tag:
:param path_dataset:
:param features:
:param verbose:
"""
self.verbose = verbose
self.tag = tag
self.features = features
self.target = None
if self.verbose:
log(f"Loading {path_dataset}...")
nrows = kwargs.get("nrows", None)
self.df_ds = pd.read_csv(path_dataset, nrows=nrows)
if self.verbose:
log(self.df_ds[list(features)].describe())
self.df_ds = self.df_ds.replace([np.inf, -np.inf, np.nan], 0.0)
dmdt = []
if self.verbose:
print("Moving dmdt's to a dedicated numpy array...")
iterator = tqdm(self.df_ds.itertuples(), total=len(self.df_ds))
else:
iterator = self.df_ds.itertuples()
for i in iterator:
data = np.array(json.loads(self.df_ds["dmdt"][i.Index]))
if len(data.shape) == 0:
dmdt.append(np.zeros((26, 26)))
else:
dmdt.append(data)
self.dmdt = np.array(dmdt)
self.dmdt = np.expand_dims(self.dmdt, axis=-1)
# drop in df_ds:
        self.df_ds = self.df_ds.drop(columns="dmdt")
@staticmethod
def threshold(a, t: float = 0.5):
b = np.zeros_like(a)
b[np.array(a) > t] = 1
return b
def make(
self,
target_label: str = "variable",
threshold: float = 0.5,
balance: Optional[float] = None,
weight_per_class: bool = True,
scale_features: str = "min_max",
test_size: float = 0.1,
val_size: float = 0.1,
random_state: int = 42,
feature_stats: Optional[dict] = None,
batch_size: int = 256,
shuffle_buffer_size: int = 256,
epochs: int = 300,
**kwargs,
):
"""Make datasets for target_label
:param target_label: corresponds to training.classes.<label> in config
:param threshold: our labels are floats [0, 0.25, 0.5, 0.75, 1]
:param balance: balance ratio for the prevalent class. if null - use all available data
:param weight_per_class:
:param scale_features: min_max | median_std
:param test_size:
:param val_size:
:param random_state: set this for reproducibility
:param feature_stats: feature_stats to use to standardize features.
if None, stats are computed from the data, taking balance into account
:param batch_size
:param shuffle_buffer_size
:param epochs
:return:
"""
# Note: Dataset.from_tensor_slices method requires the target variable to be of the int type.
# TODO: see what to do about it when trying label smoothing in the future.
target = np.asarray(
list(map(int, self.threshold(self.df_ds[target_label].values, t=threshold)))
)
self.target = np.expand_dims(target, axis=1)
neg, pos = np.bincount(target.flatten())
total = neg + pos
if self.verbose:
log(
f"Examples:\n Total: {total}\n Positive: {pos} ({100 * pos / total:.2f}% of total)\n"
)
w_pos = np.rint(self.df_ds[target_label].values) == 1
index_pos = self.df_ds.loc[w_pos].index
if target_label == "variable":
# 'variable' is a special case: there is an explicit 'non-variable' label:
w_neg = (
np.asarray(
list(
map(
int,
self.threshold(
self.df_ds["non-variable"].values, t=threshold
),
)
)
)
== 1
)
else:
w_neg = ~w_pos
index_neg = self.df_ds.loc[w_neg].index
# balance positive and negative examples?
index_dropped = None
if balance:
underrepresented = min(np.sum(w_pos), np.sum(w_neg))
overrepresented = max(np.sum(w_pos), np.sum(w_neg))
sample_size = int(min(overrepresented, underrepresented * balance))
if neg > pos:
index_neg = (
self.df_ds.loc[w_neg].sample(n=sample_size, random_state=1).index
)
index_dropped = self.df_ds.loc[
list(set(self.df_ds.loc[w_neg].index) - set(index_neg))
].index
else:
index_pos = (
self.df_ds.loc[w_pos].sample(n=sample_size, random_state=1).index
)
index_dropped = self.df_ds.loc[
list(set(self.df_ds.loc[w_pos].index) - set(index_pos))
].index
if self.verbose:
log(
"Number of examples to use in training:"
f"\n Positive: {len(index_pos)}\n Negative: {len(index_neg)}\n"
)
ds_indexes = index_pos.to_list() + index_neg.to_list()
# Train/validation/test split (we will use an 81% / 9% / 10% data split by default):
train_indexes, test_indexes = train_test_split(
ds_indexes, shuffle=True, test_size=test_size, random_state=random_state
)
train_indexes, val_indexes = train_test_split(
train_indexes, shuffle=True, test_size=val_size, random_state=random_state
)
# Normalize features (dmdt's are already L2-normalized) (?using only the training samples?).
# Obviously, the same norms will have to be applied at the testing and serving stages.
# load/compute feature norms:
if feature_stats is None:
feature_stats = {
feature: {
"min": np.min(self.df_ds.loc[ds_indexes, feature]),
"max": np.max(self.df_ds.loc[ds_indexes, feature]),
"median": np.median(self.df_ds.loc[ds_indexes, feature]),
"mean": np.mean(self.df_ds.loc[ds_indexes, feature]),
"std": np.std(self.df_ds.loc[ds_indexes, feature]),
}
for feature in self.features
}
if self.verbose:
print("Computed feature stats:\n", feature_stats)
# scale features
for feature in self.features:
stats = feature_stats.get(feature)
if (stats is not None) and (stats["std"] != 0):
if scale_features == "median_std":
self.df_ds[feature] = (
self.df_ds[feature] - stats["median"]
) / stats["std"]
elif scale_features == "min_max":
self.df_ds[feature] = (self.df_ds[feature] - stats["min"]) / (
stats["max"] - stats["min"]
)
# norms = {
# feature: np.linalg.norm(self.df_ds.loc[ds_indexes, feature])
# for feature in self.features
# }
# for feature, norm in norms.items():
# if np.isnan(norm) or norm == 0.0:
# norms[feature] = 1.0
# if self.verbose:
# print('Computed feature norms:\n', norms)
#
# for feature, norm in norms.items():
# self.df_ds[feature] /= norm
train_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[train_indexes, self.features].values,
"dmdt": self.dmdt[train_indexes],
},
target[train_indexes],
)
)
val_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[val_indexes, self.features].values,
"dmdt": self.dmdt[val_indexes],
},
target[val_indexes],
)
)
test_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[test_indexes, self.features].values,
"dmdt": self.dmdt[test_indexes],
},
target[test_indexes],
)
)
dropped_samples = (
tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[index_dropped, self.features].values,
"dmdt": self.dmdt[index_dropped],
},
target[index_dropped],
)
)
if balance
else None
)
# Shuffle and batch the datasets:
train_dataset = (
train_dataset.shuffle(shuffle_buffer_size).batch(batch_size).repeat(epochs)
)
val_dataset = val_dataset.batch(batch_size).repeat(epochs)
test_dataset = test_dataset.batch(batch_size)
dropped_samples = dropped_samples.batch(batch_size) if balance else None
datasets = {
"train": train_dataset,
"val": val_dataset,
"test": test_dataset,
"dropped_samples": dropped_samples,
}
indexes = {
"train": np.array(train_indexes),
"val": np.array(val_indexes),
"test": np.array(test_indexes),
"dropped_samples": np.array(index_dropped.to_list())
if index_dropped is not None
else None,
}
# How many steps per epoch?
steps_per_epoch_train = len(train_indexes) // batch_size - 1
steps_per_epoch_val = len(val_indexes) // batch_size - 1
steps_per_epoch_test = len(test_indexes) // batch_size - 1
steps_per_epoch = {
"train": steps_per_epoch_train,
"val": steps_per_epoch_val,
"test": steps_per_epoch_test,
}
if self.verbose:
print(f"Steps per epoch: {steps_per_epoch}")
# Weight training data depending on the number of samples?
# Very useful for imbalanced classification, especially in the cases with a small number of examples.
if weight_per_class:
# weight data class depending on number of examples?
# num_training_examples_per_class = np.array([len(target) - np.sum(target), np.sum(target)])
num_training_examples_per_class = np.array([len(index_neg), len(index_pos)])
assert (
0 not in num_training_examples_per_class
), "found class without any examples!"
# fewer examples -- larger weight
weights = (1 / num_training_examples_per_class) / np.linalg.norm(
(1 / num_training_examples_per_class)
)
normalized_weight = weights / np.max(weights)
class_weight = {i: w for i, w in enumerate(normalized_weight)}
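            # Worked example (illustrative): with 900 negative and 100 positive
            # examples, the weights are proportional to [1/900, 1/100]; after
            # dividing by the maximum, class_weight becomes {0: ~0.11, 1: 1.0},
            # i.e. the rarer class receives the larger weight.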
else:
# working with binary classifiers only
class_weight = {i: 1 for i in range(2)}
return datasets, indexes, steps_per_epoch, class_weight
| 31.01108 | 109 | 0.552121 | 11,517 | 0.514381 | 0 | 0 | 128 | 0.005717 | 0 | 0 | 5,516 | 0.24636 |
d26ec52370aaf5a63c525e628d70b23d3bdd5697
| 1,787 |
py
|
Python
|
spacy/lang/pt/lex_attrs.py
|
keshan/spaCy
|
45c165af448783359f99673ab6b91492033bc66b
|
[
"MIT"
] | 1 |
2018-12-13T18:12:18.000Z
|
2018-12-13T18:12:18.000Z
|
spacy/lang/pt/lex_attrs.py
|
keshan/spaCy
|
45c165af448783359f99673ab6b91492033bc66b
|
[
"MIT"
] | null | null | null |
spacy/lang/pt/lex_attrs.py
|
keshan/spaCy
|
45c165af448783359f99673ab6b91492033bc66b
|
[
"MIT"
] | null | null | null |
# coding: utf8
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = ['zero', 'um', 'dois', 'três', 'tres', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez',
'onze', 'doze', 'dúzia', 'dúzias', 'duzia', 'duzias', 'treze', 'catorze', 'quinze', 'dezasseis',
'dezassete', 'dezoito', 'dezanove', 'vinte', 'trinta', 'quarenta', 'cinquenta', 'sessenta',
'setenta', 'oitenta', 'noventa', 'cem', 'cento', 'duzentos', 'trezentos', 'quatrocentos',
'quinhentos', 'seicentos', 'setecentos', 'oitocentos', 'novecentos', 'mil', 'milhão', 'milhao',
'milhões', 'milhoes', 'bilhão', 'bilhao', 'bilhões', 'bilhoes', 'trilhão', 'trilhao', 'trilhões',
'trilhoes', 'quadrilhão', 'quadrilhao', 'quadrilhões', 'quadrilhoes']
_ordinal_words = ['primeiro', 'segundo', 'terceiro', 'quarto', 'quinto', 'sexto',
'sétimo', 'oitavo', 'nono', 'décimo', 'vigésimo', 'trigésimo',
'quadragésimo', 'quinquagésimo', 'sexagésimo', 'septuagésimo',
'octogésimo', 'nonagésimo', 'centésimo', 'ducentésimo',
'trecentésimo', 'quadringentésimo', 'quingentésimo', 'sexcentésimo',
'septingentésimo', 'octingentésimo', 'nongentésimo', 'milésimo',
'milionésimo', 'bilionésimo']
def like_num(text):
text = text.replace(',', '').replace('.', '')
if text.isdigit():
return True
if text.count('/') == 1:
num, denom = text.split('/')
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
LEX_ATTRS = {
LIKE_NUM: like_num
}
| 41.55814 | 111 | 0.567431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 932 | 0.512088 |
d26f1afb5207b56be2e3191794a04329185695ac
| 1,818 |
py
|
Python
|
factor calculation scripts/15.smoothearningstopriceratio.py
|
cagdemir/equity-index-predictors
|
2546e72328de848222cb6a1c744ababab2058477
|
[
"MIT"
] | null | null | null |
factor calculation scripts/15.smoothearningstopriceratio.py
|
cagdemir/equity-index-predictors
|
2546e72328de848222cb6a1c744ababab2058477
|
[
"MIT"
] | null | null | null |
factor calculation scripts/15.smoothearningstopriceratio.py
|
cagdemir/equity-index-predictors
|
2546e72328de848222cb6a1c744ababab2058477
|
[
"MIT"
] | 1 |
2021-07-21T12:24:51.000Z
|
2021-07-21T12:24:51.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 29 18:00:53 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
con = pdblp.BCon(debug=False, port=8194, timeout=5000)
con.start()
index_tickers = ['NYA Index', 'SPX Index', 'CCMP Index','NDX Index','CDAX Index' ,'DAX Index',
'ASX Index','UKX Index', 'TPX Index','NKY Index', 'SHCOMP Index' ,
'SZCOMP Index','XUTUM Index','XU100 Index', 'MEXBOL Index',
'IBOV Index', 'IMOEX Index' , 'JALSH Index']
from datetime import date
start = '20040101'
firstday = '19990101'
today = date.today().strftime('%Y%m%d')
pe_ratio = con.bdh(index_tickers, 'PE RATIO', firstday, today)
pe_ratio_int = pe_ratio.interpolate(method='linear')
pe_ratio_int_w = pe_ratio_int.groupby(pd.Grouper(freq='W')).last()
#pe_ratio_last = pe_ratio_int_w[pe_ratio_int_w.index>=start]
#
#pe_ratio_last.columns = [i[0] for i in pe_ratio_last.columns]
#pe_ratio_last= pe_ratio_last[index_tickers]
pe_ratio_smoothed = pe_ratio_int_w.rolling(500, min_periods=100).mean()
var_no='15'
pe_ratio_smoothed_last = pe_ratio_smoothed[pe_ratio_smoothed.index>=start]
pe_ratio_smoothed_last.columns = [i[0] for i in pe_ratio_smoothed_last.columns]
pe_ratio_smoothed_last = pe_ratio_smoothed_last[index_tickers]
pe_ratio_smoothed_last.columns = [var_no+'_'+i for i in pe_ratio_smoothed_last.columns]
# pe_ratio_smoothed_last = pe_ratio_smoothed_last[index_tickers]
#pe_ratio_smoothed_last.columns = ['15_US_NY','15_US_SPX','15_US_CCMP', '15_DE','15_UK','15_JP','15_CH_SH','15_CH_SZ', '15_TR','15_MX','15_BR','15_RU','15_SA']
pe_ratio_smoothed_last.to_excel('C:/Users/sb0538/Desktop/15022020/excels/15_peratiosmoothed.xlsx')
| 33.054545 | 160 | 0.718372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 831 | 0.457096 |
d26f2516f232179df9832fdd43a7f139a4b6d7ba
| 357 |
py
|
Python
|
ms/commands/autopep8.py
|
edcilo/flask_ms_boilerplate
|
6507b7b7e61ab227df40b4701faab9ec9866e732
|
[
"MIT"
] | null | null | null |
ms/commands/autopep8.py
|
edcilo/flask_ms_boilerplate
|
6507b7b7e61ab227df40b4701faab9ec9866e732
|
[
"MIT"
] | null | null | null |
ms/commands/autopep8.py
|
edcilo/flask_ms_boilerplate
|
6507b7b7e61ab227df40b4701faab9ec9866e732
|
[
"MIT"
] | null | null | null |
import click
import os
from flask.cli import with_appcontext
@click.command(name='pep8',
help='Automatically formats Python code to conform to the PEP 8 style guide.')
@click.option('-p', '--path', default='./ms',
help='folder path to fix code')
@with_appcontext
def pep8(path: str) -> None:
    # autopep8 needs --in-place --recursive to rewrite the files under a directory
    os.system(f'autopep8 --in-place --recursive {path}')
| 27.461538 | 93 | 0.661064 | 0 | 0 | 0 | 0 | 293 | 0.820728 | 0 | 0 | 139 | 0.389356 |
d26f95f1c9db6cafe8a214de467a08368f6b0271
| 2,378 |
py
|
Python
|
py2ts/generate_service_registry.py
|
conanfanli/py2ts
|
8543ad03f19f094b0771c3b0cfc26a89eefd95ed
|
[
"MIT"
] | 3 |
2020-04-10T22:09:44.000Z
|
2020-11-29T07:19:28.000Z
|
py2ts/generate_service_registry.py
|
conanfanli/py2ts
|
8543ad03f19f094b0771c3b0cfc26a89eefd95ed
|
[
"MIT"
] | 1 |
2020-04-11T14:25:50.000Z
|
2020-04-11T14:25:50.000Z
|
py2ts/generate_service_registry.py
|
conanfanli/py2ts
|
8543ad03f19f094b0771c3b0cfc26a89eefd95ed
|
[
"MIT"
] | 1 |
2021-05-15T09:22:41.000Z
|
2021-05-15T09:22:41.000Z
|
#!/usr/bin/env python
import logging
import re
import subprocess
import sys
from typing import Dict
logger = logging.getLogger("py2ts.generate_service_registry")
logging.basicConfig(level=logging.INFO)
class RipgrepError(Exception):
pass
def camel_to_snake(name: str) -> str:
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
def get_service_registry_code(class_module_map: Dict[str, str]) -> str:
"""Return generated code for service registry."""
imports = []
services = []
for service_name, path in class_module_map.items():
imports.append(f"from {path} import {service_name}")
services.append(
f"{camel_to_snake(service_name)}: {service_name} = {service_name}()"
)
imports_code = "\n".join(imports)
services_code = "\n ".join(sorted(services))
return f"""
# Generated code. DO NOT EDIT!
from dataclasses import dataclass
{imports_code}
@dataclass
class ServiceRegistry:
{services_code}
service_registry = ServiceRegistry()
"""
def get_class_module_map() -> Dict[str, str]:
class_module_map = {}
result = subprocess.run(
f"rg '^(class \\w+Service)[\\(:]' -t py -o -r '$1'",
shell=True,
capture_output=True,
)
# Command successful
if result.returncode == 0:
# E.g., ['smartcat/services.py:class TrainingDataSetService:', 'smartcat/services.py:class SmartCatService:']
outputs = result.stdout.decode("utf-8").strip().split("\n")
logger.info(f"Output of rg:{outputs}")
for output in outputs:
# E.g., smartcat/services.py-class SmartCatService
file_path, class_name = output.split(":class ")
module = file_path.split(".py")[0].replace("/", ".")
assert class_name not in class_module_map, f"Found duplicate {class_name}"
class_module_map[class_name] = module
elif result.returncode >= 1:
# resultcode of 1 means no matches were found
raise RipgrepError(
f"Got code: {result.returncode} with message {result.stderr!r}"
)
return class_module_map
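# Illustrative example: given a map such as {"SmartCatService": "smartcat.services"}
# (the shape produced by get_class_module_map above), get_service_registry_code
# would emit roughly:
#
#   from smartcat.services import SmartCatService
#
#   @dataclass
#   class ServiceRegistry:
#       smart_cat_service: SmartCatService = SmartCatService()
#
#   service_registry = ServiceRegistry()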
if __name__ == "__main__":
try:
code = get_service_registry_code(get_class_module_map())
print(code)
except RipgrepError as e:
logger.error(e)
sys.exit(1)
| 27.976471 | 117 | 0.638352 | 39 | 0.0164 | 0 | 0 | 0 | 0 | 0 | 0 | 891 | 0.374685 |
d273a6d20b4812002b8ea1fa328f2d59bdbbb865
| 5,311 |
py
|
Python
|
tasks/birds.py
|
CatherineWong/l3
|
53ed9dc99d9b247cb209333ae9b528974e5e7e96
|
[
"Apache-2.0"
] | 46 |
2017-11-03T16:54:36.000Z
|
2021-12-07T23:07:58.000Z
|
tasks/birds.py
|
CatherineWong/l3
|
53ed9dc99d9b247cb209333ae9b528974e5e7e96
|
[
"Apache-2.0"
] | 7 |
2018-08-03T18:27:53.000Z
|
2020-12-17T17:08:52.000Z
|
tasks/birds.py
|
CatherineWong/l3
|
53ed9dc99d9b247cb209333ae9b528974e5e7e96
|
[
"Apache-2.0"
] | 6 |
2018-02-24T19:00:00.000Z
|
2021-03-28T19:50:53.000Z
|
from misc import util
from collections import namedtuple
import csv
import numpy as np
import os
import pickle
import sys
N_EX = 4
Datum = namedtuple("Datum", ["hint", "ex_inputs", "input", "label"])
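# A Datum bundles one episode: hint is the tokenized caption, ex_inputs holds the
# features of the N_EX example images, input is the query image features, and
# label is 1 when the query comes from the same class as the examples.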
START = "<s>"
STOP = "</s>"
random = util.next_random()
birds_path = os.path.join(sys.path[0], "data/birds")
def choose_except(options, reject, random=random):
choice = None
while choice is None:
choice = random.choice(options)
if choice in reject:
choice = None
return choice
class BirdsTask():
def __init__(self):
self.hint_vocab = util.Index()
self.START = START
self.STOP = STOP
with open(os.path.join(birds_path, "hendricks_data", "CUB_feature_dict.pkl")) as feat_f:
self.features = pickle.load(feat_f)
#file_to_full = {k.split("/")[1]: k for k in self.features}
#self.captions = {}
#for fname in os.listdir(os.path.join(birds_path, "captions")):
# name = file_to_full[fname[:-4] + ".jpg"]
# inst_capts = []
# with open(os.path.join(birds_path, "captions", fname)) as capt_f:
# for line in capt_f:
# line = line.strip().replace(".", " .").replace(",", " ,")
# toks = [START] + line.split() + [STOP]
# toks = [self.hint_vocab.index(w) for w in toks]
# inst_capts.append(tuple(toks))
# self.captions[name] = tuple(inst_capts)
self.captions = {}
with open(os.path.join(birds_path, "hendricks_data", "captions.tsv")) as capt_f:
reader = csv.DictReader(capt_f, delimiter="\t")
for row in reader:
caption = row["Description"].lower().replace(".", " .").replace(",", " ,")
toks = [START] + caption.split() + [STOP]
toks = [self.hint_vocab.index(w) for w in toks]
url = row["Input.image_url"]
inst = "/".join(url.split("/")[-2:])
if inst not in self.captions:
self.captions[inst] = []
self.captions[inst].append(toks)
classes = sorted(list(set(k.split("/")[0] for k in self.captions)))
classes.remove("cub_missing")
shuf_random = np.random.RandomState(999)
shuf_random.shuffle(classes)
assert len(classes) == 200
data_classes = {
"train": classes[:100],
"val": classes[100:110],
"test": classes[100:200]
}
data_insts = {}
for fold in ("train", "val", "test"):
classes = data_classes[fold]
data_classes[fold] = classes
instances = {cls: [] for cls in classes}
for key in self.features.keys():
cls, inst = key.split("/")
if cls in instances:
instances[cls].append(key)
data_insts[fold] = instances
# print fold
# for cls in classes:
# print cls, len(instances[cls])
# print
#exit()
self.train_classes = data_classes["train"]
self.val_classes = data_classes["val"]
self.test_classes = data_classes["test"]
self.train_insts = data_insts["train"]
self.val_insts = data_insts["val"]
self.test_insts = data_insts["test"]
self.n_features = self.features[self.features.keys()[0]].size
def sample_train(self, n_batch, augment):
assert not augment
batch = []
for _ in range(n_batch):
cls = random.choice(self.train_classes)
insts = [random.choice(self.train_insts[cls]) for _ in range(N_EX)]
captions = self.captions[insts[0]]
caption = captions[random.choice(len(captions))]
feats = np.asarray([self.features[inst] for inst in insts])
label = random.randint(2)
if label == 0:
other_cls = choose_except(self.train_classes, [cls])
other_inst = random.choice(self.train_insts[other_cls])
else:
other_inst = choose_except(self.train_insts[cls], insts)
other_feats = self.features[other_inst]
datum = Datum(caption, feats, other_feats, label)
batch.append(datum)
return batch
def sample_heldout(self, classes, insts):
batch = []
local_random = np.random.RandomState(0)
for i, cls in enumerate(classes):
datum_insts = insts[cls][:N_EX]
caption = self.captions[datum_insts[0]][0]
feats = np.asarray([self.features[inst] for inst in datum_insts])
label = i % 2
if label == 0:
other_cls = choose_except(classes, [cls], local_random)
other_inst = insts[other_cls][N_EX]
else:
other_inst = insts[cls][N_EX]
other_feats = self.features[other_inst]
datum = Datum(caption, feats, other_feats, label)
batch.append(datum)
return batch
def sample_val(self, same=False):
return self.sample_heldout(self.val_classes, self.val_insts)
def sample_test(self, same=False):
return self.sample_heldout(self.test_classes, self.test_insts)
| 35.644295 | 96 | 0.559217 | 4,785 | 0.90096 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.177179 |
d273a8f81f9807f4dd16cdd363ca8063f3987151
| 2,261 |
py
|
Python
|
tests/test_utils.py
|
caiosba/covid-19
|
2a0f43f5004e7e39bd982eaa36185859cd9db88f
|
[
"MIT"
] | 9 |
2020-03-23T19:04:04.000Z
|
2020-03-28T02:11:14.000Z
|
tests/test_utils.py
|
caiosba/covid-19
|
2a0f43f5004e7e39bd982eaa36185859cd9db88f
|
[
"MIT"
] | 6 |
2020-03-22T14:10:08.000Z
|
2020-04-05T01:53:29.000Z
|
tests/test_utils.py
|
caiosba/covid-19
|
2a0f43f5004e7e39bd982eaa36185859cd9db88f
|
[
"MIT"
] | 6 |
2020-03-23T18:15:26.000Z
|
2020-04-05T01:49:40.000Z
|
import locale
import pytest
from covid.utils import fmt
class TestUtilityFunctions:
def test_format_functions_en_US(self):
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
return pytest.skip()
assert fmt(0.10) == "0.1"
assert fmt(0.12) == "0.12"
assert fmt(0.01) == "0.01"
assert fmt(0.012) == "0.012"
assert fmt(0.0123) == "0.012"
assert fmt(0.00123) == "1.23e-03"
assert fmt(0.0012) == "1.2e-03"
assert fmt(1.2341) == "1.23"
assert fmt(12.341) == "12.34"
assert fmt(123.41) == "123.4"
assert fmt(1234) == "1,234"
assert fmt(1234.5) == "1,234"
assert fmt(42_123.1) == "42,123"
assert fmt(42_123) == "42,123"
assert fmt(1_000_000) == "1M"
assert fmt(10_000_000) == "10M"
assert fmt(12_000_000) == "12M"
assert fmt(12_300_000) == "12.3M"
assert fmt(12_340_000) == "12.34M"
assert fmt(12_341_000) == "12.34M"
assert fmt(-12_341_000) == "-12.34M"
assert fmt(123_456_000) == "123.5M"
assert fmt(1_234_567_000) == "1.23B"
def test_format_functions_pt_BR(self):
try:
locale.setlocale(locale.LC_ALL, "pt_BR.UTF-8")
except locale.Error:
return pytest.skip()
assert fmt(0.10) == "0,1"
assert fmt(0.12) == "0,12"
assert fmt(0.01) == "0,01"
assert fmt(0.012) == "0,012"
assert fmt(0.0123) == "0,012"
assert fmt(0.00123) == "1,23e-03"
assert fmt(0.0012) == "1,2e-03"
assert fmt(1.2341) == "1,23"
assert fmt(12.341) == "12,34"
assert fmt(123.41) == "123,4"
assert fmt(1234) == "1.234"
assert fmt(1234.5) == "1.234"
assert fmt(42_123.1) == "42.123"
assert fmt(42_123) == "42.123"
assert fmt(1_000_000) == "1M"
assert fmt(10_000_000) == "10M"
assert fmt(12_000_000) == "12M"
assert fmt(12_300_000) == "12,3M"
assert fmt(12_340_000) == "12,34M"
assert fmt(12_341_000) == "12,34M"
assert fmt(-12_341_000) == "-12,34M"
assert fmt(123_456_000) == "123,5M"
assert fmt(1_234_567_000) == "1,23B"
| 33.25 | 58 | 0.530739 | 2,200 | 0.973021 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.153914 |
d27463b7bc3e1731eab5ba3103ed835b119f201f
| 10,717 |
py
|
Python
|
catkin_ws/src/10-lane-control/line_detector/src/line_detector_node.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src/10-lane-control/line_detector/src/line_detector_node.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src/10-lane-control/line_detector/src/line_detector_node.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | null | null | null |
#!/usr/bin/env python
from anti_instagram.AntiInstagram import AntiInstagram
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D, FSMState)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.timekeeper import TimeKeeper
import cv2
import rospy
import threading
import time
from line_detector.line_detector_plot import color_segment, drawLines
import numpy as np
class LineDetectorNode(object):
def __init__(self):
self.node_name = rospy.get_name()
# Thread lock
self.thread_lock = threading.Lock()
# Constructor of line detector
self.bridge = CvBridge()
self.active = True
self.stats = Stats()
        # Only be verbose once every intermittent_interval cycles
self.intermittent_interval = 100
self.intermittent_counter = 0
# color correction
self.ai = AntiInstagram()
# these will be added if it becomes verbose
self.pub_edge = None
self.pub_colorSegment = None
self.detector = None
self.verbose = None
self.updateParams(None)
# Publishers
self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
# FSM
self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
self.sub_fsm_mode = rospy.Subscriber("~fsm_mode", FSMState, self.cbMode, queue_size=1)
rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))
rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
def updateParams(self, _event):
old_verbose = self.verbose
self.verbose = rospy.get_param('~verbose', True)
# self.loginfo('verbose = %r' % self.verbose)
if self.verbose != old_verbose:
self.loginfo('Verbose is now %r' % self.verbose)
self.image_size = rospy.get_param('~img_size')
self.top_cutoff = rospy.get_param('~top_cutoff')
if self.detector is None:
c = rospy.get_param('~detector')
assert isinstance(c, list) and len(c) == 2, c
# if str(self.detector_config) != str(c):
self.loginfo('new detector config: %s' % str(c))
self.detector = instantiate(c[0], c[1])
# self.detector_config = c
if self.verbose and self.pub_edge is None:
self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)
#FSM
def cbSwitch(self, switch_msg):
self.active = switch_msg.data
#FSM
def cbMode(self, mode_msg):
self.fsm_state = mode_msg.state # String of current FSM state
def cbImage(self, image_msg):
self.stats.received()
if not self.active:
return
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
# Returns rightaway
def cbTransform(self, transform_msg):
self.ai.shift = transform_msg.s[0:3]
self.ai.scale = transform_msg.s[3:6]
self.loginfo("AntiInstagram transform received")
def loginfo(self, s):
rospy.loginfo('[%s] %s' % (self.node_name, s))
def intermittent_log_now(self):
return self.intermittent_counter % self.intermittent_interval == 1
def intermittent_log(self, s):
if not self.intermittent_log_now():
return
self.loginfo('%3d:%s' % (self.intermittent_counter, s))
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
self.stats.skipped()
# Return immediately if the thread is locked
return
try:
self.processImage_(image_msg)
finally:
# Release the thread lock
self.thread_lock.release()
def processImage_(self, image_msg):
self.stats.processed()
if self.intermittent_log_now():
self.intermittent_log(self.stats.info())
self.stats.reset()
tk = TimeKeeper(image_msg)
self.intermittent_counter += 1
# Decode from compressed image with OpenCV
try:
image_cv = image_cv_from_jpg(image_msg.data)
except ValueError as e:
self.loginfo('Could not decode image: %s' % e)
return
tk.completed('decoded')
# Resize and crop image
hei_original, wid_original = image_cv.shape[0:2]
if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
# image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
interpolation=cv2.INTER_NEAREST)
image_cv = image_cv[self.top_cutoff:,:,:]
tk.completed('resized')
# apply color correction: AntiInstagram
image_cv_corr = self.ai.applyTransform(image_cv)
image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
tk.completed('corrected')
# Set the image to be detected
self.detector.setImage(image_cv_corr)
# Detect lines and normals
white = self.detector.detectLines('white')
yellow = self.detector.detectLines('yellow')
red = self.detector.detectLines('red')
tk.completed('detected')
# SegmentList constructor
segmentList = SegmentList()
segmentList.header.stamp = image_msg.header.stamp
# Convert to normalized pixel coordinates, and add segments to segmentList
arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
if len(white.lines) > 0:
lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
if len(yellow.lines) > 0:
lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
if len(red.lines) > 0:
lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
len(yellow.lines), len(red.lines)))
tk.completed('prepared')
# Publish segmentList
self.pub_lines.publish(segmentList)
tk.completed('--pub_lines--')
# VISUALIZATION only below
if self.verbose:
# Draw lines and normals
image_with_lines = np.copy(image_cv_corr)
drawLines(image_with_lines, white.lines, (0, 0, 0))
drawLines(image_with_lines, yellow.lines, (255, 0, 0))
drawLines(image_with_lines, red.lines, (0, 255, 0))
tk.completed('drawn')
# Publish the frame with lines
image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
tk.completed('pub_image')
# if self.verbose:
colorSegment = color_segment(white.area, red.area, yellow.area)
edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
self.pub_edge.publish(edge_msg_out)
self.pub_colorSegment.publish(colorSegment_msg_out)
tk.completed('pub_edge/pub_segment')
self.intermittent_log(tk.getall())
def onShutdown(self):
self.loginfo("Shutdown.")
def toSegmentMsg(self, lines, normals, color):
segmentMsgList = []
for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
segment = Segment()
segment.color = color
segment.pixels_normalized[0].x = x1
segment.pixels_normalized[0].y = y1
segment.pixels_normalized[1].x = x2
segment.pixels_normalized[1].y = y2
segment.normal.x = norm_x
segment.normal.y = norm_y
segmentMsgList.append(segment)
return segmentMsgList
class Stats():
def __init__(self):
self.nresets = 0
self.reset()
def reset(self):
self.nresets += 1
self.t0 = time.time()
self.nreceived = 0
self.nskipped = 0
self.nprocessed = 0
def received(self):
if self.nreceived == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node received first image.')
self.nreceived += 1
def skipped(self):
self.nskipped += 1
def processed(self):
if self.nprocessed == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node processing first image.')
self.nprocessed += 1
def info(self):
delta = time.time() - self.t0
if self.nreceived:
skipped_perc = (100.0 * self.nskipped / self.nreceived)
else:
skipped_perc = 0
def fps(x):
return '%.1f fps' % (x / delta)
m = ('In the last %.1f s: received %d (%s) processed %d (%s) skipped %d (%s) (%1.f%%)' %
(delta, self.nreceived, fps(self.nreceived),
self.nprocessed, fps(self.nprocessed),
self.nskipped, fps(self.nskipped), skipped_perc))
return m
if __name__ == '__main__':
rospy.init_node('line_detector',anonymous=False)
line_detector_node = LineDetectorNode()
rospy.on_shutdown(line_detector_node.onShutdown)
rospy.spin()
| 33.701258 | 122 | 0.624149 | 9,840 | 0.918167 | 0 | 0 | 0 | 0 | 0 | 0 | 1,622 | 0.151348 |
d274bf60d6abc1273072877c9d1d6cd1119e3863
| 776 |
py
|
Python
|
django_qiniu/utils.py
|
9nix00/django-qiniu
|
08a403dc156b4971eef5af359048a6d2ce485245
|
[
"MIT"
] | 1 |
2018-06-21T03:14:20.000Z
|
2018-06-21T03:14:20.000Z
|
django_qiniu/utils.py
|
9nix00/django-qiniu
|
08a403dc156b4971eef5af359048a6d2ce485245
|
[
"MIT"
] | null | null | null |
django_qiniu/utils.py
|
9nix00/django-qiniu
|
08a403dc156b4971eef5af359048a6d2ce485245
|
[
"MIT"
] | 1 |
2018-06-21T03:14:21.000Z
|
2018-06-21T03:14:21.000Z
|
# -*- coding: utf-8 -*-
from account_helper.middleware import get_current_user_id
from django.utils import timezone
from django.conf import settings
from hashlib import sha1
import os
def user_upload_dir(instance, filename):
name_struct = os.path.splitext(filename)
current_user_id = get_current_user_id()
expire = 3600 if not hasattr(settings, 'QINIU_PREVIEW_EXPIRE') else settings.QINIU_PREVIEW_EXPIRE
return '{4}/{0}/{3}/{1}{2}'.format(current_user_id,
sha1(filename.encode('utf-8')).hexdigest(),
name_struct[-1] if len(name_struct) > 1 else '',
timezone.now().strftime('%Y-%m-%d-%H-%M'),
expire)
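# Illustrative result for user id 42, filename "avatar.jpg" and the default
# expire of 3600: "3600/42/2024-01-31-12-00/<sha1 of the filename>.jpg"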
| 38.8 | 101 | 0.590206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.115979 |
d27508d001c149eb0a10e44188a30d1458aaa3a0
| 1,339 |
py
|
Python
|
AxePy3Lib/01/re/re_test_patterns_groups.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1 |
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
AxePy3Lib/01/re/re_test_patterns_groups.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
AxePy3Lib/01/re/re_test_patterns_groups.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Show the groups within the matches for a pattern.
"""
# end_pymotw_header
import re
def test_patterns(text, patterns):
"""Given source text and a list of patterns, look for
matches for each pattern within the text and print
them to stdout.
"""
# Look for each pattern in the text and print the results
for pattern, desc in patterns:
print('{!r} ({})\n'.format(pattern, desc))
print(' {!r}'.format(text))
for match in re.finditer(pattern, text):
s = match.start()
e = match.end()
prefix = ' ' * (s)
print(
' {}{!r}{} '.format(prefix,
text[s:e],
' ' * (len(text) - e)),
end=' ',
)
print(match.groups())
if match.groupdict():
print('{}{}'.format(
' ' * (len(text) - s),
match.groupdict()),
)
print()
return
if __name__ == '__main__':
patterns = [(r'a((a*)(b*))', 'a followed by 0-n a and 0-n b'),
(r'(?P<first>a+)(?P<second>c+)', 'pattern 2'), ]
test_patterns('accaaccca', patterns)
| 29.108696 | 66 | 0.477969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 525 | 0.392084 |
d275a759f35f51d02a7503058e5ce4b1b8c106f5
| 332 |
py
|
Python
|
src/xr_embeds/urls.py
|
xr-web-de/xr-web
|
63269e26a8752564b63e84bfc0ce180198577d35
|
[
"MIT"
] | 4 |
2019-03-28T20:49:59.000Z
|
2019-08-11T19:31:35.000Z
|
src/xr_embeds/urls.py
|
xr-web-de/xr-web
|
63269e26a8752564b63e84bfc0ce180198577d35
|
[
"MIT"
] | 4 |
2019-05-08T18:07:45.000Z
|
2021-05-08T17:29:46.000Z
|
src/xr_embeds/urls.py
|
xr-web-de/xr-web
|
63269e26a8752564b63e84bfc0ce180198577d35
|
[
"MIT"
] | 5 |
2019-03-28T20:50:15.000Z
|
2020-01-17T21:16:57.000Z
|
from django.urls import re_path
from xr_embeds.views import geojson_view, embed_html_view
app_name = "embeds"
urlpatterns = [
re_path(r"^(\d+)/html/$", embed_html_view, name="embed_html"),
re_path(
r"^geojson/(?P<model_slug>\w+)/(?P<query_slug>\w+)/$",
geojson_view,
name="geojson_view",
),
]
| 22.133333 | 66 | 0.638554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.310241 |
d276c726608d0b175c4c5a4e294d5b6baeab2166
| 377 |
py
|
Python
|
templates/django/__APPNAME__/apps/utils/models.py
|
ba1dr/tplgenerator
|
f05b6f9a32cf825d326dd2faf551d1e156d2df37
|
[
"MIT"
] | null | null | null |
templates/django/__APPNAME__/apps/utils/models.py
|
ba1dr/tplgenerator
|
f05b6f9a32cf825d326dd2faf551d1e156d2df37
|
[
"MIT"
] | null | null | null |
templates/django/__APPNAME__/apps/utils/models.py
|
ba1dr/tplgenerator
|
f05b6f9a32cf825d326dd2faf551d1e156d2df37
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.core import signing
class PasswordMixin(object):
password_encrypted = models.CharField(max_length=128, null=False, blank=False)
@property
def password(self):
return signing.loads(self.password_encrypted)
@password.setter
def password(self, value):
self.password_encrypted = signing.dumps(value)
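# Illustrative use on a class mixing this in: assigning obj.password = "secret"
# stores the signed payload in password_encrypted, and reading obj.password
# returns the original value via signing.loads.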
| 23.5625 | 82 | 0.732095 | 312 | 0.827586 | 0 | 0 | 189 | 0.501326 | 0 | 0 | 0 | 0 |
d2777dd29fcc8c927860ffb94a848ef3650dcd17
| 7,858 |
py
|
Python
|
publications/admin.py
|
lukacu/django-publications
|
663ace605925f53835f441c7761a6f4b0d2d4143
|
[
"BSD-3-Clause"
] | null | null | null |
publications/admin.py
|
lukacu/django-publications
|
663ace605925f53835f441c7761a6f4b0d2d4143
|
[
"BSD-3-Clause"
] | 3 |
2020-02-12T03:15:47.000Z
|
2021-06-10T22:05:24.000Z
|
publications/admin.py
|
lukacu/django-publications
|
663ace605925f53835f441c7761a6f4b0d2d4143
|
[
"BSD-3-Clause"
] | 1 |
2018-07-23T11:46:37.000Z
|
2018-07-23T11:46:37.000Z
|
# -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from publications import list_import_formats, get_publications_importer
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <[email protected]>'
__docformat__ = 'epytext'
from django.contrib import admin
from django import forms
import publications.models
from publications.models import Publication, PublicationType, Group, Authorship, Person, Metadata, Import
from publications.fields import PeopleField
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
def merge_people_by_family_name(modeladmin, request, queryset):
groups = publications.models.group_people_by_family_name(list(queryset))
groups = filter(lambda x : len(x) > 2, [group for fn, group in groups.items()])
if not len(groups):
messages.info(request, "Nothing to merge")
return HttpResponseRedirect(reverse("admin:publications_person_changelist"))
return render_to_response('admin/publications/person/merge.html', {
'groups': groups
}, context_instance=RequestContext(request))
def merge_people(modeladmin, request, queryset):
return render_to_response('admin/publications/person/merge.html', {
'groups': [list(queryset)]
}, context_instance=RequestContext(request))
class PublicationForm(forms.ModelForm):
class Meta:
model = Publication
fields = '__all__'
people_authorship = PeopleField(label="People", max_length=1024, help_text = 'List of authors separated by semicolon. Both first-name last-name and last-name, first name forms can be used. Example: John Doe; Smith, David; William, Chris.')
latitude = forms.FloatField(required=False)
def __init__(self, *args, **kwargs):
super(PublicationForm, self).__init__(*args, **kwargs)
if hasattr(self, 'instance'):
instance = self.instance
self.initial['people_authorship'] = instance.people_as_string()
def save(self, commit=True):
model = super(PublicationForm, self).save(commit=False)
model.set_people = self.cleaned_data['people_authorship']
if commit:
model.save()
return model
class MetadataInline(admin.TabularInline):
model = Metadata
class AuthorshipInline(admin.TabularInline):
model = Authorship
class PublicationAdmin(admin.ModelAdmin):
radio_fields = {"publication_type": admin.HORIZONTAL}
raw_id_fields = ["people"]
list_display = ('publication_type', 'first_author', 'title', 'year', 'within')
list_display_links = ('title',)
search_fields = ('title', 'within', 'people', 'tags', 'year')
fieldsets = (
("Basic information", {'fields':
('publication_type', 'title', 'people_authorship', 'abstract', 'note')}),
("Publishing information", {'fields':
('year', 'month', 'within', 'publisher', 'volume', 'number', 'pages')}),
("Resources", {'fields':
('url', 'code', 'file', 'doi')}),
("Categoritzation", {'fields':
('tags', 'public', 'groups')}),
)
inlines = [MetadataInline]
form = PublicationForm
def import_publications(self, request):
if request.method == 'POST':
# container for error messages
errors = {"publications" : [], "importer" : []}
# check for errors
if not request.POST['publications']:
errors["publications"].append('This field is required.')
if not request.POST['importer']:
errors["importer"].append('This field is required.')
else:
importer = get_publications_importer(request.POST['importer'])
if importer:
publications = []
importer.import_from_string(request.POST['publications'], lambda x : publications.append(x), lambda x : errors["publications"].append(x))
for publication in publications:
i = Import(title = publication["title"], data = publication, source = importer.get_format_identifier())
i.save()
if not publications:
errors["publications"].append('No valid entries found.')
else:
errors["importer"].append('Not a registered importer.')
if errors["publications"] or errors["importer"]:
# some error occurred
return render_to_response(
'admin/publications/publication/import.html', {
'errors': errors,
'title': 'Import publications',
'importers' : list_import_formats(),
'request': request},
RequestContext(request))
else:
if len(publications) > 1:
msg = 'Successfully added ' + str(len(publications)) + ' publications to import queue.'
else:
msg = 'Successfully added publication to import queue.'
# show message
messages.info(request, msg)
# redirect to publication listing
return HttpResponseRedirect(reverse("admin:publications_publication_changelist"))
else:
return render_to_response(
'admin/publications/publication/import.html', {
'title': 'Import publications', 'importers' : list_import_formats(),
'request': request},
RequestContext(request))
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(PublicationAdmin, self).get_urls()
my_urls = patterns('',
url(
r'import',
self.admin_site.admin_view(self.import_publications),
name='import_publications',
),
)
return my_urls + urls
class GroupAdmin(admin.ModelAdmin):
list_display = ('identifier', 'title', 'public')
class PublicationTypeAdmin(admin.ModelAdmin):
list_display = ('identifier', 'title', 'description', 'weight')
class PersonAdmin(admin.ModelAdmin):
list_display = ('family_name', 'primary_name' , 'url', 'public', 'group')
list_display_links = ('primary_name', 'family_name',)
actions = [merge_people, merge_people_by_family_name]
def merge(self, request):
if request.method == 'POST':
if request.POST.has_key("_cancel"):
return HttpResponseRedirect(reverse("admin:publications_person_changelist"))
groups_count = int(request.POST.get("groups_count", 0))
groups = []
for group_id in xrange(1, groups_count+1):
#TODO: more validation
group_entries = [ int(i.strip()) for i in request.POST.get("group%d_set" % group_id, "").strip().split(" ") ]
pivot_id = int(request.POST.get("group%d" % group_id, "-1"))
if pivot_id in group_entries and len(group_entries) > 1:
group = list(Person.objects.filter(id__in = group_entries))
pivot = filter(lambda x : x.id == pivot_id, group)[0]
publications.models.merge_people(group, pivot)
messages.info(request, "Merged %d people entries" % len(group))
elif len(group_entries) == 1:
continue
else:
groups.append(list(Person.objects.filter(id__in = group_entries)))
if len(groups) > 0:
return render_to_response('admin/publications/person/merge.html', {
'groups': groups
}, context_instance=RequestContext(request))
return HttpResponseRedirect(reverse("admin:publications_person_changelist"))
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(PersonAdmin, self).get_urls()
my_urls = patterns('',
url(
r'merge',
self.admin_site.admin_view(self.merge),
name='merge_people',
),
)
return my_urls + urls
admin.site.register(Publication, PublicationAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(PublicationType, PublicationTypeAdmin)
| 38.331707 | 241 | 0.673199 | 6,179 | 0.786332 | 0 | 0 | 0 | 0 | 0 | 0 | 2,094 | 0.26648 |
d2786b44c868c09be621a0323723a3881eb90dc7
| 7,379 |
py
|
Python
|
tests/common/helpers/dut_utils.py
|
Rancho333/sonic-mgmt
|
c73836900f83c1a66b2121563511604a7b81807a
|
[
"Apache-2.0"
] | 2 |
2020-10-15T05:54:32.000Z
|
2020-12-14T07:21:41.000Z
|
tests/common/helpers/dut_utils.py
|
Rancho333/sonic-mgmt
|
c73836900f83c1a66b2121563511604a7b81807a
|
[
"Apache-2.0"
] | 14 |
2021-08-04T05:50:21.000Z
|
2021-12-14T10:06:38.000Z
|
tests/common/helpers/dut_utils.py
|
Rancho333/sonic-mgmt
|
c73836900f83c1a66b2121563511604a7b81807a
|
[
"Apache-2.0"
] | 7 |
2021-07-28T03:24:41.000Z
|
2022-03-07T01:44:20.000Z
|
import logging
from collections import defaultdict
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import get_host_visible_vars
from tests.common.utilities import wait_until
CONTAINER_CHECK_INTERVAL_SECS = 1
CONTAINER_RESTART_THRESHOLD_SECS = 180
logger = logging.getLogger(__name__)
def is_supervisor_node(inv_files, hostname):
"""Check if the current node is a supervisor node in case of multi-DUT.
    @param inv_files: List of inventory file paths. In tests, you can get it
                      from get_inventory_files in tests.common.utilities
@param hostname: hostname as defined in the inventory
Returns:
Currently, we are using 'card_type' in the inventory to make the decision. If 'card_type' for the node is defined in
    the inventory, and it is 'supervisor', then return True, else return False. In the future, we could
    change this logic to derive it from the DUT if possible.
"""
dut_vars = get_host_visible_vars(inv_files, hostname)
if 'card_type' in dut_vars and dut_vars['card_type'] == 'supervisor':
return True
return False
def is_frontend_node(inv_files, hostname):
"""Check if the current node is a frontend node in case of multi-DUT.
    @param inv_files: List of inventory file paths. In tests, you can get it
                      from get_inventory_files in tests.common.utilities
@param hostname: hostname as defined in the inventory
Returns:
True if it is not any other type of node. Currently, the only other type of node supported is 'supervisor'
node. If we add more types of nodes, then we need to exclude them from this method as well.
"""
return not is_supervisor_node(inv_files, hostname)
def is_container_running(duthost, container_name):
"""Decides whether the container is running or not
@param duthost: Host DUT.
@param container_name: Name of a container.
Returns:
Boolean value. True represents the container is running
"""
running_containers = duthost.shell(r"docker ps -f 'status=running' --format \{\{.Names\}\}")['stdout_lines']
return container_name in running_containers
def check_container_state(duthost, container_name, should_be_running):
"""Determines whether a container is in the expected state (running/not running)
@param duthost: Host DUT.
@param container_name: Name of container.
@param should_be_running: Boolean value.
Returns:
This function will return True if the container was in the expected state.
Otherwise, it will return False.
"""
is_running = is_container_running(duthost, container_name)
return is_running == should_be_running
def is_hitting_start_limit(duthost, container_name):
"""Checks whether the container can not be restarted is due to start-limit-hit.
@param duthost: Host DUT.
    @param container_name: Name of a container.
Returns:
If start limitation was hit, then this function will return True. Otherwise
it returns False.
"""
service_status = duthost.shell("sudo systemctl status {}.service | grep 'Active'".format(container_name))
for line in service_status["stdout_lines"]:
if "start-limit-hit" in line:
return True
return False
def clear_failed_flag_and_restart(duthost, container_name):
"""Clears the failed flag of a container and restart it.
@param duthost: Host DUT.
@param container_name: name of a container.
Returns:
None
"""
logger.info("{} hits start limit and clear reset-failed flag".format(container_name))
duthost.shell("sudo systemctl reset-failed {}.service".format(container_name))
duthost.shell("sudo systemctl start {}.service".format(container_name))
restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS,
CONTAINER_CHECK_INTERVAL_SECS,
check_container_state, duthost, container_name, True)
pytest_assert(restarted, "Failed to restart container '{}' after reset-failed was cleared".format(container_name))
def get_group_program_info(duthost, container_name, group_name):
"""Gets program names, running status and their pids by analyzing the command
output of "docker exec <container_name> supervisorctl status". Program name
    here represents a program which is part of the group <group_name>
Args:
duthost: Hostname of DUT.
        container_name: A string showing the container name.
        group_name: A string showing the group name.
Returns:
A dictionary where keys are the program names and values are their running
status and pids.
"""
group_program_info = defaultdict(list)
program_name = None
program_status = None
program_pid = None
program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
for program_info in program_list["stdout_lines"]:
if program_info.find(group_name) != -1:
program_name = program_info.split()[0].split(':')[1].strip()
program_status = program_info.split()[1].strip()
if program_status in ["EXITED", "STOPPED", "STARTING"]:
program_pid = -1
else:
program_pid = int(program_info.split()[3].strip(','))
group_program_info[program_name].append(program_status)
group_program_info[program_name].append(program_pid)
if program_pid != -1:
logger.info("Found program '{}' in the '{}' state with pid {}"
.format(program_name, program_status, program_pid))
return group_program_info
def get_program_info(duthost, container_name, program_name):
"""Gets program running status and its pid by analyzing the command
output of "docker exec <container_name> supervisorctl status"
Args:
duthost: Hostname of DUT.
        container_name: A string showing the container name.
        program_name: A string showing the process name.
Return:
Program running status and its pid.
"""
program_status = None
program_pid = -1
program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
for program_info in program_list["stdout_lines"]:
if program_info.find(program_name) != -1:
program_status = program_info.split()[1].strip()
if program_status == "RUNNING":
program_pid = int(program_info.split()[3].strip(','))
break
if program_pid != -1:
logger.info("Found program '{}' in the '{}' state with pid {}"
.format(program_name, program_status, program_pid))
return program_status, program_pid
def get_disabled_container_list(duthost):
"""Gets the container/service names which are disabled.
Args:
duthost: Host DUT.
Return:
A list includes the names of disabled containers/services
"""
disabled_containers = []
container_status, succeeded = duthost.get_feature_status()
pytest_assert(succeeded, "Failed to get status ('enabled'|'disabled') of containers. Exiting...")
for container_name, status in container_status.items():
if "disabled" in status:
disabled_containers.append(container_name)
return disabled_containers
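# Illustrative use in a test, mirroring the wait_until call above (the container
# name "snmp" is an assumption):
#   if not wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS,
#                     check_container_state, duthost, "snmp", True):
#       clear_failed_flag_and_restart(duthost, "snmp")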
| 40.322404 | 126 | 0.69359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,865 | 0.523784 |
d279c195499d050dd18e9f8e03c43e9d1fc1fd2d
| 7,716 |
py
|
Python
|
pyy1/.pycharm_helpers/python_stubs/-1550516950/_ctypes.py
|
pyy1988/pyy_test1
|
6bea878409e658aa87441384419be51aaab061e7
|
[
"Apache-2.0"
] | null | null | null |
pyy1/.pycharm_helpers/python_stubs/-1550516950/_ctypes.py
|
pyy1988/pyy_test1
|
6bea878409e658aa87441384419be51aaab061e7
|
[
"Apache-2.0"
] | null | null | null |
pyy1/.pycharm_helpers/python_stubs/-1550516950/_ctypes.py
|
pyy1988/pyy_test1
|
6bea878409e658aa87441384419be51aaab061e7
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
# module _ctypes
# from /usr/lib/python3.5/lib-dynload/_ctypes.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
""" Create and manipulate C compatible data types in Python. """
# no imports
# Variables with simple values
FUNCFLAG_CDECL = 1
FUNCFLAG_PYTHONAPI = 4
FUNCFLAG_USE_ERRNO = 8
FUNCFLAG_USE_LASTERROR = 16
RTLD_GLOBAL = 256
RTLD_LOCAL = 0
_cast_addr = 140388692655680
_memmove_addr = 140388724844976
_memset_addr = 140388724996464
_string_at_addr = 140388692647104
_wstring_at_addr = 140388692653280
__version__ = '1.1.0'
# functions
def addressof(C_instance): # real signature unknown; restored from __doc__
"""
addressof(C instance) -> integer
Return the address of the C instance internal buffer
"""
return 0
def alignment(C_type): # real signature unknown; restored from __doc__
"""
alignment(C type) -> integer
alignment(C instance) -> integer
Return the alignment requirements of a C instance
"""
return 0
def buffer_info(*args, **kwargs): # real signature unknown
""" Return buffer interface information """
pass
def byref(C_instance, offset=0): # real signature unknown; restored from __doc__
"""
byref(C instance[, offset=0]) -> byref-object
Return a pointer lookalike to a C instance, only usable
as function argument
"""
pass
def call_cdeclfunction(*args, **kwargs): # real signature unknown
pass
def call_function(*args, **kwargs): # real signature unknown
pass
def dlclose(*args, **kwargs): # real signature unknown
""" dlclose a library """
pass
def dlopen(name, flag, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
""" dlopen(name, flag={RTLD_GLOBAL|RTLD_LOCAL}) open a shared library """
pass
def dlsym(*args, **kwargs): # real signature unknown
""" find symbol in shared library """
pass
def get_errno(*args, **kwargs): # real signature unknown
pass
def pointer(*args, **kwargs): # real signature unknown
pass
def POINTER(*args, **kwargs): # real signature unknown
pass
def PyObj_FromPtr(*args, **kwargs): # real signature unknown
pass
def Py_DECREF(*args, **kwargs): # real signature unknown
pass
def Py_INCREF(*args, **kwargs): # real signature unknown
pass
def resize(*args, **kwargs): # real signature unknown
""" Resize the memory buffer of a ctypes instance """
pass
def set_errno(*args, **kwargs): # real signature unknown
pass
def sizeof(C_type): # real signature unknown; restored from __doc__
"""
sizeof(C type) -> integer
sizeof(C instance) -> integer
Return the size in bytes of a C instance
"""
return 0
def _unpickle(*args, **kwargs): # real signature unknown
pass
# classes
class ArgumentError(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
class Array(_CData):
""" XXX to be provided """
def __delitem__(self, *args, **kwargs): # real signature unknown
""" Delete self[key]. """
pass
def __getitem__(self, *args, **kwargs): # real signature unknown
""" Return self[key]. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __len__(self, *args, **kwargs): # real signature unknown
""" Return len(self). """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __setitem__(self, *args, **kwargs): # real signature unknown
""" Set self[key] to value. """
pass
class CFuncPtr(_CData):
""" Function Pointer """
def __bool__(self, *args, **kwargs): # real signature unknown
""" self != 0 """
pass
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
argtypes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""specify the argument types"""
errcheck = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""a function to check for errors"""
restype = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""specify the result type"""
class Structure(_CData):
""" Structure base class """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
class Union(_CData):
""" Union base class """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
class _Pointer(_CData):
""" XXX to be provided """
def __bool__(self, *args, **kwargs): # real signature unknown
""" self != 0 """
pass
def __delitem__(self, *args, **kwargs): # real signature unknown
""" Delete self[key]. """
pass
def __getitem__(self, *args, **kwargs): # real signature unknown
""" Return self[key]. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __setitem__(self, *args, **kwargs): # real signature unknown
""" Set self[key] to value. """
pass
contents = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the object this pointer points to (read-write)"""
class _SimpleCData(_CData):
""" XXX to be provided """
def __bool__(self, *args, **kwargs): # real signature unknown
""" self != 0 """
pass
def __ctypes_from_outparam__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
value = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""current value"""
# variables with complex values
_pointer_type_cache = {
None: # (!) real value is ''
None # (!) real value is ''
,
None: # (!) real value is ''
None # (!) real value is ''
,
None: None, # (!) real value is ''
}
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
| 27.459075 | 106 | 0.636729 | 4,581 | 0.593701 | 0 | 0 | 1,182 | 0.153188 | 0 | 0 | 3,796 | 0.491965 |
d279e09431c6846b49df5d7a332c49cc36e64bc9
| 1,059 |
py
|
Python
|
senseis/models/rc_action_model1.py
|
armandli/ReconChessRL
|
3f3f018fd347ee17452ef6ad725d82f2f11678c6
|
[
"MIT"
] | 4 |
2021-08-19T14:06:01.000Z
|
2021-12-24T06:34:23.000Z
|
senseis/models/rc_action_model1.py
|
captainzhu123/ReconChessRL
|
6d0de7acd7aeba0ad767e29c807ee0e6f30d95fb
|
[
"MIT"
] | 2 |
2021-09-18T08:34:01.000Z
|
2022-03-23T07:06:05.000Z
|
senseis/models/rc_action_model1.py
|
captainzhu123/ReconChessRL
|
6d0de7acd7aeba0ad767e29c807ee0e6f30d95fb
|
[
"MIT"
] | 1 |
2021-09-18T08:30:23.000Z
|
2021-09-18T08:30:23.000Z
|
import torch
from torch import nn
from senseis.torch_modules.activation import relu_activation
from senseis.torch_modules.residual_layer import ResidualLayer1DV5, ResidualLayer2DV3
# Dueling Q Model
class RCActionModel1(nn.Module):
def __init__(self, csz, row_sz, col_sz, a_sz):
super(RCActionModel1, self).__init__()
self.clayers = nn.Sequential(
ResidualLayer2DV3(csz, 24, 3, relu_activation, nn.BatchNorm2d),
ResidualLayer2DV3(24, 48, 3, relu_activation, nn.BatchNorm2d),
)
self.alayers = nn.Sequential(
ResidualLayer1DV5(48 * row_sz * col_sz, 4096, relu_activation, nn.LayerNorm),
nn.Linear(4096, a_sz),
)
self.vlayers = nn.Sequential(
ResidualLayer1DV5(48 * row_sz * col_sz, 256, relu_activation, nn.LayerNorm),
nn.Linear(256, 1),
)
def forward(self, x):
x = self.clayers(x)
x = torch.flatten(x, start_dim=1)
v = x
v = self.vlayers(v)
a = x
a = self.alayers(a)
mean_a = torch.mean(a, dim=1, keepdim=True)
q = v + (a - mean_a)
return q
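# Minimal usage sketch (illustrative; the channel, board and action sizes are
# assumptions, and the residual layers are assumed to preserve spatial size, as
# the 48 * row_sz * col_sz flatten implies):
#   model = RCActionModel1(csz=13, row_sz=8, col_sz=8, a_sz=4096)
#   q = model(torch.zeros(16, 13, 8, 8))  # dueling Q-values, shape (16, 4096)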
| 31.147059 | 85 | 0.674221 | 857 | 0.809254 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.016053 |
d27a9b4239643f2c105ea4c3f170a4d1c43a0714
| 5,506 |
py
|
Python
|
win/msgbox.py
|
Zxynine/fusion360-thomasa88lib
|
c6570c9adffd06ec7b762032326805d13a99982e
|
[
"MIT"
] | 4 |
2021-11-19T17:24:44.000Z
|
2022-03-18T13:17:21.000Z
|
win/msgbox.py
|
Zxynine/fusion360-thomasa88lib
|
c6570c9adffd06ec7b762032326805d13a99982e
|
[
"MIT"
] | 2 |
2021-04-15T05:47:55.000Z
|
2021-12-07T17:36:53.000Z
|
win/msgbox.py
|
Zxynine/fusion360-thomasa88lib
|
c6570c9adffd06ec7b762032326805d13a99982e
|
[
"MIT"
] | 1 |
2021-12-04T23:07:53.000Z
|
2021-12-04T23:07:53.000Z
|
# Message box functions
#
# This file is part of thomasa88lib, a library of useful Fusion 360
# add-in/script functions.
#
# Copyright (c) 2020 Thomas Axelsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import adsk
import ctypes
# Must explicitly include wintypes for code to work at Fusion start-up
import ctypes.wintypes
user32 = ctypes.WinDLL('user32', use_last_error=True)
_hook_factory = ctypes.WINFUNCTYPE(ctypes.wintypes.LPARAM,
ctypes.c_int,
ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM)
# https://stackoverflow.com/a/31396340/106019
class CWPRETSTRUCT(ctypes.Structure):
_fields_ = (('lResult', ctypes.wintypes.LPARAM),
('lParam', ctypes.wintypes.LPARAM),
('wParam', ctypes.wintypes.WPARAM),
('message', ctypes.c_uint),
('hwnd', ctypes.wintypes.HWND))
LPCWPRETSTRUCT = ctypes.POINTER(CWPRETSTRUCT)
# Icons
MB_ICONERROR = 0x00000010
MB_ICONQUESTION = 0x00000020
MB_ICONWARNING = 0x00000030
MB_ICONINFORMATION = 0x00000040
# Button configurations
MB_ABORTRETRYIGNORE = 0x00000002
MB_CANCELTRYCONTINUE = 0x00000006
MB_HELP = 0x00004000
MB_OK = 0x00000000
MB_OKCANCEL = 0x00000001
MB_RETRYCANCEL = 0x00000005
MB_YESNO = 0x00000004
MB_YESNOCANCEL = 0x00000003
# Default button
MB_DEFBUTTON1 = 0x00000000
MB_DEFBUTTON2 = 0x00000100
MB_DEFBUTTON3 = 0x00000200
MB_DEFBUTTON4 = 0x00000300
# Button IDs
IDOK = 1
IDCANCEL = 2
IDABORT = 3
IDRETRY = 4
IDIGNORE = 5
IDYES = 6
IDNO = 7
IDTRYAGAIN = 10
IDCONTINUE = 11
WM_INITDIALOG = 0x0110
WH_CALLWNDPROCRET = 12
user32.CallNextHookEx.restype = ctypes.wintypes.LPARAM
user32.CallNextHookEx.argtypes = (ctypes.wintypes.HHOOK,
ctypes.c_int,
ctypes.wintypes.WPARAM,
ctypes.wintypes.LPARAM)
user32.UnhookWindowsHookEx.argtypes = (ctypes.wintypes.HHOOK,)
user32.UnhookWindowsHookEx.restype = ctypes.wintypes.BOOL
user32.SetWindowsHookExW.restype = ctypes.wintypes.HHOOK
user32.SetWindowsHookExW.argtypes = (ctypes.c_int,
_hook_factory,
ctypes.wintypes.HINSTANCE,
ctypes.wintypes.DWORD)
user32.GetDlgItem.argtypes = (ctypes.wintypes.HWND, ctypes.c_int)
user32.GetDlgItem.restype = ctypes.wintypes.HWND
user32.GetActiveWindow.restype = ctypes.wintypes.HWND
def custom_msgbox(text, caption, dlg_type, label_map={}):
'''Wrapper for MessageBox that allows setting button labels (Windows-only)
https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-messageboxw
'''
win_thread_id = ctypes.windll.kernel32.GetCurrentThreadId()
# This must not go out of scope as long as the hook is active
c_hook = _hook_factory(_create_hook(label_map))
hook_handle = user32.SetWindowsHookExW(WH_CALLWNDPROCRET, c_hook,
ctypes.wintypes.HINSTANCE(0),
win_thread_id)
#error = ctypes.get_last_error()
main_window = user32.GetActiveWindow()
ret = user32.MessageBoxW(main_window, text, caption, dlg_type)
if hook_handle:
user32.UnhookWindowsHookEx(hook_handle)
return ret
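# Illustrative use from an add-in (the standard MessageBox button control IDs,
# e.g. IDYES/IDNO, serve as the label_map keys):
#   answer = custom_msgbox('Delete selected bodies?', 'My Add-in',
#                          MB_YESNO | MB_ICONQUESTION,
#                          {IDYES: 'Delete', IDNO: 'Keep'})
#   if answer == IDYES:
#       ...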
def _create_hook(label_map):
def hook(n_code, w_param, l_param):
if n_code < 0:
return user32.CallNextHookEx(None, n_code, w_param, l_param)
try:
msg = ctypes.cast(l_param, LPCWPRETSTRUCT)[0]
if msg.message == WM_INITDIALOG:
buf = ctypes.create_unicode_buffer(10)
user32.GetClassNameW(msg.hwnd, buf, len(buf))
class_name = buf.value
if class_name == '#32770':
for ctl_id, label in label_map.items():
_set_dialog_ctl_text(msg.hwnd, ctl_id, label)
except Exception as e:
print(f"{NAME} Hook error:", e)
finally:
return user32.CallNextHookEx(None, n_code, w_param, l_param)
return hook
def _set_dialog_ctl_text(parent_hwnd, control_id, text):
ctl_hwnd = user32.GetDlgItem(parent_hwnd, control_id)
if ctl_hwnd:
user32.SetWindowTextW.argtypes = (ctypes.wintypes.HWND,
ctypes.wintypes.LPCWSTR)
user32.SetWindowTextW.restype = ctypes.wintypes.BOOL
user32.SetWindowTextW(ctl_hwnd, text)
| 36.463576 | 85 | 0.680349 | 286 | 0.051943 | 0 | 0 | 0 | 0 | 0 | 0 | 1,708 | 0.310207 |
d27af471667bb42fad40784d1d32e99b8d50d1f8
| 22,496 |
py
|
Python
|
eispac/core/eiscube.py
|
MJWeberg/eispac
|
8de2b282fc08da9ac66d48c396060aab6e17be70
|
[
"MIT"
] | 11 |
2021-02-18T00:24:22.000Z
|
2022-01-30T06:48:06.000Z
|
eispac/core/eiscube.py
|
MJWeberg/eispac
|
8de2b282fc08da9ac66d48c396060aab6e17be70
|
[
"MIT"
] | 23 |
2021-04-09T16:34:26.000Z
|
2021-11-09T16:55:29.000Z
|
eispac/core/eiscube.py
|
MJWeberg/eispac
|
8de2b282fc08da9ac66d48c396060aab6e17be70
|
[
"MIT"
] | 5 |
2021-04-09T16:47:27.000Z
|
2021-11-04T15:45:29.000Z
|
__all__ = ['EISCube']
import sys
import copy
import numpy as np
import astropy.units as u
from astropy.convolution import convolve, CustomKernel
from astropy.coordinates import SkyCoord
from ndcube import __version__ as ndcube_ver
from ndcube import NDCube
class EISCube(NDCube):
"""EIS Level-1 Data Cube
Subclass of NDCube. Accepts all of the standard arguments and keywords
of `ndcube.NDCube`, as well as a few EIS specific parameters.
Parameters
----------
data : `numpy.ndarray`
The array holding the actual data in this object.
wcs : `astropy.wcs.WCS`, optional
The WCS object containing the axes' information, optional only if
``data`` is an `astropy.nddata.NDData` object.
uncertainty : any type, optional
Uncertainty in the dataset. Should have an attribute uncertainty_type
that defines what kind of uncertainty is stored, for example "std"
for standard deviation or "var" for variance. A metaclass defining
such an interface is NDUncertainty - but isn’t mandatory. If the uncertainty
has no such attribute the uncertainty is stored as UnknownUncertainty.
Defaults to None.
mask : any type, optional
Mask for the dataset. Masks should follow the numpy convention
that valid data points are marked by False and invalid ones with True.
Defaults to None.
meta : dict-like object, optional
Additional meta information about the dataset. If no meta is provided
an empty collections.OrderedDict is created. Default is None.
unit : Unit-like or str, optional
Unit for the dataset. Strings that can be converted to a Unit are allowed.
Default is None.
copy : bool, optional
Indicates whether to save the arguments as copy. True copies every attribute
before saving it while False tries to save every parameter as reference.
Note however that it is not always possible to save the input as reference.
Default is False.
wavelength : array-like, optional
Numpy array with the corrected wavelength values for each location
within the EIS raster. Must have the same dimensionality as the input
data. If not given, will initialize the .wavelength property with
an array of zeros.
radcal : array-like, optional
Array of the radiometric calibration curve currently applied to the
input data cube. Required if you wish to use the .apply_radcal() and
.remove_radcal() methods
"""
# NOTE: this is based on the example given at
# https://docs.astropy.org/en/stable/nddata/subclassing.html#slicing-an-additional-property
def __init__(self, *args, **kwargs):
# Extract extra attributes, if given, and initialize them correctly.
input_wave = kwargs.pop('wavelength') if 'wavelength' in kwargs else None
if input_wave is None:
input_wave = np.zeros_like(args[0], dtype=float)
self.wavelength = input_wave
input_radcal = kwargs.pop('radcal') if 'radcal' in kwargs else 'unknown'
self._current_radcal = input_radcal
kwargs['copy'] = True # Try to ensure meta is not leaked between cutouts
super().__init__(*args, **kwargs)
@property
def wavelength(self):
"""Corrected wavelength values observed by EIS"""
return self._wavelength
@wavelength.setter
def wavelength(self, input_array):
self._wavelength = input_array
@property
def radcal(self):
"""Current radiometric calibration curve"""
return self._current_radcal
@radcal.setter
def radcal(self, input_array):
print('Error: Please use the .apply_radcal() and .remove_radcal()'
+' methods to modify or change the radiometric calibration.')
def _slice(self, item):
kwargs = super()._slice(item) # slice all normal attributes
old_meta = kwargs.pop('meta')
kwargs['meta'] = copy.deepcopy(old_meta) # no refs, please!
# The arguments for creating a new instance are saved in kwargs. So we
# need to add additional keywords with our sliced, extra properties
kwargs['wavelength'] = self.wavelength[item]
kwargs['radcal'] = self.radcal
# Update the 'mod_index' (used for exporting to .fits after fitting)
# Reminder: 'mod_index' uses a fits image axes order of [X, Y, Wave]
m_idx = copy.deepcopy(kwargs['meta']['mod_index'])
ll_wcs = kwargs['wcs'].low_level_wcs
wcs_arr_shape = ll_wcs.array_shape # axis order of [Y, X, wave]
new_ori = kwargs['wcs'].array_index_to_world(0,0,0)
ax_shape = [1, 1, 1] # true length of [Y, X, Wave] axes
if isinstance(new_ori, list):
# 3D subcube or 2D [spatial, wave] slice (one axis removed)
x1 = new_ori[-1].Tx.to('arcsec').value
y1 = new_ori[-1].Ty.to('arcsec').value
w1 = new_ori[0].to('Angstrom').value
elif isinstance(new_ori, SkyCoord):
# 2D slice at a given wavelength value or 1D spatial profile
x1 = new_ori.Tx.to('arcsec').value
y1 = new_ori.Ty.to('arcsec').value
lost_unit = ll_wcs.dropped_world_dimensions['world_axis_units']
lost_value = ll_wcs.dropped_world_dimensions['value']
w1 = u.Quantity(lost_value[0], lost_unit[0]).to('Angstrom').value
elif isinstance(new_ori, u.Quantity):
# Single spectrum at a selected location
w1 = new_ori.to('Angstrom').value
lost_units = ll_wcs.dropped_world_dimensions['world_axis_units']
lost_values = ll_wcs.dropped_world_dimensions['value']
x1 = u.Quantity(lost_values[0], lost_units[0]).to('arcsec').value
y1 = u.Quantity(lost_values[1], lost_units[1]).to('arcsec').value
# ndcube >= 2.0 drops all shallow (length 1) axes from .array_shape
# UNLESS an axis was sliced with explicit start:stop values of i:i+1
        # Unfortunately, this means there is no reliable method to identify the
# true length of each axis and which axis was dropped (if any).
# Therefore, we must resort to checking the type of each input
# slice parameter in "item" and try to manaully match axes with lengths
if ndcube_ver >= '2.0.0':
# First, extract and rearrange old shape in order of [Y, X, Wave]
old_shape = (m_idx['naxis2'], m_idx['naxis1'], m_idx['naxis3'])
ax_i = 0 # true axis index
wcs_i = 0 # sliced wcs axis index
# Note: we must take care when slicing a 2D slice of a cube
for s_i in range(len(item)):
while ax_i <= 2:
if old_shape[ax_i] == 1:
# skip previously dropped axes
ax_i += 1
else:
if isinstance(item[s_i], slice):
ax_shape[ax_i] = wcs_arr_shape[wcs_i]
wcs_i += 1
else:
ax_shape[ax_i] = 1
ax_i += 1
break
else:
# Works just fine in ndcube 1.4.2 (for all kinds of slices)
ax_shape = wcs_arr_shape
x2 = x1 + ax_shape[1]*m_idx['cdelt1']
y2 = y1 + ax_shape[0]*m_idx['cdelt2']
x_shift = round(abs(x1 - m_idx['crval1'])/m_idx['cdelt1'])
y_shift = round(abs(y1 - m_idx['crval2'])/m_idx['cdelt2'])
w_shift = round(abs(w1 - m_idx['crval3'])/m_idx['cdelt3'])
m_idx['naxis1'] = ax_shape[1] # X-axis
m_idx['naxis2'] = ax_shape[0] # Y-axis
m_idx['naxis3'] = ax_shape[2] # Wavelength axis
m_idx['crpix1'] = 1.0 - x_shift
m_idx['crpix2'] = 1.0 - y_shift
m_idx['crpix3'] = 1.0 - w_shift
m_idx['fovx'] = x2 - x1
m_idx['fovy'] = y2 - y1
m_idx['xcen'] = x1 + 0.5*(x2-x1)
m_idx['ycen'] = y1 + 0.5*(y2-y1)
kwargs['meta']['mod_index'] = m_idx
kwargs['meta']['extent_arcsec'] = [x1, x2, y1, y2] # [L, R, Top, Bot]
return kwargs # these must be returned
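    # Illustrative slicing examples (variable names such as "eis_cube" are
    # placeholders, not from the original source). _slice() above keeps the
    # corrected wavelengths, the radcal curve, and 'mod_index' consistent with
    # the cutout, so standard NDCube indexing is all that is needed:
    #
    #   cutout = eis_cube[:, 10:20, :]   # 3D subcube over a range of X steps
    #   spectrum = eis_cube[5, 7, :]     # single spectrum at one (Y, X) location
    #   print(cutout.meta['mod_index']['naxis1'], cutout.meta['extent_arcsec'])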
def crop_by_coords(self, *args, **kwargs):
"""REMOVED in NDCube 2.0"""
print('Error: crop_by_coords() was removed in NDCube 2.0. Please use'
+' the .crop() or .crop_by_values() methods instead. See the'
+' NDCube documentation for more information.', file=sys.stderr)
return None
def apply_radcal(self, input_radcal=None):
"""Apply a radiometric calibration curve (user-inputted or preflight)
Parameters
----------
input_radcal : array_like, optional
User-inputted radiometric calibration curve. If set to None, will
use the preflight radcal curve from the .meta dict. Default is None
Returns
-------
output_cube : EISCube class instance
A new EISCube class instance containing the calibrated data
"""
if input_radcal is None:
# Preflight radcal from HDF5 header file
new_radcal = self.meta['radcal']
else:
# User-inputted radcal curve
new_radcal = np.array(input_radcal)
if len(new_radcal) != self.data.shape[-1]:
print('Error: input_radcal must have the same number of elements'
+' as the last dimension in the data array.')
return self
output_radcal = new_radcal
if self.unit != u.photon:
if str(self.radcal) == 'unknown':
print('Error: Data currently has an unknown radcal applied.'
+' Unable to apply new calibration.')
return self
elif np.all(self.radcal == new_radcal):
print('Error: input_radcal is identical to current radcal.'
+' No calculation is required.')
return self
else:
print('Warning: Data currently has a different radcal applied.'
+' Old calibration curve will be removed.')
new_radcal = new_radcal/self.radcal
new_data = self.data.copy()*new_radcal
new_errs = self.uncertainty.array.copy()*new_radcal
new_meta = copy.deepcopy(self.meta)
new_meta['mod_index']['bunit'] = 'erg / (cm2 s sr)'
new_meta['notes'].append('Applied radcal to convert photon counts to intensity')
# wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()
output_cube = EISCube(new_data, wcs=self.wcs, uncertainty=new_errs,
wavelength=self.wavelength, radcal=output_radcal,
meta=new_meta, unit='erg / (cm2 s sr)',
# mask=self.mask, missing_axes=wcs_mask)
mask=self.mask)
return output_cube
def remove_radcal(self):
"""Remove the applied radiometric calibration and convert data to counts
Returns
-------
output_cube : EISCube class instance
A new EISCube class instance containing the photon count data
"""
if self.unit == u.photon:
print('Error: Data is already in units of photon counts.'
+' No calculation required.')
return self
elif str(self.radcal) == 'unknown':
print('Error: Data currently has an unknown radcal applied.'
+' Unable to remove calibration.')
return self
new_data = self.data.copy()/self.radcal
new_errs = self.uncertainty.array.copy()/self.radcal
new_meta = copy.deepcopy(self.meta)
new_meta['mod_index']['bunit'] = 'photon'
new_meta['notes'].append('Removed radcal to convert intensity to photon counts')
# wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()
output_cube = EISCube(new_data, wcs=self.wcs, uncertainty=new_errs,
wavelength=self.wavelength, radcal=None,
meta=new_meta, unit='photon',
# mask=self.mask, missing_axes=wcs_mask)
mask=self.mask)
return output_cube
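    # Illustrative usage (assumed variable names): converting between photon
    # counts and calibrated intensity with the two methods above. Calling
    # apply_radcal() with no argument uses the preflight curve stored in
    # self.meta['radcal']; "my_curve" below is a user-supplied placeholder.
    #
    #   counts_cube = intensity_cube.remove_radcal()
    #   recal_cube = counts_cube.apply_radcal()           # preflight radcal
    #   custom_cube = counts_cube.apply_radcal(my_curve)  # user-supplied curve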
def sum_spectra(self, wave_range=None, units=u.Angstrom):
"""Sum the data along the spectral axis.
Parameters
----------
wave_range : list of ints, floats, or Quantity instances
Wavelength range to sum over. Values can be input as either
[min, max] or [center, half width]. Units can be specified using
either Astropy units instances or by inputting a pair of ints or
floats and then also using the "units" keyword. If wave_range is set
to None, then entire spectra will be summed over. Default is None.
units : str or Quantity instance
Units to be used for the wavelength range if wave_range is given a
list of ints or floats. Will be ignored if either wave_range is None
or is given a list with Astropy units. Default is 'Angstrom'.
Returns
-------
output_cube : NDCube class instance
A new NDCube class instance containing the summed data
"""
if wave_range is None:
# Sum over entire wavelength axis and return an NDCube
try:
new_wcs = self.wcs.dropaxis(0)
            except Exception:
new_wcs = copy.deepcopy(self[:,:,0].wcs)
sum_data = np.sum(self.data, axis=2)
new_meta = copy.deepcopy(self.meta)
new_meta['notes'].append('Summed over entire wavelength axis.')
return NDCube(sum_data, new_wcs, meta=new_meta)
# Validate input wavelength range
if isinstance(wave_range, (list, tuple)):
use_range = [0, 0]
range_units = ['unknown', 'unknown']
print('Summing EISCube spectra over a select wavelength range.')
if len(wave_range) != 2:
print('Error: invalid number of wave_range values. Please input'
+' a list or tuple with exactly two elements.',
file=sys.stderr)
return None
else:
print('Error: invalid wave_range type. Please input either None or'
+' a list (or tuple) with two elements.', file=sys.stderr)
return None
for w in range(2):
if isinstance(wave_range[w], u.Quantity):
# Parse an astropy.units.Quantity and convert as needed
# Note: this will overwrite any inputs to the "units" kwarg
if wave_range[w].unit == u.pix:
use_range[w] = wave_range[w].value
range_units[w] = u.pix
elif wave_range[w].unit.physical_type == 'length':
use_range[w] = wave_range[w].to('Angstrom').value
range_units[w] = u.Angstrom
else:
print('Error: invalid wavelength unit. Please input a pixel'
+' or length unit.', file=sys.stderr)
return None
else:
# Assume default or user inputted units (still convert if needed)
input_units = u.Unit(units)
if input_units == u.pix:
use_range[w] = float(wave_range[w])
range_units[w] = u.pix
elif input_units.physical_type == 'length':
u_scale = input_units.to('Angstrom')
use_range[w] = float(wave_range[w])*u_scale
range_units[w] = u.Angstrom
else:
print('Error: invalid wavelength unit. Please input a pixel'
+' or length unit.', file=sys.stderr)
return None
# Check for consistent units
if range_units[0] != range_units[1]:
print('Error: mismatched units. Please input the same units for'
+' both wave_range elements or use the "units" keyword',
file=sys.stderr)
return None
# If given values of [center, half width], compute the actual range
if use_range[1] < use_range[0]:
temp_center = use_range[0]
temp_half_wid = use_range[1]
use_range[0] = temp_center - temp_half_wid
use_range[1] = temp_center + temp_half_wid
# Get indices to be summed over
w_indices = [0, -1]
        if range_units[0] == u.pix:
            for w in range(2):
                # Round pixel values to the nearest whole index
                w_indices[w] = int(round(use_range[w]))
elif range_units[0] == u.Angstrom:
# Find the closest pixel location on the average wavelength axis
try:
# Note: the corrected wavelength has units of [Angstrom]
w_coords = np.mean(self.wavelength, axis=(0,1))
except KeyError:
print('Error: missing or invalid corrected wavelength array.')
return None
for w in range(2):
abs_w_diff = np.abs(w_coords - use_range[w])
w_indices[w] = np.argmin(abs_w_diff)
try:
new_wcs = self.wcs.dropaxis(0)
        except Exception:
new_wcs = copy.deepcopy(self[:,:,0].wcs)
sum_data = np.sum(self.data[:,:,w_indices[0]:w_indices[1]+1], axis=2)
new_meta = copy.deepcopy(self.meta)
new_meta['notes'].append('Summed wavelength axis over the range of '
+str(use_range)+' '+str(range_units[0]))
return NDCube(sum_data, new_wcs, meta=new_meta)
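    # Illustrative usage (assumed wavelength/pixel values): wave_range accepts
    # [min, max] or [center, half width], with units given either as astropy
    # Quantities or via the "units" keyword (Angstrom by default).
    #
    #   img = eis_cube.sum_spectra()                                  # whole axis
    #   img = eis_cube.sum_spectra(wave_range=[195.0, 195.3])         # Angstroms
    #   img = eis_cube.sum_spectra(wave_range=[5*u.pix, 12*u.pix])    # pixel indices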
def smooth_cube(self, width=3, **kwargs):
"""Smooth the data along one or more spatial axes.
Parameters
----------
width : list or single value of ints, floats, or Quantity instances
Number of pixels or angular distance to smooth over. If given a
single value, only the y-axis will be smoothed. Floats and angular
distances will be converted to the nearest whole pixel value.
If a width value is even, width + 1 will be used instead.
Default is width = 3
**kwargs : keywords or dict
Keyword arguments to be passed to the astropy.convolution.convolve()
function.
Returns
-------
output_cube : EISCube class instance
A new EISCube class instance containing the smoothed data
"""
# Validate input width
num_dims = len(self.dimensions)
wid_list = [1]*num_dims # NB: a width of 1 results in no smoothing
if isinstance(width, (list, tuple)):
# Note: we assume the last dim is always wavelength
wid_list[0] = width[0]
if num_dims > 2:
wid_list[1] = width[1]
print('Warning: smoothing over the x-axis can yield unexpected'
+' results due to the time interval between observations.'
+' Use with care.')
if len(width) >= num_dims:
print('Warning: smoothing over the wavelength axis is not'
+' supported. Only widths for the Y & X axes will be used')
elif isinstance(width, (int, float, u.Quantity)):
wid_list[0] = width # Only smooth along y-axis
else:
print('Error: invalid width data type. Please input an int, float,'
+' or astropy.units.Quantity instance', file=sys.stderr)
return None
coord_ax = ['y', 'x', 'w']
for w in range(len(wid_list)-1):
# Parse a astropy.units.Quantity and convert to units of pixels
if isinstance(wid_list[w], u.Quantity):
if wid_list[w].unit == u.pix:
wid_list[w] = wid_list[w].value
elif not wid_list[w].unit.physical_type == 'angle':
print('Error: invalid width unit. Please input a pixel or'
+' angular unit.', file=sys.stderr)
return None
else:
try:
# Note: y & x scales are in units of [arcsec]/[pixel]
ax_scale = self.meta['pointing'][coord_ax[w]+'_scale']
except KeyError:
print('Error: missing '+coord_ax[w]+'-axis scale.')
return None
angular_wid_str = str(wid_list[w])
wid_list[w] = wid_list[w].to('arcsec').value / ax_scale
print('Note: on the '+coord_ax[w]+'-axis, '+angular_wid_str
+' is equivalent to '+str(wid_list[w])+' pixels.')
# Round to nearest pixel and add 1 to even values
wid_list[w] = int(round(wid_list[w]))
if wid_list[w] % 2 == 0:
wid_list[w] = wid_list[w] + 1
# Create smoothing kernel with normalized weights (i.e. sum to 1)
# Note: Using a 2D or 3D kernel allows us to smooth everything at once
sm_weights = np.ones(wid_list) / (wid_list[0]*wid_list[1])
sm_kernel = CustomKernel(sm_weights)
# Calculate smoothed data and uncertainty values
sm_data = convolve(self.data, sm_kernel, **kwargs)
if self.uncertainty is not None:
sm_errs = np.sqrt(convolve(self.uncertainty.array**2,
sm_kernel, **kwargs))
else:
            sm_errs = None
sm_data_mask = np.logical_or(np.isnan(sm_data), sm_data < 0)
# Pack everything up in a new EISCube
old_radcal = self.radcal
new_meta = copy.deepcopy(self.meta)
new_meta['notes'].append('Smoothed using pixel widths of '+str(wid_list))
# wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()
output_cube = EISCube(sm_data, wcs=self.wcs, uncertainty=sm_errs,
wavelength=self.wavelength, radcal=old_radcal,
meta=new_meta, unit=self.unit,
# mask=sm_data_mask, missing_axes=wcs_mask)
mask=sm_data_mask)
return output_cube
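    # Illustrative usage (assumed widths): width may be a single value (Y-axis
    # only) or a list for the Y & X axes, in pixels or as angular Quantities;
    # even widths are bumped up to the next odd number before smoothing.
    #
    #   sm_cube = eis_cube.smooth_cube(width=5)                   # 5-pixel boxcar in Y
    #   sm_cube = eis_cube.smooth_cube(width=[3, 3*u.arcsec])     # Y in pixels, X in arcsec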
| 46.00409 | 95 | 0.581526 | 22,238 | 0.988443 | 0 | 0 | 539 | 0.023958 | 0 | 0 | 10,693 | 0.475287 |
d27bad13c9c160040228fa36a65d4924909e6f0d
| 5,380 |
py
|
Python
|
stylee/comments/serializers.py
|
jbaek7023/Stylee-API
|
ff0397ba2dc1ed17ff22c33f80eef5d13e6ae097
|
[
"MIT"
] | 1 |
2020-03-06T00:34:39.000Z
|
2020-03-06T00:34:39.000Z
|
stylee/comments/serializers.py
|
jbaek7023/Stylee-API
|
ff0397ba2dc1ed17ff22c33f80eef5d13e6ae097
|
[
"MIT"
] | null | null | null |
stylee/comments/serializers.py
|
jbaek7023/Stylee-API
|
ff0397ba2dc1ed17ff22c33f80eef5d13e6ae097
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from profiles.serializers import UserRowSerializer
from .models import Comment
User = get_user_model()
# content, user
def create_comment_serializer(model_type='outfit', id=None, parent_id=None, user=None):
class CommentCreateSerializer(serializers.ModelSerializer):
class Meta:
model=Comment
fields = [
'id',
# 'user',
# 'content_type',
# 'object_id',
'content',
'created_at',
'parent',
]
def __init__(self, *args, **kwargs):
self.model_type = model_type
self.id = id
self.parent_obj = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() ==1:
self.parent_obj = parent_qs.first()
return super(CommentCreateSerializer, self).__init__(*args, **kwargs)
def validate(self, data):
model_type = self.model_type # coming from __init__
model_qs = ContentType.objects.filter(model=model_type)
if not model_qs.exists() or model_qs.count() != 1:
raise serializers.ValidationError("This is not a valid content type")
SomeModel = model_qs.first().model_class()
obj_qs = SomeModel.objects.filter(id=self.id)
            if not obj_qs.exists() or obj_qs.count() != 1:
                raise serializers.ValidationError("This is not a valid id for this content type")
return data
def create(self, validated_data):
content = validated_data.get("content")
if user:
main_user = user
else:
main_user = User.objects.all().first()
model_type = self.model_type
id = self.id
parent_obj = self.parent_obj
comment = Comment.objects.create_by_model_type(
model_type=model_type,
id=id,
                user=main_user, # main_user itself?
content=content,
parent_obj=parent_obj,
)
return comment
return CommentCreateSerializer
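# Illustrative usage (hypothetical view code, not part of this module): the
# factory above returns a serializer class bound to one content object, e.g.
# from a DRF view's get_serializer_class():
#
#   SerializerClass = create_comment_serializer(
#       model_type='outfit', id=42, parent_id=None, user=request.user)
#   serializer = SerializerClass(data=request.data)
#   if serializer.is_valid():
#       serializer.save()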
class CommentSerializer(serializers.ModelSerializer):
reply_count = serializers.SerializerMethodField()
is_owner = serializers.SerializerMethodField()
child = serializers.SerializerMethodField()
user = UserRowSerializer(read_only=True)
class Meta:
model = Comment
fields = (
'id',
'user',
# 'content_type',
# 'object_id',
'content',
'created_at',
# 'parent',
'reply_count',
'is_owner',
'child',
)
def get_reply_count(self, obj):
if obj.is_parent:
return obj.children().count()
return 0
def get_is_owner(self, obj):
if obj.user:
return obj.user == self.context['request'].user
return False
def get_child(self, obj):
if obj.is_parent:
if obj.children():
return CommentChildSerializer(obj.children().first(), context=self.context).data
return None
return None
# class CommentsOnPostSerializer(serializers.ModelSerializer):
# reply_count = serializers.SerializerMethodField()
# user = UserRowSerializer(read_only=True)
#
# class Meta:
# model = Comment
# fields = (
# 'id',
# 'user',
# 'content',
# 'created_at',
# 'reply_count',
# )
#
# def get_reply_count(self, obj):
# if obj.is_parent:
# return obj.children().count()
# return 0
class CommentChildSerializer(serializers.ModelSerializer):
user = UserRowSerializer(read_only=True)
is_owner = serializers.SerializerMethodField()
class Meta:
model = Comment
fields = (
'id',
'user',
'content',
'created_at',
'is_owner',
)
def get_is_owner(self, obj):
if(obj.user):
return obj.user == self.context['request'].user
return False
class CommentDetailSerializer(serializers.ModelSerializer):
replies = serializers.SerializerMethodField()
is_owner = serializers.SerializerMethodField()
user = UserRowSerializer(read_only=True)
class Meta:
model = Comment
fields = (
'id',
'user',
'content',
'created_at',
'replies',
'is_owner',
)
read_only_fields = (
)
def get_replies(self, obj):
if obj.is_parent:
return CommentChildSerializer(obj.children(), many=True, context=self.context).data
return None
def get_is_owner(self, obj):
if(obj.user):
return obj.user == self.context['request'].user
return False
class CommentEditSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = (
'id',
'content',
'created_at',
)
| 29.723757 | 96 | 0.561152 | 4,473 | 0.831413 | 0 | 0 | 0 | 0 | 0 | 0 | 949 | 0.176394 |
d27bf22d6bf897be8eacbdeb8156e0811b013b5d
| 3,497 |
py
|
Python
|
stressTest/stressTestPV.py
|
bhill-slac/epics-stress-tests
|
bf895cdf84e3ef16819204fbf49f2dd54c9473fb
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
stressTest/stressTestPV.py
|
bhill-slac/epics-stress-tests
|
bf895cdf84e3ef16819204fbf49f2dd54c9473fb
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
stressTest/stressTestPV.py
|
bhill-slac/epics-stress-tests
|
bf895cdf84e3ef16819204fbf49f2dd54c9473fb
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#!/usr/bin/env python3
class stressTestPV:
def __init__( self, pvName ):
self._pvName = pvName
self._tsValues = {} # Dict of collected values, keys are float timestamps
self._tsRates = {} # Dict of collection rates, keys are int secPastEpoch values
self._tsMissRates = {} # Dict of missed count rates, keys are int secPastEpoch values
        self._timeoutRates = {} # Dict of timeout rates, keys are int secPastEpoch values
        self._tsTimeouts = {} # Dict of timeout values, keys are float timestamps (needed by addTsTimeouts)
self._numMissed = 0 # Cumulative number of missed counts
self._numTimeouts = 0 # Cumulative number of timeouts
self._startTime = None # Earliest timestamp of all collected values
self._endTime = None # Latest timestamp of all collected values
# Accessors
def getName( self ):
return self._pvName
def getNumTsValues( self ):
return len(self._tsValues)
def getNumMissed( self ):
return self._numMissed
def getNumTimeouts( self ):
return self._numTimeouts
    def getEndTime( self ):
        return self._endTime
    def getStartTime( self ):
        return self._startTime
def getTsValues( self ):
return self._tsValues
def getTsRates( self ):
return self._tsRates
def getTsMissRates( self ):
return self._tsMissRates
def getTimeoutRates( self ):
return self._timeoutRates
def addTsValues( self, tsValues ):
# TODO: check for more than one value for the same timestamp
self._tsValues.update( tsValues )
def addTsTimeouts( self, tsTimeouts ):
self._tsTimeouts.update( tsTimeouts )
# stressTestPV.analyze
def analyze( self ):
( priorSec, priorValue ) = ( None, None )
( count, missed, timeouts ) = ( 0, 0, 0 )
sec = None
for timestamp in self._tsValues:
sec = int(timestamp)
if priorSec is None:
self._endTime = timestamp
self._startTime = timestamp
priorSec = sec - 1
if sec != priorSec:
if self._endTime < sec:
self._endTime = sec
self._tsRates[priorSec] = count
self._tsMissRates[priorSec] = missed
self._timeoutRates[priorSec] = timeouts
self._numMissed += missed
self._numTimeouts += timeouts
( count, missed, timeouts ) = ( 0, 0, 0 )
# Advance priorSec, filling gaps w/ zeroes
while True:
priorSec += 1
if priorSec >= sec:
break
self._tsRates[priorSec] = 0
self._tsMissRates[priorSec] = 0
self._timeoutRates[priorSec] = 0
priorSec = sec
count += 1
value = self._tsValues[timestamp]
if value is None:
timeouts += 1
continue
if priorValue is not None:
if priorValue + 1 != value:
# Keep track of miss incidents
#missed += 1
# or
# Keep track of how many we missed
missed += ( value - priorValue + 1 )
priorValue = value
if sec:
self._tsRates[sec] = count
self._tsMissRates[sec] = missed
self._timeoutRates[sec] = timeouts
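# Illustrative usage (assumed PV name and counter values): collected values are
# keyed by float timestamps; analyze() bins them into per-second rate dicts and
# treats None values as timeouts.
#
#   pv = stressTestPV('TEST:COUNTER')
#   pv.addTsValues({100.10: 1, 100.60: 2, 101.20: 4, 101.70: None})
#   pv.analyze()
#   print(pv.getTsRates(), pv.getNumMissed(), pv.getNumTimeouts())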
| 38.01087 | 98 | 0.543323 | 3,471 | 0.992565 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.179582 |
d27c6795141864bd67b93ea1ed9caca681ced3fd
| 10,246 |
py
|
Python
|
pysoundcloud/client.py
|
omarcostahamido/PySoundCloud
|
1ca53a280c77f6b5f52868adefa332c4de56858f
|
[
"MIT"
] | 4 |
2021-09-15T06:40:02.000Z
|
2022-01-16T03:31:59.000Z
|
pysoundcloud/client.py
|
AnthonyMakesStuff/PySoundCloud
|
1ca53a280c77f6b5f52868adefa332c4de56858f
|
[
"MIT"
] | 1 |
2021-04-22T04:18:42.000Z
|
2021-05-09T09:22:59.000Z
|
pysoundcloud/client.py
|
AnthonyMakesStuff/PySoundCloud
|
1ca53a280c77f6b5f52868adefa332c4de56858f
|
[
"MIT"
] | 1 |
2020-09-05T02:14:37.000Z
|
2020-09-05T02:14:37.000Z
|
import re
import requests
from typing import Union
from pysoundcloud.soundcloudplaylists import SoundCloudPlaylists
from pysoundcloud.soundcloudsearchresults import SoundCloudSearchResults
from pysoundcloud.soundcloudlikedtracks import SoundCloudLikedTracks
from pysoundcloud.soundcloudplaylist import SoundCloudPlaylist
from pysoundcloud.soundcloudtrack import SoundCloudTrack
from pysoundcloud.soundcloudrelatedtracks import SoundCloudRelatedTracks
class Client:
base_url: str = "https://api-v2.soundcloud.com/"
client_id: str = ""
"""
:var base_url: The base url for all requests to the SoundCloud API
:var client_id: The client ID to use with the SoundCloud API
"""
def __init__(self, client_id: str) -> None:
"""
Setup the SoundCloud client to interact with the API
:param client_id: Your SoundCloud client ID
:return: None
"""
self.client_id = client_id
def search(self,
query: str,
limit: int = 10,
offset: int = 0) -> Union[bool, SoundCloudSearchResults]:
"""
Search SoundCloud for the specified query
For some reason it doesn't always work and I have no clue why
:param query: The query to search for
:param limit: The number of results to return
:param offset: The start position from 0
:return: SoundCloudSearchResults, or False if response is not 200
"""
parameters = {"q": query,
"limit": limit,
"offset": offset,
"client_id": self.client_id}
url = self.base_url + "search"
response = requests.get(url, params=parameters)
if (response.status_code != 200):
print("Error: Received code {}".format(response.status_code))
return False
return SoundCloudSearchResults(response, client_id=self.client_id, parent=self)
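    # Illustrative usage (placeholder client ID and query): every method in
    # this class returns False on a non-200 response, so check the result
    # before using it.
    #
    #   client = Client("YOUR_CLIENT_ID")
    #   results = client.search("lofi beats", limit=5)
    #   if results is not False:
    #       ...  # work with the SoundCloudSearchResults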
def track(self, track_id: int) -> Union[bool, SoundCloudTrack]:
"""
Gets data about the track with the specified track_id
:param track_id: The track id to search for
:return: a SoundCloudTrack with data about the track, or False if response is not 200
"""
parameters = {"client_id": self.client_id}
url = self.base_url + "tracks/{}".format(track_id)
response = requests.get(url, params=parameters)
if (response.status_code != 200):
print("Error: Received code {}".format(response.status_code))
return False
return SoundCloudTrack(response.json(), self.client_id, parent=self)
def related(self, track_id: int, limit: int = 10, offset: int = 0) -> Union[bool, SoundCloudRelatedTracks]:
"""
Gets tracks related to the specified track_id
:param track_id: The track id to find related tracks for
:param limit: The number of tracks to find
        :param offset: The start position from 0, so offset 10 with limit 10 returns tracks 10-20
:return: SoundCloudRelatedTracks with the tracks, or False if response is not 200
"""
parameters = {"limit": limit,
"offset": offset,
"client_id": self.client_id}
url = self.base_url + "tracks/{}/related".format(track_id)
response = requests.get(url, params=parameters)
if (response.status_code != 200):
print("Error: Received code {}".format(response.status_code))
return False
return SoundCloudRelatedTracks(response.json(), self.client_id)
def playlists(self,
track_id: int,
representation: str = "mini",
limit: int = 10,
offset: int = 0) -> Union[bool, SoundCloudPlaylists]:
"""
Gets playlists containing a specified track
:param track_id: The track ID to find playlists containing
:param representation: The type of representation (either full or mini)
:param limit: The number of results to return
:param offset: The start position from 0
:return: SoundCloudPlaylists containing the track, or False if response is not 200
"""
parameters = {"representation": representation,
"limit": limit,
"offset": offset,
"client_id": self.client_id}
url = self.base_url + "tracks/{}/playlists_without_albums".format(track_id)
response = requests.get(url, params=parameters)
if (response.status_code != 200):
print("Error: Received code {}".format(response.status_code))
return False
return SoundCloudPlaylists(response.json(), self.client_id, parent=self)
def albums(self,
track_id: int,
representation: str = "mini",
limit: int = 10,
offset: int = 0) -> Union[bool, SoundCloudPlaylists]:
"""
Gets albums containing a specified track
:param track_id: The track ID to find albums containing
:param representation: The type of representation (either full or mini)
:param limit: The number of results to return
:param offset: The start position from 0
:return: SoundCloudPlaylists containing the track, or False if response is not 200
"""
parameters = {"representation": representation,
"limit": limit,
"offset": offset,
"client_id": self.client_id}
url = self.base_url + "tracks/{}/albums".format(track_id)
response = requests.get(url, params=parameters)
if (response.status_code != 200):
print("Error: Received code {}".format(response.status_code))
return False
if (len(response.json()["collection"]) == 0):
return False
return SoundCloudPlaylists(response.json(), self.client_id)
def comments(self):
"""
.. todo::
Add a function to get comments on a specific track or by a specific user
"""
pass # Todo: add comments
def web_profiles(self):
"""
.. todo::
Add a function to get the "web profiles" of a specific user
"""
pass # Todo: add web_profiles
def liked_tracks(self,
user_id: int,
limit: int = 24,
offset: int = 0) -> Union[bool, SoundCloudLikedTracks]:
"""
Gets the user's liked tracks
:param user_id: The ID of the user to find liked tracks for
:param limit: The number of results to return
:param offset: The start position from 0
:return: SoundCloudLikedTracks containing all the tracks, or False if response is not 200
"""
parameters = {"client_id": self.client_id,
"limit": limit,
"offset": offset}
url = self.base_url + "users/{}/track_likes".format(user_id)
response = requests.get(url, params=parameters)
if (response.status_code != 200):
print("Error: Received code {}".format(response.status_code))
return False
return SoundCloudLikedTracks(response, client_id=self.client_id)
def playlist(self,
playlist_id: int = None,
playlist_url: str = None,
representation: str = "full",
secret_token: str = None) -> Union[bool, SoundCloudPlaylist]:
"""
Get a playlist based on a specified playlist_id or playlist_url
:param playlist_id: The ID of the playlist
:param playlist_url: The URL of the playlist
        :param representation: The playlist representation (either full or mini)
:param secret_token: An optional secret token
:return: A SoundCloudPlaylist, or False if response is not 200
"""
if (playlist_id is None):
if (playlist_url is not None):
response = requests.get(playlist_url)
patterns = [
r'<meta property="twitter:app:url:(?:googleplay|iphone|ipad)'
r'content="soundcloud:\/\/playlists:([0-9]+)">',
r'<meta property="twitter:player" content="https:\/\/w\.soundcloud\.com\/player\/\?url=https'
r'%3(?:a|A)%2(?:f|F)%2(?:f|F)api\.soundcloud\.com%2(?:f|F)playlists%2(?:f|F)([0-9]+)',
r'<meta property="al:(?:ios|android):url" content="soundcloud:\/\/playlists:([0-9]+)">',
r'<link rel="alternate" href="android-app:\/\/com\.soundcloud\.android\/soundcloud\/'
r'playlists:([0-9]+)">',
r'<link rel="alternate" href="ios-app:\/\/336353151\/soundcloud\/playlists:([0-9]+)">'
]
for pattern in patterns:
if (playlist_id is None):
search_results = re.search(pattern,
response.text)
if (search_results is not None):
playlist_id = search_results.group(1)
if (playlist_id is None):
print("Error: Could not find the playlist id from the url \"{}\"".format(playlist_url))
return False
parameters = {"representation": representation,
"client_id": self.client_id}
if (secret_token is not None):
parameters["secret_token"] = secret_token
url = self.base_url + "playlists/{}".format(playlist_id)
response = requests.get(url, params=parameters)
if (response.status_code != 200):
print("Error: Received code {}".format(response.status_code))
return False
return SoundCloudPlaylist(response.json(),
self.client_id,
parent=self)
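    # Illustrative usage (placeholder URL and id): when only a URL is supplied,
    # the playlist id is scraped from the page markup using the patterns above.
    #
    #   pl = client.playlist(playlist_id=123456789)
    #   pl = client.playlist(playlist_url="https://soundcloud.com/some_user/sets/some_playlist")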
| 44.547826 | 119 | 0.576127 | 9,776 | 0.954128 | 0 | 0 | 0 | 0 | 0 | 0 | 4,352 | 0.424751 |
d27cc7e2f11f688e99e9542aba655008056fb669
| 859 |
py
|
Python
|
rojak-analyzer/generate_stopwords.py
|
pyk/rojak
|
0dd69efedb58ee5d951e1a43cdfa65b60f8bb7c7
|
[
"BSD-3-Clause"
] | 107 |
2016-10-02T05:54:42.000Z
|
2021-08-05T00:20:51.000Z
|
rojak-analyzer/generate_stopwords.py
|
pyk/rojak
|
0dd69efedb58ee5d951e1a43cdfa65b60f8bb7c7
|
[
"BSD-3-Clause"
] | 134 |
2016-10-02T21:21:08.000Z
|
2016-12-27T02:46:34.000Z
|
rojak-analyzer/generate_stopwords.py
|
pyk/rojak
|
0dd69efedb58ee5d951e1a43cdfa65b60f8bb7c7
|
[
"BSD-3-Clause"
] | 54 |
2016-10-02T08:47:56.000Z
|
2020-03-08T00:56:03.000Z
|
# Run this script to create stopwords.py based on stopwords.txt
import json
def generate(input_txt, output_py):
# Read line by line
txt_file = open(input_txt)
words = set([])
for raw_line in txt_file:
line = raw_line.strip()
# Skip empty line
if len(line) < 1: continue
# Skip comments
if line[0] == '#': continue
# Collect the stopwords
words.add(line)
# Dump the array to a file
output = open(output_py, 'w')
output.write('# DO NOT EDIT THIS FILE!\n')
output.write('# Edit stopwords.txt, generate this file again via ')
output.write('generate_stopwords.py\n')
output.write('stopwords = set(%s)' % (json.dumps(sorted(words),
indent=4)))
output.close()
txt_file.close()
if __name__ == '__main__':
generate('stopwords.txt', 'stopwords.py')
| 29.62069 | 71 | 0.622817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.389988 |
d27d80f790828621c13dad6b6615e88b5261c7f1
| 33,166 |
py
|
Python
|
bcbio/structural/cnvkit.py
|
YTLogos/bcbio-nextgen
|
f964a25ab74a31551273b7e50518f3451c90f473
|
[
"MIT"
] | 1 |
2019-08-29T07:55:48.000Z
|
2019-08-29T07:55:48.000Z
|
bcbio/structural/cnvkit.py
|
YTLogos/bcbio-nextgen
|
f964a25ab74a31551273b7e50518f3451c90f473
|
[
"MIT"
] | null | null | null |
bcbio/structural/cnvkit.py
|
YTLogos/bcbio-nextgen
|
f964a25ab74a31551273b7e50518f3451c90f473
|
[
"MIT"
] | null | null | null |
"""Copy number detection with CNVkit with specific support for targeted sequencing.
http://cnvkit.readthedocs.org
"""
import copy
import math
import operator
import os
import sys
import tempfile
import subprocess
import pybedtools
import numpy as np
import toolz as tz
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import bedutils, effects, ploidy, population, vcfutils
from bcbio.structural import annotate, shared, plot
def run(items, background=None):
"""Detect copy number variations from batched set of samples using CNVkit.
"""
if not background: background = []
return _cnvkit_by_type(items, background)
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "cnvkit"))
def _cnvkit_by_type(items, background):
"""Dispatch to specific CNVkit functionality based on input type.
"""
if len(items + background) == 1:
return _run_cnvkit_single(items[0])
elif vcfutils.get_paired_phenotype(items[0]):
return _run_cnvkit_cancer(items, background)
else:
return _run_cnvkit_population(items, background)
def _associate_cnvkit_out(ckouts, items, is_somatic=False):
"""Associate cnvkit output with individual items.
"""
assert len(ckouts) == len(items)
out = []
for ckout, data in zip(ckouts, items):
ckout = copy.deepcopy(ckout)
ckout["variantcaller"] = "cnvkit"
if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]):
ckout = _add_seg_to_output(ckout, data)
ckout = _add_gainloss_to_output(ckout, data)
ckout = _add_segmetrics_to_output(ckout, data)
ckout = _add_variantcalls_to_output(ckout, data, is_somatic)
# ckout = _add_coverage_bedgraph_to_output(ckout, data)
ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data)
if "svplots" in dd.get_tools_on(data):
ckout = _add_plots_to_output(ckout, data)
if "sv" not in data:
data["sv"] = []
data["sv"].append(ckout)
out.append(data)
return out
def _run_cnvkit_single(data, background=None):
"""Process a single input file with BAM or uniform background.
"""
if not background:
background = []
ckouts = _run_cnvkit_shared([data], background)
if not ckouts:
return [data]
else:
assert len(ckouts) == 1
return _associate_cnvkit_out(ckouts, [data])
def _run_cnvkit_cancer(items, background):
"""Run CNVkit on a tumor/normal pair.
"""
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
normal_data = [x for x in items if dd.get_sample_name(x) != paired.tumor_name]
tumor_ready, normal_ready = _match_batches(paired.tumor_data, normal_data[0] if normal_data else None)
ckouts = _run_cnvkit_shared([tumor_ready], [normal_ready] if normal_ready else [])
if not ckouts:
return items
assert len(ckouts) == 1
tumor_data = _associate_cnvkit_out(ckouts, [paired.tumor_data], is_somatic=True)
return tumor_data + normal_data
def _match_batches(tumor, normal):
"""Fix batch names for shared tumor/normals to ensure matching
"""
def _get_batch(x):
b = dd.get_batch(x)
return [b] if not isinstance(b, (list, tuple)) else b
if normal:
tumor = copy.deepcopy(tumor)
normal = copy.deepcopy(normal)
cur_batch = list(set(_get_batch(tumor)) & set(_get_batch(normal)))
assert len(cur_batch) == 1, "No batch overlap: %s and %s" % (_get_batch(tumor), _get_batch(normal))
cur_batch = cur_batch[0]
tumor["metadata"]["batch"] = cur_batch
normal["metadata"]["batch"] = cur_batch
return tumor, normal
def _run_cnvkit_population(items, background):
"""Run CNVkit on a population of samples.
Tries to calculate background based on case/controls, otherwise
uses samples from the same batch as background.
"""
if background and len(background) > 0:
inputs = items
else:
inputs, background = shared.find_case_control(items)
# if we have case/control organized background or a single sample
if len(inputs) == 1 or len(background) > 0:
ckouts = _run_cnvkit_shared(inputs, background)
return _associate_cnvkit_out(ckouts, inputs) + background
# otherwise run each sample with the others in the batch as background
else:
out = []
for cur_input in items:
background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)]
ckouts = _run_cnvkit_shared([cur_input], background)
out.extend(_associate_cnvkit_out(ckouts, [cur_input]))
return out
def _get_cmd(script_name="cnvkit.py"):
return os.path.join(os.path.dirname(os.path.realpath(sys.executable)), script_name)
def _prep_cmd(cmd, tx_out_file):
"""Wrap CNVkit commands ensuring we use local temporary directories.
"""
cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd
return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
def _bam_to_outbase(bam_file, work_dir, data):
"""Convert an input BAM file into CNVkit expected output.
Handles previous non-batch cases to avoid re-calculating,
returning both new and old values:
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0]
base = os.path.join(work_dir, out_base)
return "%s-%s" % (base, batch), base
def _run_cnvkit_shared(inputs, backgrounds):
"""Shared functionality to run CNVkit, parallelizing over multiple BAM files.
"""
work_dir = _sv_workdir(inputs[0])
raw_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
background_name = dd.get_sample_name(backgrounds[0]) if backgrounds else "flat"
background_cnn = os.path.join(raw_work_dir, "%s_background.cnn" % (background_name))
ckouts = []
for cur_input in inputs:
cur_raw_work_dir = utils.safe_makedir(os.path.join(_sv_workdir(cur_input), "raw"))
out_base, out_base_old = _bam_to_outbase(dd.get_align_bam(cur_input), cur_raw_work_dir, cur_input)
if utils.file_exists(out_base_old + ".cns"):
out_base = out_base_old
ckouts.append({"cnr": "%s.cnr" % out_base,
"cns": "%s.cns" % out_base,
"back_cnn": background_cnn})
if not utils.file_exists(ckouts[0]["cns"]):
cov_interval = dd.get_coverage_interval(inputs[0])
raw_target_bed, access_bed = _get_target_access_files(cov_interval, inputs[0], work_dir)
# bail out if we ended up with no regions
if not utils.file_exists(raw_target_bed):
return {}
raw_target_bed = annotate.add_genes(raw_target_bed, inputs[0])
parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
target_bed, antitarget_bed = _cnvkit_targets(raw_target_bed, access_bed, cov_interval,
raw_work_dir, inputs[0])
samples_to_run = zip(["background"] * len(backgrounds), backgrounds) + \
zip(["evaluate"] * len(inputs), inputs)
raw_coverage_cnns = [_cnvkit_coverage(cdata, bed, itype) for itype, cdata in samples_to_run
for bed in [target_bed, antitarget_bed]]
coverage_cnns = reduce(operator.add,
[_cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, inputs + backgrounds)
for cnns in tz.groupby("bam", raw_coverage_cnns).values()])
background_cnn = _cnvkit_background(_select_background_cnns(coverage_cnns),
background_cnn, target_bed, antitarget_bed, inputs[0])
fixed_cnrs = run_multicore(_cnvkit_fix,
[(cnns, background_cnn, inputs + backgrounds) for cnns in
tz.groupby("bam", [x for x in coverage_cnns
if x["itype"] == "evaluate"]).values()],
inputs[0]["config"], parallel)
[_cnvkit_segment(cnr, cov_interval, data) for cnr, data in fixed_cnrs]
return ckouts
def _cna_has_values(fname):
with open(fname) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return True
return False
def _cnvkit_segment(cnr_file, cov_interval, data):
"""Perform segmentation and copy number calling on normalized inputs
"""
out_file = "%s.cns" % os.path.splitext(cnr_file)[0]
if not utils.file_uptodate(out_file, cnr_file):
with file_transaction(data, out_file) as tx_out_file:
if not _cna_has_values(cnr_file):
with open(tx_out_file, "w") as out_handle:
out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n")
else:
cmd = [_get_cmd(), "segment", "-p", str(dd.get_cores(data)),
"-o", tx_out_file, cnr_file]
small_vrn_files = _compatible_small_variants(data)
if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome":
cmd += ["-v", small_vrn_files[0]]
if cov_interval == "genome":
cmd += ["--threshold", "0.00001"]
# preferentially use conda installed Rscript
export_cmd = ("%s && export TMPDIR=%s && "
% (utils.get_R_exports(), os.path.dirname(tx_out_file)))
do.run(export_cmd + " ".join(cmd), "CNVkit segment")
return out_file
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items):
"""Estimate noise of a sample using a flat background.
Only used for panel/targeted data due to memory issues with whole genome
samples.
"""
if cov_interval == "genome":
return cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0]
background_file = _cnvkit_background([], background_file, target_bed, antitarget_bed, items[0])
cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground")
cns_file = _cnvkit_segment(cnr_file, cov_interval, data)
metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0]
if not utils.file_exists(metrics_file):
with file_transaction(data, metrics_file) as tx_metrics_file:
cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file]
do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics")
metrics = _read_metrics_file(metrics_file)
out = []
for cnn in cnns:
cnn["metrics"] = metrics
out.append(cnn)
return out
def _read_metrics_file(in_file):
with open(in_file) as in_handle:
header = in_handle.next().strip().split("\t")[1:]
vals = map(float, in_handle.next().strip().split("\t")[1:])
return dict(zip(header, vals))
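# Illustrative example (column names inferred from their use in
# _select_background_cnns; real `cnvkit.py metrics` output has more columns):
# for a tab-separated file with header "sample<TAB>segments<TAB>bivar" and one
# data row "S1<TAB>58<TAB>0.21", _read_metrics_file returns
# {'segments': 58.0, 'bivar': 0.21} -- the first column is dropped.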
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_fix(cnns, background_cnn, items):
"""Normalize samples, correcting sources of bias.
"""
return [_cnvkit_fix_base(cnns, background_cnn, items)]
def _cnvkit_fix_base(cnns, background_cnn, items, ext=""):
assert len(cnns) == 2, "Expected target and antitarget CNNs: %s" % cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
antitarget_cnn = [x["file"] for x in cnns if x["cnntype"] == "antitarget"][0]
data = [x for x in items if dd.get_sample_name(x) == cnns[0]["sample"]][0]
common_prefix = os.path.commonprefix([target_cnn, antitarget_cnn])
if common_prefix.endswith("."):
common_prefix = common_prefix[:-1]
out_file = "%s%s.cnr" % (common_prefix, ext)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "fix", "-o", tx_out_file, target_cnn, antitarget_cnn, background_cnn]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit fix")
return out_file, data
def _select_background_cnns(cnns):
"""Select cnns to use for background calculations.
Uses background samples in cohort, and will remove CNNs with high
on target variability. Uses (number of segments * biweight midvariance) as metric
for variability with higher numbers being more unreliable.
"""
min_for_variability_analysis = 20
pct_keep = 0.10
b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
if len(b_cnns) >= min_for_variability_analysis:
b_cnns_w_metrics = []
for b_cnn in b_cnns:
unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"]
b_cnns_w_metrics.append((unreliability, b_cnn))
b_cnns_w_metrics.sort()
to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2)
b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
return [x["file"] for x in b_cnns]
def _cnvkit_background(background_cnns, out_file, target_bed, antitarget_bed, data):
"""Calculate background reference, handling flat case with no normal sample.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(data), "-o", tx_out_file]
if len(background_cnns) == 0:
cmd += ["-t", target_bed, "-a", antitarget_bed]
else:
cmd += background_cnns
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background")
return out_file
def _cnvkit_coverage(data, bed_file, input_type):
"""Calculate coverage in a BED file for CNVkit.
"""
bam_file = dd.get_align_bam(data)
work_dir = utils.safe_makedir(os.path.join(_sv_workdir(data), "raw"))
exts = {".target.bed": ("target", "targetcoverage.cnn"),
".antitarget.bed": ("antitarget", "antitargetcoverage.cnn")}
cnntype = None
for orig, (cur_cnntype, ext) in exts.items():
if bed_file.endswith(orig):
cnntype = cur_cnntype
break
if cnntype is None:
assert bed_file.endswith(".bed"), "Unexpected BED file extension for coverage %s" % bed_file
cnntype = ""
base, base_old = _bam_to_outbase(bam_file, work_dir, data)
out_file = "%s.%s" % (base, ext)
out_file_old = "%s.%s" % (base_old, ext)
# back compatible with previous runs to avoid re-calculating
if utils.file_exists(out_file_old):
out_file = out_file_old
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "coverage", "-p", str(dd.get_cores(data)), bam_file, bed_file, "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage")
return {"itype": input_type, "file": out_file, "bam": bam_file, "cnntype": cnntype,
"sample": dd.get_sample_name(data)}
def _cnvkit_targets(raw_target_bed, access_bed, cov_interval, work_dir, data):
"""Create target and antitarget regions from target and access files.
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
basename = os.path.splitext(os.path.basename(raw_target_bed))[0]
target_bed = os.path.join(work_dir, "%s-%s.target.bed" % (basename, batch))
# back compatible with previous runs to avoid re-calculating
target_bed_old = os.path.join(work_dir, "%s.target.bed" % basename)
if utils.file_exists(target_bed_old):
target_bed = target_bed_old
if not utils.file_exists(target_bed):
with file_transaction(data, target_bed) as tx_out_file:
cmd = [_get_cmd(), "target", raw_target_bed, "--split", "-o", tx_out_file]
bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
if bin_estimates.get("target"):
cmd += ["--avg-size", str(bin_estimates["target"])]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target")
antitarget_bed = os.path.join(work_dir, "%s-%s.antitarget.bed" % (basename, batch))
antitarget_bed_old = os.path.join(work_dir, "%s.antitarget.bed" % basename)
# back compatible with previous runs to avoid re-calculating
if os.path.exists(antitarget_bed_old):
antitarget_bed = antitarget_bed_old
if not os.path.exists(antitarget_bed):
with file_transaction(data, antitarget_bed) as tx_out_file:
cmd = [_get_cmd(), "antitarget", "-g", access_bed, target_bed, "-o", tx_out_file]
bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
if bin_estimates.get("antitarget"):
cmd += ["--avg-size", str(bin_estimates["antitarget"])]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget")
return target_bed, antitarget_bed
def _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data):
"""Estimate good coverage bin sizes for target regions based on coverage.
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-%s-bin_estimate.txt" % (
os.path.splitext(os.path.basename(raw_target_bed))[0], batch))
method_map = {"genome": "wgs", "regional": "hybrid", "amplicon": "amplicon"}
if not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd("coverage_bin_size.py"), dd.get_align_bam(data),
"-m", method_map[cov_interval], "-t", raw_target_bed,
"-g", access_bed]
cmd = " ".join(cmd) + " > " + tx_out_file
try:
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage bin estimation", log_error=False)
except subprocess.CalledProcessError:
logger.info("Bin size estimate failed, using default values")
with open(tx_out_file, "w") as out_handle:
out_handle.write("Bin size estimate failed, using default values")
avg_bin_sizes = {}
estimate_map = {"On-target": "target", "Off-target": "antitarget",
"Genome": "target", "Targets (sampling)": "target"}
range_map = {("genome", "target"): (500, 1000),
("regional", "target"): (50, 267), ("regional", "antitarget"): (20000, 200000),
("amplicon", "target"): (50, 267)}
with open(out_file) as in_handle:
for line in in_handle:
if line.startswith(tuple(estimate_map.keys())):
name, depth, bin_size = line.strip().split("\t")
name = estimate_map[name.replace(":", "").strip()]
try:
bin_size = int(bin_size)
except ValueError:
bin_size = None
if bin_size and bin_size > 0:
cur_min, cur_max = range_map[(cov_interval, name)]
avg_bin_sizes[name] = max(min(bin_size, cur_max), cur_min)
return avg_bin_sizes
def _get_target_access_files(cov_interval, data, work_dir):
"""Retrieve target and access files based on the type of data to process.
pick targets, anti-targets and access files based on analysis type
http://cnvkit.readthedocs.org/en/latest/nonhybrid.html
"""
base_regions = shared.get_base_cnv_regions(data, work_dir)
target_bed = bedutils.sort_merge(base_regions, data, out_dir=work_dir)
if cov_interval == "amplicon":
return target_bed, target_bed
elif cov_interval == "genome":
return target_bed, target_bed
else:
access_file = _create_access_file(dd.get_ref_file(data), _sv_workdir(data), data)
return target_bed, access_file
def _add_seg_to_output(out, data):
"""Export outputs to 'seg' format compatible with IGV and GenePattern.
"""
out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"seg", "-o", tx_out_file, out["cns"]]
do.run(cmd, "CNVkit export seg")
out["seg"] = out_file
return out
def _add_cnr_bedgraph_and_bed_to_output(out, data):
cnr_file = out["cnr"]
bedgraph_file = cnr_file + ".bedgraph"
if not utils.file_exists(bedgraph_file):
with file_transaction(data, bedgraph_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bedgraph format")
out["cnr_bedgraph"] = bedgraph_file
bed_file = cnr_file + ".bed"
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,4,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bed format")
out["cnr_bed"] = bed_file
return out
def _compatible_small_variants(data):
"""Retrieve small variant (SNP, indel) VCFs compatible with CNVkit.
"""
    supported = set(["vardict", "freebayes", "gatk-haplotype", "mutect2"])
out = []
for v in data.get("variants", []):
vrn_file = v.get("vrn_file")
if vrn_file and v.get("variantcaller") in supported:
base, ext = utils.splitext_plus(os.path.basename(vrn_file))
if vcfutils.get_paired_phenotype(data):
out.append(vrn_file)
else:
sample_vrn_file = os.path.join(dd.get_work_dir(data), v["variantcaller"],
"%s-%s%s" % (base, dd.get_sample_name(data), ext))
sample_vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_vrn_file,
data["config"])
out.append(sample_vrn_file)
return out
def _add_variantcalls_to_output(out, data, is_somatic=False):
"""Call ploidy and convert into VCF and BED representations.
"""
call_file = "%s-call%s" % os.path.splitext(out["cns"])
gender = population.get_gender(data)
if not utils.file_exists(call_file):
with file_transaction(data, call_file) as tx_call_file:
filters = ["--filter", "cn"]
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \
filters + \
["--ploidy", str(ploidy.get_ploidy([data])),
"-o", tx_call_file, out["cns"]]
small_vrn_files = _compatible_small_variants(data)
if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]):
cmd += ["-v", small_vrn_files[0]]
if not is_somatic:
cmd += ["-m", "clonal"]
if gender and gender.lower() != "unknown":
cmd += ["--gender", gender]
if gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit call ploidy")
calls = {}
for outformat in ["bed", "vcf"]:
out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat)
calls[outformat] = out_file
if not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
outformat, "--sample-id", dd.get_sample_name(data),
"--ploidy", str(ploidy.get_ploidy([data])),
"-o", tx_out_file, call_file]
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit export %s" % outformat)
out["call_file"] = call_file
out["vrn_bed"] = annotate.add_genes(calls["bed"], data)
effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff")
out["vrn_file"] = effects_vcf or calls["vcf"]
return out
def _add_segmetrics_to_output(out, data):
"""Add metrics for measuring reliability of CNV estimates.
"""
out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics",
"--ci", "--pi",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
# Use less fine grained bootstrapping intervals for whole genome runs
if dd.get_coverage_interval(data) == "genome":
cmd += ["--alpha", "0.1", "--bootstrap", "50"]
else:
cmd += ["--alpha", "0.01", "--bootstrap", "500"]
do.run(cmd, "CNVkit segmetrics")
out["segmetrics"] = out_file
return out
def _add_gainloss_to_output(out, data):
"""Add gainloss based on genes, helpful for identifying changes in smaller genes.
"""
out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
do.run(cmd, "CNVkit gainloss")
out["gainloss"] = out_file
return out
def _add_coverage_bedgraph_to_output(out, data):
"""Add BedGraph representation of coverage to the output
"""
out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
if utils.file_exists(out_file):
out["bedgraph"] = out_file
return out
bam_file = dd.get_align_bam(data)
bedtools = config_utils.get_program("bedtools", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
cns_file = out["cns"]
bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
with file_transaction(data, out_file) as tx_out_file:
cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
"{samtools} view -b -L {bed_file} {bam_file} | "
"{bedtools} genomecov -bg -ibam - -g {bed_file} >"
"{tx_out_file}").format(**locals())
do.run(cmd, "CNVkit bedGraph conversion")
os.remove(bed_file)
out["bedgraph"] = out_file
return out
def _add_plots_to_output(out, data):
"""Add CNVkit plots summarizing called copy number values.
"""
out["plot"] = {}
diagram_plot = _add_diagram_plot(out, data)
if diagram_plot:
out["plot"]["diagram"] = diagram_plot
scatter = _add_scatter_plot(out, data)
if scatter:
out["plot"]["scatter"] = scatter
scatter_global = _add_global_scatter_plot(out, data)
if scatter_global:
out["plot"]["scatter_global"] = scatter_global
return out
def _get_larger_chroms(ref_file):
"""Retrieve larger chromosomes, avoiding the smaller ones for plotting.
"""
from scipy.cluster.vq import kmeans, vq
all_sizes = []
for c in ref.file_contigs(ref_file):
all_sizes.append(float(c.size))
all_sizes.sort()
# separate out smaller chromosomes and haplotypes with kmeans
centroids, _ = kmeans(np.array(all_sizes), 2)
idx, _ = vq(np.array(all_sizes), centroids)
little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
little_sizes = [x[1] for x in little_sizes]
# create one more cluster with the smaller, removing the haplotypes
centroids2, _ = kmeans(np.array(little_sizes), 2)
idx2, _ = vq(np.array(little_sizes), centroids2)
little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
little_sizes2 = [x[1] for x in little_sizes2]
# get any chromosomes not in haplotype/random bin
thresh = max(little_sizes2)
larger_chroms = []
for c in ref.file_contigs(ref_file):
if c.size > thresh:
larger_chroms.append(c.name)
return larger_chroms
def _remove_haplotype_chroms(in_file, data):
"""Remove shorter haplotype chromosomes from cns/cnr files for plotting.
"""
larger_chroms = set(_get_larger_chroms(dd.get_ref_file(data)))
out_file = "%s-chromfilter%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("chromosome") or line.split()[0] in larger_chroms:
out_handle.write(line)
return out_file
def _add_global_scatter_plot(out, data):
out_file = "%s-scatter_global.pdf" % os.path.splitext(out["cnr"])[0]
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, cnr]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit global scatter plot")
return out_file
def _add_scatter_plot(out, data):
out_file = "%s-scatter.pdf" % os.path.splitext(out["cnr"])[0]
priority_bed = dd.get_svprioritize(data)
if not priority_bed:
return None
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, "-l",
priority_bed, cnr]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit scatter plot")
return out_file
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
def _add_diagram_plot(out, data):
out_file = "%s-diagram.pdf" % os.path.splitext(out["cnr"])[0]
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
if _cnx_is_empty(cnr) or _cnx_is_empty(cns):
return None
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "diagram", "-s", cns,
"-o", tx_out_file, cnr]
gender = population.get_gender(data)
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit diagram plot")
return out_file
def _create_access_file(ref_file, out_dir, data):
"""Create genome access file for CNVlib to define available genomic regions.
XXX Can move to installation/upgrade process if too slow here.
"""
out_file = os.path.join(out_dir, "%s-access.bed" % os.path.splitext(os.path.basename(ref_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "access",
ref_file, "-s", "10000", "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "Create CNVkit access file")
return out_file
# ## Theta support
def export_theta(ckout, data):
"""Provide updated set of data with export information for TheTA2 input.
"""
cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome")
cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome")
out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "Export CNVkit calls as inputs for TheTA2")
ckout["theta_input"] = out_file
return ckout
| 46.910891 | 117 | 0.637792 | 0 | 0 | 0 | 0 | 204 | 0.006151 | 0 | 0 | 7,525 | 0.226889 |
d27d939d04d3b5253e8adbdbae402c28328bae05 | 31862 | py | Python |
pyexchange/exchange2010/__init__.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | ["Apache-2.0"] | 128 | 2015-01-11T10:29:40.000Z | 2021-06-25T05:27:45.000Z |
pyexchange/exchange2010/__init__.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | ["Apache-2.0"] | 52 | 2015-01-02T15:24:28.000Z | 2020-08-07T04:49:49.000Z |
pyexchange/exchange2010/__init__.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | ["Apache-2.0"] | 96 | 2015-01-02T15:16:20.000Z | 2021-12-25T01:37:46.000Z |
"""
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
from ..base.calendar import BaseExchangeCalendarEvent, BaseExchangeCalendarService, ExchangeEventOrganizer, ExchangeEventResponse
from ..base.folder import BaseExchangeFolder, BaseExchangeFolderService
from ..base.soap import ExchangeServiceSOAP
from ..exceptions import FailedExchangeException, ExchangeStaleChangeKeyException, ExchangeItemNotFoundException, ExchangeInternalServerTransientErrorException, ExchangeIrresolvableConflictException, InvalidEventType
from ..compat import BASESTRING_TYPES
from . import soap_request
from lxml import etree
from copy import deepcopy
from datetime import date
import warnings
log = logging.getLogger("pyexchange")
class Exchange2010Service(ExchangeServiceSOAP):
def calendar(self, id="calendar"):
return Exchange2010CalendarService(service=self, calendar_id=id)
def mail(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def contacts(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def folder(self):
return Exchange2010FolderService(service=self)
def _send_soap_request(self, body, headers=None, retries=2, timeout=30, encoding="utf-8"):
headers = {
"Accept": "text/xml",
"Content-type": "text/xml; charset=%s " % encoding
}
return super(Exchange2010Service, self)._send_soap_request(body, headers=headers, retries=retries, timeout=timeout, encoding=encoding)
def _check_for_errors(self, xml_tree):
super(Exchange2010Service, self)._check_for_errors(xml_tree)
self._check_for_exchange_fault(xml_tree)
def _check_for_exchange_fault(self, xml_tree):
# If the request succeeded, we should see a <m:ResponseCode>NoError</m:ResponseCode>
# somewhere in the response. if we don't (a) see the tag or (b) it doesn't say "NoError"
# then flip out
response_codes = xml_tree.xpath(u'//m:ResponseCode', namespaces=soap_request.NAMESPACES)
if not response_codes:
raise FailedExchangeException(u"Exchange server did not return a status response", None)
# The full (massive) list of possible return responses is here.
# http://msdn.microsoft.com/en-us/library/aa580757(v=exchg.140).aspx
for code in response_codes:
if code.text == u"ErrorChangeKeyRequiredForWriteOperations":
# change key is missing or stale. we can fix that, so throw a special error
raise ExchangeStaleChangeKeyException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorItemNotFound":
# exchange_invite_key wasn't found on the server
raise ExchangeItemNotFoundException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorIrresolvableConflict":
# tried to update an item with an old change key
raise ExchangeIrresolvableConflictException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorInternalServerTransientError":
# temporary internal server error. throw a special error so we can retry
raise ExchangeInternalServerTransientErrorException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorCalendarOccurrenceIndexIsOutOfRecurrenceRange":
# just means some or all of the requested instances are out of range
pass
elif code.text != u"NoError":
raise FailedExchangeException(u"Exchange Fault (%s) from Exchange server" % code.text)
class Exchange2010CalendarService(BaseExchangeCalendarService):
def event(self, id=None, **kwargs):
return Exchange2010CalendarEvent(service=self.service, id=id, **kwargs)
def get_event(self, id):
return Exchange2010CalendarEvent(service=self.service, id=id)
def new_event(self, **properties):
return Exchange2010CalendarEvent(service=self.service, calendar_id=self.calendar_id, **properties)
def list_events(self, start=None, end=None, details=False, delegate_for=None):
return Exchange2010CalendarEventList(service=self.service, calendar_id=self.calendar_id, start=start, end=end, details=details, delegate_for=delegate_for)
class Exchange2010CalendarEventList(object):
"""
Creates & Stores a list of Exchange2010CalendarEvent items in the "self.events" variable.
"""
def __init__(self, service=None, calendar_id=u'calendar', start=None, end=None, details=False, delegate_for=None):
self.service = service
self.count = 0
self.start = start
self.end = end
self.events = list()
self.event_ids = list()
self.details = details
self.delegate_for = delegate_for
# This request uses a Calendar-specific query between two dates.
body = soap_request.get_calendar_items(format=u'AllProperties', calendar_id=calendar_id, start=self.start, end=self.end, delegate_for=self.delegate_for)
response_xml = self.service.send(body)
self._parse_response_for_all_events(response_xml)
# Populate the event ID list, for convenience reasons.
for event in self.events:
self.event_ids.append(event._id)
# If we have requested all the details, basically repeat the previous 3 steps,
# but instead of start/stop, we have a list of ID fields.
if self.details:
log.debug(u'Received request for all details, retrieving now!')
self.load_all_details()
return
def _parse_response_for_all_events(self, response):
"""
This function will retrieve *most* of the event data, excluding Organizer & Attendee details
"""
items = response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if not items:
items = response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if items:
self.count = len(items)
log.debug(u'Found %s items' % self.count)
for item in items:
self._add_event(xml=soap_request.M.Items(deepcopy(item)))
else:
log.debug(u'No calendar items found with search parameters.')
return self
def _add_event(self, xml=None):
log.debug(u'Adding new event to all events list.')
event = Exchange2010CalendarEvent(service=self.service, xml=xml)
log.debug(u'Subject of new event is %s' % event.subject)
self.events.append(event)
return self
def load_all_details(self):
"""
This function will execute all the event lookups for known events.
This is intended for use when you want to have a completely populated event entry, including
Organizer & Attendee details.
"""
log.debug(u"Loading all details")
if self.count > 0:
# Now, empty out the events to prevent duplicates!
del(self.events[:])
# Send the SOAP request with the list of exchange ID values.
log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
body = soap_request.get_item(exchange_id=self.event_ids, format=u'AllProperties')
response_xml = self.service.send(body)
# Re-parse the results for all the details!
self._parse_response_for_all_events(response_xml)
return self
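# Illustrative usage sketch (editor addition): `service` is assumed to be an
# already-configured Exchange2010Service instance and the datetimes are placeholders.
#
#   events = service.calendar().list_events(start=week_start, end=week_end, details=True)
#   for event in events.events:
#       print(event.subject)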
class Exchange2010CalendarEvent(BaseExchangeCalendarEvent):
def _init_from_service(self, id):
log.debug(u'Creating new Exchange2010CalendarEvent object from ID')
body = soap_request.get_item(exchange_id=id, format=u'AllProperties')
response_xml = self.service.send(body)
properties = self._parse_response_for_get_event(response_xml)
self._update_properties(properties)
self._id = id
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def _init_from_xml(self, xml=None):
log.debug(u'Creating new Exchange2010CalendarEvent object from XML')
properties = self._parse_response_for_get_event(xml)
self._update_properties(properties)
self._id, self._change_key = self._parse_id_and_change_key_from_response(xml)
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def as_json(self):
raise NotImplementedError
def validate(self):
if self.recurrence is not None:
if not (isinstance(self.recurrence_end_date, date)):
raise ValueError('recurrence_end_date must be of type date')
elif (self.recurrence_end_date < self.start.date()):
raise ValueError('recurrence_end_date must be after start')
if self.recurrence == u'daily':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 999):
raise ValueError('recurrence_interval must be an int in the range from 1 to 999')
elif self.recurrence == u'weekly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
if self.recurrence_days is None:
raise ValueError('recurrence_days is required')
for day in self.recurrence_days.split(' '):
if day not in self.WEEKLY_DAYS:
raise ValueError('recurrence_days received unknown value: %s' % day)
elif self.recurrence == u'monthly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
elif self.recurrence == u'yearly':
pass # everything is pulled from start
else:
raise ValueError('recurrence received unknown value: %s' % self.recurrence)
super(Exchange2010CalendarEvent, self).validate()
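    # Illustrative recurring-event sketch (editor addition): `service`, the datetimes and
    # the day names are placeholders; valid day names are whatever WEEKLY_DAYS defines.
    #
    #   event = service.calendar().new_event(
    #       subject=u"Weekly stand-up", start=start, end=end,
    #       recurrence=u'weekly', recurrence_interval=1,
    #       recurrence_days=u'Monday Wednesday',
    #       recurrence_end_date=start.date() + datetime.timedelta(days=90))
    #   event.validate()  # runs the checks above; event.create() sends the invites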
def create(self):
"""
Creates an event in Exchange. ::
event = service.calendar().new_event(
subject=u"80s Movie Night",
location = u"My house",
)
event.create()
Invitations to attendees are sent out immediately.
"""
self.validate()
body = soap_request.new_event(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def resend_invitations(self):
"""
Resends invites for an event. ::
event = service.calendar().get_event(id='KEY HERE')
event.resend_invitations()
Anybody who has not declined this meeting will get a new invite.
"""
if not self.id:
raise TypeError(u"You can't send invites for an event that hasn't been created yet.")
# Under the hood, this is just an .update() but with no attributes changed.
# We're going to enforce that by checking if there are any changed attributes and bail if there are
if self._dirty_attributes:
raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
self.service.send(body)
return self
def update(self, calendar_item_update_operation_type=u'SendToAllAndSaveCopy', **kwargs):
"""
Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``.
"""
if not self.id:
raise TypeError(u"You can't update an event that hasn't been created yet.")
if 'send_only_to_changed_attendees' in kwargs:
warnings.warn(
"The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.",
DeprecationWarning,
) # 20140502
if kwargs['send_only_to_changed_attendees']:
calendar_item_update_operation_type = u'SendToChangedAndSaveCopy'
VALID_UPDATE_OPERATION_TYPES = (
u'SendToNone', u'SendOnlyToAll', u'SendOnlyToChanged',
u'SendToAllAndSaveCopy', u'SendToChangedAndSaveCopy',
)
if calendar_item_update_operation_type not in VALID_UPDATE_OPERATION_TYPES:
raise ValueError('calendar_item_update_operation_type has unknown value')
self.validate()
if self._dirty_attributes:
log.debug(u"Updating these attributes: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, self._dirty_attributes, calendar_item_update_operation_type=calendar_item_update_operation_type)
self.service.send(body)
self._reset_dirty_attributes()
else:
log.info(u"Update was called, but there's nothing to update. Doing nothing.")
return self
def cancel(self):
"""
Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting.
"""
if not self.id:
raise TypeError(u"You can't delete an event that hasn't been created yet.")
self.refresh_change_key()
self.service.send(soap_request.delete_event(self))
# TODO rsanders high - check return status to make sure it was actually sent
return None
def move_to(self, folder_id):
"""
:param str folder_id: The Calendar ID to where you want to move the event to.
Moves an event to a different folder (calendar). ::
event = service.calendar().get_event(id='KEY HERE')
event.move_to(folder_id='NEW CALENDAR KEY HERE')
"""
if not folder_id:
raise TypeError(u"You can't move an event to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move an event that hasn't been created yet.")
self.refresh_change_key()
response_xml = self.service.send(soap_request.move_event(self, folder_id))
new_id, new_change_key = self._parse_id_and_change_key_from_response(response_xml)
if not new_id:
raise ValueError(u"MoveItem returned success but requested item not moved")
self._id = new_id
self._change_key = new_change_key
self.calendar_id = folder_id
return self
def get_master(self):
"""
get_master()
        :raises InvalidEventType: When this method is called on an event that is not an Occurrence type.
This will return the master event to the occurrence.
**Examples**::
event = service.calendar().get_event(id='<event_id>')
print event.type # If it prints out 'Occurrence' then that means we could get the master.
master = event.get_master()
print master.type # Will print out 'RecurringMaster'.
"""
if self.type != 'Occurrence':
raise InvalidEventType("get_master method can only be called on a 'Occurrence' event type")
body = soap_request.get_master(exchange_id=self._id, format=u"AllProperties")
response_xml = self.service.send(body)
return Exchange2010CalendarEvent(service=self.service, xml=response_xml)
def get_occurrence(self, instance_index):
"""
get_occurrence(instance_index)
:param iterable instance_index: This should be tuple or list of integers which correspond to occurrences.
:raises TypeError: When instance_index is not an iterable of ints.
:raises InvalidEventType: When this method is called on an event that is not a RecurringMaster type.
This will return a list of occurrence events.
**Examples**::
master = service.calendar().get_event(id='<event_id>')
# The following will return the first 20 occurrences in the recurrence.
# If there are not 20 occurrences, it will only return what it finds.
occurrences = master.get_occurrence(range(1,21))
for occurrence in occurrences:
print occurrence.start
"""
if not all([isinstance(i, int) for i in instance_index]):
raise TypeError("instance_index must be an interable of type int")
if self.type != 'RecurringMaster':
raise InvalidEventType("get_occurrance method can only be called on a 'RecurringMaster' event type")
body = soap_request.get_occurrence(exchange_id=self._id, instance_index=instance_index, format=u"AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events
def conflicting_events(self):
"""
conflicting_events()
This will return a list of conflicting events.
**Example**::
event = service.calendar().get_event(id='<event_id>')
for conflict in event.conflicting_events():
print conflict.subject
"""
if not self.conflicting_event_ids:
return []
body = soap_request.get_item(exchange_id=self.conflicting_event_ids, format="AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events
def refresh_change_key(self):
body = soap_request.get_item(exchange_id=self._id, format=u"IdOnly")
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def _parse_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//m:Items/t:CalendarItem/t:ItemId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
def _parse_response_for_get_event(self, response):
result = self._parse_event_properties(response)
organizer_properties = self._parse_event_organizer(response)
if organizer_properties is not None:
if 'email' not in organizer_properties:
organizer_properties['email'] = None
result[u'organizer'] = ExchangeEventOrganizer(**organizer_properties)
attendee_properties = self._parse_event_attendees(response)
result[u'_attendees'] = self._build_resource_dictionary([ExchangeEventResponse(**attendee) for attendee in attendee_properties])
resource_properties = self._parse_event_resources(response)
result[u'_resources'] = self._build_resource_dictionary([ExchangeEventResponse(**resource) for resource in resource_properties])
result['_conflicting_event_ids'] = self._parse_event_conflicts(response)
return result
def _parse_event_properties(self, response):
property_map = {
u'subject': {
u'xpath': u'//m:Items/t:CalendarItem/t:Subject',
},
u'location':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Location',
},
u'availability':
{
u'xpath': u'//m:Items/t:CalendarItem/t:LegacyFreeBusyStatus',
},
u'start':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Start',
u'cast': u'datetime',
},
u'end':
{
u'xpath': u'//m:Items/t:CalendarItem/t:End',
u'cast': u'datetime',
},
u'html_body':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Body[@BodyType="HTML"]',
},
u'text_body':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Body[@BodyType="Text"]',
},
u'_type':
{
u'xpath': u'//m:Items/t:CalendarItem/t:CalendarItemType',
},
u'reminder_minutes_before_start':
{
u'xpath': u'//m:Items/t:CalendarItem/t:ReminderMinutesBeforeStart',
u'cast': u'int',
},
u'is_all_day':
{
u'xpath': u'//m:Items/t:CalendarItem/t:IsAllDayEvent',
u'cast': u'bool',
},
u'recurrence_end_date':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/t:EndDateRecurrence/t:EndDate',
u'cast': u'date_only_naive',
},
u'recurrence_interval':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/*/t:Interval',
u'cast': u'int',
},
u'recurrence_days':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/t:WeeklyRecurrence/t:DaysOfWeek',
},
}
result = self.service._xpath_to_dict(element=response, property_map=property_map, namespace_map=soap_request.NAMESPACES)
try:
recurrence_node = response.xpath(u'//m:Items/t:CalendarItem/t:Recurrence', namespaces=soap_request.NAMESPACES)[0]
except IndexError:
recurrence_node = None
if recurrence_node is not None:
if recurrence_node.find('t:DailyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'daily'
elif recurrence_node.find('t:WeeklyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'weekly'
elif recurrence_node.find('t:AbsoluteMonthlyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'monthly'
elif recurrence_node.find('t:AbsoluteYearlyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'yearly'
return result
def _parse_event_organizer(self, response):
organizer = response.xpath(u'//m:Items/t:CalendarItem/t:Organizer/t:Mailbox', namespaces=soap_request.NAMESPACES)
property_map = {
u'name':
{
u'xpath': u't:Name'
},
u'email':
{
u'xpath': u't:EmailAddress'
},
}
if organizer:
return self.service._xpath_to_dict(element=organizer[0], property_map=property_map, namespace_map=soap_request.NAMESPACES)
else:
return None
def _parse_event_resources(self, response):
property_map = {
u'name':
{
u'xpath': u't:Mailbox/t:Name'
},
u'email':
{
u'xpath': u't:Mailbox/t:EmailAddress'
},
u'response':
{
u'xpath': u't:ResponseType'
},
u'last_response':
{
u'xpath': u't:LastResponseTime',
u'cast': u'datetime'
},
}
result = []
resources = response.xpath(u'//m:Items/t:CalendarItem/t:Resources/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in resources:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = True
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
return result
def _parse_event_attendees(self, response):
property_map = {
u'name':
{
u'xpath': u't:Mailbox/t:Name'
},
u'email':
{
u'xpath': u't:Mailbox/t:EmailAddress'
},
u'response':
{
u'xpath': u't:ResponseType'
},
u'last_response':
{
u'xpath': u't:LastResponseTime',
u'cast': u'datetime'
},
}
result = []
required_attendees = response.xpath(u'//m:Items/t:CalendarItem/t:RequiredAttendees/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in required_attendees:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = True
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
optional_attendees = response.xpath(u'//m:Items/t:CalendarItem/t:OptionalAttendees/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in optional_attendees:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = False
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
return result
def _parse_event_conflicts(self, response):
conflicting_ids = response.xpath(u'//m:Items/t:CalendarItem/t:ConflictingMeetings/t:CalendarItem/t:ItemId', namespaces=soap_request.NAMESPACES)
return [id_element.get(u"Id") for id_element in conflicting_ids]
class Exchange2010FolderService(BaseExchangeFolderService):
def folder(self, id=None, **kwargs):
return Exchange2010Folder(service=self.service, id=id, **kwargs)
def get_folder(self, id):
"""
:param str id: The Exchange ID of the folder to retrieve from the Exchange store.
Retrieves the folder specified by the id, from the Exchange store.
**Examples**::
folder = service.folder().get_folder(id)
"""
return Exchange2010Folder(service=self.service, id=id)
def new_folder(self, **properties):
"""
new_folder(display_name=display_name, folder_type=folder_type, parent_id=parent_id)
:param str display_name: The display name given to the new folder.
:param str folder_type: The type of folder to create. Possible values are 'Folder',
'CalendarFolder', 'ContactsFolder', 'SearchFolder', 'TasksFolder'.
:param str parent_id: The parent folder where the new folder will be created.
Creates a new folder with the given properties. Not saved until you call the create() method.
**Examples**::
folder = service.folder().new_folder(
display_name=u"New Folder Name",
folder_type="CalendarFolder",
parent_id='calendar',
)
folder.create()
"""
return Exchange2010Folder(service=self.service, **properties)
def find_folder(self, parent_id):
"""
find_folder(parent_id)
:param str parent_id: The parent folder to list.
This method will return a list of sub-folders to a given parent folder.
**Examples**::
# Iterate through folders within the default 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
print(folder.display_name)
# Delete all folders within the 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
folder.delete()
"""
body = soap_request.find_folder(parent_id=parent_id, format=u'AllProperties')
response_xml = self.service.send(body)
return self._parse_response_for_find_folder(response_xml)
def _parse_response_for_find_folder(self, response):
result = []
folders = response.xpath(u'//t:Folders/t:*', namespaces=soap_request.NAMESPACES)
for folder in folders:
result.append(
Exchange2010Folder(
service=self.service,
xml=etree.fromstring(etree.tostring(folder)) # Might be a better way to do this
)
)
return result
class Exchange2010Folder(BaseExchangeFolder):
def _init_from_service(self, id):
body = soap_request.get_folder(folder_id=id, format=u'AllProperties')
response_xml = self.service.send(body)
properties = self._parse_response_for_get_folder(response_xml)
self._update_properties(properties)
return self
def _init_from_xml(self, xml):
properties = self._parse_response_for_get_folder(xml)
self._update_properties(properties)
return self
def create(self):
"""
Creates a folder in Exchange. ::
calendar = service.folder().new_folder(
display_name=u"New Folder Name",
folder_type="CalendarFolder",
parent_id='calendar',
)
calendar.create()
"""
self.validate()
body = soap_request.new_folder(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def delete(self):
"""
Deletes a folder from the Exchange store. ::
folder = service.folder().get_folder(id)
print("Deleting folder: %s" % folder.display_name)
folder.delete()
"""
if not self.id:
raise TypeError(u"You can't delete a folder that hasn't been created yet.")
body = soap_request.delete_folder(self)
response_xml = self.service.send(body) # noqa
# TODO: verify deletion
self._id = None
self._change_key = None
return None
def move_to(self, folder_id):
"""
:param str folder_id: The Folder ID of what will be the new parent folder, of this folder.
Move folder to a different location, specified by folder_id::
folder = service.folder().get_folder(id)
folder.move_to(folder_id="ID of new location's folder")
"""
if not folder_id:
raise TypeError(u"You can't move to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move a folder that hasn't been created yet.")
response_xml = self.service.send(soap_request.move_folder(self, folder_id)) # noqa
result_id, result_key = self._parse_id_and_change_key_from_response(response_xml)
if self.id != result_id:
raise ValueError(u"MoveFolder returned success but requested folder not moved")
self.parent_id = folder_id
return self
def _parse_response_for_get_folder(self, response):
FOLDER_PATH = u'//t:Folder | //t:CalendarFolder | //t:ContactsFolder | //t:SearchFolder | //t:TasksFolder'
path = response.xpath(FOLDER_PATH, namespaces=soap_request.NAMESPACES)[0]
result = self._parse_folder_properties(path)
return result
def _parse_folder_properties(self, response):
property_map = {
u'display_name': {u'xpath': u't:DisplayName'},
}
self._id, self._change_key = self._parse_id_and_change_key_from_response(response)
self._parent_id = self._parse_parent_id_and_change_key_from_response(response)[0]
self.folder_type = etree.QName(response).localname
return self.service._xpath_to_dict(element=response, property_map=property_map, namespace_map=soap_request.NAMESPACES)
def _parse_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//t:FolderId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
def _parse_parent_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//t:ParentFolderId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
| 34.745911 | 216 | 0.701211 | 30,694 | 0.963342 | 0 | 0 | 0 | 0 | 0 | 0 | 13,180 | 0.413659 |
d27e18ed16bd406812b85f4af214631d5d9da65c | 8982 | py | Python |
rnacentral_pipeline/rnacentral/r2dt/should_show.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | ["Apache-2.0"] | 1 | 2018-08-09T14:41:16.000Z | 2018-08-09T14:41:16.000Z |
rnacentral_pipeline/rnacentral/r2dt/should_show.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | ["Apache-2.0"] | 60 | 2015-02-04T16:43:53.000Z | 2022-01-27T10:28:43.000Z |
rnacentral_pipeline/rnacentral/r2dt/should_show.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright [2009-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import enum
import logging
import typing as ty
from pathlib import Path
import joblib
from more_itertools import chunked
import pandas as pd
from pypika import Table, Query
import psycopg2
import psycopg2.extras
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
LOGGER = logging.getLogger(__name__)
SOURCE_MAP = {
"crw": 0,
"ribovision": 1,
"gtrnadb": 2,
"rnase_p": 3,
"rfam": 4,
}
@enum.unique
class Attributes(enum.Enum):
SourceIndex = "source_index"
SequenceLength = "sequence_length"
DiagramSequenceLength = "diagram_sequence_length"
ModelLength = "model_length"
ModelBasepairCount = "model_basepair_count"
DiagramBps = "diagram_bps"
DiagramModelLength = "diagram_model_length"
DiagramOverlapCount = "diagram_overlap_count"
@classmethod
def model_columns(cls) -> ty.List[str]:
return [attr.column_name() for attr in cls]
def column_name(self) -> str:
return self.value
MODEL_COLUMNS: ty.List[str] = Attributes.model_columns()
def chunked_query(
ids: ty.Iterable[str], query_builder, db_url: str, chunk_size=100
) -> ty.Iterable[ty.Dict[str, ty.Any]]:
conn = psycopg2.connect(db_url)
for chunk in chunked(ids, chunk_size):
sql = str(query_builder(chunk))
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
cur.execute(sql)
for result in cur:
yield dict(result)
def fetch_modeled_data(
all_ids: ty.Iterable[str], db_url: str, chunk_size=100
) -> ty.Iterable[ty.Dict[str, ty.Any]]:
rna = Table("rna")
ss = Table("rnc_secondary_structure_layout")
sm = Table("rnc_secondary_structure_layout_models")
def build_query(ids):
return (
Query.from_(rna)
.select(
rna.upi.as_("urs"),
rna.len.as_("sequence_length"),
sm.model_source,
ss.sequence_start.as_("diagram_sequence_start"),
ss.sequence_stop.as_("diagram_sequence_stop"),
ss.basepair_count.as_("diagram_bps"),
ss.model_start.as_("diagram_model_start"),
ss.model_stop.as_("diagram_model_stop"),
sm.model_length,
sm.model_basepair_count,
ss.overlap_count.as_("diagram_overlap_count"),
)
.join(ss)
.on(ss.urs == rna.upi)
.join(sm)
.on(sm.id == ss.model_id)
.where(ss.urs.isin(ids))
)
seen: ty.Set[str] = set()
results = chunked_query(all_ids, build_query, db_url, chunk_size=chunk_size)
for result in results:
if any(v is None for v in result.values()):
continue
yield result
seen.add(result["urs"])
for urs in all_ids:
if urs not in seen:
LOGGER.warn("Missed loading %s", urs)
def infer_columns(frame: pd.DataFrame):
frame["diagram_sequence_length"] = (
frame["diagram_sequence_stop"] - frame["diagram_sequence_start"]
)
frame["diagram_model_length"] = (
frame["diagram_model_stop"] - frame["diagram_model_start"]
)
frame["source_index"] = frame.model_source.map(SOURCE_MAP)
if frame["source_index"].isnull().any():
raise ValueError("Could not build source_index for all training data")
def fetch_training_data(handle: ty.IO, db_url: str) -> pd.DataFrame:
ids = []
training = {}
for (urs, flag) in csv.reader(handle):
ids.append(urs)
if flag == "1":
training[urs] = True
elif flag == "0":
training[urs] = False
else:
raise ValueError(f"Unknown flag {flag}")
filled = []
for metadata in fetch_modeled_data(ids, db_url):
urs = metadata["urs"]
if urs not in training:
raise ValueError(f"Got an extra entry, somehow {metadata}")
metadata["valid"] = training[urs]
filled.append(metadata)
training = pd.DataFrame.from_records(filled)
infer_columns(training)
return training
def train(handle, db_url, cross_validation=5, test_size=0.4) -> RandomForestClassifier:
data = fetch_training_data(handle, db_url)
X_train, X_test, y_train, y_test = train_test_split(
data[MODEL_COLUMNS].to_numpy(), data["valid"].to_numpy(), test_size=test_size
)
clf = RandomForestClassifier(min_samples_split=5)
scores = cross_val_score(clf, X_train, y_train, cv=cross_validation)
LOGGER.info("%s fold cross validation scores: %s", cross_validation, scores)
clf.fit(X_train, y_train)
LOGGER.info("Test data (%f) scoring %s", test_size, clf.score(X_test, y_test))
return clf
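# Minimal training sketch (editor addition; the file name and database URL are
# placeholders). write_model() below wraps the same two steps:
#
#   with open("labelled-urs.csv") as handle:
#       clf = train(handle, "postgresql://user:password@host:5432/rnacentral")
#   joblib.dump(clf, "should-show-model.joblib")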
def from_result(clf, result) -> bool:
predictable = {}
for attribute in Attributes:
value = attribute.r2dt_result_value(result)
predictable[attribute.column_name()] = [value]
predictable = pd.DataFrame.from_records(predictable)
return clf.predict(predictable)[0]
def write(model_path: Path, handle: ty.IO, db_url: str, output: ty.IO):
model = joblib.load(model_path)
ids = [r[0] for r in csv.reader(handle)]
modeled = fetch_modeled_data(ids, db_url)
frame = pd.DataFrame.from_records(modeled)
infer_columns(frame)
predicted = model.predict(frame[MODEL_COLUMNS].to_numpy())
to_write = pd.DataFrame()
to_write["urs"] = frame["urs"]
to_write["should_show"] = predicted.astype(int)
to_write.to_csv(output, index=False)
def write_model(handle: ty.IO, db_url: str, output: Path):
joblib.dump(train(handle, db_url), output)
def write_training_data(handle: ty.IO, db_url: str, output: ty.IO):
ids = []
for row in csv.reader(handle):
ids.append(row[0])
modeled = list(fetch_modeled_data(ids, db_url))
writer = csv.DictWriter(output, fieldnames=modeled[0].keys())
writer.writeheader()
writer.writerows(modeled)
def convert_sheet(handle: ty.IO, output: ty.IO):
converted = []
for row in csv.DictReader(handle):
urs = row["urs"]
raw_should_show = row["Labeled Should show"]
if not raw_should_show:
LOGGER.info("No value for %s", urs)
            continue
raw_should_show = raw_should_show.lower()
if raw_should_show == "true":
should_show = "1"
elif raw_should_show == "false":
should_show = "0"
else:
LOGGER.warn("Unknown should show in %s", row)
continue
converted.append((urs, should_show))
converted.sort(key=lambda r: r[0])
writer = csv.writer(output)
writer.writerows(converted)
def inspect_data(data, db_url: str) -> ty.Iterable[ty.Dict[str, ty.Any]]:
def build_query(ids):
ss = Table("rnc_secondary_structure_layout")
sm = Table("rnc_secondary_structure_layout_models")
pre = Table("rnc_rna_precomputed")
return (
Query.from_(ss)
.join(sm)
.on(sm.id == ss.model_id)
.join(pre)
.on(pre.urs == sm.urs)
            .select(
                ss.urs,
                sm.model_source,
                sm.model_name,
                sm.model_so_term,
            )
.where(ss.urs.isin(ids))
.where(pre.taxid.isnotnull)
)
mapping = {d[0]: d for d in data}
seen: ty.Set[str] = set()
    results = chunked_query(list(mapping.keys()), build_query, db_url)
for result in results:
if any(v is None for v in result.values()):
continue
yield {
"urs": result["urs"],
"link": f"https://rnacentral.org/rna/{result['urs']}",
"model_source": result["model_source"],
"model_name": result["model_name"],
"model_so_term": result["model_so_term"],
"Labeled Should show": result["urs"],
}
seen.add(result["urs"])
for urs in mapping.keys():
if urs not in seen:
LOGGER.warn("Missed loading %s", urs)
def write_inspect_data(handle: ty.IO, db_url: str, output: ty.IO):
data = list(csv.reader(handle))
inspect = list(inspect_data(data, db_url))
writer = csv.DictWriter(output, fieldnames=inspect[0].keys())
writer.writeheader()
writer.writerows(inspect)
| 32.309353 | 87 | 0.638833 | 539 | 0.060009 | 3,195 | 0.355711 | 552 | 0.061456 | 0 | 0 | 1,810 | 0.201514 |
d27e557da62812d946f0019863efdd827d603a76 | 1024 | py | Python |
model.py | nitro-code/inception-api | 0ee40b1bdc7cccec8e15921ff835ce29070a66f6 | ["MIT"] | 1 | 2017-08-18T09:13:47.000Z | 2017-08-18T09:13:47.000Z |
model.py | nitroventures/inception-api | 0ee40b1bdc7cccec8e15921ff835ce29070a66f6 | ["MIT"] | null | null | null |
model.py | nitroventures/inception-api | 0ee40b1bdc7cccec8e15921ff835ce29070a66f6 | ["MIT"] | null | null | null |
import tensorflow as tf
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
import numpy as np
import h5py
model = InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None)
graph = tf.get_default_graph()
def pil2array(pillow_img):
return np.array(pillow_img.getdata(), np.float32).reshape(pillow_img.size[1], pillow_img.size[0], 3)
def predict_pil(pillow_img):
img_array = pil2array(pillow_img)
return predict_nparray(img_array)
def predict_nparray(img_as_array):
global graph
img_batch_as_array = np.expand_dims(img_as_array, axis=0)
img_batch_as_array = preprocess_input(img_batch_as_array)
with graph.as_default():
preds = model.predict(img_batch_as_array)
decoded_preds = decode_predictions(preds, top=3)[0]
predictions = [{'label': label, 'descr': description, 'prob': probability} for label,description, probability in decoded_preds]
return predictions
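# Illustrative usage sketch (editor addition; "example.jpg" is a placeholder path).
# InceptionV3 with include_top=True expects 299x299 RGB inputs, so resize before predicting.
if __name__ == "__main__":
    from PIL import Image
    example = Image.open("example.jpg").convert("RGB").resize((299, 299))
    for pred in predict_pil(example):
        print(pred['label'], pred['descr'], pred['prob'])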
| 34.133333 | 131 | 0.775391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.029297 |
d2824e51dfb32b914b5d61d7c72ec4e8a213bab5 | 4959 | py | Python |
models/recall/word2vec/static_model.py | ziyoujiyi/PaddleRec | bcddcf46e5cd8d4e6b2c5ee8d0d5521e292a2a81 | ["Apache-2.0"] | 2739 | 2020-04-28T05:12:48.000Z | 2022-03-31T16:01:49.000Z |
models/recall/word2vec/static_model.py | jiangcongxu/PaddleRec | 9a107c56af2d1ee282975bcc8edb1ad5fb7e7973 | ["Apache-2.0"] | 205 | 2020-05-14T13:29:14.000Z | 2022-03-31T13:01:50.000Z |
models/recall/word2vec/static_model.py | jiangcongxu/PaddleRec | 9a107c56af2d1ee282975bcc8edb1ad5fb7e7973 | ["Apache-2.0"] | 545 | 2020-05-14T13:19:13.000Z | 2022-03-24T07:53:05.000Z |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
from net import Word2VecLayer, Word2VecInferLayer
class StaticModel(object):
def __init__(self, config):
self.cost = None
self.metrics = {}
self.config = config
self._init_hyper_parameters()
def _init_hyper_parameters(self):
self.sparse_feature_number = self.config.get(
"hyper_parameters.sparse_feature_number")
self.sparse_feature_dim = self.config.get(
"hyper_parameters.sparse_feature_dim")
self.neg_num = self.config.get("hyper_parameters.neg_num")
self.with_shuffle_batch = self.config.get(
"hyper_parameters.with_shuffle_batch")
self.learning_rate = self.config.get(
"hyper_parameters.optimizer.learning_rate")
self.decay_steps = self.config.get(
"hyper_parameters.optimizer.decay_steps")
self.decay_rate = self.config.get(
"hyper_parameters.optimizer.decay_rate")
def create_feeds(self, is_infer=False):
if is_infer:
analogy_a = paddle.static.data(
name="analogy_a", shape=[None, 1], dtype='int64')
analogy_b = paddle.static.data(
name="analogy_b", shape=[None, 1], dtype='int64')
analogy_c = paddle.static.data(
name="analogy_c", shape=[None, 1], dtype='int64')
#analogy_d = paddle.static.data(
# name="analogy_d", shape=[None], dtype='int64')
return [analogy_a, analogy_b, analogy_c]
input_word = paddle.static.data(
name="input_word", shape=[None, 1], dtype='int64')
true_word = paddle.static.data(
name='true_label', shape=[None, 1], dtype='int64')
if self.with_shuffle_batch:
return [input_word, true_word]
neg_word = paddle.static.data(
name="neg_label", shape=[None, self.neg_num], dtype='int64')
return [input_word, true_word, neg_word]
def net(self, inputs, is_infer=False):
word2vec_model = Word2VecLayer(
self.sparse_feature_number,
self.sparse_feature_dim,
self.neg_num,
emb_name="emb",
emb_w_name="emb_w",
emb_b_name="emb_b")
true_logits, neg_logits = word2vec_model.forward(inputs)
label_ones = paddle.full(
shape=[paddle.shape(true_logits)[0], 1], fill_value=1.0)
label_zeros = paddle.full(
shape=[paddle.shape(true_logits)[0], self.neg_num], fill_value=0.0)
true_logits = paddle.nn.functional.sigmoid(true_logits)
true_xent = paddle.nn.functional.binary_cross_entropy(true_logits,
label_ones)
neg_logits = paddle.nn.functional.sigmoid(neg_logits)
neg_xent = paddle.nn.functional.binary_cross_entropy(neg_logits,
label_zeros)
cost = paddle.add(true_xent, neg_xent)
avg_cost = paddle.mean(x=cost)
self._cost = avg_cost
fetch_dict = {'loss': avg_cost}
return fetch_dict
def create_optimizer(self, strategy=None):
optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate)
# learning_rate=paddle.fluid.layers.exponential_decay(
# learning_rate=self.learning_rate,
# decay_steps=self.decay_steps,
# decay_rate=self.decay_rate,
# staircase=True))
if strategy != None:
import paddle.distributed.fleet as fleet
optimizer = fleet.distributed_optimizer(optimizer, strategy)
return optimizer
def infer_net(self, input):
#[analogy_a, analogy_b, analogy_c] = inputs
all_label = paddle.static.data(
name="all_label",
shape=[self.sparse_feature_number],
dtype='int64')
word2vec = Word2VecInferLayer(self.sparse_feature_number,
self.sparse_feature_dim, "emb")
val, pred_idx = word2vec.forward(input[0], input[1], input[2],
all_label)
fetch_dict = {'pred_idx': pred_idx}
return fetch_dict
| 40.983471 | 79 | 0.617867 | 4,213 | 0.849566 | 0 | 0 | 0 | 0 | 0 | 0 | 1,390 | 0.280298 |
d282949426f3fae8441528c5a9f321ae9b759d68 | 324 | py | Python |
transformers/transformers/data/processors/split_sentences.py | richardbaihe/segatron_aaai | 1739b667f2bee53541f00d227da8375543fe5f11 | ["MIT"] | 16 | 2020-12-22T07:35:20.000Z | 2022-02-09T19:49:02.000Z |
transformers/transformers/data/processors/split_sentences.py | richardbaihe/segatron_aaai | 1739b667f2bee53541f00d227da8375543fe5f11 | ["MIT"] | 1 | 2021-12-21T14:33:15.000Z | 2021-12-27T20:40:39.000Z |
transformers/transformers/data/processors/split_sentences.py | richardbaihe/segatron_aaai | 1739b667f2bee53541f00d227da8375543fe5f11 | ["MIT"] | 2 | 2020-12-22T08:46:01.000Z | 2021-01-09T14:50:12.000Z |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time : 2020-04-13 21:19
# @Author : Richard Bai
# @EMail : [email protected]
import nltk
import os
import json
def sentence_split(line):
sents = nltk.tokenize.sent_tokenize(line)
rnt = [sent.split() for sent in sents]
return rnt
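if __name__ == "__main__":
    # Illustrative usage (editor addition); requires the NLTK "punkt" tokenizer data.
    nltk.download("punkt", quiet=True)
    print(sentence_split("Hello world. This is a test."))
    # -> [['Hello', 'world.'], ['This', 'is', 'a', 'test.']]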
| 19.058824 | 49 | 0.608025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.429012 |
d2831bfec38a388ec3a1badd4f034aaa55b158a5 | 1661 | py | Python |
sfdata/posts/migrations/0001_initial.py | adjspecies/sfdata | 9522176c1c80e9f0aeecf77da6576e8465238383 | ["MIT"] | 1 | 2019-01-24T01:57:21.000Z | 2019-01-24T01:57:21.000Z |
sfdata/posts/migrations/0001_initial.py | adjspecies/sfdata | 9522176c1c80e9f0aeecf77da6576e8465238383 | ["MIT"] | null | null | null |
sfdata/posts/migrations/0001_initial.py | adjspecies/sfdata | 9522176c1c80e9f0aeecf77da6576e8465238383 | ["MIT"] | 1 | 2018-12-22T02:20:39.000Z | 2018-12-22T02:20:39.000Z |
# Generated by Django 2.1.4 on 2018-12-21 21:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('date', models.DateTimeField()),
('wordcount', models.IntegerField()),
('in_series', models.BooleanField()),
('views', models.IntegerField()),
('faves', models.IntegerField()),
('comments', models.IntegerField()),
('votes', models.IntegerField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Author')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='posts.Tag'),
),
]
| 33.22 | 114 | 0.526791 | 1,535 | 0.924142 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.128236 |
d28385fbbbc8e61ca535b580e9a5d1d70c77fe44 | 1361 | py | Python |
test/test_tools.py | cokelaer/sequana | da35de12b45f38b4fa488c7a15a6d9829890b44e | ["BSD-3-Clause"] | 138 | 2016-07-13T06:24:45.000Z | 2022-03-28T13:12:03.000Z |
test/test_tools.py | cokelaer/sequana | da35de12b45f38b4fa488c7a15a6d9829890b44e | ["BSD-3-Clause"] | 655 | 2016-03-10T17:33:40.000Z | 2022-03-30T16:10:45.000Z |
test/test_tools.py | cokelaer/sequana | da35de12b45f38b4fa488c7a15a6d9829890b44e | ["BSD-3-Clause"] | 39 | 2016-11-04T11:40:58.000Z | 2022-03-15T08:12:29.000Z |
from sequana.tools import bam_to_mapped_unmapped_fastq, reverse_complement, StatsBAM2Mapped
from sequana import sequana_data
from sequana.tools import bam_get_paired_distance, GZLineCounter, PairedFastQ
from sequana.tools import genbank_features_parser
def test_StatsBAM2Mapped():
data = sequana_data("test.bam", "testing")
res = StatsBAM2Mapped(data)
res.to_html()
def test_bam2fastq():
data = sequana_data("test.bam", "testing")
res = bam_to_mapped_unmapped_fastq(data)
def test_reverse_complement():
assert reverse_complement("AACCGGTTA") == 'TAACCGGTT'
def test_reverse():
from sequana.tools import reverse
assert reverse("AACCGG") == 'GGCCAA'
def test_distance():
data = sequana_data("test.bam", "testing")
distances = bam_get_paired_distance(data)
def test_gc_content():
from sequana.tools import gc_content
data = sequana_data('measles.fa', "testing")
gc_content(data, 10)['chr1']
gc_content(data, 101, circular=True)['chr1']
def test_genbank_features_parser():
data = sequana_data("JB409847.gbk")
genbank_features_parser(data)
def test_gzlinecounter():
assert len(GZLineCounter(sequana_data("test.fastq.gz"))) == 1000
def test_paired_file():
f1 = sequana_data("test.fastq.gz")
f2 = sequana_data("test.fastq.gz")
assert PairedFastQ(f1,f2).is_synchronised()
| 26.686275 | 91 | 0.739897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.137399 |
d285ef91afbd55e6c2220bf3361249aade406f01 | 16219 | py | Python |
sxl/sxl.py | giriannamalai/sxl | 37db962bbbc978053375ffcc51e298ee6f5272d8 | ["MIT"] | null | null | null |
sxl/sxl.py | giriannamalai/sxl | 37db962bbbc978053375ffcc51e298ee6f5272d8 | ["MIT"] | null | null | null |
sxl/sxl.py | giriannamalai/sxl | 37db962bbbc978053375ffcc51e298ee6f5272d8 | ["MIT"] | null | null | null |
"""
xl.py - python library to deal with *big* Excel files.
"""
from abc import ABC
from collections import namedtuple, ChainMap
from contextlib import contextmanager
import datetime
import io
from itertools import zip_longest
import os
import re
import string
import xml.etree.cElementTree as ET
from zipfile import ZipFile
# ISO/IEC 29500:2011 in Part 1, section 18.8.30
STANDARD_STYLES = {
'0' : 'General',
'1' : '0',
'2' : '0.00',
'3' : '#,##0',
'4' : '#,##0.00',
'9' : '0%',
'10' : '0.00%',
'11' : '0.00E+00',
'12' : '# ?/?',
'13' : '# ??/??',
'14' : 'mm-dd-yy',
'15' : 'd-mmm-yy',
'16' : 'd-mmm',
'17' : 'mmm-yy',
'18' : 'h:mm AM/PM',
'19' : 'h:mm:ss AM/PM',
'20' : 'h:mm',
'21' : 'h:mm:ss',
'22' : 'm/d/yy h:mm',
'37' : '#,##0 ;(#,##0)',
'38' : '#,##0 ;[Red](#,##0)',
'39' : '#,##0.00;(#,##0.00)',
'40' : '#,##0.00;[Red](#,##0.00)',
'45' : 'mm:ss',
'46' : '[h]:mm:ss',
'47' : 'mmss.0',
'48' : '##0.0E+0',
'49' : '@',
}
ExcelErrorValue = namedtuple('ExcelErrorValue', 'value')
class ExcelObj(ABC):
"""
Abstract base class for other excel objects (workbooks, worksheets, etc.)
"""
main_ns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main'
rel_ns = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'
@staticmethod
def tag_with_ns(tag, ns):
"Return XML tag with namespace that can be used with ElementTree"
return '{%s}%s' % (ns, tag)
@staticmethod
def col_num_to_letter(n):
"Return column letter for column number ``n``"
string = ""
while n > 0:
n, remainder = divmod(n - 1, 26)
string = chr(65 + remainder) + string
return string
@staticmethod
def col_letter_to_num(letter):
"Return column number for column letter ``letter``"
assert re.match(r'[A-Z]+', letter)
num = 0
for char in letter:
num = num * 26 + (ord(char.upper()) - ord('A')) + 1
return num
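    # For example (editor note): col_letter_to_num("A") == 1, col_letter_to_num("AA") == 27,
    # and col_num_to_letter(28) == "AB"; the two helpers are inverses of each other.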
class Worksheet(ExcelObj):
"""
Excel worksheet
"""
def __init__(self, workbook, name, number):
self._used_area = None
self._row_length = None
self._num_rows = None
self._num_cols = None
self.workbook = self.wb = workbook
self.name = name
self.number = number
@contextmanager
def get_sheet_xml(self):
"Get a pointer to the xml file underlying the current sheet"
tpl = 'xl/worksheets/sheet{}.xml'
with self.workbook.xls.open(tpl.format(self.number)) as f:
yield io.TextIOWrapper(f, self.workbook.encoding)
@property
def range(self):
"Return data found in range of cells"
return Range(self)
@property
def rows(self):
"Iterator that will yield every row in this sheet between start/end"
return Range(self)
def _set_dimensions(self):
"Return the 'standard' row length of each row in this worksheet"
if self.used_area == 'A1':
self._num_cols = 0
self._num_rows = 0
else:
_, end = self.used_area.split(':')
last_col, last_row = re.match(r"([A-Z]+)([0-9]+)", end).groups()
self._num_cols = self.col_letter_to_num(last_col)
self._num_rows = int(last_row)
@property
def num_cols(self):
"Return the number of standard columns in this worksheet"
if self._num_cols is None:
self._set_dimensions()
return self._num_cols
@property
def num_rows(self):
"Return the total number of rows used in this worksheet"
if self._num_rows is None:
self._set_dimensions()
return self._num_rows
@property
def used_area(self):
"Return the used area of this sheet"
if self._used_area is not None:
return self._used_area
dimension_tag = self.tag_with_ns('dimension', self.main_ns)
sheet_data_tag = self.tag_with_ns('sheetData', self.main_ns)
with self.get_sheet_xml() as sheet:
for event, elem in ET.iterparse(sheet, events=('start', 'end')):
if event == 'start':
if elem.tag == dimension_tag:
used_area = elem.get('ref')
if used_area != 'A1':
break
if elem.tag == sheet_data_tag:
if list(elem):
used_area = 'A1:A1'
break
elem.clear()
self._used_area = used_area
return used_area
def head(self, num_rows=10):
"Return first 'num_rows' from this worksheet"
return self.rows[:num_rows+1] # 1-based
def cat(self, tab=1):
"Return/yield all rows from this worksheet"
dat = self.rows[1] # 1 based!
XLRec = namedtuple('XLRec', dat[0], rename=True) # pylint: disable=C0103
for row in self.rows[1:]:
yield XLRec(*row)
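# Illustrative usage sketch (editor addition; the file name is a placeholder):
#
#   wb = Workbook("big-report.xlsx")
#   ws = wb.sheets[1]              # by 1-based index, or wb.sheets["Sheet1"] by name
#   first_rows = ws.head(5)        # first five rows as lists of cell values
#   for row in ws.rows[2:10]:      # row numbers are 1-based, as in Excel
#       pass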
class Range(ExcelObj):
"""
Excel ranges
"""
def __init__(self, ws):
self.worksheet = self.ws = ws
self.start = None
self.stop = None
self.step = None
self.colstart = None
self.colstop = None
self.colstep = None
def __len__(self):
return self.worksheet.num_rows
def __iter__(self):
with self.ws.get_sheet_xml() as xml_doc:
row_tag = self.tag_with_ns('row', self.main_ns)
c_tag = self.tag_with_ns('c', self.main_ns)
v_tag = self.tag_with_ns('v', self.main_ns)
row = []
this_row = -1
next_row = 1 if self.start is None else self.start
last_row = self.ws.num_rows + 1 if self.stop is None else self.stop
context = ET.iterparse(xml_doc, events=('start', 'end'))
context = iter(context)
event, root = next(context)
for event, elem in context:
if event == 'end':
if elem.tag == row_tag:
this_row = int(elem.get('r'))
if this_row >= last_row:
break
while next_row < this_row:
yield self._row([])
next_row += 1
if this_row == next_row:
yield self._row(row)
next_row += 1
row = []
this_row = -1
root.clear()
elif elem.tag == c_tag:
val = elem.findtext(v_tag)
if val:
# only append cells with values
cell = ['', '', '', ''] # ref, type, value, style
cell[0] = elem.get('r') # cell ref
cell[1] = elem.get('t') # cell type
if cell[1] == 's': # string
cell[2] = self.ws.workbook.strings[int(val)]
else:
cell[2] = val
cell[3] = elem.get('s') # cell style
row.append(cell)
def __getitem__(self, rng):
if isinstance(rng, slice):
if rng.start is not None:
self.start = rng.start
if rng.stop is not None:
self.stop = rng.stop
if rng.step is not None:
self.step = rng.step
matx = [_ for _ in self]
self.start = self.stop = self.step = None
return matx
elif isinstance(rng, str):
if ':' in rng:
beg, end = rng.split(':')
else:
beg = end = rng
cell_split = lambda cell: re.match(r"([A-Z]+)([0-9]+)", cell).groups()
first_col, first_row = cell_split(beg)
last_col, last_row = cell_split(end)
first_col = self.col_letter_to_num(first_col) - 1 # python addressing
first_row = int(first_row)
last_col = self.col_letter_to_num(last_col)
last_row = int(last_row)
self.start = first_row
self.stop = last_row + 1
self.colstart = first_col
self.colstop = last_col
matx = [_ for _ in self]
# reset
self.start = self.stop = self.step = None
self.colstart = self.colstop = self.colstep = None
return matx
elif isinstance(rng, int):
self.start = rng
self.stop = rng + 1
matx = [_ for _ in self]
self.start = self.stop = self.step = None
return matx
else:
raise NotImplementedError("Cannot understand request")
def __call__(self, rng):
return self.__getitem__(rng)
def _row(self, row):
lst = [None] * self.ws.num_cols
col_re = re.compile(r'[A-Z]+')
col_pos = 0
for cell in row:
# apparently, 'r' attribute is optional and some MS products don't
# spit it out. So we default to incrementing from last known col
# (or 0 if we are at the beginning) when r is not available.
if cell[0]:
col = cell[0][:col_re.match(cell[0]).end()]
col_pos = self.col_letter_to_num(col) - 1
else:
col_pos += 1
try:
style = self.ws.wb.styles[int(cell[3])]
except Exception as e:
style = ''
# convert to python value (if necessary)
celltype = cell[1]
cellvalue = cell[2]
if celltype in ('str', 's'):
lst[col_pos] = cellvalue
elif celltype == 'b':
lst[col_pos] = bool(int(cellvalue))
elif celltype == 'e':
lst[col_pos] = ExcelErrorValue(cellvalue)
elif celltype == 'bl':
lst[col_pos] = None
elif celltype == 'd' or ('d' in style and 'Red' not in style) or 'm' in style or 'y' in style:
lst[col_pos] = self.ws.wb.num_to_date(float(cellvalue))
# Lastly, default to a number
else:
lst[col_pos] = float(cellvalue)
colstart = 0 if self.colstart is None else self.colstart
colstop = self.ws.num_cols if self.colstop is None else self.colstop
return lst[colstart:colstop]
class Workbook(ExcelObj):
"""
Excel workbook
"""
def __init__(self, workbook_path, encoding='cp1252'):
self.xls = ZipFile(workbook_path)
self.encoding = encoding
self._strings = None
self._sheets = None
self._styles = None
self.date_system = self.get_date_system()
self.name = os.path.basename(workbook_path)
self.path = workbook_path
def get_date_system(self):
"Determine the date system used by the current workbook"
with self.xls.open('xl/workbook.xml') as xml_doc:
tree = ET.parse(io.TextIOWrapper(xml_doc, self.encoding))
tag = self.tag_with_ns('workbookPr', self.main_ns)
tag_element = tree.find(tag)
            if tag_element is not None and tag_element.get('date1904') == '1':
return 1904
return 1900
@property
def sheets(self):
"Return list of all sheets in workbook"
if self._sheets is not None:
return self._sheets
tag = self.tag_with_ns('sheet', self.main_ns)
ref_tag = self.tag_with_ns('id', self.rel_ns)
sheet_map = {}
with self.xls.open('xl/workbook.xml') as xml_doc:
tree = ET.parse(io.TextIOWrapper(xml_doc, self.encoding))
for sheet in tree.iter(tag):
name = sheet.get('name')
ref = sheet.get(ref_tag)
num = int(ref[3:])
sheet = Worksheet(self, name, num)
sheet_map[name] = sheet
sheet_map[num] = sheet
self._sheets = sheet_map
return self._sheets
@property
def strings(self):
"Return list of shared strings within this workbook"
if self._strings is not None:
return self._strings
# Cannot use t element (which we were doing before). See
# http://bit.ly/2J7xAPu for more info on shared strings.
tag = self.tag_with_ns('si', self.main_ns)
strings = []
with self.xls.open('xl/sharedStrings.xml') as xml_doc:
tree = ET.parse(io.TextIOWrapper(xml_doc, self.encoding))
for elem in tree.iter(tag):
strings.append(''.join(_ for _ in elem.itertext()))
self._strings = strings
return strings
@property
def styles(self):
"Return list of styles used within this workbook"
if self._styles is not None:
return self._styles
styles = []
style_tag = self.tag_with_ns('xf', self.main_ns)
numfmt_tag = self.tag_with_ns('numFmt', self.main_ns)
with self.xls.open('xl/styles.xml') as xml_doc:
tree = ET.parse(io.TextIOWrapper(xml_doc, self.encoding))
number_fmts_table = tree.find(self.tag_with_ns('numFmts', self.main_ns))
number_fmts = {}
if number_fmts_table:
for num_fmt in number_fmts_table.iter(numfmt_tag):
number_fmts[num_fmt.get('numFmtId')] = num_fmt.get('formatCode')
number_fmts.update(STANDARD_STYLES)
style_table = tree.find(self.tag_with_ns('cellXfs', self.main_ns))
if style_table:
for style in style_table.iter(style_tag):
fmtid = style.get('numFmtId')
if fmtid in number_fmts:
styles.append(number_fmts[fmtid])
self._styles = styles
return styles
def num_to_date(self, number):
"""
Return date of "number" based on the date system used in this workbook.
The date system is either the 1904 system or the 1900 system depending
on which date system the spreadsheet is using. See
http://bit.ly/2He5HoD for more information on date systems in Excel.
"""
if self.date_system == 1900:
# Under the 1900 base system, 1 represents 1/1/1900 (so we start
# with a base date of 12/31/1899).
base = datetime.datetime(1899, 12, 31)
# BUT (!), Excel considers 1900 a leap-year which it is not. As
# such, it will happily represent 2/29/1900 with the number 60, but
# we cannot convert that value to a date so we throw an error.
if number == 60:
raise ValueError("Bad date in Excel file - 2/29/1900 not valid")
# Otherwise, if the value is greater than 60 we need to adjust the
# base date to 12/30/1899 to account for this leap year bug.
elif number > 60:
base = base - datetime.timedelta(days=1)
else:
# Under the 1904 system, 1 represent 1/2/1904 so we start with a
# base date of 1/1/1904.
base = datetime.datetime(1904, 1, 1)
days = int(number)
partial_days = number - days
seconds = int(round(partial_days * 86400000.0))
seconds, milliseconds = divmod(seconds, 1000)
date = base + datetime.timedelta(days, seconds, 0, milliseconds)
if days == 0:
return date.time()
return date
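# Editor's note (added, illustrative values): under the 1900 system the serial number
# 43831.5 converts to datetime(2020, 1, 1, 12, 0, 0) -- the base is shifted to
# 1899-12-30 for serials above 60 to compensate for Excel's phantom 1900-02-29.
# A serial with days == 0 returns only the time-of-day component.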
# Some helper functions
def num2col(num):
"""Convert given column letter to an Excel column number."""
result = []
while num:
num, rem = divmod(num-1, 26)
result[:0] = string.ascii_uppercase[rem]
return ''.join(result)
def col2num(ltr):
    """Convert an Excel column letter (e.g. 'AB') to its 1-based column number."""
    num = 0
for c in ltr:
if c in string.ascii_letters:
num = num * 26 + (ord(c.upper()) - ord('A')) + 1
return num
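# Editor's sketch (added, illustrative): the two helpers above are inverses of each
# other for 1-based column numbers.
if __name__ == '__main__':
    # 'A' is column 1, 'Z' is 26, 'AA' is 27, 'AB' is 28
    assert num2col(28) == 'AB'
    assert col2num('AB') == 28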
| 35.803532 | 106 | 0.533818 | 14,693 | 0.905913 | 2,455 | 0.151366 | 5,088 | 0.313706 | 0 | 0 | 3,616 | 0.222948 |
9627bff44e51fdfda5ec4883f22ddd53286fedc6
| 5,156 |
py
|
Python
|
sdk-python/awaazde/base.py
|
ashwini-ad/awaazde-api-client-sdk
|
c966f24d1e4b11fb9b0878d7e20c80b19cc04628
|
[
"Apache-2.0"
] | null | null | null |
sdk-python/awaazde/base.py
|
ashwini-ad/awaazde-api-client-sdk
|
c966f24d1e4b11fb9b0878d7e20c80b19cc04628
|
[
"Apache-2.0"
] | null | null | null |
sdk-python/awaazde/base.py
|
ashwini-ad/awaazde-api-client-sdk
|
c966f24d1e4b11fb9b0878d7e20c80b19cc04628
|
[
"Apache-2.0"
] | null | null | null |
import logging
import urllib.parse
from .api_client import ApiClient
from .constants import APIConstants
from .exceptions import APIException
from .utils import CommonUtils
class BaseAPI(object):
"""
    BaseAPI class; all the other API classes extend it.
"""
resource_url = None
resource_cls = None
_client = None
_username = None
_password = None
_api_base_url = None
_token = None
def __init__(self, api_base_url=None, username=None, password=None):
self.api_base_url = api_base_url
self._client = ApiClient()
self._username = username
self._password = password
self._perform_auth()
super(BaseAPI, self).__init__()
self.url = self.get_url()
self._client.set_resource(self.resource_cls)
def _perform_auth(self):
response = self._client.post(self.api_base_url + "/account/login/",
json={"email": self._username, "password": self._password})
self._token = response.get('token')
def get_url(self):
return self.api_base_url + "/" + self.resource_url
def list(self, **kwargs):
"""
This will return list of resources.
"""
data = {}
if kwargs:
data = {'params': kwargs}
return self._client.get(self.url, **self._append_headers(data))
def create(self, data):
"""
This will create new object
"""
data = {'json': data}
return self._client.post(self.url, **self._append_headers(data))
def get(self, id, **kwargs):
"""
This will return the single object
"""
if not id:
raise APIException('Invalid ID or ID hasn\'t been specified')
url = "%s%s" % (self.url, id)
obj = self._client.get(url, **self._append_headers(kwargs))
return obj
def update(self, id, data):
"""
This will update the object
"""
if not id:
raise APIException('Invalid ID or ID hasn\'t been specified')
url = "%s%s/" % (self.url, id)
data = {'json': data}
return self._client.patch(url, **self._append_headers(data))
def put(self, id, data):
"""
This will update the object
"""
if not id:
raise APIException('Invalid ID or ID hasn\'t been specified')
url = "%s%s/" % (self.url, id)
data = {'json': data}
return self._client.put(url, **self._append_headers(data))
def delete(self, id, **kwargs):
"""
        This will delete the object
"""
if not id:
raise APIException('Invalid ID or ID hasn\'t been specified')
url = "%s%s/" % (self.url, id)
return self._client.delete(url, **self._append_headers(kwargs))
def delete_bulk(self, ids):
'''
given a list of ids, delete them all in one request
'''
if not ids:
raise APIException('Invalid IDs or IDs haven\'t been specified')
data = {'json' : {'ids': ids}}
return self._client.delete(self.url, **self._append_headers(data))
def _append_headers(self, data, append_content_type=True):
headers = data.get('headers', {})
if self._token:
headers["Authorization"] = "JWT " + str(self._token)
if 'content-type' not in headers and append_content_type:
headers['content-type'] = 'application/json'
data['headers'] = headers
return data
def create_bulk_in_chunks(self, data, **kwargs):
"""
        Create objects in chunks based on the given limit; defaults to DEFAULT_BULK_CREATE_LIMIT.
        :param data: Data to create, e.g. messages: [{phone_number: 8929292929, send_on: "", tag1: "tag_number1", templatelanguage: 23, language: "hi"}]
        :type data: list of dict
        :param limit: Number of objects to create in one chunked request
        :type limit: int
        :return: Response from the bulk create API
        :rtype: list of dict, e.g. [{phone_number: 8929292929, send_on: "", tag1: "tag_number1", templatelanguage: 23, language: "hi", status: "created"}]
"""
limit = kwargs.get('limit') if kwargs.get('limit') else APIConstants.DEFAULT_BULK_CREATE_LIMIT
response = []
for data_chunk in CommonUtils.process_iterable_in_chunks(data, limit):
response += self.create_bulk(data_chunk)
return response
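    # Editor's sketch (added; the subclass, URL and payload below are hypothetical, and
    # create_bulk itself is assumed to be provided by the concrete resource class --
    # it is not defined on BaseAPI):
    #   class MessageAPI(BaseAPI):
    #       resource_url = 'messages'
    #   api = MessageAPI('https://example.awaaz.de/api/v1', 'user@example.com', 'secret')
    #   created = api.create_bulk_in_chunks(messages, limit=100)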
def list_depaginated(self, params=None):
"""
        Gets all objects from the awaazde API based on the filters passed,
        following pagination until the last page.
        """
        if params is None:
            params = {}
        data = []
        response = self.list(params=params)
        if not response:
            logging.error("Error in Fetching Results")
            return data
        data.extend(response['results'])
        while response.get('next') is not None:
            # Get next page URL
            next_page_url = response['next']
            params['page'] = urllib.parse.parse_qs(urllib.parse.urlparse(next_page_url).query)['page'][0]
            # And then we request for the data on the next page
            response = self.list(params=params)
            if response:
                data.extend(response['results'])
            else:
                logging.error("Error in Fetching Results")
                break
        return data
| 33.480519 | 145 | 0.597944 | 4,979 | 0.965671 | 0 | 0 | 0 | 0 | 0 | 0 | 1,688 | 0.327386 |
96296ccad66334cb3060947522a0c3b215f8f83c
| 774 |
py
|
Python
|
configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py
|
grimoire/mmdeploy
|
e84bc30f4a036dd19cb3af854203922a91098e84
|
[
"Apache-2.0"
] | 746 |
2021-12-27T10:50:28.000Z
|
2022-03-31T13:34:14.000Z
|
configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py
|
grimoire/mmdeploy
|
e84bc30f4a036dd19cb3af854203922a91098e84
|
[
"Apache-2.0"
] | 253 |
2021-12-28T05:59:13.000Z
|
2022-03-31T18:22:25.000Z
|
configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py
|
grimoire/mmdeploy
|
e84bc30f4a036dd19cb3af854203922a91098e84
|
[
"Apache-2.0"
] | 147 |
2021-12-27T10:50:33.000Z
|
2022-03-30T10:44:20.000Z
|
_base_ = ['./rotated-detection_static.py', '../_base_/backends/tensorrt.py']
onnx_config = dict(
output_names=['dets', 'labels'],
input_shape=None,
dynamic_axes={
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
},
)
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 320, 320],
opt_shape=[1, 3, 1024, 1024],
max_shape=[1, 3, 1024, 1024])))
])
| 23.454545 | 76 | 0.432817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.198966 |
96298d33b6be77c822487f95da02b8e986a6434c
| 4,053 |
py
|
Python
|
rainy/envs/parallel_wrappers.py
|
alexmlamb/blocks_rl_gru_setup
|
fe462f79518d14f828e2c7cbf210cd105ff982f4
|
[
"Apache-2.0"
] | null | null | null |
rainy/envs/parallel_wrappers.py
|
alexmlamb/blocks_rl_gru_setup
|
fe462f79518d14f828e2c7cbf210cd105ff982f4
|
[
"Apache-2.0"
] | null | null | null |
rainy/envs/parallel_wrappers.py
|
alexmlamb/blocks_rl_gru_setup
|
fe462f79518d14f828e2c7cbf210cd105ff982f4
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from typing import Any, Iterable, Tuple
from .ext import EnvSpec
from .parallel import ParallelEnv
from ..prelude import Action, Array, State
from ..utils.rms import RunningMeanStd
class ParallelEnvWrapper(ParallelEnv[Action, State]):
def __init__(self, penv: ParallelEnv) -> None:
self.penv = penv
def close(self) -> None:
self.penv.close()
def reset(self) -> Array[State]:
return self.penv.reset()
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[State], Array[float], Array[bool], Array[Any]]:
return self.penv.step(actions)
def seed(self, seeds: Iterable[int]) -> None:
self.penv.seed(seeds)
@property
def num_envs(self) -> int:
return self.penv.num_envs
@property
def spec(self) -> EnvSpec:
return self.penv.spec
def extract(self, states: Iterable[State]) -> Array:
return self.penv.extract(states)
class FrameStackParallel(ParallelEnvWrapper):
"""Parallel version of atari_wrappers.FrameStack
"""
def __init__(self, penv: ParallelEnv, nstack: int = 4, dtype: type = np.float32) -> None:
super().__init__(penv)
idx = 0
shape = self.penv.state_dim
for dim in shape:
if dim == 1:
idx += 1
else:
break
self.shape = (nstack, *self.penv.state_dim[idx:])
self.obs = np.zeros((self.num_envs, *self.shape), dtype=dtype)
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array, Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
self.obs = np.roll(self.obs, shift=-1, axis=1)
for i, _ in filter(lambda t: t[1], enumerate(done)):
self.obs[i] = 0.0
self.obs[:, -1] = self.extract(state).squeeze()
return (self.obs, reward, done, info)
def reset(self) -> Array[State]:
self.obs.fill(0)
state = self.penv.reset()
self.obs[:, -1] = self.extract(state).squeeze()
return self.obs
@property
def state_dim(self) -> Tuple[int, ...]:
return self.shape
class NormalizeObs(ParallelEnvWrapper[Action, Array[float]]):
def __init__(self, penv: ParallelEnv, obs_clip: float = 10.) -> None:
super().__init__(penv)
self.obs_clip = obs_clip
self._rms = RunningMeanStd(shape=self.state_dim)
self.training_mode = True
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[Array[float]], Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
return self._filter_obs(state), reward, done, info
def _filter_obs(self, obs: Array[Array[float]]) -> Array[Array[float]]:
if self.training_mode:
self._rms.update(obs) # type: ignore
obs = np.clip((obs - self._rms.mean) / self._rms.std(), -self.obs_clip, self.obs_clip)
return obs
def reset(self) -> Array[Array[float]]:
obs = self.penv.reset()
return self._filter_obs(obs)
class NormalizeReward(ParallelEnvWrapper[Action, State]):
def __init__(self, penv: ParallelEnv, reward_clip: float = 10., gamma: float = 0.99) -> None:
super().__init__(penv)
self.reward_clip = reward_clip
self.gamma = gamma
self._rms = RunningMeanStd(shape=())
self.ret = np.zeros(self.num_envs)
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[State], Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
self.ret = self.ret * self.gamma + reward
self._rms.update(self.ret)
reward = np.clip(reward / self._rms.std(), -self.reward_clip, self.reward_clip)
self.ret[done] = 0.0
return state, reward, done, info
def reset(self) -> Array[State]:
self.ret = np.zeros(self.num_envs)
return self.penv.reset()
| 32.685484 | 97 | 0.603504 | 3,841 | 0.947693 | 0 | 0 | 223 | 0.055021 | 0 | 0 | 70 | 0.017271 |
962a9c50351cba1947f6e3a1a14ce2f159196743
| 1,205 |
py
|
Python
|
oldp/apps/search/templatetags/search.py
|
docsuleman/oldp
|
8dcaa8e6e435794c872346b5014945ace885adb4
|
[
"MIT"
] | 66 |
2018-05-07T12:34:39.000Z
|
2022-02-23T20:14:24.000Z
|
oldp/apps/search/templatetags/search.py
|
Justice-PLP-DHV/oldp
|
eadf235bb0925453d9a5b81963a0ce53afeb17fd
|
[
"MIT"
] | 68 |
2018-06-11T16:13:17.000Z
|
2022-02-10T08:03:26.000Z
|
oldp/apps/search/templatetags/search.py
|
Justice-PLP-DHV/oldp
|
eadf235bb0925453d9a5b81963a0ce53afeb17fd
|
[
"MIT"
] | 15 |
2018-06-23T19:41:13.000Z
|
2021-08-18T08:21:49.000Z
|
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django import template
from django.template.defaultfilters import urlencode
from django.urls import reverse
from haystack.models import SearchResult
from haystack.utils.highlighting import Highlighter
register = template.Library()
@register.filter
def get_search_snippet(search_result: SearchResult, query: str) -> str:
hlr = Highlighter(query, html_tag='strong')
if search_result and hasattr(search_result, 'get_stored_fields') and 'text' in search_result.get_stored_fields():
text = search_result.get_stored_fields()['text']
return hlr.highlight(text)
else:
return ''
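# Editor's note (added, illustrative): in a template this is applied as a filter with an
# argument, e.g. {{ result|get_search_snippet:query }}, which wraps the matched query
# terms of the stored 'text' field in <strong> tags.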
@register.filter
def format_date(start_date: datetime) -> str:
"""
Format for search facets (year-month)
"""
return start_date.strftime('%Y-%m')
@register.filter
def date_range_query(start_date: datetime, date_format='%Y-%m-%d') -> str:
"""
Monthly range
"""
return start_date.strftime(date_format) + ',' + (start_date + relativedelta(months=1)).strftime(date_format)
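# Editor's note (added, illustrative): for a start_date of 2020-01-15 this filter yields
# '2020-01-15,2020-02-15', i.e. a one-month window used by the search facet query.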
@register.filter
def search_url(query):
return reverse('haystack_search') + '?q=' + urlencode(query)
| 28.690476 | 117 | 0.73444 | 0 | 0 | 0 | 0 | 878 | 0.728631 | 0 | 0 | 165 | 0.136929 |
962b1992cdd2dfaf0952dfed1c1a16307ccc9f57
| 372 |
py
|
Python
|
interview/leet/1029_Two_City_Scheduling.py
|
eroicaleo/LearningPython
|
297d46eddce6e43ce0c160d2660dff5f5d616800
|
[
"MIT"
] | 1 |
2020-10-12T13:33:29.000Z
|
2020-10-12T13:33:29.000Z
|
interview/leet/1029_Two_City_Scheduling.py
|
eroicaleo/LearningPython
|
297d46eddce6e43ce0c160d2660dff5f5d616800
|
[
"MIT"
] | null | null | null |
interview/leet/1029_Two_City_Scheduling.py
|
eroicaleo/LearningPython
|
297d46eddce6e43ce0c160d2660dff5f5d616800
|
[
"MIT"
] | 1 |
2016-11-09T07:28:45.000Z
|
2016-11-09T07:28:45.000Z
|
#!/usr/bin/env python
class Solution:
def twoCitySchedCost(self, costs):
N = len(costs)//2
costs = list(sorted(costs, key=lambda c: c[0]-c[1]))
s = 0
for i, c in enumerate(costs):
s += c[0] if i < N else c[1]
return s
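# Editor's note (added): sorting by c[0] - c[1] ranks people by how much cheaper city A
# is for them; the greedy sends the first N of that ordering to A and the rest to B.
# For the sample costs below the minimum total cost printed is 110.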
costs = [[10,20],[30,200],[400,50],[30,20]]
sol = Solution()
print(sol.twoCitySchedCost(costs))
| 24.8 | 60 | 0.553763 | 251 | 0.674731 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.056452 |
962b25ef7d6c6efe9c549cbaf3d04d00594f4f6d
| 23,818 |
py
|
Python
|
Metrics/plots.py
|
liorfrenkel1992/focal_calibration
|
4f020e022be501ee3f723e6105afe793a1e522f0
|
[
"MIT"
] | null | null | null |
Metrics/plots.py
|
liorfrenkel1992/focal_calibration
|
4f020e022be501ee3f723e6105afe793a1e522f0
|
[
"MIT"
] | null | null | null |
Metrics/plots.py
|
liorfrenkel1992/focal_calibration
|
4f020e022be501ee3f723e6105afe793a1e522f0
|
[
"MIT"
] | null | null | null |
'''
This file contains methods for generating calibration-related plots, e.g. reliability plots.
References:
[1] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger. On calibration of modern neural networks.
arXiv preprint arXiv:1706.04599, 2017.
'''
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
from torch.nn import functional as F
from scipy.interpolate import make_interp_spline
plt.rcParams.update({'font.size': 20})
# Some keys used for the following dictionaries
COUNT = 'count'
CONF = 'conf'
ACC = 'acc'
BIN_ACC = 'bin_acc'
BIN_CONF = 'bin_conf'
def _bin_initializer(bin_dict, num_bins=10):
for i in range(num_bins):
bin_dict[i][COUNT] = 0
bin_dict[i][CONF] = 0
bin_dict[i][ACC] = 0
bin_dict[i][BIN_ACC] = 0
bin_dict[i][BIN_CONF] = 0
def _populate_bins(confs, preds, labels, num_bins=10):
bin_dict = {}
for i in range(num_bins):
bin_dict[i] = {}
_bin_initializer(bin_dict, num_bins)
num_test_samples = len(confs)
for i in range(0, num_test_samples):
confidence = confs[i]
prediction = preds[i]
label = labels[i]
binn = int(math.ceil(((num_bins * confidence) - 1)))
bin_dict[binn][COUNT] = bin_dict[binn][COUNT] + 1
bin_dict[binn][CONF] = bin_dict[binn][CONF] + confidence
bin_dict[binn][ACC] = bin_dict[binn][ACC] + \
(1 if (label == prediction) else 0)
for binn in range(0, num_bins):
if (bin_dict[binn][COUNT] == 0):
bin_dict[binn][BIN_ACC] = 0
bin_dict[binn][BIN_CONF] = 0
else:
bin_dict[binn][BIN_ACC] = float(
bin_dict[binn][ACC]) / bin_dict[binn][COUNT]
bin_dict[binn][BIN_CONF] = bin_dict[binn][CONF] / \
float(bin_dict[binn][COUNT])
return bin_dict
def reliability_plot(confs, preds, labels, save_plots_loc, dataset, model, trained_loss, num_bins=15, scaling_related='before', save=False):
'''
Method to draw a reliability plot from a model's predictions and confidences.
'''
bin_dict = _populate_bins(confs, preds, labels, num_bins)
bns = [(i / float(num_bins)) for i in range(num_bins)]
y = []
for i in range(num_bins):
y.append(bin_dict[i][BIN_ACC])
plt.figure(figsize=(10, 8)) # width:20, height:3
plt.bar(bns, bns, align='edge', width=0.05, color='pink', label='Expected')
plt.bar(bns, y, align='edge', width=0.05,
color='blue', alpha=0.5, label='Actual')
plt.ylabel('Accuracy')
plt.xlabel('Confidence')
plt.legend()
if save:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'reliability_plot_{}_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.show()
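# Editor's sketch (added; the arguments below are hypothetical, illustrative values):
#   reliability_plot(confs, preds, labels, './plots', 'cifar10', 'resnet50',
#                    'cross_entropy', num_bins=15, scaling_related='before', save=True)
# where confs, preds and labels are per-sample confidences, predicted labels and
# ground-truth labels; the bars compare per-bin accuracy against the expected diagonal.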
def bin_strength_plot(confs, preds, labels, num_bins=15):
'''
Method to draw a plot for the number of samples in each confidence bin.
'''
bin_dict = _populate_bins(confs, preds, labels, num_bins)
bns = [(i / float(num_bins)) for i in range(num_bins)]
num_samples = len(labels)
y = []
for i in range(num_bins):
n = (bin_dict[i][COUNT] / float(num_samples)) * 100
y.append(n)
plt.figure(figsize=(10, 8)) # width:20, height:3
plt.bar(bns, y, align='edge', width=0.05,
color='blue', alpha=0.5, label='Percentage samples')
plt.ylabel('Percentage of samples')
plt.xlabel('Confidence')
plt.show()
def pos_neg_ece_bins_plot(bins_vec, bins_ece_over, bins_ece_under, bins_ece_over_after, bins_ece_under_after, save_plots_loc, dataset, model, trained_loss,
acc_check=False, scaling_related='before', const_temp=False):
plt.figure(figsize=(10, 8))
plt.scatter(bins_vec, bins_ece_over.cpu(), s=70)
plt.scatter(bins_vec, bins_ece_under.cpu(), s=70)
#plt.scatter(bins_vec, bins_ece_over_after.cpu())
#plt.scatter(bins_vec, bins_ece_under_after.cpu())
plt.xlabel('bins', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=18)
#plt.legend(('over-confidence classes', 'under-confidence classes', 'over-confidence classes after scaling', 'under-confidence classes after scaling'), fontsize=10)
plt.legend(('over-confidence classes', 'under-confidence classes'), fontsize=22)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_under_ece_bins_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_under_ece_bins_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_under_ece_bins_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
def pos_neg_ece_plot(acc, csece_pos, csece_neg, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_related='before', const_temp=False):
plt.figure(figsize=(10, 8))
plt.scatter(acc, csece_pos.cpu(), s=70)
plt.xlabel('accuracy', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=16)
plt.ylim(0, 0.01)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'pos_ece_acc_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'pos_ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'pos_ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
plt.figure(figsize=(10, 8))
plt.scatter(acc, csece_neg.cpu(), s=70)
plt.xlabel('accuracy', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=16)
plt.ylim(0, 0.01)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'neg_ece_acc_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'neg_ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'neg_ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
def ece_acc_plot(acc, csece, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_related='before', const_temp=False, unc=False):
plt.figure(figsize=(10, 8))
plt.scatter(acc, csece.cpu(), s=70)
plt.xlabel('accuracy', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=16)
#plt.ylim(0, 0.01)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_acc_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
if acc_check:
if unc:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'uncalibrated_ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=100)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
if unc:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'uncalibrated_ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
def ece_iters_plot(scaled_model, save_plots_loc, dataset, model, trained_loss, init_temp, acc_check=False):
plt.figure()
plt.plot(range(scaled_model.iters + 1), scaled_model.ece_list)
plt.plot(range(scaled_model.iters + 1), scaled_model.ece*torch.ones((scaled_model.iters + 1)))
plt.legend(('class-based temp scaling', 'single temp scaling'), fontsize=10)
plt.xlabel('iterations', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('ECE', fontsize=10)
plt.yticks(fontsize=10)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_iters_{}_{}_{}_{}_acc.pdf'.format(init_temp, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_iters_{}_{}_{}_{}.pdf'.format(init_temp, dataset, model, trained_loss)), dpi=40)
plt.close()
def temp_acc_plot(acc, temp, single_temp, save_plots_loc, dataset, model, trained_loss, acc_check=False, const_temp=False):
plt.figure()
plt.scatter(acc, temp.cpu(), label='Class-based temperature')
plt.plot(acc, single_temp*torch.ones(len(acc)), color='red', label='Single temperature')
plt.xlabel('accuracy', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('Temperature', fontsize=10)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_acc_after_scaling_{}_{}_{}_const_temp.pdf'.format(dataset, model, trained_loss)), dpi=40)
else:
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_acc_after_scaling_{}_{}_{}_acc.pdf'.format(dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_acc_after_scaling_{}_{}_{}.pdf'.format(dataset, model, trained_loss)), dpi=40)
def diff_ece_plot(acc, csece1, csece2, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_type='class_based'):
plt.figure()
plt.scatter(acc, (csece1 - csece2).cpu())
plt.xlabel('accuracy', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('ECE difference', fontsize=10)
plt.yticks(fontsize=10)
plt.axhline(y=0, color='r')
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'diff_{}_ece_acc_after_scaling_{}_{}_{}_acc.pdf'.format(scaling_type, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'diff_{}_ece_acc_after_scaling_{}_{}_{}.pdf'.format(scaling_type, dataset, model, trained_loss)), dpi=40)
def bins_over_conf_plot(bins, diff, save_plots_loc, dataset, model, trained_loss, scaling_related='before'):
plt.figure()
plt.plot(bins, diff)
plt.xlabel('bins', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('confidence - accuracy', fontsize=10)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_conf_bins_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
def temp_bins_plot(single_T, bins_T, bin_boundaries, save_plots_loc, dataset, model, trained_loss, acc_check=False, const_temp=False, divide='reg_divide', ds='val', version=1, cross_validate='ECE', y_name='Temperature'):
bin_boundaries = torch.linspace(0, bins_T.shape[0], bins_T.shape[0] + 1)
bin_lowers = bin_boundaries[:-1]
plt.figure()
for i in range(bins_T.shape[1]):
#bin_lowers = bin_boundaries[i][:-1]
#x_new = np.linspace(1, bins_T.shape[0], 300)
#a_BSpline = make_interp_spline(bin_lowers, bins_T[:, i].cpu())
#y_new = a_BSpline(x_new)
plt.plot(bin_lowers, bins_T[:, i].cpu(), label='Iteration #{}'.format(i + 1))
#plt.plot(x_new, y_new, label='CBT ({})'.format(cross_validate))
#plt.plot(x_new, y_new, label='Iteration #{}'.format(i + 1))
#plt.plot(bin_lowers, torch.ones(bins_T.shape[0])*single_T, label='Single temperature')
#plt.plot(x_new, torch.ones(len(y_new)) * single_T, label='TS'.format(cross_validate))
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel(y_name, fontsize=16)
plt.yticks(fontsize=10)
# plt.legend(fontsize=14)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_bins_{}_iters_{}_{}_{}_ver_{}_{}_{}_{}_smooth.pdf'.format(bins_T.shape[1], dataset, model, trained_loss, version, divide, ds, cross_validate)), dpi=40)
def ece_bin_plot(ece_bin, single_ece_bin, origin_ece_bin, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
plt.figure()
origin_ece_bin = [i * 100 for i in origin_ece_bin]
single_ece_bin = [i * 100 for i in single_ece_bin]
ece_bin = [i * 100 for i in ece_bin]
plt.plot(range(len(ece_bin)), origin_ece_bin, label='ECE before scaling')
plt.plot(range(len(ece_bin)), single_ece_bin, label='ECE after single temp scaling')
plt.plot(range(len(ece_bin)), ece_bin, label='ECE after per bin temp scaling')
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('ECE(%)', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model),
'ece_bins_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(dataset, model, trained_loss, version,
divide, ds)), dpi=40)
def logits_diff_bin_plot(logits_diff_bin, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
plt.figure()
plt.plot(range(len(logits_diff_bin)), logits_diff_bin)
plt.xlabel('Bins', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('Logits difference', fontsize=10)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model),
'logits_diff_bins_{}_{}_{}_ver_{}_{}_{}.pdf'.format(dataset, model, trained_loss, version,
divide, ds)), dpi=40)
def temp_bins_plot2(single_T, single_T2, bins_T, bins_T2, bin_boundaries, bin_boundaries2, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1, y_name='Temperature'):
bin_boundaries = torch.linspace(0, bins_T.shape[0], bins_T.shape[0] + 1)
bin_lowers = bin_boundaries[:-1]
plt.figure()
for i in range(bins_T.shape[1]):
#bin_lowers = bin_boundaries[i][:-1]
#bin_lowers2 = bin_boundaries2[i][:-1]
# x_new = np.linspace(1, bins_T.shape[0], 300)
# a_BSpline = make_interp_spline(bin_lowers, bins_T[:, i].cpu())
# a_BSpline2 = make_interp_spline(bin_lowers, bins_T2[:, i].cpu())
# y_new = a_BSpline(x_new)
# y_new2 = a_BSpline2(x_new)
plt.plot(bin_lowers, bins_T[:, i].cpu(), label='Weights')
plt.plot(bin_lowers, (1 / bins_T2[:, i]).cpu(), label=r'$1/Temperatures$')
# plt.plot(x_new, y_new, label='CBT ResNet-152')
# plt.plot(x_new, y_new2, label='CBT DenseNet-161')
#plt.plot(x_new, y_new, label='Iteration #{}'.format(i))
#plt.plot(bin_lowers, torch.ones(bins_T.shape[0])*single_T, label='Single temperature')
# plt.plot(x_new, torch.ones(len(y_new)) * single_T, label='TS ResNet-152')
# plt.plot(x_new, torch.ones(len(y_new2)) * single_T2, label='TS DenseNet-161')
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel(y_name, fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_bins_{}_iters_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(bins_T.shape[1], dataset, model, trained_loss, version, divide, ds)), dpi=40)
def exp_value(confidences, diff):
numerator = (-1 + torch.sqrt(1 + 4 * (1 - confidences) / confidences)) / 2
denominator = (-1 + torch.sqrt(1 + 4 * (1 - (confidences - diff)) / (confidences - diff))) / 2
return numerator, denominator
def plot_temp_different_bins(save_plots_loc):
confidences = torch.linspace(0.61, 1, 40)
#optim_temps = torch.log((1 - confidences) / confidences) / torch.log((1 - (confidences - 0.1)) / (confidences - 0.1))
numerator1, denominator1 = exp_value(confidences, 0.1)
numerator2, denominator2 = exp_value(confidences, 0.05)
numerator3, denominator3 = exp_value(confidences, 0.03)
#numerator4, denominator4 = exp_value(confidences, 0.2)
optim_temps1 = torch.log(numerator1) / torch.log(denominator1)
optim_temps2 = torch.log(numerator2) / torch.log(denominator2)
optim_temps3 = torch.log(numerator3) / torch.log(denominator3)
#optim_temps4 = torch.log(numerator4) / torch.log(denominator4)
plt.figure()
#plt.plot(confidences, optim_temps4, label='\u03B5=0.2')
plt.plot(confidences, optim_temps1, label='\u03B5=0.1')
plt.plot(confidences, optim_temps2, label='\u03B5=0.05')
plt.plot(confidences, optim_temps3, label='\u03B5=0.03')
plt.xlabel('Confidence', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Temperature', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=14)
plt.savefig(os.path.join(save_plots_loc, 'temp_movements_between_bins_3_classes.pdf'), dpi=40)
def ece_iters_plot2(single_ece, single_ece2, ece_list1, ece_list2, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
if len(ece_list1) < len(ece_list2):
ece_list1 = ece_list1 + (len(ece_list2) - len(ece_list1)) * [ece_list1[-1]]
elif len(ece_list1) > len(ece_list2):
ece_list2 = ece_list2 + (len(ece_list1) - len(ece_list2)) * [ece_list2[-1]]
ece_list1 = [i * 100 for i in ece_list1]
ece_list2 = [i * 100 for i in ece_list2]
plt.figure()
plt.plot(range(len(ece_list1)), ece_list1, label='CBT ResNet-152')
plt.plot(range(len(ece_list2)), ece_list2, label='CBT DenseNet-161')
plt.plot(range(len(ece_list1)), torch.ones(len(ece_list1)) * single_ece, label='TS ResNet-152')
plt.plot(range(len(ece_list2)), torch.ones(len(ece_list2)) * single_ece2, label='TS DenseNet-161')
plt.xlabel('Iterations', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('ECE(%)', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=14)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_iters_{}_iters_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(len(ece_list1) - 1, dataset, model, trained_loss, version, divide, ds)), dpi=40)
def plot_trajectory(save_plots_loc): # For probabilities [0.6, 0.3, 0.1]
weights = torch.linspace(0, 1, 100).unsqueeze(-1)
temperatures = torch.linspace(1, 100, 10000).unsqueeze(-1)
starting_point = torch.tensor([0.6, 0.3]).unsqueeze(0)
starting_logits = torch.tensor([math.log(0.6), math.log(0.3), math.log(0.1)])
# starting_logits = torch.tensor([2.2, 1.525, 0.5])
ts_points = [F.softmax(starting_logits / temperature, dim=0) for temperature in temperatures]
ts_points = torch.stack(ts_points)
n_classes = starting_point.shape[1] + 1
ws_points = torch.matmul(weights, (1 / n_classes) * torch.ones(starting_point.shape)) + torch.matmul(1 - weights, starting_point)
ws_points_full = torch.cat((ws_points, (1 - torch.sum(ws_points, 1)).unsqueeze(-1)), 1)
weights_ent = -torch.sum(ws_points_full * torch.log2(ws_points_full), 1)
softmaxes_100 = torch.tensor([8.4042679500e-13, 1.4278050742e-08, 3.9925965312e-11, 7.8529644267e-14,
1.1687384394e-10, 9.7083494401e-14, 7.9007286824e-13, 1.1496912363e-13,
5.3773496073e-12, 7.6878958755e-10, 8.9035365747e-09, 5.3947623278e-12,
2.4426896617e-10, 2.2383541201e-11, 1.2707822294e-10, 2.1816673468e-10,
5.0172353387e-15, 1.6286461112e-12, 5.1560413925e-12, 8.6647043707e-12,
1.8531972623e-09, 2.7630087107e-10, 7.1155463308e-16, 3.7386840152e-11,
5.1252758981e-11, 3.1181262433e-11, 2.6755674298e-06, 9.9959415197e-01,
1.9884007635e-11, 1.1077156523e-04, 1.7637266647e-11, 2.2995503279e-09,
7.3481587606e-06, 1.2129663940e-09, 3.2103027479e-05, 5.2368401282e-11,
2.3453745612e-09, 2.9135565488e-11, 2.9145277771e-12, 3.5043259961e-11,
9.6558103581e-14, 1.9227650583e-09, 1.5236486206e-07, 4.5127812598e-09,
8.7795990112e-05, 3.4632095776e-05, 3.3900747098e-08, 5.3773188159e-12,
4.9334299666e-13, 4.7792599739e-11, 9.7179556069e-12, 2.9196653486e-05,
1.2558685400e-15, 1.9376671101e-10, 2.1402189916e-12, 1.7672345792e-12,
4.2892519397e-11, 8.4134947273e-12, 1.5762311595e-11, 2.2964830992e-12,
1.1481499413e-14, 4.4955605211e-11, 2.6382507290e-11, 1.0882557433e-07,
3.2325153665e-10, 1.4755903444e-10, 2.8219235976e-11, 1.1946493714e-06,
5.6229808136e-12, 4.9992823214e-09, 1.2134488726e-11, 2.2948927203e-09,
1.0463446776e-09, 2.0963939562e-07, 1.3484322992e-08, 1.1520114862e-09,
1.9648471489e-13, 6.5380464775e-07, 2.2771805561e-06, 6.8640011210e-12,
2.4578919692e-05, 2.0577129952e-13, 2.1242145684e-13, 2.3415527872e-13,
4.5339165755e-10, 4.0936140522e-07, 9.8099343132e-16, 9.6455538001e-11,
4.4561368484e-11, 4.3079886880e-10, 1.0865559563e-09, 7.0311572927e-05,
6.6880915140e-14, 4.8056293167e-08, 3.0499626199e-16, 5.0754581093e-11,
4.9211958293e-12, 9.5986638371e-07, 1.9191167766e-08, 1.8387422074e-07]).unsqueeze(0)
ws_points2 = torch.matmul(weights, (1 / n_classes) * torch.ones(softmaxes_100.shape)) + torch.matmul(1 - weights, softmaxes_100)
weights_ent2 = -torch.sum(ws_points2 * torch.log2(ws_points2), 1)
plt.figure()
plt.plot(ws_points[:, 0], ws_points[:, 1], label='Weight Scaling')
plt.plot(ts_points[:, 0], ts_points[:, 1], label='Temperature Scaling')
plt.xlabel(r'$p_1$', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel(r'$p_2$', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, 'trajectories.pdf'), dpi=40)
plt.close()
plt.figure()
plt.plot(ws_points[:, 0], weights_ent)
plt.xlabel(r'$p_1$', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Entropy', fontsize=16)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, 'entropy.pdf'), dpi=40)
plt.figure()
plt.plot(ws_points2.max(1)[0], weights_ent2)
plt.xlabel('Confidence', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Entropy', fontsize=16)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, 'entropy_100.pdf'), dpi=40)
def conf_acc_diff_plot(conf_acc_diff, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
plt.figure()
plt.plot(range(len(conf_acc_diff)), conf_acc_diff)
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Confidence - Accuracy', fontsize=16)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'conf_acc_diff_bins_{}_{}_{}_{}_ver_{}_{}_{}.pdf'.format(len(conf_acc_diff), dataset, model, trained_loss, version, divide, ds)), dpi=40)
| 54.00907 | 234 | 0.674196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,033 | 0.211311 |
962cd88d6f79f8b3352c0cd041ccfcff6c478fe5
| 11,137 |
py
|
Python
|
sdk/python/pulumi_oci/sch/get_service_connector.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5 |
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/sch/get_service_connector.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/sch/get_service_connector.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2 |
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceConnectorResult',
'AwaitableGetServiceConnectorResult',
'get_service_connector',
]
@pulumi.output_type
class GetServiceConnectorResult:
"""
A collection of values returned by getServiceConnector.
"""
def __init__(__self__, compartment_id=None, defined_tags=None, description=None, display_name=None, freeform_tags=None, id=None, lifecyle_details=None, service_connector_id=None, source=None, state=None, system_tags=None, target=None, tasks=None, time_created=None, time_updated=None):
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if lifecyle_details and not isinstance(lifecyle_details, str):
raise TypeError("Expected argument 'lifecyle_details' to be a str")
pulumi.set(__self__, "lifecyle_details", lifecyle_details)
if service_connector_id and not isinstance(service_connector_id, str):
raise TypeError("Expected argument 'service_connector_id' to be a str")
pulumi.set(__self__, "service_connector_id", service_connector_id)
if source and not isinstance(source, dict):
raise TypeError("Expected argument 'source' to be a dict")
pulumi.set(__self__, "source", source)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if system_tags and not isinstance(system_tags, dict):
raise TypeError("Expected argument 'system_tags' to be a dict")
pulumi.set(__self__, "system_tags", system_tags)
if target and not isinstance(target, dict):
raise TypeError("Expected argument 'target' to be a dict")
pulumi.set(__self__, "target", target)
if tasks and not isinstance(tasks, list):
raise TypeError("Expected argument 'tasks' to be a list")
pulumi.set(__self__, "tasks", tasks)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if time_updated and not isinstance(time_updated, str):
raise TypeError("Expected argument 'time_updated' to be a str")
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the metric.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the resource. Avoid entering confidential information.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A user-friendly name. It does not have to be unique, and it is changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lifecyleDetails")
def lifecyle_details(self) -> str:
"""
A message describing the current state in more detail. For example, the message might provide actionable information for a resource in a `FAILED` state.
"""
return pulumi.get(self, "lifecyle_details")
@property
@pulumi.getter(name="serviceConnectorId")
def service_connector_id(self) -> str:
return pulumi.get(self, "service_connector_id")
@property
@pulumi.getter
def source(self) -> 'outputs.GetServiceConnectorSourceResult':
"""
An object that represents the source of the flow defined by the service connector. An example source is the VCNFlow logs within the NetworkLogs group. For more information about flows defined by service connectors, see [Service Connector Hub Overview](https://docs.cloud.oracle.com/iaas/Content/service-connector-hub/overview.htm).
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the service connector.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> Mapping[str, Any]:
"""
The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{orcl-cloud: {free-tier-retain: true}}`
"""
return pulumi.get(self, "system_tags")
@property
@pulumi.getter
def target(self) -> 'outputs.GetServiceConnectorTargetResult':
"""
An object that represents the target of the flow defined by the service connector. An example target is a stream. For more information about flows defined by service connectors, see [Service Connector Hub Overview](https://docs.cloud.oracle.com/iaas/Content/service-connector-hub/overview.htm).
"""
return pulumi.get(self, "target")
@property
@pulumi.getter
def tasks(self) -> Sequence['outputs.GetServiceConnectorTaskResult']:
"""
The list of tasks.
"""
return pulumi.get(self, "tasks")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The date and time when the service connector was created. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
"""
The date and time when the service connector was updated. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_updated")
class AwaitableGetServiceConnectorResult(GetServiceConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceConnectorResult(
compartment_id=self.compartment_id,
defined_tags=self.defined_tags,
description=self.description,
display_name=self.display_name,
freeform_tags=self.freeform_tags,
id=self.id,
lifecyle_details=self.lifecyle_details,
service_connector_id=self.service_connector_id,
source=self.source,
state=self.state,
system_tags=self.system_tags,
target=self.target,
tasks=self.tasks,
time_created=self.time_created,
time_updated=self.time_updated)
def get_service_connector(service_connector_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceConnectorResult:
"""
This data source provides details about a specific Service Connector resource in Oracle Cloud Infrastructure Service Connector Hub service.
Gets the specified service connector's configuration information.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_service_connector = oci.sch.get_service_connector(service_connector_id=oci_sch_service_connector["test_service_connector"]["id"])
```
:param str service_connector_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
"""
__args__ = dict()
__args__['serviceConnectorId'] = service_connector_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:sch/getServiceConnector:getServiceConnector', __args__, opts=opts, typ=GetServiceConnectorResult).value
return AwaitableGetServiceConnectorResult(
compartment_id=__ret__.compartment_id,
defined_tags=__ret__.defined_tags,
description=__ret__.description,
display_name=__ret__.display_name,
freeform_tags=__ret__.freeform_tags,
id=__ret__.id,
lifecyle_details=__ret__.lifecyle_details,
service_connector_id=__ret__.service_connector_id,
source=__ret__.source,
state=__ret__.state,
system_tags=__ret__.system_tags,
target=__ret__.target,
tasks=__ret__.tasks,
time_created=__ret__.time_created,
time_updated=__ret__.time_updated)
| 43.846457 | 347 | 0.679806 | 8,833 | 0.793122 | 717 | 0.06438 | 8,021 | 0.720212 | 0 | 0 | 4,897 | 0.439705 |
962dd9983600a5e9baec739d1eaccc092f1e2982
| 3,258 |
py
|
Python
|
manim_demo/srcs/new_Scene_demo.py
|
shujunge/manim_tutorial
|
8e320373f0404dcc0a200ab3750ee70784dc1345
|
[
"MIT"
] | null | null | null |
manim_demo/srcs/new_Scene_demo.py
|
shujunge/manim_tutorial
|
8e320373f0404dcc0a200ab3750ee70784dc1345
|
[
"MIT"
] | null | null | null |
manim_demo/srcs/new_Scene_demo.py
|
shujunge/manim_tutorial
|
8e320373f0404dcc0a200ab3750ee70784dc1345
|
[
"MIT"
] | null | null | null |
from manimlib.imports import *
class LSystem(Scene):
CONFIG = {
'rules': {'F': 'F+F--F+F'},
'length': 1,
'start_loc': ORIGIN,
'angle': PI / 3,
'start_rot': 0,
'iteration': 1,
'initial': 'F',
'actions': {},
'locations': [],
'rotations': [],
'graphs': [],
'expression': '',
'step_time': 1,
'animation': None,
'weight': 1,
}
def setup(self):
self.actions['F'] = self.draw_forward
self.actions['+'] = self.rotate_forward
self.actions['-'] = self.rotate_backward
self.actions['['] = self.push
self.actions[']'] = self.pop
self.cur_loc = self.start_loc
self.cur_rot = self.start_rot
self.expression = self.initial
self.animation = lambda x: \
self.play(ShowCreation(x), \
run_time=self.step_time)
def draw_forward(self):
o = self.cur_loc
l = self.length
a = self.cur_rot
e = o + \
l * np.cos(a) * RIGHT + \
l * np.sin(a) * UP
self.cur_loc = e
g = Line(o, e)
g.stroke_width = self.weight
self.animation(g)
def rotate_forward(self):
self.cur_rot += self.angle
def rotate_backward(self):
self.cur_rot -= self.angle
def push(self):
self.locations.append(self.cur_loc)
self.rotations.append(self.cur_rot)
def pop(self):
self.cur_loc = self.locations.pop()
self.cur_rot = self.rotations.pop()
def generate(self):
for i in range(self.iteration):
print(f'generating iteration {i + 1}')
new_exp = ''
for e in self.expression:
new_exp += self.rules.get(e, e)
self.expression = new_exp
print(f'iteration {i + 1} is finished')
def draw(self):
count = self.expression.count("F")
print(f'Total {count} Fs')
for e in self.expression:
act = self.actions.get(e, None)
if act is not None:
act()
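# Editor's note (added, illustrative): with the default rule {'F': 'F+F--F+F'}, one
# iteration rewrites the axiom 'F' into 'F+F--F+F'; the next iteration rewrites every
# 'F' in that string again. generate() performs this expansion and draw() then
# interprets the symbols (F = draw forward, +/- = turn, [ and ] = push/pop turtle state).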
class Koch(LSystem):
CONFIG = {
'iteration': 3,
'start_loc': (FRAME_X_RADIUS - 0.3) * LEFT + DOWN,
'step_time': 0.1,
'length': 0.5
}
def construct(self):
self.generate()
self.draw()
self.wait()
class Levy(LSystem):
CONFIG = {
'iteration': 9,
'start_loc': 2.5 * LEFT + 2 * DOWN,
'length': 0.2,
'step_time': 0.1,
'angle': PI / 4,
'rules': {
'F': '+F--F+'
}
}
def construct(self):
self.generate()
# self.animation = lambda x: self.add(x)
self.draw()
self.wait()
class Tree(LSystem):
CONFIG = {
'iteration': 6,
'start_loc': 3.5 * DOWN + 3 * LEFT,
'angle': 25 * DEGREES,
'rules': {
'X': 'F+[[X]-X]-F[-FX]+X',
'F': 'FF'
},
'initial': 'X',
'length': 0.05,
'step_time': 0.1,
'start_rot': 75 * DEGREES
}
def construct(self):
self.generate()
# self.animation = lambda x: self.add(x)
self.draw()
self.wait()
| 24.313433 | 58 | 0.482198 | 3,216 | 0.987109 | 0 | 0 | 0 | 0 | 0 | 0 | 560 | 0.171885 |
962e6f77c6888ab263ac0737fad6faa36799e3b3
| 4,720 |
py
|
Python
|
prototyping/OpenCv/robot_tracking.py
|
ssnover/msd-p18542
|
32bef466f9d5ba55429da2119a14081b3e411d0b
|
[
"MIT"
] | 3 |
2021-01-07T07:46:50.000Z
|
2021-11-17T10:48:39.000Z
|
prototyping/OpenCv/robot_tracking.py
|
ssnover/msd-p18542
|
32bef466f9d5ba55429da2119a14081b3e411d0b
|
[
"MIT"
] | 3 |
2018-02-19T20:30:30.000Z
|
2018-04-20T23:25:29.000Z
|
prototyping/OpenCv/robot_tracking.py
|
ssnover95/msd-p18542
|
32bef466f9d5ba55429da2119a14081b3e411d0b
|
[
"MIT"
] | 1 |
2021-01-07T07:46:52.000Z
|
2021-01-07T07:46:52.000Z
|
import imutils
import cv2
import numpy as np
import math
from math import sqrt
def find_robot_orientation(image):
robot = {}
robot['angle'] = []
robot['direction'] = []
robotLower = (139, 227, 196)
robotUpper = (255, 255, 255)
distances = []
# img = cv2.imread('all_color_terrain_with_robot.png')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, robotLower, robotUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
# find contours in thresholded image, then grab the largest
# one
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
c = max(cnts, key=cv2.contourArea)
M = cv2.moments(c)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# determine the most extreme points along the contour
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
print(extBot, extLeft, extRight, extTop, (cx, cy))
# Take care of the extra point, because there are only 3 sides,
# the distance max will be flawed of far point is 2 points (ie bottom and right)
if abs(extLeft[0] - extRight[0]) < 10 and abs(extLeft[1] - extRight[1]) < 10:
extRight = (cx, cy)
if abs(extLeft[0] - extTop[0]) < 10 and abs(extLeft[1] - extTop[1]) < 10:
extTop = (cx, cy)
if abs(extLeft[0] - extBot[0]) < 10 and abs(extLeft[1] - extBot[1]) < 10:
extBot = (cx, cy)
if abs(extBot[0] - extRight[0]) < 10 and abs(extBot[1] - extRight[1]) < 10:
extRight = (cx, cy)
if abs(extTop[0] - extRight[0]) < 10 and abs(extTop[1] - extRight[1]) < 10:
extRight = (cx, cy)
# draw the outline of the object, then draw each of the
# extreme points, where the left-most is red, right-most
# is green, top-most is blue, and bottom-most is teal
cv2.drawContours(image, [c], -1, (0, 255, 255), 2)
cv2.circle(image, (cx, cy), 7, (255, 0, 255), -1)
cv2.circle(image, extLeft, 6, (0, 0, 255), -1)
cv2.circle(image, extRight, 6, (0, 255, 0), -1)
cv2.circle(image, extTop, 6, (255, 0, 0), -1)
cv2.circle(image, extBot, 6, (255, 255, 0), -1)
# create list of extreme points
extreme_points = (extLeft, extRight, extTop, extBot)
for i in range(0, len(extreme_points)):
dist = sqrt((extreme_points[i][0] - extLeft[0]) ** 2 +
(extreme_points[i][1] - extLeft[1]) ** 2 +
(extreme_points[i][0] - extRight[0]) ** 2 +
(extreme_points[i][1] - extRight[1]) ** 2 +
(extreme_points[i][0] - extBot[0]) ** 2 +
(extreme_points[i][1] - extBot[1]) ** 2 +
(extreme_points[i][0] - extTop[0]) ** 2 +
(extreme_points[i][1] - extTop[1]) ** 2)
distances += [dist]
    index_max = np.argmax(distances)
    print(distances)
    top_triangle = (extreme_points[index_max])
print(top_triangle)
center = (cx, cy)
# Create vector containing the top of the isosceles triangle
# and the center of the contour that was found
centerline_points = [center, top_triangle]
# draw a line through the triangle in the direction of the robot motion
rows, cols = image.shape[:2]
[vx, vy, x, y] = cv2.fitLine(np.float32(centerline_points), cv2.DIST_L2, 0, 0.01, 0.01)
lefty = int((-x * vy / vx) + y)
righty = int(((cols - x) * vy / vx) + y)
cv2.line(image, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
# find the angle of the robot
rad = math.atan2(vx, vy)
angle = math.degrees(rad)
'''
# fix the angle such that the tip pointing up is 0deg,
# movement to the right of that is +deg
# movement to the left is -deg
# angle measurements are from -180:180
'''
if top_triangle[0] < center[0]:
angle = -angle
if top_triangle[0] > center[0]:
angle = 180 - angle
angle = round(angle)
print(angle)
cv2.putText(image, str(angle), (int(cx) - 50, int(cy) - 50), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 2,
cv2.LINE_AA)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
return angle, center
'''
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', img)
cv2.destroyAllWindows()
'''
| 39.333333 | 114 | 0.587288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,273 | 0.269703 |
962e9ec005fd784de4a3baab20160d8df9ba9898
| 7,404 |
py
|
Python
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_ntp.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 1 |
2019-04-16T21:23:15.000Z
|
2019-04-16T21:23:15.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_ntp.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 5 |
2020-02-26T20:10:50.000Z
|
2021-09-23T23:23:18.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_ntp.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 1 |
2020-02-13T14:24:57.000Z
|
2020-02-13T14:24:57.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ntp
short_description: Manage NTP configurations about an ESXi host
description:
- This module can be used to manage NTP configuration information about an ESXi host.
- User can specify an ESXi hostname or Cluster name. In case of cluster name, all ESXi hosts are updated.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- NTP settings are applied to every ESXi host system in the given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- NTP settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
ntp_servers:
description:
- "IP or FQDN of NTP server/s."
- This accepts a list of NTP servers. For multiple servers, please look at the examples.
required: True
state:
description:
- "present: Add NTP server/s, if it specified server/s are absent else do nothing."
- "absent: Remove NTP server/s, if specified server/s are present else do nothing."
default: present
choices: [ present, absent ]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set NTP setting for all ESXi Host in given Cluster
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Set NTP setting for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Remove NTP setting for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: absent
ntp_servers:
- bad.server.ntp.org
delegate_to: localhost
'''
RETURN = r'''#
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareNtpConfigManager(PyVmomi):
def __init__(self, module):
super(VmwareNtpConfigManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.ntp_servers = self.params.get('ntp_servers', list())
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.results = {}
self.desired_state = module.params['state']
def update_ntp_servers(self, host, ntp_servers, operation='add'):
changed = False
host_date_time_manager = host.configManager.dateTimeSystem
if host_date_time_manager:
available_ntp_servers = host_date_time_manager.dateTimeInfo.ntpConfig.server
available_ntp_servers = list(filter(None, available_ntp_servers))
if operation == 'add':
available_ntp_servers = available_ntp_servers + ntp_servers
elif operation == 'delete':
for server in ntp_servers:
if server in available_ntp_servers:
available_ntp_servers.remove(server)
ntp_config_spec = vim.host.NtpConfig()
ntp_config_spec.server = available_ntp_servers
date_config_spec = vim.host.DateTimeConfig()
date_config_spec.ntpConfig = ntp_config_spec
try:
host_date_time_manager.UpdateDateTimeConfig(date_config_spec)
self.results[host.name]['after_change_ntp_servers'] = host_date_time_manager.dateTimeInfo.ntpConfig.server
changed = True
except vim.fault.HostConfigFault as e:
self.results[host.name]['error'] = to_native(e.msg)
except Exception as e:
self.results[host.name]['error'] = to_native(e)
return changed
def check_host_state(self):
change_list = []
changed = False
for host in self.hosts:
ntp_servers_to_change = self.check_ntp_servers(host=host)
self.results[host.name].update(dict(
ntp_servers_to_change=ntp_servers_to_change,
desired_state=self.desired_state,
)
)
if not ntp_servers_to_change:
change_list.append(False)
self.results[host.name]['current_state'] = self.desired_state
elif ntp_servers_to_change:
if self.desired_state == 'present':
changed = self.update_ntp_servers(host=host, ntp_servers=ntp_servers_to_change)
change_list.append(changed)
elif self.desired_state == 'absent':
changed = self.update_ntp_servers(host=host, ntp_servers=ntp_servers_to_change, operation='delete')
change_list.append(changed)
self.results[host.name]['current_state'] = self.desired_state
if any(change_list):
changed = True
self.module.exit_json(changed=changed, results=self.results)
def check_ntp_servers(self, host):
update_ntp_list = []
host_datetime_system = host.configManager.dateTimeSystem
if host_datetime_system:
ntp_servers = host_datetime_system.dateTimeInfo.ntpConfig.server
self.results[host.name] = dict(available_ntp_servers=ntp_servers)
for ntp_server in self.ntp_servers:
if self.desired_state == 'present' and ntp_server not in ntp_servers:
update_ntp_list.append(ntp_server)
if self.desired_state == 'absent' and ntp_server in ntp_servers:
update_ntp_list.append(ntp_server)
return update_ntp_list
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
ntp_servers=dict(type='list', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
]
)
vmware_host_ntp_config = VmwareNtpConfigManager(module)
vmware_host_ntp_config.check_host_state()
if __name__ == "__main__":
main()
| 35.768116 | 122 | 0.66356 | 3,771 | 0.509319 | 0 | 0 | 0 | 0 | 0 | 0 | 2,838 | 0.383306 |
962f56d3ff295087050794dbedace7481235e971
| 337 |
py
|
Python
|
molecule/default/tests/test_creation.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 3 |
2020-04-14T19:57:25.000Z
|
2021-01-11T09:09:16.000Z
|
molecule/default/tests/test_creation.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 4 |
2020-08-12T10:24:25.000Z
|
2022-01-17T17:48:28.000Z
|
molecule/default/tests/test_creation.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 2 |
2021-06-17T21:57:42.000Z
|
2022-02-20T08:02:43.000Z
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_crypto_devices(host):
f = host.file('/dev/mapper/cryptotest')
assert f.exists
f = host.file('/dev/mapper/crypto-test1')
assert f.exists
| 24.071429 | 63 | 0.744807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.237389 |
962ff9f355899a8526a5df34e1ab89d319623352
| 796 |
py
|
Python
|
test_weakref.py
|
xzfn/retroreload
|
79466ef013a2a0892e096e510b847e222478caeb
|
[
"MIT"
] | null | null | null |
test_weakref.py
|
xzfn/retroreload
|
79466ef013a2a0892e096e510b847e222478caeb
|
[
"MIT"
] | null | null | null |
test_weakref.py
|
xzfn/retroreload
|
79466ef013a2a0892e096e510b847e222478caeb
|
[
"MIT"
] | null | null | null |
"""
weakref should be valid.
"""
import gc
import importlib
import autoreload
import retroreload
switch = 2
if switch == 0:
reload_module = importlib.reload
elif switch == 1:
reload_module = autoreload.superreload
elif switch == 2:
reload_module = retroreload.retroreload
import mod3
if __name__ == '__main__':
dispatcher = mod3.Dispatcher()
c = mod3.C()
dispatcher.register(c.func)
dispatcher.dispatch()
input('modify mod3.py if you like, and press enter')
reload_module(mod3)
print('gc before')
gc.collect()
print('gc after')
dispatcher.dispatch()
# builtin: preserve weakref, but result is bad
# autoreload: loses weakref when gc.collect is called, cb() returns None
# retroreload: preserve weakref, result is good
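# ---------------------------------------------------------------------
# Illustrative sketch only (mod3.py is not shown in this repository
# snapshot).  A Dispatcher along these lines, holding weak references
# to the registered bound methods, would exhibit the behavior described
# in the comments above (hypothetical names, for illustration):
#
#   import weakref
#
#   class C:
#       def func(self):
#           print('C.func called')
#
#   class Dispatcher:
#       def __init__(self):
#           self._refs = []
#       def register(self, bound_method):
#           self._refs.append(weakref.WeakMethod(bound_method))
#       def dispatch(self):
#           for ref in self._refs:
#               func = ref()   # None once the owner instance is collected
#               if func is not None:
#                   func()
# ---------------------------------------------------------------------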
| 18.090909 | 76 | 0.688442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.342965 |
963000235c468e48e66c69225a889ee4596a7a88
| 814 |
py
|
Python
|
model/model.py
|
Okestro-symphony/Log-Anomaly-Detection
|
ab6548ca93c8d6073faf96d8a39bf4517139d8ea
|
[
"Apache-2.0"
] | null | null | null |
model/model.py
|
Okestro-symphony/Log-Anomaly-Detection
|
ab6548ca93c8d6073faf96d8a39bf4517139d8ea
|
[
"Apache-2.0"
] | 1 |
2021-11-03T04:17:55.000Z
|
2021-11-03T04:17:55.000Z
|
model/model.py
|
Okestro-symphony/Log-Anomaly-Detection
|
ab6548ca93c8d6073faf96d8a39bf4517139d8ea
|
[
"Apache-2.0"
] | 1 |
2021-11-03T04:15:33.000Z
|
2021-11-03T04:15:33.000Z
|
from sklearn.ensemble import IsolationForest


def train_isolation_forest(df, padding_data):
'''
* Isolation Forest model setting
- n_estimators=100
- max_samples='auto'
- n_jobs=-1
- max_features=2
- contamination=0.01
'''
    # load the padded data
data_df = padding_data
    # define the model
model = IsolationForest(n_estimators=100, max_samples='auto', n_jobs=-1,
max_features=2, contamination=0.01)
try:
model = model.fit(data_df)
except Exception as ex:
        print('Model training failed: ', ex)
try:
        # compute the anomaly score and the anomaly (-1 / 1) prediction
score = model.decision_function(data_df)
anomaly = model.predict(data_df)
except Exception as ex:
        print('Anomaly detection failed: ', ex)
    # anomaly_data = df.loc[df['is_anomaly'] == -1]  # anomalies are flagged with -1
return df
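

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): build a
    # small random numeric frame and use it as both the original and
    # the padded data.  The column names here are illustrative only.
    import numpy as np
    import pandas as pd
    demo = pd.DataFrame(np.random.rand(200, 2), columns=['f1', 'f2'])
    print(train_isolation_forest(demo.copy(), demo).head())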
| 24.666667 | 76 | 0.589681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.43424 |
963036c125a200d1a52d12e53f35e03ad2ffc294
| 1,619 |
py
|
Python
|
datasets.py
|
Tracesource/DCEC
|
8b9dca56bc032fb81d18dd9709c170802600e06b
|
[
"MIT"
] | 154 |
2017-10-01T22:32:26.000Z
|
2022-03-08T14:09:38.000Z
|
datasets.py
|
Tracesource/DCEC
|
8b9dca56bc032fb81d18dd9709c170802600e06b
|
[
"MIT"
] | 10 |
2017-12-28T11:38:14.000Z
|
2020-07-22T04:46:27.000Z
|
datasets.py
|
Tracesource/DCEC
|
8b9dca56bc032fb81d18dd9709c170802600e06b
|
[
"MIT"
] | 59 |
2017-12-18T11:50:53.000Z
|
2022-03-16T17:42:18.000Z
|
import numpy as np
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x = np.concatenate((x_train, x_test))
y = np.concatenate((y_train, y_test))
x = x.reshape(-1, 28, 28, 1).astype('float32')
x = x/255.
print('MNIST:', x.shape)
return x, y
def load_usps(data_path='./data/usps'):
import os
if not os.path.exists(data_path+'/usps_train.jf'):
if not os.path.exists(data_path+'/usps_train.jf.gz'):
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s' % data_path)
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s' % data_path)
os.system('gunzip %s/usps_train.jf.gz' % data_path)
os.system('gunzip %s/usps_test.jf.gz' % data_path)
with open(data_path + '/usps_train.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_train, labels_train = data[:, 1:], data[:, 0]
with open(data_path + '/usps_test.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_test, labels_test = data[:, 1:], data[:, 0]
x = np.concatenate((data_train, data_test)).astype('float32')
x /= 2.0
x = x.reshape([-1, 16, 16, 1])
y = np.concatenate((labels_train, labels_test))
print('USPS samples', x.shape)
return x, y
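

if __name__ == '__main__':
    # Quick illustrative check (not part of the original module):
    # load MNIST and print the array shapes.  Assumes keras is
    # installed and can download the dataset on first use.
    x, y = load_mnist()
    print('x:', x.shape, ' y:', y.shape)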
| 34.446809 | 113 | 0.618901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.239036 |
9630b2873f1800433cfbc8d045129730577eb455
| 121,027 |
py
|
Python
|
balto_gui.py
|
peckhams/balto_gui
|
1c599bce4e90569f34aab1546d1adfd9dcaad943
|
[
"MIT"
] | 8 |
2020-07-27T16:16:50.000Z
|
2022-03-09T19:42:27.000Z
|
balto_gui.py
|
peckhams/balto_gui
|
1c599bce4e90569f34aab1546d1adfd9dcaad943
|
[
"MIT"
] | 1 |
2020-05-21T01:36:16.000Z
|
2020-05-21T01:36:16.000Z
|
balto_gui.py
|
peckhams/balto_gui
|
1c599bce4e90569f34aab1546d1adfd9dcaad943
|
[
"MIT"
] | 5 |
2020-05-07T13:16:42.000Z
|
2021-02-18T18:57:42.000Z
|
"""
This module defines a class called "balto_gui" that can be used to
create a graphical user interface (GUI) for downloading data from
OpenDAP servers from within a Jupyter notebook.  If used with Binder,
this GUI runs in a browser window and does not require the user to
install anything on their computer. However, this module should be
included in the same directory as the Jupyter notebook.
"""
#------------------------------------------------------------------------
#
# Copyright (C) 2020. Scott D. Peckham
#
#------------------------------------------------------------------------
from ipyleaflet import Map, basemaps, FullScreenControl
from ipyleaflet import MeasureControl, Rectangle
## from ipyleaflet import ScaleControl # (doesn't work)
from traitlets import Tuple
## import ipyleaflet as ipyl
import ipywidgets as widgets
from ipywidgets import Layout
from IPython.display import display, HTML
## from IPython.core.display import display
## from IPython.lib.display import display
import pydap.client # (for open_url, etc.)
import requests    # (used by get_url_dir_filenames())
import json
import datetime # (used by get_duration() )
import copy
import numpy as np
import balto_plot as bp
#------------------------------------------------------------------------
#
# class balto_gui
# __init__()
# pix_str()
# show_gui()
# make_acc_gui()
# make_tab_gui()
# make_data_panel()
# reset_data_panel()
# make_map_panel()
# make_dates_panel()
# make_download_panel()
# make_prefs_panel()
# #--------------------------
# get_map_bounds()
# replace_map_bounds()
# replace_map_bounds2()
# update_map_bounds()
# zoom_out_to_new_bounds()
# --------------------------
# get_url_dir_filenames()
# update_filename_list()
# get_opendap_file_url()
# open_dataset()
# update_data_panel()
# --------------------------
# update_var_info()
# get_all_var_shortnames()
# get_all_var_longnames()
# get_all_var_units()
# --------------------------
# get_var_shortname()
# get_var_longname()
# get_var_units()
# get_var_shape()
# get_var_dimensions()
# get_var_dtype()
# get_var_attributes()
# get_var_time_attributes()
# -------------------------------
# update_datetime_panel()
# get_years_from_time_since()
# clear_datetime_notes()
# append_datetime_notes()
# list_to_string()
# -------------------------------
# pad_with_zeros()
# get_actual_time_units()
# get_time_delta_str()
# get_datetime_obj_from_str()
# get_datetime_obj_from_one_str()
# get_start_datetime_obj()
# get_end_datetime_obj()
# get_dt_from_datetime_str()
# split_datetime_str()
# split_date_str()
# split_time_str()
# get_datetime_from_time_since()
# get_time_since_from_datetime()
# get_month_difference()
# -------------------------------
# get_new_time_index_range()
# get_new_lat_index_range()
# get_new_lon_index_range()
# -------------------------------
# get_duration() ## not used yet
# ----------------------------
# get_download_format()
# clear_download_log()
# append_download_log()
# print_user_choices()
# download_data()
# show_grid()
# -------------------------------
# get_opendap_package() # (in prefs panel)
# ----------------------------
# get_abbreviated_var_name()
# get_possible_svo_names()
#
#------------------------------
# Example GES DISC opendap URL
#------------------------------
# https://gpm1.gesdisc.eosdis.nasa.gov/opendap/GPM_L3/GPM_3IMERGHHE.05/2014/091/
# 3B-HHR-E.MS.MRG.3IMERG.20140401-S000000-E002959.0000.V05B.HDF5.nc
# ?HQprecipitation[1999:2200][919:1049],lon[1999:2200],lat[919:1049]
#------------------------------------------------------------------------
class balto_gui:
#--------------------------------------------------------------------
def __init__(self):
self.version = '0.5'
self.user_var = None
self.default_url_dir = 'http://test.opendap.org/dap/data/nc/'
self.timeout_secs = 60 # (seconds)
#----------------------------------------------------------
# "full_box_width" = (label_width + widget_width)
# gui_width = left_label_width + mid_width + button_width
# The 2nd, label + widget box, is referred to as "next".
# (2 * half_widget_width) + left_label + next_label = 540
#----------------------------------------------------------
self.gui_width = 680
self.left_label_width = 120
self.next_label_width = 50
self.all_label_width = 170
self.full_box_width = 540
self.widget_width = (self.full_box_width - self.left_label_width)
# self.half_widget_width = (self.full_box_width - self.all_label_width)/2
# self.half_widget_width = 183
self.left_widget_width = 230
self.next_widget_width = 136
self.left_box_width = (self.left_label_width + self.left_widget_width)
self.next_box_width = (self.next_label_width + self.next_widget_width)
self.button_width = 70 # big enough for "Reset"
#-----------------------------------------------------
self.map_width = (self.gui_width - 40)
self.map_height = 230 # was 250
self.map_center_init = (20.0, 0)
self.add_fullscreen_control = True
self.add_scale_control = False # (doesn't work)
self.add_measure_control = True
#-----------------------------------------------------
self.gui_width_px = self.pix_str( self.gui_width )
self.map_width_px = self.pix_str( self.map_width )
self.map_height_px = self.pix_str( self.map_height )
#-----------------------------------------------------
self.date_width_px = '240px'
self.time_width_px = '180px'
self.hint_width_px = '120px'
#---------------------------------------------------
self.log_box_width_px = self.pix_str( self.full_box_width )
self.log_box_height_px = '200px'
#---------------------------------------------------
# These styles are used to control width of labels
# self.init_label_style is the initial default.
#---------------------------------------------------
llw_px = self.pix_str( self.left_label_width )
nlw_px = self.pix_str( self.next_label_width )
self.init_label_style = {'description_width': 'initial'}
self.left_label_style = {'description_width': llw_px}
self.next_label_style = {'description_width': nlw_px}
self.date_style = {'description_width': '70px'}
self.time_style = {'description_width': '70px'}
# __init__()
#--------------------------------------------------------------------
def pix_str(self, num):
return str(num) + 'px'
#--------------------------------------------------------------------
def show_gui(self, ACC_STYLE=False, SHOW_MAP=True):
#------------------------------------------------------
        # Encountered a case where an issue with ipyleaflet (used
        # for the map panel) prevented any part of the GUI from
        # being displayed.  The SHOW_MAP flag helps to test for
        # this problem.
#------------------------------------------------------
#------------------------------------
# Create & display the complete GUI
#-----------------------------------
if (ACC_STYLE):
self.make_acc_gui()
else:
# Use the TAB style
self.make_tab_gui( SHOW_MAP=SHOW_MAP)
gui_output = widgets.Output()
display(self.gui, gui_output)
# show_gui()
#--------------------------------------------------------------------
def make_acc_gui(self):
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel()
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=None causes all cells to be collapsed
#-------------------------------------------------------
acc = widgets.Accordion( children=[p0, p1, p2, p3, p4],
selected_index=None,
layout=Layout(width=gui_width_px) )
acc.set_title(0, p0_title)
acc.set_title(1, p1_title)
acc.set_title(2, p2_title)
acc.set_title(3, p3_title)
acc.set_title(4, p4_title)
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
pad = self.get_padding(1, HORIZONTAL=False) # 1 lines
head = widgets.HTML(value=f"<b><font size=4>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
# self.gui = widgets.VBox([pad, head, acc]) # (top padding
self.gui = widgets.VBox([head, acc]) # (no top padding)
# make_acc_gui()
#--------------------------------------------------------------------
def make_tab_gui(self, SHOW_MAP=True):
#---------------------------------------------------------
# If there is a problem with ipyleaflet, it can prevent
# any part of the GUI from being displayed. You can
# set SHOW_MAP=False to remove the map to test for this.
#---------------------------------------------------------
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel( SHOW_MAP=SHOW_MAP )
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=0 shows Browse Data panel
#-------------------------------------------------------
tab = widgets.Tab( children=[p0, p1, p2, p3, p4],
selected_index=0,
layout=Layout(width=gui_width_px) )
tab.set_title(0, p0_title)
tab.set_title(1, p1_title)
tab.set_title(2, p2_title)
tab.set_title(3, p3_title)
tab.set_title(4, p4_title)
#### tab.titles = [str(i) for i in range(len(children))]
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
pad = self.get_padding(1, HORIZONTAL=False) # 1 lines
head = widgets.HTML(value=f"<b><font size=5>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
## self.gui = widgets.VBox([pad, head, acc])
self.gui = widgets.VBox([head, tab]) # (no padding above)
# make_tab_gui()
#--------------------------------------------------------------------
def get_padding(self, n, HORIZONTAL=True):
#-------------------------------
# Get some white space padding
#-------------------------------
if (HORIZONTAL):
#--------------------------------
# Use overloaded multiplication
#--------------------------------
## s = (' ' * n) # overloaded multiplication
s = "<p>" + (' ' * n) + "</p>"
pad = widgets.HTML( value=s )
else:
s = ("<br>" * n)
pad = widgets.HTML( value=s )
return pad
# get_padding()
#--------------------------------------------------------------------
def make_data_panel(self):
#-----------------------------------
# Browse data on an OpenDAP server
#-----------------------------------
left_style = self.left_label_style
next_style = self.next_label_style
full_width_px = self.pix_str( self.full_box_width )
left_width_px = self.pix_str( self.left_box_width )
next_width_px = self.pix_str( self.next_box_width )
btn_width_px = self.pix_str( self.button_width )
#---------------------------------------------------------
o1 = widgets.Text(description='OpenDAP URL Dir:',
value=self.default_url_dir,
disabled=False, style=left_style,
layout=Layout(width=full_width_px))
b1 = widgets.Button(description="Go", layout=Layout(width=btn_width_px))
o2 = widgets.Dropdown( description='Filename:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
#------------------------------------------------------------------
oL = widgets.Text(description='Long name:', style=left_style,
value='', layout=Layout(width=full_width_px) )
## o3 = widgets.Select( description='Variable:',
o3 = widgets.Dropdown( description='Variable:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=left_width_px) )
o4 = widgets.Text(description='Units:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o5 = widgets.Text(description='Dimensions:', style=left_style,
value='', layout=Layout(width=left_width_px) )
o6 = widgets.Text(description='Shape:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o7 = widgets.Text(description='Data type:', style=left_style,
value='', layout=Layout(width=full_width_px) )
o8 = widgets.Dropdown( description='Attributes:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
o9 = widgets.Text(description='Status:', style=left_style,
value='Ready.', layout=Layout(width=full_width_px) )
b2 = widgets.Button(description="Reset", layout=Layout(width=btn_width_px))
## pd = widgets.HTML((' ' * 1)) # for padding
#-------------------------------
# Arrange widgets in the panel
#-------------------------------
url_box = widgets.HBox([o1, b1]) # directory + Go button
stat_box = widgets.HBox([o9, b2]) # status + Reset button
name_box = widgets.VBox([o3, o5])
## pad_box = widgets.VBox([pd, pd])
unit_box = widgets.VBox([o4, o6])
mid_box = widgets.HBox([name_box, unit_box])
## mid_box = widgets.HBox([name_box, pad_box, unit_box])
panel = widgets.VBox([url_box, o2, oL, mid_box, o7, o8, stat_box])
self.data_url_dir = o1 # on an OpenDAP server
self.data_filename = o2
self.data_var_long_name = oL
self.data_var_name = o3 # short_name
self.data_var_units = o4
self.data_var_dims = o5
self.data_var_shape = o6
self.data_var_type = o7
self.data_var_atts = o8
self.data_status = o9
self.data_panel = panel
#-----------------
# Event handlers
#-----------------------------------------------------
# Note: NEED to set names='value' here. If names
# keyword is omitted, only works intermittently.
#------------------------------------------------------------
# "on_click" handler function is passed b1 as argument.
# "observe" handler function is passed "change", which
# is a dictionary, as argument. See Traitlet events.
#------------------------------------------------------------
b1.on_click( self.update_filename_list )
b2.on_click( self.reset_data_panel )
o2.observe( self.update_data_panel, names=['options','value'] )
o3.observe( self.update_var_info, names=['options', 'value'] )
## o3.observe( self.update_var_info, names='value' )
## o2.observe( self.update_data_panel, names='All' )
## o3.observe( self.update_var_info, names='All' )
#-------------------------------------------------------
# It turned out this wasn't an issue, but interesting.
#-------------------------------------------------------
# Note: Method functions have type "method" instead
# of "function" and therefore can't be passed
# directly to widget handlers like "on_click".
# But we can use the "__func__" attribute.
#-------------------------------------------------------
# b1.on_click( self.update_filename_list.__func__ )
# o2.observe( self.update_data_panel.__func__ )
# o3.observe( self.update_var_info.__func__, names='value' )
# make_data_panel()
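    #--------------------------------------------------------------------
    # Note (illustration only, not from the original code): the "change"
    # argument that traitlets passes to an observe() handler is a Bunch
    # that can be indexed like a dict, e.g.
    #    {'name': 'value', 'old': 'old.nc', 'new': 'new.nc',
    #     'owner': <Dropdown widget>, 'type': 'change'}
    # so a handler may read change['new'] (or change.new) if needed.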
#--------------------------------------------------------------------
def reset_data_panel(self, caller_obj=None, KEEP_DIR=False):
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Reset" button beside the status box.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
if not(KEEP_DIR):
self.data_url_dir.value = self.default_url_dir
self.data_filename.options = ['']
self.data_var_name.options = [''] # short names
self.data_var_long_name.value = ''
self.data_var_units.value = ''
self.data_var_shape.value = ''
self.data_var_dims.value = ''
self.data_var_type.value = ''
self.data_var_atts.options = ['']
self.data_status.value = 'Ready.'
#------------------------------------------
self.download_log.value = ''
# reset_data_panel()
#--------------------------------------------------------------------
def make_map_panel(self, SHOW_MAP=True):
map_width_px = self.map_width_px
map_height_px = self.map_height_px
btn_width_px = self.pix_str( self.button_width )
#--------------------------------------------------
# bm_style = {'description_width': '70px'} # for top
bbox_style = {'description_width': '100px'}
bbox_width_px = '260px'
#---------------------------------------
# Create the map width with ipyleaflet
# Center lat 20 looks better than 0.
#---------------------------------------
map_center = self.map_center_init # (lat, lon)
m = Map(center=map_center, zoom=1,
layout=Layout(width=map_width_px, height=map_height_px))
#----------------------
# Add more controls ?
#----------------------
if (self.add_fullscreen_control):
m.add_control( FullScreenControl( position='topright' ) )
#---------------------------------------------------------
# Cannot be imported. (2020-05-18)
# if (self.add_scale_control):
# m.add_control(ScaleControl( position='bottomleft' ))
#---------------------------------------------------------
if (self.add_measure_control):
measure = MeasureControl( position='bottomright',
active_color = 'orange',
primary_length_unit = 'kilometers')
m.add_control(measure)
measure.completed_color = 'red'
## measure.add_length_unit('yards', 1.09361, 4)
## measure.secondary_length_unit = 'yards'
## measure.add_area_unit('sqyards', 1.19599, 4)
## measure.secondary_area_unit = 'sqyards'
#-----------------------------------------------------
# Does "step=0.01" restrict accuracy of selection ??
#-----------------------------------------------------
w1 = widgets.BoundedFloatText(
value=-180, step=0.01, min=-360, max=360.0,
description='West edge lon:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w2 = widgets.BoundedFloatText(
value=180, step=0.01, min=-360, max=360.0,
description='East edge lon:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w3 = widgets.BoundedFloatText(
value=90, min=-90, max=90.0, step=0.01,
# description='North latitude:',
description='North edge lat:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w4 = widgets.BoundedFloatText(
value=-90, min=-90, max=90.0, step=0.01,
# description='South latitude:',
description='South edge lat:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
pd = widgets.HTML((' ' * 2)) # for padding
b1 = widgets.Button(description="Update",
layout=Layout(width=btn_width_px))
b2 = widgets.Button(description="Reset",
layout=Layout(width=btn_width_px))
#---------------------
# Choose the basemap
#---------------------
options = self.get_basemap_list()
bm = widgets.Dropdown( description='Base map:',
options=options, value=options[0],
disabled=False, style=bbox_style,
layout=Layout(width='360px') )
#-----------------------------------
# Arrange the widgets in the panel
#-----------------------------------
lons = widgets.VBox([w1, w2])
lats = widgets.VBox([w3, w4])
pads = widgets.VBox([pd, pd])
btns = widgets.VBox([b1, b2])
bbox = widgets.HBox( [lons, lats, pads, btns])
#------------------------------------------------------
        # Encountered a case where an issue with ipyleaflet (used
        # for the map panel) prevented any part of the GUI from
        # being displayed.  The SHOW_MAP flag helps to test for
        # this problem.
#------------------------------------------------------
if (SHOW_MAP):
panel = widgets.VBox( [m, bbox, bm] )
else:
panel = widgets.VBox( [bbox, bm] )
self.map_window = m
self.map_minlon = w1
self.map_maxlon = w2
self.map_maxlat = w3
self.map_minlat = w4
self.map_basemap = bm
self.map_panel = panel
## self.map_bounds = (-180, -90, 180, 90)
#-----------------
# Event handlers
#-----------------
bm.observe( self.change_base_map, names=['options','value'] )
m.on_interaction( self.replace_map_bounds )
m.observe( self.zoom_out_to_new_bounds, 'bounds' )
m.new_bounds = None # (used for "zoom to fit")
b1.on_click( self.update_map_bounds )
b2.on_click( self.reset_map_panel )
# make_map_panel()
#--------------------------------------------------------------------
def get_basemap_list(self):
basemap_list = [
'OpenStreetMap.Mapnik', 'OpenStreetMap.HOT', 'OpenTopoMap',
'Esri.WorldStreetMap', 'Esri.DeLorme', 'Esri.WorldTopoMap',
'Esri.WorldImagery', 'Esri.NatGeoWorldMap',
'NASAGIBS.ModisTerraTrueColorCR', 'NASAGIBS.ModisTerraBands367CR',
'NASAGIBS.ModisTerraBands721CR', 'NASAGIBS.ModisAquaTrueColorCR',
'NASAGIBS.ModisAquaBands721CR', 'NASAGIBS.ViirsTrueColorCR',
'NASAGIBS.ViirsEarthAtNight2012',
'Strava.All', 'Strava.Ride', 'Strava.Run', 'Strava.Water',
'Strava.Winter', 'Stamen.Terrain', 'Stamen.Toner',
'Stamen.Watercolor' ]
#---------------------------------
# 'HikeBike.HikeBike', 'MtbMap'
# 'OpenStreetMap.BlackAndWhite',
# 'OpenStreetMap.France',
#----------------------------------
return basemap_list
# get_basemap_list()
#--------------------------------------------------------------------
def change_base_map(self, caller_obj=None):
#--------------------------------------------------------
# Cannot directly change the basemap for some reason.
# self.map_window.basemap = basemaps.Esri.WorldStreetMap
# Need to call clear_layers(), then add_layer().
#---------------------------------------------------------
map_choice = self.map_basemap.value
self.map_window.clear_layers()
basemap_layer = eval( 'basemaps.' + map_choice )
self.map_window.add_layer( basemap_layer )
# For testing
# print('map_choice =', map_choice)
# print('Changed the basemap.')
# change_base_map()
#--------------------------------------------------------------------
def update_map_view(self, caller_obj=None):
pass
# update_map_view()
#--------------------------------------------------------------------
def reset_map_panel(self, caller_obj=None):
self.map_window.center = self.map_center_init
self.map_window.zoom = 1
self.map_minlon.value = '-225.0'
self.map_maxlon.value = '225.0'
self.map_minlat.value = '-51.6'
self.map_maxlat.value = '70.6'
# reset_map_panel()
#--------------------------------------------------------------------
def make_datetime_panel(self):
full_box_width_px = self.pix_str( self.full_box_width )
date_width_px = self.date_width_px
time_width_px = self.time_width_px
hint_width_px = self.hint_width_px
#-----------------------------------
date_style = self.date_style
time_style = self.time_style
d1 = widgets.DatePicker( description='Start Date:',
disabled=False, style=date_style,
layout=Layout(width=date_width_px) )
d2 = widgets.DatePicker( description='End Date:',
disabled=False, style=date_style,
layout=Layout(width=date_width_px) )
d3 = widgets.Text( description='Start Time:',
disabled=False, style=time_style,
layout=Layout(width=time_width_px) )
d4 = widgets.Text( description='End Time:',
disabled=False, style=time_style,
layout=Layout(width=time_width_px) )
d3.value = '00:00:00'
d4.value = '00:00:00'
#-------------------------------
# Add some padding on the left
#-------------------------------
## margin = '0px 0px 2px 10px' # top right bottom left
pp = widgets.HTML((' ' * 3)) # for padding
d5 = widgets.Label( '(hh:mm:ss, 24-hr)',
layout=Layout(width=hint_width_px) )
## layout=Layout(width=hint_width_px, margin=margin) )
## disabled=False, style=hint_style )
d6 = widgets.Label( '(hh:mm:ss, 24-hr)',
layout=Layout(width=hint_width_px) )
## layout=Layout(width=hint_width_px, margin=margin) )
## disabled=False, style=hint_style )
d7 = widgets.Dropdown( description='Attributes:',
options=[''], value='',
disabled=False, style=date_style,
layout=Layout(width=full_box_width_px) )
# d8 = widgets.Text( description='Notes:',
# disabled=False, style=self.date_style,
# layout=Layout(width=full_box_width_px) )
d8 = widgets.Textarea( description='Notes:', value='',
disabled=False, style=self.date_style,
layout=Layout(width=full_box_width_px, height='140px'))
dates = widgets.VBox([d1, d2])
times = widgets.VBox([d3, d4])
hints = widgets.VBox([d5, d6])
pad = widgets.VBox([pp, pp])
top = widgets.HBox([dates, times, pad, hints])
panel = widgets.VBox([top, d7, d8])
## panel = widgets.VBox([top, pp, d7, d8])
self.datetime_start_date = d1
self.datetime_start_time = d3
self.datetime_end_date = d2
self.datetime_end_time = d4
self.datetime_attributes = d7
self.datetime_notes = d8
self.datetime_panel = panel
# make_datetime_panel()
#--------------------------------------------------------------------
def make_download_panel(self):
init_style = self.init_label_style
f1 = widgets.Dropdown( description='Download Format:',
options=['HDF', 'netCDF', 'netCDF4', 'ASCII'],
value='netCDF',
disabled=False, style=init_style)
pad = widgets.HTML(value=f"<p> </p>") # padding
b3 = widgets.Button(description="Download")
h3 = widgets.HBox([f1, pad, b3])
#-----------------------------------
# Could use this for info messages
#-----------------------------------
# status = widgets.Text(description=' Status:', style=self.style0,
# layout=Layout(width='380px') )
width_px = self.log_box_width_px
height_px = self.log_box_height_px
log = widgets.Textarea( description='', value='',
disabled=False, style=init_style,
layout=Layout(width=width_px, height=height_px))
## panel = widgets.VBox([h3, status, log])
panel = widgets.VBox([h3, log])
self.download_format = f1
self.download_button = b3
self.download_log = log
self.download_panel = panel
#-----------------
# Event handlers
#-----------------
b3.on_click( self.download_data )
# make_download_panel()
#--------------------------------------------------------------------
def make_prefs_panel(self):
full_box_width_px = self.pix_str( self.full_box_width )
left_style = self.left_label_style
w1 = widgets.Dropdown( description='OpenDAP package:',
options=['pydap', 'netcdf4'],
value='pydap',
disabled=False, style=left_style)
ts = self.timeout_secs
t1 = widgets.BoundedIntText( description='Timeout:',
value=ts, min=10, max=1000,
step=1, disabled=False,
style=left_style)
t2 = widgets.Label( ' (seconds)',
layout=Layout(width='80px') )
w2 = widgets.HBox([t1, t2])
note = 'Under construction; preferences will go here.'
w3 = widgets.Textarea( description='Notes:', value=note,
disabled=False, style=left_style,
layout=Layout(width=full_box_width_px, height='50px'))
panel = widgets.VBox([w1, w2, w3])
self.prefs_package = w1
self.prefs_timeout = t1
self.prefs_notes = w2
self.prefs_panel = panel
# make_prefs_panel()
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def get_map_bounds(self, FROM_MAP=True, style='sw_and_ne_corners'):
#-------------------------------------------------------
# Notes: ipyleaflet defines "bounds" as:
# [[minlat, maxlat], [minlon, maxlon]]
# matplotlib.imshow defines "extent" as:
# extent = [minlon, maxlon, minlat, maxlat]
#-------------------------------------------------------
# Return value is a list, not a tuple, but
# ok to use it like this:
# [minlon, minlat, maxlon, maxlat] = get_map_bounds().
#-------------------------------------------------------
if (FROM_MAP):
#------------------------------------
# Get the visible map bounds, after
# interaction such as pan or zoom
#------------------------------------
# bounds = self.map_window.bounds
# minlat = bounds[0][0]
# minlon = bounds[0][1]
# maxlat = bounds[1][0]
# maxlon = bounds[1][1]
#------------------------------------
# Is this more reliable ?
#------------------------------------
minlon = self.map_window.west
minlat = self.map_window.south
maxlon = self.map_window.east
maxlat = self.map_window.north
else:
#---------------------------------
# Get map bounds from text boxes
#---------------------------------
minlon = self.map_minlon.value
minlat = self.map_minlat.value
maxlon = self.map_maxlon.value
maxlat = self.map_maxlat.value
#------------------------------------------
# Return map bounds in different "styles"
#------------------------------------------
if (style == 'ipyleaflet'):
bounds = [[minlat, maxlat], [minlon, maxlon]]
elif (style == 'pyplot_imshow'):
bounds = [minlon, maxlon, minlat, maxlat]
        elif (style == 'sw_and_ne_corners'):
bounds = [minlon, minlat, maxlon, maxlat]
else:
bounds = [minlon, minlat, maxlon, maxlat]
return bounds
# get_map_bounds()
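    #--------------------------------------------------------------------
    # For example (illustration only), a box roughly covering the USA
    # would be returned as:
    #    'ipyleaflet'     -> [[24.5, 49.5], [-125.0, -66.9]]
    #    'pyplot_imshow'  -> [-125.0, -66.9, 24.5, 49.5]
    #    default (SW/NE)  -> [-125.0, 24.5, -66.9, 49.5]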
#--------------------------------------------------------------------
def replace_map_bounds(self, event, type=None, coordinates=None):
#-------------------------------------------
# Get visible map bounds after interaction
# Called by m.on_interaction().
# Don't need to process separate events?
#-------------------------------------------
[minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
#--------------------------------
# Save new values in text boxes
# Format with 8 decimal places.
#--------------------------------
self.map_minlon.value = "{:.8f}".format( minlon )
self.map_maxlon.value = "{:.8f}".format( maxlon )
self.map_maxlat.value = "{:.8f}".format( maxlat )
self.map_minlat.value = "{:.8f}".format( minlat )
# replace_map_bounds()
#--------------------------------------------------------------------
# def replace_map_bounds2(self, event, type=None, coordinates=None):
#
# # events: mouseup, mousedown, mousemove, mouseover,
# # mouseout, click, dblclick, preclick
# event = kwargs.get('type')
# # print('event = ', event)
# if (event == 'mouseup') or (event == 'mousemove') or \
# (event == 'click') or (event == 'dblclick'):
# w1.value = m.west
# w2.value = m.east
# w3.value = m.north
# w4.value = m.south
#
# # status.value = event
#
# # with output2:
# # print( event )
#
#--------------------------------------------------------------------
def update_map_bounds(self, caller_obj=None):
[bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
self.get_map_bounds( FROM_MAP = False )
bb_midlon = (bb_minlon + bb_maxlon) / 2
bb_midlat = (bb_minlat + bb_maxlat) / 2
bb_center = ( bb_midlat, bb_midlon )
# print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
# print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
#----------------------------------------------------------
zoom = self.map_window.max_zoom # (usually 18)
self.map_window.center = bb_center
self.map_window.zoom = zoom
## print('max_zoom =', self.map_window.max_zoom)
## print('map_window.bounds =', self.map_window.bounds )
#------------------------------------
# Add "new_bounds" attribute to map
#------------------------------------
new_bounds = ((bb_minlat, bb_minlon), (bb_maxlat, bb_maxlon))
self.map_window.new_bounds = Tuple()
self.map_window.new_bounds = new_bounds
# update_map_bounds()
#--------------------------------------------------------------------
def zoom_out_to_new_bounds(self, change=None):
# change owner is the widget that triggers the handler
m = change.owner
#-----------------------------------------
# If not zoomed all the way out already,
# and we have a target bounding box
#-----------------------------------------
if (m.zoom > 1 and m.new_bounds):
b = m.new_bounds
n = change.new
if (n[0][0] < b[0][0] and n[0][1] < b[0][1] and
n[1][0] > b[1][0] and n[1][1] > b[1][1]):
#---------------------------------------
# new_bounds are now within map window
# Show bounding box as a rectangle ?
# weight = line/stroke thickness
#---------------------------------------
# rectangle = Rectangle( bounds=b, fill=False, weight=4)
# ## fill_opacity=0.0, \ fill_color="#0033FF" )
# m.add_layer(rectangle)
#-----------------------
m.new_bounds = None # (remove target)
else:
# zoom out
m.zoom = m.zoom - 1
# zoom_out_to_new_bounds()
#--------------------------------------------------------------------
# def zoom_out_to_new_bounds_v0(self, caller_obj=None):
#
# [bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
# self.get_map_bounds( FROM_MAP = False )
# bb_midlon = (bb_minlon + bb_maxlon) / 2
# bb_midlat = (bb_minlat + bb_maxlat) / 2
# bb_center = ( bb_midlat, bb_midlon )
# print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
# print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
# zoom = self.map_window.max_zoom # (usually 18)
# zoom = zoom - 1
# ## print('max_zoom =', self.map_window.max_zoom)
#
# self.map_window.center = bb_center
# self.map_window.zoom = zoom
# print('map_window.bounds =', self.map_window.bounds )
# # bounds is read-only
# ## self.map_window.bounds = ((bb_midlat,bb_midlon),(bb_midlat,bb_midlon))
# while (True):
# # time.sleep(0.5) ######
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# # zoom_out_to_new_bounds_v0
#--------------------------------------------------------------------
def get_url_dir_filenames(self):
#-----------------------------------------
# Construct a list of filenames that are
# available in the opendap url directory
#-----------------------------------------
r = requests.get( self.data_url_dir.value )
lines = r.text.splitlines()
# n_lines = len(lines)
filenames = list()
for line in lines:
if ('"sameAs": "http://' in line) and ('www' not in line):
line = line.replace('.html"', '')
parts = line.split("/")
filename = parts[-1]
filenames.append( filename )
return filenames
# get_url_dir_filenames()
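    #--------------------------------------------------------------------
    # Hypothetical example (not from the original code) of the kind of
    # catalog line this scraper matches; only the filename at the end
    # of the "sameAs" URL is kept:
    #    "sameAs": "http://test.opendap.org/dap/data/nc/some_file.nc.html"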
#--------------------------------------------------------------------
def update_filename_list(self, caller_obj=None):
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Go" button beside the Dropdown of filenames.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
## default_url_dir = 'http://test.opendap.org/dap/data/nc/'
self.data_status.value = 'Retrieving filenames in URL dir...'
filenames = self.get_url_dir_filenames()
if (len(filenames) == 0):
self.reset_data_panel( KEEP_DIR=True )
msg = 'Error: No data files found in URL dir.'
self.data_status.value = msg
return
#-----------------------------------
# Update filename list & selection
#-----------------------------------
self.data_filename.options = filenames
self.data_filename.value = filenames[0]
self.data_status.value = 'Ready.'
# update_filename_list()
#--------------------------------------------------------------------
def get_opendap_file_url(self):
directory = self.data_url_dir.value
if (directory[-1] != '/'):
directory += '/'
#------------------------------------
filename = self.data_filename.value
self.opendap_file_url = (directory + filename)
# get_opendap_file_url()
#--------------------------------------------------------------------
def open_dataset(self):
timeout = self.timeout_secs
opendap_url = self.opendap_file_url
dataset = pydap.client.open_url( opendap_url, timeout=timeout )
self.dataset = dataset
# open_dataset()
#--------------------------------------------------------------------
def update_data_panel(self, change=None):
#-------------------------------------------------------
# Note: When used as a callback/handler function for a
# widget's "observe" method, a dictionary called
# "change" is passed to this function. This
# callback fails without the "change=None".
# The type of "change" is:
# <class 'traitlets.utils.bunch.Bunch'>
#-------------------------------------------------------
# print('type(change) =', type(change))
if (self.data_filename.value == ''):
## self.update_filename_list() # (try this?)
return
self.get_opendap_file_url()
self.open_dataset()
self.get_all_var_shortnames()
self.get_all_var_longnames()
self.get_all_var_units()
#------------------------------------------
# Create map between long and short names
#------------------------------------------
long_names = self.var_long_names
short_names = self.var_short_names
units_names = self.var_units_names
self.short_name_map = dict(zip(long_names, short_names ))
self.units_map = dict(zip(long_names, units_names ))
#-------------------------------------------
# Update variable list and selected value.
#-------------------------------------------
self.data_var_name.options = short_names
self.data_var_name.value = short_names[0]
#------------------------------------
# Show other info for this variable
#------------------------------------
self.update_var_info()
self.clear_download_log() #####
#-------------------------------------------
# Try to show map extent in map panel
#-------------------------------------------
#### self.update_map_panel()
#-------------------------------------------
# Try to show date range in datetime panel
#-------------------------------------------
self.update_datetime_panel() # clears notes, too
# update_data_panel()
#--------------------------------------------------------------------
def update_var_info(self, change=None):
#-------------------------------------------------------
# Note: When used as a callback/handler function for a
# widget's "observe" method, a dictionary called
# "change" is passed to this function. This
# callback fails without the "change=None".
# The type of "change" is:
# <class 'traitlets.utils.bunch.Bunch'>
#-------------------------------------------------------
short_name = self.get_var_shortname()
if (short_name == ''):
return
#-----------------------------------------------
# Maybe later wrap this block in "try, except"
#----------------------------------------------
# Note: short_name is selected from Dropdown.
# var = dataset[ short_name ]
#----------------------------------------------
long_name = self.get_var_longname( short_name )
units = self.get_var_units( short_name )
shape = self.get_var_shape( short_name )
dims = self.get_var_dimensions( short_name )
dtype = self.get_var_dtype( short_name )
atts = self.get_var_attributes( short_name )
#---------------------------------------------
self.data_var_long_name.value = long_name
self.data_var_units.value = units
self.data_var_shape.value = shape
self.data_var_dims.value = dims
self.data_var_type.value = dtype
self.data_var_atts.options = atts
# update_var_info()
#--------------------------------------------------------------------
def get_all_var_shortnames(self):
self.var_short_names = list( self.dataset.keys() )
# get_all_var_shortnames()
#--------------------------------------------------------------------
def get_all_var_longnames(self):
if not(hasattr(self, 'var_short_names')):
self.get_all_var_shortnames()
long_names = list()
for name in self.var_short_names:
try:
                long_name = self.get_var_longname( name )
long_names.append( long_name )
except:
# Use short name if there is no long_name.
long_names.append( name )
# print('No long name found for:', name)
self.var_long_names = long_names
# get_all_var_longnames()
#--------------------------------------------------------------------
def get_all_var_units(self):
if not(hasattr(self, 'var_short_names')):
self.get_all_var_shortnames()
units_names = list()
for name in self.var_short_names:
try:
units = self.get_var_units( name )
units_names.append( units )
except:
units_names.append( 'unknown' )
# print('No units name found for:', name)
self.var_units_names = units_names
# get_all_var_units()
#--------------------------------------------------------------------
def get_var_shortname(self):
short_name = self.data_var_name.value
if (short_name == ''):
pass
## print('Short name is not set.')
return short_name
# get_var_shortname()
#--------------------------------------------------------------------
def get_var_longname( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'long_name'):
return var.long_name
else:
return 'Long name not found.'
## return short_name
# get_var_longname()
#--------------------------------------------------------------------
def get_var_units( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'units'):
return var.units
else:
return 'unknown'
# get_var_units()
#--------------------------------------------------------------------
def get_var_shape( self, short_name ):
var = self.dataset[ short_name ]
return str(var.shape)
# get_var_shape()
#--------------------------------------------------------------------
def get_var_dimensions( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'dimensions'):
return str(var.dimensions)
else:
return 'No dimensions found.'
# get_var_dimensions()
#--------------------------------------------------------------------
def get_var_dtype( self, short_name ):
# The old Numeric single-character typecodes:
# ('f','d','h', 's','b','B','c','i','l'),
# corresponding to:
# ('f4','f8','i2','i2','i1','i1','S1','i4','i4'),
# are not yet supported.
type_map = {
'i1' : '1-byte signed integer',
'i2' : '2-byte signed integer',
'i4' : '4-byte signed integer',
'i8' : '8-byte signed integer',
'f4' : '4-byte floating point',
'f8' : '8-byte floating point',
'u1' : '1-byte unsigned integer',
'u2' : '2-byte unsigned integer',
'u4' : '4-byte unsigned integer',
'u8' : '8-byte unsigned integer' }
type_list = list( type_map.keys() )
var = self.dataset[ short_name ]
type_str = str( var.dtype )
#----------------------------------------
# The ">" & "<" indicate big and little
# endian byte order (i.e. MSB or LSB)
#----------------------------------------
endian = ''
if (type_str[0] == '>'):
type_str = type_str[1:]
endian = ' (big endian)'
## endian = ' (MSB)'
if (type_str[0] == '<'):
type_str = type_str[1:]
endian = ' (little endian)'
## endian = ' (LSB)'
#---------------------------------
if (type_str in type_list):
return type_map[ type_str ] + endian
elif (type_str[:2] == '|S'):
try:
num = int( type_str[2:] )
return ('string (' + str(num) + '-character max)')
except:
return type_str
elif (type_str[0] == 'S'):
try:
num = int( type_str[1:] )
return ('string (' + str(num) + '-character max)')
except:
return type_str
else:
return type_str
# get_var_dtype()
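    #--------------------------------------------------------------------
    # Examples (illustration only) of how dtype strings are rendered:
    #    '>f4'   ->  '4-byte floating point (big endian)'
    #    '<i2'   ->  '2-byte signed integer (little endian)'
    #    '|S64'  ->  'string (64-character max)'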
#--------------------------------------------------------------------
def get_var_attributes( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'attributes'):
#----------------------------------------
# Convert dictionary to list of strings
# to be displayed in a droplist.
#----------------------------------------
att_list = []
for key, val in var.attributes.items():
att_list.append( str(key) + ': ' + str(val) )
return att_list
#-------------------------------------------
# Return all attributes as one long string
#-------------------------------------------
### return str( var.attributes ) #### use str()
else:
return 'No attributes found.'
# get_var_attributes()
#--------------------------------------------------------------------
def get_time_attributes( self):
if (hasattr(self.dataset, 'time')):
time = self.dataset.time
elif (hasattr(self.dataset, 'TIME')):
time = self.dataset.TIME
if hasattr(time, 'attributes'):
#----------------------------------------
# Convert dictionary to list of strings
# to be displayed in a droplist.
#----------------------------------------
att_list = []
for key, val in time.attributes.items():
att_list.append( str(key) + ': ' + str(val) )
return att_list
#-------------------------------------------
# Return all attributes as one long string
#-------------------------------------------
### return str( time.attributes ) #### use str()
else:
return 'No time attributes found.'
# get_time_attributes()
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def update_datetime_panel(self):
self.clear_datetime_notes() # erase notes
#-----------------------------------------
# Are there any times for this dataset ?
#-----------------------------------------
short_names = self.var_short_names # self.dataset.keys()
if ('time' in short_names):
self.time_obj = self.dataset.time
self.time_var = self.time_obj.data[:]
elif ('TIME' in short_names):
self.time_obj = self.dataset.TIME
self.time_var = self.time_obj.data[:]
else:
msg = 'Unable to find times for this dataset.'
self.append_datetime_notes( msg )
return
#-----------------------------------------
# Show all time attributes in a droplist
#-----------------------------------------
time_att_list = self.get_time_attributes()
if (time_att_list is not None):
self.datetime_attributes.options = time_att_list
#----------------------------------------------------
# Compute the min and max times; save as time_range
#----------------------------------------------------
min_time = self.time_var.min()
max_time = self.time_var.max()
self.time_range = [min_time, max_time]
msg = 'Time range for this dataset = '
msg += '(' + str(min_time) + ', ' + str(max_time) + ')'
self.append_datetime_notes( msg )
#------------------------------------------------
# Is there an attribute called "actual_range" ?
#------------------------------------------------
# if not(hasattr(self.time_obj, 'actual_range')):
# msg = 'Unable to find "actual range" for times.'
# self.datetime_notes.value = msg
# return
# else:
# self.time_range = self.time_obj.actual_range
#-----------------------------------------
# Is there an attribute called "units" ?
#-----------------------------------------
# The full string may be something like:
# hour since 0000-01-01 00:00:00
# Save both full string and just units.
#-----------------------------------------
if (hasattr(self.time_obj, 'units')):
self.time_units_str = self.time_obj.units
self.get_actual_time_units() # (set self.time_units)
else:
msg = 'Unable to find "units" for time.'
self.append_datetime_notes( msg )
return
#-------------------------------------------
# Is there an attribute called "delta_t" ?
# If so, assume it is in "datetime" form,
        # such as "00-01-00 00:00:00" for 1 month.
#-------------------------------------------
HAS_DELTA_T = hasattr(self.time_obj, 'delta_t')
if (HAS_DELTA_T):
self.time_delta = self.time_obj.delta_t
else:
self.get_time_delta_str()
# For testing:
# print('In update_datetime_panel():' )
# print('self.time_delta =', self.time_delta )
# print('HAS_DELTA_T =', HAS_DELTA_T )
#---------------------------------------------------
# Are time units given as "time since" some date ?
#---------------------------------------------------
# Sample data has cases with:
# 'days since', 'hour since' (vs hours), 'seconds since'
#--------------------------------------------------------
# Already saved "time_units_str" AND "time_units" above.
# strip() removes leading and trailing whitespace
#--------------------------------------------------------
time_units_str = self.time_units_str
if ('since' not in time_units_str):
msg = 'Time units string has no "since" part.'
self.append_datetime_notes( msg )
return
#-------------------------------------
# Process the "origin" date and time
#-------------------------------------
parts = time_units_str.split('since')
odt = parts[1].strip()
self.origin_datetime_str = odt
(date_str, time_str) = self.split_datetime_str( odt )
if (date_str.startswith('0000')):
msg = 'Warning: "Since" year must be > 0, changing to 1.'
self.append_datetime_notes( msg )
date_str = date_str[:3] + '1' + date_str[4:]
self.origin_datetime_obj = self.get_datetime_obj_from_str( date_str, time_str)
#---------------------------------------------
# Now process time_since for start and end
#---------------------------------------------
time_since1 = self.time_range[0]
time_since2 = self.time_range[1]
start_datetime_obj = self.get_datetime_from_time_since(time_since1)
end_datetime_obj = self.get_datetime_from_time_since(time_since2)
start_datetime_str = str(start_datetime_obj)
end_datetime_str = str(end_datetime_obj)
(start_date, start_time) = self.split_datetime_str( start_datetime_str )
(end_date, end_time) = self.split_datetime_str( end_datetime_str )
#-------------------------------
# Save these also, as numbers.
#-------------------------------
self.start_year = start_datetime_obj.year
self.end_year = end_datetime_obj.year
# (y1,m1,d1) = self.split_date_str( start_date )
# (y2,m2,d2) = self.split_date_str( end_date )
# self.start_year = y1
# self.end_year = y2
#-----------------------------------------------------------
# Be sure to set date values as date_obj, not datetime_obj
#-----------------------------------------------------------
self.datetime_start_date.value = start_datetime_obj.date()
self.datetime_end_date.value = end_datetime_obj.date()
self.datetime_start_time.value = start_time
self.datetime_end_time.value = end_time
#----------------------------------
# This also works, but more steps
#----------------------------------
# (y1,m1,d1) = self.split_date_str( start_date )
# (y2,m2,d2) = self.split_date_str( end_date )
# self.datetime_start_date.value = datetime.date(y1, m1, d1)
# self.datetime_end_date.value = datetime.date(y2, m2, d2)
# update_datetime_panel()
#--------------------------------------------------------------------
def get_years_from_time_since(self, data_time_since):
#----------------------------------------------------
# Notes: self.time_var contains "times since" some
# origin time, in days, hours or seconds,
# unrestricted by user start/end times.
# self.time_range[0] = self.time_var.min()
# self.time_range[1] = self.time_var.max()
#----------------------------------------------------
# For plots, want to convert these time
# offsets to decimal years, keeping in mind
# that user may have restricted the time
# range further.
#----------------------------------------------------
units_per_year = {
'years':1.0, 'days':365.0, 'hours':8760.0,
'minutes':525600.0, 'seconds':31536000.0 }
min_data_time_since = self.time_range[0]
time_since_start = (data_time_since - min_data_time_since)
#----------------------------------------------------
units = self.time_units
if (units in units_per_year.keys()):
factor = units_per_year[ units ]
years_since_start = (time_since_start / factor)
else:
print('ERROR, Unsupported units:', units)
return None
#----------------------------------------------------
start_year = self.start_year
dec_years = (years_since_start + start_year)
return dec_years
# get_years_from_time_since()
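    # Example (a sketch only; the values below are hypothetical and depend
    # on instance state set in update_datetime_panel()):  with
    # self.time_units == 'days', self.time_range[0] == 0.0 and
    # self.start_year == 1980, get_years_from_time_since( 365.0 )
    # returns 1981.0 (one full year after the start of the data).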
#--------------------------------------------------------------------
def clear_datetime_notes(self):
self.datetime_notes.value = ''
# clear_datetime_notes()
#--------------------------------------------------------------------
def append_datetime_notes(self, msg):
self.datetime_notes.value += (msg + '\n')
# append_datetime_notes()
#--------------------------------------------------------------------
# def list_to_string( self, array ):
#
# s = ''
# for item in array:
# s = s + item + '\n'
# return s
#
# # list_to_string()
#--------------------------------------------------------------------
def pad_with_zeros(self, num, target_len):
num_string = str( int(num) ) # int removes decimal part
n = len( num_string )
m = (target_len - n)
num_string = ('0'*m) + num_string
return num_string
# pad_with_zeros()
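    # Example (hypothetical calls, not from the original code; "balto"
    # is the GUI instance):
    #   balto.pad_with_zeros( 7, 4 )    returns '0007'
    #   balto.pad_with_zeros( 12.9, 2 ) returns '12'  (decimal part dropped)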
#--------------------------------------------------------------------
def get_actual_time_units(self):
# secs_per_unit_list = [1, 60.0, 3600.0, 86400, 31536000.0, -1]
# next_unit_factor = [60.0, 60.0, 24.0, 365.0, -1, -1]
units_list = ['second', 'minute', 'hour',
'day', 'year', 'None'] # ascending, skip month
for units in units_list:
if (self.time_units_str.startswith(units)):
break
        if (units != 'None'):
units += 's' # (make units plural now; not before)
else:
print('ERROR: No match found for units.')
return
self.time_units = units
# get_actual_time_units()
#--------------------------------------------------------------------
def get_time_delta_str(self):
## print('### self.time_var.size =', self.time_var.size )
## print('###')
#-----------------------------------
# Check size of the time_var array
#-----------------------------------
if (self.time_var.size == 1):
dt = 0
self.time_delta = '0000-00-00 00:00:00'
# print('At top of get_time_delta_str():')
# print('self.time_var.size =', self.time_var.size )
# print('self.time_delta =', self.time_delta )
return
if (self.time_var.size > 1):
dt = (self.time_var[1] - self.time_var[0])
print('dt1 =', dt)
if (self.time_var.size > 3):
dt2 = (self.time_var[2] - self.time_var[1]) ###
dt3 = (self.time_var[3] - self.time_var[2]) ###
print('dt2 =', dt2) # check if evenly spaced
print('dt3 =', dt3)
#---------------------------------------------------
# Note: Actual time units were stripped from units
# string and saved as self.time_units.
# A full units attribute string may be:
# 'hour since 0000-00-00 00:00:00'
#---------------------------------------------------
units_list = ['seconds', 'minutes', 'hours',
'days', 'years', 'None'] # ascending, skip month
secs_per_unit_list = [1, 60.0, 3600.0, 86400, 31536000.0, -1]
next_unit_factor = [60.0, 60.0, 24.0, 365.0, -1, -1]
units = self.time_units
units_index = units_list.index( units )
#----------------------------------------
if (units == 'years'):
s = self.pad_with_zeros(dt,4)
else:
if (len(str(dt)) <= 2):
s = self.pad_with_zeros(dt,2)
else:
#-------------------------------
# Must convert units to get dt
# down to 1 or 2 digits.
#-------------------------------
old_dt = dt
old_units = units
k = units_index
n = len( str(int(dt)) )
while (n > 2) and (units != 'None'):
k = k + 1
dt = (dt / next_unit_factor[k-1])
units = units_list[k]
n = len( str(int(dt)) )
if (units == 'None'):
print('#####################################')
print('ERROR in get_time_delta_str():')
print(' dt has too many digits.')
print('#####################################')
return
else:
# Note that any remainder has been dropped.
s = self.pad_with_zeros(dt,2)
print('Old dt and units =', old_dt, old_units)
print('New dt and units =', dt, units)
print('Remainder not retained yet.')
#----------------------------------------------
if (units == 'years'):
td = (s + '-00-00 00:00:00')
# if (units == 'months'):
# td= ('0000-' + s + '-00 00:00:00')
if (units == 'days'):
td = ('0000-00-' + s + ' 00:00:00')
if (units == 'hours'):
td = ('0000-00-00 ' + s + ':00:00')
if (units == 'minutes'):
td = ('0000-00-00 00:' + s + ':00')
if (units == 'seconds'):
td = ('0000-00-00 00:00:' + s)
#------------------------------------------------
self.time_delta = td
# print('At bottom of get_time_delta_str():')
# print('self.time_delta =', td)
# print()
# get_time_delta_str()
#--------------------------------------------------------------------
def get_datetime_obj_from_str(self, date_str, time_str='00:00:00'):
#---------------------------------------------------
# date_str = 'YYYY-MM-DD', time_str = 'HH:MM:SS'
#---------------------------------------------------
## e.g. d1 = str(self.datetime_end_date.value)
## e.g. t1 = self.datetime_end_time.value
(y, m1, d) = self.split_date_str(date_str)
(h, m2, s) = self.split_time_str(time_str)
if( y <= 0 ):
# msg = 'Year cannot be < 1 in start date.\n'
# msg += 'Changed year from ' + str(y) + ' to 1.'
# self.datetime_notes.value = msg
print('Year cannot be < 1 in start date.')
print('Changed year from ' + str(y) + ' to 1.')
print()
y = 1
datetime_obj = datetime.datetime(y, m1, d, h, m2, s)
return datetime_obj
# get_datetime_obj_from_str()
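    # Example (hypothetical call; "balto" is the GUI instance):
    #   balto.get_datetime_obj_from_str( '2017-09-15', '06:30:00' )
    #   returns datetime.datetime(2017, 9, 15, 6, 30).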
#--------------------------------------------------------------------
def get_datetime_obj_from_one_str(self, datetime_str):
(date, time) = self.split_datetime_str( datetime_str )
(y, m1, d) = self.split_date_str( date )
(h, m2, s) = self.split_time_str( time )
datetime_obj = datetime.datetime(y, m1, d, h, m2, s)
return datetime_obj
# get_datetime_obj_from_one_str()
#--------------------------------------------------------------------
def get_start_datetime_obj(self):
#---------------------------------------
# d1.value is a datetime "date object"
# t1.value is a time string: 00:00:00
#---------------------------------------
d1 = self.datetime_start_date
t1 = self.datetime_start_time
if (d1.value is None):
return None
date_str = str(d1.value)
time_str = t1.value # (already string)
## print('In get_start_datetime_obj():')
## print('date_str =', date_str)
## print('time_str =', time_str)
datetime_obj = self.get_datetime_obj_from_str(date_str, time_str)
return datetime_obj
# get_start_datetime_obj()
#--------------------------------------------------------------------
def get_end_datetime_obj(self):
#---------------------------------------
# d1.value is a datetime "date object"
# t1.value is a time string: 00:00:00
#---------------------------------------
d1 = self.datetime_end_date
t1 = self.datetime_end_time
if (d1.value is None):
return None
date_str = str(d1.value)
time_str = t1.value # (already string)
## print('In get_end_datetime_obj():')
## print('date_str =', date_str)
## print('time_str =', time_str)
datetime_obj = self.get_datetime_obj_from_str(date_str, time_str)
return datetime_obj
# get_end_datetime_obj()
#--------------------------------------------------------------------
def split_datetime_str(self, datetime_obj, datetime_sep=' ',
ALL=False):
#-----------------------------------------------
# Note: Still works if datetime_obj is string.
#-----------------------------------------------
datetime_str = str(datetime_obj)
parts = datetime_str.split( datetime_sep )
## print('## datetime_str =', datetime_str )
## print('## parts =', str(parts) )
date_str = parts[0]
time_str = parts[1]
if not(ALL):
return (date_str, time_str)
else:
(y,m1,d) = self.split_date_str( date_str )
(h,m2,s) = self.split_time_str( time_str )
return (y,m1,d,h,m2,s)
# split_datetime_str()
#--------------------------------------------------------------------
def split_date_str(self, date_str, date_sep='-'):
date_parts = date_str.split( date_sep )
year = int(date_parts[0])
month = int(date_parts[1]) # NOTE: int('08') = 8
day = int(date_parts[2])
return (year, month, day)
# split_date_str()
#--------------------------------------------------------------------
def split_time_str(self, time_str, time_sep=':'):
time_parts = time_str.split( time_sep )
hour = int(time_parts[0])
minute = int(time_parts[1])
second = int(time_parts[2])
return (hour, minute, second)
# split_time_str()
#--------------------------------------------------------------------
def get_datetime_from_time_since(self, time_since):
# For testing
# print('## type(times_since) =', type(time_since) )
# print('## time_since =', time_since )
# print('## int(time_since) =', int(time_since) )
#---------------------------------------------------
# Note: datetime.timedelta() can take integer or
# float arguments, and the arguments can be
# very large numbers. However, it does not
# accept any numpy types, whether float or
# int (e.g. np.int16, np.float32).
# https://docs.python.org/3/library/datetime.html
#---------------------------------------------------
units = self.time_units # ('days', 'hours', etc.)
delta = None
time_since2 = float(time_since) ## No numpy types
#------------------------------------------------------
if (units == 'days'):
delta = datetime.timedelta( days=time_since2 )
if (units == 'hours'):
delta = datetime.timedelta( hours=time_since2 )
if (units == 'minutes'):
delta = datetime.timedelta( minutes=time_since2 )
if (units == 'seconds'):
delta = datetime.timedelta( seconds=time_since2 )
#------------------------------------------------------
if (delta is None):
msg = 'ERROR: Units: ' + units + ' not supported.'
self.append_datetime_notes( msg )
return
# For testing
## print('#### delta =', delta)
#---------------------------------------------
# Create new datetime object from time_since
#---------------------------------------------
origin_obj = self.origin_datetime_obj
new_dt_obj = (origin_obj + delta)
return new_dt_obj
# get_datetime_from_time_since()
#--------------------------------------------------------------------
# def get_datetime_from_time_since_OLD(self, time_since):
#
# #---------------------------------------------------
# # datetime.timedelta has limits on inputs, e.g.
    #         # numpy.int32 is an unsupported type for the seconds arg.
# # So here we adjust big numbers for timedelta.
# # The days argument can handle really big numbers.
# #---------------------------------------------------
# maxint = 32767
# units = self.time_units # ('days', 'hours', etc.)
# n_per_day = {'seconds':86400.0, 'minutes':1440.0,
# 'hours':24.0, 'days':1.0}
# if (time_since > maxint):
# time_since = time_since / n_per_day[ units ]
# units = 'days' # (new units)
#
# #-------------------------------------------------
# # Note: We now save self.time_units_str separate
# # from self.time_units.
# #-------------------------------------------------
# delta = None
# if (units == 'days'):
# delta = datetime.timedelta( days=time_since )
# if (units == 'hours'):
# delta = datetime.timedelta( hours=time_since )
# if (units == 'minutes'):
# delta = datetime.timedelta( minutes=time_since )
# if (units == 'seconds'):
# delta = datetime.timedelta( seconds=time_since )
# #-----------------------------------------------------
# if (delta is None):
# msg = 'ERROR: Units: ' + units + ' not supported.'
# self.append_datetime_notes( msg )
# return
#
# #---------------------------------------------
# # Create new datetime object from time_since
# #---------------------------------------------
# origin_obj = self.origin_datetime_obj
# new_dt_obj = (origin_obj + delta)
# return new_dt_obj
#
# # For testing
# ## print('origin_datetime_obj =', str(origin_obj) )
# ## print('time_since delta =', str(delta) )
# ## print('new_dt_obj =', str(new_dt_obj) )
# ## return new_dt_obj
#
# # get_datetime_from_time_since()
#--------------------------------------------------------------------
def get_time_since_from_datetime(self, datetime_obj, units='days'):
#-------------------------------------------------
# Compute time duration between datetime objects
#-------------------------------------------------
origin_obj = self.origin_datetime_obj
duration_obj = (datetime_obj - origin_obj)
duration_secs = duration_obj.total_seconds()
#---------------------------------------------------
# There is not a fixed number of seconds per month
# Also 52 (weeks/year) * 7 (days/week) = 364.
#---------------------------------------------------
secs_per_unit_map = {
'years':31536000.0, 'weeks':604800.0, 'days':86400.0,
'hours':3600.0, 'minutes':60.0, 'seconds':1 }
secs_per_unit = secs_per_unit_map[ units ]
duration = (duration_secs / secs_per_unit )
time_since = duration # (in units provided)
return time_since
# get_time_since_from_datetime()
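    # Example (a sketch; assumes self.origin_datetime_obj is
    # datetime.datetime(1800, 1, 1, 0, 0, 0)):
    #   get_time_since_from_datetime( datetime.datetime(1800, 1, 11),
    #       units='days' )  returns 10.0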
#--------------------------------------------------------------------
def get_month_difference(self, start_datetime_obj, end_datetime_obj ):
#-------------------------------------------
# Example 0: 2017-09 to 2017-09
# months = (2017-2017)*12 = 0
        #   months = (months - 9)  = (0-9)  = -9
# months = (months + 9) = 0 (as index)
#-------------------------------------------
# Example 1: 2017-09 to 2018-02
# 9:10, 10:11, 11:12, 12:1, 1:2 = 5 (if same days)
# months = (2018-2017)*12 = 12
# months = (months - 9) = 3
# months = (months + 2) = 3 + 2 = 5
#-------------------------------------------
start_year = start_datetime_obj.year
end_year = end_datetime_obj.year
months = (end_year - start_year) * 12
#-------------------------------------------
start_month = start_datetime_obj.month
end_month = end_datetime_obj.month
months = months - start_month
months = months + end_month
## months = months + 1 # (no: get 1 if dates same)
## print('month difference =', months)
return months
# get_month_difference()
#--------------------------------------------------------------------
def get_new_time_index_range(self, REPORT=True):
if not(hasattr(self, 'origin_datetime_str')):
msg = 'Sorry, origin datetime is not set.'
self.append_download_log( [msg, ' '] )
if (hasattr(self, 'time_var')):
nt = len(self.time_var)
return (0, nt - 1) # (unrestricted by choices)
else:
return (None, None)
#----------------------------------------------------
# Get min possible datetime, from time_vars.min().
# Every time_var value is measured from an "origin"
# such as: '1800-01-01 00:00:00'
#----------------------------------------------------
## origin_datetime_obj = self.origin_datetime_obj
time_since_min = self.time_var.min()
min_datetime_obj = self.get_datetime_from_time_since( time_since_min )
#-----------------------------------------------
# Get current settings from the datetime panel
#-----------------------------------------------
start_datetime_obj = self.get_start_datetime_obj()
end_datetime_obj = self.get_end_datetime_obj()
#---------------------------------------------------
# Convert dt datetime string to "timedelta" object
# e.g. 00-01-00 00:00:00
#---------------------------------------------------
# Note: datetime.timedelta() does not do "months",
# since they're not a fixed number of days,
# so we use "get_month_difference()". Also
# it does not have a "years" argument.
#---------------------------------------------------
## print('In get_new_time_index_range():')
## print('self.time_delta =', self.time_delta)
USE_LOOPS = True
(y,m1,d,h,m2,s) = self.split_datetime_str(self.time_delta, ALL=True)
## print('time_delta =', self.time_delta )
## print('y, m1, d, h, m2, s =', y, m1, d, h, m2, s )
if (m1 == 0):
d = (y*365) + d # python int(), not 2-byte int.
# print('days =', d)
dt_timedelta_obj = datetime.timedelta(days=d, hours=h, minutes=m2, seconds=s)
elif (m1 > 0 and (y+d+h+m2+s == 0)):
n_months1 = self.get_month_difference( min_datetime_obj, start_datetime_obj )
n_months2 = self.get_month_difference( min_datetime_obj, end_datetime_obj )
start_index = int(n_months1 / m1)
end_index = int(n_months2 / m1)
USE_LOOPS = False
else:
# Note: I think there is a "monthdelta" package ?
# Or we may be able to use dateutils.
print('ERROR: Cannot handle this dt case yet.')
return None
#-------------------------------------------------
# Compute start and end index into time array.
# General method, if delta_t is datetime string.
#-------------------------------------------------
if (USE_LOOPS):
start_index = 0
# print('min_datetime_str =', str(min_datetime_obj) )
# print('dt_timedelta_str =', str(dt_timedelta_obj) )
next = copy.copy( min_datetime_obj )
while (True):
next = (next + dt_timedelta_obj)
## print('next =', str(next))
if (next < start_datetime_obj):
start_index += 1
else: break
#-------------------------------------------------
end_index = 0
next = copy.copy( min_datetime_obj )
while (True):
next = (next + dt_timedelta_obj)
if (next < end_datetime_obj):
end_index += 1
else: break
#---------------------------------
# Make sure indices are in range
#---------------------------------
nt = len( self.time_var )
start_index = max(0, start_index)
end_index = min(end_index, nt-1)
#---------------------------------------
# User time period may be smaller than
# time spacing (dt).
#----------------------------------------------------
# We are using these indices like this:
# a[ t_i1:t_i2, lat_i1:lat_i2, lon_i1:lon_i2]
# So if indices are equal, result will be empty.
# If indices differ by 1, get 1 value for that dim.
#----------------------------------------------------
if (start_index == end_index):
end_index = start_index + 1
if (REPORT):
# print('n_times =', nt)
# print('New time indices =', start_index, ',', end_index)
# print()
#--------------------------
i1s = str(start_index)
i2s = str(end_index)
msg1 = 'n_times = ' + str(nt)
msg2 = 'New time indices = ' + i1s + ',' + i2s
self.append_download_log( [msg1, msg2, ' '] )
return (start_index, end_index)
# Not needed for current problem.
# days_since1 = self.get_days_since_from_datetime(start_datetime_obj)
# days_since2 = self.get_days_since_from_datetime(end_datetime_obj)
# For testing
# print('type(start_index) =', type(start_index) )
# print('type(end_index) =', type(end_index) )
# print('start_index =', start_index)
# print('end_index =', end_index)
# print('n_times =', nt)
# return (start_index, end_index)
# get_new_time_index_range()
#--------------------------------------------------------------------
def get_new_lat_index_range(self, REPORT=True):
short_name = self.get_var_shortname()
#-------------------------------------------------
# Note: dimensions can be things like 'ni', 'nj'
        #       so it's better to use the list of all
# variable short names, stored earlier.
# They are valid keys to self.dataset.
#-------------------------------------------------
## dim_list = self.dataset[ short_name ].dimensions
## dim_list = self.dataset[ short_name ].attributes.keys()
dim_list = self.var_short_names
lat_name_list = ['lat', 'LAT', 'coadsy', 'COADSY',
'latitude', 'LATITUDE', 'None']
for lat_name in lat_name_list:
if (lat_name in dim_list):
break
if (lat_name == 'None'):
msg1 = 'Sorry, could not find a "latitude" variable.'
msg2 = 'Checked: lat, LAT, coadsy, COADSY,'
msg3 = ' latitude and LATITUDE.'
self.append_download_log( [msg1, msg2, msg3] )
return (None, None)
#--------------------------------------------
# Are lats for grid cell edges or centers ?
#--------------------------------------------
att_dict = self.dataset[ lat_name ].attributes
CENTERS = False
if ('coordinate_defines' in att_dict.keys() ):
if (att_dict['coordinate_defines'] == 'center'):
CENTERS = True
#------------------------------------
        # Get user-selected minlat and maxlat
#------------------------------------
user_minlat = self.map_minlat.value
user_maxlat = self.map_maxlat.value
#----------------------------------
# Get the array of lats, and info
#-----------------------------------------
# <class 'pydap.model.BaseType'>' object
# has no attribute 'array'
#--------------------------------------------------
# Next line type: <class 'pydap.model.BaseType'>
# and has no attribute "array".
#--------------------------------------------------
# lats = self.dataset[ lat_name ]
# lats = self.dataset[ lat_name ].array
#----------------------------------------------------------
# Next line type: <class 'pydap.handlers.dap.BaseProxy'>
# and has no attribute "size".
#----------------------------------------------------------
# lats = self.dataset[ lat_name ].data
#----------------------------------------------------------
# Next line type: <class 'pydap.model.BaseType'>
# and data is downloaded from server.
#----------------------------------------------------------
# lats = self.dataset[ lat_name ][:]
#----------------------------------------------------------
# Next line type: <class 'numpy.ndarray'>
#----------------------------------------------------------
lats = self.dataset[ lat_name ][:].data
if (lats.ndim > 1):
msg1 = 'Sorry, cannot yet restrict latitude indices'
msg2 = ' when lat array has more than 1 dimension.'
self.append_download_log( [msg1, msg2] )
return (None, None)
# print('## type(lats) =', type(lats) )
# print('## lats.shape =', lats.shape )
# print('## lats =', lats )
#------------------------------------------------
# It seems that values may be reverse sorted to
# indicate that the origin is upper left corner
# Don't sort them, need indices into original.
#------------------------------------------------
if (lats[0] > lats[-1]):
origin = 'upper'
else:
origin = 'lower'
#------------------------------------------
# Compute the latitude spacing, dlat
#------------------------------------------
# This only works if lats are a 1D list.
# If a "list of lists", len() will be for
# the outer list and min() won't work.
# Also, no "size" attribute, etc.
#------------------------------------------
nlats = lats.size
minlat = lats.min()
maxlat = lats.max()
dlat = np.abs(lats[1] - lats[0])
#--------------
# Another way
#--------------
# latdif = (maxlat - minlat)
# if (CENTERS):
# dlat = (latdif / (nlats - 1))
# else:
# dlat = (latdif / nlats)
#--------------------------------------
# Compute the new, restricted indices
# New method: (2020-12-12)
#--------------------------------------
all_indices = np.arange( nlats )
w = np.logical_and(lats > user_minlat, lats < user_maxlat) # boolean array
indices = all_indices[w]
if (indices.size > 0):
lat_i1 = indices[0]
lat_i2 = indices[-1]
else:
lat_i1 = 0
lat_i2 = nlats-1
#--------------------------------------
# Compute the new, restricted indices
#--------------------------------------
# Here, int() behaves like "floor()".
# So maybe add 1 to lat_i2 ???
#--------------------------------------
# lat_i1 = int( (user_minlat - minlat) / dlat )
# lat_i2 = int( (user_maxlat - minlat) / dlat )
# lat_i2 = (lat_i2 + 1) ########
#---------------------------------
# Make sure indices are in range
#----------------------------------------
# lat_i1 = min( max(lat_i1, 0), nlats-1 )
# lat_i2 = min( max(lat_i2, 0), nlats-1 )
#------------------------------------------
# User region may be smaller than v_dlat,
# as is the case with Puerto Rico, where
# data grid cells are 1 deg x 1 deg or so.
#------------------------------------------
# if (lat_i1 == lat_i2): # (still possible?)
# lat_i2 = lat_i1 + 1
if (REPORT):
print('lat_name =', lat_name)
print('minlat =', minlat, '(var)' )
print('maxlat =', maxlat, '(var)' )
print('dlat =', dlat)
print('u_minlat =', user_minlat, '(user)' )
print('u_maxlat =', user_maxlat, '(user)' )
print('lat_i1 =', lat_i1, '(new index)')
print('lat_i2 =', lat_i2, '(new index)')
# print('nlats =', nlats)
# print('New latitude indices =', lat_i1, ',', lat_i2)
# print()
#-------------------------------
i1s = str(lat_i1)
i2s = str(lat_i2)
msg1 = 'lat_name = ' + lat_name
msg2 = 'dlat = ' + str(dlat)
msg3 = 'nlats = ' + str(nlats)
msg4 = 'min, max = ' + str(minlat) + ', ' + str(maxlat) + ' (data)'
msg5 = 'min, max = ' + str(user_minlat) + ', ' + str(user_maxlat) + ' (user)'
msg6 = 'New latitude indices = ' + i1s + ', ' + i2s
self.append_download_log([msg1, msg2, msg3, msg4, msg5, msg6, ' '])
return (lat_i1, lat_i2)
# get_new_lat_index_range()
#--------------------------------------------------------------------
def get_new_lon_index_range(self, REPORT=True):
short_name = self.get_var_shortname()
#-------------------------------------------------
# Note: dimensions can be things like 'ni', 'nj'
        #       so it's better to use the list of all
# variable short names, stored earlier.
# They are valid keys to self.dataset.
#-------------------------------------------------
## dim_list = self.dataset[ short_name ].dimensions
## dim_list = self.dataset[ short_name ].attributes.keys()
dim_list = self.var_short_names
lon_name_list = ['lon', 'LON', 'coadsx', 'COADSX',
'longitude', 'LONGITUDE', 'None']
for lon_name in lon_name_list:
if (lon_name in dim_list):
break
if (lon_name == 'None'):
msg1 = 'Sorry, could not find a "longitude" variable.'
msg2 = 'Checked: lon, LON, coadsx, COADSX,'
msg3 = ' longitude and LONGITUDE.'
self.append_download_log( [msg1, msg2, msg3] )
return (None, None)
#--------------------------------------------
# Are lons for grid cell edges or centers ?
#--------------------------------------------
att_dict = self.dataset[ lon_name ].attributes
CENTERS = False
if ('coordinate_defines' in att_dict.keys() ):
if (att_dict['coordinate_defines'] == 'center'):
CENTERS = True
#------------------------------------
        # Get user-selected minlon and maxlon
#------------------------------------
user_minlon = self.map_minlon.value
user_maxlon = self.map_maxlon.value
#----------------------------------
# Get the array of lons, and info
#----------------------------------
lons = self.dataset[ lon_name ][:].data
if (lons.ndim > 1):
msg1 = 'Sorry, cannot yet restrict longitude indices'
msg2 = ' when lon array has more than 1 dimension.'
self.append_download_log( [msg1, msg2] )
return (None, None)
# print('## type(lons) =', type(lons) )
# print('## lons.shape =', lons.shape )
# print('## lons.ndim =', lons.ndim )
#------------------------------------------
# Compute the longitude spacing, dlon
#------------------------------------------
# This only works if lons are a 1D list.
# If a "list of lists", len() will be for
# the outer list and min() won't work.
# Also, no "size" attribute, etc.
#------------------------------------------
nlons = lons.size
minlon = lons.min()
maxlon = lons.max()
dlon = np.abs(lons[1] - lons[0])
#--------------
# Another way
#--------------
# londif = (maxlon - minlon)
# if (CENTERS):
# dlon = (londif / (nlons - 1))
# else:
# dlon = (londif / nlons)
#-----------------------------------------
# Convert lons to have range [-180,180]?
#-----------------------------------------
# lons = ((lons + 180.0) % 360) - 180
# lons.sort() #####################
# user_maxlon = ((user_maxlon + 180.0) % 360) - 180
# user_minlon = ((user_minlon + 180.0) % 360) - 180
# if (user_minlon > user_maxlon):
# user_minlon -= 180.0
#-------------------------------------------
# Convert user lons to have range [0,360]?
#-------------------------------------------
if (minlon >= 0) and (maxlon <= 360):
user_minlon = (user_minlon + 360.0) % 360
user_maxlon = (user_maxlon + 360.0) % 360
#--------------------------------------
# Compute the new, restricted indices
# New method: (2020-12-12)
#--------------------------------------
all_indices = np.arange( nlons )
w = np.logical_and(lons > user_minlon, lons < user_maxlon) # boolean array
indices = all_indices[w]
if (indices.size > 0):
lon_i1 = indices[0]
lon_i2 = indices[-1]
else:
lon_i1 = 0
lon_i2 = nlons-1
#--------------------------------------
# Compute the new, restricted indices
#--------------------------------------
# Here, int() behaves like "floor()".
# So maybe add 1 to lon_i2 ???
#--------------------------------------
# lon_i1 = int( (user_minlon - minlon) / dlon )
# lon_i2 = int( (user_maxlon - minlon) / dlon )
# lon_i2 = lon_i2 + 1 #######
#---------------------------------
# Make sure indices are in range
#----------------------------------------
# lon_i1 = min( max(lon_i1, 0), nlons-1 )
# lon_i2 = min( max(lon_i2, 0), nlons-1 )
#------------------------------------------
# User region may be smaller than v_dlat,
# as is the case with Puerto Rico, where
# data grid cells are 1 deg x 1 deg or so.
#------------------------------------------
# if (lon_i1 == lon_i2): # (still needed?)
# lon_i2 = lon_i1 + 1
if (REPORT):
print()
print('lon_name =', lon_name)
print('minlon =', minlon, '(var)')
print('maxlon =', maxlon, '(var)')
print('dlon =', dlon)
print('u_minlon =', user_minlon, '(user)')
print('u_maxlon =', user_maxlon, '(user)')
print('lon_i1 =', lon_i1, '(new index)')
print('lon_i2 =', lon_i2, '(new index)')
# print('nlons =', nlons)
# print('New longitude indices =', lon_i1, ',', lon_i2 )
# print()
#--------------------------------------------------
i1s = str(lon_i1)
i2s = str(lon_i2)
msg1 = 'lon_name = ' + lon_name
msg2 = 'dlon = ' + str(dlon)
msg3 = 'nlons = ' + str(nlons)
msg4 = 'min, max = ' + str(minlon) + ', ' + str(maxlon) + ' (data)'
msg5 = 'min, max = ' + str(user_minlon) + ', ' + str(user_maxlon) + ' (user)'
msg6 = 'New longitude indices = ' + i1s + ', ' + i2s
self.append_download_log([msg1, msg2, msg3, msg4, msg5, msg6, ' '])
return (lon_i1, lon_i2)
# get_new_lon_index_range()
#--------------------------------------------------------------------
def get_duration(self, start_date=None, start_time=None,
end_date=None, end_time=None,
dur_units=None, REPORT=False):
#------------------------------------------------
# Note: Compute time span between 2 datetimes.
#------------------------------------------------
## date_sep = '/'
date_sep = '-'
time_sep = ':'
#-------------------------------------
# Get parts of the start date & time
#-------------------------------------
(y1, m1, d1) = self.split_date_str( start_date )
(h1, mm1, s1) = self.split_time_str( start_time )
#-----------------------------------
# Get parts of the end date & time
#-----------------------------------
(y2, m2, d2) = self.split_date_str( end_date )
(h2, mm2, s2) = self.split_time_str( end_time )
#------------------------------
# Convert to datetime objects
#------------------------------
start_obj = datetime.datetime(y1, m1, d1, h1, mm1, s1)
end_obj = datetime.datetime(y2, m2, d2, h2, mm2, s2)
#---------------------------------------------
        # Compute time duration between start and end
#---------------------------------------------
duration_obj = (end_obj - start_obj)
duration_secs = duration_obj.total_seconds()
#-----------------------------------------
# Convert duration to dur_units provided
#-----------------------------------------
if (dur_units == 'seconds'):
duration = duration_secs
elif (dur_units == 'minutes'):
duration = (duration_secs / 60.0)
elif (dur_units == 'hours'):
duration = (duration_secs / 3600.0)
elif (dur_units == 'days'):
duration = (duration_secs / 86400.0)
elif (dur_units == 'years'):
duration = (duration_secs / 31536000.0)
else:
            print('Unknown duration units = ' + str(dur_units) + '.')
print('Returning duration in hours.')
duration = (duration_secs / 3600.0)
if (REPORT):
print( 'duration =', duration, '[' + dur_units + ']' )
return duration
#-----------------------------------------
# Alternate approach, where dur_units is
# determined and then returned
#-----------------------------------------
# if (duration_secs < 60):
# duration = duration_secs
# dur_units = 'seconds'
# elif (duration_secs < 3600):
# duration = divmod( duration_secs, 60 )[0]
# dur_units = 'minutes'
# elif (duration_secs < 86400):
# duration = divmod( duration_secs, 3600 )[0]
# dur_units = 'hours'
# elif (duration_secs < 31536000):
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
# else:
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
#
# return (duration, dur_units)
# get_duration()
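    # Example (hypothetical call with keyword arguments; "balto" is the
    # GUI instance):
    #   balto.get_duration( start_date='2017-09-01', start_time='00:00:00',
    #       end_date='2017-09-02', end_time='06:00:00', dur_units='hours' )
    #   returns 30.0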
#--------------------------------------------------------------------
def get_download_format(self):
return self.download_format.value
# get_download_format()
#--------------------------------------------------------------------
def clear_download_log(self):
self.download_log.value = ''
# clear_download_log()
#--------------------------------------------------------------------
def append_download_log(self, msg):
## type_str = str( type(msg) )
## if (type_str == "<class 'list'>"):
if (isinstance( msg, list)):
for string in msg:
self.download_log.value += (string + '\n')
else:
self.download_log.value += (msg + '\n')
# append_download_log()
#--------------------------------------------------------------------
def print_user_choices(self):
if not(hasattr(self, 'dataset')):
msg = 'ERROR: No dataset has been selected.'
self.append_download_log( msg )
return ############
start_datetime_obj = self.get_start_datetime_obj()
if (start_datetime_obj is not None):
start_date = str( start_datetime_obj.date() )
start_time = str( start_datetime_obj.time() )
else:
start_date = 'unknown'
start_time = 'unknown'
end_datetime_obj = self.get_end_datetime_obj()
if (end_datetime_obj is not None):
end_date = str( end_datetime_obj.date() )
end_time = str( end_datetime_obj.time() )
else:
end_date = 'unknown'
end_time = 'unknown'
#------------------------------------------
# Show message in downloads panel log box
#------------------------------------------
msg1 = 'var short name = ' + self.get_var_shortname()
msg2 = 'download format = ' + self.get_download_format()
msg3 = 'map bounds = ' + str(self.get_map_bounds( FROM_MAP=False ))
msg4 = 'start date and time = ' + start_date + ' ' + start_time
msg5 = 'end date and time = ' + end_date + ' ' + end_time
## msg6 = 'opendap package = ' + self.get_opendap_package()
msgs = [msg1, msg2, msg3, msg4, msg5]
self.append_download_log( msgs )
# print_user_choices()
#--------------------------------------------------------------------
def download_data(self, caller_obj=None):
#-------------------------------------------------
# Note: After a reset, self still has a dataset,
# but short_name was reset to ''.
#-------------------------------------------------
short_name = self.get_var_shortname()
if (short_name == ''):
msg = 'Sorry, no variable has been selected.'
self.download_log.value = msg
return
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Go" button beside the Dropdown of filenames.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
## status = self.download_status
self.print_user_choices()
#--------------------------------------------------
# print_user_choices() already displayed error msg
#--------------------------------------------------
if not(hasattr(self, 'dataset')):
return
#----------------------------------------
# Get names of the variables dimensions
#----------------------------------------
dim_list = self.dataset[ short_name ].dimensions
#--------------------------------------
# Uncomment to test other time_deltas
#------------------------------------------
# If test time_delta is too small, we'll
# get a start_index that is out of range.
# Next 3 worked in some SST tests.
#------------------------------------------
# self.time_delta = '0000-02-00 00:00:00'
# self.time_delta = '0000-00-30 12:00:00'
# self.time_delta = '0001-00-00 00:00:00'
#----------------------------------------------
# Is there a time variable ? If so, use time
# range selected in GUI to clip the data.
#----------------------------------------------
(t_i1, t_i2) = self.get_new_time_index_range( REPORT=True)
#--------------------------------------------
# Is there a lat variable ? If so, use lat
# range selected in GUI to clip the data.
# Default is the full range.
#--------------------------------------------
(lat_i1, lat_i2) = self.get_new_lat_index_range( REPORT=True)
#--------------------------------------------
# Is there a lon variable ? If so, use lon
# range selected in GUI to clip the data.
# Default is the full range.
#--------------------------------------------
(lon_i1, lon_i2) = self.get_new_lon_index_range( REPORT=True)
#--------------------------------------
# Did user set a spatial resolution ?
#--------------------------------------
        # Asynchronous download.  How do we know it's here?
# print('Downloading variable:', short_name, '...' )
# print('Variable saved in: balto.user_var')
# print()
msg1 = 'Downloading variable: ' + short_name + '...'
msg2 = 'Variable saved in: balto.user_var'
msg3 = ' '
self.append_download_log( [msg1, msg2, msg3] )
#---------------------------------------------
# Convert reference to actual numpy variable
# which causes it to be downloaded, and then
# store it into balto.user_var.
#---------------------------------------------------
# This grid includes var and its dimension vectors.
# Note: type(pydap_grid) = pydap.model.GridType
#---------------------------------------------------
pydap_grid = self.dataset[ short_name ]
ndims = len( pydap_grid.dimensions ) # (e.g. time, lat, lon)
## data_obj = self.dataset[ short_name ]
## data_dims = data_obj.dimensions
## ndim = len( data_dims )
#------------------------------------------------
# Actually download the data here to a variable
# in the notebook, but restrict indices first,
# to only download the required data.
#------------------------------------------------
if (ndims == 3):
#-------------------------------------
# Assume dims are: (time, lat, lon)
#------------------------------------------
# After subscripting, grid still has type:
# pydap.model.GridType
#------------------------------------------
if (lat_i1 is None) or (lon_i1 is None):
if (t_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[t_i1:t_i2, :, :]
else:
if (t_i1 is None):
grid = pydap_grid[:, lat_i1:lat_i2, lon_i1:lon_i2]
else:
grid = pydap_grid[t_i1:t_i2, lat_i1:lat_i2, lon_i1:lon_i2]
#----------------------------------------
elif (ndims == 1): # time series
if (t_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[t_i1:t_i2]
#-----------------------------------
elif (ndims == 2): # spatial grid
#-------------------------------
# Assume dims are: (lat, lon)
#-------------------------------
if (lat_i1 is None) or (lon_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[lat_i1:lat_i2, lon_i1:lon_i2]
#------------------------------------
else:
grid = pydap_grid[:]
#--------------------------------------------------
# Note: type(pydap_grid) = pydap.model.gridtype
# type(grid) = pydap.model.gridtype
# type(grid[:].data) = list
# type(grid.data) = list
#--------------------------------------------------
# Subscript by *ranges* doesn't change data type.
#--------------------------------------------------
grid_list = grid.data ########
n_list = len(grid_list)
var = grid_list[0]
# For testing
# print('## type(grid) =', type(grid) )
# print('## type(grid.data) =', type(grid_list) )
# print('## len(grid.data) =', n_list )
# print('## type(var) =', type(var) )
# print()
times = None # (defaults)
lats = None
lons = None
if (n_list > 1):
times = grid_list[1]
if (n_list > 2):
lats = grid_list[2]
if (n_list > 3):
lons = grid_list[3]
#----------------------------------------------
# Are lats in reverse order ? (2020-12-12)
# MUST DO THIS BEFORE SUBSETTING WITH INDICES
#----------------------------------------------
# origin = None
# if (lats is not None):
# if (lats[0] > lats[-1]):
# origin = 'upper' # (row major?)
# lats.sort() #############################
# else:
# origin = 'lower'
#----------------------------------------------
# Adjust the longitudes ?
# MUST DO THIS BEFORE SUBSETTING WITH INDICES
#----------------------------------------------
# if (n_list > 3):
# SIGNED_LONS = True
# if (SIGNED_LONS):
# #----------------------------------------
# # Convert lons to have range [-180,180]
# #----------------------------------------
# lons = ((lons + 180.0) % 360) - 180
# lons.sort() #################
#-----------------------------
# Is there a missing value ?
# Is there a fill value ?
#-----------------------------
atts = pydap_grid.attributes
REPLACE_MISSING = False
if ('missing_value' in atts.keys()):
REPLACE_MISSING = True
missing_value = pydap_grid.attributes['missing_value']
w = (var == missing_value)
#---------------------------------------
# Is there a scale factor and offset ?
#---------------------------------------
if ('scale_factor' in atts.keys()):
#---------------------------------------------------
# Note: var may have type ">i2" while scale_factor
# may have type "float64", so need to upcast
# var and can't use "*="
#---------------------------------------------------
factor = pydap_grid.attributes['scale_factor']
## print('type(var) =', type(var))
## print('type(factor) =', type(factor))
var = var * factor
if ('add_offset' in atts.keys()):
offset = pydap_grid.attributes['add_offset']
## print('type(var) =', type(var))
## print('type(offset) =', type(offset))
var = var + offset
#-----------------------------------------
# Restore missing values after scaling ?
#-----------------------------------------
if (REPLACE_MISSING):
var[w] = missing_value
#-----------------------------------------
# Save var into balto object as user_var
#-----------------------------------------
self.user_var = var
self.user_var_times = times # (maybe None)
self.user_var_lats = lats # (maybe None)
self.user_var_lons = lons # (maybe None)
#----------------------------------------------------
# Could define self.user_var as a list, and append
# new variables to the list as downloaded.
# Could also put them into a dictionary.
#----------------------------------------------------
# download_data()
#--------------------------------------------------------------------
def show_grid(self, grid, var_name=None, extent=None,
cmap='rainbow', xsize=8, ysize=8 ):
#---------------------------------------------------
# Note: extent = [minlon, maxlon, minlat, maxlat]
# But get_map_bounds() returns:
# (minlon, minlat, maxlon, maxlat)
#---------------------------------------------------
if (grid.ndim != 2):
print('Sorry, show_grid() only works for 2D arrays.')
return
if (var_name is None):
var_name = self.data_var_long_name.value
## var_name = self.data_var_name.value
if (extent is None):
extent = self.get_map_bounds(style='plt.imshow')
## (minlon, minlat, maxlon, maxlat) = self.get_map_bounds()
## extent = [minlon, maxlon, minlat, maxlat]
        bp.show_grid_as_image( grid, var_name, extent=extent,
                               cmap=cmap, stretch='hist_equal',
                               xsize=xsize, ysize=ysize,
                               nodata_value=None )
## NO_SHOW=False, im_file=None,
# show_grid()
#--------------------------------------------------------------------
def get_opendap_package(self):
return self.prefs_package.value
#--------------------------------------------------------------------
def get_abbreviated_var_name(self, abbreviation ):
map = {
'lat' : ['geodetic_latitude', 'quantity'],
'lon' : ['geodetic_longitude', 'quantity'],
'sst' : ['sea_surface__temperature', 'variable'],
'temp': ['temperature', 'quantity'],
'x' : ['x-coordinate', 'quantity'],
'y' : ['y-coordinate', 'quantity'],
'z' : ['z-coordinate', 'quantity'] }
        try:
            return map[ abbreviation ]
        except KeyError:
            print('Sorry, no matches found for abbreviation.')
# get_abbreviated_var_name()
#--------------------------------------------------------------------
def get_possible_svo_names(self, var_name, SHOW_IRI=False):
#-----------------------------------------------------
# Use the SVO "match phrase" service to get a
# ranked list of possible SVO variable name matches.
#-----------------------------------------------------
# var_name should be a list of words, as a single
# string, separated by underscores.
#-----------------------------------------------------
var_name2 = var_name.replace(' ', '_')
match_phrase_svc = 'http://34.73.227.230:8000/match_phrase/'
match_phrase_url = match_phrase_svc + var_name2 + '/'
print('Working...')
#-----------------------------------------------------------------
# The result is in JSON format, for example:
# result = { "results": [
# {"IRI":"result1_IRI", "label":"result1_label", "matchrank": "result1_rank"},
# {"IRI":"result2_IRI", "label":"result2_label", "matchrank": "result2_rank"} ] }
#------------------------------------------------------------------
result = requests.get( match_phrase_url )
print('Finished.')
print()
json_str = result.text
# print( json_str )
json_data = json.loads( json_str )
match_list = json_data['results']
for item in match_list:
## print('item =', item)
if (SHOW_IRI):
print('IRI =', item['IRI'])
print('label =', item['label'])
print('rank =', item['matchrank'])
print()
# get_possible_svo_names()
#-------------------------------------------------------------------
| 42.811107 | 96 | 0.432118 | 117,038 | 0.96704 | 0 | 0 | 0 | 0 | 0 | 0 | 62,616 | 0.517372 |
9632975c75b20b8d1e791a57c8e86aa3a4d6057f
| 586 |
py
|
Python
|
w0rplib/url.py
|
w0rp/w0rpzone
|
06aa9f8871cefcbefbbfdfcba0abfd4fa2629d0c
|
[
"BSD-2-Clause"
] | null | null | null |
w0rplib/url.py
|
w0rp/w0rpzone
|
06aa9f8871cefcbefbbfdfcba0abfd4fa2629d0c
|
[
"BSD-2-Clause"
] | 13 |
2019-07-05T18:44:46.000Z
|
2021-06-19T12:19:46.000Z
|
w0rplib/url.py
|
w0rp/w0rpzone
|
06aa9f8871cefcbefbbfdfcba0abfd4fa2629d0c
|
[
"BSD-2-Clause"
] | null | null | null |
from django.views.generic.base import RedirectView
from django.conf.urls import re_path
def redir(regex, redirect_url, name=None):
"""
A shorter wrapper around RedirectView for 301 redirects.
"""
return re_path(
regex,
RedirectView.as_view(url=redirect_url, permanent=True),
name=name,
)
def redir_temp(regex, redirect_url, name=None):
"""
A shorter wrapper around RedirectView for 302 redirects.
"""
return re_path(
regex,
RedirectView.as_view(url=redirect_url, permanent=False),
name=name,
)
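# Example usage sketch (the URL patterns and redirect targets below are
# hypothetical, not part of this module):
#
#     urlpatterns = [
#         redir(r"^old-about/$", "/about/", name="old-about"),
#         redir_temp(r"^launch/$", "/blog/launch-post/", name="launch"),
#     ]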
| 23.44 | 64 | 0.663823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.245734 |
963361945d482a5cef7d35e152993ddbfadb7240
| 1,889 |
py
|
Python
|
launcher.py
|
lucaso60/DiscordDMSpammer
|
336a20195cf32013cf50c98c2a400ec79750758b
|
[
"MIT"
] | 1 |
2021-08-15T13:21:22.000Z
|
2021-08-15T13:21:22.000Z
|
launcher.py
|
lucaso60/DiscordDMSpammer
|
336a20195cf32013cf50c98c2a400ec79750758b
|
[
"MIT"
] | 1 |
2021-09-14T15:29:30.000Z
|
2021-09-14T15:42:01.000Z
|
launcher.py
|
lucaso60/DiscordDMSpammer
|
336a20195cf32013cf50c98c2a400ec79750758b
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2021 lucaso60
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
from discord import *
from datetime import datetime
from time import sleep
from extensions import on_start_screen
def time_now():
time = datetime.now()
current_time = time.strftime("%y-%m-%d %H:%M:%S")
now = current_time + " >"
return now
TOKEN = input(f"{time_now()} Please input your bot token: ")
bot = discord.Bot(command_prefix=".")
@bot.event
async def on_ready():
print(f"{time_now()} Logged in as {bot.user}")
USER_ID = input(f"{time_now()} Please input USER ID: ")
MESSAGE = input(f"{time_now()} Please input the spam message: ")
user = await bot.fetch_user(USER_ID)
while True:
await user.send(MESSAGE)
print(f"{time_now()} Spammed {user} with {MESSAGE}")
sleep(0.8)
bot.run(TOKEN)
| 33.732143 | 79 | 0.741133 | 0 | 0 | 0 | 0 | 384 | 0.203282 | 373 | 0.197459 | 1,351 | 0.715193 |
9633daded62c203085e90cc7105a91f913793c8c
| 2,393 |
py
|
Python
|
src/pipelines.py
|
charnley/bayes-mndo
|
38662dd738af7cba73f98ffacc5c719aaa9a036d
|
[
"CC0-1.0"
] | null | null | null |
src/pipelines.py
|
charnley/bayes-mndo
|
38662dd738af7cba73f98ffacc5c719aaa9a036d
|
[
"CC0-1.0"
] | null | null | null |
src/pipelines.py
|
charnley/bayes-mndo
|
38662dd738af7cba73f98ffacc5c719aaa9a036d
|
[
"CC0-1.0"
] | null | null | null |
import multiprocessing as mp
import os
import shutil
from functools import partial
from tqdm import tqdm
import data
from chemhelp import mndo
# def calculate(binary, filename, scr=None):
# """
# Collect sets of lines for each molecule as they become available
# and then call a parser to extract the dictionary of properties.
# DEPRECIATED
# """
# props_list = mndo.calculate_file(filename, scr=scr, mndo_cmd=binary)
# props_list = list(props_list) # NOTE that calculate_file returns an iterator
# return props_list
def calculate_parallel(
params_joblist,
param_keys,
mean_params,
scale_params,
filename,
binary,
n_procs=2,
mndo_input=None,
scr="_tmp_optim",
**kwargs,
):
worker_kwargs = {
"scr": scr,
"filename": filename,
"param_keys": param_keys,
"mean_params": mean_params,
"scale_params": scale_params,
"binary": binary,
}
mapfunc = partial(worker, **worker_kwargs)
# NOTE generating multiple pools each iteration was leading to a memory leak
# NOTE using imap may be slower but done for development purposes to check
# it's working
with mp.Pool(n_procs) as p:
# results = p.map(mapfunc, params_joblist)
results = list(tqdm(p.imap(mapfunc, params_joblist), total=len(params_joblist)))
return results
def worker(*args, **kwargs):
"""
"""
scr = kwargs["scr"]
filename = kwargs["filename"]
param_keys = kwargs["param_keys"]
mean_params = kwargs["mean_params"]
scale_params = kwargs["scale_params"]
binary = kwargs["binary"]
# Ensure unique directory for this worker in scratch directory
pid = os.getpid()
cwd = os.path.join(scr, str(pid))
if not os.path.exists(cwd):
os.mkdir(cwd)
if not os.path.exists(os.path.join(cwd, filename)):
shutil.copy2(os.path.join(scr, filename), os.path.join(cwd, filename))
# Set params in worker dir
param_list = args[0]
data.set_params(
param_list, param_keys, mean_params, scale_params, scr=cwd,
)
# Calculate properties
properties_list = mndo.calculate_file(filename, scr=cwd, mndo_cmd=binary)
# NOTE JCK properties_list is a generator, so complete parsing on worker
properties_list = list(properties_list)
shutil.rmtree(cwd)
return properties_list
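# Illustrative usage sketch (added commentary; not part of the original file).
# It shows how calculate_parallel might be driven: one parameter vector per job,
# evaluated by a pool of workers that each get their own scratch directory.
# The variable and file names below are assumptions for demonstration only.
#
#     results = calculate_parallel(
#         params_joblist=[params_a, params_b],   # candidate parameter vectors
#         param_keys=param_keys,
#         mean_params=mean_params,
#         scale_params=scale_params,
#         filename="molecules.inp",
#         binary="mndo",
#         n_procs=4,
#     )
#     # results[i] holds the parsed property dicts for params_joblist[i]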
| 25.457447 | 88 | 0.670288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 933 | 0.389887 |
96343356750cc9fb146f0fb6d55a57fd12b0dbb2
| 2,915 |
py
|
Python
|
lib/googlecloudsdk/api_lib/logging/common.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/api_lib/logging/common.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/api_lib/logging/common.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that contains common logging commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
def FetchLogs(log_filter=None,
order_by='DESC',
limit=None,
parent=None):
"""Fetches log entries.
This method uses Cloud Logging V2 api.
https://cloud.google.com/logging/docs/api/introduction_v2
Entries are sorted on the timestamp field, and afterwards filter is applied.
If limit is passed, returns only up to that many matching entries.
If neither log_filter nor log_ids are passed, no filtering is done.
Args:
log_filter: filter expression used in the request.
order_by: the sort order, either DESC or ASC.
limit: how many entries to return.
parent: the name of the log's parent resource, e.g. "projects/foo" or
"organizations/123" or "folders/123". Defaults to the current project.
Returns:
A generator that returns matching log entries.
Callers are responsible for handling any http exceptions.
"""
if parent:
if not ('projects/' in parent or 'organizations/' in parent
or 'folders/' in parent or 'billingAccounts/' in parent):
raise exceptions.InvalidArgumentException(
'parent', 'Unknown parent type in parent %s' % parent)
else:
parent = 'projects/%s' % properties.VALUES.core.project.Get(required=True)
# The backend has an upper limit of 1000 for page_size.
# However, there is no need to retrieve more entries if limit is specified.
page_size = min(limit or 1000, 1000)
if order_by.upper() == 'DESC':
order_by = 'timestamp desc'
else:
order_by = 'timestamp asc'
client = util.GetClient()
request = client.MESSAGES_MODULE.ListLogEntriesRequest(resourceNames=[parent],
filter=log_filter,
orderBy=order_by)
return list_pager.YieldFromList(
client.entries, request, field='entries', limit=limit,
batch_size=page_size, batch_size_attribute='pageSize')
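# Illustrative usage sketch (added commentary; not part of the original module).
# FetchLogs returns a generator, so entries are retrieved lazily while iterating.
# The filter string and limit below are assumptions for demonstration only.
#
#     for entry in FetchLogs(log_filter='severity>=ERROR', limit=10):
#         print(entry)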
| 38.355263 | 80 | 0.704974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,743 | 0.597942 |
96346b689665119bb71187a849bd5ed61453fc88
| 23,288 |
py
|
Python
|
hkdataminer/utils/plot_.py
|
stephenliu1989/HK_DataMiner
|
312d8244d33337971d81305ec7a9986427c669d9
|
[
"Apache-2.0"
] | 3 |
2020-06-12T21:25:05.000Z
|
2021-03-02T09:38:24.000Z
|
hkdataminer/utils/plot_.py
|
stephenliu1989/HK_DataMiner
|
312d8244d33337971d81305ec7a9986427c669d9
|
[
"Apache-2.0"
] | 1 |
2018-01-30T09:52:01.000Z
|
2018-01-30T09:52:01.000Z
|
hkdataminer/utils/plot_.py
|
stephenliu1989/HK_DataMiner
|
312d8244d33337971d81305ec7a9986427c669d9
|
[
"Apache-2.0"
] | 1 |
2021-01-16T13:07:50.000Z
|
2021-01-16T13:07:50.000Z
|
__author__ = 'stephen'
import numpy as np
import scipy.io
import scipy.sparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as pylab
from .utils import get_subindices
import matplotlib.ticker as mtick
from collections import Counter
from sklearn.neighbors.kde import KernelDensity
from scipy import stats
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1, potential=False):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
plt.rc("font", size=10)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
if potential is False: #plot Alanine Dipeptide
for i in clusters:
if i != outliers:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)#, color=colors_jet[i])
#else:
# point = np.where(labels == i)
# plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7, color='black') # , color=colors_jet[i])
plt.title("Alanine Dipeptide " + name + " states", fontsize=10)
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-110, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
else: # if plot 2D potential
plt.figure(figsize=(10, 10))
for i in clusters:
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)], '.', markersize=1.0, alpha=0.7) #markersize=20.0, color=colors_jet[i])
#plt.plot(phi_angles[np.where(labels == i)],
# psi_angles[np.where(labels == i)],
# '.', color=colors_jet[i], label='State %d' % i)
#plt.title("2D potential " + name + " states", fontsize=20)
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.xlabel(r"$\phi$", fontsize=25)
plt.ylabel(r"$\psi$", fontsize=25)
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
def plot_each_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
for i in np.unique(clusters):
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)],
'x', color=colors_jet[i], label='State %d' % i)
#plt.title("Alanine Dipeptide " + name + " state_" + str(i))
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
# Save the result figure
plt.savefig('./'+ name + " state_" + str(i)+'.png', dpi = 400)
plt.close()
#plt.show()
def contour_cluster(labels, phi_angles, psi_angles, name, outliers=-1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
# lables_array = np.array(labels)
# colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(lables_array)+1))
for i in np.unique(labels):
#if i != outliers:
if i == 1:
print("i=", i)
x = phi_angles[np.where(labels == i)]
y = psi_angles[np.where(labels == i)]
indices = get_subindices(assignments=x, state=None, samples=1000)
x = x[indices]
y = y[indices]
X, Y= np.meshgrid(x, y)
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
#kde = KernelDensity(kernel='gaussian', bandwidth=0.2)
#kde_results = kde.score_samples([x,y])
#X, Y, Z = np.meshgrid(x, y, kde_results)
#Z = np.reshape(kernel([x,y]).T, x.shape)
#Z1 = mlab.bivariate_normal(X, Y, 5.0, 5.0, 0.0, 0.0)
#Z2 = mlab.bivariate_normal(X, Y, 7.5, 2.5, 5, 5)
# difference of Gaussians
#Z = 10.0 * (Z2 - Z1)
#step = Z.max()-Z.min()/10
#print "Z min:",Z.min(), "Z.max:", Z.max(), "step:", step
#levels = np.arange(Z.min(), Z.min(), Z.max())
#print levels
plt.contour(X, Y, Z, origin='lower') #, linewidths=Z.min(), levels=levels)
plt.title("Alanine Dipeptide " + name + " states")
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
def plot_matrix(tProb_=None, name=None):
'''
if labels is not None:
n_states = len(set(labels)) - (1 if -1 in labels else 0)
print 'n_states=', n_states
#diagC = tProb_.diagonal()
length = len(labels)
print "length=", length
Cmn = scipy.sparse.lil_matrix(n_states, n_states, dtype=np.float32)
Cmn = np.zeros((n_states, n_states))
print "size of tProb", tProb_.shape
if scipy.sparse.issparse(tProb_):
tProb_ = tProb_.todense()
for i in xrange(length):
for j in xrange(length):
Cmn[labels[i], labels[j]] += tProb_[i, j]
#for i in xrange(n_states):
#Cmn[i,i] += diagC[i]
# for j in xrange(n_states):
# Cmn[i, j] += Cmn[j, i]
# Cmn[j, i] = Cmn[i, j]
for j in xrange(n_states):
sum_row = np.sum(Cmn[j,:])
if sum_row is not 0:
Cmn[j,:] /= sum_row
pylab.matshow(Cmn, cmap=plt.cm.OrRd)
else:
'''
pylab.matshow(tProb_, cmap=plt.cm.OrRd)
plt.colorbar()
#pylab.show()
plt.savefig('./' + name + 'Matrix.png', dpi=400)
plt.close()
def plot_block_matrix(labels, tProb_, name='BlockMatrix'):
print("Plot Block Matrix")
indices = np.argsort(labels)
#print indices
block_matrix = tProb_[:,indices]
block_matrix = block_matrix[indices,:]
block_matrix = 1 - block_matrix
#print block_matrix
pylab.matshow(block_matrix, cmap=plt.cm.OrRd)
plt.colorbar()
plt.savefig('./' + name + '.png', dpi=400)
#pylab.show()
plt.close()
def plot_cluster_size_distribution(populations, name='Populations'):
fig = plt.figure(1, (10,6))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.rc("font", size=30)
plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = range(len(populations))
X_xtick = ['']
    for i in range(1, len(populations)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
plt.xticks(np.arange(len(populations)+1), X_xtick)
plt.ylabel(r"Probability")
plt.ylim([0,100])
print("X:", X)
distrib.bar(X, populations*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181',
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
def plot_compare_cluster_size_distribution(populations_1, populations_2, name='Populations'):
fig = plt.figure(1, (10,8))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
bar_width = 0.45
plt.rc("font", size=20)
#plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = np.arange(len(populations_1))
X_xtick = ['']
    for i in range(1, len(populations_1)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X, populations_1*100, facecolor='black', edgecolor='white', width=bar_width,label="kNN Density Peaks 3645 states") #facecolor='#f78181',
# populations_2
#X = range(len(populations_2))
X_xtick = ['']
    for i in range(1, len(populations_2)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X+bar_width, populations_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states") #facecolor='#f78181',
plt.xticks(np.arange(len(populations_1)+1+bar_width), X_xtick)
#plt.ylabel(r"Fraction number of clusters")
plt.ylabel(r"Probability")
plt.ylim([0,60])
plt.legend()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
#From Wang Wei's code
def plot_landscape(labels=None, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=80, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
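    # Worked example of the conversion above (added for clarity): for histogram
    # counts [100, 10, 1] with maxH = 100 the bins become -ln(100/100) = 0.0,
    # -ln(10/100) ~ 2.30 and -ln(1/100) ~ 4.61 (in units of k_B T), while empty
    # bins are pinned to 9 so they render as the highest-energy level.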
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '.', markersize=10, color='r')
distribution = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
for i in range(0, len(phi_angles)):
if psi_angles[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_angles[i])[0][0] - 1
if phi_angles[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_angles[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_angles)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel('$\phi$', fontsize=20)
plt.ylabel('$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
if potential is False:
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
else:
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
#Cluster Centers on Free energy landscape distribution
fig = plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.title('Cluster Centers on Free energy landscape distribution', fontsize=20)
plt.xlabel("$k_B T$")
plt.ylabel(r"Probability")
plt.ylim([0, 100])
plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
distrib.bar(np.arange(10), distribution*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181'
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
def plot_compare_distribution(labels_1=None, labels_2=None, phi_angles=None, psi_angles=None, phi_ctr_1=None, psi_ctr_1=None, phi_ctr_2=None, psi_ctr_2=None, name='Energy_Landscape', bins=36, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
#extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
#plt.figure(figsize=(10, 10))
#plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
#if labels_1 is not None:
# plt.plot(phi_ctr_1, psi_ctr_1, '*', markersize=8, color='r')
distribution_1 = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
    for i in range(0, len(phi_ctr_1)):
if psi_ctr_1[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_1[i])[0][0] - 1
if phi_ctr_1[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_1[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_1[index_distrib] += 1
distribution_1 /= len(phi_ctr_1)
print(distribution_1)
distribution_2 = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
    for i in range(0, len(phi_ctr_2)):
if psi_ctr_2[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_2[i])[0][0] - 1
if phi_ctr_2[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_2[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_2[index_distrib] += 1
distribution_2 /= len(phi_ctr_2)
print(distribution_2)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel('$\phi$', fontsize=20)
plt.ylabel('$\Psi$', fontsize=20)
#cbar = plt.colorbar(shrink=0.77)
##plt.title('Free energy landscape', fontsize=20)
#cbar.set_label("$k_B T$", size=20)
#cbar.ax.tick_params(labelsize=20)
#if potential is False:
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-120, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
#else:
# plt.xlim([-75, 75])
# plt.ylim([-75, 75])
# plt.xticks([-50, 0, 50])
# plt.yticks([-50, 0, 50])
#plt.savefig('./' + name + '.png', dpi=400)
##plt.show()
#plt.close()
#Cluster Centers on Free energy landscape distribution
fig=plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
# plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
n_groups = 10
index = np.arange(n_groups)
bar_width = 0.45
distrib.bar(index, distribution_1*100, facecolor='black', edgecolor='white', width=bar_width, label="kNN Density Peaks 3645 states") #facecolor='#f78181'
distrib.bar(index+bar_width, distribution_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states")
#plt.title('Cluster Centers on Free energy landscape distribution', fontsize=10)
plt.xlabel("$k_B T$")
plt.ylabel(r"Fraction number of clusters")
plt.ylim([0, 50])
    plt.xticks(index+bar_width, ('', '1', '', '3', '', '5', '', '7', '', '9'))
plt.legend()
#plt.tight_layout()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
def plot_landscape_barrier(labels=None, selected=1, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=36, potential=False, outliers=-1):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot points
colors = ['y', 'b', 'tomato', 'm', 'g', 'c', 'yellowgreen']
color_index = 0
clusters = np.unique(labels)
for i in clusters:
if i != outliers:
if i in selected:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '2', alpha=0.20, color=colors[color_index])#, color=colors_jet[i])
color_index += 1
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '*', markersize=10, color='r')
distribution = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
    for i in range(0, len(phi_ctr)):
if psi_ctr[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr[i])[0][0] - 1
if phi_ctr[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_ctr)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel('$\phi$', fontsize=20)
plt.ylabel('$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
plt.plot([-103,-103],[30,180],'w') #plot the barrier
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
def calculate_population(labels, name='Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels).values())
total_states = np.max(labels) + 1
#states_magnitude = int(np.ceil(np.log10(total_states)))
total_frames = len(labels)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
magnitude = np.ceil(log_i)
populations[magnitude] += 1
#print magnitude populations
print("Populations Probability:")
#bins = [0]
    for i in range(len(populations)):
populations[i] = populations[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations[i]*100, "%")
#bins.append(10**(i+1))
name += '_Populations'
print("name:", name)
plot_cluster_size_distribution(populations=populations, name=name)
print("Done.")
def compare_population(labels_1, labels_2, name='Compare_Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels_1).values())
total_states = np.max(labels_1) + 1
total_frames = len(labels_1)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations_1 = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
magnitude = np.ceil(log_i)
populations_1[magnitude] += 1
print("Populations Probability:")
    for i in range(len(populations_1)):
populations_1[i] = populations_1[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations_1[i]*100, "%")
counts = list(Counter(labels_2).values())
total_states = np.max(labels_2) + 1
total_frames = len(labels_2)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations_2 = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
magnitude = np.ceil(log_i)
populations_2[magnitude] += 1
print("Populations Probability:")
    for i in range(len(populations_2)):
populations_2[i] = populations_2[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations_2[i]*100, "%")
name += '_Populations'
print("name:", name)
plot_compare_cluster_size_distribution(populations_1=populations_1, populations_2=populations_2, name=name)
#plot_cluster_size_distribution(populations_1=populations_1, name=name)
print("Done.")
def calculate_landscape(labels, centers, phi_angles, psi_angles, potential=False, name='Energy_Landscape'):
print("Calculating and plotting Landscape...")
phi_ctr = phi_angles[centers]
psi_ctr = psi_angles[centers]
labels_ctr = labels[centers]
name = name + '_Energy_Landscape'
print("name:", name)
plot_landscape(labels=labels_ctr, phi_angles=phi_angles, psi_angles=psi_angles, phi_ctr=phi_ctr, psi_ctr=psi_ctr, potential=potential, name=name)
print("Done")
#plot_landscape(labels=None, phi_angles=phi_angles, psi_angles=psi_angles)
| 38.556291 | 209 | 0.591635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,974 | 0.342408 |
9634bfc41291ba70d3cb8d6d1b58e82b77a84ebf
| 494 |
py
|
Python
|
commanderbot_lib/database/yaml_file_database.py
|
CommanderBot-Dev/commanderbot-lib
|
2716279b059056eaf0797085149b61f71b175ed5
|
[
"MIT"
] | 1 |
2020-09-25T19:22:47.000Z
|
2020-09-25T19:22:47.000Z
|
commanderbot_lib/database/yaml_file_database.py
|
CommanderBot-Dev/commanderbot-lib
|
2716279b059056eaf0797085149b61f71b175ed5
|
[
"MIT"
] | 1 |
2021-01-06T00:22:56.000Z
|
2021-08-29T20:54:50.000Z
|
commanderbot_lib/database/yaml_file_database.py
|
CommanderBot-Dev/commanderbot-lib
|
2716279b059056eaf0797085149b61f71b175ed5
|
[
"MIT"
] | 2 |
2020-09-25T19:23:07.000Z
|
2020-09-25T21:06:11.000Z
|
from typing import IO
from commanderbot_lib.database.abc.file_database import FileDatabase
from commanderbot_lib.database.mixins.yaml_file_database_mixin import (
YamlFileDatabaseMixin,
)
class YamlFileDatabase(FileDatabase, YamlFileDatabaseMixin):
# @implements FileDatabase
async def load(self, file: IO) -> dict:
return await self.load_yaml(file)
# @implements FileDatabase
async def dump(self, data: dict, file: IO):
await self.dump_yaml(data, file)
| 29.058824 | 71 | 0.755061 | 298 | 0.603239 | 0 | 0 | 0 | 0 | 165 | 0.334008 | 52 | 0.105263 |
9634f1c7c56270380c2632695615ddd30a7c567d
| 1,663 |
py
|
Python
|
git_verkefni_forritun.py
|
JonasFreyrB/Forritun
|
61cfda738693b255131bf6fb4ebea3af6f3a4ecf
|
[
"MIT"
] | null | null | null |
git_verkefni_forritun.py
|
JonasFreyrB/Forritun
|
61cfda738693b255131bf6fb4ebea3af6f3a4ecf
|
[
"MIT"
] | null | null | null |
git_verkefni_forritun.py
|
JonasFreyrB/Forritun
|
61cfda738693b255131bf6fb4ebea3af6f3a4ecf
|
[
"MIT"
] | null | null | null |
#Jónas Freyr Bjarnason
#25.01.2017
#Programming (Forritun)
#Part 1
#Ask the user for number 1
tala1=int(input("Sláðu inn tölu 1 "))
#Ask the user for number 2
tala2=int(input("Sláðu inn tölu 2 "))
#Print the two numbers added together
print("Tölurnar lagðar saman ",tala1+tala2)
#Print the two numbers multiplied together
print("Tölurnar margfaldaðar saman ",tala1*tala2)
#Part 2
#Ask the user for their first name
fornafn=input("Sláðu inn fornafnið þitt ")
#Ask the user for their last name
eftirnafn=input("Sláðu inn eftirnafnið þitt ")
#Print a greeting along with both names
print("Halló",fornafn,eftirnafn)
#Part 3
#Ask the user for some text
text=input("Sláðu inn texta ")
#Create a counter for lowercase letters
tellagstafi=0
#Create a counter for uppercase letters
telhastafi=0
#Create a counter for lowercase letters that come right after an uppercase letter
tellagstafieftir=0
#Create a for loop that runs through the text
for x in range(len(text)):
    #If the character in the text is a letter and is uppercase
    if (text[x].isalpha() and text[x].isupper()):
        #Add 1 to the uppercase counter
        telhastafi=telhastafi+1
        #If there is a next character and it is lowercase
        if (x + 1 < len(text) and text[x + 1].islower()):
            #Add 1 to the counter for lowercase letters right after an uppercase letter
            tellagstafieftir=tellagstafieftir+1
    #If the character in the text is a letter and is lowercase
    elif(text[x].isalpha() and text[x].islower()):
        #Add 1 to the lowercase counter
        tellagstafi=tellagstafi+1
#Print the number of uppercase letters
print("Það komu",telhastafi,"hástafir")
#Print the number of lowercase letters
print("Það komu",tellagstafi,"lágstafir")
#Print the number of lowercase letters that come right after an uppercase letter
print("Það komu",tellagstafieftir,"lágstafir koma strax á eftir hástaf")
| 29.175439 | 72 | 0.723391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,124 | 0.640091 |
96359eac01afe317df5fd3c215b39bdd662a534c
| 14,568 |
py
|
Python
|
test/pdu.py
|
praekelt/python-smpp
|
8a0753fc498ab6bcd6243aed5953cddd69cef2c0
|
[
"BSD-3-Clause"
] | 36 |
2015-01-15T09:38:06.000Z
|
2021-06-14T15:27:34.000Z
|
test/pdu.py
|
komuW/smpp_server
|
10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2
|
[
"BSD-3-Clause"
] | 8 |
2015-02-12T15:52:53.000Z
|
2017-05-22T12:28:45.000Z
|
test/pdu.py
|
komuW/smpp_server
|
10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2
|
[
"BSD-3-Clause"
] | 22 |
2015-04-29T15:06:17.000Z
|
2021-05-25T11:19:41.000Z
|
pdu_objects = [
{
'header': {
'command_length': 0,
'command_id': 'bind_transmitter',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transmitter_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_receiver',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_receiver_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transceiver',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transceiver_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'outbind',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'unbind',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'unbind_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'generic_nack',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'testing 123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 0,
'short_message': None,
# 'short_message' can be of zero length
},
'optional_parameters': [
{
'tag': 'message_payload',
'length': 0,
'value': '5666',
},
],
},
},
# ]
# breaker = [
{
'header': {
'command_length': 0,
'command_id': 'submit_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm_resp',
'command_status': 'ESME_RSYSERR',
'sequence_number': 0,
},
# submit_sm_resp can have no body for failures
},
{
'header': {
'command_length': 0,
'command_id': 'submit_multi',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'number_of_dests': 0,
'dest_address': [
{
'dest_flag': 1,
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': 'the address'
},
{
'dest_flag': 2,
'dl_name': 'the list',
},
{
'dest_flag': 2,
'dl_name': 'the other list',
},
# {}
],
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'testing 123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_multi_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'no_unsuccess': 5,
'unsuccess_sme': [
{
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'error_status_code': 0,
},
{
'dest_addr_ton': 3,
'dest_addr_npi': 1,
'destination_addr': '555',
'error_status_code': 0,
},
],
},
},
},
# ]
# breaker = [
{
'header': {
'command_length': 0,
'command_id': 'deliver_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'deliver_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'data_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'registered_delivery': 0,
'data_coding': 0,
},
'optional_parameters': [
{
'tag': 'message_payload',
'length': 0,
'value': '',
},
],
},
},
{
'header': {
'command_length': 0,
'command_id': 'data_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'query_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'query_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'final_date': '',
'message_state': 0,
'error_code': 0,
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'cancel_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'cancel_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'replace_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'is this an = sign?',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'replace_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'enquire_link',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'enquire_link_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'alert_notification',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'source_addr_ton': 'international',
'source_addr_npi': 1,
'source_addr': '',
'esme_addr_ton': 9,
'esme_addr_npi': '',
'esme_addr': '',
},
},
},
]
| 28.17795 | 57 | 0.376922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,360 | 0.436573 |
96361040937f80ec08a4104661139247c1a2e9f9
| 3,616 |
py
|
Python
|
arxiv/release/dist_version.py
|
cul-it/arxiv-base
|
a5beadf44c24f72e21313299bfafc1ffb9d28ac7
|
[
"MIT"
] | 23 |
2019-01-10T22:01:18.000Z
|
2022-02-02T10:28:25.000Z
|
arxiv/release/dist_version.py
|
arXiv/arxiv-base
|
b59490abc1656c240025e19af86d6a246926914a
|
[
"MIT"
] | 57 |
2018-12-17T16:45:38.000Z
|
2021-12-14T14:20:58.000Z
|
arxiv/release/dist_version.py
|
cul-it/arxiv-base-ui
|
a5beadf44c24f72e21313299bfafc1ffb9d28ac7
|
[
"MIT"
] | 5 |
2019-01-10T22:01:28.000Z
|
2021-11-05T12:25:31.000Z
|
"""
Functions to deal with arxiv package versions.
It can be used in the setup.py file:
from arxiv.release.dist_version import get_version
setup(
version=get_version('arxiv-filemanager'),
....
)
"""
import sys
import pathlib
from subprocess import Popen, PIPE
from datetime import datetime
import pkg_resources
from pathlib import Path
from typing import Any, Optional
def get_version(dist_name: str) -> Optional[str]:
"""Get the version written by write_version(), or the git describe version.
Parameters
----------
dist_name: str
Which arxiv distribution to get. ex arxiv-base
arxiv-filemanager. This should be the name from setup.py or
pypi. These will be mapped to arxiv.base.version and
arxiv.filemanager.version.
Returns
-------
str
The version.__version__ value if it exists or the git describe
version if it exists or the string 'no-git-or-release-version'
"""
# TODO We might want to make it an error if we are under git
# and there is a version.py file? It doesn't seem like a good state.
pkg = ".".join(dist_name.split("-")) + ".version"
try:
name = "__version__"
dist_version = str(getattr(__import__(pkg, fromlist=[name]), name))
return dist_version
except ModuleNotFoundError:
pass
pkv=get_pkg_version(dist_name)
if pkv is not None:
return pkv
try:
return get_git_version()
except ValueError:
pass
return "0.0.1+no-git-or-release-version"
def write_version(dist_name: str, version: str) -> Path:
"""Write version to version.py in package corresponding with dist_name.
Parameters
----------
dist_name: str
Which arxiv distribution to get. ex arxiv-base
arxiv-filemanager. These will be mapped to arxiv.base.version
and arxiv.filemanager.version.
version: str
A string with a semantic version.
Returns
-------
Path
This returns the path to the version.py file.
"""
dir = "/".join(dist_name.split("-")) + "/version.py"
path = pathlib.Path(dir)
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "w+") as ff: # overwrite existing version
when = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
ff.write("'Created by tag_check.write_version'\n")
ff.write("# NEVER CHECK THIS version.py file INTO GIT.\n")
ff.write(
"# Generated when the package was build for distribution.\n"
)
ff.write(f"__when__ = '{when}'\n")
ff.write(f"__version__ = '{version}'\n")
return path
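# Illustrative usage sketch (added commentary; not part of the original module).
# The distribution name and version string below are assumptions for demonstration.
#
#     path = write_version('arxiv-base', '1.2.3')   # writes arxiv/base/version.py
#     print(get_version('arxiv-base'))              # '1.2.3' once that package is importable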
def get_pkg_version(pkg: Any) -> Optional[str]:
"""Get the python package version.
pkg needs to be the package name from setup.py or the name used to
install from pypi.
"""
try:
return pkg_resources.get_distribution(pkg).version
except:
return None
def get_git_version(abbrev: int = 7) -> str:
"""Get the current version using `git describe`."""
try:
p = Popen(
["git", "describe", "--dirty", "--abbrev=%d" % abbrev],
stdout=PIPE,
stderr=PIPE,
)
p.stderr.close()
line = p.stdout.readlines()[0]
return str(line.strip().decode("utf-8"))
except Exception:
raise ValueError("Cannot get the version number from git")
# Below is intended to let this module be used in CI scripts:
# ``export APP_VER=$(python -m arxiv.release.get_version arxiv-hatsize-agent)``
if __name__ == "__main__":
print(get_version(sys.argv[1]))
| 29.16129 | 79 | 0.634679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,071 | 0.572732 |
9636f9d68fd104dbb3836b714d906a33ec4f48ed
| 15,812 |
py
|
Python
|
rssynergia/base_diagnostics/read_bunch.py
|
radiasoft/rs_synergia
|
b43509de7f4a938354dc127762d8e723463e0e95
|
[
"Apache-2.0"
] | null | null | null |
rssynergia/base_diagnostics/read_bunch.py
|
radiasoft/rs_synergia
|
b43509de7f4a938354dc127762d8e723463e0e95
|
[
"Apache-2.0"
] | null | null | null |
rssynergia/base_diagnostics/read_bunch.py
|
radiasoft/rs_synergia
|
b43509de7f4a938354dc127762d8e723463e0e95
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""?
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
#import argparse
#import tables
from mpi4py import MPI
import h5py
import inspect
import numpy as np
import os
import synergia
# load the particles that will be used for the simulation
# The particles file is a text file with particle coordinates
# defined with the MAD-X conventions: X PX Y PY T PT
# Read this in using numpy's loadtxt command
# particle coordinates are converted to Synergia conventions
# input arguments:
# particles_file: the file name
# reference particle: the lattice reference particle for kinematic conversions
# num_real_particles: the number of real particles in the bunch
# bucket_length: the longitudinal length of the bucket
# comm: the Commxx communicator object for this bunch
# verbose: be chatty about what's happening
#
def read_bunch(particles, refpart, real_particles, comm, bucket_length = None, verbose=False):
'''
Read a bunch from file (either .txt, .h5, or .mxtxt (MAD-X txt file)) and construct a Synergia bunch object.
Arguments:
- particles (string or np.ndarray): EITHER a file containing particles coordinates OR an ndarray of coordinates
- refpart (synergia.foundation.foundation.Reference_particle): the Synergia reference particle describing the bunch
- num_real_particles (float): the number of real particles
- comm (synergia.utils.parallel_utils.Commxx): the Commxx communicator object for this bunch
- bucket_length (Optional[float]): if specified, the longitudinal length of the bucket in m
- verbose (Optional[Boolean]): Flag for verbose feedback
Returns:
-bunch: A Synergia bunch object is created in the current session
'''
#first attempt to load the particles as an h5 file
try:
return read_h5_particles(particles, refpart, real_particles, bucket_length, comm, verbose)
#it's not an h5 file - then there are two possibilities:
#1. It's another sort of file, in which case, an IOError will be thrown
#2. It's a numpy array, in which case a TypeError will be thrown
#Therefore, we will catch the IOErrror and process it as an input file to check if it's a legible text file
#Then we will catch the possible TypeError and process it for being a numpy array
except IOError:
#IOError, so it's a file but not an .h5 file
name,extension = os.path.splitext(particles)
#assuming no error is thrown, we continue processing the file - whihc should be now either a .txt or .mxtxt
assert extension == '.txt' or extension == '.mxtxt', \
"Supported file types are hdf5 (.h5) and plain text (.txt/.mxtxt)"
return read_txt_particles(particles, refpart, real_particles, bucket_length, comm, extension == '.mxtxt', verbose)
except TypeError:
#TypeError, so it's not a file - so we should check if it's a numpy array
#Had we checked the .txt read first, it would have return an AttributeError
assert isinstance(particles, np.ndarray), \
"Supported data types are numpy arrays only."
return read_array_particles(particles, refpart, real_particles, bucket_length, comm, verbose)
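# Illustrative usage sketch (added commentary; not part of the original module).
# refpart and comm are assumed to exist already: refpart is a Synergia
# Reference_particle and comm a Commxx communicator, as described in the
# docstring above; the file name and particle count are assumptions.
#
#     bunch = read_bunch('initial_particles.h5', refpart, 1.0e11, comm, verbose=True)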
#====================================================================
# if madx_format is True, the particles are in madX units, otherwise they are in
# synergia units
def read_txt_particles(particles_file, refpart, real_particles, bucket_length, comm, madx_format, verbose):
"""Read an array of particles from a text file"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
if madx_format:
print("Loading madX particles from txt file: ", particles_file)
else:
print("Loading Synergia particles from txt file: ", particles_file)
if myrank == 0:
particles = np.loadtxt(particles_file)
num_total_particles = particles.shape[0]
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particles.shape[1] != 6) and (particles.shape[1] != 7):
raise RuntimeError, "input data shape %shas incorrect number of particle coordinates"%repr(particles.shape)
if madx_format:
# numpy manipulations to convert kinematics
# convert MAD-X T=-c*dt to Synergia c*ct
particles[:,4] = -particles[:,4]
# convert MAD-X Delta-E/pc to Synergia delta-p/p
# sqrt(((dE/p0c)+(E0/p0c))**2 - (m/p0c)**2) - (p0c/p0c)
m_over_pc = pmass/p0c
E_0_over_pc = E_0/p0c
particles[:,5] = np.sqrt( (particles[:,5] + E_0_over_pc) *
(particles[:,5] + E_0_over_pc) - m_over_pc**2 ) - 1.0
# if there are no IDs, append particle ID column
if particles.shape[1] != 7:
particles_w_id = np.column_stack((particles,
np.arange(num_total_particles, dtype='d')))
else:
particles_w_id = particles
if myrank == 0:
print("Read ", num_total_particles, " particles")
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
    except Exception as e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particles_w_id[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles_w_id[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch
#==========================================================
def read_h5_particles(particles_file, refpart, real_particles, bucket_length, comm, verbose):
"""Read an array of particles from an HDF-5 file"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
print("Loading particles from h5 file: ", particles_file)
if myrank == 0:
#h5 = tables.open_file(particles_file)
h5 = h5py.File(particles_file)
# use explicit int conversion otherwise there seems to
        # be a Python->C++ type mismatch of numpy.int64->int
#num_total_particles = int(h5.root.particles.shape[0])
num_total_particles = int(h5['particles'].shape[0])
if verbose:
print("Total of ", num_total_particles, " particles from file")
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
particles = h5['particles']
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particles.shape[1] != 7):
raise RuntimeError, "input data shape %shas incorrect number of particle coordinates"%repr(particles.shape)
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
    except Exception as e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particles[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch
#==========================================================
def read_array_particles(particle_array, refpart, real_particles, bucket_length, comm, verbose):
"""Read an array of particles coordinates from memory"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
print("Loading particles from: ".format(particle_array))
if myrank == 0:
# use explicit int conversion otherwise there seems to
        # be a Python->C++ type mismatch of numpy.int64->int
#num_total_particles = int(h5.root.particles.shape[0])
num_total_particles = particle_array.shape[0]
if verbose:
print("Total of ", num_total_particles, " particles")
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
particles = particle_array
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particle_array.shape[1] != 7):
raise RuntimeError, "input data shape %shas incorrect number of particle coordinates"%repr(particles.shape)
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
    except Exception as e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particle_array[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch
#================================================================
def print_bunch_stats(bunch):
coord_names = ("x", "xp", "y", "yp", "c*dt", "dp/p")
myrank = bunch.get_comm().get_rank()
means = synergia.bunch.Core_diagnostics().calculate_mean(bunch)
stds = synergia.bunch.Core_diagnostics().calculate_std(bunch, means)
if myrank == 0:
print("%20s %20s %20s"%("coord","mean","rms"))
print("%20s %20s %20s"%("====================",)
"====================",
"====================")
for i in range(6):
print("%20s %20.12e %20.12e"%(coord_names[i], means[i], stds[i]))
#=========================================================
| 41.177083 | 142 | 0.623893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,862 | 0.433974 |
9637f8491206f394aea0791103d4e2cc75fcd07e
| 15,043 |
py
|
Python
|
funfolding/binning/classic_binning.py
|
tudo-astroparticlephysics/funfolding
|
2f485b04f8d79698527fcaab015baf708505e8dd
|
[
"MIT"
] | 1 |
2019-05-22T13:46:46.000Z
|
2019-05-22T13:46:46.000Z
|
funfolding/binning/classic_binning.py
|
tudo-astroparticlephysics/funfolding
|
2f485b04f8d79698527fcaab015baf708505e8dd
|
[
"MIT"
] | null | null | null |
funfolding/binning/classic_binning.py
|
tudo-astroparticlephysics/funfolding
|
2f485b04f8d79698527fcaab015baf708505e8dd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ._binning import Binning
import itertools
import numpy as np
import copy
try:
from astroML.density_estimation.bayesian_blocks import bayesian_blocks
got_astroML = True
except ImportError:
got_astroML = False
class ClassicBinning(Binning):
name = 'ClassicalBinning'
status_need_for_digitize = 0
def __init__(self,
bins,
range=None,
oor_handle='individual',
random_state=None):
super(ClassicBinning, self).__init__()
self.hist_func = np.histogram
self.n_dims = len(bins)
self.bins = bins
if range is None:
self.range = [None] * self.n_dims
else:
self.range = range
self.edges = []
self.t_to_i = None
self.i_to_t = None
self.n_bins = None
self.oor_tuples = None
self.oor_handle = oor_handle
if not isinstance(random_state, np.random.RandomState):
self.random_state = np.random.RandomState(random_state)
else:
self.random_state = random_state
def initialize(self,
X,
y=None,
sample_weight=None):
super(ClassicBinning, self).initialize()
for dim_i in range(self.n_dims):
if sample_weight is None:
w_i = None
else:
w_i = sample_weight[:, dim_i]
if self.bins[dim_i] == 'blocks':
if got_astroML:
self.edges.append(bayesian_blocks(X[:, dim_i]))
else:
raise RuntimeError("Install astroML to use 'blocks'")
else:
self.edges.append(self.hist_func(a=X[:, dim_i],
bins=self.bins[dim_i],
range=self.range[dim_i],
weights=w_i)[1])
self.create_conversion_dict()
self.n_bins = len(self.t_to_i.keys())
def digitize(self, X, right=False):
super(ClassicBinning, self).digitize()
tup_label = np.zeros((len(X), self.n_dims), dtype=int)
for dim_i in range(self.n_dims):
digi = np.digitize(x=X[:, dim_i],
bins=self.edges[dim_i],
right=right)
tup_label[:, dim_i] = digi
return self.convert_tup_label(tup_label)
def convert_tup_label(self, tup_label):
i_label = np.array([self.t_to_i[tuple(key)] for key in tup_label],
dtype=int)
return i_label
def create_conversion_dict(self):
range_list = [np.arange(len(edges_i) + 1)
for edges_i in self.edges]
indices = itertools.product(*range_list)
self.t_to_i = {x: i for i, x in enumerate(indices)}
def is_oor(tup_i):
lower = any(np.array(tup_i) == 0)
upper = any([t == len(self.edges[i]) for i, t in enumerate(tup_i)])
return lower or upper
self.i_to_t = {self.t_to_i[t]: t for t in self.t_to_i.keys()}
self.oor_tuples = set(t for t in self.t_to_i.keys() if is_oor(t))
def copy(self):
clone = ClassicBinning(bins=self.bins)
clone.bins = copy.deepcopy(self.bins)
clone.range = copy.deepcopy(self.range)
clone.edges = copy.deepcopy(self.edges)
clone.t_to_i = copy.deepcopy(self.t_to_i)
clone.i_to_t = copy.deepcopy(self.i_to_t)
clone.n_bins = copy.deepcopy(self.n_bins)
clone.oor_tuples = copy.deepcopy(self.oor_tuples)
clone.oor_handle = copy.deepcopy(self.oor_handle)
clone.random_state = copy.deepcopy(self.random_state)
clone.status = int(self.status)
return clone
def __merge__(self,
X,
min_samples=None,
max_bins=None,
sample_weight=None,
y=None,
right=False,
mode='closest',
**kwargs):
super(ClassicBinning, self).merge()
n_merg_iterations = 0
binned = self.digitize(X, right=right)
counted = np.bincount(binned,
weights=sample_weight,
minlength=self.n_bins)
original_sum = np.sum(counted)
if min_samples is None and max_bins is None:
raise ValueError("Either 'min_samples' or 'max_bins' have "
"to be set!")
elif min_samples is None:
min_samples = original_sum
elif max_bins is None:
max_bins = 1
if mode == 'similar':
if y is None:
raise ValueError("For mode 'similar' labels are needed!")
if sample_weight is None:
w = y
else:
w = y * sample_weight
no_entry = counted == 0
self.mean_label = np.bincount(binned,
weights=w,
minlength=self.n_bins)
self.mean_label[no_entry] = np.nan
self.mean_label /= counted
self.__get_bin_for_merge__ = self.__get_most_similar_neighbor__
elif mode == 'lowest':
self.__get_bin_for_merge__ = self.__get_lowest_neighbor__
elif mode == 'closest':
self.__get_bin_for_merge__ = self.__get_closest_neighbor__
else:
raise ValueError("'closest', 'lowest' and 'similar' are "
"valid options for keyword 'mode'")
while True:
min_val = np.min(counted)
try:
assert min_val <= min_samples
assert self.n_bins > max_bins
except AssertionError:
break
else:
min_indices = np.where(counted == min_val)[0]
min_idx = self.random_state.choice(min_indices)
neighbors = self.__get_neighbors__(min_idx)
i_label_keep, i_label_remove = self.__get_bin_for_merge__(
bin_a=min_idx,
neighbors=neighbors,
counted=counted,
**kwargs)
self.__merge_bins__(i_label_keep, i_label_remove)
counted[i_label_keep] += counted[i_label_remove]
mask = np.ones_like(counted, dtype=bool)
mask[i_label_remove] = False
counted = counted[mask]
n_merg_iterations += 1
self.n_bins -= 1
if np.sum(counted) != original_sum:
raise RuntimeError('Events sum changed!')
self.n_bins = len(self.i_to_t)
return self
def merge(self,
X,
min_samples=None,
max_bins=None,
sample_weight=None,
y=None,
right=False,
mode='closest',
inplace=False):
if inplace:
return self.__merge__(X=X,
min_samples=min_samples,
max_bins=max_bins,
sample_weight=sample_weight,
y=y,
right=right,
mode=mode)
else:
clone = self.copy()
return clone.merge(X=X,
min_samples=min_samples,
max_bins=max_bins,
sample_weight=sample_weight,
y=y,
right=right,
mode=mode,
inplace=True)
def __get_neighbors__(self, i_label):
t_labels = self.i_to_t[i_label]
if isinstance(t_labels, tuple):
t_labels = [t_labels]
neighbors = set()
for t_label in t_labels:
dims = range(self.n_dims)
for i in dims:
upper = []
lower = []
for j in dims:
if j == i:
upper.append(t_label[j] + 1)
lower.append(t_label[j] - 1)
else:
upper.append(t_label[j])
lower.append(t_label[j])
upper = tuple(upper)
lower = tuple(lower)
try:
if upper not in t_labels:
neighbors.add(self.t_to_i[upper])
except KeyError:
pass
try:
if lower not in t_labels:
neighbors.add(self.t_to_i[lower])
except KeyError:
pass
assert i_label not in neighbors
return list(neighbors)
def __merge_bins__(self, i_label_keep, i_label_remove):
if i_label_remove <= i_label_keep:
raise RuntimeError
t_labels_remove = self.i_to_t[i_label_remove]
if isinstance(t_labels_remove, tuple):
t_labels_remove = [t_labels_remove]
t_labels_keep = self.i_to_t[i_label_keep]
if isinstance(t_labels_keep, tuple):
t_labels_keep = [t_labels_keep]
for t_label_remove in t_labels_remove:
self.t_to_i[t_label_remove] = i_label_keep
for t_label in self.t_to_i.keys():
if self.t_to_i[t_label] > i_label_remove:
self.t_to_i[t_label] -= 1
self.i_to_t = {}
for t_label, i_label in self.t_to_i.items():
t_labels = self.i_to_t.get(i_label, [])
t_labels.append(t_label)
self.i_to_t[i_label] = t_labels
return i_label_keep, i_label_remove
def __get_lowest_neighbor__(self, bin_a, neighbors, counted):
counted_neighbors = counted[neighbors]
min_val = np.where(counted_neighbors == np.min(counted_neighbors))[0]
min_index = self.random_state.choice(min_val)
bin_b = neighbors[min_index]
if bin_b < bin_a:
i_label_keep = bin_b
i_label_remove = bin_a
else:
i_label_keep = bin_a
i_label_remove = bin_b
return i_label_keep, i_label_remove
def __get_most_similar_neighbor__(self,
bin_a,
neighbors,
counted):
mean_label = self.mean_label
min_counted = np.min(counted[neighbors])
if min_counted == 0 or counted[bin_a] == 0:
i_label_keep, i_label_remove = self.__get_closest_neighbor__(
bin_a,
neighbors,
counted)
else:
label_diff = np.absolute(mean_label[neighbors] - mean_label[bin_a])
min_idx = np.where(label_diff == np.nanmin(label_diff))[0]
if len(min_idx) > 0:
neighbors = [neighbors[i] for i in min_idx]
i_label_keep, i_label_remove = self.__get_closest_neighbor__(
bin_a,
neighbors,
counted)
else:
bin_b = neighbors[min_idx]
if bin_b < bin_a:
i_label_keep = bin_b
i_label_remove = bin_a
else:
i_label_keep = bin_a
i_label_remove = bin_b
if np.isnan(mean_label[i_label_keep]) and \
np.isnan(mean_label[i_label_remove]):
mean_label[i_label_keep] = np.nan
elif not np.isnan(mean_label[i_label_keep]) and \
np.isnan(mean_label[i_label_remove]):
pass
elif np.isnan(mean_label[i_label_keep]) and not \
np.isnan(mean_label[i_label_remove]):
mean_label[i_label_keep] = mean_label[i_label_remove]
else:
s_k = counted[i_label_keep] * mean_label[i_label_keep]
s_r = counted[i_label_remove] * mean_label[i_label_remove]
s = s_r + s_k
c = (counted[i_label_keep] + counted[i_label_remove])
mean_label[i_label_keep] = s / c
mask = np.ones_like(mean_label, dtype=bool)
mask[i_label_remove] = False
self.mean_label = self.mean_label[mask]
return i_label_keep, i_label_remove
def __get_closest_neighbor__(self,
bin_a,
neighbors,
counted,
unitless=True):
if unitless:
bin_cog = self.__calc_bin_cog_unitless__(bin_a)
bin_cogs = [self.__calc_bin_cog_unitless__(i)
for i in neighbors]
else:
bin_cog = self.__calc_bin_cog__(bin_a)
bin_cogs = [self.__calc_bin_cog__(i)
for i in neighbors]
distance = [np.sqrt(np.sum((bin_cog - bin_i)**2))
for bin_i in bin_cogs]
min_val = np.where(distance == np.min(distance))[0]
bin_b = neighbors[self.random_state.choice(min_val)]
if bin_b < bin_a:
i_label_keep = bin_b
i_label_remove = bin_a
else:
i_label_keep = bin_a
i_label_remove = bin_b
return i_label_keep, i_label_remove
def __calc_bin_cog__(self, i_label):
t_labels = self.i_to_t[i_label]
if isinstance(t_labels, tuple):
t_labels = [t_labels]
cog = np.zeros((self.n_dims, len(t_labels)))
mean_diff = [np.mean(np.diff(self.edges[i]))
for i in range(self.n_dims)]
for j, t_label in enumerate(t_labels):
for i, bin_i in enumerate(t_label):
try:
upper_edge = self.edges[i][bin_i]
except IndexError:
upper_edge = None
try:
lower_edge = self.edges[i][bin_i - 1]
except IndexError:
lower_edge = None
if upper_edge is None and lower_edge is None:
raise ValueError('Invalid label!')
if upper_edge is None:
upper_edge = lower_edge + mean_diff[i]
if lower_edge is None:
lower_edge = upper_edge - mean_diff[i]
cog[i, j] = (upper_edge + lower_edge) / 2.
return np.mean(cog, axis=1)
def __calc_bin_cog_unitless__(self, i_label):
t_labels = self.i_to_t[i_label]
if isinstance(t_labels, tuple):
t_labels = [t_labels]
cog = np.zeros((self.n_dims, len(t_labels)))
for j, t_label in enumerate(t_labels):
cog[:, j] = np.array(t_label)
return np.mean(cog, axis=1)
| 38.670951 | 79 | 0.515456 | 14,766 | 0.981586 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.024131 |
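A minimal usage sketch for the ClassicBinning class above, assuming the funfolding package (and its Binning base class) is importable; the toy data, grid size and min_samples threshold are illustrative only.

import numpy as np
from funfolding.binning.classic_binning import ClassicBinning

# 1000 toy events with two observables each.
rng = np.random.RandomState(42)
X = rng.normal(size=(1000, 2))

# Build a 10 x 10 grid; initialize() derives the bin edges from the data.
binning = ClassicBinning(bins=[10, 10], random_state=42)
binning.initialize(X)

# digitize() maps every event to a single integer bin label.
labels = binning.digitize(X)
print(binning.n_bins, labels.shape)

# merge() combines sparsely populated bins until each holds at least
# 20 events; with inplace=False (the default) a merged copy is returned.
merged = binning.merge(X, min_samples=20)
print(merged.n_bins)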
96395cbf1fcfecb1f1e6a9078b9555cfe006e998
| 2,625 |
py
|
Python
|
basic_algorithm/draw/draw.py
|
Quanfita/ImageProcessing
|
2a7c1d093a003c43d6d259f6e8db3b4e6163839b
|
[
"MIT"
] | null | null | null |
basic_algorithm/draw/draw.py
|
Quanfita/ImageProcessing
|
2a7c1d093a003c43d6d259f6e8db3b4e6163839b
|
[
"MIT"
] | null | null | null |
basic_algorithm/draw/draw.py
|
Quanfita/ImageProcessing
|
2a7c1d093a003c43d6d259f6e8db3b4e6163839b
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
def drawPoint(canvas,x,y):
canvas[y,x] = 0
def drawLine(canvas,x1,y1,x2,y2):
dx, dy = abs(x2 - x1), abs(y2 - y1)
xi, yi = x1, y1
sx, sy = 1 if (x2 - x1) > 0 else -1, 1 if (y2 - y1) > 0 else -1
pi = 2*dy - dx
while xi != x2 + 1:
if pi < 0:
pi += 2 * dy
else:
pi += 2 * dy - 2 * dx
yi += 1 * sy
drawPoint(canvas,xi,yi)
xi += 1 * sx
def drawCircle(canvas,x,y,r):
x0, y0 = x, y
xi = 0
yi = r
pi = 5/4 - r
while xi <= yi:
if pi < 0:
pi += 2 * (xi + 1) + 1
else:
pi += 2 * (xi + 1) + 1 - 2 * (yi - 1)
yi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
xi += 1
xi = r
yi = 0
pi = 5/4 - r
while not (xi == yi+1 or xi == yi):
if pi < 0:
pi += 2 * (yi + 1) + 1
else:
pi += 2 * (yi + 1) + 1 - 2 * (xi - 1)
xi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
yi += 1
def drawEllipse(canvas,x,y,rx,ry):
x0, y0 = x, y
xi, yi = 0, ry
rx2 = rx ** 2
ry2 = ry ** 2
p1i = ry2 - rx2 * ry + rx2 / 4
while 2*ry2*xi < 2*rx2*yi:
if p1i < 0:
p1i += 2 * ry2 * (xi + 1) + ry2
else:
p1i += 2 * ry2 * (xi + 1) - 2* rx2 * (yi - 1) + ry2
yi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
xi += 1
xi -= 1
p2i = ry2 * (xi + .5) ** 2 + rx2 * (yi - 1) ** 2 - rx2 * ry2
while yi >= 0:
if p2i > 0:
p2i += -2 * rx2 * (yi - 1) + rx2
else:
p2i += 2 * ry2 * (xi + 1) - 2 * rx2 * (yi - 1) + rx2
xi += 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
yi -= 1
if __name__ == '__main__':
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawLine(canvas,800,100,100,600)
cv2.imwrite('line.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawCircle(canvas,500,500,300)
cv2.imwrite('circle.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawEllipse(canvas,500,500,100,200)
cv2.imwrite('ellipse.png',canvas)
| 27.061856 | 67 | 0.459429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.017143 |
9639600eac70c10e9cb5e5d9d76147c7dda0b313
| 888 |
py
|
Python
|
PyTCI/models/alfentanil.py
|
jcia2192/PyTCI
|
952ac6312015514c8609af5d9a61cc3397758c94
|
[
"MIT"
] | 6 |
2019-02-16T22:29:42.000Z
|
2020-10-17T17:22:52.000Z
|
PyTCI/models/alfentanil.py
|
jcia2192/PyTCI
|
952ac6312015514c8609af5d9a61cc3397758c94
|
[
"MIT"
] | 91 |
2019-03-04T06:11:07.000Z
|
2022-03-30T01:31:27.000Z
|
PyTCI/models/alfentanil.py
|
jcia2192/PyTCI
|
952ac6312015514c8609af5d9a61cc3397758c94
|
[
"MIT"
] | 3 |
2019-05-14T15:09:30.000Z
|
2020-02-19T13:03:03.000Z
|
from .base import Three
class Alfentanil(Three):
"""base Alfentanil class"""
pass
class Maitre(Alfentanil):
def __init__(self, age, weight, height, sex):
if sex == "m":
self.v1 = 0.111 * weight
elif sex == "f":
self.v1 = 0.128 * weight
else:
raise ValueError("Unknown value for sex")
self.k12 = 0.104
self.k13 = 0.017
self.k21 = 0.0673
self.k31 = 0.0126
self.q1 = 0.356
if age > 40:
self.k31 = 0.0126 - (0.000113 * (age - 40))
self.q1 = 0.356 - (0.00269 * (age - 40))
        # calculated stuff as source paper gives mix of clearance and rate constants
self.k10 = self.q1 / self.v1
self.v2 = self.v1 * (self.k12 / self.k21)
self.v3 = self.v1 * (self.k13 / self.k31)
self.keo = 0.77
self.setup()
| 24 | 83 | 0.516892 | 858 | 0.966216 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.147523 |
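For orientation, a brief instantiation of the Maitre model above; it assumes the PyTCI package supplies the Three base class and its setup() helper, and the patient values are invented for illustration.

from PyTCI.models.alfentanil import Maitre

# Hypothetical 55-year-old, 70 kg, 170 cm male patient.
patient = Maitre(age=55, weight=70, height=170, sex="m")

print(patient.v1)   # central compartment volume, scaled by weight
print(patient.k31)  # rate constant after the age > 40 adjustment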
963a4d3128c84db58d2f454e777068e2515b774e
| 307 |
py
|
Python
|
cooee/actions.py
|
yschimke/cooee-cli-py
|
74edeb58ee5cfd0887b73de4f90ffa28892e24df
|
[
"Apache-2.0"
] | null | null | null |
cooee/actions.py
|
yschimke/cooee-cli-py
|
74edeb58ee5cfd0887b73de4f90ffa28892e24df
|
[
"Apache-2.0"
] | null | null | null |
cooee/actions.py
|
yschimke/cooee-cli-py
|
74edeb58ee5cfd0887b73de4f90ffa28892e24df
|
[
"Apache-2.0"
] | null | null | null |
import webbrowser
from typing import Dict, Any
from prompt_toolkit import print_formatted_text
from .format import todo_string
def launch_action(result: Dict[str, Any]):
if "location" in result:
webbrowser.open(result["location"])
else:
print_formatted_text(todo_string(result))
| 21.928571 | 49 | 0.745928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.065147 |
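A one-line illustration of launch_action above, assuming the cooee package and its dependencies (prompt_toolkit) are installed: a result carrying a "location" key is opened in the default browser; anything else is routed through todo_string, whose expected keys are not shown here, so only the first branch is exercised.

from cooee.actions import launch_action

launch_action({"location": "https://example.com"})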
963a7890170f483f8139e5e50f0f73025935d302
| 4,114 |
py
|
Python
|
custom_components/panasonic_smart_app/sensor.py
|
clspeter/panasonic_smart_app
|
22cf7e64f3b9685b94b38e4d7ffeb5deb900a8af
|
[
"MIT"
] | null | null | null |
custom_components/panasonic_smart_app/sensor.py
|
clspeter/panasonic_smart_app
|
22cf7e64f3b9685b94b38e4d7ffeb5deb900a8af
|
[
"MIT"
] | null | null | null |
custom_components/panasonic_smart_app/sensor.py
|
clspeter/panasonic_smart_app
|
22cf7e64f3b9685b94b38e4d7ffeb5deb900a8af
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
import logging
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
STATE_UNAVAILABLE,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
PERCENTAGE,
)
from .entity import PanasonicBaseEntity
from .const import (
DOMAIN,
UPDATE_INTERVAL,
DEVICE_TYPE_DEHUMIDIFIER,
DEVICE_TYPE_AC,
DATA_CLIENT,
DATA_COORDINATOR,
LABEL_PM25,
LABEL_HUMIDITY,
LABEL_OUTDOOR_TEMPERATURE,
ICON_PM25,
ICON_THERMOMETER,
ICON_HUMIDITY,
)
_LOGGER = logging.getLogger(__package__)
SCAN_INTERVAL = timedelta(seconds=UPDATE_INTERVAL)
async def async_setup_entry(hass, entry, async_add_entities) -> bool:
client = hass.data[DOMAIN][entry.entry_id][DATA_CLIENT]
coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR]
devices = coordinator.data
sensors = []
for index, device in enumerate(devices):
device_type = int(device["Devices"][0]["DeviceType"])
if device_type == DEVICE_TYPE_DEHUMIDIFIER:
sensors.append(
PanasonicHumiditySensor(
coordinator,
index,
client,
device,
)
)
sensors.append(
PanasonicPM25Sensor(
coordinator,
index,
client,
device,
)
)
if device_type == DEVICE_TYPE_AC:
sensors.append(
PanasonicOutdoorTemperatureSensor(
coordinator,
index,
client,
device,
)
)
async_add_entities(sensors, True)
return True
class PanasonicHumiditySensor(PanasonicBaseEntity, SensorEntity):
""" Panasonic dehumidifier current humidity sensor """
@property
def label(self):
return f"{self.nickname} {LABEL_HUMIDITY}"
@property
def icon(self) -> str:
return ICON_HUMIDITY
@property
def device_class(self) -> str:
return DEVICE_CLASS_HUMIDITY
@property
def state(self) -> int:
status = self.coordinator.data[self.index]["status"]
_current_humd = status.get("0x07") or None
_LOGGER.debug(f"[{self.label}] state: {_current_humd}")
return _current_humd if _current_humd else STATE_UNAVAILABLE
@property
def unit_of_measurement(self) -> str:
return PERCENTAGE
class PanasonicPM25Sensor(PanasonicBaseEntity, SensorEntity):
    """ Panasonic dehumidifier PM2.5 sensor """
@property
def label(self) -> str:
return f"{self.nickname} {LABEL_PM25}"
@property
def icon(self) -> str:
return ICON_PM25
@property
def state(self) -> int:
status = self.coordinator.data[self.index]["status"]
_pm25 = int(status.get("0x53") or -1)
_LOGGER.debug(f"[{self.label}] state: {_pm25}")
return _pm25 if _pm25 >= 0 else STATE_UNAVAILABLE
@property
def unit_of_measurement(self) -> str:
return CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
class PanasonicOutdoorTemperatureSensor(PanasonicBaseEntity, SensorEntity):
""" Panasonic AC outdoor temperature sensor """
@property
def label(self) -> str:
return f"{self.nickname} {LABEL_OUTDOOR_TEMPERATURE}"
@property
def icon(self) -> str:
return ICON_THERMOMETER
@property
def device_class(self) -> str:
return DEVICE_CLASS_TEMPERATURE
@property
def state(self) -> int:
status = self.coordinator.data[self.index]["status"]
_outside_temperature = float(status.get("0x21") or -1)
_LOGGER.debug(f"[{self.label}] state: {_outside_temperature}")
return (
_outside_temperature
if _outside_temperature >= 0
else STATE_UNAVAILABLE
)
@property
def unit_of_measurement(self) -> str:
return TEMP_CELSIUS
| 26.714286 | 75 | 0.622265 | 2,237 | 0.543753 | 0 | 0 | 1,794 | 0.436072 | 1,164 | 0.282936 | 437 | 0.106223 |
963b386535b3cdad7d06852710557f50ea31610a
| 5,664 |
py
|
Python
|
fluent.runtime/tests/format/test_placeables.py
|
jakub-szczepaniak/python-fluent
|
2b751220e4ced57fc256df0f25adc72400e5ce9a
|
[
"Apache-2.0"
] | null | null | null |
fluent.runtime/tests/format/test_placeables.py
|
jakub-szczepaniak/python-fluent
|
2b751220e4ced57fc256df0f25adc72400e5ce9a
|
[
"Apache-2.0"
] | null | null | null |
fluent.runtime/tests/format/test_placeables.py
|
jakub-szczepaniak/python-fluent
|
2b751220e4ced57fc256df0f25adc72400e5ce9a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import unittest
from fluent.runtime import FluentBundle
from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError
from ..utils import dedent_ftl
class TestPlaceables(unittest.TestCase):
def setUp(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages(dedent_ftl("""
message = Message
.attr = Message Attribute
-term = Term
.attr = Term Attribute
-term2 = {
*[variant1] Term Variant 1
[variant2] Term Variant 2
}
uses-message = { message }
uses-message-attr = { message.attr }
uses-term = { -term }
bad-message-ref = Text { not-a-message }
bad-message-attr-ref = Text { message.not-an-attr }
bad-term-ref = Text { -not-a-term }
self-referencing-message = Text { self-referencing-message }
cyclic-msg1 = Text1 { cyclic-msg2 }
cyclic-msg2 = Text2 { cyclic-msg1 }
self-cyclic-message = Parent { self-cyclic-message.attr }
.attr = Attribute { self-cyclic-message }
self-attribute-ref-ok = Parent { self-attribute-ref-ok.attr }
.attr = Attribute
self-parent-ref-ok = Parent
.attr = Attribute { self-parent-ref-ok }
"""))
def test_placeable_message(self):
val, errs = self.ctx.format('uses-message', {})
self.assertEqual(val, 'Message')
self.assertEqual(len(errs), 0)
def test_placeable_message_attr(self):
val, errs = self.ctx.format('uses-message-attr', {})
self.assertEqual(val, 'Message Attribute')
self.assertEqual(len(errs), 0)
def test_placeable_term(self):
val, errs = self.ctx.format('uses-term', {})
self.assertEqual(val, 'Term')
self.assertEqual(len(errs), 0)
def test_placeable_bad_message(self):
val, errs = self.ctx.format('bad-message-ref', {})
self.assertEqual(val, 'Text not-a-message')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentReferenceError("Unknown message: not-a-message")])
def test_placeable_bad_message_attr(self):
val, errs = self.ctx.format('bad-message-attr-ref', {})
self.assertEqual(val, 'Text Message')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentReferenceError("Unknown attribute: message.not-an-attr")])
def test_placeable_bad_term(self):
val, errs = self.ctx.format('bad-term-ref', {})
self.assertEqual(val, 'Text -not-a-term')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentReferenceError("Unknown term: -not-a-term")])
def test_cycle_detection(self):
val, errs = self.ctx.format('self-referencing-message', {})
self.assertEqual(val, 'Text ???')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentCyclicReferenceError("Cyclic reference")])
def test_mutual_cycle_detection(self):
val, errs = self.ctx.format('cyclic-msg1', {})
self.assertEqual(val, 'Text1 Text2 ???')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentCyclicReferenceError("Cyclic reference")])
def test_allowed_self_reference(self):
val, errs = self.ctx.format('self-attribute-ref-ok', {})
self.assertEqual(val, 'Parent Attribute')
self.assertEqual(len(errs), 0)
val, errs = self.ctx.format('self-parent-ref-ok.attr', {})
self.assertEqual(val, 'Attribute Parent')
self.assertEqual(len(errs), 0)
class TestSingleElementPattern(unittest.TestCase):
def test_single_literal_number_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=True)
self.ctx.add_messages('foo = { 1 }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_literal_number_non_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages('foo = { 1 }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_arg_number_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=True)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo', {'arg': 1})
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_arg_number_non_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo', {'arg': 1})
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_arg_missing_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=True)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, 'arg')
self.assertEqual(len(errs), 1)
def test_single_arg_missing_non_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, 'arg')
self.assertEqual(len(errs), 1)
| 37.76 | 82 | 0.596751 | 5,428 | 0.958333 | 0 | 0 | 0 | 0 | 0 | 0 | 1,791 | 0.316208 |
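Stripped of the test scaffolding, the FluentBundle workflow the tests above exercise looks like this; the message body is illustrative.

from fluent.runtime import FluentBundle

bundle = FluentBundle(['en-US'], use_isolating=False)
bundle.add_messages("greeting = Hello, { $name }!")

val, errs = bundle.format('greeting', {'name': 'World'})
print(val)   # Hello, World!
print(errs)  # [] when every placeable resolves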
963c560293977e228cb5a3afa7c8d254adb111f7
| 956 |
py
|
Python
|
ads/feature_engineering/adsstring/parsers/base.py
|
oracle/accelerated-data-science
|
d594ed0c8c1365daf4cf9e860daebc760fa9a24b
|
[
"UPL-1.0",
"Apache-2.0"
] | 20 |
2022-02-22T19:07:09.000Z
|
2022-03-16T17:21:42.000Z
|
ads/feature_engineering/adsstring/parsers/base.py
|
oracle/accelerated-data-science
|
d594ed0c8c1365daf4cf9e860daebc760fa9a24b
|
[
"UPL-1.0",
"Apache-2.0"
] | null | null | null |
ads/feature_engineering/adsstring/parsers/base.py
|
oracle/accelerated-data-science
|
d594ed0c8c1365daf4cf9e860daebc760fa9a24b
|
[
"UPL-1.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
class Parser:
@property
def pos(self):
raise NotImplementedError()
@property
def noun(self):
raise NotImplementedError()
@property
def adjective(self):
raise NotImplementedError()
@property
def adverb(self):
raise NotImplementedError()
@property
def verb(self):
raise NotImplementedError()
@property
def word(self):
raise NotImplementedError()
@property
def sentence(self):
raise NotImplementedError()
@property
def word_count(self):
raise NotImplementedError()
@property
def bigram(self):
raise NotImplementedError()
@property
def trigram(self):
raise NotImplementedError()
| 19.916667 | 104 | 0.639121 | 743 | 0.777197 | 0 | 0 | 671 | 0.701883 | 0 | 0 | 205 | 0.214435 |
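A minimal sketch of satisfying the Parser interface above; the whitespace-splitting logic is purely illustrative and unrelated to the library's real parsers, and the import path is simply the file path from the metadata.

from ads.feature_engineering.adsstring.parsers.base import Parser

class WhitespaceParser(Parser):
    """Toy parser implementing only the members it needs."""

    def __init__(self, text):
        self._text = text

    @property
    def word(self):
        return self._text.split()

    @property
    def word_count(self):
        return len(self._text.split())

parser = WhitespaceParser("hello oracle accelerated data science")
print(parser.word_count)  # 5
# Members left unimplemented, e.g. parser.pos, still raise NotImplementedError.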
963c73799bbdb00fb97205577d7028cae7121d80
| 1,535 |
py
|
Python
|
tests/conftest.py
|
banteg/lido-vault
|
702fa9c58a26c01c61b24489d18ea099e22e8b09
|
[
"MIT"
] | 22 |
2020-12-19T10:07:38.000Z
|
2022-01-03T03:28:08.000Z
|
tests/conftest.py
|
banteg/lido-vault
|
702fa9c58a26c01c61b24489d18ea099e22e8b09
|
[
"MIT"
] | 1 |
2020-12-23T22:32:42.000Z
|
2020-12-23T22:35:56.000Z
|
tests/conftest.py
|
banteg/lido-vault
|
702fa9c58a26c01c61b24489d18ea099e22e8b09
|
[
"MIT"
] | 1 |
2020-12-21T08:45:07.000Z
|
2020-12-21T08:45:07.000Z
|
import pytest
from brownie import Wei
@pytest.fixture(scope="function", autouse=True)
def shared_setup(fn_isolation):
pass
@pytest.fixture(scope='module')
def nocoiner(accounts, lido):
assert lido.balanceOf(accounts[9]) == 0
return accounts[9]
@pytest.fixture(scope='module')
def ape(accounts):
return accounts[0]
@pytest.fixture(scope='module')
def whale(accounts):
return accounts[1]
@pytest.fixture()
def vault(LidoVault, ape):
return LidoVault.deploy({"from": ape})
@pytest.fixture(scope='module')
def lido(interface, accounts):
lido = interface.Lido("0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84")
oracle = accounts.at(lido.getOracle(), force=True)
return interface.Lido(lido, owner=oracle)
class Helpers:
@staticmethod
def filter_events_from(addr, events):
return list(filter(lambda evt: evt.address == addr, events))
@staticmethod
def assert_single_event_named(evt_name, tx, evt_keys_dict):
receiver_events = Helpers.filter_events_from(tx.receiver, tx.events[evt_name])
assert len(receiver_events) == 1
assert dict(receiver_events[0]) == evt_keys_dict
@staticmethod
def report_beacon_balance_increase(lido):
beacon_stat = lido.getBeaconStat().dict()
total_pooled_ether = lido.getTotalPooledEther()
new_beacon_balance = Wei(total_pooled_ether * 1.5) + "1 ether"
lido.pushBeacon(beacon_stat['beaconValidators'], new_beacon_balance)
@pytest.fixture(scope='module')
def helpers():
return Helpers
| 25.583333 | 84 | 0.721173 | 722 | 0.470358 | 0 | 0 | 1,442 | 0.939414 | 0 | 0 | 127 | 0.082736 |
963e0a388ab593079d1ff2e77544ecf12fa56919
| 112 |
py
|
Python
|
reqto/__init__.py
|
DovaX/reqto
|
4d3cc03535297fb0d5c946632e9de6a3a1ec5420
|
[
"MIT"
] | null | null | null |
reqto/__init__.py
|
DovaX/reqto
|
4d3cc03535297fb0d5c946632e9de6a3a1ec5420
|
[
"MIT"
] | null | null | null |
reqto/__init__.py
|
DovaX/reqto
|
4d3cc03535297fb0d5c946632e9de6a3a1ec5420
|
[
"MIT"
] | null | null | null |
from reqto.core.reqto import get, post, delete, put, patch, head
__all__ = ["get", "post", "delete", "put", "patch", "head"]
| 28 | 64 | 0.723214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
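The module simply re-exports the HTTP helpers; an illustrative call follows, under the assumption (not proven by the two lines above) that reqto.get mirrors the familiar requests.get signature and returns a requests-style response.

import reqto

# Hypothetical target URL; the signature and return type are assumptions here.
response = reqto.get("https://httpbin.org/get")
print(response.status_code)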
963e5b6e89e0809787a1d58ba17bc95ac8ccc84f
| 911 |
py
|
Python
|
setup.py
|
sgillies/fio-taxa
|
a278f366c23d1e0946bc4675de905bda712c2490
|
[
"MIT"
] | 2 |
2018-05-20T06:31:44.000Z
|
2021-12-02T20:59:46.000Z
|
setup.py
|
sgillies/fio-taxa
|
a278f366c23d1e0946bc4675de905bda712c2490
|
[
"MIT"
] | 1 |
2018-12-19T17:05:05.000Z
|
2018-12-19T17:05:05.000Z
|
setup.py
|
sgillies/fio-taxa
|
a278f366c23d1e0946bc4675de905bda712c2490
|
[
"MIT"
] | null | null | null |
from codecs import open as codecs_open
from setuptools import setup, find_packages
# Get the long description from the relevant file
with codecs_open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(name='fio_taxa',
version='1.0.0',
description=u"Classification of GeoJSON features",
long_description=long_description,
classifiers=[],
keywords='',
author=u"Sean Gillies",
author_email='[email protected]',
url='https://github.com/sgillies/fio-taxa',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'click', 'fiona'
],
extras_require={
'test': ['pytest'],
},
entry_points="""
[fiona.fio_commands]
taxa=fio_taxa.scripts.cli:taxa
"""
)
| 26.794118 | 72 | 0.630077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.371021 |
963e81d1f86297198f40e8bbac901cbb13572805
| 829 |
py
|
Python
|
python/q04.py
|
holisound/70-math-quizs-for-programmers
|
746d98435a496fd8313a233fe4c2a59fd11d3823
|
[
"MIT"
] | null | null | null |
python/q04.py
|
holisound/70-math-quizs-for-programmers
|
746d98435a496fd8313a233fe4c2a59fd11d3823
|
[
"MIT"
] | null | null | null |
python/q04.py
|
holisound/70-math-quizs-for-programmers
|
746d98435a496fd8313a233fe4c2a59fd11d3823
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from collections import deque
def cutBar(m, n):
res, now = 0, 1
while now < n:
now += now if now < m else m
res += 1
return res
def cutBarBFS(m, n):
if n == 1:
return 0
que = deque([n])
res = 0
while que:
size = len(que)
for _ in range(min(m, size)):
bar = que.popleft()
left = bar >> 1
right = bar - left
if left > 1:
que.append(left)
if right > 1:
que.append(right)
res += 1
return res
def cutBarDFS(m, n, now):
if now >= n:
return 0
if now < m:
return 1 + cutBarDFS(m, n, now * 2)
return 1 + cutBarDFS(m, n, now + m)
print(cutBar(3, 8))
print(cutBar(3, 20))
print(cutBar(5, 100))
print(cutBar(1, 1))
| 19.738095 | 43 | 0.472859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.027744 |
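With the three functions above in scope, a quick cross-check that the greedy, BFS and DFS variants agree; the expected cut counts for these small inputs can be verified by hand.

for m, n in [(3, 8), (3, 20)]:
    assert cutBar(m, n) == cutBarBFS(m, n) == cutBarDFS(m, n, 1)
    print(m, n, cutBar(m, n))  # 4 cuts for (3, 8), 8 cuts for (3, 20)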
963ee23183336e553e81e0efa85833a77f9df80d
| 6,108 |
py
|
Python
|
numba/core/itanium_mangler.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | null | null | null |
numba/core/itanium_mangler.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 1 |
2020-07-28T20:47:24.000Z
|
2020-07-28T20:47:24.000Z
|
numba/core/itanium_mangler.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Itanium CXX ABI Mangler
Reference: http://mentorembedded.github.io/cxx-abi/abi.html
The basics of the mangling scheme.
We are hijacking the CXX mangling scheme for our use. We map Python modules
into CXX namespace. A `module1.submodule2.foo` is mapped to
`module1::submodule2::foo`. For parameterized numba types, we treat them as
templated types; for example, `array(int64, 1d, C)` becomes an
`array<int64, 1, C>`.
All mangled names are prefixed with "_Z". It is followed by the name of the
entity. A name contains one or more identifiers. Each identifier is encoded
as "<num of char><name>". If the name is namespaced and, therefore,
has multiple identifiers, the entire name is encoded as "N<name>E".
For functions, arguments types follow. There are condensed encodings for basic
built-in types; e.g. "i" for int, "f" for float. For other types, the
previously mentioned name encoding should be used.
For templated types, the template parameters are encoded immediately after the
name. If it is namespaced, it should be within the 'N' 'E' marker. Template
parameters are encoded in "I<params>E", where each parameter is encoded using
the mentioned name encoding scheme. Template parameters can contain literal
values like the '1' in the array type shown earlier. There is special encoding
scheme for them to avoid leading digits.
"""
import re
from numba.core import types
# According the scheme, valid characters for mangled names are [a-zA-Z0-9_].
# We borrow the '_' as the escape character to encode invalid char into
# '_xx' where 'xx' is the hex codepoint.
_re_invalid_char = re.compile(r'[^a-z0-9_]', re.I)
PREFIX = "_Z"
# Numba types to mangled type code. These correspond with the codes listed in
# https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-builtin
N2CODE = {
types.void: 'v',
types.boolean: 'b',
types.uint8: 'h',
types.int8: 'a',
types.uint16: 't',
types.int16: 's',
types.uint32: 'j',
types.int32: 'i',
types.uint64: 'y',
types.int64: 'x',
types.float16: 'Dh',
types.float32: 'f',
types.float64: 'd'
}
def _escape_string(text):
"""Escape the given string so that it only contains ASCII characters
of [a-zA-Z0-9_$].
The dollar symbol ($) and other invalid characters are escaped into
the string sequence of "$xx" where "xx" is the hex codepoint of the char.
Multibyte characters are encoded into utf8 and converted into the above
hex format.
"""
def repl(m):
return ''.join(('_%02x' % ch)
for ch in m.group(0).encode('utf8'))
ret = re.sub(_re_invalid_char, repl, text)
# Return str if we got a unicode (for py2)
if not isinstance(ret, str):
return ret.encode('ascii')
return ret
def _fix_lead_digit(text):
"""
Fix text with leading digit
"""
if text and text[0].isdigit():
return '_' + text
else:
return text
def _len_encoded(string):
"""
Prefix string with digit indicating the length.
Add underscore if string is prefixed with digits.
"""
string = _fix_lead_digit(string)
return '%u%s' % (len(string), string)
def mangle_abi_tag(abi_tag: str) -> str:
return "B" + _len_encoded(_escape_string(abi_tag))
def mangle_identifier(ident, template_params='', *, abi_tags=(), uid=None):
"""
Mangle the identifier with optional template parameters and abi_tags.
Note:
This treats '.' as '::' in C++.
"""
if uid is not None:
# Add uid to abi-tags
abi_tags = (f"v{uid}", *abi_tags)
parts = [_len_encoded(_escape_string(x)) for x in ident.split('.')]
enc_abi_tags = list(map(mangle_abi_tag, abi_tags))
extras = template_params + ''.join(enc_abi_tags)
if len(parts) > 1:
return 'N%s%sE' % (''.join(parts), extras)
else:
return '%s%s' % (parts[0], extras)
def mangle_type_or_value(typ):
"""
Mangle type parameter and arbitrary value.
"""
# Handle numba types
if isinstance(typ, types.Type):
if typ in N2CODE:
return N2CODE[typ]
else:
return mangle_templated_ident(*typ.mangling_args)
# Handle integer literal
elif isinstance(typ, int):
return 'Li%dE' % typ
# Handle str as identifier
elif isinstance(typ, str):
return mangle_identifier(typ)
# Otherwise
else:
enc = _escape_string(str(typ))
return _len_encoded(enc)
# Alias
mangle_type = mangle_type_or_value
mangle_value = mangle_type_or_value
def mangle_templated_ident(identifier, parameters):
"""
Mangle templated identifier.
"""
template_params = ('I%sE' % ''.join(map(mangle_type_or_value, parameters))
if parameters else '')
return mangle_identifier(identifier, template_params)
def mangle_args(argtys):
"""
Mangle sequence of Numba type objects and arbitrary values.
"""
return ''.join([mangle_type_or_value(t) for t in argtys])
def mangle(ident, argtys, *, abi_tags=(), uid=None):
"""
Mangle identifier with Numba type objects and abi-tags.
"""
return ''.join([PREFIX,
mangle_identifier(ident, abi_tags=abi_tags, uid=uid),
mangle_args(argtys)])
def prepend_namespace(mangled, ns):
"""
Prepend namespace to mangled name.
"""
if not mangled.startswith(PREFIX):
raise ValueError('input is not a mangled name')
elif mangled.startswith(PREFIX + 'N'):
# nested
remaining = mangled[3:]
ret = PREFIX + 'N' + mangle_identifier(ns) + remaining
else:
# non-nested
remaining = mangled[2:]
head, tail = _split_mangled_ident(remaining)
ret = PREFIX + 'N' + mangle_identifier(ns) + head + 'E' + tail
return ret
def _split_mangled_ident(mangled):
"""
Returns `(head, tail)` where `head` is the `<len> + <name>` encoded
identifier and `tail` is the remaining.
"""
ct = int(mangled)
ctlen = len(str(ct))
at = ctlen + ct
return mangled[:at], mangled[at:]
| 29.650485 | 79 | 0.655861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,110 | 0.509168 |
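A small illustration of the scheme described in the module docstring above, assuming a numba checkout matching this file; the expected strings follow directly from the length-prefix rules.

from numba.core import itanium_mangler, types

# Each dotted part becomes '<len><name>', wrapped in 'N...E' for namespaces.
print(itanium_mangler.mangle_identifier("module1.submodule2.foo"))
# -> N7module110submodule23fooE

# A function of one int64 argument: '_Z' + '3foo' + the builtin code 'x'.
print(itanium_mangler.mangle("foo", [types.int64]))
# -> _Z3foox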
963ee361844cacc5317b943abf161599e3643da8
| 1,247 |
py
|
Python
|
DRAFTS/CookieStealer.py
|
henryza/Python
|
34af4a915e7bec27268b619246833e65e48d1cb8
|
[
"MIT"
] | null | null | null |
DRAFTS/CookieStealer.py
|
henryza/Python
|
34af4a915e7bec27268b619246833e65e48d1cb8
|
[
"MIT"
] | null | null | null |
DRAFTS/CookieStealer.py
|
henryza/Python
|
34af4a915e7bec27268b619246833e65e48d1cb8
|
[
"MIT"
] | null | null | null |
import requests
import json
class test(object):
def __init__(self):
self._debug = False
self._http_debug = False
self._https = True
self._session = requests.session() # use single session for all requests
def update_csrf(self):
# Retrieve server csrf and update session's headers
for cookie in self._session.cookies:
if cookie.name == 'ccsrftoken':
csrftoken = cookie.value[1:-1] # token stored as a list
self._session.headers.update({'X-CSRFTOKEN': csrftoken})
def login(self,host,username,password):
self.host = host
if self._https is True:
self.url_prefix = 'https://' + self.host
else:
self.url_prefix = 'http://' + self.host
url = self.url_prefix + '/logincheck'
res = self._session.post(url,
data='username='+username+'&secretkey='+password,
verify = False)
#self.dprint(res)
# Update session's csrftoken
self.update_csrf()
def get(self, url):
res = self._session.get(url)
return res.content
if __name__ == '__main__':
    # 'ip', 'username' and 'password' are placeholders the caller must define.
    f = test()
    f.login(ip, username, password)
| 30.414634 | 81 | 0.567763 | 1,169 | 0.93745 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.190858 |
964088ff23e1f89499e5fcf0c7eaef0bac779407
| 15,483 |
py
|
Python
|
viewers/trpl_h5.py
|
ScopeFoundry/FoundryDataBrowser
|
604506a2e9cabe757f1c5430b688fb98788b6251
|
[
"BSD-3-Clause"
] | 6 |
2017-01-10T20:13:38.000Z
|
2019-05-23T16:25:12.000Z
|
viewers/trpl_h5.py
|
ScopeFoundry/FoundryDataBrowser
|
604506a2e9cabe757f1c5430b688fb98788b6251
|
[
"BSD-3-Clause"
] | null | null | null |
viewers/trpl_h5.py
|
ScopeFoundry/FoundryDataBrowser
|
604506a2e9cabe757f1c5430b688fb98788b6251
|
[
"BSD-3-Clause"
] | null | null | null |
from ScopeFoundry.data_browser import DataBrowser
from FoundryDataBrowser.viewers.hyperspec_base_view import HyperSpectralBaseView
import numpy as np
import h5py
from qtpy import QtWidgets
from ScopeFoundry.logged_quantity import LQCollection
import time
from FoundryDataBrowser.viewers.plot_n_fit import MonoExponentialFitter, BiExponentialFitter, SemiLogYPolyFitter, TauXFitter
class TRPLH5View(HyperSpectralBaseView):
name = 'trpl_scan_h5'
def is_file_supported(self, fname):
for name in ["_trpl_2d_scan.h5", "_trpl_scan.h5", "Picoharp_MCL_2DSlowScan.h5"]:
if name in fname:
self.fname = fname
return True
return False
def load_data(self, fname):
if hasattr(self, 'h5_file'):
try:
self.h5_file.close()
except Exception as err:
print("Could not close old h5 file:", err)
del self.h5_file
self.h5_file = h5py.File(fname)
load_success = False
for measure_path in ['measurement/trpl_scan/', 'measurement/trpl_2d_scan/',
'measurement/Picoharp_MCL_2DSlowScan/']:
if measure_path in self.h5_file:
self.H = self.h5_file[measure_path]
load_success = True
if not load_success:
raise ValueError(self.name, "Measurement group not found in h5 file", fname)
# Note: The behavior of this viewer depends somewhat on the counting device:
# see also set_hyperspec_data
if 'counting_device' in self.H['settings'].attrs.keys():
self.counting_device = self.H['settings'].attrs['counting_device']
else:
self.counting_device = 'picoharp'
self.S = self.h5_file['hardware/{}/settings'.format(self.counting_device)].attrs
time_array = self.H['time_array'][:] * 1e-3
self.time_trace_map = self.H['time_trace_map'][:]
# set defaults
self.spec_x_array = time_array
self.set_hyperspec_data()
integrated_count_map = self.hyperspec_data.sum(axis=-1)
self.display_image = integrated_count_map
print(self.name, 'load_data of shape', self.hyperspec_data.shape)
if 'dark_histogram' in self.H:
self.dark_histogram = self.H['dark_histogram'][:]
if np.ndim(self.dark_histogram)==1:
self.dark_histogram = np.expand_dims(self.dark_histogram, 0)
self.bg_subtract.add_choices('dark_histogram')
if 'h_span' in self.H['settings'].attrs:
h_span = float(self.H['settings'].attrs['h_span'])
units = self.H['settings/units'].attrs['h0']
self.set_scalebar_params(h_span, units)
self.h5_file.close()
self.roll_offset.change_min_max(0, self.spec_x_array.shape[0])
def set_hyperspec_data(self):
# this function sets the hyperspec data based on self.time_trace_map
# and (`chan`, `frame) setting. The shape of time_trace_map depends on counting device:
# 1. 4D picoharp data: (Nframe, Ny, Nx, Ntime_bins)
# 2. 5D hydraharp data: (Nframe, Ny, Nx, Nchan, Ntime_bins)
if hasattr(self, 'time_trace_map'):
shape = self.time_trace_map.shape
n_frame = shape[0]
self.settings.frame.change_min_max(0, n_frame-1)
frame = self.settings['frame']
if np.ndim(self.time_trace_map) == 5:
n_chan = shape[-2]
self.settings.chan.change_min_max(0, n_chan-1)
hyperspec_data = self.time_trace_map[frame,:,:,self.settings['chan'],:]
if np.ndim(self.time_trace_map) == 4:
self.settings['chan'] = 0
self.settings.chan.change_min_max(0, 0)
hyperspec_data = self.time_trace_map[frame,:]
roll_offset = self.roll_offset.val
if roll_offset == 0:
self.hyperspec_data = hyperspec_data
else:
self.hyperspec_data = np.roll(hyperspec_data, self.settings['roll_offset'], -1)
if hasattr(self, 'dark_histogram'):
self.dark_histogram = np.roll(self.dark_histogram, self.settings['roll_offset'], -1)
def add_descriptor_suffixes(self, key):
#key += '_chan{}'.format(str(self.settings['chan']))
return HyperSpectralBaseView.add_descriptor_suffixes(self, key)
def get_bg(self):
if self.bg_subtract.val == 'dark_histogram':
bg = self.dark_histogram[self.settings['chan'],self.x_slicer.slice].mean()
if not self.x_slicer.activated.val:
self.x_slicer.activated.update_value(True)
#self.x_slicer.set_label(title='dark_histogram bg', text=str(bg))
return bg
else:
return HyperSpectralBaseView.get_bg(self)
def scan_specific_setup(self):
self.spec_plot.setLogMode(False, True)
self.spec_plot.setLabel('left', 'Intensity')
self.spec_plot.setLabel('bottom', 'time')
S = self.settings
self.time_unit = self.settings.New('time_unit', str, initial = 'ns')
self.settings.New('chan', dtype=int, initial=0, vmin=0)
self.settings.chan.add_listener(self.set_hyperspec_data)
self.settings.New('frame', dtype=int, initial=0, vmin=0)
self.settings.frame.add_listener(self.set_hyperspec_data)
self.roll_offset = self.settings.New('roll_offset', int, initial=0, unit='idx')
self.roll_offset.add_listener(self.on_change_roll_offset)
self.use_roll_x_target = self.settings.New('use_roll_max_to', bool, initial=False)
self.roll_x_target = self.settings.New('roll_x_target', initial=1, unit='[x]')
self.use_roll_x_target.add_listener(self.on_change_roll_x_target)
self.roll_x_target.add_listener(self.on_change_roll_x_target)
self.export_settings = ES = LQCollection()
ES.New('include_fit_results', bool, initial=True)
ES.New('plot_title', str, initial='')
ES.New('auto_y_lim', bool, initial = True)
ES.New('y_lim_min', initial = -1)
ES.New('y_lim_max', initial = -1)
ES.New('auto_x_lim', bool, initial = True)
ES.New('x_lim_min', initial = -1)
ES.New('x_lim_max', initial = -1)
export_ui = ES.New_UI()
self.export_dock.addWidget( export_ui )
self.export_plot_as_jpeg_pushButton = QtWidgets.QPushButton('export plot as jpeg')
self.export_plot_as_jpeg_pushButton.clicked.connect(self.export_plot_as_jpeg)
self.export_dock.addWidget( self.export_plot_as_jpeg_pushButton )
self.export_plot_as_xlsx_pushButton = QtWidgets.QPushButton('export plot as xlsx')
self.export_plot_as_xlsx_pushButton.clicked.connect(self.export_plot_as_xlsx)
self.export_dock.addWidget( self.export_plot_as_xlsx_pushButton )
self.plot_n_fit.add_fitter(SemiLogYPolyFitter())
self.plot_n_fit.add_fitter(MonoExponentialFitter())
self.plot_n_fit.add_fitter(BiExponentialFitter())
self.plot_n_fit.add_fitter(TauXFitter())
def on_change_roll_x_target(self):
'''
Note: might call a function which reloads the data
'''
if self.use_roll_x_target.val:
target_x = self.roll_x_target.val
arr = self.time_trace_map
y = arr.mean( tuple(range(arr.ndim-1)) )
x = self.spec_x_array
delta_index = np.argmin((x-target_x)**2) - y.argmax()
new_roll_offset = delta_index % x.shape[0]
if new_roll_offset != self.roll_offset.val:
self.roll_offset.update_value(new_roll_offset)
def on_change_roll_offset(self):
self.set_hyperspec_data()
self.update_display()
def export_maps_as_jpegs(self):
for name,image in self.display_images.items():
if 'median' in name:
cmap = 'rainbow'
elif 'tau' in name:
cmap = 'viridis'
else:
cmap = 'gist_heat'
self.export_image_as_jpeg(name, image, cmap)
def save_fit_res_table(self, h5_file):
res_table = self.plot_n_fit.get_result_table()
h5_group = h5_file.create_group('fit_res_table')
for (name, number, unit) in res_table:
h5_group.attrs[name] = number
h5_group.attrs[name + '_unit'] = unit
def gather_plot_data_for_export(self):
export_dict = {}
if self.settings['show_circ_line']:
x,y = self.get_xy(ji_slice=self.rect_roi_slice, apply_use_x_slice = False)
x_shift = x[y.argmax()]
export_dict.update({'point data':(x-x_shift,y)})
if self.settings['show_rect_line']:
x,y = self.get_xy(ji_slice=self.rect_roi_slice, apply_use_x_slice = False)
x_shift = x[y.argmax()]
export_dict.update({'rectangle data':(x-x_shift,y)})
P = self.plot_n_fit
export_dict.update({'fit':(P.x_fit_data-x_shift, P.fit)})
return export_dict
def export_plot_as_xlsx(self):
fname = self.databrowser.settings['data_filename']
xlsx_fname = fname.replace( '.h5','_{:0.0f}.xlsx'.format(time.time()) )
import xlsxwriter
workbook = xlsxwriter.Workbook(xlsx_fname)
worksheet = workbook.add_worksheet('data')
for i,(label,(x,y)) in enumerate(self.gather_plot_data_for_export().items()):
worksheet.write(0, i*2, label)
for ii_, X in enumerate((x,y)):
worksheet.write(1, i*2+ii_, ['time', 'counts'][ii_])
worksheet.write_column(row=2, col=i*2+ii_, data = X)
if self.export_settings['include_fit_results']:
worksheet = workbook.add_worksheet('fit_results')
for i,row_ in enumerate(self.plot_n_fit.get_result_table()):
worksheet.write_row(i,0,row_)
workbook.close()
self.databrowser.ui.statusbar.showMessage('exported data to ' + xlsx_fname)
def export_plot_as_jpeg(self):
print('export_plot_as_jpeg()')
import matplotlib.pylab as plt
ES = self.export_settings
P = self.plot_n_fit
L = self.x_slicer.settings['stop'] - self.x_slicer.settings['start']
plt.figure()
ax = plt.subplot(111)
y_lim = [None, None]
x_lim = [None, None]
for label,(x,y) in self.gather_plot_data_for_export().items():
ax.semilogy(x,y, label = label)
if len(y) == L:
y_lim = [0.9*y[-1], 1.05*y[0]]
x_lim = [0.99*x[0], x[-1]*1.1]
# Apply limits
if ES['auto_y_lim']:
ax.set_ylim(y_lim)
else:
ax.set_ylim(ES['y_lim_min'], ES['y_lim_max'])
if ES['auto_x_lim']:
ax.set_xlim(x_lim)
else:
ax.set_xlim(ES['x_lim_min'], ES['x_lim_max'])
plt.legend(loc=1)
# Put the fit results somewhere
if ES['include_fit_results']:
tab = plt.table(cellText=P.get_result_table(),
colWidths=[0.15,0.1,0.04],
loc='lower left',
colLoc=['right','right','left'],
)
tab.auto_set_font_size(True)
for cell in tab.get_celld().values():
cell.set_linewidth(0)
if ES['plot_title'] != '':
plt.title(ES['plot_title'])
plt.xlabel('time ({})'.format(self.settings['time_unit']))
plt.ylabel('intensity (a.u.)')
plt.tight_layout()
fname = self.databrowser.settings['data_filename']
fig_name = fname.replace( '.h5','_{:0.0f}.jpg'.format(time.time()) )
plt.savefig(fig_name, dpi=300)
plt.close()
self.databrowser.ui.statusbar.showMessage('exported new data to ' + fig_name)
"""class TRPL3dNPZView(HyperSpectralBaseView):
name = 'trpl_3d_npz'
def setup(self):
HyperSpectralBaseView.setup(self)
TRPLNPZView.scan_specific_setup(self)
self.settings.New('plane', dtype=str, initial='xy', choices=('xy', 'yz', 'xz'))
self.settings.New('index', dtype=int)
self.settings.New('auto_level', dtype=bool, initial=True)
for name in ['plane', 'index', 'auto_level']:
self.settings.get_lq(name).add_listener(self.update_display)
#self.ui = QtWidgets.QWidget()
#self.ui.setLayout(QtWidgets.QVBoxLayout())
self.dockarea.addDock(name='Image', widget=self.settings.New_UI())
self.info_label = QtWidgets.QLabel()
self.dockarea.addDock(name='info', widget=self.info_label)
#self.imview = pg.ImageView()
#self.ui.layout().addWidget(self.imview, stretch=1)
#self.graph_layout = pg.GraphicsLayoutWidget()
#self.graph_layout.addPlot()
def on_change_data_filename(self, fname):
try:
TRPLNPZView.load_data(self, fname)
self.update_display()
except Exception as err:
self.imview.setImage(np.zeros((10,10)))
self.databrowser.ui.statusbar.showMessage("failed to load %s:\n%s" %(fname, err))
raise(err)
def is_file_supported(self, fname):
return "trpl_scan3d.npz" in fname
def update_display(self):
ii = self.settings['index']
plane = self.settings['plane']
if plane == 'xy':
arr_slice = np.s_[ii,:,:]
index_max = self.dat['integrated_count_map'].shape[0]-1
elif plane == 'yz':
arr_slice = np.s_[:,:,ii]
index_max = self.dat['integrated_count_map'].shape[2]-1
elif plane == 'xz':
arr_slice = np.s_[:,ii,:]
index_max = self.dat['integrated_count_map'].shape[1]-1
self.settings.index.change_min_max(0, index_max)
self.hyperspec_data = self.time_trace_map[:,:,:,0:self.num_hist_chans][arr_slice]+1
self.display_image = self.integrated_count_map[arr_slice]
#self.imview.setImage(self.dat['integrated_count_map'][arr_slice], autoLevels=self.settings['auto_level'], )
other_ax = dict(xy='z', yz='x', xz='y' )[plane]
self.info_label.setText("{} plane {}={} um (index={})".format(
plane, other_ax, self.dat[other_ax+'_array'][ii], ii))
HyperSpectralBaseView.update_display(self)"""
if __name__ == '__main__':
import sys
app = DataBrowser(sys.argv)
app.load_view(TRPLH5View(app))
sys.exit(app.exec_())
| 39.497449 | 124 | 0.580831 | 12,300 | 0.79442 | 0 | 0 | 0 | 0 | 0 | 0 | 4,603 | 0.297294 |
964093368998bbfce9f4d1b33cd4d8d11bcb3ef0
| 854 |
py
|
Python
|
python/matrices.py
|
silvajhonatan/robotics
|
d1097809e88c744658dab6d661092b6ea8f0e13a
|
[
"MIT"
] | 3 |
2017-11-16T18:34:27.000Z
|
2021-01-28T15:33:46.000Z
|
python/matrices.py
|
sjhonatan/robotics
|
d1097809e88c744658dab6d661092b6ea8f0e13a
|
[
"MIT"
] | null | null | null |
python/matrices.py
|
sjhonatan/robotics
|
d1097809e88c744658dab6d661092b6ea8f0e13a
|
[
"MIT"
] | null | null | null |
import numpy as np
import numpy.matlib
# soma das matrizes
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A + B
print(C)
# soma das linhas
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
s_linha = sum(A)
print(s_linha)
# soma dos elementos
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
soma = sum(sum(A))
print(soma)
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A - B
print(C)
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = np.matmul(A,B)
print(C)
# transposta
A = np.array([[1,0],[0,2]])
A_transposta = A.T
print(A_transposta)
# inversa
from numpy.linalg import *
from numpy import linalg as LA
A = np.array([[1,3],[2,0]])
A_inv = inv(A)
print(A_inv)
I = np.matmul(A,A_inv)
print(I)
A = ([2,2],[4,8])
A_det = LA.det(A)
print(A_det)
A = ([[1,2],[1,2]])
A_n = LA.matrix_power(A, 2)
| 16.423077 | 30 | 0.564403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.092506 |
9640feccc968d6a78b76b7178f48a08b2309b36c
| 5,459 |
py
|
Python
|
builder/action.py
|
nagisc007/storybuilder
|
54b28934de8acedbe35930ce27e12a7e75f91be0
|
[
"MIT"
] | null | null | null |
builder/action.py
|
nagisc007/storybuilder
|
54b28934de8acedbe35930ce27e12a7e75f91be0
|
[
"MIT"
] | 176 |
2019-03-07T13:31:26.000Z
|
2019-11-02T12:38:23.000Z
|
builder/action.py
|
nagisc007/storybuilder
|
54b28934de8acedbe35930ce27e12a7e75f91be0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Define action class.
"""
from enum import Enum
from . import assertion
from .basedata import BaseData
from .description import Description, NoDesc, DescType
from .flag import Flag, NoFlag, NoDeflag
from .basesubject import NoSubject
from .person import Person
from .chara import Chara
from .who import Who
class ActType(Enum):
"""Action type.
"""
ACT = "act" # 全般
MOVE = "move" # 動かす
COME = "come" # 出現
GO = "go" # 消去
LOOK = "look" # 描画
BE = "be" # 外部状態
THINK = "think" # 内部状態
HAVE = "have" # 所有変更
HEAR = "hear" # 効果音などの音声
TALK = "talk" # 台詞
TAG = "tag" # for tag
class TagType(Enum):
"""Tag type
"""
BR = "breakline" # BR
COMMENT = "comment" # コメント
HR = "horizontalline" # HR
SYMBOL = "symbol" # シンボル
TITLE = "title" # タイトル
SET_LAYER = "layer" # レイヤー用
class Action(BaseData):
"""Data type of an action.
"""
DEF_PRIORITY = 5
MAX_PRIORITY = 10
MIN_PRIORITY = 0
DEF_LAYER = "__default__"
MAIN_LAYER = "main"
def __init__(self, subject: [Person, Chara, None],
outline: str="", act_type: ActType=ActType.ACT,
layer: str=DEF_LAYER):
super().__init__("__action__")
_subject_is_str = isinstance(subject, str)
self._subject = Who() if _subject_is_str else Action._validatedSubject(subject)
self._outline = assertion.is_str(subject if _subject_is_str else outline)
self._act_type = assertion.is_instance(act_type, ActType)
self._description = NoDesc()
self._flag = NoFlag()
self._deflag = NoDeflag()
self._priority = Action.DEF_PRIORITY
self._layer = assertion.is_str(layer)
def inherited(self, subject=None, outline=None, desc=None):
return Action(subject if subject else self.subject,
outline if outline else self.outline,
self.act_type) \
.flag(self.getFlag()).deflag(self.getDeflag()) \
._setDescription(desc if desc else self.description,
self.description.desc_type) \
.setPriority(self.priority) \
.setLayer(self.layer)
@property
def act_type(self): return self._act_type
@property
def subject(self): return self._subject
@property
def outline(self): return self._outline
@property
def description(self): return self._description
@property
def priority(self): return self._priority
@property
def layer(self): return self._layer
def setPriority(self, pri: int):
self._priority = assertion.is_between(assertion.is_int(pri),
Action.MAX_PRIORITY, Action.MIN_PRIORITY)
return self
def setLayer(self, layer: str):
self._layer = assertion.is_str(layer)
return self
def flag(self, val: [str, NoFlag]):
if isinstance(val, Flag):
self._flag = val
elif isinstance(val, str):
self._flag = Flag(val)
else:
self._flag = NoFlag()
return self
def deflag(self, val: [str, NoDeflag]):
if isinstance(val, Flag):
self._deflag = val
elif isinstance(val, str):
self._deflag = Flag(val, True)
else:
self._deflag = NoDeflag()
return self
def getFlag(self): return self._flag
def getDeflag(self): return self._deflag
def omit(self):
self._priority = Action.MIN_PRIORITY
return self
# methods
def desc(self, *args):
self._description = Description(*args, desc_type=DescType.DESC)
return self
def d(self, *args): return self.desc(*args)
def tell(self, *args):
self._description = Description(*args, desc_type=DescType.DIALOGUE)
return self
def t(self, *args): return self.tell(*args)
def comp(self, *args):
self._description = Description(*args, desc_type=DescType.COMPLEX)
return self
def same(self, desc_type: str=None):
if not desc_type:
desc_type = 't' if self.act_type is ActType.TALK else 'd'
if desc_type in ('t', 'tell'):
self.tell(self.outline)
elif desc_type in ('c', 'comp'):
self.comp(self.outline)
else:
self.desc(self.outline)
return self
# private
def _validatedSubject(sub: [str, Person, Chara, None]):
if isinstance(sub, str):
return Who()
elif isinstance(sub, (Person, Chara)):
return sub
else:
return NoSubject()
def _setDescription(self, descs, desc_type: DescType):
if isinstance(descs, Description):
self._description = descs
else:
self._description = Description(*descs,
desc_type=desc_type)
return self
class TagAction(Action):
def __init__(self, info: str, subinfo: str="", tag_type: TagType=TagType.COMMENT):
super().__init__(None, info, ActType.TAG)
self._subinfo = assertion.is_str(subinfo)
self._tag_type = assertion.is_instance(tag_type, TagType)
@property
def info(self): return self._outline
@property
def subinfo(self): return self._subinfo
@property
def tag_type(self): return self._tag_type
def inherited(self):
return TagAction(self, self.info, self.subinfo, self.tag_type)
| 28.432292 | 87 | 0.605972 | 5,214 | 0.937938 | 0 | 0 | 484 | 0.087066 | 0 | 0 | 517 | 0.093002 |
9641ba7ef69f2c86256af69c136b624ad8b36e71
| 1,025 |
py
|
Python
|
pype/hosts/fusion/plugins/publish/increment_current_file_deadline.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
pype/hosts/fusion/plugins/publish/increment_current_file_deadline.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
pype/hosts/fusion/plugins/publish/increment_current_file_deadline.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
import pyblish.api
class FusionIncrementCurrentFile(pyblish.api.ContextPlugin):
"""Increment the current file.
Saves the current file with an increased version number.
"""
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["fusion"]
families = ["render.farm"]
optional = True
def process(self, context):
from pype.lib import version_up
from pype.action import get_errored_plugins_from_data
errored_plugins = get_errored_plugins_from_data(context)
if any(plugin.__name__ == "FusionSubmitDeadline"
for plugin in errored_plugins):
raise RuntimeError("Skipping incrementing current file because "
"submission to render farm failed.")
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current_filepath = context.data["currentFile"]
new_filepath = version_up(current_filepath)
comp.Save(new_filepath)
| 29.285714 | 76 | 0.66439 | 1,003 | 0.978537 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.282927 |
9642f267112ae3cb7eec037a994d03366ec2da1a
| 2,346 |
py
|
Python
|
tests/integration_tests/framework/flask_utils.py
|
ilan-WS/cloudify-manager
|
510d8a277c848db351f38fc5b264806b2cb36d0b
|
[
"Apache-2.0"
] | 124 |
2015-01-22T22:28:37.000Z
|
2022-02-26T23:12:06.000Z
|
tests/integration_tests/framework/flask_utils.py
|
cloudify-cosmo/cloudify-manager
|
4a3f44ceb49d449bc5ebc8766b1c7b9c174ff972
|
[
"Apache-2.0"
] | 345 |
2015-01-08T15:49:40.000Z
|
2022-03-29T08:33:00.000Z
|
tests/integration_tests/framework/flask_utils.py
|
ilan-WS/cloudify-manager
|
510d8a277c848db351f38fc5b264806b2cb36d0b
|
[
"Apache-2.0"
] | 77 |
2015-01-07T14:04:35.000Z
|
2022-03-07T22:46:00.000Z
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from cloudify.utils import setup_logger
from integration_tests.framework.docker import (execute,
copy_file_to_manager)
from integration_tests.tests.constants import MANAGER_PYTHON
from integration_tests.tests.utils import get_resource
logger = setup_logger('Flask Utils', logging.INFO)
security_config = None
PREPARE_SCRIPT_PATH = '/tmp/prepare_reset_storage.py'
SCRIPT_PATH = '/tmp/reset_storage.py'
CONFIG_PATH = '/tmp/reset_storage_config.json'
def prepare_reset_storage_script(container_id):
reset_script = get_resource('scripts/reset_storage.py')
prepare = get_resource('scripts/prepare_reset_storage.py')
copy_file_to_manager(container_id, reset_script, SCRIPT_PATH)
copy_file_to_manager(container_id, prepare, PREPARE_SCRIPT_PATH)
execute(container_id,
[MANAGER_PYTHON, PREPARE_SCRIPT_PATH, '--config', CONFIG_PATH])
def reset_storage(container_id):
logger.info('Resetting PostgreSQL DB')
# reset the storage by calling a script on the manager, to access
# localhost-only APIs (rabbitmq management api)
execute(container_id,
[MANAGER_PYTHON, SCRIPT_PATH, '--config', CONFIG_PATH])
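
# Illustrative usage (a hedged sketch, not part of the original module): a test
# framework would typically prepare the scripts once per container and then
# reset storage between tests, e.g.
#   prepare_reset_storage_script(container_id)
#   reset_storage(container_id)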
def set_ldap(config_data):
logger.info('Setting LDAP configuration')
_prepare_set_ldap_script()
execute("{manager_python} {script_path} --config '{cfg_data}'"
.format(manager_python=MANAGER_PYTHON,
script_path='/tmp/set_ldap.py',
cfg_data=json.dumps(config_data)))
def _prepare_set_ldap_script():
set_ldap_script = get_resource('scripts/set_ldap.py')
copy_file_to_manager(set_ldap_script, '/tmp/set_ldap.py')
| 36.65625 | 75 | 0.738704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,069 | 0.455669 |
9643beb9c22472b136ce8bcd1f8f9fb526f1f46a
| 11,096 |
py
|
Python
|
dependencies/FontTools/Lib/fontTools/misc/bezierTools.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 21 |
2015-01-16T05:10:02.000Z
|
2021-06-11T20:48:15.000Z
|
dependencies/FontTools/Lib/fontTools/misc/bezierTools.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 1 |
2019-09-09T12:10:27.000Z
|
2020-05-22T10:12:14.000Z
|
dependencies/FontTools/Lib/fontTools/misc/bezierTools.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 2 |
2015-05-03T04:51:08.000Z
|
2018-08-24T08:28:53.000Z
|
"""fontTools.misc.bezierTools.py -- tools for working with bezier path segments."""
__all__ = [
"calcQuadraticBounds",
"calcCubicBounds",
"splitLine",
"splitQuadratic",
"splitCubic",
"splitQuadraticAtT",
"splitCubicAtT",
"solveQuadratic",
"solveCubic",
]
from fontTools.misc.arrayTools import calcBounds
import numpy
epsilon = 1e-12
def calcQuadraticBounds(pt1, pt2, pt3):
"""Return the bounding rectangle for a qudratic bezier segment.
pt1 and pt3 are the "anchor" points, pt2 is the "handle".
>>> calcQuadraticBounds((0, 0), (50, 100), (100, 0))
(0.0, 0.0, 100.0, 50.0)
>>> calcQuadraticBounds((0, 0), (100, 0), (100, 100))
(0.0, 0.0, 100.0, 100.0)
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
# calc first derivative
ax, ay = a * 2
bx, by = b
roots = []
if ax != 0:
roots.append(-bx/ax)
if ay != 0:
roots.append(-by/ay)
points = [a*t*t + b*t + c for t in roots if 0 <= t < 1] + [pt1, pt3]
return calcBounds(points)
def calcCubicBounds(pt1, pt2, pt3, pt4):
"""Return the bounding rectangle for a cubic bezier segment.
pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles".
>>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0))
(0.0, 0.0, 100.0, 75.0)
>>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100))
(0.0, 0.0, 100.0, 100.0)
>>> calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))
(35.5662432703, 0.0, 64.4337567297, 75.0)
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
# calc first derivative
ax, ay = a * 3.0
bx, by = b * 2.0
cx, cy = c
xRoots = [t for t in solveQuadratic(ax, bx, cx) if 0 <= t < 1]
yRoots = [t for t in solveQuadratic(ay, by, cy) if 0 <= t < 1]
roots = xRoots + yRoots
points = [(a*t*t*t + b*t*t + c * t + d) for t in roots] + [pt1, pt4]
return calcBounds(points)
def splitLine(pt1, pt2, where, isHorizontal):
"""Split the line between pt1 and pt2 at position 'where', which
is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of two line segments if the
line was successfully split, or a list containing the original
line.
>>> printSegments(splitLine((0, 0), (100, 100), 50, True))
((0, 0), (50.0, 50.0))
((50.0, 50.0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 100, True))
((0, 0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 0, True))
((0, 0), (0.0, 0.0))
((0.0, 0.0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 0, False))
((0, 0), (0.0, 0.0))
((0.0, 0.0), (100, 100))
"""
pt1, pt2 = numpy.array((pt1, pt2))
a = (pt2 - pt1)
b = pt1
ax = a[isHorizontal]
if ax == 0:
return [(pt1, pt2)]
t = float(where - b[isHorizontal]) / ax
if 0 <= t < 1:
midPt = a * t + b
return [(pt1, midPt), (midPt, pt2)]
else:
return [(pt1, pt2)]
def splitQuadratic(pt1, pt2, pt3, where, isHorizontal):
"""Split the quadratic curve between pt1, pt2 and pt3 at position 'where',
which is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of curve segments.
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False))
((0, 0), (50, 100), (100, 0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False))
((0.0, 0.0), (12.5, 25.0), (25.0, 37.5))
((25.0, 37.5), (62.5, 75.0), (100.0, 0.0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True))
((0.0, 0.0), (7.32233047034, 14.6446609407), (14.6446609407, 25.0))
((14.6446609407, 25.0), (50.0, 75.0), (85.3553390593, 25.0))
((85.3553390593, 25.0), (92.6776695297, 14.6446609407), (100.0, -7.1054273576e-15))
>>> # XXX I'm not at all sure if the following behavior is desirable:
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (50.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
solutions = solveQuadratic(a[isHorizontal], b[isHorizontal],
c[isHorizontal] - where)
solutions = [t for t in solutions if 0 <= t < 1]
solutions.sort()
if not solutions:
return [(pt1, pt2, pt3)]
return _splitQuadraticAtT(a, b, c, *solutions)
def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal):
"""Split the cubic curve between pt1, pt2, pt3 and pt4 at position 'where',
which is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of curve segments.
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False))
((0, 0), (25, 100), (75, 100), (100, 0))
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True))
((0.0, 0.0), (2.2937927384, 9.17517095361), (4.79804488188, 17.5085042869), (7.47413641001, 25.0))
((7.47413641001, 25.0), (31.2886200204, 91.6666666667), (68.7113799796, 91.6666666667), (92.52586359, 25.0))
((92.52586359, 25.0), (95.2019551181, 17.5085042869), (97.7062072616, 9.17517095361), (100.0, 1.7763568394e-15))
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],
d[isHorizontal] - where)
solutions = [t for t in solutions if 0 <= t < 1]
solutions.sort()
if not solutions:
return [(pt1, pt2, pt3, pt4)]
return _splitCubicAtT(a, b, c, d, *solutions)
def splitQuadraticAtT(pt1, pt2, pt3, *ts):
"""Split the quadratic curve between pt1, pt2 and pt3 at one or more
values of t. Return a list of curve segments.
>>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
>>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (62.5, 50.0), (75.0, 37.5))
((75.0, 37.5), (87.5, 25.0), (100.0, 0.0))
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
return _splitQuadraticAtT(a, b, c, *ts)
def splitCubicAtT(pt1, pt2, pt3, pt4, *ts):
"""Split the cubic curve between pt1, pt2, pt3 and pt4 at one or more
values of t. Return a list of curve segments.
>>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
>>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (59.375, 75.0), (68.75, 68.75), (77.34375, 56.25))
((77.34375, 56.25), (85.9375, 43.75), (93.75, 25.0), (100.0, 0.0))
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
return _splitCubicAtT(a, b, c, d, *ts)
def _splitQuadraticAtT(a, b, c, *ts):
ts = list(ts)
segments = []
ts.insert(0, 0.0)
ts.append(1.0)
for i in range(len(ts) - 1):
t1 = ts[i]
t2 = ts[i+1]
delta = (t2 - t1)
# calc new a, b and c
a1 = a * delta**2
b1 = (2*a*t1 + b) * delta
c1 = a*t1**2 + b*t1 + c
pt1, pt2, pt3 = calcQuadraticPoints(a1, b1, c1)
segments.append((pt1, pt2, pt3))
return segments
def _splitCubicAtT(a, b, c, d, *ts):
ts = list(ts)
ts.insert(0, 0.0)
ts.append(1.0)
segments = []
for i in range(len(ts) - 1):
t1 = ts[i]
t2 = ts[i+1]
delta = (t2 - t1)
# calc new a, b, c and d
a1 = a * delta**3
b1 = (3*a*t1 + b) * delta**2
c1 = (2*b*t1 + c + 3*a*t1**2) * delta
d1 = a*t1**3 + b*t1**2 + c*t1 + d
pt1, pt2, pt3, pt4 = calcCubicPoints(a1, b1, c1, d1)
segments.append((pt1, pt2, pt3, pt4))
return segments
#
# Equation solvers.
#
from math import sqrt, acos, cos, pi
def solveQuadratic(a, b, c,
sqrt=sqrt):
"""Solve a quadratic equation where a, b and c are real.
a*x*x + b*x + c = 0
This function returns a list of roots. Note that the returned list
is neither guaranteed to be sorted nor to contain unique values!
"""
if abs(a) < epsilon:
if abs(b) < epsilon:
# We have a non-equation; therefore, we have no valid solution
roots = []
else:
# We have a linear equation with 1 root.
roots = [-c/b]
else:
# We have a true quadratic equation. Apply the quadratic formula to find two roots.
DD = b*b - 4.0*a*c
if DD >= 0.0:
rDD = sqrt(DD)
roots = [(-b+rDD)/2.0/a, (-b-rDD)/2.0/a]
else:
# complex roots, ignore
roots = []
return roots
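
# Illustrative example (not part of the original module): for x^2 - 3x + 2 = 0,
# solveQuadratic(1, -3, 2) returns [2.0, 1.0], i.e. the roots 1 and 2 in no
# particular order.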
def solveCubic(a, b, c, d,
abs=abs, pow=pow, sqrt=sqrt, cos=cos, acos=acos, pi=pi):
"""Solve a cubic equation where a, b, c and d are real.
a*x*x*x + b*x*x + c*x + d = 0
This function returns a list of roots. Note that the returned list
is neither guaranteed to be sorted nor to contain unique values!
"""
#
# adapted from:
# CUBIC.C - Solve a cubic polynomial
# public domain by Ross Cottrell
# found at: http://www.strangecreations.com/library/snippets/Cubic.C
#
if abs(a) < epsilon:
# don't just test for zero; for very small values of 'a' solveCubic()
# returns unreliable results, so we fall back to quad.
return solveQuadratic(b, c, d)
a = float(a)
a1 = b/a
a2 = c/a
a3 = d/a
Q = (a1*a1 - 3.0*a2)/9.0
R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0
R2_Q3 = R*R - Q*Q*Q
if R2_Q3 < 0:
theta = acos(R/sqrt(Q*Q*Q))
rQ2 = -2.0*sqrt(Q)
x0 = rQ2*cos(theta/3.0) - a1/3.0
x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0
x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0
return [x0, x1, x2]
else:
if Q == 0 and R == 0:
x = 0
else:
x = pow(sqrt(R2_Q3)+abs(R), 1/3.0)
x = x + Q/x
if R >= 0.0:
x = -x
x = x - a1/3.0
return [x]
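
# Illustrative example (not part of the original module): for
# x^3 - 6x^2 + 11x - 6 = 0, solveCubic(1, -6, 11, -6) returns the three real
# roots 1, 2 and 3 (up to floating point error, in no particular order).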
#
# Conversion routines for points to parameters and vice versa
#
def calcQuadraticParameters(pt1, pt2, pt3):
pt1, pt2, pt3 = numpy.array((pt1, pt2, pt3))
c = pt1
b = (pt2 - c) * 2.0
a = pt3 - c - b
return a, b, c
def calcCubicParameters(pt1, pt2, pt3, pt4):
pt1, pt2, pt3, pt4 = numpy.array((pt1, pt2, pt3, pt4))
d = pt1
c = (pt2 - d) * 3.0
b = (pt3 - pt2) * 3.0 - c
a = pt4 - d - c - b
return a, b, c, d
def calcQuadraticPoints(a, b, c):
pt1 = c
pt2 = (b * 0.5) + c
pt3 = a + b + c
return pt1, pt2, pt3
def calcCubicPoints(a, b, c, d):
pt1 = d
pt2 = (c / 3.0) + d
pt3 = (b + c) / 3.0 + pt2
pt4 = a + d + c + b
return pt1, pt2, pt3, pt4
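
# Illustrative note (not part of the original module): the conversions above are
# inverses of each other, e.g.
#   a, b, c = calcQuadraticParameters((0, 0), (50, 100), (100, 0))
#   calcQuadraticPoints(a, b, c)  # -> the original three points (as numpy arrays)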
def _segmentrepr(obj):
"""
>>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], numpy.array([0.1, 2.2])]]])
'(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))'
"""
try:
it = iter(obj)
except TypeError:
return str(obj)
else:
return "(%s)" % ", ".join([_segmentrepr(x) for x in it])
def printSegments(segments):
"""Helper for the doctests, displaying each segment in a list of
segments on a single line as a tuple.
"""
for segment in segments:
        print(_segmentrepr(segment))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30.31694 | 114 | 0.594809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,251 | 0.563356 |
9643d280cf21edb06bbf96df561f04888a36f82e
| 19,403 |
py
|
Python
|
pymach/present.py
|
EnriqueU/pymach
|
b9918dcb0964fc4645f548639a762ef03c3c2e13
|
[
"MIT"
] | null | null | null |
pymach/present.py
|
EnriqueU/pymach
|
b9918dcb0964fc4645f548639a762ef03c3c2e13
|
[
"MIT"
] | null | null | null |
pymach/present.py
|
EnriqueU/pymach
|
b9918dcb0964fc4645f548639a762ef03c3c2e13
|
[
"MIT"
] | null | null | null |
# Standard Libraries
import subprocess
import datetime
import sys  # print to console
import os
import json
# Local Libraries
import define
import analyze
import prepare
import fselect
import evaluate
import improve
import tools
import pandas as pd
from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin
from flask import Flask, render_template, redirect, request, url_for, jsonify, flash, session
from requests_oauthlib import OAuth2Session
from requests.exceptions import HTTPError
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from werkzeug.utils import secure_filename
from collections import OrderedDict
"""
basedir = os.path.abspath(os.path.dirname(__file__))
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
"""
"""App Configuration""" ########################################################
"""
class Auth:
# Google Project Credentials
CLIENT_ID = ('814931001809-tch3d62bdn7f0j3qkdu7dmp21n7t87ra'
'.apps.googleusercontent.com')
CLIENT_SECRET = 'M9s6kUQ3MYllNAl4t2NAv_9V'
REDIRECT_URI = 'http://127.0.0.1:8002/oauth2callback'
AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
USER_INFO = 'https://www.googleapis.com/userinfo/v2/me'
SCOPE = ['https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile']
class Config:
# Base config
APP_NAME = "Pymach"
SECRET_KEY = os.environ.get("SECRET_KEY") or os.urandom(24)
class DevConfig(Config):
# Dev config
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, "test.db")
class ProdConfig(Config):
# Production config
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, "prod.db")
config = {
"dev": DevConfig,
"prod": ProdConfig,
"default": DevConfig
}
"""
"""APP creation and configuration""" ###########################################
app = Flask(__name__)
#app.config.from_object(config['dev'])
#app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
#app.secret_key = os.urandom(24)
#db = SQLAlchemy(app)
#login_manager = LoginManager(app)
#login_manager.login_view = "login"
#login_manager.session_protection = "strong"
APP_PATH = os.path.dirname(os.path.abspath(__file__))
app.config['UPLOAD_DIR'] = os.path.join(APP_PATH, 'uploads')
app.config['MODELS_DIR'] = os.path.join(APP_PATH, 'models')
app.config['MARKET_DIR'] = os.path.join(APP_PATH, 'market')
ALLOWED_EXTENSIONS = ['txt', 'csv', 'ml', 'html']
""" DB Models """ ##############################################################
"""
class User(db.Model, UserMixin):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), unique=True, nullable=False)
name = db.Column(db.String(100), nullable=True)
avatar = db.Column(db.String(200))
tokens = db.Column(db.Text)
created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow())
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
"""
""" OAuth Session creation """ #################################################
"""
def get_google_auth(state=None, token=None):
if token:
return OAuth2Session(Auth.CLIENT_ID, token=token)
if state:
return OAuth2Session(Auth.CLIENT_ID, state=state, redirect_uri=Auth.REDIRECT_URI)
oauth = OAuth2Session(Auth.CLIENT_ID, redirect_uri=Auth.REDIRECT_URI, scope=Auth.SCOPE)
return oauth
"""
def report_analyze(figures, data_path, data_name,tipo='normal'):
    # 'tipo' indicates whether or not the data should be normalized
# Here read and save the description of the data
definer = define.Define(data_path=data_path,data_name=data_name).pipeline()
if tipo=='normal':
analyzer = analyze.Analyze(definer).pipeline()
elif tipo=='real':
analyzer = analyze.Analyze(definer).pipelineReal()
table1 = definer.describe
table2 = analyzer.describe
dict_figures = OrderedDict()
for fig in figures:
data_name = data_name.replace(".csv", "")
plot_path = os.path.join(app.config['MARKET_DIR'], data_name, 'analyze')
tools.path_exists(plot_path)
plot_path_plot = os.path.join(plot_path, fig+'.html')
dict_figures[fig] = analyzer.plot(fig)
analyzer.save_plot(plot_path_plot)
dict_report = {'plot': dict_figures, 'table1': table1, 'table2' : table2}
return dict_report
def report_model(response, data_path, data_name, problem_type):
definer = define.Define(data_path=data_path,data_name=data_name,problem_type=problem_type).pipeline()
preparer = prepare.Prepare(definer).pipeline() # scaler
selector = fselect.Select(definer).pipeline() # pca
evaluator = evaluate.Evaluate(definer, preparer, selector).pipeline()
plot = evaluator.plot_models()
table = evaluator.report
data_name = data_name.replace(".csv", "")
plot_path = os.path.join(app.config['MARKET_DIR'], data_name, 'model')
tools.path_exists(plot_path)
plot_path_plot = os.path.join(plot_path, 'boxplot.html')
evaluator.save_plot(plot_path_plot)
plot_path_report = os.path.join(plot_path, 'report.csv')
evaluator.save_report(plot_path_report)
dict_report = {'plot': plot, 'table': table}
return dict_report
def report_improve(data_path, data_name, problem_type, optimizer, modelos):
definer = define.Define(data_path=data_path,data_name=data_name,problem_type=problem_type).pipeline()
preparer = prepare.Prepare(definer).pipeline()
selector = fselect.Select(definer).pipeline()
evaluator = evaluate.Evaluate(definer, preparer, selector)
improver = improve.Improve(evaluator, optimizer, modelos).pipeline()
plot = improver.plot_models()
table = improver.report
dict_report = {'plot': plot, 'table': table}
#dict_report = {'table': table}
return dict_report
def report_market(data_name):
# analyze_report = OrderedDict()
# model_report = OrderedDict()
data_name = data_name.replace(".csv", "")
app_path = os.path.join(app.config['MARKET_DIR'], data_name)
# app_dirs = os.listdir(app_path)
# Show Model info
try:
model_path = os.path.join(app_path, 'model')
plot_model = ''
with open(os.path.join(model_path, 'boxplot.html')) as f:
plot_model = f.read()
table_model = pd.read_csv(os.path.join(model_path, 'report.csv'))
dict_report_model = {'plot':plot_model, 'table':table_model} # return 1
except:
dict_report_model = {'plot':None, 'table':None} # return 1
# Show Analyze info
try:
analyze_path = os.path.join(app_path, 'analyze')
plot_analyze = OrderedDict()
for plot in os.listdir(analyze_path):
with open(os.path.join(analyze_path, plot)) as f:
fig = plot.replace('.html', '')
plot_analyze[fig] = f.read()
# Join full report: model and analyze
dicts_market = {'model':dict_report_model, 'analyze':plot_analyze}
except:
dicts_market = {'model':dict_report_model, 'analyze':None} # return 2
return dicts_market
def allowed_file(file_name):
return '.' in file_name and file_name.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
########################### Start Upload Button ##################################
@app.route('/')
#def index():
# return render_template('home.html')
# @app.route('/login')
# def login():
# if current_user.is_authenticated:
# return redirect(url_for('defineData'))
# google = get_google_auth()
# auth_url, state = google.authorization_url(Auth.AUTH_URI, access_type='offline')
# session['oauth_state'] = state
# return redirect(auth_url)
#
# @app.route('/oauth2callback', methods=["GET"])
# def callback():
# if current_user is not None and current_user.is_authenticated:
# return redirect(url_for('defineData'))
# if 'error' in request.args:
# if request.args.get('error') == 'access_denied':
# return 'You denied access.'
# return 'Error encountered.'
# if 'code' not in request.args and 'state' not in request.args:
# return redirect(url_for('login'))
# else:
# google = get_google_auth(state=session['oauth_state'])
# try:
# token = google.fetch_token(Auth.TOKEN_URI,
# client_secret=Auth.CLIENT_SECRET,
# authorization_response=request.url)
# except HTTPError:
# return 'HTTPError occurred.'
# google = get_google_auth(token=token)
# resp = google.get(Auth.USER_INFO)
# if resp.status_code == 200:
# user_data = resp.json()
# email = user_data['email']
# user = User.query.filter_by(email=email).first()
# if user is None:
# user = User()
# user.email = email
# user.name = user_data['name']
# path = os.path.join(app.config['UPLOAD_DIR'], user.name)
# if not os.path.exists(path):
# os.makedirs(path)
# print(token)
# user.tokens = json.dumps(token)
# user.avatar = user_data['picture']
# db.session.add(user)
# db.session.commit()
# login_user(user)
# return redirect(url_for('defineData'))
# return 'Could not fetch your information.'
#
# @app.route('/logout')
# @login_required
# def logout():
# logout_user()
# return redirect(url_for('index'))
@app.route('/defineData', methods=['GET', 'POST'])
#@login_required
def defineData():
""" Show the files that have been uploaded """
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
return render_template('uploadData.html', files=dirs)
@app.route('/storeData', methods=['GET', 'POST'])
#@login_required
def storedata():
""" Upload a new file """
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
if request.method == 'POST':
if 'file' not in request.files:
            flash('Choose a file .csv', "alert alert-danger")
            return render_template(
                'uploadData.html',
                infoUpload='Choose a file .csv',
                files=dirs)
file = request.files['file'] # get the file
if file.filename == '':
flash('File not selected',"alert alert-danger")
return render_template(
'uploadData.html',
infoUpload='file not selected',
files=dirs)
file_name = ''
data_name = ''
if file and allowed_file(file.filename):
file_name = secure_filename(file.filename)
file_path = os.path.join(path, file_name)
file.save(file_path)
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
flash('Uploaded!! '+file_name,"alert alert-success")
return render_template(
'uploadData.html',
infoUpload='Uploaded!! '+file_name,
files=dirs)
flash('Error',"alert alert-danger")
return render_template(
'uploadData.html',
infoUpload='Error',
files=dirs)
else:
return redirect(url_for('defineData'))
@app.route('/chooseData', methods=['GET', 'POST'])
#@login_required
def chooseData():
""" choose a file and show its content """
from itertools import islice
# tools.localization()
file_name = ''
data_name = ''
data_path = ''
dire = ''
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
if request.method == 'POST':
file_name = request.form['submit']
data_name = file_name.replace(".csv", "")
data_path = os.path.join(path, file_name)
dire = open(data_path)
return render_template(
'uploadData.html',
files=dirs,
dataset = dire,
data_name=data_name)
else:
return render_template(
'uploadData.html',
infoUpload='Error',
files=dirs)
########################### End Upload Button ##################################
# ########################## Start Analyze Button ##################################
@app.route('/analyze_base', methods=['GET', 'POST'])
#@login_required
def analyze_base():
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
return render_template('analyzeData.html', files=dirs)
@app.route('/analyze_app', methods=['GET', 'POST'])
#@login_required
def analyze_app():
figures = ['Histogram', 'Boxplot', 'Correlation']
data_name = ''
data_path = ''
archivo = ''
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
if request.method == 'POST':
data_name = request.form['submit']
data_path = os.path.join(path, data_name)
tipo = request.args.get('tipo', default = 'real', type = str)
#if tipo=='normal':
figures1=report_analyze(figures, data_path, data_name)
#elif tipo=='real':
figures2=report_analyze(figures,data_path, data_name,tipo='real')
else:
return redirect(url_for('analyze_base'))
return render_template(
'analyzeData.html',
files=dirs,
figures1=figures1,
figures2=figures2,
data_name=data_name)
########################### End Analyze Button ##################################
########################### Start Model Button ##################################
@app.route('/model_base', methods=['GET', 'POST'])
#@login_required
def model_base():
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
return render_template('models.html', files=dirs)
@app.route('/model_app', methods=['GET', 'POST'])
#@login_required
def model_app():
response = "class"
data_name = ''
data_path = ''
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
    if request.method == 'POST':
        problem_type = request.form['typeModel']
        data_name = request.form['submit']
        data_path = os.path.join(path, data_name)
    else:
        # problem_type is only set on POST, so redirect GET requests
        return redirect(url_for('model_base'))
return render_template(
'models.html',
files=dirs,
report=report_model(response, data_path, data_name, problem_type),
data_name=data_name)
########################### End Model Button ##################################
########################### Start Improve Button ##################################
@app.route('/improve_base', methods=['GET', 'POST'])
#@login_required
def improve_base():
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
return render_template('improve.html', files=dirs)
@app.route('/improve_app', methods=['GET', 'POST'])
#@login_required
def improve_app():
data_name = ''
data_path = ''
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
    if request.method == 'POST':
        optimizer = request.form['search']
        problem_type = request.form['typeModelRC']
        modelos = request.form.getlist('typeModel')
        # ---------------------------------------------------------------------
        data_name = request.form['submit']  # chosen data
        data_path = os.path.join(path, data_name)
    else:
        # optimizer, problem_type and modelos are only set on POST, so redirect GET requests
        return redirect(url_for('improve_base'))
return render_template(
'improve.html',
files=dirs,
report=report_improve(data_path, data_name, problem_type, optimizer, modelos),
data_name=data_name)
########################### End Improve Button ##################################
########################### Start Model Button ##################################
@app.route('/market_base', methods=['GET', 'POST'])
#@login_required
def market_base():
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
return render_template('market.html', files=dirs)
@app.route('/market_app', methods=['GET', 'POST'])
#@login_required
def market_app():
response = "class"
data_name = ''
data_path = ''
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
if request.method == 'POST':
data_name = request.form['submit']
# data_path = os.path.join(app.config['MARKET_DIR'], data_name)
return render_template(
'market.html',
files=dirs,
report=report_market(data_name),
data_name=data_name)
########################### End Market Button ##################################
@app.route('/prediction', methods=['GET', 'POST'])
def prediction():
attributes = []
dirs = os.listdir(app.config['UPLOAD_DIR'])
data_class = 'class'
file_name = 'iris.csv'
filepath = os.path.join(app.config['UPLOAD_DIR'], file_name)
model = 'Naive Bayes'
f = open(filepath, 'r')
g = open(filepath, 'r')
for item in g.readline().split(','):
if item.strip() != data_class:
attributes.append(item)
print(attributes, ' this is something')
return render_template('showPrediction.html', file = f, attributes = attributes, data_class = data_class, model = model)
################################################################################
if __name__ == '__main__':
#db.create_all()
app.secret_key = os.urandom(24)
app.run(host='0.0.0.0', debug=True, port=8002)
# TODO: for the flash message
#app.secret_key = 'some_secret'
| 35.931481 | 124 | 0.611091 | 0 | 0 | 0 | 0 | 10,836 | 0.55847 | 0 | 0 | 9,340 | 0.481369 |
96442ec34b3f08fd8d2dea36e730470c13f2a4b5
| 7,344 |
py
|
Python
|
api/ddu/management-zone-calculation/dduConsumptionPerMZ.py
|
pawelsiwek/snippets
|
6b551bf98e1ca514c0176363acfcb7dd20288b30
|
[
"Apache-2.0"
] | 11 |
2019-07-26T08:35:08.000Z
|
2021-11-04T11:25:28.000Z
|
api/ddu/management-zone-calculation/dduConsumptionPerMZ.py
|
pawelsiwek/snippets
|
6b551bf98e1ca514c0176363acfcb7dd20288b30
|
[
"Apache-2.0"
] | 12 |
2019-07-09T07:55:36.000Z
|
2022-03-10T22:26:42.000Z
|
api/ddu/management-zone-calculation/dduConsumptionPerMZ.py
|
pawelsiwek/snippets
|
6b551bf98e1ca514c0176363acfcb7dd20288b30
|
[
"Apache-2.0"
] | 46 |
2019-04-24T13:35:46.000Z
|
2022-03-23T01:00:17.000Z
|
import sys, requests, json, time
METRIC_NAME = "builtin:billing.ddu.metrics.byEntity"
PAGE_SIZE = 500
sys.tracebacklimit = 0
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 MyManagementZone
arguments = len(sys.argv) - 1
if arguments != 5 and arguments != 6:
print(
"The script was called with {} arguments but expected 5 or 6: \nFROM_DATE_AND_TIME TO_DATE_AND_TIME URL_TO_ENVIRONMENT API_TOKEN MAX_REQUESTS_PER_MINUTE [SELECTED_MANAGEMENT_ZONE]\n"
"Example: python dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 [myManagementZone]\n"
"Note: The SELECTED_MANAGEMENT_ZONE is optional. Specify it if you only want the calculate the ddu consumption for a single management zone.".format(
arguments
)
)
exit()
FROM = str(sys.argv[1])
TO = str(sys.argv[2])
BASE_URL = str(sys.argv[3])
API_TOKEN = str(sys.argv[4])
MAX_REQUESTS_PER_MINUTE = int(sys.argv[5])
if arguments == 6:
SELECTED_MANAGEMENT_ZONE_NAME = str(sys.argv[6])
else:
SELECTED_MANAGEMENT_ZONE_NAME = None
# Get all available management zones
# https://mySampleEnv.live.dynatrace.com/api/config/v1/managementZones
# try:
response = requests.get(
BASE_URL + "config/v1/managementZones",
headers={"Authorization": "Api-Token " + API_TOKEN},
)
# Show error message when a connection can’t be established. Terminates the script when there’s an error.
response.raise_for_status()
allManagemementZones = json.loads(response.content)["values"]
# print("Amount of different management zones: ", len(allManagemementZones))
# If the management zone is specified: Get the index of the occurrence
if SELECTED_MANAGEMENT_ZONE_NAME != None:
for mzIndex, managementZone in enumerate(allManagemementZones):
if allManagemementZones[mzIndex].get("name") == SELECTED_MANAGEMENT_ZONE_NAME:
SELECTED_MANAGEMENT_ZONE_INDEX = mzIndex
# Get all different entityTypes. Due to the high number of different types you can't fetch all at once => Loop through every page with nextPageKey
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes?nextPageKey=AQAAADIBAAAAMg==
response = requests.get(
BASE_URL + "v2/entityTypes", headers={"Authorization": "Api-Token " + API_TOKEN}
)
response.raise_for_status()
allEntityTypes = json.loads(response.content)["types"]
nextPage = json.loads(response.content)["nextPageKey"]
while nextPage != None:
response = requests.get(
BASE_URL + "v2/entityTypes?nextPageKey=" + nextPage,
headers={"Authorization": "Api-Token " + API_TOKEN},
)
response.raise_for_status()
nextPage = (json.loads(response.content)).get("nextPageKey", None)
allEntityTypes.extend(json.loads(response.content)["types"])
# print("Amount of different entity types: ", len(allEntityTypes))
# print()
dduConsumptionObjectOfManagementZone = {}
# Result JSON Object with Array of dduConsumption for each management zone
dduConsumptionPerManagementZone = "[ "
dduConsumptionOfEntityType = 0
dduConsumptionOfManagementZone = 0
# https://mySampleEnv.live.dynatrace.com/api/v2/metrics/query?metricSelector=builtin:billing.ddu.metrics.byEntity&entitySelector=type(HOST),mzId(123456789)&from=2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00
# Loop through every entityType of every management zone
# If there is a specific management zone selected: "loop through" the single management zone
for managementZoneIndex, managementZone in (
enumerate([allManagemementZones[SELECTED_MANAGEMENT_ZONE_INDEX]])
if SELECTED_MANAGEMENT_ZONE_NAME != None
else enumerate(allManagemementZones)
):
# If a management zone got specified: access it via the index in all management zones
if SELECTED_MANAGEMENT_ZONE_NAME != None:
managementZoneIndex = SELECTED_MANAGEMENT_ZONE_INDEX
for entityTypeIndex, entityType in enumerate(allEntityTypes):
"""
print(
"MZId: {:21} MZName: {:20} ET Name: {:5}".format(
allManagemementZones[managementZoneIndex]["id"],
allManagemementZones[managementZoneIndex]["name"],
allEntityTypes[entityTypeIndex]["type"],
)
)
"""
# Replace the "+" of Timezone to the encoded %2B
response = requests.get(
"{}v2/metrics/query?metricSelector={}:splitBy()&entitySelector=mzId({}),type({})&pageSize={}&from={}&to={}".format(
BASE_URL,
METRIC_NAME,
allManagemementZones[managementZoneIndex]["id"],
allEntityTypes[entityTypeIndex]["type"],
str(PAGE_SIZE),
FROM.replace("+", "%2B", 1),
TO.replace("+", "%2B", 1),
),
headers={"Authorization": "Api-Token " + API_TOKEN},
)
response.raise_for_status()
# print("Waiting for ", 60 / MAX_REQUESTS_PER_MINUTE, " seconds")
time.sleep(60 / MAX_REQUESTS_PER_MINUTE)
dduConsumptionOfMZandETDict = json.loads(response.content)["result"][0]["data"]
# If there are any results
if dduConsumptionOfMZandETDict:
# Filter out every empty usage values and create the sum of ddu usage
dduConsumptionOfMZandET = sum(
filter(None, dduConsumptionOfMZandETDict[0]["values"])
)
"""
print(
"Ddu consumption of manangement zone {} and entityType {}: {}".format(
allManagemementZones[managementZoneIndex]["name"],
allEntityTypes[entityTypeIndex]["type"],
round(dduConsumptionOfMZandET, 3),
)
)
"""
dduConsumptionOfManagementZone += dduConsumptionOfMZandET
dduConsumptionOfMZandET = 0
"""
print(
"Ddu consumption of management zone {}: {}".format(
allManagemementZones[managementZoneIndex]["name"],
round(dduConsumptionOfManagementZone, 3),
)
)
"""
# print()
# Populate JSON Object
dduConsumptionObjectOfManagementZone["MZId"] = allManagemementZones[
managementZoneIndex
]["id"]
dduConsumptionObjectOfManagementZone["MZName"] = allManagemementZones[
managementZoneIndex
]["name"]
dduConsumptionObjectOfManagementZone["dduConsumption"] = round(
dduConsumptionOfManagementZone, 3
)
dduConsumptionOfManagementZone = 0
# <[ > takes 2 chars
if len(dduConsumptionPerManagementZone) > 2:
dduConsumptionPerManagementZone = (
dduConsumptionPerManagementZone
+ ", "
+ json.dumps(dduConsumptionObjectOfManagementZone)
)
else:
dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + json.dumps(
dduConsumptionObjectOfManagementZone
)
dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + " ]"
print(dduConsumptionPerManagementZone)
| 42.697674 | 212 | 0.687228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,679 | 0.50068 |
964481acbba226d6bc1f722acc9bd0960d9cebe5
| 936 |
py
|
Python
|
Lesson3/coefficient_of_determination2.py
|
rmhyman/DataScience
|
c839c97c76f104ab298563a5c8b48f6d90be5f60
|
[
"MIT"
] | 1 |
2015-09-17T18:49:09.000Z
|
2015-09-17T18:49:09.000Z
|
Lesson3/coefficient_of_determination2.py
|
rmhyman/DataScience
|
c839c97c76f104ab298563a5c8b48f6d90be5f60
|
[
"MIT"
] | null | null | null |
Lesson3/coefficient_of_determination2.py
|
rmhyman/DataScience
|
c839c97c76f104ab298563a5c8b48f6d90be5f60
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy
import matplotlib.pyplot as plt
import sys
def compute_r_squared(data, predictions):
'''
    In exercise 5, we calculated the R^2 value for you. But why don't you try
    and calculate the R^2 value yourself.
Given a list of original data points, and also a list of predicted data points,
write a function that will compute and return the coefficient of determination (R^2)
for this data. numpy.mean() and numpy.sum() might both be useful here, but
not necessary.
Documentation about numpy.mean() and numpy.sum() below:
http://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
http://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html
'''
mean = data.mean()
numerator = np.sum((data - predictions)**2)
denom = np.sum((data-mean)**2)
r_squared = 1 - numerator/denom
return r_squared
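
# Illustrative example (not part of the original exercise): perfect predictions
# give R^2 = 1, while predicting the mean everywhere gives R^2 = 0, e.g.
#   data = np.array([1.0, 2.0, 3.0, 4.0])
#   compute_r_squared(data, data)                              # -> 1.0
#   compute_r_squared(data, np.full_like(data, data.mean()))   # -> 0.0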
| 36 | 89 | 0.679487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.67094 |
964490d0c12237f4b5b63e54d5ed293032299a1f
| 414 |
py
|
Python
|
setup.py
|
10sr/pyltsv
|
d31286cef6caca941d20d364863bf3bd0d95b008
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
10sr/pyltsv
|
d31286cef6caca941d20d364863bf3bd0d95b008
|
[
"Apache-2.0"
] | 16 |
2020-06-15T11:04:39.000Z
|
2022-01-11T15:34:14.000Z
|
setup.py
|
10sr/pyltsv
|
d31286cef6caca941d20d364863bf3bd0d95b008
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# type: ignore
"""Setup script."""
from setuptools import setup
def _get_version():
with open("pyltsv/_version.py") as f:
for line in f:
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
setup(
version=_get_version(),
)
| 19.714286 | 56 | 0.589372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.309179 |
96461908df787c4f715fcb78e9b9a2b6846a1ccf
| 15,328 |
py
|
Python
|
hypha/apply/projects/models/project.py
|
slifty/hypha
|
93313933c26589858beb9a861e33431658cd3b24
|
[
"BSD-3-Clause"
] | null | null | null |
hypha/apply/projects/models/project.py
|
slifty/hypha
|
93313933c26589858beb9a861e33431658cd3b24
|
[
"BSD-3-Clause"
] | null | null | null |
hypha/apply/projects/models/project.py
|
slifty/hypha
|
93313933c26589858beb9a861e33431658cd3b24
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
import decimal
import json
import logging
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value
from django.db.models.functions import Cast, Coalesce
from django.db.models.signals import post_delete
from django.dispatch.dispatcher import receiver
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.core.fields import StreamField
from addressfield.fields import ADDRESS_FIELDS_ORDER
from hypha.apply.funds.models.mixins import AccessFormData
from hypha.apply.stream_forms.blocks import FormFieldsBlock
from hypha.apply.stream_forms.files import StreamFieldDataEncoder
from hypha.apply.stream_forms.models import BaseStreamForm
from hypha.apply.utils.storage import PrivateStorage
from .vendor import Vendor
logger = logging.getLogger(__name__)
def contract_path(instance, filename):
return f'projects/{instance.project_id}/contracts/{filename}'
def document_path(instance, filename):
return f'projects/{instance.project_id}/supporting_documents/{filename}'
COMMITTED = 'committed'
CONTRACTING = 'contracting'
IN_PROGRESS = 'in_progress'
CLOSING = 'closing'
COMPLETE = 'complete'
PROJECT_STATUS_CHOICES = [
(COMMITTED, _('Committed')),
(CONTRACTING, _('Contracting')),
(IN_PROGRESS, _('In Progress')),
(CLOSING, _('Closing')),
(COMPLETE, _('Complete')),
]
class ProjectQuerySet(models.QuerySet):
def active(self):
# Projects that are not finished.
return self.exclude(status=COMPLETE)
def in_progress(self):
        # Projects that users need to interact with, submitting reports or payment requests.
return self.filter(
status__in=(IN_PROGRESS, CLOSING,)
)
def complete(self):
return self.filter(status=COMPLETE)
def in_approval(self):
return self.filter(
is_locked=True,
status=COMMITTED,
approvals__isnull=True,
)
def by_end_date(self, desc=False):
order = getattr(F('proposed_end'), 'desc' if desc else 'asc')(nulls_last=True)
return self.order_by(order)
def with_amount_paid(self):
return self.annotate(
amount_paid=Coalesce(Sum('invoices__paid_value'), Value(0)),
)
def with_last_payment(self):
return self.annotate(
last_payment_request=Max('invoices__requested_at'),
)
def with_outstanding_reports(self):
Report = apps.get_model('application_projects', 'Report')
return self.annotate(
outstanding_reports=Subquery(
Report.objects.filter(
project=OuterRef('pk'),
).to_do().order_by().values('project').annotate(
count=Count('pk'),
).values('count'),
output_field=models.IntegerField(),
)
)
def with_start_date(self):
return self.annotate(
start=Cast(
Subquery(
Contract.objects.filter(
project=OuterRef('pk'),
).approved().order_by(
'approved_at'
).values('approved_at')[:1]
),
models.DateField(),
)
)
def for_table(self):
return self.with_amount_paid().with_last_payment().with_outstanding_reports().select_related(
'report_config',
'submission__page',
'lead',
)
class Project(BaseStreamForm, AccessFormData, models.Model):
lead = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL, related_name='lead_projects')
submission = models.OneToOneField("funds.ApplicationSubmission", on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='owned_projects')
title = models.TextField()
vendor = models.ForeignKey(
"application_projects.Vendor",
on_delete=models.SET_NULL,
null=True, blank=True, related_name='projects'
)
value = models.DecimalField(
default=0,
max_digits=10,
decimal_places=2,
validators=[MinValueValidator(decimal.Decimal('0.01'))],
)
proposed_start = models.DateTimeField(_('Proposed Start Date'), null=True)
proposed_end = models.DateTimeField(_('Proposed End Date'), null=True)
status = models.TextField(choices=PROJECT_STATUS_CHOICES, default=COMMITTED)
form_data = JSONField(encoder=StreamFieldDataEncoder, default=dict)
form_fields = StreamField(FormFieldsBlock(), null=True)
# tracks read/write state of the Project
is_locked = models.BooleanField(default=False)
# tracks updates to the Projects fields via the Project Application Form.
user_has_updated_details = models.BooleanField(default=False)
activities = GenericRelation(
'activity.Activity',
content_type_field='source_content_type',
object_id_field='source_object_id',
related_query_name='project',
)
created_at = models.DateTimeField(auto_now_add=True)
sent_to_compliance_at = models.DateTimeField(null=True)
objects = ProjectQuerySet.as_manager()
def __str__(self):
return self.title
@property
def status_display(self):
return self.get_status_display()
def get_address_display(self):
try:
address = json.loads(self.vendor.address)
except (json.JSONDecodeError, AttributeError):
return ''
else:
return ', '.join(
address.get(field)
for field in ADDRESS_FIELDS_ORDER
if address.get(field)
)
@classmethod
def create_from_submission(cls, submission):
"""
Create a Project from the given submission.
        Returns a new Project or the given ApplicationSubmission's existing
Project.
"""
if not settings.PROJECTS_ENABLED:
            logger.error(f'Tried to create a Project for Submission ID={submission.id} while projects are disabled')
return None
        # OneToOne relations on the targeted model cannot be accessed without
# an exception when the relation doesn't exist (is None). Since we
# want to fail fast here, we can use hasattr instead.
if hasattr(submission, 'project'):
return submission.project
# See if there is a form field named "legal name", if not use user name.
legal_name = submission.get_answer_from_label('legal name') or submission.user.full_name
vendor, _ = Vendor.objects.get_or_create(
user=submission.user
)
vendor.name = legal_name
vendor.address = submission.form_data.get('address', '')
vendor.save()
return Project.objects.create(
submission=submission,
user=submission.user,
title=submission.title,
vendor=vendor,
value=submission.form_data.get('value', 0),
)
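
    # Illustrative usage (a hedged sketch, not part of the original model): a
    # submission handler would typically call
    #   project = Project.create_from_submission(submission)
    # which returns the existing submission.project if there is one, creates a
    # new Project otherwise, or returns None when PROJECTS_ENABLED is off.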
@property
def start_date(self):
# Assume project starts when OTF are happy with the first signed contract
first_approved_contract = self.contracts.approved().order_by('approved_at').first()
if not first_approved_contract:
return None
return first_approved_contract.approved_at.date()
@property
def end_date(self):
# Aiming for the proposed end date as the last day of the project
# If still ongoing assume today is the end
return max(
self.proposed_end.date(),
timezone.now().date(),
)
def paid_value(self):
return self.invoices.paid_value()
def unpaid_value(self):
return self.invoices.unpaid_value()
def clean(self):
if self.proposed_start is None:
return
if self.proposed_end is None:
return
if self.proposed_start > self.proposed_end:
raise ValidationError(_('Proposed End Date must be after Proposed Start Date'))
def save(self, *args, **kwargs):
creating = not self.pk
if creating:
files = self.extract_files()
else:
self.process_file_data(self.form_data)
super().save(*args, **kwargs)
if creating:
self.process_file_data(files)
def editable_by(self, user):
if self.editable:
return True
# Approver can edit it when they are approving
return user.is_approver and self.can_make_approval
@property
def editable(self):
if self.status not in (CONTRACTING, COMMITTED):
return True
# Someone has approved the project - consider it locked while with contracting
if self.approvals.exists():
return False
# Someone must lead the project to make changes
return self.lead and not self.is_locked
def get_absolute_url(self):
if settings.PROJECTS_ENABLED:
return reverse('apply:projects:detail', args=[self.id])
return '#'
@property
def can_make_approval(self):
return self.is_locked and self.status == COMMITTED
def can_request_funding(self):
"""
Should we show this Project's funding block?
"""
return self.status in (CLOSING, IN_PROGRESS)
@property
def can_send_for_approval(self):
"""
Wrapper to expose the pending approval state
We don't want to expose a "Sent for Approval" state to the end User so
        we infer it from the current status being "Committed" and the Project
being locked.
"""
correct_state = self.status == COMMITTED and not self.is_locked
return correct_state and self.user_has_updated_details
@property
def requires_approval(self):
return not self.approvals.exists()
def get_missing_document_categories(self):
"""
        Get the number of documents required to meet each DocumentCategory's minimum
"""
# Count the number of documents in each category currently
existing_categories = DocumentCategory.objects.filter(packet_files__project=self)
counter = collections.Counter(existing_categories)
# Find the difference between the current count and recommended count
for category in DocumentCategory.objects.all():
current_count = counter[category]
difference = category.recommended_minimum - current_count
if difference > 0:
yield {
'category': category,
'difference': difference,
}
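    # Illustrative usage (a hedged sketch): callers can iterate the generator,
    # e.g.
    #   for gap in project.get_missing_document_categories():
    #       print(gap['category'], gap['difference'])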
@property
def is_in_progress(self):
return self.status == IN_PROGRESS
@property
def has_deliverables(self):
return self.deliverables.exists()
# def send_to_compliance(self, request):
# """Notify Compliance about this Project."""
# messenger(
# MESSAGES.SENT_TO_COMPLIANCE,
# request=request,
# user=request.user,
# source=self,
# )
# self.sent_to_compliance_at = timezone.now()
# self.save(update_fields=['sent_to_compliance_at'])
@register_setting
class ProjectSettings(BaseSetting):
compliance_email = models.TextField("Compliance Email")
vendor_setup_required = models.BooleanField(default=True)
class Approval(models.Model):
project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="approvals")
by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="approvals")
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ['project', 'by']
def __str__(self):
return _('Approval of {project} by {user}').format(project=self.project, user=self.by)
class ContractQuerySet(models.QuerySet):
def approved(self):
return self.filter(is_signed=True, approver__isnull=False)
class Contract(models.Model):
approver = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL, related_name='contracts')
project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="contracts")
file = models.FileField(upload_to=contract_path, storage=PrivateStorage())
is_signed = models.BooleanField("Signed?", default=False)
created_at = models.DateTimeField(auto_now_add=True)
approved_at = models.DateTimeField(null=True)
objects = ContractQuerySet.as_manager()
@property
def state(self):
return _('Signed') if self.is_signed else _('Unsigned')
def __str__(self):
return _('Contract for {project} ({state})').format(project=self.project, state=self.state)
def get_absolute_url(self):
return reverse('apply:projects:contract', args=[self.project.pk, self.pk])
class PacketFile(models.Model):
category = models.ForeignKey("DocumentCategory", null=True, on_delete=models.CASCADE, related_name="packet_files")
project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="packet_files")
title = models.TextField()
document = models.FileField(upload_to=document_path, storage=PrivateStorage())
def __str__(self):
return _('Project file: {title}').format(title=self.title)
def get_remove_form(self):
"""
Get an instantiated RemoveDocumentForm with this class as `instance`.
This allows us to build instances of the RemoveDocumentForm for each
instance of PacketFile in the supporting documents template. The
standard Delegated View flow makes it difficult to create these forms
in the view or template.
"""
from ..forms import RemoveDocumentForm
return RemoveDocumentForm(instance=self)
@receiver(post_delete, sender=PacketFile)
def delete_packetfile_file(sender, instance, **kwargs):
# Remove the file and don't save the base model
instance.document.delete(False)
class DocumentCategory(models.Model):
name = models.CharField(max_length=254)
recommended_minimum = models.PositiveIntegerField()
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
verbose_name_plural = 'Document Categories'
class Deliverable(models.Model):
name = models.TextField()
available_to_invoice = models.IntegerField(default=1)
unit_price = models.DecimalField(
max_digits=10,
decimal_places=2,
validators=[MinValueValidator(decimal.Decimal('0.01'))],
)
project = models.ForeignKey(
Project,
null=True, blank=True,
on_delete=models.CASCADE,
related_name='deliverables'
)
def __str__(self):
return self.name
| 32.892704 | 123 | 0.666819 | 13,295 | 0.867367 | 778 | 0.050757 | 3,623 | 0.236365 | 0 | 0 | 3,499 | 0.228275 |
964683b856f9816db2cfc1191cef1b460b9b0c10
| 2,030 |
py
|
Python
|
logsight/result/template.py
|
aiops/logsight-sdk-py
|
35fd9f99eb03472dee7ae6a1639502b7ea0c485e
|
[
"MIT"
] | 1 |
2021-10-02T09:29:10.000Z
|
2021-10-02T09:29:10.000Z
|
logsight/result/template.py
|
aiops/logsight-sdk-py
|
35fd9f99eb03472dee7ae6a1639502b7ea0c485e
|
[
"MIT"
] | null | null | null |
logsight/result/template.py
|
aiops/logsight-sdk-py
|
35fd9f99eb03472dee7ae6a1639502b7ea0c485e
|
[
"MIT"
] | null | null | null |
class Template:
def __init__(self, data):
"""Class representing log templates.
Note:
Timestamps are represented in ISO format with timezone information.
e.g, 2021-10-07T13:18:09.178477+02:00.
"""
self._timestamp = data.get("@timestamp", None)
self._actual_level = data.get("actual_level", None)
self._app_name = data.get("app_name", None)
self._message = data.get("message", None)
self._name = data.get("name", None)
self._params = data.get("params", None)
self._template = data.get("template", None)
self._tag = data.get("tag", None)
def __repr__(self):
return {"app_name": self._app_name, "template": self._template}
@property
def timestamp(self):
"""str: Timestamp when the log message was generated."""
return self._timestamp
@property
def actual_level(self):
"""str: Log level of the message (e.g., WARNING)."""
return self._actual_level
@property
def app_name(self):
"""str: Application name."""
return self._app_name
@property
def message(self):
"""str: Log message."""
return self._message
@property
def name(self):
"""str: Name."""
return self._name
@property
def template(self):
"""str: Template generated from log message.
Examples:
nova.virt.libvirt.imagecache <*> ] <*> base <*> <*>
"""
return self._template
@property
def params(self):
"""(:obj:`list` of :obj:`str`): Parameters extracted from log message.
Examples:
"param_0":"[req-addc1839-2ed5-4778-b57e-5854eb7b8b09"
"param_1":"Unknown"
"param_2":"file:"
"param_3":"/var/lib/nova/instances/_base/a489c868..."
"""
return self._params
@property
def tag(self):
"""str: Tag associated with a log message.
"""
return self._tag
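
# Illustrative usage (a hedged sketch; the field values below are made up):
#   t = Template({"@timestamp": "2021-10-07T13:18:09.178477+02:00",
#                 "app_name": "nova", "template": "imagecache <*> base <*>"})
#   t.app_name   # -> "nova"
#   t.template   # -> "imagecache <*> base <*>"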
| 25.375 | 79 | 0.567488 | 2,028 | 0.999015 | 0 | 0 | 1,230 | 0.605911 | 0 | 0 | 957 | 0.471429 |