blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a07eed7fc4cd25cff6102764ad121bc3248e2bb2
|
7366e4c0f0b13f009570aaf5f07c0ec05f9457e4
|
/CFR_external_sampling.py
|
b7487253ec4b675c7aad940b2954f16bdea88921
|
[] |
no_license
|
MaxwellDeJong/incan_gold_CFR
|
d79be2a44dd22473995ffea718ef4e9af3399438
|
9ac8ab35d7b093ed93a3d656f7b69aa8238d2da5
|
refs/heads/master
| 2023-02-12T20:07:49.050691 | 2021-01-10T05:03:55 | 2021-01-10T05:03:55 | 328,307,562 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,558 |
py
|
import numpy as np
import copy
import torch
from memory import Buffer
NUM_PLAYERS = 2
def calc_payoff(h, p, round_vars, update_round_vars=False):
    '''Calculate the round payoff for player ``p`` from a history ``h``.

    A history is a flat list of frames ``[card, action_0, action_1, ...]``:
    one drawn card (a string) followed by one action per player (ints,
    0 = leave, 1 = stay). Gems drawn are split evenly among active players;
    the integer remainder accumulates and is claimed by leaving players.
    Artifact points are only awarded when exactly one player leaves alone.
    A second copy of the same monster ends the round with payoff 0.

    When ``update_round_vars`` is True, cross-round bookkeeping in
    ``round_vars`` (removed monsters, collected/destroyed artifacts) is
    mutated as a side effect.
    '''
    round_scores = np.zeros(NUM_PLAYERS)
    # Each frame is (card + NUM_PLAYERS actions) long; the trailing frame may
    # be incomplete, hence the padding below.
    n_played_cards = 1 + int(len(h[1:]) / (NUM_PLAYERS + 1))
    num_artifact_pts = 0
    num_round_artifacts = 0
    num_tot_artifacts = round_vars['n_collected_artifacts']
    active_players = np.ones(NUM_PLAYERS)
    played_monsters = np.zeros(5)
    shared_gems = 0
    rem_gems = 0
    for i in range(n_played_cards):
        leaving_players = np.zeros(NUM_PLAYERS)
        history_idx = i * (NUM_PLAYERS + 1)
        drawn_card = h[history_idx]
        if len(h[history_idx+1:]) > NUM_PLAYERS:
            actions = h[history_idx+1:(i+1)*(NUM_PLAYERS+1)]
        else:
            # Incomplete final frame: treat missing actions as "leave" (0).
            n_missing_actions = NUM_PLAYERS - len(h[(history_idx+1):])
            actions = h[(history_idx+1):] + [0] * n_missing_actions
        if drawn_card[0] == 'M':
            # Monster card, e.g. 'M3' -> tally slot 2.
            monster_idx = int(drawn_card[1]) - 1
            played_monsters[monster_idx] += 1
        elif drawn_card[0] == 'A':
            # Artifacts are worth 5 points each until more than 3 have been
            # collected in total, then 10.
            num_tot_artifacts += 1
            num_round_artifacts += 1
            if (num_tot_artifacts > 3):
                num_artifact_pts += 10
            else:
                num_artifact_pts += 5
        else:
            # Gem card: split evenly among players still in the temple; the
            # remainder stays on the path for whoever leaves later.
            gem_value = int(drawn_card)
            shared_gems += int(gem_value / np.sum(active_players))
            rem_gems += gem_value % np.sum(active_players)
        if np.any(played_monsters == 2):
            # Second copy of a monster: round ends immediately with no payoff;
            # that monster is removed from future decks.
            monster_idx = np.argwhere(played_monsters == 2)[0][0]
            if update_round_vars:
                round_vars['removed_monsters'][monster_idx] += 1
            return 0
        for j in range(len(actions)):
            action = actions[j]
            if (active_players[j] == 1) and (action == 0):
                leaving_players[j] = 1
                active_players[j] = 0
        n_leaving_players = np.sum(leaving_players)
        if n_leaving_players == 0:
            rem_gem_contrib = 0
        else:
            # Leaving players split the leftover path gems between them.
            rem_gem_contrib = int(rem_gems / n_leaving_players)
            rem_gems -= (rem_gem_contrib * n_leaving_players)
        for j in range(NUM_PLAYERS):
            if (leaving_players[j] == 1):
                if n_leaving_players == 1:
                    # A lone leaver claims all pending artifact points.
                    round_scores[j] += num_artifact_pts
                    if update_round_vars:
                        round_vars['n_collected_artifacts'] += num_round_artifacts
                    num_artifact_pts = 0
                    num_round_artifacts = 0
                round_scores[j] += shared_gems
                round_scores[j] += rem_gem_contrib
        if leaving_players[p] == 1:
            # Player p has left: their score is final for this round.
            # NOTE(review): unclaimed artifacts are booked as destroyed here
            # even though the round continues for other players — confirm.
            if update_round_vars:
                round_vars['n_destroyed_artifacts'] += num_round_artifacts
            return round_scores[p]
    if update_round_vars:
        round_vars['n_destroyed_artifacts'] += num_round_artifacts
    return round_scores[p]
def double_monster(h):
    """Return True if any monster card appears twice in history h."""
    n_played_cards = 1 + int(len(h[1:]) / (NUM_PLAYERS + 1))
    if n_played_cards < 2:
        # Fewer than two cards drawn: no pair is possible.
        return False
    monster_tally = np.zeros(5)
    # Cards sit at every (NUM_PLAYERS + 1)-th slot of the history.
    frame_len = NUM_PLAYERS + 1
    for card_slot in range(0, n_played_cards * frame_len, frame_len):
        card = h[card_slot]
        if card[0] == 'M':
            monster_tally[int(card[1]) - 1] += 1
    return np.any(monster_tally > 1)
def all_leave(h):
    '''Return True when every player chose to leave on the last completed frame.

    Inspects the most recent (card, action, ..., action) frame, keeps only
    the non-string entries (the player actions), and reports whether their
    sum is zero, i.e. nobody chose to stay.
    '''
    # Fix: removed the unused local `leave_termination` from the original.
    if len(h) < (NUM_PLAYERS + 1):
        return False
    prev_actions = h[-1 * (NUM_PLAYERS + 1):]
    player_actions = [prev_actions[i] for i in range(NUM_PLAYERS + 1) if not is_str(prev_actions[i])]
    return (np.sum(player_actions) == 0)
def is_hist_terminal(h):
    """A history is terminal when a monster repeats or every player has left."""
    if double_monster(h):
        return True
    return all_leave(h)
def is_hist_p_terminal(h, p):
    """Return True when the history is over from player p's point of view.

    That is the case when a duplicate monster ended the round, or when the
    action slot in the last completed frame records a "leave" (0).
    """
    p_has_left = len(h) >= NUM_PLAYERS + 1 and h[-(NUM_PLAYERS + 1)] == 0
    return double_monster(h) or p_has_left
def available_actions(h, player):
    """Return the legal actions: [0] once a player has left, else [0, 1]."""
    # A player who played "leave" (0) in the previous frame can only keep
    # leaving; otherwise both leave (0) and stay (1) are available.
    if len(h) >= NUM_PLAYERS + 1 and h[-(NUM_PLAYERS + 1)] == 0:
        return [0]
    return [0, 1]
def is_chance_node(h):
    """Return True when the next history entry should be a card draw."""
    if not h:
        # Empty history: the round starts with a draw.
        return True
    if len(h) < NUM_PLAYERS + 1:
        # Mid-frame: players are still acting on the first card.
        return False
    # A full frame ago there was a card iff the last frame is complete.
    return is_str(h[-(NUM_PLAYERS + 1)])
def calc_n_max_artifact_pts(round_vars):
    """Return the maximum artifact points still obtainable this round.

    One artifact enters the deck per round; those already collected or
    destroyed are gone. Each artifact is worth 5 points until more than 3
    have been collected overall, after which each is worth 10.
    """
    # Fix: replaced the original's unused loop variable and running counter
    # with a direct threshold test; adds the missing docstring.
    n_collected = round_vars['n_collected_artifacts']
    n_destroyed = round_vars['n_destroyed_artifacts']
    n_remaining = round_vars['round_num'] - (n_collected + n_destroyed)
    artifact_pts = 0
    for offset in range(1, n_remaining + 1):
        # The (n_collected + offset)-th artifact overall crosses the
        # 5 -> 10 point threshold once more than 3 are collected.
        artifact_pts += 10 if (n_collected + offset) > 3 else 5
    return artifact_pts
def embed_history(h, round_vars):
    '''Embed a raw history into a fixed-size feature dict for the networks.

    The returned dict summarizes the public round state: gem cards seen,
    gems already shared, undistributed remainder gems, pending artifact
    points, monsters drawn (split by whether a copy was removed in an
    earlier round), players remaining, monsters removed, and the maximum
    artifact points still reachable. Unlike calc_payoff, this never
    mutates ``round_vars``.
    '''
    embedded_history = {'n_gem_cards_played': 0,
                        'n_shared_gems': 0,
                        'n_rem_gems': 0,
                        'n_artifact_pts': 0,
                        'n_full_monsters': 0,
                        'n_handicapped_monsters': 0,
                        'n_rem_players': NUM_PLAYERS,
                        'n_removed_monsters': 0,
                        'n_max_artifact_pts': 5}
    embedded_history['n_removed_monsters'] = np.sum(round_vars['removed_monsters'])
    embedded_history['n_max_artifact_pts'] = calc_n_max_artifact_pts(round_vars)
    if len(h) == 0:
        # Fresh round: only the cross-round features are meaningful.
        return embedded_history
    round_scores = np.zeros(NUM_PLAYERS)
    n_played_cards = 1 + int(len(h[1:]) / (NUM_PLAYERS + 1))
    n_collected_artifacts = 0
    active_players = np.ones(NUM_PLAYERS)
    n_monsters_played = np.zeros(5)
    for i in range(n_played_cards):
        leaving_players = np.zeros(NUM_PLAYERS)
        history_idx = i * (NUM_PLAYERS + 1)
        drawn_card = h[history_idx]
        if len(h[history_idx+1:]) > NUM_PLAYERS:
            actions = h[history_idx+1:(i+1)*(NUM_PLAYERS+1)]
        else:
            # Trailing, possibly incomplete frame — no padding here (see
            # the break below).
            actions = h[history_idx+1:]
        if drawn_card[0] == 'M':
            monster_idx = int(drawn_card[1]) - 1
            # A monster with a removed copy is less dangerous ("handicapped").
            if round_vars['removed_monsters'][monster_idx] == 0:
                embedded_history['n_full_monsters'] += 1
            else:
                embedded_history['n_handicapped_monsters'] += 1
            n_monsters_played[monster_idx] += 1
        elif drawn_card[0] == 'A':
            # 5 points per artifact until more than 3 collected, then 10.
            if (n_collected_artifacts + round_vars['n_collected_artifacts']) > 3:
                embedded_history['n_artifact_pts'] += 10
            else:
                embedded_history['n_artifact_pts'] += 5
            n_collected_artifacts += 1
        else:
            gem_value = int(drawn_card)
            embedded_history['n_gem_cards_played'] += 1
            embedded_history['n_shared_gems'] += int(gem_value / np.sum(active_players))
            embedded_history['n_rem_gems'] += gem_value % np.sum(active_players)
        # Only distribute remaining gems and artifacts if all players
        # have acted
        if len(actions) < NUM_PLAYERS:
            break
        for j in range(len(actions)):
            action = actions[j]
            if (active_players[j] == 1) and (action == 0):
                leaving_players[j] = 1
                active_players[j] = 0
        n_leaving_players = np.sum(leaving_players)
        if n_leaving_players == 0:
            rem_gem_contrib = 0
        else:
            rem_gem_contrib = int(embedded_history['n_rem_gems'] / n_leaving_players)
            embedded_history['n_rem_gems'] -= (rem_gem_contrib * n_leaving_players)
        for j in range(len(actions)):
            if (leaving_players[j] == 1) and (n_leaving_players == 1):
                # A lone leaver took the artifacts with them: nothing left
                # on the table for the remaining players.
                if embedded_history['n_artifact_pts'] != 0:
                    embedded_history['n_artifact_pts'] = 0
                    embedded_history['n_max_artifact_pts'] = 0
        embedded_history['n_rem_players'] = np.sum(active_players)
    return embedded_history
def predict_strategy(net_arr, pid, embedded_history):
    # NOTE(review): appears unfinished — the feature values are flattened to
    # a list but the function returns None and neither net_arr nor pid is
    # used. Confirm intent before relying on this.
    embedded_state = list(embedded_history.values())
def full_game_traversal(pid, net_arr, advantage_buffer_arr, strategy_buffer, t):
    """Run one external-sampling CFR traversal over all five rounds.

    Round-level bookkeeping (removed monsters, artifact counts, round
    number) is threaded through every round via a single mutable dict.
    """
    round_vars = {
        'removed_monsters': [0, 0, 0, 0, 0],
        'n_collected_artifacts': 0,
        'n_destroyed_artifacts': 0,
        'round_num': 1,
    }
    for round_num in range(1, 6):
        round_vars['round_num'] = round_num
        # Each round starts from an empty history.
        traverse([], pid, round_vars, net_arr, advantage_buffer_arr, strategy_buffer, t)
def traverse(h, pid, round_vars, net_arr, advantage_buffer_arr, strategy_buffer, t):
    '''Recursively traverse the game tree with external sampling.

    At the traversing player's (``pid``) decision nodes both actions are
    explored and the counterfactual regrets are stored in that player's
    advantage buffer; at opponent nodes and chance nodes a single action is
    sampled. ``t`` is the CFR iteration index used to weight buffer entries.
    Returns the sampled counterfactual value of history ``h`` for ``pid``.
    '''
    if is_hist_terminal(h):
        return calc_payoff(h, pid, round_vars)
    # Chance node
    if is_chance_node(h):
        chance_node = ChanceNode(h, round_vars)
        action = chance_node.sample_action()
        return traverse(h + [action], pid, round_vars, net_arr, advantage_buffer_arr, strategy_buffer, t)
    embedded_history = embed_history(h, round_vars)
    if (get_active_player(h) == pid):
        if is_hist_p_terminal(h, pid):
            # pid already left: remaining play cannot change their payoff.
            return calc_payoff(h, pid, round_vars)
        strategy = net_arr.get_strategy(embedded_history, pid)
        regret_arr = np.zeros(2)
        # Explore BOTH actions for the traversing player (external sampling).
        for a in [0, 1]:
            regret_arr[a] = traverse(h + [a], pid, round_vars, net_arr, advantage_buffer_arr, strategy_buffer, t)
        # Advantage = action value minus the strategy's expected value.
        mean_value = np.dot(strategy, regret_arr)
        regret_arr -= mean_value
        advantage_buffer_arr[pid].add(embedded_history, t, regret_arr)
        return mean_value
    else:
        opp_idx = get_active_player(h)
        avail_actions = available_actions(h, opp_idx)
        if len(avail_actions) == 1:
            # Opponent already left: forced "leave" action, nothing to learn.
            return traverse(h + [0], pid, round_vars, net_arr, advantage_buffer_arr, strategy_buffer, t)
        # Sample a single opponent action from their current strategy and
        # record that strategy for average-policy training.
        strategy = net_arr.get_strategy(embedded_history, opp_idx)
        strategy_buffer.add(embedded_history, t, strategy)
        action = np.random.choice([0, 1], p=strategy)
        return traverse(h + [action], pid, round_vars, net_arr, advantage_buffer_arr, strategy_buffer, t)
def is_str(elem):
    """Return True if elem is a Python or NumPy string (a card, not an action)."""
    # np.str_ subclasses str, so one isinstance check covers both of the
    # exact-type tests the original performed, and is the idiomatic form.
    return isinstance(elem, str)
def get_active_player(h):
    '''Return the index of the player whose turn it is to act.

    For a short history (no complete frame yet) the position in the first
    frame directly gives the acting player.
    '''
    player = 0
    if len(h) < NUM_PLAYERS+1:
        return len(h) - 1
    # NOTE(review): the index below looks like it was meant to be
    # h[-(NUM_PLAYERS + 1) + i] (scan the last frame). As written it checks
    # h[-1], then h[0], h[1], ... — and since h[0] is always a card (a
    # string), the loop effectively returns 0 when the last entry is a card
    # and 1 otherwise. That is correct for NUM_PLAYERS == 2 only by
    # accident; confirm before changing NUM_PLAYERS.
    for i in range(NUM_PLAYERS+1):
        if is_str(h[-1 + i]):
            return player
        else:
            player += 1
    return player
class ChanceNode():
    """Chance node: the distribution over the next card drawn from the deck."""

    def __init__(self, h, round_vars):
        # Cache the remaining deck and its draw probabilities for this history.
        self.rem_cards, self.probs = self.get_remaining_cards(h, round_vars)

    def get_all_cards(self, round_vars):
        """Return (cards, counts) for the full deck at the start of the round.

        One artifact enters the deck per round, minus those already
        collected or destroyed; each removed monster copy shrinks that
        monster's count from its base of 3.
        """
        all_gems = ['1', '2', '3', '4', '5', '7', '9', '11', '13', '14', '15']
        gem_counts = [1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1]
        n_artifacts = round_vars['round_num'] - (
            round_vars['n_collected_artifacts'] + round_vars['n_destroyed_artifacts'])
        all_artifacts = ['A' + str(idx) for idx in range(1, n_artifacts + 1)]
        artifact_counts = [1] * len(all_artifacts)
        all_monsters = ['M' + str(num) for num in range(1, 6)]
        monster_counts = [3 - removed for removed in round_vars['removed_monsters']]
        all_cards = all_gems + all_artifacts + all_monsters
        all_counts = gem_counts + artifact_counts + monster_counts
        return (all_cards, all_counts)

    def get_remaining_cards(self, actions_history, round_vars):
        """Return (cards, probs) for the deck after removing already-drawn cards."""
        all_cards, all_counts = self.get_all_cards(round_vars)
        if len(actions_history) == 0:
            return (all_cards, all_counts / np.sum(all_counts))
        n_played_cards = 1 + int(len(actions_history[1:]) / (NUM_PLAYERS + 1))
        # Decrement the count of every card already drawn this round and
        # remember which entries were exhausted by those draws.
        depleted = set()
        for draw in range(n_played_cards):
            played_card = actions_history[draw * (NUM_PLAYERS + 1)]
            master_idx = all_cards.index(played_card)
            all_counts[master_idx] -= 1
            if all_counts[master_idx] == 0:
                depleted.add(master_idx)
        rem_cards = [card for idx, card in enumerate(all_cards) if idx not in depleted]
        rem_counts = [count for idx, count in enumerate(all_counts) if idx not in depleted]
        return (rem_cards, rem_counts / np.sum(rem_counts))

    def sample_action(self):
        """Draw one card according to the remaining-deck probabilities."""
        return np.random.choice(self.rem_cards, p=self.probs)
if __name__ == '__main__':
    # Ad-hoc smoke test of the artifact bookkeeping.
    # NOTE(review): this dict uses the key 'round', while the rest of the
    # module (full_game_traversal, calc_n_max_artifact_pts, get_all_cards)
    # uses 'round_num' — the commented-out ChanceNode calls below would
    # raise KeyError with this dict. Confirm which key is intended.
    h = ['M3', 1, 1, '3', 1, 1, 'A1', 1, 1, 'M3']
    round_vars = {'removed_monsters': [0] * 5,
                  'n_collected_artifacts': 1,
                  'n_destroyed_artifacts': 0,
                  'round': 2}
    # Redundant: 'n_collected_artifacts' is already 1 in the literal above.
    round_vars['n_collected_artifacts'] = 1
    round_vars['removed_monsters'][2] = 1
    n_artifacts = round_vars['round'] - (round_vars['n_collected_artifacts'] + round_vars['n_destroyed_artifacts'])
    print(n_artifacts)
    #CN = ChanceNode(h, round_vars)
    #all_cards = CN.get_all_cards(round_vars)
    #rem_cards = CN.get_remaining_cards(h, round_vars)
    #print('All cards: ', all_cards)
    #print('Rem cards: ', rem_cards)
|
[
"[email protected]"
] | |
6648216653a33b3c0b1253ac7b7ca4de21dfac0a
|
95bad2478065ffaa00be48644675b49f71b74bdc
|
/fuel_additive/api/bootstrap.py
|
f79e61b7cbafe87de3d83380e7a07f2ce4094312
|
[
"Apache-2.0"
] |
permissive
|
skdong/fuel-additive
|
f727f27f08a528d3dccea02b90d2f5f443a0227f
|
a0ce9516ee7510a1ed02264a775cb50b35b84b48
|
refs/heads/master
| 2020-06-18T22:55:13.507181 | 2019-08-23T05:43:33 | 2019-08-23T05:43:33 | 196,483,393 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
from oslo_log import log as logging
from oslo_config import cfg
from fuel_agent.utils import utils
from fuel_additive.drivers.os.bootstrap import Bootstrap as Driver
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Bootstrap(object):
    """Build a bootstrap image from input data via the configured drivers."""

    def __init__(self, data):
        # Resolve the data driver named in CONF and let it parse the input.
        self.config = utils.get_driver(CONF.data_driver)(data)

    def create(self):
        """Instantiate the OS bootstrap driver from the config and build it."""
        bootstrap_driver = Driver.create(self.config)
        bootstrap_driver.build()
|
[
"[email protected]"
] | |
7eaa61249897f9720dec448a49c97158fa47b5ae
|
0b2e4cb5c0cd81ecca262f06455f40a14ce1809f
|
/text/preprocessing.py
|
fd21b3004105fa1671e398eb3847667d80ea2b3f
|
[] |
no_license
|
gmaggiotti/deeplearning-examples
|
f37cca697cb3b0846470d0e65543bc027635fc3f
|
1ac147dfdcaf463299fad95bc49f9f081dd728ed
|
refs/heads/master
| 2023-02-19T23:25:20.812573 | 2022-08-04T17:17:03 | 2022-08-04T17:17:03 | 91,253,148 | 3 | 0 | null | 2023-02-15T21:36:15 | 2017-05-14T15:33:30 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 637 |
py
|
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import numpy as np
# Demonstration of a basic NLTK text-preprocessing pipeline:
# tokenize -> remove stop words -> stem / lemmatize.
ps = PorterStemmer()
lemmatizer = WordNetLemmatizer()

sentence = "Books are on the table and the radio is playing a song".lower()
words = word_tokenize(sentence)
print(words)

# Drop common English stop words before stemming/lemmatizing.
stop_words = stopwords.words('english')
print(stop_words)
filtered = [word for word in words if word not in stop_words]
print(filtered)

# Crude suffix stripping with the Porter stemmer.
# Fix: removed the original's dead `np.vectorize(ps.stem)` statement, whose
# result was never assigned or used.
stemmed = [ps.stem(word) for word in filtered]
print(stemmed)

# Dictionary-based lemmatization (defaults to treating words as nouns).
lemm = [lemmatizer.lemmatize(word) for word in filtered]
print(lemm)
|
[
"[email protected]"
] | |
cb5cd586e26ea5262665d65a424da8f4e90f7647
|
10d85f6a962c323c522f68ed58c16c39e57b573b
|
/run-length-encoding/run_length_encoding.py
|
e8f6e786655a62478cd085024033de16d9bdc725
|
[] |
no_license
|
arpit04/Python-Exercism
|
cf833a7684cf9ee03ca092cb9f6ec9e57fac90d3
|
e429c83137da2c2159b56820b45efe4960793ba8
|
refs/heads/master
| 2020-08-23T00:14:04.161763 | 2019-12-06T10:59:50 | 2019-12-06T10:59:50 | 216,498,101 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,096 |
py
|
def encode(string):
    """Run-length encode *string*.

    Runs of length > 1 become "<count><char>"; single characters are kept
    as-is (e.g. 'awwab' -> 'a2wab'). Returns '' for the empty string.
    """
    # Fix: the original duplicated the emit logic across four nested
    # branches (with a redundant `i != len(string)-1` re-check) and built
    # the result with repeated string concatenation. This scans each run
    # once and joins the parts at the end.
    encoded_parts = []
    i = 0
    n = len(string)
    while i < n:
        # Find the end of the run starting at i.
        j = i
        while j < n and string[j] == string[i]:
            j += 1
        run_length = j - i
        if run_length == 1:
            encoded_parts.append(string[i])
        else:
            encoded_parts.append(str(run_length) + string[i])
        i = j
    return ''.join(encoded_parts)
def decode(string):
    """Decode a run-length encoded string (e.g. '3ab2c' -> 'aaabcc').

    A character preceded by digits is repeated that many times; a bare
    character stands for itself.
    """
    # Fix: the original first ran its input through encode(), so decoding
    # any string containing adjacent repeated characters (e.g. the count
    # '11' in '11a') produced the wrong result. Decode must operate on the
    # encoded input directly.
    decoded = ''
    digit_run = 0
    for idx in range(len(string)):
        char = string[idx]
        if char.isdigit():
            # Accumulate the length of the count prefix.
            digit_run += 1
        else:
            # No digit prefix means an implicit count of 1.
            count = int(string[idx - digit_run: idx] or 1)
            decoded += char * count
            digit_run = 0
    return decoded
# Demo: encode a sample string and run it back through the decoder.
string = 'awwwwrrrrxttttpppppm'
print("Your Raw String is : ",string)
print("Encoded String : ",encode(string))
print("Decoded String is : ",decode(string))
|
[
"[email protected]"
] | |
2805480b81fd54fb75c0e4226de2d61cc95d2377
|
4b94e4abe6a9298ce1072d0d64d0ca1aedf19da2
|
/landing_page/migrations/0003_commenti.py
|
26859e3b84b9a00ca2e360d00bb7923cecd52375
|
[] |
no_license
|
gitsh1t/vetrina_test
|
7e30e1766e3700e31af487357b23cd7d79712615
|
7295b49569448b2e311c481014938e850dec67ce
|
refs/heads/main
| 2023-04-30T16:56:43.851201 | 2021-05-10T12:35:22 | 2021-05-10T12:35:22 | 365,960,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,096 |
py
|
# Generated by Django 3.0.8 on 2020-10-01 15:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Commenti`` model: reader comments on landing_page.Post."""

    dependencies = [
        ('landing_page', '0002_post'),
    ]

    operations = [
        migrations.CreateModel(
            name='Commenti',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('corpo', models.TextField()),
                ('creato', models.DateTimeField(auto_now_add=True)),
                ('aggiornato', models.DateTimeField(auto_now=True)),
                ('attivo', models.BooleanField(default=True)),
                # Each comment belongs to one Post; deleting the Post
                # cascades to its comments (reverse accessor: post.comments).
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='landing_page.Post')),
            ],
            options={
                # List comments oldest-first by creation time.
                'ordering': ('creato',),
            },
        ),
    ]
|
[
"[email protected]"
] | |
f4f82fdf2defdca2f29897494104c073ac82e9a8
|
4335c4674202cea368eeb4ab8844e04474c21cd9
|
/get_url.py
|
5d566c375b3088f1053556036f620acbcaef6607
|
[
"MIT"
] |
permissive
|
MrYo531/TL-DrBot
|
6e87b3dcc99805a51f94bccbf253020ca1931c88
|
e0331d7c27e6db64590c71baef44d8748081eba4
|
refs/heads/main
| 2023-01-21T11:35:54.745727 | 2020-11-21T22:45:29 | 2020-11-21T22:45:29 | 314,695,346 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 645 |
py
|
import requests
import json
# Gets the embedded article url from the given tweet
# This uses v2 of the twitter API (v1 from tweepy was inconsistent)
def get_url(bearer_token, tweet_id):
    """Return the first expanded URL embedded in the given tweet.

    Uses the Twitter API v2 single-tweet lookup with the ``entities``
    tweet field. Falls back to the Twitter homepage when the tweet has no
    URL entities (or the response is missing the expected keys).
    """
    headers = {
        'Authorization': 'Bearer {}'.format(bearer_token),
    }
    params = (
        ('tweet.fields', 'entities'),
    )
    response = requests.get(f'https://api.twitter.com/2/tweets/{tweet_id}', headers=headers, params=params)
    # Fix: use the requests JSON decoder instead of json.loads(response.text).
    tweet = response.json()
    # Fix: the original indexed tweet["data"]["entities"] directly and raised
    # KeyError for tweets without entities (or for API error responses);
    # fall back to the homepage in those cases instead.
    urls = tweet.get("data", {}).get("entities", {}).get("urls")
    if not urls:
        return "https://www.twitter.com/"
    return urls[0]["expanded_url"]
|
[
"[email protected]"
] | |
55de08feb376e6244032a61051eb65c7227e4d36
|
a6c103844d15cf1126a79ab8ea548b338ad5bce5
|
/test/interfaceframework/script/ind_interface/test_updateuser.py
|
f8c0d3bcc88742ce9e16c4751df6656f5356bbed
|
[] |
no_license
|
cherry1203-cloud/autotest
|
a3da734842267572e773dd653eca1fd3bcc8e4da
|
9b929ef10172a4884037b0f101ccbf472a479ffb
|
refs/heads/master
| 2020-09-29T21:59:00.271795 | 2019-12-12T02:16:05 | 2019-12-12T02:16:05 | 227,131,600 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,437 |
py
|
#v1.0 Tests for the update-user-information endpoint, built on the unittest framework
#Updating personal information requires the sessionID obtained from the login endpoint
#Endpoint description:
#Endpoint URL: http://localhost:8080/jwshoplogin/user/update_information.do
#Endpoint parameters: 1.email 2.phone 3.answer 4.question
#Expected responses: "email already exists, change it and retry" / "update failed" / "update succeeded"
#Script implementation
#Import the required test modules
import unittest
import requests
#Define the test class, inheriting from the unittest framework
class test_updateuser(unittest.TestCase):
    """Integration tests for the update-user-information endpoint.

    NOTE(review): these hit a live jwshop server on localhost:8080 (no
    mocking) — the suite requires that server to be running.
    """

    def setUp(self):
        # Log in first: update_information.do requires the JSESSIONID
        # cookie issued by the login endpoint.
        url="http://localhost:8080/jwshoplogin/user/login.do"
        userinfo={"username":"meimei","password":"123456"}
        response=requests.post(url,userinfo)
        self.session=dict(response.cookies)['JSESSIONID']
        print(self.session)

    def test_case1(self):
        # Post an update with the login session cookie and assert the
        # server answers with the "update succeeded" message.
        url="http://localhost:8080/jwshoplogin/user/update_information.do"
        userinfo={"email":"[email protected]",
                  "phone":"13211111111",
                  "answer":"西游记1",
                  "question":"喜欢的书1"}
        # session = {"JSESSIONID": "23A54B7E221BD45BAE2F3E9F142EB8CB"}
        session={'JSESSIONID':self.session}
        response=requests.post(url,userinfo,cookies=session).text
        print(response)
        self.assertIn("更新个人信息成功",response)
if __name__ == '__main__':
    # Run the suite directly: python test_updateuser.py
    unittest.main()
|
[
"[email protected]"
] | |
776556e7e22349f95f42676b03f1a3a64e805073
|
9a0adae237f159a77b2357f1e2e70db8af61d419
|
/mysite/settings.py
|
fdf2912825bd0534bde7e9ff5dfa1bab4710d43b
|
[] |
no_license
|
aakash003/my-first-blog
|
3390ecc8e052075f0702f6ebe969c3210263a972
|
7ac7cff1c4608470ed20f75b029aea7b94ba14c8
|
refs/heads/master
| 2021-05-16T11:27:47.765880 | 2017-10-01T03:02:57 | 2017-10-01T03:02:57 | 104,975,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,118 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = '&&5xt+#y_sioc*=h-e9!3%zla&j^$i2lgqvigkb13h)axwk!su'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = [ 'localhost' ]

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app containing the blog models and views.
    'blog',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
9e260b27e1ea2548669f321717fbdef594ce67a1
|
f1fe58c1ae11f9b80f08afad5354f54f96f0dfe9
|
/pitho/controllers/user/login.py
|
a027d0eb870b0f44425e74e73b9e66b19f3baf37
|
[] |
no_license
|
talqeen23/College-Canteen-Manager
|
701af39af08c3fcd80d4bab28c76d655d4e7783e
|
a73ca65f7866ea6f816e908b7ba11e3486a7478f
|
refs/heads/master
| 2023-08-15T19:06:33.511362 | 2021-10-05T09:42:38 | 2021-10-05T09:42:38 | 413,758,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,102 |
py
|
from pitho import app
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Length, Email
from flask import render_template, request, redirect, session
from pitho.models.user.model_login import LoginModel
class MyForm(FlaskForm):
    """Login form: a valid email address and a password of at least 8 chars."""
    youremail = StringField("Email", validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired(), Length(min=8)])
@app.route("/", methods=['GET', 'POST'])
def user_login():
    """Show the login page and route authenticated users to their dashboard."""
    # Already-authenticated sessions skip the form entirely.
    if session.get("logged_in") is not None:
        return redirect("/dashboard")
    message = ""
    login_form = MyForm(request.form)
    login_model = LoginModel()
    if login_form.validate_on_submit():
        # Credentials check also populates session state (user_type etc.).
        if login_model.logincheck(request.form) == True:
            user_type = session.get('user_type')
            if user_type == 1:
                return redirect("/dashboard")
            if user_type == 2:
                return redirect("/author-dashboard")
    # GET request, invalid form, or failed login: render the login page.
    return render_template("user/login.html", data=message, form=login_form)
@app.route("/logout")
def logout():
    """Clear the login flag and return to the login page."""
    # NOTE(review): the flag is set to None rather than popped; user_login
    # checks `is not None`, so this is sufficient to log the user out.
    session['logged_in'] = None
    return redirect("/")
|
[
"[email protected]"
] | |
b3d3277c535eaa6f706a071f5b547c8b412419d8
|
c1d03f41b6c80ef1e0a42b1bb710ba90d680e4c2
|
/tests/unit/test_xmlgen.py
|
f4224d6cded6be0fe94660d2a9f52f5f3283b56e
|
[
"BSD-3-Clause"
] |
permissive
|
boxingbeetle/softfab
|
4f96fc389dec5cd3dc987a427c2f491a19cbbef4
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
refs/heads/master
| 2021-06-22T15:42:38.857018 | 2020-11-23T22:53:21 | 2020-11-23T22:53:21 | 169,245,088 | 20 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,294 |
py
|
# SPDX-License-Identifier: BSD-3-Clause
"""Test XML generation module."""
from pytest import raises
from softfab.xmlgen import parseHTML, xhtml
# Test text inside the <script> XHTML element:

def testScriptNoEscape():
    """Check that no escaping is performed when it is not necessary."""
    text = 'if (a > b) return c[3];'
    assert xhtml.script[text].flattenXML() == (
        f'<script xmlns="http://www.w3.org/1999/xhtml">{text}</script>'
    )

def testScriptCDATA():
    """Check that a CDATA block is used when necessary."""
    # '<' and '&' would need XML escaping, which would alter the script
    # source, so the generator must wrap the text in a CDATA section
    # (itself hidden inside a JS comment for non-XML parsers).
    text = 'if (a < b) return c[3];'
    assert xhtml.script[text].flattenXML() == (
        f'<script xmlns="http://www.w3.org/1999/xhtml">'
        f'/*<![CDATA[*/{text}/*]]>*/'
        f'</script>'
    )
    text = 'if (a = b) return c & 3;'
    assert xhtml.script[text].flattenXML() == (
        f'<script xmlns="http://www.w3.org/1999/xhtml">'
        f'/*<![CDATA[*/{text}/*]]>*/'
        f'</script>'
    )

def testScriptCDATAEnd():
    """Check that a CDATA block is not closed too early."""
    text = 'var f = x[y[i]]>0 && z<0;'
    #                ^^^-- CDATA end marker
    # The ']]>' sequence inside the script must be escaped so it cannot
    # terminate the CDATA section prematurely.
    assert xhtml.script[text].flattenXML() == (
        '<script xmlns="http://www.w3.org/1999/xhtml">'
        '/*<![CDATA[*/var f = x[y[i]]\\>0 && z<0;/*]]>*/'
        '</script>'
    )

def testScriptTagEnd():
    """Check that a <script> tag is not closed too early."""
    text = 'var s = "</script>";'
    assert xhtml.script[text].flattenXML() == (
        '<script xmlns="http://www.w3.org/1999/xhtml">'
        '/*<![CDATA[*/var s = "<\\/script>";/*]]>*/'
        '</script>'
    )
# Test text inside the <style> XHTML element.
# Since <script> is handled in the same way, we test fewer scenarios here.

def testStyleNoEscape():
    """Check that no escaping is performed when it is not necessary."""
    text = '.nav > a[href] { color: #FFC000 }'
    assert xhtml.style[text].flattenXML() == (
        f'<style xmlns="http://www.w3.org/1999/xhtml">{text}</style>'
    )

def testStyleCDATA():
    """Check that a CDATA block is used when necessary."""
    # The '&' forces CDATA wrapping, as it would otherwise need escaping.
    text = 'book.c /* K&R */'
    assert xhtml.style[text].flattenXML() == (
        f'<style xmlns="http://www.w3.org/1999/xhtml">'
        f'/*<![CDATA[*/{text}/*]]>*/'
        f'</style>'
    )

def testStyleTagEnd():
    """Check that a <style> tag is not closed too early."""
    text = '@import url(more.css); /* </StyLe */'
    # HTML tags are case-insensitive:       ^^^^^
    assert xhtml.style[text].flattenXML() == (
        '<style xmlns="http://www.w3.org/1999/xhtml">'
        '/*<![CDATA[*/@import url(more.css); /* <\\/StyLe *//*]]>*/'
        '</style>'
    )
# Test parsing of HTML fragments:

def testBasic():
    """Check whether basic functionality works."""
    parsed = parseHTML('<h1>Hello!</h1>')
    assert parsed.flattenXML() == (
        '<h1 xmlns="http://www.w3.org/1999/xhtml">Hello!</h1>'
    )

def testMultiTopLevel():
    """Check whether we can handle multiple top-level tags."""
    # Each top-level element carries its own xmlns declaration.
    parsed = parseHTML('<h1>Hello!</h1><h1>Goodbye!</h1>')
    assert parsed.flattenXML() == (
        '<h1 xmlns="http://www.w3.org/1999/xhtml">Hello!</h1>'
        '<h1 xmlns="http://www.w3.org/1999/xhtml">Goodbye!</h1>'
    )

def testNested():
    """Check handling of nested content."""
    parsed = parseHTML('<p>Text with <i>nested</i> tags.</p>')
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'Text with <i>nested</i> tags.'
        '</p>'
    )

def testVoid():
    """Check handling of void elements."""
    parsed = parseHTML('<p>Text with<br/>a void element.</p>')
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'Text with<br/>a void element.'
        '</p>'
    )

def testIgnorePI():
    """Check parsing of processing instruction with no handlers."""
    # With no piHandler installed, the PI is dropped from the output.
    parsed = parseHTML('<p>A processing <?jump> instruction.</p>')
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing instruction.'
        '</p>'
    )
def testRaisePI():
    """Check propagation of handler exceptions."""
    # An exception raised by the PI handler must surface to the caller
    # rather than being swallowed by the parser.
    def handler(name, arg):
        raise KeyError(f'unknown PI: {name}')
    with raises(KeyError):
        parseHTML(
            '<p>A processing <?jump> instruction.</p>',
            piHandler=handler
        )

def testNoArgPI():
    """Check parsing of processing instruction with no arguments."""
    def handler(name, arg):
        assert name == 'jump'
        assert arg == ''
        return xhtml.br
    parsed = parseHTML(
        '<p>A processing <?jump> instruction.</p>',
        piHandler=handler
    )
    # The handler's return value replaces the PI in the output tree.
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing <br/> instruction.'
        '</p>'
    )

def testArgPI():
    """Check parsing of processing instruction with an argument."""
    # Everything after the PI name is passed to the handler as one string.
    def handler(name, arg):
        assert name == 'jump'
        return xhtml.span[arg]
    parsed = parseHTML(
        '<p>A processing <?jump a little higher> instruction.</p>',
        piHandler=handler
    )
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing <span>a little higher</span> instruction.'
        '</p>'
    )
def testIgnoreXMLDecl():
    """Check parsing of XML declaration."""
    # The XML declaration must not be routed to the PI handler: the
    # handler asserts False to prove it is never invoked.
    def handler(name, arg):
        assert False
    parsed = parseHTML(
        '<?xml version="1.0" encoding="UTF-8" ?>'
        '<html><body><p>XHTML document.</p></body></html>',
        piHandler=handler
    )
    assert parsed.flattenXML() == (
        '<html xmlns="http://www.w3.org/1999/xhtml">'
        '<body><p>XHTML document.</p></body>'
        '</html>'
    )

def testIgnoreXMLSyntax():
    """Check parsing of a PI using XML syntax (question mark at end)."""
    # The expected output shows the trailing '?' is not part of the
    # argument handed to the handler.
    def handler(name, arg):
        assert name == 'jump'
        return arg.upper()
    parsed = parseHTML(
        '<p>A processing <?jump lazy fox?> instruction.</p>',
        piHandler=handler
    )
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing LAZY FOX instruction.'
        '</p>'
    )
|
[
"[email protected]"
] | |
8586507b15e96fdd61407bb3036d6c40ffb255e4
|
704f79c33714c3d747500792c74a60812dcfb327
|
/2018/Pyboard/on pyboard/pyboard_razor_IMU.py
|
9c9121faea62f677111be20ed0809149f93e9372
|
[] |
no_license
|
CRAWlab/ARLISS
|
4952e631613adc7cc7b7ab77de9537cfd6a3b7a5
|
39468e533b7b4d40a63f3e20fce9b4244054f342
|
refs/heads/master
| 2020-05-21T23:04:56.366569 | 2018-09-26T01:22:30 | 2018-09-26T01:22:30 | 39,913,463 | 3 | 3 | null | 2018-08-19T01:59:05 | 2015-07-29T20:00:19 |
HTML
|
UTF-8
|
Python
| false | false | 5,354 |
py
|
###############################################################################
# pyboard_RazorIMU.py
#
# Script demonstrating using the serial communication on the pyboard
# to receive data and set settings on the SparkFun Razor IMU
# https://www.sparkfun.com/products/10736
#
# This code assumes that the razor is running the firmware found at:
# https://github.com/ptrbrtz/razor-9dof-ahrs/blob/master/Arduino/Razor_AHRS/Razor_AHRS.ino
#
#
# Created: 09/14/16
# - Joshua Vaughan
# - [email protected]
# - http://www.ucs.louisiana.edu/~jev9637
#
# Modified:
# *
#
# TODO:
# * 09/14/16 - Implement binary parsing, only text parsing so far
# * 09/14/16 - Implement parsing of data in calibrated and raw modes
# * 09/14/16 - Improve parsing of data in angle mode
###############################################################################
import pyb # Is it bad practice to import the entire pyboard module?
class Razor(object):
    """Convenience wrapper for the SparkFun Razor IMU serial protocol.

    Commands are written over a pyb.UART instance; replies are parsed
    according to the currently selected output mode.
    """

    def __init__(self, port, baudrate, bits=8, parity=None, stop=1, read_buf_len=512):
        # Set up the UART communications.
        # NOTE(review): bits/parity/stop/read_buf_len are accepted for API
        # compatibility but are not forwarded to pyb.UART here — confirm
        # whether they should be.
        self.port = port
        self.baudrate = baudrate
        self.uart = pyb.UART(port, baudrate)
        # Start with streaming off.
        self.streaming = False
        self.uart.write('#o0')  # UART command to disable streaming
        # Default mode: angles in text format.
        self.mode = 'angles'
        self.uart.write('#ot')

    def stop_streaming(self):
        """Stop streaming data from the sensor (status LED off).

        Good for use with get_one_frame() for infrequent readings.
        """
        self.uart.write('#o0')
        self.streaming = False

    def start_streaming(self):
        """Start streaming data from the sensor (status LED on)."""
        self.uart.write('#o1')
        self.streaming = True

    def set_angle_output(self):
        """Select TEXT angle frames, e.g. "#YPR=-142.28,-5.38,33.52\\r\\n"."""
        self.uart.write('#ot')
        self.mode = 'angles'

    def set_all_calibrated_output(self):
        """Select CALIBRATED 9-axis TEXT output (acc, mag, gyr lines)."""
        self.uart.write('#osct')
        self.mode = 'calibrated'

    def set_all_raw_output(self):
        """Select RAW 9-axis TEXT output (acc, mag, gyr lines)."""
        self.uart.write('#osrt')
        self.mode = 'raw'

    def get_one_frame(self):
        """Request and parse one output frame.

        Returns (yaw, pitch, roll) floats in 'angles' mode, or a
        (None, None, None) tuple when no data is available, streaming is
        active, or the mode is not yet parseable ('calibrated'/'raw'
        parsing is still TODO and needs regex handling because the sensor
        line order is not guaranteed).

        Replies are bound to the sensor's internal 20 ms (50 Hz) raster,
        so '#f' can add up to ~20 ms of latency.
        """
        self.uart.write('#f')
        if not self.streaming:
            data = self.uart.readline()
            if data:
                if self.mode == 'calibrated':
                    # TODO: regex-based parsing of the three sensor lines.
                    pass
                elif self.mode == 'raw':
                    # TODO: regex-based parsing of the three sensor lines.
                    pass
                elif self.mode == 'angles':
                    # Frame looks like b'#YPR=-35.87,26.25,0.26\r\n': skip
                    # the 5-char prefix, strip the line ending, split on
                    # commas.  Fixed: the original sliced [5:-1], leaving a
                    # stray '\r' attached to the roll field.
                    fields = data[5:].decode('utf-8').strip().split(',')
                    yaw, pitch, roll = (float(f) for f in fields)
                    return yaw, pitch, roll
        # Fixed: always return a consistent 3-tuple so callers can safely
        # unpack (the original fell through returning bare None on several
        # paths).
        return None, None, None
## TODO: Implement binary output settings and reading
# "#oscb" - Output CALIBRATED SENSOR data of all 9 axes in BINARY format.
# One frame consist of three 3x3 float values = 36 bytes. Order is: acc x/y/z, mag x/y/z, gyr x/y/z.
# "#osrb" - Output RAW SENSOR data of all 9 axes in BINARY format.
# One frame consist of three 3x3 float values = 36 bytes. Order is: acc x/y/z, mag x/y/z, gyr x/y/z.
# "#osbb" - Output BOTH raw and calibrated SENSOR data of all 9 axes in BINARY format.
# One frame consist of 2x36 = 72 bytes - like #osrb and #oscb combined (first RAW, then CALIBRATED).
|
[
"[email protected]"
] | |
f025cacb2d88b8c5c786aaadfd9b7b0f43725934
|
9767e059c7e90231453ac2b26b580c0a7b619f3e
|
/main.py
|
8f0b1e932b5c12be71f8284ca56addc1a1231556
|
[] |
no_license
|
Centrovoi777/stepik-python
|
a3e2d8212cdad7dbcb727f226451ff7099b8cbe3
|
3c7576cd2b2e1b8b8e1629c1c967ebed155c07b4
|
refs/heads/master
| 2022-12-25T19:41:16.992400 | 2020-10-07T06:44:44 | 2020-10-07T06:44:44 | 301,950,600 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,077 |
py
|
import requests
import telebot
from telebot import types
import random
import json
import os
token = os.environ["TELEGRAM_TOKEN"]
api_url = 'https://stepik.akentev.com/api/millionaire'
bot = telebot.TeleBot(token)
a = {}
states = {}
START = 'start'
MAIN_STATE = 'main'
QUESTION = 'question'
STATES = 'answers'
try:
    # Load the persisted bot state.  Fixed: use a context manager so the
    # file handle is closed (the original passed open() straight to
    # json.load and leaked it).
    with open('db/data.json', 'r', encoding='utf-8') as state_file:
        data = json.load(state_file)
except FileNotFoundError:
    # First run: start with an empty state store.
    data = {
        'states': {},
        MAIN_STATE: {
        },
        QUESTION: {
        },
        STATES: {
        }
    }
def change_data(key, user_id, value):
    """Update one entry of the in-memory store and persist it to db/data.json."""
    data[key][user_id] = value
    # Fixed: write through a context manager; the original handed an open()
    # result to json.dump and never closed it, so the write could be left
    # unflushed.
    with open('db/data.json', 'w', encoding='utf-8') as state_file:
        json.dump(
            data,
            state_file,
            indent=2,
            ensure_ascii=False,
        )
def update_question():
    """Fetch a fresh quiz question from the millionaire API.

    Returns a dict with the question text, the shuffled answer list and
    the correct answer (the API lists the correct answer first).
    """
    reply = requests.get(api_url)
    payload = reply.json()
    print(payload)
    options = payload['answers']
    # Remember the right answer before shuffling destroys its position.
    right_answer = options[0]
    random.shuffle(options)
    fresh_question = {
        'question': payload['question'],
        'answers': options,
        'correct_answer': right_answer,
    }
    print(fresh_question)
    return fresh_question
def update_counter(a, key, value):
    """Append value to the list stored under key, creating the list if absent.

    Idiom fix: both branches of the original if/else are exactly the
    setdefault-and-append pattern.
    """
    a.setdefault(key, []).append(value)
class test:
    """Telegram quiz ('Millionaire') handlers.

    NOTE(review): despite being a class, the handlers below take a
    ``message`` argument (not ``self``) and are called as plain functions
    via ``test.main_handler(message)`` etc.; the class attributes hold the
    current question state, shared by *all* users.
    """
    # Current question, fetched once at class-creation (import) time.
    dict_question = update_question()
    counter = 0
    quest = dict_question["question"]
    answers = dict_question["answers"]
    correct_answer = dict_question["correct_answer"]
    # complexity = {}

    def __init__(self):
        self.text = None
        self.from_user = None

    @bot.message_handler(func=lambda message: True)
    def dispatcher(message):
        # Route the incoming message to the handler for the user's state.
        user_id = str(message.from_user.id)
        state = data['states'].get(user_id, MAIN_STATE)  # first-time users default to MAIN_STATE
        if state == MAIN_STATE:
            test.main_handler(message)
        elif state == QUESTION:
            test.question_handler(message)
        elif state == STATES:
            test.answer_area(message)

    def main_handler(message):
        # Entry state: greet on /start, offer the quiz on 'Миллионер'.
        user_id = str(message.from_user.id)
        if message.text == '/start':
            markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
            markup.add(types.KeyboardButton('Миллионер'))
            bot.send_message(
                user_id,
                'Это игра кто хочет стать миллионером',
                reply_markup=markup
            )
            change_data('states', user_id, MAIN_STATE)
        elif message.text == 'Миллионер':
            markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
            markup.add(types.KeyboardButton('задать вопрос'))
            bot.send_message(user_id, 'Хочешь задам вопрос?', reply_markup=markup)
            change_data('states', user_id, QUESTION)
        else:
            markup = types.ReplyKeyboardRemove()
            bot.send_message(user_id, 'Я тебя не понял', reply_markup=markup)

    def question_handler(message):
        # QUESTION state: send the current (or a fresh) question, or report
        # the running right/wrong tallies.
        user_id = str(message.from_user.id)
        print(message)
        if message.text == 'задать вопрос':
            if test.counter < 1:
                # First question ever: reuse the one fetched at class
                # creation.
                change_data('states', user_id, STATES)
                bot.send_message(user_id, test.quest)
                markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
                markup.add(
                    *[types.KeyboardButton(button) for button in test.answers]
                )
                bot.send_message(user_id, 'Выберите ответ', reply_markup=markup)
                test.counter = 1
                print(test.answers)
                print(test.counter)
            else:
                # Subsequent questions: fetch a fresh one from the API.
                test.dict_question = update_question()
                test.quest = test.dict_question["question"]
                test.answers = test.dict_question["answers"]
                test.correct_answer = test.dict_question["correct_answer"]
                # NOTE(review): unlike the branch above this writes the
                # state dict directly, without persisting via change_data.
                data['states'][user_id] = STATES
                # bot.send_message(user_id, 'Norm')
                bot.send_message(user_id, test.quest)
                markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
                markup.add(
                    *[types.KeyboardButton(button) for button in test.answers]
                )
                bot.send_message(user_id, 'Выберите ответ', reply_markup=markup)
        elif message.text == 'ответы':
            # Report tallies accumulated in the module-level dict `a`.
            bot.send_message(user_id, text=str(len(a['victory'])) + ' правильных ответов')
            bot.send_message(user_id, text=str(len(a['defeats'])) + ' неправильных ответов')
        else:
            bot.reply_to(message, 'Я тебя не понял 2')

    def answer_area(message):
        # STATES state: check the chosen answer and record the outcome.
        user_id = str(message.from_user.id)
        print(states)
        if message.text == test.correct_answer:
            bot.send_message(user_id, 'Молодец, правильно!')
            update_counter(a, 'victory', 0)
            markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
            markup.add(
                *[types.KeyboardButton(button) for button in ['задать вопрос', 'ответы']]
            )
            bot.send_message(user_id, 'Повторим?', reply_markup=markup)
            change_data('states', user_id, QUESTION)
        else:
            bot.send_message(user_id, 'Фу, не правильно')
            update_counter(a, 'defeats', 0)
            markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
            markup.add(types.KeyboardButton('задать вопрос'))
            bot.send_message(user_id, 'Повторим?', reply_markup=markup)
            change_data('states', user_id, QUESTION)
bot.polling()
|
[
"[email protected]"
] | |
3a9baf4f9122069e89d3d3e9c447adba687d8866
|
7942342d457276bb266228d0236af647b3d55477
|
/django/contrib/gis/gdal/geomtype.pyi
|
4d825dbc2a0344758cb103a9b71335753e67e32a
|
[
"MIT"
] |
permissive
|
AsymmetricVentures/mypy-django
|
847c4e521ce4dec9a10a1574f9c32b234dafd00b
|
f6e489f5cf5672ecede323132665ccc6306f50b8
|
refs/heads/master
| 2020-06-30T01:53:44.434394 | 2016-12-22T22:45:50 | 2016-12-22T22:45:50 | 74,397,884 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 459 |
pyi
|
# Stubs for django.contrib.gis.gdal.geomtype (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
class OGRGeomType:
    """Type stub for django.contrib.gis.gdal.geomtype.OGRGeomType.

    Modernized: stubgen's ``x = ...  # type: T`` comments are replaced
    with PEP 526 variable annotations (the stub already targets 3.6);
    runtime placeholder values are unchanged.
    """
    wkb25bit: int = ...
    num: Any = ...
    def __init__(self, type_input) -> None: ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    @property
    def name(self): ...
    @property
    def django(self): ...
    def to_multi(self): ...
|
[
"[email protected]"
] | |
dc0834c18416bb2944ac4d86f4bd5e62cca0f9ff
|
5b63774decae6dcd7208aee48e92f730e1364df3
|
/NGIOBenchmarksDataHandling/analyse_benchio_output.py
|
cb82a5adcdb5682e349968c937531c96df9b5436
|
[] |
no_license
|
adrianjhpc/scriptsandtools
|
9355ff7e0d0342a4cda9bd976ae0b797b8077143
|
161a9c0918f747da6d40d67f3907b58620e2fb9b
|
refs/heads/master
| 2023-07-19T00:25:02.078205 | 2023-07-12T08:42:33 | 2023-07-12T08:42:33 | 69,058,629 | 0 | 2 | null | 2020-07-20T11:20:18 | 2016-09-23T20:31:42 |
Shell
|
UTF-8
|
Python
| false | false | 13,529 |
py
|
#!/usr/bin/env python
#
# Analyse IOR output files
#
# System modules for grabbing data
import sys
import os.path
import re
from glob import glob
from datetime import datetime
import datetime as dt
import calendar
# Modules for analysing and visualising data
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
matplotlib.rcParams.update({'font.size': 9})
matplotlib.rcParams.update({'figure.autolayout': True})
from matplotlib import dates
def main(argv):
    """Parse every benchio_* result file in the directory named by the first
    CLI argument and plot min/max/average section times per I/O library
    (MPI-IO, HDF5, NetCDF) and stripe setting (maximum stripe vs default 4).

    NOTE(review): the dump this block was recovered from had its leading
    whitespace stripped; the nesting below was reconstructed from the
    control flow — verify against the original file.  Also note: `argv` is
    accepted but `sys.argv[1]` is read directly, and the mpiio/hdf5/netcdf
    flags are only bound once the matching header line has been seen, so a
    malformed input file can hit an unbound-variable error.  Left as-is.
    """
    maxsize = 10000
    resdir = sys.argv[1]
    files = get_filelist(resdir, "benchio_", maxsize)
    # Pre-sized result series: (count, min, max, av, rate, date) per
    # library / stripe-count combination.
    mpi_max_stripecount = 0
    mpi_max_stripemin = np.empty(maxsize)
    mpi_max_stripemax = np.empty(maxsize)
    mpi_max_stripeav = np.empty(maxsize)
    mpi_max_striperate = np.empty(maxsize)
    mpi_max_stripedate = [None] * maxsize
    mpi_4_stripecount = 0
    mpi_4_stripemin = np.empty(maxsize)
    mpi_4_stripemax = np.empty(maxsize)
    mpi_4_stripeav = np.empty(maxsize)
    mpi_4_striperate = np.empty(maxsize)
    mpi_4_stripedate = [None] * maxsize
    hdf5_max_stripecount = 0
    hdf5_max_stripemin = np.empty(maxsize)
    hdf5_max_stripemax = np.empty(maxsize)
    hdf5_max_stripeav = np.empty(maxsize)
    hdf5_max_striperate = np.empty(maxsize)
    hdf5_max_stripedate = [None] * maxsize
    hdf5_4_stripecount = 0
    hdf5_4_stripemin = np.empty(maxsize)
    hdf5_4_stripemax = np.empty(maxsize)
    hdf5_4_stripeav = np.empty(maxsize)
    hdf5_4_striperate = np.empty(maxsize)
    hdf5_4_stripedate = [None] * maxsize
    netcdf_max_stripecount = 0
    netcdf_max_stripemin = np.empty(maxsize)
    netcdf_max_stripemax = np.empty(maxsize)
    netcdf_max_stripeav = np.empty(maxsize)
    netcdf_max_striperate = np.empty(maxsize)
    netcdf_max_stripedate = [None] * maxsize
    netcdf_4_stripecount = 0
    netcdf_4_stripemin = np.empty(maxsize)
    netcdf_4_stripemax = np.empty(maxsize)
    netcdf_4_stripeav = np.empty(maxsize)
    netcdf_4_striperate = np.empty(maxsize)
    netcdf_4_stripedate = [None] * maxsize
    cmin = 0
    cmax = 0
    cav = 0
    ccount = 0
    # Loop over files getting data
    for file in files:
        infile = open(file, 'r')
        # Get date of test from file name
        tokens = file.split('_')
        datestring = tokens[-1].split('.')[0]
        runtime = datetime.strptime(datestring, "%Y%m%d%H%M%S")
        resdict = {}
        resdict['JobID'] = 'Unknown'
        header = True
        striping = 0
        for line in infile:
            if header:
                # Header section: job geometry and which library's results
                # follow next.
                if re.search('Running', line):
                    tokens = line.split()
                    resdict['Writers'] = int(tokens[2])
                elif re.search('Array', line):
                    tokens = line.split()
                    x = int(tokens[4])
                    y = int(tokens[6])
                    z = int(tokens[8])
                    resdict['LocalSize'] = (x, y, z)
                elif re.search('Global', line):
                    tokens = line.split()
                    x = int(tokens[4])
                    y = int(tokens[6])
                    z = int(tokens[8])
                    resdict['GlobalSize'] = (x, y, z)
                elif re.search('Total', line):
                    tokens = line.split()
                    resdict['TotData'] = float(tokens[5])
                elif re.search('MPI-IO', line):
                    # Start of an MPI-IO results section: reset accumulators.
                    mpiio = True
                    header = False
                    cmin = 0
                    cmax = 0
                    cav = 0
                    ccount = 0
                    delete_count = 0
                elif re.search('HDF5', line):
                    hdf5 = True
                    header = False
                    cmin = 0
                    cmax = 0
                    cav = 0
                    ccount = 0
                    delete_count = 0
                elif re.search('NetCDF', line):
                    netcdf = True
                    header = False
                    cmin = 0
                    cmax = 0
                    cav = 0
                    ccount = 0
                    delete_count = 0
            else:
                # Results section: 'Writing to' sets the stripe mode, 'time'
                # lines accumulate, 'Deleting' closes one sub-run.
                if re.search('Writing to', line):
                    tokens = line.split()
                    if re.match('striped', tokens[2]):
                        striping = -1
                    elif re.match('defstriped', tokens[2]):
                        striping = 4
                elif(re.search('time', line) and not re.search('mintime', line) and not re.search('avgtime', line) and not re.search('utime', line)):
                    tokens = line.split()
                    time = float(tokens[2])
                    # Track running min/max/sum of the per-iteration times.
                    if(cmin == 0):
                        cmin = time
                    elif(time < cmin):
                        cmin = time
                    if(cmax == 0):
                        cmax = time
                    elif(time > cmax):
                        cmax = time
                    cav = cav + time
                    ccount = ccount + 1
                elif re.search('Deleting', line):
                    delete_count = delete_count + 1
                    ccount = ccount  # NOTE(review): no-op in the original
                    cav = cav / ccount
                    # Record the closed sub-run under the active library and
                    # stripe setting, then clear that library's flag.
                    if(mpiio):
                        if(striping == -1):
                            mpi_max_stripemin[mpi_max_stripecount] = cmin
                            mpi_max_stripemax[mpi_max_stripecount] = cmax
                            mpi_max_stripeav[mpi_max_stripecount] = cav
                            mpi_max_stripedate[mpi_max_stripecount] = runtime
                            mpi_max_stripecount = mpi_max_stripecount + 1
                        else:
                            mpi_4_stripemin[mpi_4_stripecount] = cmin
                            mpi_4_stripemax[mpi_4_stripecount] = cmax
                            mpi_4_stripeav[mpi_4_stripecount] = cav
                            mpi_4_stripedate[mpi_4_stripecount] = runtime
                            mpi_4_stripecount = mpi_4_stripecount + 1
                        mpiio = False
                    elif(hdf5):
                        if(striping == -1):
                            hdf5_max_stripemin[hdf5_max_stripecount] = cmin
                            hdf5_max_stripemax[hdf5_max_stripecount] = cmax
                            hdf5_max_stripeav[hdf5_max_stripecount] = cav
                            hdf5_max_stripedate[hdf5_max_stripecount] = runtime
                            hdf5_max_stripecount = hdf5_max_stripecount + 1
                        else:
                            hdf5_4_stripemin[hdf5_4_stripecount] = cmin
                            hdf5_4_stripemax[hdf5_4_stripecount] = cmax
                            hdf5_4_stripeav[hdf5_4_stripecount] = cav
                            hdf5_4_stripedate[hdf5_4_stripecount] = runtime
                            hdf5_4_stripecount = hdf5_4_stripecount + 1
                        hdf5 = False
                    elif(netcdf):
                        if(striping == -1):
                            netcdf_max_stripemin[netcdf_max_stripecount] = cmin
                            netcdf_max_stripemax[netcdf_max_stripecount] = cmax
                            netcdf_max_stripeav[netcdf_max_stripecount] = cav
                            netcdf_max_stripedate[netcdf_max_stripecount] = runtime
                            netcdf_max_stripecount = netcdf_max_stripecount + 1
                        else:
                            netcdf_4_stripemin[netcdf_4_stripecount] = cmin
                            netcdf_4_stripemax[netcdf_4_stripecount] = cmax
                            netcdf_4_stripeav[netcdf_4_stripecount] = cav
                            netcdf_4_stripedate[netcdf_4_stripecount] = runtime
                            netcdf_4_stripecount = netcdf_4_stripecount + 1
                        netcdf = False
                    # Two deletes = both stripe modes done, back to header.
                    if(delete_count == 2):
                        header = True
                    elif(delete_count == 1):
                        cmin = 0
                        cmax = 0
                        cav = 0
                        ccount = 0
        # End of file: flush any section that was still open.
        if(mpiio != False):
            if(ccount != 0):
                cav = cav / ccount
            if(striping == -1):
                mpi_max_stripemin[mpi_max_stripecount] = cmin
                mpi_max_stripemax[mpi_max_stripecount] = cmax
                mpi_max_stripeav[mpi_max_stripecount] = cav
                mpi_max_stripedate[mpi_max_stripecount] = runtime
                mpi_max_stripecount = mpi_max_stripecount + 1
            else:
                mpi_4_stripemin[mpi_4_stripecount] = cmin
                mpi_4_stripemax[mpi_4_stripecount] = cmax
                mpi_4_stripeav[mpi_4_stripecount] = cav
                mpi_4_stripedate[mpi_4_stripecount] = runtime
                mpi_4_stripecount = mpi_4_stripecount + 1
            mpiio = False
        elif(hdf5 != False):
            if(ccount != 0):
                cav = cav / ccount
            if(striping == -1):
                hdf5_max_stripemin[hdf5_max_stripecount] = cmin
                hdf5_max_stripemax[hdf5_max_stripecount] = cmax
                hdf5_max_stripeav[hdf5_max_stripecount] = cav
                hdf5_max_stripedate[hdf5_max_stripecount] = runtime
                hdf5_max_stripecount = hdf5_max_stripecount + 1
            else:
                hdf5_4_stripemin[hdf5_4_stripecount] = cmin
                hdf5_4_stripemax[hdf5_4_stripecount] = cmax
                hdf5_4_stripeav[hdf5_4_stripecount] = cav
                hdf5_4_stripedate[hdf5_4_stripecount] = runtime
                hdf5_4_stripecount = hdf5_4_stripecount + 1
            hdf5 = False
        elif(netcdf != False):
            if(ccount != 0):
                cav = cav / ccount
            if(striping == -1):
                netcdf_max_stripemin[netcdf_max_stripecount] = cmin
                netcdf_max_stripemax[netcdf_max_stripecount] = cmax
                netcdf_max_stripeav[netcdf_max_stripecount] = cav
                netcdf_max_stripedate[netcdf_max_stripecount] = runtime
                netcdf_max_stripecount = netcdf_max_stripecount + 1
            else:
                netcdf_4_stripemin[netcdf_4_stripecount] = cmin
                netcdf_4_stripemax[netcdf_4_stripecount] = cmax
                netcdf_4_stripeav[netcdf_4_stripecount] = cav
                netcdf_4_stripedate[netcdf_4_stripecount] = runtime
                netcdf_4_stripecount = netcdf_4_stripecount + 1
            netcdf = False
        infile.close()
    # One plot (plus per-month zooms) per library / stripe combination.
    plot_data(mpi_max_stripedate, mpi_max_stripemin, mpi_max_stripemax, mpi_max_stripeav, mpi_max_stripecount, 'mpiio_max_stripe')
    plot_data(mpi_4_stripedate, mpi_4_stripemin, mpi_4_stripemax, mpi_4_stripeav, mpi_4_stripecount, 'mpiio_4_stripe')
    plot_data(hdf5_max_stripedate, hdf5_max_stripemin, hdf5_max_stripemax, hdf5_max_stripeav, hdf5_max_stripecount, 'hdf5_max_stripe')
    plot_data(hdf5_4_stripedate, hdf5_4_stripemin, hdf5_4_stripemax, hdf5_4_stripeav, hdf5_4_stripecount, 'hdf5_4_stripe')
    plot_data(netcdf_max_stripedate, netcdf_max_stripemin, netcdf_max_stripemax, netcdf_max_stripeav, netcdf_max_stripecount, 'netcdf_max_stripe')
    plot_data(netcdf_4_stripedate, netcdf_4_stripemin, netcdf_4_stripemax, netcdf_4_stripeav, netcdf_4_stripecount, 'netcdf_4_stripe')
    sys.exit(0)
def plot_data(date, min, max, av, ccount, filename):
    """Dump the first ccount samples to a text file and save min/max/average
    plots: a full-range PNG, a zoomed PNG, and one PNG per calendar month.

    Parameters: date/min/max/av are parallel series (only the first ccount
    entries are valid), filename is the output stem.
    """
    # Fixed: the original opened the dump file and never closed it.
    with open(filename, 'w') as outfile:
        for i in range(ccount):
            outfile.write("%s %s %s %s\n" % (date[i], min[i], max[i], av[i]))
    plt.plot(date[0:ccount], max[0:ccount], c='r', ls='--', label='max')
    plt.plot(date[0:ccount], min[0:ccount], c='g', ls='--', label='min')
    plt.plot(date[0:ccount], av[0:ccount], c='b', marker='s', label='average')
    plt.xlabel('Date')
    plt.ylabel('Runtime (seconds)')
    plt.legend(loc='upper left', shadow=True)
    plt.savefig(filename + '.png')
    x1, x2, y1, y2 = plt.axis()
    plt.axis((x1, x2, 0, 20))
    plt.savefig(filename + '_zoom.png')
    # Robustness fix: an empty series has no dates to iterate (the original
    # compared None < None, which raises on Python 3).
    if ccount > 0:
        current_date = date[0]
        end_date = date[ccount - 1]
        while current_date < end_date:
            next_month = add_months(current_date, 1)
            plt.xticks(rotation=70)
            plt.axis((np.float64(date2num(current_date)), np.float64(date2num(next_month)), y1, y2))
            plt.savefig(filename + current_date.strftime("%B") + current_date.strftime("%Y") + '.png')
            current_date = next_month
    plt.close()
def add_months(sourcedate, months):
    """Return a datetime for the first day of the month `months` after
    sourcedate's month.

    Fixes two bugs in the original: the `months` argument was ignored
    (the month was always advanced by exactly one), and
    `sourcedate.year + (sourcedate.month/12)` used float division on
    Python 3, making dt.date() raise TypeError.
    """
    total = sourcedate.month - 1 + months
    new_date = dt.date(sourcedate.year + total // 12, total % 12 + 1, 1)
    # Round-trip through strftime/strptime to keep the original's
    # datetime (not date) return type.
    return datetime.strptime(new_date.strftime('%Y%m%d'), '%Y%m%d')
def get_filelist(dir, stem, maxsize):
    """Return the sorted list of files in `dir` whose names start with `stem`.

    Exits the program if `dir` does not exist or more than `maxsize`
    matches are found.
    """
    files = []
    if os.path.exists(dir):
        files = glob(os.path.join(dir, stem + '*'))
        files.sort()
        if len(files) > maxsize:
            sys.stderr.write("Need to increase the maxsize (maximum number of files this program can process)")
            sys.exit()
    else:
        # Fixed: "{1}" referenced a second positional argument that was
        # never supplied, so this error path itself raised IndexError.
        sys.stderr.write("Directory does not exist: {0}".format(dir))
        sys.exit(1)
    return files
def get_date_from_string(datestring):
    """Parse a YYYYMMDDHHMMSS timestamp string into a datetime.

    Fixed: the original called an unqualified `strptime` (a NameError at
    runtime — it was never imported) and computed unused y/m/d slices.
    """
    return datetime.strptime(datestring, "%Y%m%d%H%M%S")
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"[email protected]"
] | |
657e2846619a021d3e3cdfa8eb1d3510dc021b24
|
6405750bc22e5e74a839ab35d385cefb8e443208
|
/data/templates/admin/add_task.mako.py
|
3ba3e8a172b2c1ae96c40b47d9f97815c7c5dde4
|
[] |
no_license
|
yetty/DevContest
|
7ecfcf9ca8e3af4b631037635037a32763d87a15
|
8903985ff5bb95b185bbdda4996715ec8370ac91
|
refs/heads/0.4
| 2021-04-26T16:43:03.853742 | 2014-03-23T20:29:12 | 2014-03-23T20:29:12 | 768,958 | 2 | 1 | null | 2013-03-05T18:21:06 | 2010-07-11T15:02:52 |
Python
|
UTF-8
|
Python
| false | false | 2,657 |
py
|
# -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 5
_modified_time = 1273684601.063113
_template_filename='/home/yetty/Work/Development/DevContest/devcontest/templates/admin/add_task.mako'
_template_uri='/admin/add_task.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
from webhelpers.html import escape
_exports = ['body', 'title']
def _mako_get_namespace(context, name):
    # Return the cached namespace for (module, name), building the
    # namespace table on first access.
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # The template declares no <%namespace> tags, so nothing to generate.
    pass
def _mako_inherit(template, context):
    # The template inherits from /admin/base.mako.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, u'/admin/base.mako', _template_uri)
def render_body(context,**pageargs):
    # Mako-generated top-level body renderer.
    # NOTE(review): a second `render_body` definition below shadows this
    # one.  In normal mako output the second renderer is named after a
    # <%def> block; this looks like generator/corruption damage, but the
    # file is auto-generated so it is left untouched.
    context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        # SOURCE LINE 1
        __M_writer(u'\n\n')
        # SOURCE LINE 3
        __M_writer(u'\n\n')
        # SOURCE LINE 12
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_body(context):
    # Renders the add-task form (webhelpers `h` comes from the context).
    context.caller_stack._push_frame()
    try:
        h = context.get('h', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 5
        __M_writer(u'\n ')
        # SOURCE LINE 6
        __M_writer(escape(h.form(h.url_for(controller="admin", action="add_task_submit"), method="post", multipart=True)))
        __M_writer(u'\n <label>N\xe1zev \xfalohy: </label>')
        # SOURCE LINE 7
        __M_writer(escape(h.text(name="name")))
        __M_writer(u'\n <label>Popis \xfalohy: </label>')
        # SOURCE LINE 8
        __M_writer(escape(h.textarea(name="description", rows=20, cols=80)))
        __M_writer(u'\n <label>Test: </label>')
        # SOURCE LINE 9
        __M_writer(escape(h.file(name="tests")))
        __M_writer(u'\n ')
        # SOURCE LINE 10
        __M_writer(escape(h.submit("submit", "Odeslat")))
        __M_writer(u'\n ')
        # SOURCE LINE 11
        __M_writer(escape(h.end_form()))
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_title(context):
    # Renders the <%def name="title()"> block ("Přidat úlohu").
    context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        # SOURCE LINE 3
        __M_writer(u'P\u0159idat \xfalohu')
        return ''
    finally:
        context.caller_stack._pop_frame()
|
[
"root@yetty.(none)"
] |
root@yetty.(none)
|
b0047d53c85ac45e8cb5dc88e75f86af112a09e3
|
ffe4e13584ecf8e10694ef05022e07cba895e8d6
|
/django_new/blog/migrations/0004_auto_20201203_0718.py
|
acd39f6c0a6888f31adc26cc004d259b49c03162
|
[] |
no_license
|
Eazyberry-Official/django
|
5eb350db1efff9ff7227230978280f65018829b7
|
dead6f9111684c7ca1fc8c7789764fdfac98c33b
|
refs/heads/master
| 2023-01-22T17:55:03.637669 | 2020-12-05T09:23:35 | 2020-12-05T09:23:35 | 318,748,924 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 380 |
py
|
# Generated by Django 3.1.3 on 2020-12-03 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.3): alters blog.Product.price to a
    # TextField with an empty-string default.  Applied migrations must not
    # be edited by hand.
    dependencies = [
        ('blog', '0003_auto_20201203_0706'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.TextField(default=''),
        ),
    ]
|
[
"[email protected]"
] | |
c764277e09dd60bfe3054906aefe34eca2eae4bf
|
47fa6694ee1641b4375de168fc3148da457badef
|
/tests/test_parser.py
|
1ccecc01f2488b6779c2029552e34877133b7913
|
[
"MIT"
] |
permissive
|
viktorsapozhok/sightspotter
|
93b4dd1c05661b366361153a787d27eeaadc3be7
|
77610c915b69e2826e4b9dab18477fa71ead3604
|
refs/heads/master
| 2023-05-27T02:08:30.249311 | 2020-01-01T16:09:53 | 2020-01-01T16:09:53 | 159,656,466 | 0 | 0 |
MIT
| 2023-05-22T22:29:05 | 2018-11-29T11:36:16 |
Python
|
UTF-8
|
Python
| false | false | 1,284 |
py
|
# -*- coding: utf-8 -*-
from bot import config
from bot.commuter import Commuter
from bot.parser import RouteParser
commuter = Commuter(config.path_to_db)
def test_parser():
    """Exercise RouteParser end-to-end against two known events."""
    route_parser = RouteParser(commuter, config.parser['url'])
    route_urls = route_parser.extract_routes()
    assert len(route_urls) >= 299
    cherepovets = [url for url in route_urls if 'cherepovets2008' in url]
    sights, histories, _, year = route_parser.parse_route(cherepovets[0], 0, -1)
    assert len(sights) >= 44
    assert year == 2008
    kazan = [url for url in route_urls if 'kazan2018' in url]
    sights, histories, _, year = route_parser.parse_route(kazan[0], 0, -1)
    assert len(sights) >= 37
    assert len(histories) >= 21
    assert year == 2018
def test_sights():
    """The sights table is populated and has its full 10-column schema."""
    sights = commuter.select('select * from sights')
    assert len(sights) > 10000
    assert len(sights.columns) == 10
def test_events():
    """At least 218 distinct events are present."""
    events = commuter.select('select distinct event from sights')
    assert len(events) >= 218
def test_history():
    """The history table is populated and has 4 columns."""
    history = commuter.select('select * from history')
    assert len(history) > 2500
    assert len(history.columns) == 4
def test_user_log():
    """The user_log table exposes exactly 3 columns."""
    user_log = commuter.select('select * from user_log')
    assert len(user_log.columns) == 3
def test_year():
    """No sight is dated before the year 2000."""
    stale = commuter.select('select year from sights where year < 2000')
    assert len(stale) == 0
|
[
"[email protected]"
] | |
5f20947d37c40b225caf658aa24de35a3409eda0
|
1e9ad304868c2bda918c19eba3d7b122bac3923b
|
/kubernetes/client/models/v1_scale_spec.py
|
4cbe43889993ed0f39cd92d9f358c3267a860626
|
[
"Apache-2.0"
] |
permissive
|
pineking/client-python
|
c77e5bd3d476ac852e6dffa96056008baa0f597f
|
74a64d7325518f4298600d4bb300f92843c29347
|
refs/heads/master
| 2021-01-22T22:16:27.368406 | 2017-03-15T08:21:21 | 2017-03-15T08:21:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,994 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ScaleSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, replicas=None):
        """
        V1ScaleSpec - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Swagger type map: attribute name -> declared type.
        self.swagger_types = {
            'replicas': 'int'
        }
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'replicas': 'replicas'
        }
        self._replicas = replicas
    @property
    def replicas(self):
        """
        Gets the replicas of this V1ScaleSpec.
        desired number of instances for the scaled object.

        :return: The replicas of this V1ScaleSpec.
        :rtype: int
        """
        return self._replicas
    @replicas.setter
    def replicas(self, replicas):
        """
        Sets the replicas of this V1ScaleSpec.
        desired number of instances for the scaled object.

        :param replicas: The replicas of this V1ScaleSpec.
        :type: int
        """
        self._replicas = replicas
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize nested models, lists of models and dicts of
        # models via their own to_dict().
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): generated code — no isinstance check, so comparing
        # against an object without __dict__ raises AttributeError rather
        # than returning NotImplemented.  Left as generated.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"[email protected]"
] | |
0450539ef864eff04784066d5f3516472b600dd4
|
d6dd4a0b3d951489f7b76cf5cd66aee25fb9e156
|
/db_migrate.py
|
09527460fe69bec7a754eeb222f32a145674af03
|
[] |
no_license
|
tomsitter/tag
|
79e05fca50fde853f1b77d8145dca65ed0ee8f4f
|
886a83f4f232c211ae04c027c66dc55d10cc7d08
|
refs/heads/master
| 2021-06-21T21:49:29.478208 | 2017-08-16T20:46:24 | 2017-08-16T20:46:24 | 100,298,606 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 833 |
py
|
import imp
from migrate.versioning import api
from tag import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
# Generate and apply the next SQLAlchemy-Migrate migration by diffing the
# current database schema against the model declared in `tag`.
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# Execute the captured model source into the scratch module so its metadata
# can be compared against the live model.
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
# Fixed: write through a context manager; the original used
# open(migration, "wt").write(script) and leaked the file handle.
with open(migration, "wt") as migration_file:
    migration_file.write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
|
[
"[email protected]"
] | |
7c4097a93e7aba50c191355924a8d2563f812aeb
|
69ca6c7ecd1e8ba12e1fd9ca5baf075ed9899f81
|
/scripts/sound_unmute.py
|
0539d8d02b6f6e60d6accb94f2c601c772618679
|
[] |
no_license
|
bekd70/ProjectorControl
|
505f3f9e874fd1f521ec0690c9e65257b6f67a2c
|
d241b44beeb7204c7ffa02fdf895d06b54994680
|
refs/heads/master
| 2021-01-12T02:39:19.985110 | 2017-01-05T06:57:01 | 2017-01-05T06:57:01 | 78,084,186 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 367 |
py
|
#!/usr/bin/env python
#turn sound on
import sys
import serial
port = serial.Serial('/dev/ttyAMA0', baudrate=38400, bytesize=8, parity=serial.PARITY_NONE, stopbits=1, timeout=5)
# Fixed: the original's bare `port.open` (no parentheses) was a no-op
# attribute access; serial.Serial already opens the port on construction,
# so no explicit open call is needed.
# This is the unmute code sent to the projector. Replace it for your model.
port.write("\x02\x13\x00\x00\x00\x15")
received = port.read(8)
print(received)  # print() form works on both Python 2 and 3
# Fixed: actually close the port — `port.close` without parentheses never
# called the method.
port.close()
|
[
"[email protected]"
] | |
9074795f04fffda1859ceabffe3265b9dad61ac4
|
c7cba1dad777f461ea546d0437528c985be3c051
|
/client.py
|
559f6546c5344baecc2df329d11dee988617cc63
|
[
"MIT"
] |
permissive
|
elliotthwang/NLU
|
000127b561c5b99340b04bf78aa65ff6ea28c79a
|
0e6a96e4c2f363beb4241b4371244a5229e72811
|
refs/heads/master
| 2022-01-12T06:51:00.036787 | 2018-10-07T21:56:15 | 2018-10-07T21:56:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,038 |
py
|
############################################################################################
#
# The MIT License (MIT)
#
# GeniSys NLU Engine API Client
# Copyright (C) 2018 Adam Milton-Barker (AdamMiltonBarker.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Title: GeniSys NLU Engine API Client
# Description: API client for communicating with the GeniSys AI NLU API endpoint
# Configuration: required/confs.json
# Last Modified: 2018-09-08
#
# Example Usage:
#
# $ python3 client.py CLASSIFY 1 "Do you know what I am saying?"
#
############################################################################################
import sys, time, string, requests, json
from tools.Helpers import Helpers
from tools.Logging import Logging
class Client():
	"""API client for the GeniSys NLU inference endpoint.

	Loads the project configuration, derives the per-user endpoint URL and
	the JSON request headers, and logs that the client is ready.
	"""

	def __init__(self, user):
		# Project helpers: configuration loader and file logger.
		self.Helpers = Helpers()
		self.Logging = Logging()
		self._confs = self.Helpers.loadConfigs()
		# Log file lives under the configured AI log directory.
		ai_conf = self._confs["AI"]
		self.LogFile = self.Logging.setLogFile(ai_conf["Logs"] + "Client/")
		# Endpoint is scoped to the given user identifier.
		self.apiUrl = ai_conf["FQDN"] + "/communicate/infer/" + user
		self.headers = {"content-type": 'application/json'}
		self.Logging.logMessage(
			self.LogFile, "CLIENT", "INFO", "GeniSys AI Client Ready")
if __name__ == "__main__":
	# CLI entry point, e.g.:
	#   python3 client.py CLASSIFY 1 "Do you know what I am saying?"
	if sys.argv[1] == "CLASSIFY":
		# NOTE: the original rebound the name ``Client`` to the instance,
		# shadowing the class itself; use a lowercase variable instead.
		client = Client(sys.argv[2])
		data = {"query": str(sys.argv[3])}
		client.Logging.logMessage(
			client.LogFile,
			"CLIENT",
			"INFO",
			"Sending string for classification...")
		# POST the query to the per-user inference endpoint.
		response = requests.post(
			client.apiUrl,
			data=json.dumps(data),
			headers=client.headers)
		client.Logging.logMessage(
			client.LogFile,
			"CLIENT",
			"OK",
			"Response: " + str(response))
|
[
"[email protected]"
] | |
50511aaa0c39241f0a3c142e0ae5eb36a441797a
|
eb7f48ce809e526f8bfdc9d623a713c5eedd18ac
|
/software/MCP4728GUI/MainWindow.py
|
f202da8f3782eebee471a952d6d09c2d19590d2f
|
[] |
no_license
|
lucanastasio/TunableFilter
|
6f9981b496002094b7648d48152abf21083a026b
|
6269d3b3945b005338532354ba7831f64404b218
|
refs/heads/master
| 2023-07-17T10:31:37.984120 | 2021-08-23T07:45:33 | 2021-08-23T07:45:33 | 225,726,961 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29,502 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(902, 520)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(902, 520))
MainWindow.setMaximumSize(QtCore.QSize(902, 1000))
MainWindow.setInputMethodHints(QtCore.Qt.ImhNone)
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setLayoutDirection(QtCore.Qt.LeftToRight)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_5.setContentsMargins(10, 10, 10, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setStyleSheet("border-color: black")
self.groupBox.setObjectName("groupBox")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.SliderA = QtWidgets.QSlider(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SliderA.sizePolicy().hasHeightForWidth())
self.SliderA.setSizePolicy(sizePolicy)
self.SliderA.setMinimumSize(QtCore.QSize(0, 256))
self.SliderA.setInputMethodHints(QtCore.Qt.ImhNone)
self.SliderA.setMaximum(4095)
self.SliderA.setPageStep(12)
self.SliderA.setProperty("value", 0)
self.SliderA.setSliderPosition(0)
self.SliderA.setTracking(True)
self.SliderA.setOrientation(QtCore.Qt.Vertical)
self.SliderA.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.SliderA.setTickInterval(256)
self.SliderA.setObjectName("SliderA")
self.verticalLayout_3.addWidget(self.SliderA, 0, QtCore.Qt.AlignHCenter)
self.SpinBoxA = QtWidgets.QDoubleSpinBox(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SpinBoxA.sizePolicy().hasHeightForWidth())
self.SpinBoxA.setSizePolicy(sizePolicy)
self.SpinBoxA.setMinimumSize(QtCore.QSize(110, 0))
self.SpinBoxA.setPrefix("")
self.SpinBoxA.setDecimals(4)
self.SpinBoxA.setMaximum(2.0475)
self.SpinBoxA.setSingleStep(0.0005)
self.SpinBoxA.setObjectName("SpinBoxA")
self.verticalLayout_3.addWidget(self.SpinBoxA, 0, QtCore.Qt.AlignHCenter)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(10)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label, 0, QtCore.Qt.AlignRight)
self.VrefA = QtWidgets.QComboBox(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.VrefA.sizePolicy().hasHeightForWidth())
self.VrefA.setSizePolicy(sizePolicy)
self.VrefA.setMinimumSize(QtCore.QSize(140, 0))
self.VrefA.setObjectName("VrefA")
self.VrefA.addItem("")
self.VrefA.addItem("")
self.VrefA.addItem("")
self.horizontalLayout_2.addWidget(self.VrefA)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout.addWidget(self.groupBox, 0, QtCore.Qt.AlignHCenter)
self.groupBox1 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox1.setStyleSheet("border-color: black")
self.groupBox1.setObjectName("groupBox1")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox1)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.SliderB = QtWidgets.QSlider(self.groupBox1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SliderB.sizePolicy().hasHeightForWidth())
self.SliderB.setSizePolicy(sizePolicy)
self.SliderB.setMinimumSize(QtCore.QSize(0, 256))
self.SliderB.setInputMethodHints(QtCore.Qt.ImhNone)
self.SliderB.setMaximum(4095)
self.SliderB.setPageStep(12)
self.SliderB.setProperty("value", 0)
self.SliderB.setSliderPosition(0)
self.SliderB.setTracking(True)
self.SliderB.setOrientation(QtCore.Qt.Vertical)
self.SliderB.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.SliderB.setTickInterval(256)
self.SliderB.setObjectName("SliderB")
self.verticalLayout_4.addWidget(self.SliderB, 0, QtCore.Qt.AlignHCenter)
self.SpinBoxB = QtWidgets.QDoubleSpinBox(self.groupBox1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SpinBoxB.sizePolicy().hasHeightForWidth())
self.SpinBoxB.setSizePolicy(sizePolicy)
self.SpinBoxB.setMinimumSize(QtCore.QSize(110, 0))
self.SpinBoxB.setPrefix("")
self.SpinBoxB.setDecimals(4)
self.SpinBoxB.setMaximum(2.0475)
self.SpinBoxB.setSingleStep(0.0005)
self.SpinBoxB.setObjectName("SpinBoxB")
self.verticalLayout_4.addWidget(self.SpinBoxB, 0, QtCore.Qt.AlignHCenter)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setSpacing(10)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_2 = QtWidgets.QLabel(self.groupBox1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.horizontalLayout_3.addWidget(self.label_2, 0, QtCore.Qt.AlignRight)
self.VrefB = QtWidgets.QComboBox(self.groupBox1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.VrefB.sizePolicy().hasHeightForWidth())
self.VrefB.setSizePolicy(sizePolicy)
self.VrefB.setMinimumSize(QtCore.QSize(140, 0))
self.VrefB.setObjectName("VrefB")
self.VrefB.addItem("")
self.VrefB.addItem("")
self.VrefB.addItem("")
self.horizontalLayout_3.addWidget(self.VrefB)
self.verticalLayout_4.addLayout(self.horizontalLayout_3)
self.horizontalLayout.addWidget(self.groupBox1, 0, QtCore.Qt.AlignHCenter)
self.groupBox2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox2.setStyleSheet("border-color: black")
self.groupBox2.setObjectName("groupBox2")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox2)
self.verticalLayout.setObjectName("verticalLayout")
self.SliderC = QtWidgets.QSlider(self.groupBox2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SliderC.sizePolicy().hasHeightForWidth())
self.SliderC.setSizePolicy(sizePolicy)
self.SliderC.setMinimumSize(QtCore.QSize(0, 256))
self.SliderC.setInputMethodHints(QtCore.Qt.ImhNone)
self.SliderC.setMaximum(4095)
self.SliderC.setPageStep(12)
self.SliderC.setProperty("value", 0)
self.SliderC.setSliderPosition(0)
self.SliderC.setTracking(True)
self.SliderC.setOrientation(QtCore.Qt.Vertical)
self.SliderC.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.SliderC.setTickInterval(256)
self.SliderC.setObjectName("SliderC")
self.verticalLayout.addWidget(self.SliderC, 0, QtCore.Qt.AlignHCenter)
self.SpinBoxC = QtWidgets.QDoubleSpinBox(self.groupBox2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SpinBoxC.sizePolicy().hasHeightForWidth())
self.SpinBoxC.setSizePolicy(sizePolicy)
self.SpinBoxC.setMinimumSize(QtCore.QSize(110, 0))
self.SpinBoxC.setPrefix("")
self.SpinBoxC.setDecimals(4)
self.SpinBoxC.setMaximum(2.0475)
self.SpinBoxC.setSingleStep(0.0005)
self.SpinBoxC.setObjectName("SpinBoxC")
self.verticalLayout.addWidget(self.SpinBoxC, 0, QtCore.Qt.AlignHCenter)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setSpacing(10)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_3 = QtWidgets.QLabel(self.groupBox2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setObjectName("label_3")
self.horizontalLayout_4.addWidget(self.label_3, 0, QtCore.Qt.AlignRight)
self.VrefC = QtWidgets.QComboBox(self.groupBox2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.VrefC.sizePolicy().hasHeightForWidth())
self.VrefC.setSizePolicy(sizePolicy)
self.VrefC.setMinimumSize(QtCore.QSize(140, 0))
self.VrefC.setObjectName("VrefC")
self.VrefC.addItem("")
self.VrefC.addItem("")
self.VrefC.addItem("")
self.horizontalLayout_4.addWidget(self.VrefC)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout.addWidget(self.groupBox2, 0, QtCore.Qt.AlignHCenter)
self.groupBox3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox3.setStyleSheet("border-color: black")
self.groupBox3.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox3.setObjectName("groupBox3")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox3)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.SliderD = QtWidgets.QSlider(self.groupBox3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SliderD.sizePolicy().hasHeightForWidth())
self.SliderD.setSizePolicy(sizePolicy)
self.SliderD.setMinimumSize(QtCore.QSize(0, 256))
self.SliderD.setInputMethodHints(QtCore.Qt.ImhNone)
self.SliderD.setMaximum(4095)
self.SliderD.setPageStep(12)
self.SliderD.setProperty("value", 0)
self.SliderD.setSliderPosition(0)
self.SliderD.setTracking(True)
self.SliderD.setOrientation(QtCore.Qt.Vertical)
self.SliderD.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.SliderD.setTickInterval(256)
self.SliderD.setObjectName("SliderD")
self.verticalLayout_2.addWidget(self.SliderD, 0, QtCore.Qt.AlignHCenter)
self.SpinBoxD = QtWidgets.QDoubleSpinBox(self.groupBox3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SpinBoxD.sizePolicy().hasHeightForWidth())
self.SpinBoxD.setSizePolicy(sizePolicy)
self.SpinBoxD.setMinimumSize(QtCore.QSize(110, 0))
self.SpinBoxD.setPrefix("")
self.SpinBoxD.setDecimals(4)
self.SpinBoxD.setMaximum(2.0475)
self.SpinBoxD.setSingleStep(0.0005)
self.SpinBoxD.setObjectName("SpinBoxD")
self.verticalLayout_2.addWidget(self.SpinBoxD, 0, QtCore.Qt.AlignHCenter)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setSpacing(10)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_4 = QtWidgets.QLabel(self.groupBox3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setObjectName("label_4")
self.horizontalLayout_5.addWidget(self.label_4, 0, QtCore.Qt.AlignRight)
self.VrefD = QtWidgets.QComboBox(self.groupBox3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.VrefD.sizePolicy().hasHeightForWidth())
self.VrefD.setSizePolicy(sizePolicy)
self.VrefD.setMinimumSize(QtCore.QSize(140, 0))
self.VrefD.setObjectName("VrefD")
self.VrefD.addItem("")
self.VrefD.addItem("")
self.VrefD.addItem("")
self.horizontalLayout_5.addWidget(self.VrefD)
self.verticalLayout_2.addLayout(self.horizontalLayout_5)
self.horizontalLayout.addWidget(self.groupBox3, 0, QtCore.Qt.AlignHCenter)
self.verticalLayout_5.addLayout(self.horizontalLayout)
spacerItem = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_5.addItem(spacerItem)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setHorizontalSpacing(10)
self.gridLayout.setVerticalSpacing(6)
self.gridLayout.setObjectName("gridLayout")
spacerItem1 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 1, 0, 1, 1)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setSpacing(10)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_10 = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_10.sizePolicy().hasHeightForWidth())
self.label_10.setSizePolicy(sizePolicy)
self.label_10.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_10.setObjectName("label_10")
self.horizontalLayout_7.addWidget(self.label_10, 0, QtCore.Qt.AlignRight)
self.SpinBoxAddress = QtWidgets.QSpinBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SpinBoxAddress.sizePolicy().hasHeightForWidth())
self.SpinBoxAddress.setSizePolicy(sizePolicy)
self.SpinBoxAddress.setMinimumSize(QtCore.QSize(90, 27))
self.SpinBoxAddress.setInputMethodHints(QtCore.Qt.ImhDigitsOnly|QtCore.Qt.ImhPreferUppercase)
self.SpinBoxAddress.setMinimum(96)
self.SpinBoxAddress.setMaximum(103)
self.SpinBoxAddress.setProperty("value", 96)
self.SpinBoxAddress.setDisplayIntegerBase(16)
self.SpinBoxAddress.setObjectName("SpinBoxAddress")
self.horizontalLayout_7.addWidget(self.SpinBoxAddress)
self.gridLayout.addLayout(self.horizontalLayout_7, 1, 1, 1, 1)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setObjectName("label_5")
self.horizontalLayout_8.addWidget(self.label_5)
self.ComboBoxBus = ComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ComboBoxBus.sizePolicy().hasHeightForWidth())
self.ComboBoxBus.setSizePolicy(sizePolicy)
self.ComboBoxBus.setMinimumSize(QtCore.QSize(120, 0))
self.ComboBoxBus.setEditable(False)
self.ComboBoxBus.setCurrentText("")
self.ComboBoxBus.setObjectName("ComboBoxBus")
self.horizontalLayout_8.addWidget(self.ComboBoxBus)
self.gridLayout.addLayout(self.horizontalLayout_8, 1, 3, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setSpacing(10)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
self.label_9.setSizePolicy(sizePolicy)
self.label_9.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName("label_9")
self.horizontalLayout_6.addWidget(self.label_9, 0, QtCore.Qt.AlignRight)
self.SpinBoxVdd = QtWidgets.QDoubleSpinBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SpinBoxVdd.sizePolicy().hasHeightForWidth())
self.SpinBoxVdd.setSizePolicy(sizePolicy)
self.SpinBoxVdd.setMinimumSize(QtCore.QSize(90, 27))
self.SpinBoxVdd.setDecimals(3)
self.SpinBoxVdd.setMinimum(2.7)
self.SpinBoxVdd.setMaximum(5.5)
self.SpinBoxVdd.setObjectName("SpinBoxVdd")
self.horizontalLayout_6.addWidget(self.SpinBoxVdd)
self.gridLayout.addLayout(self.horizontalLayout_6, 0, 1, 1, 1)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
spacerItem2 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem2)
self.CheckBoxUpdate = QtWidgets.QCheckBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.CheckBoxUpdate.sizePolicy().hasHeightForWidth())
self.CheckBoxUpdate.setSizePolicy(sizePolicy)
self.CheckBoxUpdate.setMinimumSize(QtCore.QSize(170, 0))
self.CheckBoxUpdate.setObjectName("CheckBoxUpdate")
self.horizontalLayout_9.addWidget(self.CheckBoxUpdate)
spacerItem3 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem3)
self.gridLayout.addLayout(self.horizontalLayout_9, 0, 3, 1, 1)
self.PushButtonLoad = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PushButtonLoad.sizePolicy().hasHeightForWidth())
self.PushButtonLoad.setSizePolicy(sizePolicy)
self.PushButtonLoad.setMinimumSize(QtCore.QSize(160, 0))
self.PushButtonLoad.setObjectName("PushButtonLoad")
self.gridLayout.addWidget(self.PushButtonLoad, 1, 6, 1, 1)
self.PushButtonWrite = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PushButtonWrite.sizePolicy().hasHeightForWidth())
self.PushButtonWrite.setSizePolicy(sizePolicy)
self.PushButtonWrite.setMinimumSize(QtCore.QSize(120, 0))
self.PushButtonWrite.setObjectName("PushButtonWrite")
self.gridLayout.addWidget(self.PushButtonWrite, 0, 5, 1, 1)
self.PushButtonSave = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PushButtonSave.sizePolicy().hasHeightForWidth())
self.PushButtonSave.setSizePolicy(sizePolicy)
self.PushButtonSave.setMinimumSize(QtCore.QSize(160, 0))
self.PushButtonSave.setObjectName("PushButtonSave")
self.gridLayout.addWidget(self.PushButtonSave, 0, 6, 1, 1)
self.PushButtonRead = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PushButtonRead.sizePolicy().hasHeightForWidth())
self.PushButtonRead.setSizePolicy(sizePolicy)
self.PushButtonRead.setMinimumSize(QtCore.QSize(120, 0))
self.PushButtonRead.setObjectName("PushButtonRead")
self.gridLayout.addWidget(self.PushButtonRead, 1, 5, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem4, 1, 4, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem5, 1, 2, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem6, 1, 7, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayout)
self.horizontalLayout_10.addLayout(self.verticalLayout_5)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setAutoFillBackground(True)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.label.setBuddy(self.VrefA)
self.label_2.setBuddy(self.VrefB)
self.label_3.setBuddy(self.VrefC)
self.label_4.setBuddy(self.VrefD)
self.label_10.setBuddy(self.SpinBoxAddress)
self.label_9.setBuddy(self.SpinBoxVdd)
self.retranslateUi(MainWindow)
self.VrefD.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MCP4728 GUI"))
MainWindow.setProperty("LICENSE", _translate("MainWindow", "MCP4728 PyQt5 GUI\n"
"Copyright (C) 2019 Luca Anastasio\n"
"<[email protected]>\n"
"\n"
"This program is free software: you can redistribute it and/or modify\n"
"it under the terms of the GNU General Public License as published by\n"
"the Free Software Foundation, either version 3 of the License, or\n"
"(at your option) any later version.\n"
"\n"
"This program is distributed in the hope that it will be useful,\n"
"but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
"GNU General Public License for more details.\n"
"\n"
"You should have received a copy of the GNU General Public License\n"
"along with this program. If not, see <http://www.gnu.org/licenses/>."))
self.groupBox.setTitle(_translate("MainWindow", "Channel A"))
self.SpinBoxA.setSuffix(_translate("MainWindow", " V"))
self.label.setText(_translate("MainWindow", " Vref"))
self.VrefA.setItemText(0, _translate("MainWindow", "Internal (2.048V)"))
self.VrefA.setItemText(1, _translate("MainWindow", "Internal (4.096V)"))
self.VrefA.setItemText(2, _translate("MainWindow", "External (Vdd)"))
self.groupBox1.setTitle(_translate("MainWindow", "Channel B"))
self.SpinBoxB.setSuffix(_translate("MainWindow", " V"))
self.label_2.setText(_translate("MainWindow", " Vref"))
self.VrefB.setItemText(0, _translate("MainWindow", "Internal (2.048V)"))
self.VrefB.setItemText(1, _translate("MainWindow", "Internal (4.096V)"))
self.VrefB.setItemText(2, _translate("MainWindow", "External (Vdd)"))
self.groupBox2.setTitle(_translate("MainWindow", "Channel C"))
self.SpinBoxC.setSuffix(_translate("MainWindow", " V"))
self.label_3.setText(_translate("MainWindow", " Vref"))
self.VrefC.setItemText(0, _translate("MainWindow", "Internal (2.048V)"))
self.VrefC.setItemText(1, _translate("MainWindow", "Internal (4.096V)"))
self.VrefC.setItemText(2, _translate("MainWindow", "External (Vdd)"))
self.groupBox3.setTitle(_translate("MainWindow", "Channel D"))
self.SpinBoxD.setSuffix(_translate("MainWindow", " V"))
self.label_4.setText(_translate("MainWindow", " Vref"))
self.VrefD.setItemText(0, _translate("MainWindow", "Internal (2.048V)"))
self.VrefD.setItemText(1, _translate("MainWindow", "Internal (4.096V)"))
self.VrefD.setItemText(2, _translate("MainWindow", "External (Vdd)"))
self.label_10.setText(_translate("MainWindow", "i2c address"))
self.SpinBoxAddress.setPrefix(_translate("MainWindow", "0x"))
self.label_5.setText(_translate("MainWindow", "i2c bus"))
self.label_9.setText(_translate("MainWindow", "Vdd voltage"))
self.SpinBoxVdd.setSuffix(_translate("MainWindow", " V"))
self.CheckBoxUpdate.setText(_translate("MainWindow", "Continuous update"))
self.PushButtonLoad.setText(_translate("MainWindow", "Load from EEPROM"))
self.PushButtonWrite.setText(_translate("MainWindow", "Write values"))
self.PushButtonSave.setText(_translate("MainWindow", "Save to EEPROM"))
self.PushButtonRead.setText(_translate("MainWindow", "Read values"))
from .ComboBox import ComboBox
|
[
"[email protected]"
] | |
31985323a17529eccc7f8334da21afac1445e01c
|
974e6dfefc6dae7aaeef6bb3b8357218842ad741
|
/030 fifthpower.py
|
83afd41662f07a20574c40f5b6b0460a39a03cc1
|
[] |
no_license
|
IanC162/project-euler
|
2de30a8b1d4f8aad57cd1b33968fc5d99ae217ae
|
11e17e7c4bb627f265e99d4a3e9fbdc63252b8d0
|
refs/heads/master
| 2020-05-17T03:44:34.246966 | 2014-03-16T12:35:36 | 2014-03-16T12:35:36 | 17,645,271 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
# Project Euler 30: sum all numbers that equal the sum of the fifth
# powers of their digits.
def fifth_power_sum(power=5, limit=355000):
    """Return the sum of all n >= 2 below *limit* with n == sum(d**power over digits d).

    The default *limit* of 355000 covers the fifth-power problem because
    6 * 9**5 = 354294 is the largest possible digit-power sum, so no
    larger n can qualify.  *power* generalises the original hard-coded 5.
    """
    digit_pow = [d ** power for d in range(10)]  # one lookup per digit, hoisted out of the loop
    total = 0
    # 0 and 1 are excluded: single-digit "sums" do not count for this problem.
    for i in range(2, limit):
        if sum(digit_pow[int(ch)] for ch in str(i)) == i:
            total += i
    return total


if __name__ == "__main__":
    # Python 3 port of the original script (was ``print total`` and
    # ``raw_input()``); the trailing input() keeps a double-clicked
    # console window open until the user presses Enter.
    print(fifth_power_sum())
    input()
|
[
"[email protected]"
] | |
d936ce7534943842f0fe4314173d6571201fdf1c
|
179892586a9e26c4c87f972f04182525f1678cfe
|
/model/figures.py
|
53b15f61ea2fb7486d1bc21b17143a78e91b929c
|
[
"MIT"
] |
permissive
|
simeond/data-sharing-abm-model
|
c55aa04b0090300fe7fe909cc291e008fb262963
|
23386f69d817dd1542c5d4464757f8aabadc6b8d
|
refs/heads/master
| 2021-05-17T14:19:09.022869 | 2019-04-30T12:40:14 | 2019-04-30T12:40:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,153 |
py
|
import plotly.graph_objs as go
import numpy as np
from .beta_distr import get_beta_params
from scipy.stats import beta
import matplotlib.pyplot as plt
# Build a plotly-friendly palette from matplotlib's "tab10" colormap.
# Channels are rounded to integers so the strings are clean CSS colours,
# e.g. "rgb(31, 119, 180)" — the original float multiplication could emit
# artifacts such as "rgb(119.00000000000001, ...)" which some renderers
# reject.
tab10 = plt.get_cmap("tab10").colors
tab10 = [tuple(int(round(x * 255)) for x in colour) for colour in tab10]
tab10 = ["rgb" + str(c) for c in tab10]
def plot_beta(m, var):
    """
    plot a single beta function with mode m and variance var

    Returns a plotly Figure with the beta pdf evaluated on 1000 points
    over [0, 1].
    """
    a, b = get_beta_params(m, var)
    xs = np.linspace(0, 1, 1000)
    # "lines" is the valid plotly Scatter mode flag; the original
    # "line" is rejected by plotly's attribute validation.
    return go.Figure(data=[go.Scatter(x=xs, y=beta.pdf(xs, a, b), mode="lines")])
def plot_quality_by_category(quality_df):
    """
    evolution of the quality of all products over time, grouped by category

    One line per (category, firm) pair; lines of the same category share a
    colour and a single legend entry.
    """
    traces = []
    for cat_idx, cat in enumerate(quality_df.category.unique()):
        colour = tab10[np.mod(cat_idx, 10)]
        cat_df = quality_df.loc[quality_df.category == cat].sort_values(
            "tick", ascending=True
        )
        for firm_idx, firm in enumerate(cat_df.firm.unique()):
            firm_df = cat_df.loc[cat_df.firm == firm]
            traces.append(
                go.Scatter(
                    x=firm_df.tick,
                    y=firm_df.quality,
                    mode="lines",
                    line={"color": colour},
                    # only the first firm of each category appears in the legend
                    showlegend=firm_idx == 0,
                    hovertext="category {} firm {}".format(cat, firm),
                    legendgroup=str(cat),
                    name="category {}".format(cat),
                )
            )
    layout = go.Layout(title="Evolution of quality (coloured by category)")
    return go.Figure(data=traces, layout=layout)
def plot_capital(capital_df):
    """
    evolution of the capital of firms over time

    One line per column of *capital_df* (a firm), plotted against the
    DataFrame index.
    """
    traces = []
    for firm in capital_df.columns:
        traces.append(
            go.Scatter(
                x=capital_df.index,
                y=capital_df[firm],
                name="firm " + str(firm),
            )
        )
    return go.Figure(data=traces, layout=go.Layout(title="Capital over time"))
def plot_quality_by_firm(quality_df):
    """
    evolution of the quality of all products over time, grouped by firm

    One line per (firm, category) pair; lines of the same firm share a
    colour and a single legend entry.
    """
    traces = []
    for firm_idx, firm in enumerate(quality_df.firm.unique()):
        colour = tab10[np.mod(firm_idx, 10)]
        firm_df = quality_df.loc[quality_df.firm == firm].sort_values("tick")
        for cat_idx, cat in enumerate(firm_df.category.unique()):
            cat_df = firm_df.loc[firm_df.category == cat]
            traces.append(
                go.Scatter(
                    x=cat_df.tick,
                    y=cat_df.quality,
                    mode="lines",
                    line={"color": colour},
                    # only the first category of each firm appears in the legend
                    showlegend=cat_idx == 0,
                    hovertext="firm {} category {}".format(firm, cat),
                    legendgroup=str(firm),
                    name="firm {}".format(firm),
                )
            )
    layout = go.Layout(title="Evolution of quality (coloured by firm)")
    return go.Figure(data=traces, layout=layout)
def plot_market_entry(cat_entry_and_exit_df):
    """
    A plot with the total number of firms which have entered/left the categories
    during the simulation

    Entries point right, exits point left, and the net entries are overlaid
    in a darker shade.
    """
    categories = cat_entry_and_exit_df.index
    entries = cat_entry_and_exit_df.entry.astype(int)
    exits = cat_entry_and_exit_df.exit.astype(int)
    idx = np.arange(len(entries))

    entry_bar = go.Bar(
        y=categories,
        x=entries,
        orientation="h",
        showlegend=False,
        hoverinfo="text",
        hovertext=[
            "{} entries in category {}".format(n, c) for n, c in zip(entries, idx)
        ],
        marker={"color": "#FF6700"},
    )
    # exits are drawn as negative bars so they extend to the left
    exit_bar = go.Bar(
        y=categories,
        x=-exits,
        orientation="h",
        showlegend=False,
        hoverinfo="text",
        hovertext=[
            "{} exits in category {}".format(n, c) for n, c in zip(exits, idx)
        ],
        marker={"color": "#FF6700"},
    )
    net = entries - exits
    net_bar = go.Bar(
        y=categories,
        x=net,
        orientation="h",
        showlegend=False,
        hoverinfo="text",
        hovertext=[
            "{} net entries in category {}".format(n, c) for n, c in zip(net, idx)
        ],
        marker={"color": "#993d00"},
    )
    layout = go.Layout(title="Market entry and exit per category", barmode="overlay")
    return go.Figure(data=[entry_bar, exit_bar, net_bar], layout=layout)
def plot_market_concentration(res_df):
    """
    Bar chart of the market share held by the top three firms in each
    category during the last simulated year. Bars for thin markets
    (three or fewer active firms) are rendered semi-transparent.
    """
    rounded_shares = np.round(res_df.consumer, 1)
    labels = []
    for share, n_firms in zip(rounded_shares, res_df.firms_active):
        labels.append("{}%, total {} firms".format(share, n_firms))
    opacities = []
    for n_firms in res_df.firms_active:
        opacities.append(1 if n_firms > 3 else 0.5)
    bar = go.Bar(
        x=res_df.category.values.astype(int),
        y=res_df.consumer,
        hoverinfo="text",
        hovertext=labels,
        marker={"opacity": opacities},
    )
    layout = go.Layout(
        title="Market share of top three firms in each category",
        yaxis={"title": "% of category served by top three"},
        xaxis={"title": "Category"},
    )
    return go.Figure(data=[bar], layout=layout)
def plot_market_share_timeline(df):
    """
    timeline of the percentage of market share owned by the top
    three firms in each category

    One Scatter trace per category; hover text also reports how many firms
    were active at that tick.
    """
    data = []
    for cat in df.category.unique():
        df_ = df.loc[df.category == cat]
        data += [
            go.Scatter(
                x=df_.tick,
                y=df_.consumer,
                hoverinfo="text",
                mode="lines",
                name="category {}".format(cat),
                hovertext=[
                    "category {}: {}%, total {} firms".format(cat, x, y)
                    for x, y in zip(np.round(df_.consumer, 1), df_.firms_active)
                ],
            )
        ]
    layout = go.Layout(
        title="Market share of top three firms in each category - timeline",
        yaxis={"title": "% of category served by top three"},
        xaxis={"title": "Category"},
    )
    return go.Figure(data=data, layout=layout)
def plot_firm_specialisation(df):
    """
    Histogram showing how specialised firms are: the distribution of the
    number of categories each firm is active in.
    """
    hover_labels = ["{}%".format(p) for p in np.round(df.perc, 1)]
    trace = go.Bar(
        x=df.bins,
        y=df.perc,
        hoverinfo="text",
        hovertext=hover_labels,
    )
    return go.Figure(
        data=[trace],
        layout=go.Layout(
            title="Firm activity profile",
            xaxis={"title": "Number of categories firms are active in"},
            yaxis={"title": "Frequency"},
        ),
    )
def plot_complimentarity(df):
    """
    Histogram of how many distinct firms consumers have used over the
    last simulated year.
    """
    hover_labels = []
    for p in np.round(df.perc, 1):
        hover_labels.append("{}%".format(p))
    trace = go.Bar(
        x=df.bins,
        y=df.perc,
        hoverinfo="text",
        hovertext=hover_labels,
    )
    layout = go.Layout(
        title="Consumer usage profile",
        xaxis={"title": "Number of firms used by consumers in the last year"},
        yaxis={"title": "Frequency"},
    )
    return go.Figure(data=[trace], layout=layout)
def plot_new_products(counter, new=False):
    """
    Cumulative count of product releases over the simulation, either in
    brand-new categories (new=True) or in already existing ones.
    """
    if new:
        kind = "new"
    else:
        kind = "existing"
    months = np.arange(len(counter)) + 1
    trace = go.Scatter(x=months, y=np.cumsum(counter), showlegend=False)
    layout = go.Layout(
        title="Cumulative number of products being released in {} categories".format(kind),
        xaxis={"title": "Month in simulation"},
        yaxis={"title": ""},
    )
    return go.Figure(data=[trace], layout=layout)
def plot_welfare(df):
    """
    plot of the highest quality available in a category, at the end of the run_simulation,
    ordered by the mean usage per tick
    size of dots indicates the number of firms active in that category
    """
    def scaler(vals, min=4, max=10):
        # Maps vals into roughly [min, max] for marker sizing.
        # NOTE(review): divides by np.max(vals) rather than the range
        # (np.max - np.min), so the top of the output interval is only
        # reached when np.min(vals) == 0 — confirm this is intended.
        # Also shadows the builtins min/max.
        return min + (vals - np.min(vals)) / np.max(vals) * (max - min)
    data = [
        go.Scatter(
            x=df.mean_usage_per_tick,
            y=df.quality,
            mode="markers",
            # dot size encodes how many firms serve the category
            marker={"size": scaler(df.num_firms.values)},
            hoverinfo="text",
            showlegend=False,
            hovertext=[
                "category {}, offered by {} firms".format(x, y)
                for x, y in zip(df.category.values, np.round(df.num_firms.values, 1))
            ],
        )
    ]
    layout = go.Layout(
        title="(Highest) quality per category", yaxis={"title": "Quality"}
    )
    return go.Figure(data=data, layout=layout)
def plot_investment_choices(invest_df):
    """
    One line per investment type showing how the firms' investment
    choices evolve over time (index = tick, columns = choice types).
    """
    traces = []
    for col in invest_df.columns:
        traces.append(go.Scatter(x=invest_df.index, y=invest_df[col], name=col))
    return go.Figure(data=traces, layout=go.Layout(title="investment choices"))
def plot_investment_success(df):
    """
    Time series of investment success into new products, one line
    per dataframe column.
    """
    traces = []
    for col in df.columns:
        traces.append(go.Scatter(x=df.index, y=df[col], name=col))
    return go.Figure(
        data=traces,
        layout=go.Layout(title="investment success into new product"),
    )
def plot_firm_engagement(df):
    """
    Usage over time: one line per dataframe column showing the number
    of a firm's products in use.
    """
    traces = []
    for col in df.columns:
        traces.append(go.Scatter(x=df.index, y=df[col], name=col))
    return go.Figure(
        data=traces,
        layout=go.Layout(title="Firm: number of products used over time"),
    )
def plot_concern(df):
    """
    Privacy concern over time; each trace (dataframe column) is one consumer.
    """
    traces = []
    for col in df.columns:
        traces.append(go.Scatter(x=df.index, y=df[col], name=col))
    return go.Figure(
        data=traces,
        layout=go.Layout(title="Privacy concern over time"),
    )
def plot_privacy_score(df):
    """
    Privacy score over time; each trace (dataframe column) is one firm.
    """
    traces = []
    for col in df.columns:
        traces.append(go.Scatter(x=df.index, y=df[col], name=col))
    return go.Figure(
        data=traces,
        layout=go.Layout(title="Privacy score over time"),
    )
def plot_data_requests(df):
    """
    Jittered scatter of new firms' first year: data requests granted
    (x) vs number of categories they are active in after one year (y).
    Fixed RNG seeds keep the jitter reproducible between renders.
    """
    n_points = df.shape[0]
    x_jitter = np.random.RandomState(0).uniform(low=-0.05, high=0.05, size=n_points)
    y_jitter = np.random.RandomState(48579438).uniform(
        low=-0.05, high=0.05, size=n_points
    )
    scatter = go.Scatter(
        x=df.requests + x_jitter,
        y=df.num_cat + y_jitter,
        mode="markers",
    )
    layout = go.Layout(
        title="First year for new firms",
        xaxis={
            "title": "Number of data requests granted to a new firm in its first year"
        },
        yaxis={"title": "Number of categories a new firm is active in after one year"},
    )
    return go.Figure(data=[scatter], layout=layout)
|
[
"[email protected]"
] | |
fe174dfb8c14d16fa84acd65a5666d3dc0e989d9
|
9c32f8f4f0fcee5229cd9205295af0b87b5c2af2
|
/mb_project/settings.py
|
7b58acd1086a56192fdd23e247ca1f451f134db4
|
[] |
no_license
|
ClioGMU/ch-4-message-board-lcrossley
|
6a652cc4e28a9861cfafdfeaa1fc2fdaefcec2df
|
6b924b8e3b8913aae64e066c88d0135c264424ef
|
refs/heads/master
| 2023-04-27T12:26:52.374743 | 2019-09-17T17:45:45 | 2019-09-17T17:45:45 | 208,861,055 | 0 | 2 | null | 2023-04-21T20:39:02 | 2019-09-16T17:44:20 |
Python
|
UTF-8
|
Python
| false | false | 3,165 |
py
|
"""
Django settings for mb_project project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#*b+6p=tx2npjg#ex)1%1vh809)8_xsewsen4@bgfl@=5v2pov'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posts.apps.PostsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mb_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mb_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
d04121d177bb2d6e173e6226b8ffce7f72c49aae
|
6b1f521d6d712ad9dc3c030ab1e51e6e7b5a52d9
|
/XmlObjDesc/scripts/xml/parsers/xmlproc/xmldtd.py
|
8516a1a0e59eb26a69869d5ab237fe381e7f3ca3
|
[] |
no_license
|
feipengsy/JUNO
|
73aa30d6f68c51259e6d76c79b5d82cb94c588d1
|
69b7ce8af4b65a5c7e908023234824fff29df7bc
|
refs/heads/master
| 2021-01-15T22:29:40.963670 | 2015-11-01T06:18:53 | 2015-11-01T06:18:53 | 26,515,605 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28,468 |
py
|
"""
These are the DTD-aware classes of xmlproc. They provide both the
DTD event consumers for the DTD parser as well as the objects that
store DTD information for retrieval by clients (including the
validating parser).
$Id: xmldtd.py,v 1.1 2004/12/05 13:48:30 roiser Exp $
"""
import types
from xmlutils import *
from xmlapp import *
# ==============================
# WFC-DTD
# ==============================
class WFCDTD(DTDConsumer):
    """DTD-representing class for the WFC (well-formedness checking) parser.

    Stores entities and element/attribute declarations as they are parsed,
    echoing every event to an optional secondary listener. Python 2 code
    (uses dict.has_key). `DTDConsumer` and `predef_ents` come from xmlapp /
    xmlutils (star-imported at module top).
    """
    def __init__(self,parser):
        DTDConsumer.__init__(self,parser)
        # Default listener is a no-op consumer until set_dtd_listener is called.
        self.dtd_listener=DTDConsumer(parser)
        self.reset()
    def reset(self):
        "Clears all DTD information."
        self.gen_ents={}       # general entities, keyed by name
        self.param_ents={}     # parameter entities, keyed by name
        self.elems={}          # ElementType objects, keyed by element name
        self.attrinfo={}       # shortcut validation info, rebuilt in dtd_end
        self.used_notations={} # Notations used by NOTATION attrs
        # Adding predefined entities
        for name in predef_ents.keys():
            self.new_general_entity(name,predef_ents[name])
    def set_dtd_listener(self,listener):
        "Registers an object that listens for DTD parse events."
        self.dtd_listener=listener
    def resolve_pe(self,name):
        """Returns the entity object associated with this parameter entity
        name. Throws KeyError if the entity is not declared."""
        return self.param_ents[name]
    def resolve_ge(self,name):
        """Returns the entity object associated with this general entity
        name. Throws KeyError if the entity is not declared."""
        return self.gen_ents[name]
    def get_general_entities(self):
        """Returns the names of all declared general entities."""
        return self.gen_ents.keys()
    def get_parameter_entities(self):
        "Returns the names of all declared parameter entities."
        return self.param_ents.keys()
    def get_elem(self,name):
        """Returns the declaration of this element. Throws KeyError if the
        element does not exist."""
        return self.elems[name]
    def get_elements(self):
        "Returns a list of all declared element names."
        return self.elems.keys()
    def get_notation(self,name):
        """Returns the declaration of the notation. Throws KeyError if the
        notation does not exist. The WFC DTD tracks no notations, so this
        always raises; CompleteDTD overrides it."""
        raise KeyError(name)
    def get_notations(self):
        """Returns the names of all declared notations (always empty here)."""
        return []
    def get_root_elem(self,name):
        """Returns the name of the declared root element, or None if none
        were declared. Always None here; CompleteDTD overrides it."""
        return None
    # --- Shortcut information for validation
    def dtd_end(self):
        "Stores shortcut information."
        # Precompute (defaults, fixed) attribute maps per element for fast
        # lookup via get_element_info during validation.
        self.attrinfo={}
        for elem in self.elems.values():
            self.attrinfo[elem.get_name()]=(elem.get_default_attributes(),
                                            elem.get_fixed_attributes())
        self.dtd_listener.dtd_end()
    def get_element_info(self,name):
        # Returns the (default_attrs, fixed_attrs) pair built in dtd_end.
        return self.attrinfo[name]
    # --- Parse events
    def new_attribute(self,elem,attr,a_type,a_decl,a_def):
        "Receives the declaration of a new attribute."
        self.dtd_listener.new_attribute(elem,attr,a_type,a_decl,a_def)
        if not self.elems.has_key(elem):
            self.elems[elem]=ElementTypeAny(elem) # Adding dummy
        self.elems[elem].add_attr(attr,a_type,a_decl,a_def,self.parser)
    # --- Echoing DTD parse events
    def dtd_start(self):
        self.dtd_listener.dtd_start()
    # dtd_end is implemented in WFCDTD, no need to repeat here
    def handle_comment(self, contents):
        self.dtd_listener.handle_comment(contents)
    def handle_pi(self, target, data):
        self.dtd_listener.handle_pi(target, data)
    def new_general_entity(self,name,val):
        # XML semantics: the first declaration of an entity wins.
        if self.gen_ents.has_key(name):
            ## FIXME: May warn
            return # Keep first decl
        ent=InternalEntity(name,val)
        self.gen_ents[name]=ent
        self.dtd_listener.new_general_entity(name,val)
    def new_parameter_entity(self,name,val):
        if self.param_ents.has_key(name):
            ## FIXME: May warn
            return # Keep first decl
        ent=InternalEntity(name,val)
        self.param_ents[name]=ent
        self.dtd_listener.new_parameter_entity(name,val)
    def new_external_entity(self,ent_name,pubid,sysid,ndata):
        if self.gen_ents.has_key(ent_name):
            ## FIXME: May warn
            return # Keep first decl
        # Track notations referenced by unparsed entities so the validating
        # subclass can report undeclared ones at dtd_end.
        if ndata != None and hasattr(self, "notations"):
            if not self.notations.has_key(ndata):
                self.used_notations[ndata]= ent_name
        ent=ExternalEntity(ent_name,pubid,sysid,ndata)
        self.gen_ents[ent_name]=ent
        self.dtd_listener.new_external_entity(ent_name,pubid,sysid,ndata)
    def new_external_pe(self,name,pubid,sysid):
        if self.param_ents.has_key(name):
            ## FIXME: May warn
            return # Keep first decl
        ent=ExternalEntity(name,pubid,sysid,"")
        self.param_ents[name]=ent
        self.dtd_listener.new_external_pe(name,pubid,sysid)
    def new_comment(self,contents):
        self.dtd_listener.new_comment(contents)
    def new_pi(self,target,rem):
        self.dtd_listener.new_pi(target,rem)
    def new_notation(self,name,pubid,sysid):
        self.dtd_listener.new_notation(name,pubid,sysid)
    def new_element_type(self,elem_name,elem_cont):
        self.dtd_listener.new_element_type(elem_name,elem_cont)
# ==============================
# DTD consumer for the validating parser
# ==============================
class CompleteDTD(WFCDTD):
    """Complete DTD handler for the validating parser.

    Extends WFCDTD with notation tracking, content-model compilation and
    handling of ATTLIST declarations that precede their ELEMENT declaration.
    """
    def __init__(self,parser):
        WFCDTD.__init__(self,parser)
    def reset(self):
        "Clears all DTD information."
        WFCDTD.reset(self)
        self.notations={}
        self.attlists={}  # Attribute lists of elements not yet declared
        self.root_elem=None
        self.cmhash={}    # cache of compiled content models (see make_model)
    def get_root_elem(self):
        "Returns the name of the declared root element."
        return self.root_elem
    def get_notation(self,name):
        """Returns the declaration of the notation. Throws KeyError if the
        notation does not exist."""
        return self.notations[name]
    def get_notations(self):
        """Returns the names of all declared notations."""
        return self.notations.keys()
    # --- DTD parse events
    def dtd_end(self):
        WFCDTD.dtd_end(self)
        self.cmhash={}
        # Any leftover attlists belong to elements that were never declared.
        for elem in self.attlists.keys():
            self.parser.report_error(1006,elem)
        self.attlists={}  # Not needed any more, can free this memory
        # Report notations referenced by entities/attributes but never declared.
        for notation in self.used_notations.keys():
            try:
                self.get_notation(notation)
            except KeyError:
                self.parser.report_error(2022,(self.used_notations[notation],
                                               notation))
        self.used_notations={}  # Not needed, save memory
    def new_notation(self,name,pubid,sysid):
        self.notations[name]=(pubid,sysid)
        self.dtd_listener.new_notation(name,pubid,sysid)
    def new_element_type(self,elem_name,elem_cont):
        if self.elems.has_key(elem_name):
            self.parser.report_error(2012,elem_name)
            return  # Keeping first declaration
        # EMPTY and ANY get dedicated representations; everything else is
        # compiled into a finite automaton via make_model.
        if elem_cont=="EMPTY":
            elem_cont=("",[],"")
            self.elems[elem_name]=ElementType(elem_name,make_empty_model(),
                                              elem_cont)
        elif elem_cont=="ANY":
            elem_cont=None
            self.elems[elem_name]=ElementTypeAny(elem_name)
        else:
            model=make_model(self.cmhash,elem_cont,self.parser)
            self.elems[elem_name]=ElementType(elem_name,model,elem_cont)
        # Attach any attributes declared before this element existed.
        if self.attlists.has_key(elem_name):
            for (attr,a_type,a_decl,a_def) in self.attlists[elem_name]:
                self.elems[elem_name].add_attr(attr,a_type,a_decl,a_def,\
                                               self.parser)
            del self.attlists[elem_name]
        self.dtd_listener.new_element_type(elem_name,elem_cont)
    def new_attribute(self,elem,attr,a_type,a_decl,a_def):
        "Receives the declaration of a new attribute."
        self.dtd_listener.new_attribute(elem,attr,a_type,a_decl,a_def)
        try:
            self.elems[elem].add_attr(attr,a_type,a_decl,a_def,self.parser)
        except KeyError:
            # Element not declared yet: park the attribute in attlists until
            # (if ever) the element declaration arrives.
            try:
                self.attlists[elem].append((attr,a_type,a_decl,a_def))
            except KeyError:
                self.attlists[elem]=[(attr,a_type,a_decl,a_def)]
# ==============================
# Represents an XML element type
# ==============================
class ElementType:
    """Represents an element type: its attributes and its content model.

    The compiled content model is the FDA dict produced by make_model
    (state -> {element_name: next_state}, plus "start"/"final" keys);
    the original tuple structure is kept for get_content_model.
    """
    def __init__(self,name,compiled,original):
        self.name=name
        self.attrhash={}   # Attribute objects by attribute name
        self.attrlist=[]   # attribute names in declaration order
        self.content_model=compiled
        self.content_model_structure=original
    def get_name(self):
        "Returns the name of the element type."
        return self.name
    def get_attr_list(self):
        """Returns a list of the declared attribute names in the order the
        attributes were declared."""
        return self.attrlist
    def get_attr(self,name):
        "Returns the attribute or throws a KeyError if it's not declared."
        return self.attrhash[name]
    def add_attr(self,attr,a_type,a_decl,a_def,parser):
        "Adds a new attribute to the element."
        if self.attrhash.has_key(attr):
            parser.report_error(1007,attr)
            return # Keep first declaration
        self.attrlist.append(attr)
        if a_type=="ID":
            # At most one ID attribute per element, and it must be
            # #REQUIRED or #IMPLIED.
            for attr_name in self.attrhash.keys():
                if self.attrhash[attr_name].type=="ID":
                    parser.report_error(2013)
            if a_decl!="#REQUIRED" and a_decl!="#IMPLIED":
                parser.report_error(2014)
        elif type(a_type)==types.TupleType and a_type[0]=="NOTATION":
            # Remember referenced notations so undeclared ones are reported.
            for notation in a_type[1]:
                parser.dtd.used_notations[notation]=attr
        self.attrhash[attr]=Attribute(attr,a_type,a_decl,a_def,parser)
        if a_def!=None:
            self.attrhash[attr].validate(self.attrhash[attr].default,parser)
    def get_start_state(self):
        "Return the start state of this content model."
        return self.content_model["start"]
    def final_state(self, state):
        "True if 'state' is a final state."
        # States are bit masks, so a bitwise AND tests membership.
        return self.content_model["final"] & state
    def next_state(self, state, elem_name):
        """Returns the next state of the content model from the given one
        when elem_name is encountered. Character data is represented as
        '#PCDATA'. If 0 is returned the element is not allowed here or if
        the state is unknown."""
        return self.content_model[state].get(elem_name, 0)
    def next_state_skip(self, state, elem_name):
        """Assumes that one element has been forgotten and tries to
        skip forward one state (regardless of what element is attached
        to the transition) to get to a state where elem_name is
        legal. Returns a (state, elem) tuple, where elem is the
        element missing, and state is the state reached by following
        the elem_name arc after using the missing element. None is
        returned if no missing element can be found (implicit return)."""
        arcs = self.content_model[state]
        for skipped in arcs.keys():
            if self.content_model[arcs[skipped]].has_key(elem_name):
                arcs2 = self.content_model[arcs[skipped]]
                return (arcs2[elem_name], skipped)
    def get_valid_elements(self, state):
        """Returns a list of the valid elements in the given state, or the
        empty list if none are valid (or if the state is unknown). If the
        content model is ANY, the empty list is returned."""
        if self.content_model == None: # that is, any
            return [] # any better ideas?
        try:
            return self.content_model[state].keys()
        except KeyError:
            return []
    def get_content_model(self):
        """Returns the element content model in (sep,cont,mod) format, where
        cont is a list of (name,mod) and (sep,cont,mod) tuples. ANY content
        models are represented as None, and EMPTYs as ("",[],"")."""
        return self.content_model_structure
    # --- Methods used to create shortcut validation information
    def get_default_attributes(self):
        # Maps attribute name -> declared default, skipping attrs without one.
        defs={}
        for attr in self.attrhash.values():
            if attr.get_default()!=None:
                defs[attr.get_name()]=attr.get_default()
        return defs
    def get_fixed_attributes(self):
        # Maps attribute name -> value for #FIXED attributes only.
        fixed={}
        for attr in self.attrhash.values():
            if attr.get_decl()=="#FIXED":
                fixed[attr.get_name()]=attr.get_default()
        return fixed
# --- Element types with ANY content
# --- Element types with ANY content
class ElementTypeAny(ElementType):
    """Element type with ANY content: every element is valid everywhere,
    so the content model degenerates to a single always-final state (1)."""
    def __init__(self,name):
        ElementType.__init__(self,name,None,None)
    def get_start_state(self):
        return 1
    def final_state(self,state):
        return 1
    def next_state(self,state,elem_name):
        return 1
    def get_valid_elements(self, state):
        return [] # any better ideas? can't get DTD here...
# ==============================
# Attribute
# ==============================
class Attribute:
    """Represents a declared attribute.

    `attrtype` is either a string keyword ("CDATA", "ID", ...) or a tuple
    of enumerated values. Python 2 code: uses the `string` module and
    `StringTypes`/`matches`/regexes star-imported from xmlutils
    (presumably — TODO confirm).
    """
    def __init__(self,name,attrtype,decl,default,parser):
        self.name=name
        self.type=attrtype
        self.decl=decl
        # Normalize the default value before setting it
        # (collapse runs of whitespace for every type except CDATA).
        if default!=None and self.type!="CDATA":
            self.default=string.join(string.split(default))
        else:
            self.default=default
        # Handling code for special attribute xml:space
        if name=="xml:space":
            error = 0
            # xml:space must be an enumeration over "default"/"preserve".
            if type(self.type) in StringTypes:
                parser.report_error(2015)
                return
            if len(self.type) < 1 or len(self.type) > 2:
                error = 1
            else:
                for alt in self.type:
                    if alt not in ["default", "preserve"]:
                        error = 1
            if error:
                parser.report_error(2016)
    def validate(self,value,parser):
        "Validates given value for correctness."
        if type(self.type) not in StringTypes:
            # Enumerated type: the value must be one of the alternatives.
            for val in self.type:
                if val==value: return
            parser.report_error(2017,(value,self.name))
        elif self.type=="CDATA":
            return
        elif self.type=="ID" or self.type=="IDREF" or self.type=="ENTITIY":
            # NOTE(review): "ENTITIY" is misspelled (should be "ENTITY"),
            # so ENTITY-typed attributes are never name-checked here.
            if not matches(reg_name,value):
                parser.report_error(2018,self.name)
        elif self.type=="NMTOKEN":
            if not matches(reg_nmtoken,value):
                parser.report_error(2019,self.name)
        elif self.type=="NMTOKENS":
            if not matches(reg_nmtokens,value):
                parser.report_error(2020,self.name)
        elif self.type=="IDREFS" or self.type=="ENTITIES":
            for token in string.split(value):
                if not matches(reg_name,token):
                    parser.report_error(2021,(token,self.name))
    def get_name(self):
        "Returns the attribute name."
        return self.name
    def get_type(self):
        "Returns the type of the attribute. (ID, CDATA etc)"
        return self.type
    def get_decl(self):
        "Returns the declaration (#IMPLIED, #REQUIRED, #FIXED or #DEFAULT)."
        return self.decl
    def get_default(self):
        """Returns the default value of the attribute, or None if none has
        been declared."""
        return self.default
# ==============================
# Entities
# ==============================
class InternalEntity:
    """An entity whose replacement text is given inline in the DTD."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def is_internal(self):
        """Always true (1) for internal entities."""
        return 1

    def get_value(self):
        """Return the replacement text of the entity."""
        return self.value
class ExternalEntity:
    """An entity whose content lives in an external resource, optionally
    associated with a notation (unparsed entity)."""

    def __init__(self, name, pubid, sysid, notation):
        self.name = name
        self.pubid = pubid
        self.sysid = sysid
        self.notation = notation

    def is_parsed(self):
        """True if this is a parsed entity (it has no notation)."""
        return self.notation == ""

    def is_internal(self):
        """Always false (0) for external entities."""
        return 0

    def get_pubid(self):
        """Return the public identifier of the entity."""
        return self.pubid

    def get_sysid(self):
        """Return the system identifier of the entity."""
        return self.sysid

    def get_notation(self):
        """Return the notation of the entity, or None if there is none."""
        return self.notation
# ==============================
# Internal classes
# ==============================
# Non-deterministic state model builder
class FNDABuilder:
    """Incrementally constructs a finite non-deterministic automaton.

    States are integers indexing a transition table; each table entry is a
    list of (target_state, label) arcs, where the empty label "" denotes an
    epsilon transition. An internal stack lets callers remember states and
    wire arcs back to them later.
    """

    def __init__(self):
        self._cur = 0        # current state index
        self._arcs = [[]]    # transition table: state -> [(to, label), ...]
        self._saved = []     # stack of remembered states

    def remember_state(self):
        "Makes the builder remember the current state."
        self._saved.append(self._cur)

    def set_current_to_remembered(self):
        """Makes the current state the last remembered one. The last remembered
        one is not forgotten."""
        self._cur = self._saved[-1]

    def forget_state(self):
        "Makes the builder forget the current remembered state."
        self._saved.pop()

    def new_state(self):
        "Creates a new last state and makes it the current one."
        self._arcs.append([])
        self._cur = len(self._arcs) - 1

    def get_automaton(self):
        "Returns the automaton produced by the builder."
        return self._arcs

    def get_current_state(self):
        "Returns the current state."
        return self._cur

    def new_transition(self, label, frm, to):
        "Creates a new transition from frm to to, over label."
        self._arcs[frm].append((to, label))

    def new_transition_to_new(self, label):
        """Creates a new transition from the current state to a new state,
        which becomes the new current state."""
        self.remember_state()
        self.new_state()
        self._arcs[self._saved[-1]].append((self._cur, label))
        self.forget_state()

    def new_transition_cur2rem(self, label):
        """Adds a new transition from the current state to the last remembered
        state."""
        self._arcs[self._cur].append((self._saved[-1], label))

    def new_transition_rem2cur(self, label):
        """Adds a new transition from the last remembered state to the
        current one."""
        self._arcs[self._saved[-1]].append((self._cur, label))

    def new_transition_2cur(self, frm, label):
        "Creates a new transition from frm to current state, with label."
        self._arcs[frm].append((self._cur, label))
# Content model class
class ContentModel:
"Represents a singleton content model. (Internal.)"
def __init__(self,contents,modifier):
self.contents=contents
self.modifier=modifier
def add_states(self,builder):
"Builds the part of the automaton corresponding to this model part."
if self.modifier=="?":
builder.remember_state()
self.add_contents(builder)
builder.new_transition_rem2cur("")
builder.forget_state()
elif self.modifier=="+":
self.add_contents(builder)
builder.remember_state()
self.add_contents(builder,1)
builder.set_current_to_remembered()
builder.forget_state()
elif self.modifier=="*":
builder.remember_state()
builder.new_transition_to_new("")
self.add_contents(builder,1)
builder.new_transition_rem2cur("")
builder.forget_state()
else:
self.add_contents(builder)
def add_contents(self,builder,loop=0):
"""Adds the states and transitions belonging to the self.contents
parts. If loop is true the states will loop back to the first state."""
if type(self.contents[0])==types.InstanceType:
if loop:
builder.remember_state()
self.contents[0].add_states(builder)
builder.new_transition_cur2rem("")
builder.set_current_to_remembered()
builder.forget_state()
else:
self.contents[0].add_states(builder)
else:
if loop:
builder.new_transition(self.contents[0],
builder.get_current_state(),
builder.get_current_state())
else:
builder.new_transition_to_new(self.contents[0])
# Sequential content model
class SeqContentModel(ContentModel):
"Represents a sequential content model. (Internal.)"
def add_contents(self,builder,loop=0):
if loop:
builder.remember_state()
for cp in self.contents:
cp.add_states(builder)
if loop:
builder.new_transition_cur2rem("")
builder.forget_state()
# Choice content model
class ChoiceContentModel(ContentModel):
"Represents a choice content model. (Internal.)"
def add_contents(self,builder,loop=0):
builder.remember_state()
end_states=[] # The states at the end of each alternative
for cp in self.contents:
builder.new_state()
builder.new_transition_rem2cur("")
cp.add_states(builder)
end_states.append(builder.get_current_state())
builder.new_state()
for state in end_states:
builder.new_transition_2cur(state,"")
if loop:
builder.new_transition_cur2rem("")
builder.forget_state()
# ==============================
# Conversion of FDAs
# ==============================
def hash(included):
    """Creates a hash number from the included array.

    Treats `included` as a bit vector: state i contributes 2**i when set,
    giving a unique (Python 2 long) number per subset of states.
    NOTE(review): shadows the builtin hash(); the 1L/2L literals make this
    Python 2 only.
    """
    no=0
    exp=1L
    for state in included:
        if state:
            no=no+exp
        exp=exp*2L
    return no
def fnda2fda(transitions,final_state,parser):
    """Converts a finite-state non-deterministic automaton into a deterministic
    one (subset construction). Deterministic states are bit-vector hashes of
    NFA state sets; the result dict also carries "start" and "final" keys."""
    # transitions: old FNDA as [[(to,over),(to,over),...],
    #                           [(to,over),(to,over),...]] structure
    # new FDA as [{over:to,over:to,...},
    #             {over:to,over:to,...}] structure
    # err: error-handler
    #print_trans(transitions)
    transitions.append([])
    new_states={}
    # Compute the e-closure of the start state
    closure_hash={}
    start_state=[0]*len(transitions)
    compute_closure(0,start_state,transitions)
    state_key=hash(start_state)
    closure_hash[0]=state_key
    # Add transitions and the other states
    add_transitions(0,transitions,new_states,start_state,state_key,parser,
                    closure_hash)
    states=new_states.keys()
    states.sort()
    #print_states(new_states,2)
    # The start set always contains NFA state 0, so its bit-vector hash is
    # odd; the smallest odd key is the DFA start state.
    for state in states:
        if state % 2==1:
            new_states["start"]=state
            break
    # Final states are those whose set includes the NFA final state's bit.
    new_states["final"]=pow(2L,final_state)
    return new_states
def add_transitions(ix,transitions,new_states,cur_state_list,state_key,parser,
                    closure_hash):
    """Set up transitions and create new states.

    Recursive step of the subset construction: for the NFA state set
    `cur_state_list` (keyed as `state_key`), group all non-epsilon arcs by
    label, compute closures of the destination sets, and recurse into sets
    not seen before. `closure_hash` memoizes single-state closures.
    """
    new_states[state_key]={} # OK, a new one, create it
    new_trans={} # Hash from label to a list of the possible destination states
    # Find all transitions from this set of states and group them by their
    # labels in the new_trans hash
    no=0
    for old_state in cur_state_list:
        if old_state:
            for (to,what) in transitions[no]:
                if what!="":
                    if new_trans.has_key(what):
                        new_trans[what].append(to)
                    else:
                        new_trans[what]=[to]
        no=no+1
    # Go through the list of transitions, creating new transitions and
    # destination states in the model
    for (over,destlist) in new_trans.items():
        # creating new state
        # Reports ambiguity, but rather crudely. Will improve this later.
        # if len(destlist)>1:
        #     parser.report_error(1008)
        if len(destlist)==1 and closure_hash.has_key(destlist[0]):
            # The closure of this state has been computed before, don't repeat
            new_state=closure_hash[destlist[0]]
        else:
            new_inc=[0]*len(transitions)
            for to in destlist:
                compute_closure(to,new_inc,transitions)
            new_state=hash(new_inc)
            if len(destlist)==1:
                closure_hash[destlist[0]]=new_state
        # add transition and destination state
        new_states[state_key][over]=new_state
        # NOTE(review): `to` and `new_inc` here are whatever the last
        # non-cached iteration left behind; if the cached branch above was
        # taken and new_state is unseen, this recursion uses stale values
        # (or raises NameError on the very first iteration) — verify.
        if not new_states.has_key(new_state):
            add_transitions(to,transitions,new_states,new_inc,\
                            new_state,parser,closure_hash)
def compute_closure(ix, included, transitions):
    """Mark (in place, in `included`) every state reachable from state `ix`
    via epsilon ("") transitions, including `ix` itself.

    Iterative worklist formulation with the same effect as the original
    depth-first recursion.
    """
    pending = [ix]
    while pending:
        state = pending.pop()
        included[state] = 1
        for (target, label) in transitions[state]:
            if label == "" and not included[target]:
                pending.append(target)
def print_trans(model):
    """Debug helper: dump an FNDA transition table to stdout, pausing for
    Enter at the end. Python 2 only (print statement, raw_input)."""
    ix=0
    for transitions in model:
        print "STATE: %d" % ix
        for step in transitions:
            print "  TO %d OVER %s" % step
        ix=ix+1
    raw_input()
def print_states(states,stop=0):
    """Debug helper: dump a DFA state table; stop>1 pauses after each state,
    stop truthy pauses at the end. Python 2 only (print, backticks,
    raw_input). Must be called before the "start"/"final" keys are added."""
    assert not (states.has_key("start") or states.has_key("final"))
    for trans_key in states.keys():
        trans=states[trans_key]
        print "State: "+`trans_key`
        for (to,what) in trans:
            try:
                print "  To: "+`to`+" over: "+what
            except TypeError:
                print "ERROR: "+`what`
            if stop>1:
                raw_input()
    if stop:
        raw_input()
def make_empty_model():
    """Build the FDA used for EMPTY content models: a single state (1) that
    is both the start and the final state, with no outgoing transitions."""
    model = {}
    model[1] = {}
    model["final"] = 1
    model["start"] = 1
    return model
def make_model(cmhash,content_model,err):
    """Creates an FDA from the content model.

    `cmhash` caches compiled automata keyed by the tuple structure's repr,
    so identical content models are compiled only once. Python 2 only:
    uses backtick repr syntax.
    """
    cm=`content_model`
    if cmhash.has_key(cm):
        return cmhash[cm]
    else:
        content_model=make_objects(content_model)
        builder=FNDABuilder()
        content_model.add_states(builder)
        # Determinize the built NFA; the builder's current state is final.
        content_model=fnda2fda(builder.get_automaton(),
                               builder.get_current_state(),
                               err)
        cmhash[cm]=content_model
        return content_model
def make_objects(content_model):
    """Convert a tuple-ized (separator, contents, modifier) content model
    into the object representation used by the automaton builder."""
    separator, contents, modifier = content_model
    if contents[0][0] == "#PCDATA":
        # it's implied that #PCDATA can occur more than once
        modifier = "*"
    children = []
    for part in contents:
        if len(part) == 2:
            # (name, modifier) leaf
            children.append(ContentModel([part[0]], part[1]))
        else:
            # nested (sep, contents, mod) group
            children.append(make_objects(part))
    if separator == ",":
        return SeqContentModel(children, modifier)
    elif separator == "|":
        return ChoiceContentModel(children, modifier)
    elif separator == "":
        return ContentModel(children, modifier)
# --- Various utilities
# --- Various utilities
def compile_content_model(cm):
    """Parse a content model string and return it compiled to an FDA.

    The first character of cm (the opening parenthesis) is skipped
    before handing the data to the DTD parser.
    """
    import dtdparser
    import utils
    parser = dtdparser.DTDParser()
    parser.set_error_handler(utils.ErrorPrinter(parser))
    parser.data = cm[1:]
    parser.datasize = len(parser.data)
    parser.final = 1
    return make_model({}, parser._parse_content_model(), parser)
def parse_content_model(cm):
    """Parse a content model string, returning the raw tuple structure
    (unlike compile_content_model, which also compiles it to an FDA)."""
    import dtdparser
    import utils
    parser = dtdparser.DTDParser()
    parser.set_error_handler(utils.ErrorPrinter(parser))
    parser.data = cm[1:]
    parser.datasize = len(parser.data)
    parser.final = 1
    return parser._parse_content_model()
def load_dtd(sysid):
    """Parse the external DTD at system identifier `sysid` and return a
    populated CompleteDTD."""
    import dtdparser
    import utils
    parser = dtdparser.DTDParser()
    parser.set_error_handler(utils.ErrorPrinter(parser))
    dtd = CompleteDTD(parser)
    parser.set_dtd_consumer(dtd)
    parser.parse_resource(sysid)
    return dtd
def load_dtd_string(dtdstr):
    """Parse a DTD given directly as a string and return a populated
    CompleteDTD."""
    import dtdparser
    import utils
    parser = dtdparser.DTDParser()
    parser.set_error_handler(utils.ErrorPrinter(parser))
    dtd = CompleteDTD(parser)
    parser.set_dtd_consumer(dtd)
    parser.parse_string(dtdstr)
    return dtd
|
[
"[email protected]"
] | |
0f48f2870227759d2cedb58f77816e6429b20a02
|
bdb183769c133f25e92dd6f2a9653fe69cecb715
|
/fds.analyticsapi.engines/fds/analyticsapi/engines/api/documents_api.py
|
936f4f4ad9dd579facdaf41124f1688965ff36e4
|
[
"Apache-2.0"
] |
permissive
|
saigiridhar21/analyticsapi-engines-python-sdk
|
5e6ec364791b63250ef7157eee8635c15e31b4f2
|
bb7c3d20c37dc7a30071962f610ad02db6440117
|
refs/heads/master
| 2022-12-06T22:13:11.551527 | 2020-09-02T17:30:07 | 2020-09-02T17:30:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,713 |
py
|
# coding: utf-8
"""
Engines API
Allow clients to fetch Engines Analytics through APIs. # noqa: E501
The version of the OpenAPI document: 2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from fds.analyticsapi.engines.api_client import ApiClient
from fds.analyticsapi.engines.exceptions import (
ApiTypeError,
ApiValueError
)
class DocumentsApi(object):
    """Lookup API for engine documents (PA, Publisher, SPAR, Vault).

    NOTE: This class was originally auto generated by OpenAPI Generator
    (https://openapi-generator.tech).  The four engine-specific endpoint
    pairs share identical request logic, so it has been factored into the
    private helper ``_get_documents_with_http_info``.  The public
    interface (method names, signatures, error messages, request shape)
    is unchanged.
    """

    # Maps each public method's base name to its resource path template.
    # The base name is also used verbatim in validation error messages,
    # matching the original generated code.
    _DOCUMENT_ENDPOINTS = {
        'get_pa3_documents': '/analytics/lookups/v2/engines/pa/documents/{path}',
        'get_pub_documents': '/analytics/lookups/v2/engines/pub/documents/{path}',
        'get_spar3_documents': '/analytics/lookups/v2/engines/spar/documents/{path}',
        'get_vault_documents': '/analytics/lookups/v2/engines/vault/documents/{path}',
    }

    # Keyword arguments accepted by every *_with_http_info method.
    _ALLOWED_KWARGS = frozenset([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    def __init__(self, api_client=None):
        """Wrap *api_client*; builds a default-configured ApiClient if None."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def _get_documents_with_http_info(self, method_name, path, kwargs):
        """Shared implementation of the four document-lookup endpoints.

        :param str method_name: public method base name (key into
            ``_DOCUMENT_ENDPOINTS``; also used in error messages)
        :param str path: the directory to look up (required)
        :param dict kwargs: caller-supplied options (``async_req``,
            ``_return_http_data_only``, ``_preload_content``,
            ``_request_timeout``)
        :raises ApiTypeError: on an unexpected keyword argument
        :raises ApiValueError: when ``path`` is None and client-side
            validation is enabled
        :return: whatever ``ApiClient.call_api`` returns
            (DocumentDirectories, a (data, status, headers) tuple, or a
            request thread when ``async_req`` is set)
        """
        # Reject unknown keyword arguments, exactly as the generated
        # per-method code did.
        for key in kwargs:
            if key not in self._ALLOWED_KWARGS:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and path is None:
            raise ApiValueError(
                "Missing the required parameter `path` when calling `%s`" % method_name)

        header_params = {
            # HTTP header `Accept`
            'Accept': self.api_client.select_header_accept(
                ['application/json']),
        }
        return self.api_client.call_api(
            self._DOCUMENT_ENDPOINTS[method_name], 'GET',
            {'path': path},   # path params
            [],               # query params
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='DocumentDirectories',  # noqa: E501
            auth_settings=['Basic'],              # Authentication setting
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})

    def get_pa3_documents(self, path, **kwargs):  # noqa: E501
        """Get PA3 documents and sub-directories in a directory.

        Synchronous by default; pass ``async_req=True`` to get a request
        thread instead (call ``thread.get()`` for the result).

        :param async_req bool: execute request asynchronously
        :param str path: The directory to get the documents and sub-directories in (required)
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
        :param _request_timeout: total timeout, or a (connect, read) tuple
        :return: DocumentDirectories
        """
        kwargs['_return_http_data_only'] = True
        return self.get_pa3_documents_with_http_info(path, **kwargs)  # noqa: E501

    def get_pa3_documents_with_http_info(self, path, **kwargs):  # noqa: E501
        """Get PA3 documents and sub-directories in a directory.

        Like :meth:`get_pa3_documents`, but returns a
        (DocumentDirectories, status_code, headers) tuple unless
        ``_return_http_data_only`` is set.
        """
        return self._get_documents_with_http_info('get_pa3_documents', path, kwargs)

    def get_pub_documents(self, path, **kwargs):  # noqa: E501
        """Gets Publisher documents and sub-directories in a directory.

        Synchronous by default; pass ``async_req=True`` to get a request
        thread instead (call ``thread.get()`` for the result).

        :param async_req bool: execute request asynchronously
        :param str path: The directory to get the documents in (required)
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
        :param _request_timeout: total timeout, or a (connect, read) tuple
        :return: DocumentDirectories
        """
        kwargs['_return_http_data_only'] = True
        return self.get_pub_documents_with_http_info(path, **kwargs)  # noqa: E501

    def get_pub_documents_with_http_info(self, path, **kwargs):  # noqa: E501
        """Gets Publisher documents and sub-directories in a directory.

        Like :meth:`get_pub_documents`, but returns a
        (DocumentDirectories, status_code, headers) tuple unless
        ``_return_http_data_only`` is set.
        """
        return self._get_documents_with_http_info('get_pub_documents', path, kwargs)

    def get_spar3_documents(self, path, **kwargs):  # noqa: E501
        """Gets SPAR3 documents and sub-directories in a directory.

        Synchronous by default; pass ``async_req=True`` to get a request
        thread instead (call ``thread.get()`` for the result).

        :param async_req bool: execute request asynchronously
        :param str path: The directory to get the documents in (required)
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
        :param _request_timeout: total timeout, or a (connect, read) tuple
        :return: DocumentDirectories
        """
        kwargs['_return_http_data_only'] = True
        return self.get_spar3_documents_with_http_info(path, **kwargs)  # noqa: E501

    def get_spar3_documents_with_http_info(self, path, **kwargs):  # noqa: E501
        """Gets SPAR3 documents and sub-directories in a directory.

        Like :meth:`get_spar3_documents`, but returns a
        (DocumentDirectories, status_code, headers) tuple unless
        ``_return_http_data_only`` is set.
        """
        return self._get_documents_with_http_info('get_spar3_documents', path, kwargs)

    def get_vault_documents(self, path, **kwargs):  # noqa: E501
        """Get Vault documents and sub-directories in a directory.

        Synchronous by default; pass ``async_req=True`` to get a request
        thread instead (call ``thread.get()`` for the result).

        :param async_req bool: execute request asynchronously
        :param str path: The directory to get the documents in (required)
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
        :param _request_timeout: total timeout, or a (connect, read) tuple
        :return: DocumentDirectories
        """
        kwargs['_return_http_data_only'] = True
        return self.get_vault_documents_with_http_info(path, **kwargs)  # noqa: E501

    def get_vault_documents_with_http_info(self, path, **kwargs):  # noqa: E501
        """Get Vault documents and sub-directories in a directory.

        Like :meth:`get_vault_documents`, but returns a
        (DocumentDirectories, status_code, headers) tuple unless
        ``_return_http_data_only`` is set.
        """
        return self._get_documents_with_http_info('get_vault_documents', path, kwargs)
|
[
"[email protected]"
] | |
1d75466b672c35a613932cb6ff9f6a9ead41becc
|
360fd4e74f752dca1e915cde7d7638e51d8e27ee
|
/problem9.py
|
c53225219c49d3f60b3b81b6e964af51eaa991cd
|
[] |
no_license
|
dsimpson1980/project_euler
|
69718e516038093a34f7f0d0e9d9dc213d658bdc
|
d71c4739af41846a2821b568730c99271cb26eee
|
refs/heads/master
| 2021-01-10T12:22:19.511550 | 2015-09-16T18:08:22 | 2015-09-16T18:08:22 | 36,770,819 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
"""Special Pythagorean triplet
Problem 9
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
Solve the two formulae so we can reduce the problem to one loop with a test
condition:
a + b + c = 1e3 => c = 1e3 - a - b
a^2 + b^2 = (1e3 - a - b)^2 = 1e6 - 2e3a - 2e3b +2ab + a^2 + b^2
=> 1e6 - 2e3a - 2e3b + 2ab = 0
=> b = (2e3a - 1e6) / 2(a - 1e3)
"""
n = 1000
for a in range(1, n):
b, remainder = divmod(2e3 * a - 1e6, 2 * (a - 1e3))
if b > 0 and remainder == 0:
break
c = n - a - b
print 'abc = %s' % (a * b * c)
|
[
"[email protected]"
] | |
ca8705cc1f1359d399708435066d644118c8025c
|
eba283c7b7d07c9ff15abee322da8fea460ea6be
|
/__init__.py
|
a81e1e6e897d836c409125c7fc0208faa64f920a
|
[] |
no_license
|
ROB-Seismology/layeredbasemap
|
5bfa3daad9b2e47a1fea35c652309541ac88ac23
|
122464656d5534798c4bba38cdda2638e7d8948f
|
refs/heads/master
| 2021-01-20T17:33:02.596090 | 2020-12-16T10:30:54 | 2020-12-16T10:30:54 | 90,877,746 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,569 |
py
|
"""
layeredbasemap
Module to create maps with Basemap using the GIS layer philosophy,
where each layer is defined by a dataset and style.
Author: Kris Vanneste, Royal Observatory of Belgium
"""
from __future__ import absolute_import, division, print_function, unicode_literals
## Make relative imports work in Python 3
import importlib
## Reloading mechanism
try:
reloading
except NameError:
## Module is imported for the first time
reloading = False
else:
## Module is reloaded
reloading = True
try:
## Python 3
from importlib import reload
except ImportError:
## Python 2
pass
## Test GDAL environment
import os
#gdal_keys = ["GDAL_DATA", "GDAL_DRIVER_PATH"]
gdal_keys = ["GDAL_DATA"]
for key in gdal_keys:
if not key in os.environ.keys():
print("Warning: %s environment variable not set. This may cause errors" % key)
elif not os.path.exists(os.environ[key]):
print("Warning: %s points to non-existing directory %s" % (key, os.environ[key]))
## Import submodules
## styles
if not reloading:
styles = importlib.import_module('.styles', package=__name__)
else:
reload(styles)
from .styles import *
## data_types
if not reloading:
data_types = importlib.import_module('.data_types', package=__name__)
else:
reload(data_types)
from .data_types import *
## cm
if not reloading:
cm = importlib.import_module('.cm', package=__name__)
else:
reload(cm)
## layered_basemap
if not reloading:
layered_basemap = importlib.import_module('.layered_basemap', package=__name__)
else:
reload(layered_basemap)
from .layered_basemap import *
|
[
"[email protected]"
] | |
6a8a9e833e2feca5d8149bcedae67e48aa74d552
|
43a0ef5b5c3bd9dce498f3504d43dfd4d0ca91d1
|
/fpreproc/transpSConsBuilder.py
|
c1e1dd77418364d4ae09591ad7c2534f6de5cd62
|
[] |
no_license
|
ilonster/pspline
|
083dec1e3c6fd1bf95f9784d80130ccba73f1730
|
9d2b61b747840bc34972c639826d645ad7d2db7f
|
refs/heads/master
| 2021-01-13T01:25:14.434218 | 2011-08-26T01:46:08 | 2011-08-26T01:46:08 | 2,358,776 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 47,243 |
py
|
#
# This is a tool which provides the Builders,Scanners and Emitters for building
# TRANSP code using SCons. This means using a preprocessor when building from
# fortran files and providing the flexibility to link with the Fortran, C++ or
# C linker. Fortran files are assumed to be in free source form when they have
# the .f90 extension.
#
# The SConstruct file should add $CODESYSDIR/source/bpython to sys.path.
#
# -- builders --
# StaticTranspFortran, SharedTranspFortran - standard fortran compilers
# StaticTranspDebugFortran, SharedTranspDebugFortran - compiles with debug flags
#
# StaticTranspC, SharedTranspC - compiles C and C++, not much different than the SCons default
# StaticTranspDebugC, SharedTranspDebugC - compiles with debug flags
#
# ProgramTranspFortran - link with the fortran compiler
# ProgramTranspCPP - link with the C++ compiler
# ProgramTranspFortranCPP - link fortran executable with C++ compiler, useful for forcing
# the link of the fortran executable when FTN_MAIN is not in the
# <name>_exe.link file
# ProgramTranspCC - link with the C compiler
#
# The object files, local (TRANSP) libraries which are a part of this build and
# the <name>_exe.link file if it exists should be the sources in the ProgramTransp* builders.
#
# The following lines in the <name>_exe.link file will modify the linking,
# $L_<exe> - add an external library as one of the libraries to be linked with.
# See the external_packages python variable below for the available libraries.
# $FTN_MAIN - indicates that this executable contains a fortran main program
# $FTN - indicates that this executable code will contain fortran code and so needs
# to be linked with the fortran libraries (see FTN_MAIN,FTN_TRAILER environment variables)
# $CPP - indicates that this executable code will contain C++ code and so needs
# to be linked with the C++ libraries (see CPP_TRAILER environment variable)
#
# everything else including the object file names will be ignored in this file
#
# -- env definitions --
# $CODESYS_DEBUG = set this to an integer XYZ to see debug messages. Each digit turns on a different section
# X -> messages during emitting phase when the object and module files produced are identified
# Y -> messages during scanning phase when implicit dependencies are identified
# Z -> messages during compile phase
# So you can set this to 101 to turn on emitting and compiling messages.
#
# $LINKLOAD_DEBUG = if a nonzero integer then linking debug messages will be written
#
# $FORTRAN = fortran command (e.g. lf95)
# $FORTRANFLAGS = flags for fortran compiling
# $FORTRANFLAGS_DEBUG = flags for fortran debug compiling (replaces $FORTRANFLAGS)
# $FORTRAN_FREE = additional flags added when compiling free format code
# $FORTRAN_FIXED = additional flags added when compiling fixed format code
# $FORTRAN_FREE_EXT = extension to use for preprocessed free formated code ["f90"]
# $FORTRAN_FIXED_EXT = extension to use for preprocessed fixed formated code ["for"]
# $FORTRAN_EXPORT = this should be a list which contains the names of all files exporting public modules
# $FORTRAN_SHARE = additional flag added to shared fortran compile
# it will be setup for you in SConstruct
# $FORTRANPUBLIC = should point to the directory where public modules will be placed
# $FORTRANMODSUFFIX = suffix for modules ["mod"]
# $FORTRANPATH = paths used for searching for include and module files
# $FORTRANMODDIRPREFIX = see SCons manual
# $FORTRANMODDIR = directory where module files will be placed, generally this should be kept empty
# $FORTRANMODDIRSUFFIX = see SCons manual
# $FPPDEFINES = preprocessor flags for fortran
# $FPPPATH = paths used during preprocessing
#
# $CC = C compiler command
# $CCFLAGS = usual definition used for distributed C code
# $CCFLAGS_DEBUG = CCFLAGS when compiling for C debugging, this replaces $CCFLAGS
# $CPPPATH = see SCons manual
# $CPPDEFINES = see SCons manual
#
# $CXX = C++ compiler command
# $CXXFLAGS = usual definition used for distributed C++ code
# $CXXFLAGS_DEBUG = CXXFLAGS when compiling for C++ debugging, this replaces $CXXFLAGS
#
# $CPP_TRAILER = trailer added during fortran linking when C++ object files are included
# $FTN_TRAILER = trailer added during C,C++ linking when fortran object files are included
# $FTN_MAIN = should point to the system object file containing the fortran main()
#
# These trailers can be set automatically by the tools trscons_g++ and trscons_lf95.
#
# $LINKFLAGS = these flags are added for all linking
# $LINK_FORTRAN = the command for fortran linking [$FORTRAN,"gfortran"]
# $LINK_FORTRAN_TRAILER= an additional string added to the fortran link line
# $LINK_FORTRANFLAGS = flags applied during fortran linking only
# $LINK_CXXFLAGS = flags applied during C++ linking only
# $LINK_CXX = command for C++ linking [$CXX,"g++"]
# $LINK_CXX_TRAILER = an additional string added to the C++ link line
# $LINK_CC = command for C linking [$CC,"gcc"]
# $LINK_CCFLAGS = flags applied during C linking only
# $LINK_CC_TRAILER = an additional string added to the C link line
#
# $L_??? = for each of the external libraries these variables should be the list of libraries to be
# added onto the link line. Also setup LIBPATH to point to the library directories.
# See the external_packages variable for the packages which are supported. The purpose
# of these variables is to customize the library dependencies of each individual program.
# To use one of these libraries during linking you must setup the <name>_exe.link file.
#
# examples,
# L_FFTW="fftw",L_LAPACK=["lapack","blas"],L_SUPERLU=["superlu","blas"]
# TRLIB_PATH=["/usr/local/lff95/lib", "/usr/local/lib", "/usr/local/mdsplus/lib", "/usr/local/superlu/lib"]
#
# these can also be set by the various trscons_??? tools though you should make sure the
# trscons_path tool is loaded first to setup the search paths
#
#
# =====================================================================================
# ---- import ----
import os
import sys
import re
import shutil
import string
import SCons.Scanner
import SCons.Scanner.Fortran
import SCons.Tool.fortran
join = os.path.join
#
# transp paths and imports, the sys.path should have been extended
# to include $CODESYSDIR/source/bpython in the SConstruct file.
#
from fpp import *
# External library groups recognized in <name>_exe.link files via $L_<name>.
external_packages = ['BLAS','NETCDF', 'MDSPLUS', 'RFFTW', 'FFTW', 'LAPACK', 'SUPERLU'] # used for L_<name> link definitions
# Source suffixes handled by the TRANSP Fortran builders.
# NOTE(review): the "F90" entry lacks a leading dot, unlike every other
# suffix in this list -- probably intended to be ".F90"; confirm upstream.
transpFortranSuffixes = [".f90",".for",".f",".F","F90",".FOR"]
transpFortranFreeExts = ["f90"] # extensions recognized as free format (lower case only)
transpCSuffixes = [".c"]
transpCXXSuffixes = ['.cpp', '.cc', '.cxx', '.c++', '.C++']
# On case-sensitive filesystems '.C' conventionally means C++; otherwise it
# cannot be distinguished from '.c' and is treated as C.
if SCons.Util.case_sensitive_suffixes('.c', '.C'):
    transpCXXSuffixes.append('.C')
else:
    transpCSuffixes.append('.C')
# ----------------- Fortran -------------------
re_base = re.compile(r"^([^\.]+)\.([^\.]+)$") # split off extension
class TranspFortran:
    """
    Fortran build actions whose behaviour (debug vs. optimized, static vs.
    shared) is selected by member data fixed at construction time.  The
    instance methods are handed to SCons as the action function and the
    action description (strfunction) for the TRANSP Fortran builders.
    """
    def __init__(self, isdebug=0, isshare=0):
        """
        isdebug = nonzero to debug compile
        isshare = nonzero to compile for a shared library
        """
        self.isdebug = isdebug # true for debug compiling
        self.isshare = isshare # true for share compiling
        # SCons command templates, chosen in getCom() based on
        # free/fixed source form and the isdebug/isshare flags.
        self.freeCom = '$FORTRAN_FREECOM'
        self.fixedCom = '$FORTRAN_FIXEDCOM'
        self.freeComDebug = '$FORTRAN_FREECOM_DEBUG'
        self.fixedComDebug = '$FORTRAN_FIXEDCOM_DEBUG'
        self.freeComSh = '$FORTRAN_FREECOM_SH'
        self.fixedComSh = '$FORTRAN_FIXEDCOM_SH'
        self.freeComDebugSh = '$FORTRAN_FREECOM_DEBUG_SH'
        self.fixedComDebugSh = '$FORTRAN_FIXEDCOM_DEBUG_SH'
    def transpFortranActionFunction(self,target,source,env):
        """
        An action which preprocesses the fortran source then compiles it.
        Steps: pre-preprocess with prefpp, run gcc -E -P, post-process with
        postfpp, then invoke the compile command from getCom().  Returns the
        compiler's exit status (0 on success).
        """
        ta = target[0].abspath # target .o file
        p = os.path.dirname(ta) # path to build directory
        rn = source[0] # source node
        ra = rn.abspath # full path to source file
        r = os.path.basename(ra) # the .for or .f90 file name
        base,tail = re_base.match(r).groups() # ("mysource","f90")
        tail = tail.lower() # "f90"
        free = tail in transpFortranFreeExts # true if free format fortran source
        info = int(env.get("CODESYS_DEBUG","0"))%10 >0 # true to print out info during each step
        fpppath = env.subst('$_FPPINCFLAGS',target=target) # -I flags for fortran preprocessing
        fppdefs = env.subst('$_FORTRANDEFFLAGS') # defines for fortran preprocessing
        if (free):
            ext = env.get("FORTRAN_FREE_EXT", "f90")
            maxlen = 132 # max line length for free form source
        else:
            ext = env.get("FORTRAN_FIXED_EXT", "for")
            maxlen = 72 # max line length for fixed form source
        s = os.path.join(p,base+"_."+ext) # the preprocessed file
        z = s+"_tmp" # intermediate step in preprocessing
        if (info):
            print "transpFortranAction: %s"%ra
            print " -> processed source: %s"%s
            for x in target:
                print " -> target: %s"%str(x)
            print " -> temporary file: %s"%z
        #
        # -- modules --
        # identify generated module files and delete them before building
        #
        isExport = r in env.Split(env.get('FORTRANEXPORT',[])) # true to export generated modules,
        # look for file.f90 in environment list
        if (isExport):
            fpub = env.Dir(env.subst('$FORTRANPUBLIC')).abspath # public directory for exported modules
            if (not os.path.exists(fpub)):
                os.mkdir(fpub)
        mods = [] # module files created by this command (file_base_name, abs_path)
        # NOTE(review): string.strip() is a Python 2-only module function
        # (removed in Python 3); this file is Python 2 throughout.
        mext = string.strip(env.subst('$FORTRANMODSUFFIX'))
        re_mext = re.compile(r'.*'+mext+r'$')
        for x in target: # look in target list for the generated modules
            q = x.abspath
            if (re_mext.match(q)):
                bmod = os.path.basename(q) # mymodule.mod
                mods.append((bmod,q))
                # - clear out old modules -
                if (os.path.isfile(bmod)):
                    os.unlink(bmod) # remove module file in current directory
                if (os.path.isfile(q)):
                    os.unlink(q) # remove module file in final destination
                if (isExport):
                    qp = os.path.join(fpub,bmod)
                    if (os.path.isfile(qp)):
                        os.unlink(qp) # remove publicly exported module file
        if (len(mods)!=0 and info):
            print " -> generated modules:",[ x[0] for x in mods ]
            if (isExport):
                print " -> export modules to: %s"%fpub
        # -- pre-preprocess --
        if (os.path.isfile(s)):
            os.unlink(s) # remove previous preprocessed file
        f = open(ra,'r')
        slines = f.readlines() # grab source in list of lines
        f.close()
        pre = prefpp()
        out = pre(slines, free=free, maxlen=maxlen)
        f = open(z,'w')
        # try/finally guarantees the temporary file is removed even when
        # the write or the gcc preprocessing step fails.
        try:
            for x in out:
                f.write(x+'\n')
            f.close()
            # -- preprocess through gcc --
            fcmd = "gcc -E -P %s %s -x c %s"%(fppdefs,fpppath,z)
            if (info):
                print " -> preprocess: ",fcmd
            f = os.popen(fcmd,'r') # read output from stdout of gcc
            gout = f.readlines() # grab lines of preprocessed output
            #for x in gout:
            #    print x,
            f.close()
        finally:
            if (os.path.isfile(z)):
                os.unlink(z) # clean up intermediate file
        # -- post-preprocess --
        post = postfpp()
        fout = post(gout, free=free, maxlen=maxlen)
        f = open(s,'w')
        for x in fout:
            f.write(x+'\n') # write preprocessed source file
        f.close()
        # -- compile --
        com = self.getCom(env, free, s, target)
        if (info):
            print " -> free = %d"%free
            print " -> ext = %s"%ext
            print " -> command = %s"%com
        stat = os.system(com)
        #
        # -- finish off modules --
        #
        for m,a in mods:
            if (stat==0):
                if (info):
                    print "renaming %s to %s "%(m,a)
                os.rename(m,a) # move module file to final resting place
                #if (isExport):
                #    shutil.copyfile(a,os.path.join(fpub,m)) # copy to public place
            else:
                if (os.path.isfile(m)):
                    os.unlink(m) # remove module file in current directory on a compile error
        return stat
    def getCom(self, env, free, s, target):
        """
        Selects and substitutes the compile command template based on
        (isshare, isdebug, free) -- one of the eight self.*Com* templates.
        env = environment
        free = true if free source format
        s = source file name
        target = targets
        """
        if (self.isshare):
            if (free):
                if (self.isdebug):
                    com = env.subst(self.freeComDebugSh,source=env.File(s),target=target)
                else:
                    com = env.subst(self.freeComSh,source=env.File(s),target=target)
            else:
                if (self.isdebug):
                    com = env.subst(self.fixedComDebugSh,source=env.File(s),target=target)
                else:
                    com = env.subst(self.fixedComSh,source=env.File(s),target=target)
        else:
            if (free):
                if (self.isdebug):
                    com = env.subst(self.freeComDebug,source=env.File(s),target=target)
                else:
                    com = env.subst(self.freeCom,source=env.File(s),target=target)
            else:
                if (self.isdebug):
                    com = env.subst(self.fixedComDebug,source=env.File(s),target=target)
                else:
                    com = env.subst(self.fixedCom,source=env.File(s),target=target)
        return com
    def transpFortranActionDesc(self,target,source,env):
        """
        Describes the fortran compilation. Used by SCons when printing out
        the action.  Recomputes the preprocessed file name the same way as
        transpFortranActionFunction and returns the compile command string.
        """
        ta = target[0].abspath # target .o file
        p = os.path.dirname(ta) # path to build directory
        rn = source[0] # source node
        ra = rn.abspath # full path to source file
        r = os.path.basename(ra) # the .for or .f90 file name
        base,tail = re_base.match(r).groups() # ("mysource","f90")
        tail = tail.lower()
        free = tail in transpFortranFreeExts
        fpppath = env.subst('$_FPPINCFLAGS',target=target) # -I flags for fortran preprocessing
        fppdefs = env.subst('$_FORTRANDEFFLAGS') # defines for fortran preprocessing
        if (free):
            ext = env.get("FORTRAN_FREE_EXT", "f90")
            maxlen = 132
        else:
            ext = env.get("FORTRAN_FIXED_EXT", "for")
            maxlen = 72
        s = os.path.join(p,base+"_."+ext) # the preprocessed file
        com = self.getCom(env, free, s, target)
        return com
# --- action instances ---
# one (TranspFortran, SCons Action) pair for each of the four build
# flavors: optimized/debug crossed with static/shared objects.
transpFortranOpt = TranspFortran(0,0)
transpFortranOptAction = SCons.Action.Action(transpFortranOpt.transpFortranActionFunction,
                                             transpFortranOpt.transpFortranActionDesc)
transpFortranDebug = TranspFortran(1,0)
transpFortranDebugAction = SCons.Action.Action(transpFortranDebug.transpFortranActionFunction,
                                               transpFortranDebug.transpFortranActionDesc)
transpFortranOptSh = TranspFortran(0,1)
transpFortranOptActionSh = SCons.Action.Action(transpFortranOptSh.transpFortranActionFunction,
                                               transpFortranOptSh.transpFortranActionDesc)
transpFortranDebugSh = TranspFortran(1,1)
transpFortranDebugActionSh = SCons.Action.Action(transpFortranDebugSh.transpFortranActionFunction,
                                                 transpFortranDebugSh.transpFortranActionDesc)
#
# --- emitter ---
# adds the module .mod files to the targets produced from the source. This is
# snatched from the scons Tool/fortran.py file and customized to set the
# module directory
#
def _fortranEmitter(target, source, env):
    """
    Common emitter body: scan a fortran source for MODULE definitions and
    append the generated .mod files to the target list.  Exported modules
    (listed in $FORTRANEXPORT) are placed in the public module directory.
    Returns the (target, source) pair expected of an SCons emitter.
    """
    node = source[0].rfile()
    if not node.exists() and not node.is_derived():
        print "Could not locate " + str(node.name)
        return ([], [])
    mod_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
    cre = re.compile(mod_regex,re.M)
    # Retrieve all module names defined in this file (MODULE statements)
    modules = cre.findall(node.get_contents())
    # Remove duplicate items from the list
    modules = SCons.Util.unique(modules)
    # Convert module name to a .mod filename
    suffix = env.subst('$FORTRANMODSUFFIX')
    moddir = env.subst('$FORTRANMODDIR')
    modules = map(lambda x, s=suffix: string.lower(x) + s, modules)
    if (len(modules)>0):
        isExport = os.path.basename(node.path) in env.Split(env.get('FORTRANEXPORT',[]))
        if (isExport):
            # generated module is moved to public area
            moddir = env.Dir(env.subst('$FORTRANPUBLIC')).abspath # public directory for exported modules
        for m in modules:
            target.append(env.fs.File(m, moddir))
    return (target, source)
# -- snatched code --
def FortranEmitter(target, source, env):
    """Static-object emitter: add generated .mod targets, then do the standard static emit."""
    tgt, src = _fortranEmitter(target, source, env)
    return SCons.Defaults.StaticObjectEmitter(tgt, src, env)
def ShFortranEmitter(target, source, env):
    """Shared-object emitter: add generated .mod targets, then do the standard shared emit."""
    tgt, src = _fortranEmitter(target, source, env)
    return SCons.Defaults.SharedObjectEmitter(tgt, src, env)
#
# -- more snatched code --
# adds preprocessed file to the list of emitted code
#
class transpFortranEmitterClass:
    """
    Emitter wrapper which delegates to the static or shared fortran emitter
    and additionally registers the intermediate preprocessed file as a
    target so it is cleaned up along with the object file.
    """
    def __init__(self, isshare=0):
        self.isshare = isshare # true if for a shared library
    def emitter(self,target,source,env):
        """SCons emitter entry point; returns the (target, source) pair."""
        if (self.isshare):
            t = ShFortranEmitter(target, source, env)
        else:
            t = FortranEmitter(target, source, env)
        if (1):
            #
            # this section will add the preprocessed file as one of the targets
            # of the fortran build. If you use this then you will need to filter
            # these files out of the env.Object() result so they do not end up
            # being put inside the library. An advantage of using this is that
            # the preprocessed file will be cleaned up along with the object file.
            #
            ta = target[0].abspath # target .o file
            p = os.path.dirname(ta) # path to build directory
            rn = source[0] # source node
            ra = rn.abspath # full path to source file
            r = os.path.basename(ra) # the .for or .f90 file name
            base,tail = re_base.match(r).groups() # ("mysource","f90")
            tail = tail.lower()
            free = tail in transpFortranFreeExts
            if (free):
                ext = env.get("FORTRAN_FREE_EXT", "f90")
            else:
                ext = env.get("FORTRAN_FIXED_EXT", "for")
            s = os.path.join(p,base+"_."+ext) # the preprocessed file
            t[0].append(env.File(s))
            #t[0][:] = [ os.path.basename(str(s)) for s in t[0] ]
        info = int(env.get("CODESYS_DEBUG","0"))/100 >0 # true to print out info
        if (info):
            s = [str(x) for x in source]
            u = [str(x) for x in t[0]]
            print "from %s emitting %s"%(str(s),str(u))
        return t
# bound emitter methods for the static and shared builders
transpFortranEmitter = transpFortranEmitterClass(0).emitter
transpShFortranEmitter = transpFortranEmitterClass(1).emitter
# --- regular expressions ---
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
cinclude_regex = '^[ \t]*#[ \t]*(?:include|import)[ \t]*[<"]([^>"]+)[>"]'
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
cre_incl = re.compile(include_regex, re.M) # fortran INCLUDE lines
cre_use = re.compile(use_regex, re.M) # fortran USE lines
cre_include = re.compile(cinclude_regex, re.M) # fortran #include lines
cre_def = re.compile(def_regex, re.M) # fortran MODULE definition lines
#
# --- scanning ---
# Snatched from scons Scanner/Fortran.py file. Also need to scan for
# #include preprocessor lines and throw out self usages of module files.
#
def transpFortranScannerFunction(node,env,path):
    """
    Scan a fortran file for USE, INCLUDE and cpp #include statements. This code was pulled
    from scon's Fortran.py and C.py files.

    node = the source node being scanned
    env  = construction environment
    path = search path (tuple of dirs, or a callable returning one)
    returns a sorted list of dependency nodes
    """
    info = (int(env.get("CODESYS_DEBUG","0"))%100)/10 >0 # true to print out info
    if (info):
        print "scanning ",str(node)
    node = node.rfile()
    if not node.exists():
        return []
    # cache the includes list in node so we only scan it once:
    if node.includes != None:
        mods_and_includes = node.includes
    else:
        r = os.path.basename(node.abspath) # the base file name
        mext = env.subst('$FORTRANMODSUFFIX').strip()
        re_mext = re.compile(r'.*'+mext+r'$') # don't match .mod files
        if (not re_mext.match(r)):
            # retrieve all included filenames
            includes = cre_incl.findall(node.get_contents())
            # retrieve all USE'd module names
            modules = cre_use.findall(node.get_contents())
            # retrieve all #included module names
            cincludes = cre_include.findall(node.get_contents())
            # retrieve all defined module names
            defmodules = cre_def.findall(node.get_contents())
            # Remove all USE'd module names that are defined in the same file
            d = {}
            for m in defmodules:
                d[m] = 1
            modules = filter(lambda m, d=d: not d.has_key(m), modules)
            # Convert module name to a .mod filename
            if env.has_key('FORTRANMODSUFFIX'):
                suffix = env.subst('$FORTRANMODSUFFIX')
            else:
                suffix = ".mod"
            modules = map(lambda x, s=suffix: x.lower() + s, modules)
            if (modules):
                # remove any modules from the dependency which are exported by the file
                # to prevent a dependency loop.
                t = SCons.Tool.fortran.FortranEmitter([], [node], env)
                mx = [ ] # modules exported by this file by name
                for x in t[0]:
                    xa = x.abspath # full path to source file
                    xb = os.path.basename(xa) # the .for or .f90 file name
                    mx.append(xb)
                m = {} # use as a set to hold dependent module names
                for x in modules:
                    if (x not in mx):
                        m[x] = 1 # keep the module if not exported by the file
                modules = m.keys() # unique list of modules not exported by file
            # Remove duplicate items from the list
            mods_and_includes = SCons.Util.unique(includes+modules+cincludes)
            if (info):
                print " -->deps = ",mods_and_includes
        else:
            # module files themselves are not scanned
            mods_and_includes = []
            if (info):
                print " -- no scan --"
        node.includes = mods_and_includes
    # This is a hand-coded DSU (decorate-sort-undecorate, or
    # Schwartzian transform) pattern. The sort key is the raw name
    # of the file as specifed on the USE or INCLUDE line, which lets
    # us keep the sort order constant regardless of whether the file
    # is actually found in a Repository or locally.
    nodes = []
    source_dir = node.get_dir()
    if (callable(path)):
        path = path()
    all_path = (source_dir,) + tuple(path)
    for dep in mods_and_includes:
        n = SCons.Node.FS.find_file(dep, all_path,info)
        if n is None:
            pout = "No dependency generated for file: %s (referenced by: %s, path: %s) -- file not found" % (dep, node, all_path)
            SCons.Warnings.warn(SCons.Warnings.DependencyWarning,pout)
        else:
            sortkey = dep
            nodes.append((sortkey, n))
    nodes.sort()
    nodes = map(lambda pair: pair[1], nodes)
    return nodes
def scan_check(node, env):
    """
    Decide whether a node should be scanned for dependencies: scan it when
    it has no builder, or when it is already up to date.
    """
    return (not node.has_builder()) or node.current(env.get_calculator())
# scanner instance registered for all transp fortran suffixes
transpFortranScanner = SCons.Scanner.Scanner(function=transpFortranScannerFunction,name="transpFortranScanner",
                                             skeys=transpFortranSuffixes,
                                             path_function = SCons.Scanner.FindPathDirs("FPPPATH"), #FindPathDirs("FPPPATH",fs=SCons.Node.FS.default_fs),
                                             recursive=1, scan_check=scan_check)
def newFortranBuilder(env, isdebug=0):
    """
    Build new static and shared object builders for compiling transp fortran files.
    env     = environment
    isdebug = nonzero for debug builders
    returns (static_builder, shared_builder)
    """
    # choose the action pair once, outside the suffix loop
    if (isdebug):
        act = transpFortranDebugAction
        actsh = transpFortranDebugActionSh
    else:
        act = transpFortranOptAction
        actsh = transpFortranOptActionSh
    action = {}     # map suffix to action
    actionsh = {}   # map suffix to action for shared support
    emitter = {}    # map suffix to emitter
    emittersh = {}  # map suffix to emitter for shared support
    for sfx in transpFortranSuffixes:
        action[sfx] = act
        actionsh[sfx] = actsh
        emitter[sfx] = transpFortranEmitter
        emittersh[sfx] = transpShFortranEmitter
    static = env.Builder(action = action,
                         emitter = emitter,
                         prefix = '$OBJPREFIX',
                         suffix = '$OBJSUFFIX',
                         source_scanner = transpFortranScanner,
                         single_source = 1)
    shared = env.Builder(action = actionsh,
                         emitter = emittersh,
                         prefix = '$SHOBJPREFIX',
                         suffix = '$SHOBJSUFFIX',
                         source_scanner = transpFortranScanner,
                         single_source = 1)
    return (static,shared)
#
# --------------------- C ------------------------
# create new builders for C,C++ so that CCFLAGS is used for the normal build
# and CCFLAGS_DEBUG is used for the debug build.
#
class transpC:
    """
    For some reason SCons does not want to expand a source file in the build directory to point
    to the build directory, instead it points to the original source directory. Use this class
    to get around this and allow source files in the build directory. Using this class for
    normal C builds also for anticipated flexibility.
    """
    def __init__(self,com,comstr,ibreak=0):
        """
        com = command string
        comstr = description string
        ibreak = nonzero to break the rules
        """
        self.com = com
        self.comstr = comstr
        self.ibreak = ibreak
    def actionFunction(self,target,source,env):
        """Run the compile command; returns the os.system exit status."""
        if (self.ibreak):
            # substitute without $SOURCES, then append the absolute source
            # path by hand so build-directory sources are honored
            rn = source[0] # source node
            ra = rn.abspath # full path to source file
            c = env.subst(self.com,target=target)+" "+ra # breaking the rules
        else:
            c = env.subst(self.com,source=source[0],target=target[0])
        stat = os.system(c)
        return stat
    def descr(self,target,source,env):
        """Return the description string printed for this action."""
        if (self.ibreak):
            rn = source[0] # source node
            ra = rn.abspath # full path to source file
            c = env.subst(self.comstr,target=target)
            if (not c):
                # fall back to the command itself when no description is set
                c = env.subst(self.com,target=target)
            return c+" "+ra
        else:
            c = env.subst(self.comstr,target=target,source=source)
            return c
    def getAction(self):
        """Return an SCons Action wrapping this command and its description."""
        return SCons.Action.Action(self.actionFunction, self.descr)
# --- C/C++ action instances: (opt|debug) x (static|shared) ---
transpCOptAction = transpC("$CCCOM", "$CCCOMSTR").getAction()
transpCOptActionSH = transpC("$SHCCCOM", "$SHCCCOMSTR").getAction()
transpCDebugAction = transpC("$CCCOM_DEBUG", "$CCCOMSTR_DEBUG", 1).getAction() # SCons.Action.Action("$CCCOM_DEBUG", "$CCCOMSTR_DEBUG")
transpCDebugActionSH = transpC("$SHCCCOM_DEBUG", "$SHCCCOMSTR_DEBUG", 1).getAction() # SCons.Action.Action("$SHCCCOM_DEBUG", "$SHCCCOMSTR_DEBUG")
transpCXXOptAction = transpC("$CXXCOM", "$CXXCOMSTR").getAction()
transpCXXOptActionSH = transpC("$SHCXXCOM", "$SHCXXCOMSTR").getAction()
transpCXXDebugAction = transpC("$CXXCOM_DEBUG", "$CXXCOMSTR_DEBUG",1).getAction() # SCons.Action.Action("$CXXCOM_DEBUG", "$CXXCOMSTR_DEBUG")
transpCXXDebugActionSH = transpC("$SHCXXCOM_DEBUG", "$SHCXXCOMSTR_DEBUG",1).getAction() #SCons.Action.Action("$SHCXXCOM_DEBUG", "$SHCXXCOMSTR_DEBUG")
def newCBuilder(env, isdebug=0):
    """
    Build new static and shared object builders for compiling transp C,C++ files.
    env     = environment
    isdebug = nonzero for debug builders
    returns (static_builder, shared_builder)
    """
    # select the per-language actions once, up front
    if (isdebug):
        c_act, c_actsh = transpCDebugAction, transpCDebugActionSH
        cxx_act, cxx_actsh = transpCXXDebugAction, transpCXXDebugActionSH
    else:
        c_act, c_actsh = transpCOptAction, transpCOptActionSH
        cxx_act, cxx_actsh = transpCXXOptAction, transpCXXOptActionSH
    # -- static builder --
    action = {}   # map suffix to action
    emitter = {}  # map suffix to emitter
    for sfx in transpCSuffixes:
        action[sfx] = c_act
        emitter[sfx] = SCons.Defaults.StaticObjectEmitter
    for sfx in transpCXXSuffixes:
        action[sfx] = cxx_act
        emitter[sfx] = SCons.Defaults.StaticObjectEmitter
    static = env.Builder(action = action,
                         emitter = emitter,
                         prefix = '$OBJPREFIX',
                         suffix = '$OBJSUFFIX',
                         source_scanner = SCons.Tool.SourceFileScanner,
                         single_source = 1)
    # -- shared builder --
    action = {}
    emitter = {}
    for sfx in transpCSuffixes:
        action[sfx] = c_actsh
        emitter[sfx] = SCons.Defaults.SharedObjectEmitter
    for sfx in transpCXXSuffixes:
        action[sfx] = cxx_actsh
        emitter[sfx] = SCons.Defaults.SharedObjectEmitter
    shared = env.Builder(action = action,
                         emitter = emitter,
                         prefix = '$SHOBJPREFIX',
                         suffix = '$SHOBJSUFFIX',
                         source_scanner = SCons.Tool.SourceFileScanner,
                         single_source = 1)
    return (static,shared)
#
# ----------------- linking -----------------
#
re_comment = re.compile(r'^\s*#')    # comment line in a <name>_exe.link file
re_tag = re.compile(r'^\s*\$(\w+)')  # "$NAME" parameter line in a <name>_exe.link file
class TranspLinkActionClass:
    """
    encapsulates the linking action common to fortran, C++ and C linkers
    """
    def __init__(self, name, linktype, isftnsrc=0):
        """
        name     = name of the linker, used in debug messages
        linktype = name of linker, 'fortran', 'c++' else it will be 'c'
        isftnsrc = nonzero if the main source file is fortran; only needed
                   when the linktype is c++
        """
        self.name = name # name of linker
        self.linktype = linktype # fortran, c++ or c
        self.isftnsrc = isftnsrc # nonzero if the main source file is fortran, only needed
        # when the linktype is c++
        if (linktype == "fortran"):
            self.sdescr = "Fortran" # string describing the linking
        elif (linktype == "c++"):
            if (isftnsrc):
                self.sdescr = "Fortran source with C++"
            else:
                self.sdescr = "C++"
        else:
            self.sdescr = "C"
    def action(self, target, source, env):
        """
        Action for linking with the configured linker. The object files, distributed libraries
        and the <name>_exe.link file are passed in the source argument. The <name>_exe.link file
        is parsed to get the external libraries and possible trailers.
        Returns the os.system exit status of the link command.
        """
        info = int(env.get("LINKLOAD_DEBUG","0")) >0 # true to print out info during each step
        ta = target[0].abspath # target executable
        if (info):
            print "%s: %s"%(self.name,ta)
        flink = None # _exe.link file
        objects = [] # list of object files and static libraries
        # split sources into the .link control file and everything else
        for x in source:
            xa = x.abspath
            ext = os.path.splitext(xa)[1]
            if (ext == ".link"):
                flink = xa
            else:
                objects.append(xa)
        # -- get support libraries --
        has_cpp = 0
        has_ftn = 0
        has_ftn_main = 0
        l_libs = []
        if (not flink):
            # this is ok -- without a .link file, link against every
            # external package that is defined in the environment
            for x in external_packages: # add all external packages which are defined
                u = x.upper()
                if (env.has_key("L_"+u)):
                    l_libs.append("$_L_"+u)
            #raise RuntimeError("did not find a '.link' file for linking %s"%ta)
        else:
            # parse the .link file: "$CPP", "$FTN", "$FTN_MAIN" set flags,
            # "$L_<PKG>" lines name external library groups
            f = open(flink,'r')
            while(1):
                line = f.readline()
                if (not line):
                    break
                if (re_comment.match(line)):
                    continue
                m = re_tag.match(line)
                if (not m):
                    continue
                name = m.group(1).upper()
                if (name == "CPP"):
                    has_cpp = 1
                elif (name == "FTN"):
                    has_ftn = 1
                elif (name == "FTN_MAIN"):
                    has_ftn_main = 1
                else:
                    if (len(name)<3 or name[0:2]!="L_"):
                        raise RuntimeError("did not recognize the parameter %s in %s"%(name,flink))
                    l_libs.append("$_"+name)
            f.close()
        #if (l_libs):
        # l_libs.insert(0, "$_TRLIB_PATH") # add library path if there are external libraries
        l_libs.append("$LINK_LIBS") # for stuff like OSX "-framework veclib"
        t_libs = [] # trailer libs
        # -- add in trailers, select flags --
        if (self.linktype == "fortran"):
            # fortran
            t_libs.append("$LINK_FORTRAN_TRAILER")
            if (has_cpp):
                t_libs.append("$CPP_TRAILER")
            flags = "$LINK_FORTRANFLAGS $LINKFLAGS"
            linker = "$_LINK_FORTRAN"
        elif (self.linktype == "c++"):
            # c++
            if (self.isftnsrc or has_ftn):
                t_libs.append("$FTN_TRAILER")
            if (self.isftnsrc or has_ftn_main):
                l_libs.insert(0,"$FTN_MAIN")
            t_libs.append("$LINK_CXX_TRAILER")
            flags = "$LINK_CXXFLAGS $LINKFLAGS"
            linker = "$_LINK_CXX"
        else:
            # c
            if (has_ftn):
                t_libs.append("$FTN_TRAILER")
            if (has_ftn_main):
                l_libs.insert(0,"$FTN_MAIN")
            t_libs.append("$LINK_CC_TRAILER")
            flags = "$LINK_CCFLAGS $LINKFLAGS"
            linker = "$_LINK_CC"
        rawcom = "%s %s -o $TARGET %s $_LIBDIRFLAGS %s $_LIBFLAGS %s"%(linker,flags,string.join(objects),string.join(l_libs),string.join(t_libs))
        com = env.subst(rawcom,source=source,target=target)
        if (info):
            print " ->raw command: ",rawcom
            print " ->command: ", com
        stat = os.system(com) # do the link
        if (stat and not info):
            # echo the failing command even when not in debug mode
            print " ->raw command: ",rawcom
            print " ->command: ", com
        return stat
    def descr(self, target, source, env):
        """
        Returns a string describing the action
        """
        ta = target[0].abspath # target executable
        return "%s linker: %s"%(self.sdescr,ta)
def newProgramBuilder(env, name, linktype, isftnsrc=0):
    """
    Build a new builder for linking the transp way.
    name     = name of linker for debug messages
    linktype = "fortran", "c++" or "c"
    isftnsrc = nonzero to force compiling in the fortran libraries and main
               when the linktype is c++
    """
    link = TranspLinkActionClass(name, linktype, isftnsrc)
    return env.Builder(action = env.Action(link.action, link.descr),
                       prefix = '$PROGPREFIX',
                       suffix = '$PROGSUFFIX',
                       single_source = 0)
#
# -------------------------- debug methods --------------------------
#
def transpDebugStaticLibraryFunction(target, source, env):
    """
    Action for building the static debug library. The nondebug library will be
    copied to the target then all of the objects in the source will be used
    to replace the nondebug objects.
    Returns the archiver exit status (nonzero on failure).
    """
    dpath = target[0].abspath # path to debug library to be built
    t = os.path.basename(dpath)
    libget = env.subst(r"$LIBPREFIX(.*)$LIBSUFFIX") # look for lib<name>.a and extract <name>
    # NOTE(review): $LIBPREFIX/$LIBSUFFIX are spliced into the regex unescaped,
    # so the '.' in a suffix like '.a' matches any character -- confirm benign.
    m = re.match(libget,t)
    if (not m):
        raise RuntimeError("could not get the library name from the target %s"%t)
    libname = m.group(1)
    transp_libraries = env['TRANSP_LIBRARIES']
    if (not transp_libraries.has_key(libname)):
        raise RuntimeError("did not find the library %s among the currently distributed transp libraries"%libname)
    opath = transp_libraries[libname] # path of original distributed library
    shutil.copyfile(opath,dpath) # copy the original to the new debug
    if (len(source)>0):
        # replace the nondebug objects with the debug ones via the archiver
        arcom = env.subst("$ARCOM",target=target,source=source)
        stat = os.system(arcom)
        if (stat!=0):
            os.unlink(dpath) # do not leave a half-updated library behind
        return stat
def transpDebugStaticLibraryDescr(target, source, env):
    """
    Description for building the static debug library. The nondebug library will be
    copied to the target then all of the objects in the source will be used
    to replace the nondebug objects.
    Returns the description string printed by SCons for this action.
    """
    dpath = target[0].abspath # path to debug library to be built
    t = os.path.basename(dpath)
    libget = env.subst(r"$LIBPREFIX(.*)$LIBSUFFIX")
    m = re.match(libget,t)
    if (not m):
        raise RuntimeError("could not get the library name from the target %s"%t)
    libname = m.group(1)
    transp_libraries = env['TRANSP_LIBRARIES']
    if (not transp_libraries.has_key(libname)):
        raise RuntimeError("did not find the library %s among the currently distributed transp libraries"%libname)
    opath = transp_libraries[libname] # path of original distributed library
    s = "copy %s %s"%(opath,dpath)
    if (len(source)>0):
        s+="\n"+env.subst("$ARCOM",target=target,source=source)
    return s
def newTranspDebugStaticLibraryBuilder(env):
    """
    Return a new builder for building a debug static library.
    env = environment
    """
    # pair the build function with its human-readable description
    action = env.Action(transpDebugStaticLibraryFunction,transpDebugStaticLibraryDescr)
    dbg = env.Builder(action = action,
                      prefix = '$LIBPREFIX',
                      suffix = '$LIBSUFFIX',
                      src_suffix = '$OBJSUFFIX')
    return dbg
#
# ------------------------- tool methods ----------------------------
#
class VariableListGenerator:
    """
    Callable generator returning the value of the first construction
    variable in `variablelist` that is defined in the environment; if none
    is defined, a default is returned.

    The default is supplied with the keyword `defvar` (the spelling used by
    callers in this file); the legacy spelling 'def' is still honored.
    Snatched from fortran.py.
    """
    def __init__(self, *variablelist, **kwargs):
        self.variablelist = variablelist  # variable names tried in order
        # BUGFIX: the original only looked for the key 'def', so the
        # 'defvar' keyword actually passed by callers was silently dropped
        # and the default was always ''.  Accept 'defvar' (and 'def').
        self.defvar = kwargs.get('defvar', kwargs.get('def', ''))
    def __call__(self, env, target, source, for_signature=0):
        for v in self.variablelist:
            try: return env[v]
            except KeyError: pass
        return self.defvar
def generate(env):
    """
    Tool entry point: install the transp fortran/C/C++ object builders,
    the transp program (link) builders and the debug-library builder into
    the environment, together with the construction variables they use.
    """
    # -- snatched from SCons.Tool.fortran.py --
    env['_FORTRANINCFLAGS'] = '$( ${_concat(INCPREFIX, FORTRANPATH, INCSUFFIX, __env__, RDirs, TARGET)} $)'
    env['_FORTRANMODFLAG'] = '$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__)} $)'
    env['_FORTRANDEFFLAGS'] = '$( ${_defines(CPPDEFPREFIX, FPPDEFINES, CPPDEFSUFFIX, __env__)} $)'
    env['_FPPINCFLAGS'] = '$( ${_concat(INCPREFIX, FPPPATH, INCSUFFIX, __env__, RDirs, TARGET)} $)'
    # fortran compile command templates: (free|fixed) x (opt|debug) x (static|shared)
    env['FORTRAN_FREECOM'] = '$FORTRAN $FORTRANFLAGS $FORTRAN_FREE $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    env['FORTRAN_FIXEDCOM'] = '$FORTRAN $FORTRANFLAGS $FORTRAN_FIXED $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    env['FORTRAN_FREECOM_DEBUG'] = '$FORTRAN $FORTRANFLAGS_DEBUG $FORTRAN_FREE $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    env['FORTRAN_FIXEDCOM_DEBUG'] = '$FORTRAN $FORTRANFLAGS_DEBUG $FORTRAN_FIXED $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    env['FORTRAN_FREECOM_SH'] = '$FORTRAN $FORTRANFLAGS $FORTRAN_SHARE $FORTRAN_FREE $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    env['FORTRAN_FIXEDCOM_SH'] = '$FORTRAN $FORTRANFLAGS $FORTRAN_SHARE $FORTRAN_FIXED $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    env['FORTRAN_FREECOM_DEBUG_SH'] = '$FORTRAN $FORTRANFLAGS_DEBUG $FORTRAN_SHARE $FORTRAN_FREE $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    env['FORTRAN_FIXEDCOM_DEBUG_SH'] = '$FORTRAN $FORTRANFLAGS_DEBUG $FORTRAN_SHARE $FORTRAN_FIXED $_FORTRANINCFLAGS $_FORTRANMODFLAG -c -o $TARGET $SOURCES'
    tstatic, tshared = newFortranBuilder(env,isdebug=0)
    env.Prepend(BUILDERS = {'StaticTranspFortran':tstatic, 'SharedTranspFortran':tshared})
    dstatic, dshared = newFortranBuilder(env,isdebug=1)
    env.Prepend(BUILDERS = {'StaticTranspDebugFortran':dstatic, 'SharedTranspDebugFortran':dshared})
    # --- C,C++ ---
    env['_CPPINCFLAGS'] = '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET)} $)'
    env['CCCOM'] = '$CC $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    env['SHCCCOM'] = '$SHCC $SHCCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    env['CCCOM_DEBUG'] = '$CC $CCFLAGS_DEBUG $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    env['SHCCCOM_DEBUG'] = '$SHCC $SHCCFLAGS_DEBUG $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    env['CXXCOM'] = '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    env['CXXCOM_DEBUG'] = '$CXX $CXXFLAGS_DEBUG $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    env['SHCXXCOM_DEBUG'] = '$SHCXX $SHCXXFLAGS_DEBUG $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
    # description strings default to the commands themselves
    env['CCCOMSTR'] = '$CCCOM'
    env['SHCCCOMSTR'] = '$SHCCCOM'
    env['CCCOMSTR_DEBUG'] = '$CCCOM_DEBUG'
    env['SHCCCOMSTR_DEBUG'] = '$SHCCCOM_DEBUG'
    env['CXXCOMSTR'] = '$CXXCOM'
    env['SHCXXCOMSTR'] = '$SHCXXCOM'
    env['CXXCOMSTR_DEBUG'] = '$CXXCOM_DEBUG'
    env['SHCXXCOMSTR_DEBUG'] = '$SHCXXCOM_DEBUG'
    env['CXXFLAGS_DEBUG'] = SCons.Util.CLVar('$CCFLAGS_DEBUG')
    tstatic, tshared = newCBuilder(env,isdebug=0)
    env.Prepend(BUILDERS = {'StaticTranspC':tstatic, 'SharedTranspC':tshared})
    dstatic, dshared = newCBuilder(env,isdebug=1)
    env.Prepend(BUILDERS = {'StaticTranspDebugC':dstatic, 'SharedTranspDebugC':dshared})
    # -- add builder --
    #static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    #for suffix in transpFortranSuffixes:
    # static_obj.add_action(suffix,transpFortranAction)
    # static_obj.add_emitter(suffix,transpFortranEmitter)
    #
    # -- add scanner --
    #static_scanner = static_obj.source_scanner
    #
    #for suffix in transpFortranSuffixes:
    # static_scanner.add_scanner(suffix,transpFortranScanner)
    #print static_obj.source_scanner.dict
    #env['LIBDIRPREFIX'] = '-L'
    #env['LIBDIRSUFFIX'] = ''
    #env['_TRLIB_PATH'] = '$( ${_concat(LIBDIRPREFIX, TRLIB_PATH, LIBDIRSUFFIX, __env__)} $)'
    #env['LIBLINKPREFIX']='-l'
    #env['LIBLINKSUFFIX']=''
    # create L_NETCDF type of definitions
    for x in external_packages:
        z = x.upper()
        env['_L_'+z] = '${_stripixes(LIBLINKPREFIX, L_%s, LIBLINKSUFFIX, LIBPREFIX, LIBSUFFIX, __env__)}'%z
    # linker selection: first defined variable wins, else the default
    env['_LINK_CC'] = VariableListGenerator('LINK_CC', 'CC', defvar='gcc')
    env['_LINK_CXX'] = VariableListGenerator('LINK_CXX', 'CXX', defvar='g++')
    env['_LINK_FORTRAN'] = VariableListGenerator('LINK_FORTRAN', 'FORTRAN', defvar='gfortran') # good luck with the default
    env.Append(BUILDERS = {'ProgramTranspFortran':newProgramBuilder(env,"TranspFortran","fortran"),
                           'ProgramTranspCPP':newProgramBuilder(env,"TranspC++","c++"),
                           'ProgramTranspFortranCPP':newProgramBuilder(env,"TranspFortranC++","c++",1),
                           'ProgramTranspCC':newProgramBuilder(env,"TranspC","c"),
                           'TranspDebugStaticLibrary':newTranspDebugStaticLibraryBuilder(env)})
def exists(env):
    """Tool availability check required by SCons; this tool is always available."""
    return 1
|
[
"[email protected]"
] | |
39d9972ace9b2b675fc010522f98d7a0c2e20feb
|
e7a5e140ccacc10a4c51b66fa5942974330cce2c
|
/py_insightvm_sdk/models/vulnerability.py
|
5ca229c4badf76227f09dee1e06eaf8e7fb2b306
|
[
"Apache-2.0"
] |
permissive
|
greenpau/py_insightvm_sdk
|
38864c7e88000181de5c09302b292b01d90bb88c
|
bd881f26e14cb9f0f9c47927469ec992de9de8e6
|
refs/heads/master
| 2020-04-21T08:22:31.431529 | 2020-02-27T02:25:46 | 2020-02-27T02:25:46 | 169,417,392 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 65,459 |
py
|
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. 
All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. 
| `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. 
In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. 
The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. 
| | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... 
``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. 
Allowed operations are returned and may be performed on the resource. | | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. 
| Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. 
The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. 
Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` 
| `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` 
` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | 
`cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | ----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. 
Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. 
The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. 
For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from py_insightvm_sdk.models.content_description import ContentDescription # noqa: F401,E501
from py_insightvm_sdk.models.link import Link # noqa: F401,E501
from py_insightvm_sdk.models.pci import PCI # noqa: F401,E501
from py_insightvm_sdk.models.vulnerability_cvss import VulnerabilityCvss # noqa: F401,E501
class Vulnerability(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'added': 'str',
'categories': 'list[str]',
'cves': 'list[str]',
'cvss': 'VulnerabilityCvss',
'denial_of_service': 'bool',
'description': 'ContentDescription',
'exploits': 'int',
'id': 'str',
'links': 'list[Link]',
'malware_kits': 'int',
'modified': 'str',
'pci': 'PCI',
'published': 'str',
'risk_score': 'float',
'severity': 'str',
'severity_score': 'int',
'title': 'str'
}
attribute_map = {
'added': 'added',
'categories': 'categories',
'cves': 'cves',
'cvss': 'cvss',
'denial_of_service': 'denialOfService',
'description': 'description',
'exploits': 'exploits',
'id': 'id',
'links': 'links',
'malware_kits': 'malwareKits',
'modified': 'modified',
'pci': 'pci',
'published': 'published',
'risk_score': 'riskScore',
'severity': 'severity',
'severity_score': 'severityScore',
'title': 'title'
}
def __init__(self, added=None, categories=None, cves=None, cvss=None, denial_of_service=None, description=None, exploits=None, id=None, links=None, malware_kits=None, modified=None, pci=None, published=None, risk_score=None, severity=None, severity_score=None, title=None): # noqa: E501
"""Vulnerability - a model defined in Swagger""" # noqa: E501
self._added = None
self._categories = None
self._cves = None
self._cvss = None
self._denial_of_service = None
self._description = None
self._exploits = None
self._id = None
self._links = None
self._malware_kits = None
self._modified = None
self._pci = None
self._published = None
self._risk_score = None
self._severity = None
self._severity_score = None
self._title = None
self.discriminator = None
if added is not None:
self.added = added
if categories is not None:
self.categories = categories
if cves is not None:
self.cves = cves
if cvss is not None:
self.cvss = cvss
if denial_of_service is not None:
self.denial_of_service = denial_of_service
if description is not None:
self.description = description
if exploits is not None:
self.exploits = exploits
if id is not None:
self.id = id
if links is not None:
self.links = links
if malware_kits is not None:
self.malware_kits = malware_kits
if modified is not None:
self.modified = modified
if pci is not None:
self.pci = pci
if published is not None:
self.published = published
if risk_score is not None:
self.risk_score = risk_score
if severity is not None:
self.severity = severity
if severity_score is not None:
self.severity_score = severity_score
if title is not None:
self.title = title
    @property
    def added(self):
        """str: Date the vulnerability coverage was added (ISO 8601, ``YYYY-MM-DD``)."""
        return self._added
    @added.setter
    def added(self, added):
        """Set the date the vulnerability coverage was added."""
        self._added = added
    @property
    def categories(self):
        """list[str]: All vulnerability categories assigned to this vulnerability."""
        return self._categories
    @categories.setter
    def categories(self, categories):
        """Set the vulnerability categories."""
        self._categories = categories
    @property
    def cves(self):
        """list[str]: All CVEs (https://cve.mitre.org/) assigned to this vulnerability."""
        return self._cves
    @cves.setter
    def cves(self, cves):
        """Set the assigned CVE identifiers."""
        self._cves = cves
    @property
    def cvss(self):
        """VulnerabilityCvss: The CVSS vector(s) for the vulnerability."""
        return self._cvss
    @cvss.setter
    def cvss(self, cvss):
        """Set the CVSS vector(s)."""
        self._cvss = cvss
    @property
    def denial_of_service(self):
        """bool: Whether the vulnerability can lead to Denial of Service (DoS)."""
        return self._denial_of_service
    @denial_of_service.setter
    def denial_of_service(self, denial_of_service):
        """Set whether the vulnerability can lead to Denial of Service."""
        self._denial_of_service = denial_of_service
    @property
    def description(self):
        """ContentDescription: The description of the vulnerability."""
        return self._description
    @description.setter
    def description(self, description):
        """Set the description of the vulnerability."""
        self._description = description
    @property
    def exploits(self):
        """int: The exploits that can be used to exploit a vulnerability."""
        return self._exploits
    @exploits.setter
    def exploits(self, exploits):
        """Set the exploit count."""
        self._exploits = exploits
    @property
    def id(self):
        """str: The identifier of the vulnerability."""
        return self._id
    @id.setter
    def id(self, id):
        """Set the identifier of the vulnerability."""
        self._id = id
    @property
    def links(self):
        """list[Link]: Hypermedia links to corresponding or related resources."""
        return self._links
    @links.setter
    def links(self, links):
        """Set the hypermedia links."""
        self._links = links
    @property
    def malware_kits(self):
        """int: The malware kits known to be used to exploit the vulnerability."""
        return self._malware_kits
    @malware_kits.setter
    def malware_kits(self, malware_kits):
        """Set the malware kit count."""
        self._malware_kits = malware_kits
    @property
    def modified(self):
        """str: Date the vulnerability was last modified (ISO 8601, ``YYYY-MM-DD``)."""
        return self._modified
    @modified.setter
    def modified(self, modified):
        """Set the last-modified date."""
        self._modified = modified
    @property
    def pci(self):
        """PCI: Payment Card Industry (PCI) details of the vulnerability."""
        return self._pci
    @pci.setter
    def pci(self, pci):
        """Set the PCI details."""
        self._pci = pci
    @property
    def published(self):
        """str: Date first published or announced (ISO 8601, ``YYYY-MM-DD``)."""
        return self._published
    @published.setter
    def published(self, published):
        """Set the first-published date."""
        self._published = published
    @property
    def risk_score(self):
        """float: Risk score (0-1000 under the default Rapid7 Real Risk model)."""
        return self._risk_score
    @risk_score.setter
    def risk_score(self, risk_score):
        """Set the risk score."""
        self._risk_score = risk_score
    @property
    def severity(self):
        """str: Severity, one of ``"Moderate"``, ``"Severe"``, ``"Critical"``."""
        return self._severity
    @severity.setter
    def severity(self, severity):
        """Set the severity string."""
        self._severity = severity
    @property
    def severity_score(self):
        """int: Severity score of the vulnerability, on a scale of 0-10."""
        return self._severity_score
    @severity_score.setter
    def severity_score(self, severity_score):
        """Set the severity score."""
        self._severity_score = severity_score
    @property
    def title(self):
        """str: The title (summary) of the vulnerability."""
        return self._title
    @title.setter
    def title(self, title):
        """Set the title (summary)."""
        self._title = title
    def to_dict(self):
        """Returns the model properties as a dict.

        Recursively serialises nested swagger models (anything exposing a
        ``to_dict``) inside lists and dicts as well as direct attributes.
        """
        result = {}
        # swagger_types is the generated attribute->type map declared on the class.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the generated model also subclasses dict, include its items too.
        if issubclass(Vulnerability, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Vulnerability):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
e22e276db56f57fe87c5bf501adf743ccf1c987f
|
41bfa2b258dcf6672942eaa0722e3147386907a8
|
/exercices/serie4/ex13.py
|
de757155b9e97b3e4f0bb757844c6fd19c9fcee3
|
[] |
no_license
|
tartofour/python-IESN-1BQ1-TIR
|
6ba06857b78bbcacd8b09af42d5b9293207c1917
|
9df606038e7c8593e73602476c7a260ead22ffb7
|
refs/heads/master
| 2022-11-06T20:46:50.474260 | 2020-07-07T21:34:52 | 2020-07-07T21:34:52 | 210,844,476 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 617 |
py
|
#!/usr/bin/python
def print_antipode(coord: tuple) -> None:
    """Print the antipode of a ``(latitude, longitude)`` pair in degrees.

    The antipode negates the latitude and shifts the longitude by 180
    degrees (keeping it within [-180, 180]).  Out-of-range input prints an
    error message instead of raising.
    """
    latitude, longitude = coord
    # Bug fix: the original validated latitude against +/-180 and longitude
    # against +/-90 — the two ranges were swapped.  Latitude is +/-90.
    if not (-90 <= latitude <= 90) or not (-180 <= longitude <= 180):
        print("Erreur dans les coordonnées !")
    else:
        anti_latitude = latitude * -1
        if longitude >= 0:
            anti_longitude = longitude - 180
        else:
            anti_longitude = longitude + 180
        coordonnées = (anti_latitude, anti_longitude)
        print(coordonnées)
# Read a coordinate pair from the user and print its antipode.
latitude = float(input("Entrez une latitude : "))
longitude = float(input("Entrez une longitude : "))
coordonnee_gps = (latitude, longitude)
print_antipode(coordonnee_gps)
|
[
"[email protected]"
] | |
e4b6ad474866f98c6b1f0ecb5442aad86612cc90
|
bb173dbcd5e18fe1402cc7350924c7fffde6d34e
|
/fabfile.py
|
3d6025a5786fe7345942305c56acbf142f7bb00d
|
[] |
no_license
|
HuangShaoyan/huangshaoyan.me
|
73fc67d10249fa44ee4a06636b4913ee7faf52fe
|
200372263dbe1e7ad7db51795f8f458bd2ac1c1b
|
refs/heads/master
| 2021-03-12T22:57:04.218492 | 2014-03-21T01:39:16 | 2014-03-21T01:39:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| true | false | 2,558 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fabric.api import put, sudo
'''
for dev:
fab <taskname> \
-H 127.0.0.1 \
--user=vagrant \
--port=2222 \
-i ~/.`vagrant.d/insecure_private_key \
-D
for test:
fab <taskname>:release=True \
-H 127.0.0.1 \
--user=vagrant \
--port=2222 \
-i ~/.`vagrant.d/insecure_private_key \
-D
for product: (it will use your .ssh/config)
fab <taskname>:release=True \
-H <server IP> \
--user=<user> \
--port=<port> \
'''
def init(release=False):
    '''
    init server, such as:
        install saltstack
        config saltstack to use github.com as gitfs if release=True
    '''
    _apt_upgrade()
    _config_master(release)
    sudo('service salt-master restart')
    _config_minion()
    sudo('service salt-minion restart')
    # Bug fix: the bare `print 'x'` statement is a syntax error on Python 3;
    # the single-argument function form behaves identically on Python 2 and 3.
    print('You may need to reboot manually')
def _apt_upgrade():
    """Swap in our apt sources, upgrade the system and install saltstack."""
    _put_file(
        local_path='fab_init_conf/apt_source',
        remote_path='/etc/apt/sources.list')
    # Shared prefix that keeps dpkg from prompting about config files.
    noninteractive = ('DEBIAN_FRONTEND=noninteractive '
                      'apt-get '
                      '-o Dpkg::Options::="--force-confdef" '
                      '-o Dpkg::Options::="--force-confold" ')
    commands = (
        'apt-get -q update',
        noninteractive + 'upgrade -q -y',
        noninteractive + 'dist-upgrade -q -y',
        'apt-get install python-software-properties -q -y',
        'add-apt-repository ppa:saltstack/salt -y',
        'apt-get -q update',
        'apt-get install salt-master salt-minion -q -y',
        'apt-get autoremove -q -y',
    )
    for command in commands:
        sudo(command)
def _config_master(release):
    """Install the salt-master config; add the gitfs backend when releasing."""
    sudo('rm -rf /etc/salt/master.d/*')
    _put_file(
        local_path='fab_init_conf/master',
        remote_path='/etc/salt/master.d/base.conf')
    if not release:
        return
    # Release builds pull states from github via gitfs, which needs GitPython.
    sudo('apt-get install git python-pip -q -y')
    sudo('pip install GitPython')
    _put_file(
        local_path='fab_init_conf/gitfs',
        remote_path='/etc/salt/master.d/gitfs.conf')
def _config_minion():
    # Wipe any stale minion config, then install ours as the base config.
    sudo('rm -rf /etc/salt/minion.d/*')
    _put_file(
        local_path='fab_init_conf/minion',
        remote_path='/etc/salt/minion.d/base.conf')
def _put_file(local_path, remote_path):
    """Upload a file via sudo, then make it root-owned with mode 0644."""
    put(local_path,
        remote_path,
        use_sudo=True,
        # Bug fix: the bare `0644` octal literal is a syntax error on
        # Python 3; `0o644` is the same value on Python 2.6+ and 3.
        mode=0o644)
    sudo('chown root:root %s' % remote_path)
|
[
"[email protected]"
] | |
b1f92da3bb4d6592d8637a98458c8ce610209e3c
|
11f21ef5e8a137d1e79e3c2a6ebbe22e18ba72f6
|
/repititfiller/__init__.py
|
8eb45e2d10e5ce28cc36fbeb03f9f226e7b971fb
|
[] |
no_license
|
rodmendezp/repit-filler
|
3fedc7e12eec7c63bfc8a7d138a97196cc996271
|
d86b0bdd19d668d6b855e9713d757cbdfbe8e70f
|
refs/heads/master
| 2020-04-20T17:04:20.483309 | 2019-02-06T00:20:25 | 2019-02-06T00:20:25 | 168,978,202 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 106 |
py
|
from __future__ import absolute_import
from .celery import app as celery_app
__all__ = ['celery_app']
|
[
"[email protected]"
] | |
a023a6dd3b718d9a57a700eb17e72934403b4dfa
|
3f44f39945a153fe580e63aed7531f68fdad6914
|
/venv/lib/python3.6/os.py
|
ca3ee1b9f4a381c53eecd731be3e5e7a19a99282
|
[] |
no_license
|
MatthewBurke1995/DjangoGirlsTutorial
|
4dcc313ea81b37161400ebd8daee34ca694f7c92
|
b5b568ceae24236573a146d2c70d1f889f8c9ce4
|
refs/heads/master
| 2021-08-16T18:27:37.822590 | 2017-11-20T07:39:58 | 2017-11-20T07:39:58 | 111,378,000 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 40 |
py
|
/home/matt/anaconda3/lib/python3.6/os.py
|
[
"[email protected]"
] | |
df376c400e68e9e980eaeac9415a3b82e1d72db3
|
89465bfa1924fe5d46ca84ace7cc613394ba0f8e
|
/config/migrations/0002_auto_20200323_1731.py
|
9923ce5cc36a53a44fbfe07a1a2b7f65825ad79e
|
[] |
no_license
|
xihacode/typeidea
|
735f21caf67106b7d8b948f5f2304c98e968c9e9
|
981fa968da5bf49f9d524c6fdbe5426936766c54
|
refs/heads/master
| 2021-04-12T20:02:48.469582 | 2020-04-04T08:57:40 | 2020-04-04T08:57:40 | 249,106,175 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 445 |
py
|
# Generated by Django 3.0.4 on 2020-03-23 09:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``sidebar.status`` to a PositiveIntegerField with display choices."""
    dependencies = [
        ('config', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sidebar',
            name='status',
            # choices: 1 = '展示' (show), 0 = '隐藏' (hide); defaults to shown
            field=models.PositiveIntegerField(choices=[(1, '展示'), (0, '隐藏')], default=1, verbose_name='状态'),
        ),
    ]
|
[
"3266817262@qq .com"
] |
3266817262@qq .com
|
a03eb33d6870aa0e07fdd3f38b90cb7daf448fac
|
e305251cd900203101414f4f819bdbd6bce8511f
|
/Code10-13.py
|
a8345f5b37cd9dbfa9c2962c5c4377ae42633128
|
[] |
no_license
|
munjinho/Python
|
ae761bdb2ba9af88ecc2732aceb1b1a6910ff42c
|
16eb54d344efcd6b3e70453848316aecaace8d0e
|
refs/heads/master
| 2020-04-13T08:59:39.179055 | 2020-03-23T17:14:58 | 2020-03-23T17:14:58 | 163,090,778 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 293 |
py
|
from tkinter import *
from tkinter import messagebox
## Function declarations ##
def clickLeft(event) :
    """Show an info dialog when the left mouse button is clicked."""
    messagebox.showinfo("마우스", "마우스 왼쪽 버튼이 클릭됨")
## Main code ##
window = Tk()
window.bind("<Button-1>", clickLeft)  # "<Button-1>" is the left mouse button
window.mainloop()
|
[
"[email protected]"
] | |
a9275b235b202ee33d01a9b4e7503532b2d3c007
|
59cd59d91d55a5b7a691c5342fc7b4db3d1f2df6
|
/scripts/program_analysis/gcc_to_agraph.py
|
e6157890dfde6d4c7551cc60acdeefbd4fb9317a
|
[
"Apache-2.0"
] |
permissive
|
ml4ai/automates
|
8af5ec33d5f321860dfb1164ec00e750ec62d0ad
|
2494b1c1f2d11b14eb342ef17b37d4dc9ba9b285
|
refs/heads/master
| 2023-01-19T01:25:12.034993 | 2023-01-09T16:45:12 | 2023-01-09T16:45:12 | 157,479,887 | 23 | 9 |
NOASSERTION
| 2023-01-09T16:45:13 | 2018-11-14T02:42:19 |
Fortran
|
UTF-8
|
Python
| false | false | 5,363 |
py
|
import sys
import json
import html
from typing import Dict
from collections import defaultdict, namedtuple
import pygraphviz as pgv
from pygraphviz import AGraph
# Maps gcc tree-expression operator names to their C source symbols.
OPS_TO_STR = {
    "mult_expr": "*",
    "plus_expr": "+",
    "minus_expr": "-",
    "ge_expr": ">=",
    "gt_expr": ">",
    "le_expr": "<=",
    "lt_expr": "<",
    "rdiv_expr": "/",
    "trunc_div_expr": "/",
    "eq_expr": "==",
    "ne_expr": "!=",
    "negate_expr": "-",
    "lshift_expr": "<<",
    "rshift_expr": ">>",
    "bit_xor_expr": "^",
    "bit_and_expr": "&",
    "bit_ior_expr": "|",
    "bit_not_expr": "~",
    "logical_or": "||",
    "logical_and": "&&",
    "trunc_mod_expr": "%",
}
# A CFG edge: source/target basic-block labels plus the gcc edge flag bits.
Edge = namedtuple("Edge", ["src", "tgt", "flags"])
# Edge flag bit values — presumably mirroring gcc's EDGE_* flags (TODO confirm).
FALLTHROUGH_FLAG = 2**0
TRUE_FLAG = 2**8
FALSE_FLAG = 2**9
# Rendering colors for the recognised edge kinds; unknown flags draw black.
edge_colors = {}
edge_colors[FALLTHROUGH_FLAG] = "grey"
edge_colors[TRUE_FLAG] = "green"
edge_colors[FALSE_FLAG] = "red"
EDGE_FLAGS = [FALLTHROUGH_FLAG, TRUE_FLAG, FALSE_FLAG]
def json_to_agraph_pdf(gcc_ast):
    """Render a gcc-plugin AST json dict as a PDF graph of per-function CFGs.

    Writes ``<input-basename>--gcc_ast-graph.pdf`` in the current directory.
    """
    input_file = gcc_ast["mainInputFilename"]
    input_file_stripped = input_file.split("/")[-1]
    functions = gcc_ast["functions"]
    types = gcc_ast["recordTypes"]  # NOTE(review): currently unused
    global_variables = gcc_ast["globalVariables"]  # NOTE(review): currently unused
    G = pgv.AGraph(directed=True)
    for f in functions:
        add_function_subgraph(f, G)
    G.graph_attr.update(
        {"dpi": 227, "fontsize": 20, "fontname": "Menlo", "rankdir": "TB"}
    )
    G.node_attr.update({"fontname": "Menlo"})
    G.draw(f"{input_file_stripped}--gcc_ast-graph.pdf", prog="dot")
def add_basic_block_node(bb: Dict, name: str, subgraph: "AGraph"):
    """Add one basic-block node to `subgraph` as a graphviz HTML-like table.

    Parameters:
        bb: dict with the basic block data from the gcc plugin json output
        name: label/identifier for the basic block node
        subgraph: graph to add the node to
    """
    # Header row with the block name, then one row per statement.
    rows = [f"<tr><td><b>{name}</b></td></tr>"]
    for stmt in bb["statements"]:
        # Renamed from `type` (shadowed the builtin) and replaced the manual
        # key checks with dict.get defaults.
        stmt_type = stmt["type"]
        l_start = stmt.get("line_start", -1)
        c_start = stmt.get("col_start", -1)
        loc = f"{l_start}:{c_start}"
        if stmt_type == "conditional":
            stmt_str = parse_conditional_stmt(stmt)
            # conditional labels contain <, >, & from C operators — escape them
            rows.append(f"<tr><td>{html.escape(stmt_str)}</td></tr>")
        else:
            rows.append(f"<tr><td>{stmt_type} at {loc}</td></tr>")
    # Bug fix: removed stray debug `print(label)` left in the original.
    label = ("<<table border=\"0\" cellborder=\"1\" cellspacing=\"0\">"
             + "".join(rows) + "</table>>")
    subgraph.add_node(name, label=label, shape="plaintext")
def add_function_subgraph(function: Dict, graph: AGraph):
    """
    Parameters:
        function: the dict storing the function data from the json output of gcc plugin
        graph: the graph to add the function cluster to
    Adds a function cluster/subraph consisting of all the basic blocks
    in the `function` dict
    """
    func_name = function["name"]
    # Node names are namespaced by function so blocks from different
    # functions cannot collide in the shared graph.
    bb_label = lambda index: f"{func_name}.BB{index}"
    F = graph.add_subgraph(name=f"cluster_{func_name}", label=func_name,
                           style="bold, rounded", rankdir="LR")
    # TODO: verify that index 0 basic block is the entry point and
    #       index 1 basic block is the exit point
    # entry_index = 0
    # exit_index = 1
    edges_to_add = defaultdict(list)
    for bb in function["basicBlocks"]:
        # special case nodes for entry and exit
        # if bb["index"] == entry_index:
        #     F.add_node(f"{func_name}.Entry")
        # elif bb["index"] == exit_index:
        #     F.add_node(f"{func_name}.Exit")
        bb_name = bb_label(bb["index"])
        add_basic_block_node(bb, bb_name, F)
        for e in bb["edges"]:
            src = bb_label(e["source"])
            tgt = bb_label(e["target"])
            edge = Edge(src=src, tgt=tgt, flags=e["flags"])
            edges_to_add[src].append(edge)
    # Edges are added after all nodes exist; unknown flag values draw black.
    for src in edges_to_add:
        for edge in edges_to_add[src]:
            color = "black"
            if edge.flags in EDGE_FLAGS:
                color = edge_colors[edge.flags]
            F.add_edge(edge.src, edge.tgt, color=color)
def parse_conditional_stmt(stmt: Dict):
    """
    Parameters:
        `stmt` 'conditional' type statement from a basic block
               obtained from gcc plugin generated json
    Returns:
        A str representing a suitable label for the conditional statement
    """
    operands = stmt["operands"]
    symbol = OPS_TO_STR[stmt["operator"]]
    parsed = [parse_operand(operand) for operand in operands[:2]]
    if len(operands) > 2:
        print("WARNING: parse_conditional_stmt() more than two operands!")
    return f"{parsed[0]} {symbol} {parsed[1]}"
def parse_operand(operand: Dict):
    """
    Parameter:
        `operand` is a operand dict obtained from gcc plugin generated json
    Returns:
        A str representing the operand
    """
    # TODO: This only parses a couple things.
    # Preference order: explicit name, then literal value, then a placeholder.
    return operand.get("name", operand.get("value", "Operand"))
def main():
    """CLI entry point: load the gcc-plugin json file given as argv[1] and
    render it to a PDF via json_to_agraph_pdf."""
    json_file = sys.argv[1]
    print(f"Loaded json_file: {json_file}")
    ast_json = json.load(open(json_file))
    json_to_agraph_pdf(ast_json)
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
159402a65b203cc8ac8e6b03ed21e5a75701626f
|
bcc59b03cc71f56958801c16158efa97f820799f
|
/MantaCamera.py
|
bd466102448cba9dda0d6cc1c9de553e4001c403
|
[] |
no_license
|
labscript-suite-temp-archive/cavitylab-labscript_devices--forked-from--labscript_suite-labscript_devices
|
c0816159af214ad6d2e9107f7000c81600744ad1
|
58d9b333d94b5f86eb3bdcae225170370d683138
|
refs/heads/master
| 2020-12-26T20:12:48.835227 | 2019-12-06T20:57:48 | 2019-12-06T20:57:48 | 237,628,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,707 |
py
|
#####################################################################
# #
# /labscript_devices/Camera.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of labscript_devices, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript', '2.0.1', '3')
from labscript_devices import labscript_device, BLACS_tab, BLACS_worker
from labscript import TriggerableDevice, LabscriptError, set_passed_properties
import numpy as np
from enum import Enum
@labscript_device
class MantaCamera(TriggerableDevice):
    """A triggerable camera device whose exposures are requested from a
    parent trigger device and saved to the shot's HDF5 file."""
    description = 'Generic Camera'
    # To be set as instantiation arguments:
    trigger_edge_type = None
    minimum_recovery_time = None

    class Param(Enum):
        # Generic enumerated camera-parameter options; the meaning of each
        # option is camera/SDK specific.
        option0 = 0
        option1 = 1
        option2 = 2
        option3 = 3
        option4 = 4
        option5 = 5
        option6 = 6
        option7 = 7
        option8 = 8
        option9 = 9

    @set_passed_properties(
        property_names = {
            "connection_table_properties": ["BIAS_port"],
            "device_properties": ["serial_number", "SDK", "effective_pixel_size", "exposure_time", "orientation", "trigger_edge_type", "minimum_recovery_time"]}
        )
    def __init__(self, name, parent_device, connection,
                 BIAS_port = 1027, serial_number = 0x0, SDK='', effective_pixel_size=0.0,
                 exposure_time=float('nan'), orientation='side', trigger_edge_type='rising', minimum_recovery_time=0, other_params=None,
                 **kwargs):
        # not a class attribute, so we don't have to have a subclass for each model of camera:
        self.trigger_edge_type = trigger_edge_type
        self.minimum_recovery_time = minimum_recovery_time
        self.exposure_time = exposure_time
        self.orientation = orientation
        self.BLACS_connection = BIAS_port
        if isinstance(serial_number, str):
            serial_number = int(serial_number, 16)
        self.sn = np.uint64(serial_number)
        self.sdk = str(SDK)
        self.effective_pixel_size = effective_pixel_size
        self.exposures = []
        # Bug fix: the default was a mutable `{}`, shared across instances;
        # use None and create a fresh dict per instance instead.
        self.other_params = {} if other_params is None else other_params
        # DEPRECATED: backward compatibility:
        if 'exposuretime' in kwargs:
            # We will call self.set_property later to overwrite the non-underscored kwarg's default value.
            self.exposure_time = kwargs.pop('exposuretime')
            import sys
            sys.stderr.write('WARNING: Camera\'s keyword argument \'exposuretime\' deprecated. Use \'exposure_time\' instead.\n')
        TriggerableDevice.__init__(self, name, parent_device, connection, **kwargs)

    def set_cam_param(self, param, value):
        """Set an existing entry of other_params; raise for unknown keys."""
        # Bug fix: dict.has_key() was removed in Python 3; use `in`.
        if param in self.other_params:
            self.other_params[param] = value
        else:
            raise LabscriptError('Camera parameter %s does not exist in dictionary' % param)

    def expose(self, name, t, frametype, exposure_time=None):
        """Request an exposure at time t; returns the exposure duration."""
        if exposure_time is None:
            duration = self.exposure_time
        else:
            duration = exposure_time
        if duration is None:
            raise LabscriptError('Camera %s has not had an exposure_time set as an instantiation argument, '%self.name +
                                 'and one was not specified for this exposure')
        if not duration > 0:
            raise LabscriptError("exposure_time must be > 0, not %s"%str(duration))
        # Only ask for a trigger if one has not already been requested by
        # another camera attached to the same trigger:
        already_requested = False
        for camera in self.trigger_device.child_devices:
            if camera is not self:
                for _, other_t, _, other_duration in camera.exposures:
                    if t == other_t and duration == other_duration:
                        already_requested = True
        if not already_requested:
            self.trigger_device.trigger(t, duration)
        # Check for exposures too close together (check for overlapping
        # triggers already performed in self.trigger_device.trigger()):
        start = t
        end = t + duration
        for exposure in self.exposures:
            _, other_t, _, other_duration = exposure
            other_start = other_t
            other_end = other_t + other_duration
            if abs(other_start - end) < self.minimum_recovery_time or abs(other_end - start) < self.minimum_recovery_time:
                # Bug fix: the message previously printed the *new* exposure
                # twice; report the conflicting existing exposure instead.
                raise LabscriptError('%s %s has two exposures closer together than the minimum recovery time: ' %(self.description, self.name) + \
                                     'one at t = %fs for %fs, and another at t = %fs for %fs. '%(t,duration,other_start,other_duration) + \
                                     'The minimum recovery time is %fs.'%self.minimum_recovery_time)
        self.exposures.append((name, t, frametype, duration))
        return duration

    def do_checks(self):
        """Verify that all cameras sharing our trigger have matching exposures."""
        for camera in self.trigger_device.child_devices:
            if camera is not self:
                for exposure in self.exposures:
                    if exposure not in camera.exposures:
                        _, start, _, duration = exposure
                        raise LabscriptError('Cameras %s and %s share a trigger. ' % (self.name, camera.name) +
                                             '%s has an exposure at %fs for %fs, ' % (self.name, start, duration) +
                                             'but there is no matching exposure for %s. ' % camera.name +
                                             'Cameras sharing a trigger must have identical exposure times and durations.')

    def generate_code(self, hdf5_file):
        """Write the EXPOSURES table and device properties to the shot file."""
        self.do_checks()
        table_dtypes = [('name','a256'), ('time',float), ('frametype','a256'), ('exposure_time',float)]
        data = np.array(self.exposures, dtype=table_dtypes)
        group = self.init_device_group(hdf5_file)
        if self.exposures:
            group.create_dataset('EXPOSURES', data=data)
        # DEPRECATED backward campatibility for use of exposuretime keyword argument instead of exposure_time:
        self.set_property('exposure_time', self.exposure_time, location='device_properties', overwrite=True)
        if len(self.other_params) > 0:
            for key in self.other_params:
                # Bug fix: referenced `Camera.Param`, but this class is named
                # MantaCamera — that was a NameError at runtime.
                if isinstance(self.other_params[key], MantaCamera.Param):
                    group.attrs[key] = self.other_params[key].name
                else:
                    group.attrs[key] = self.other_params[key]
import os
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from blacs.tab_base_classes import Worker, define_state
from blacs.tab_base_classes import MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL, MODE_BUFFERED
from blacs.device_base_class import DeviceTab
from qtutils import UiLoader
import qtutils.icons
@BLACS_tab
class CameraTab(DeviceTab):
    """BLACS GUI tab for the camera: shows server host/port settings and a
    connectivity indicator for the camera server."""
    def initialise_GUI(self):
        # Load the .ui layout that lives next to this module.
        layout = self.get_tab_layout()
        ui_filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'camera.ui')
        self.ui = UiLoader().load(ui_filepath)
        layout.addWidget(self.ui)
        port = int(self.settings['connection_table'].find_by_name(self.settings["device_name"]).BLACS_connection)
        self.ui.port_label.setText(str(port))
        self.ui.check_connectivity_pushButton.setIcon(QIcon(':/qtutils/fugue/arrow-circle'))
        # Any settings change (or the button) triggers a connectivity re-check.
        self.ui.host_lineEdit.returnPressed.connect(self.update_settings_and_check_connectivity)
        self.ui.use_zmq_checkBox.toggled.connect(self.update_settings_and_check_connectivity)
        self.ui.check_connectivity_pushButton.clicked.connect(self.update_settings_and_check_connectivity)
    def get_save_data(self):
        # State persisted with the BLACS front panel.
        return {'host': str(self.ui.host_lineEdit.text()), 'use_zmq': self.ui.use_zmq_checkBox.isChecked()}
    def restore_save_data(self, save_data):
        # 'use_zmq' may be absent in saves from older versions; leave the
        # checkbox untouched in that case.
        if save_data:
            host = save_data['host']
            self.ui.host_lineEdit.setText(host)
            if 'use_zmq' in save_data:
                use_zmq = save_data['use_zmq']
                self.ui.use_zmq_checkBox.setChecked(use_zmq)
        else:
            self.logger.warning('No previous front panel state to restore')
        # call update_settings if primary_worker is set
        # this will be true if you load a front panel from the file menu after the tab has started
        if self.primary_worker:
            self.update_settings_and_check_connectivity()
    def initialise_workers(self):
        worker_initialisation_kwargs = {'port': self.ui.port_label.text()}
        self.create_worker("main_worker", CameraWorker, worker_initialisation_kwargs)
        self.primary_worker = "main_worker"
        self.update_settings_and_check_connectivity()
    @define_state(MODE_MANUAL, queue_state_indefinitely=True, delete_stale_states=True)
    def update_settings_and_check_connectivity(self, *args):
        # Show a busy indicator, then hand the actual check to the worker.
        icon = QIcon(':/qtutils/fugue/hourglass')
        pixmap = icon.pixmap(QSize(16, 16))
        status_text = 'Checking...'
        self.ui.status_icon.setPixmap(pixmap)
        self.ui.server_status.setText(status_text)
        kwargs = self.get_save_data()
        responding = yield(self.queue_work(self.primary_worker, 'update_settings_and_check_connectivity', **kwargs))
        self.update_responding_indicator(responding)
    def update_responding_indicator(self, responding):
        # Tick for a responsive server, exclamation mark otherwise.
        if responding:
            icon = QIcon(':/qtutils/fugue/tick')
            pixmap = icon.pixmap(QSize(16, 16))
            status_text = 'Server is responding'
        else:
            icon = QIcon(':/qtutils/fugue/exclamation')
            pixmap = icon.pixmap(QSize(16, 16))
            status_text = 'Server not responding'
        self.ui.status_icon.setPixmap(pixmap)
        self.ui.server_status.setText(status_text)
@BLACS_worker
class CameraWorker(Worker):
    def init(self):#, port, host, use_zmq):
        """Worker startup: import dependencies into module globals.

        BLACS workers run in their own process; the `global x; import x`
        pattern makes these imports visible to the other methods.
        """
        # self.port = port
        # self.host = host
        # self.use_zmq = use_zmq
        global socket; import socket
        global zmq; import zmq
        global zprocess; import zprocess
        global shared_drive; import labscript_utils.shared_drive as shared_drive
        self.host = ''
        self.use_zmq = False
def update_settings_and_check_connectivity(self, host, use_zmq):
self.host = host
self.use_zmq = use_zmq
if not self.host:
return False
if not self.use_zmq:
return self.initialise_sockets(self.host, self.port)
else:
response = zprocess.zmq_get_raw(self.port, self.host, data='hello')
if response == 'hello':
return True
else:
raise Exception('invalid response from server: ' + str(response))
def initialise_sockets(self, host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
assert port, 'No port number supplied.'
assert host, 'No hostname supplied.'
assert str(int(port)) == port, 'Port must be an integer.'
s.settimeout(10)
s.connect((host, int(port)))
s.send('hello\r\n')
response = s.recv(1024)
s.close()
if 'hello' in response:
return True
else:
raise Exception('invalid response from server: ' + response)
def transition_to_buffered(self, device_name, h5file, initial_values, fresh):
# h5file = shared_drive.path_to_agnostic(h5file)
if not self.use_zmq:
return self.transition_to_buffered_sockets(h5file,self.host, self.port)
response = zprocess.zmq_get_raw(self.port, self.host, data=h5file.encode('utf-8'))
if response != 'ok':
raise Exception('invalid response from server: ' + str(response))
response = zprocess.zmq_get_raw(self.port, self.host, timeout = 10)
if response != 'done':
raise Exception('invalid response from server: ' + str(response))
return {} # indicates final values of buffered run, we have none
def transition_to_buffered_sockets(self, h5file, host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(120)
s.connect((host, int(port)))
s.send('%s\r\n'%h5file)
response = s.recv(1024)
if not 'ok' in response:
s.close()
raise Exception(response)
response = s.recv(1024)
if not 'done' in response:
s.close()
raise Exception(response)
return {} # indicates final values of buffered run, we have none
def transition_to_manual(self):
if not self.use_zmq:
return self.transition_to_manual_sockets(self.host, self.port)
response = zprocess.zmq_get_raw(self.port, self.host, 'done')
if response != 'ok':
raise Exception('invalid response from server: ' + str(response))
response = zprocess.zmq_get_raw(self.port, self.host, timeout = 10)
if response != 'done':
raise Exception('invalid response from server: ' + str(response))
return True # indicates success
def transition_to_manual_sockets(self, host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(120)
s.connect((host, int(port)))
s.send('done\r\n')
response = s.recv(1024)
if response != 'ok\r\n':
s.close()
raise Exception(response)
response = s.recv(1024)
if not 'done' in response:
s.close()
raise Exception(response)
return True # indicates success
def abort_buffered(self):
return self.abort()
def abort_transition_to_buffered(self):
return self.abort()
def abort(self):
if not self.use_zmq:
return self.abort_sockets(self.host, self.port)
response = zprocess.zmq_get_raw(self.port, self.host, 'abort')
if response != 'done':
raise Exception('invalid response from server: ' + str(response))
return True # indicates success
def abort_sockets(self, host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(120)
s.connect((host, int(port)))
s.send('abort\r\n')
response = s.recv(1024)
if not 'done' in response:
s.close()
raise Exception(response)
return True # indicates success
def program_manual(self, values):
return {}
def shutdown(self):
return
|
[
"[email protected]"
] | |
bd73d143b5e404f6ecbbf21996e1113b86771e97
|
b144c2a117eb3553f9a801dc685164e46368f220
|
/ROS-main/catkin_ws/build/catkin_generated/order_packages.py
|
820c37b42f2fa7c1b77853e1e46bd8b1ccc6d83a
|
[] |
no_license
|
BjoBor/RobotBlackjack
|
beb650d25290ddf6f31329a0cbd984cb1406de1c
|
ea07d3587714399ba064117095def1e7cc65b75d
|
refs/heads/main
| 2023-01-25T05:21:38.148287 | 2020-12-10T19:39:51 | 2020-12-10T19:39:51 | 319,963,846 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 315 |
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
def _split_semicolon_list(raw):
    # An empty template substitution means "no entries", not [''].
    return raw.split(';') if raw else []

source_root_dir = '/home/danial/catkin_ws/src'
whitelisted_packages = _split_semicolon_list('')
blacklisted_packages = _split_semicolon_list('')
underlay_workspaces = _split_semicolon_list('/opt/ros/noetic')
|
[
"[email protected]"
] | |
7b610bf2dc37263d332476c74cca4f006e5c126c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02628/s221826573.py
|
8d9cc91fbf2407d05ab712842180953fe7ae11f1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 124 |
py
|
# Fruit prices: buy the K cheapest of the N offered items.
_, k = map(int, input().split())
prices = [int(token) for token in input().split()]
prices.sort()
print(sum(prices[:k]))
|
[
"[email protected]"
] | |
1391d16711381447cf1a4662f4cf3fd0427a6c15
|
1730decd1cc1609bd4e3bae61bb679c4afb99fe9
|
/gui_basic/gui_project/gui_1.py
|
09bed1194d93534d063ccaa6c71414af9237e003
|
[] |
no_license
|
jimin4017/python_project
|
d347c63ef0d4924ffd44f9a81bc4ea08341399f3
|
9c53293634caafdd3a1210283018847d27fcb76d
|
refs/heads/master
| 2023-04-11T03:01:24.895755 | 2021-04-14T18:12:08 | 2021-04-14T18:12:08 | 340,390,578 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,713 |
py
|
import tkinter.ttk as ttk
from tkinter import *

# Simple file-manager style window: add/remove files, choose a save
# path, and (partially implemented) image-width options.
root = Tk()
root.title("jimin project")  # window title
root.geometry("640x480+100+300")  # "width x height" of the window
# "+100+300" is the initial x,y position on screen

file_frame = Frame(root)
file_frame.pack()
btn_Add_file = Button(file_frame,padx=5,pady=5,width=12,text="파일추가")  # "add file"
btn_Add_file.pack(side="left")
btn_del_file = Button(file_frame,padx=5,pady=5,width=12, text="선택삭제")  # "delete selected"
btn_del_file.pack(side="right")

# file list below, displayed like a notepad text area
list_frame = Frame(root)
list_frame.pack(fill="both")
scrollbar = Scrollbar(list_frame)
scrollbar.pack(side="right",fill="y")
list_file = Listbox(list_frame, selectmode="extended",height= 15,yscrollcommand=scrollbar.set)
list_file.pack(side="left",fill="both",expand=True)
scrollbar.config(command=list_file.yview)

# save-path frame
path_frame = LabelFrame(root,text="저장경로")  # "save path"
path_frame.pack()
txt_dest_path = Entry(path_frame)
txt_dest_path.pack(side="left",fill="both")
btn_dest_path = Button(path_frame,text="찾아보기",width=10)  # "browse"
btn_dest_path.pack(side="right")

# options frame
frame_option = LabelFrame(root,text="옵션")  # "options"
frame_option.pack()

# 1. width option
# width label (created but never packed -- not visible yet)
lbl_width = Label(frame_option, text="가로넓이",)
# ## width combobox (work in progress, commented out)
# opt_width = ["원본유지","1024","800","640"]
# cmb_width = ttk.Combobox(frame_option,state="readonly",values=opt_width)
# cmb_width.current()
# cmb_width.pack(side="left")

root.resizable(False,False)  # window size fixed in both x and y
root.mainloop()  # enter the Tk event loop
|
[
"[email protected]"
] | |
0c1a61961e8a5d9d8772be8258041e8096719937
|
3eba91e10b5758cfea17334cd4bff5393e8fbc87
|
/src/runner.py
|
668d55684868e8d469f42103fa83ff78a01cdae0
|
[] |
no_license
|
NanThanThanSoe/TestAutomationFramework
|
3306ffdb32686cc8db3e4662ede50811a750c499
|
3cd225156ac7aa3cd2113f7845d1512068a77478
|
refs/heads/main
| 2023-05-23T15:44:21.941054 | 2021-06-11T09:43:33 | 2021-06-11T09:43:33 | 341,836,360 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 646 |
py
|
"""
This is a headless prototype to execute pytest test cases with a custom configuration file
"""
import json
from configuration import Test_Plan
import pytest
import os
def run():
    """Entry point: execute the pytest suite described by test3.json.

    Reads the target device IP from the BOX_IP environment variable
    (falling back to a hard-coded lab address -- TODO confirm that
    default is still valid), injects it into the test plan as "rhost",
    and invokes pytest with HTML and JUnit reporting.
    """
    boxip = os.environ.get("BOX_IP", "10.13.201.18")
    print(f"Using Box IP: {boxip}")
    config = Test_Plan("data/configuration/test3.json")
    config.configuration.customize_config("rhost", boxip)
    # Reports land under logs/; the whole plan is forwarded to the tests
    # as a JSON string via the custom --configuration option.
    pytest.main(["--html", "logs/output.html", "--self-contained-html"] + ["--junitxml", "logs/output.junit"] +
                ["--configuration", json.dumps(config.get_configuration())] + config.get_testfiles())


if __name__ == "__main__":
    run()
|
[
"[email protected]"
] | |
5fdd01c76510a26587a3b1a59f24fc573d6df8f5
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/7dbcaa7c22297fe1b303/snippet.py
|
2378306d1ede8dd7979bb02a73d1b3106a44283a
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 |
Python
|
UTF-8
|
Python
| false | false | 4,969 |
py
|
#!/usr/bin/env python
"""
Pandoc filter to parse CriticMarkup into Spans for
Insertion and Deletion. The Docx writer will convert
these into Tracked Changes.
A comment immediately after a change will be parsed
for "author: The Author" and "date: 12-21-12", which
will be inserted into the Span as appropriate.
"""
from pandocfilters import Span, Str, RawInline, walk, attributes, stringify
import re
import sys
import json
regexes = {
'all': re.compile(r"([-+=~]{2}\}\{>>|\{[-+~>=]{2}|[-+=~<]{2}\}|~>)"),
# 'all': re.compile(r"(\{[-+~>=]{2}|[-+=~<]{2}\}|~>)"),
}
def parseMarks (key, value, format, meta):
    """pandocfilters action: split Str nodes on CriticMarkup delimiters.

    When a Str's text contains a delimiter (e.g. "{++", "--}", "~>"),
    the text is split once into [before, delimiter, after]; the
    delimiter becomes a RawInline tagged 'critic' and the tail is
    re-walked so a single Str can yield several critic tokens.
    Returning None (implicitly) leaves non-matching nodes unchanged.
    """
    if key == 'Str':
        if regexes['all'].search(value):
            items = regexes['all'].split(value, 1)
            result = [
                Str(items[0]),
                RawInline('critic', items[1])]
            result.extend(walk([Str(items[2])], parseMarks, format, meta))
            return result
spanstart = {
'{++' : 'insertion',
'{--' : 'deletion',
'{==' : 'hilite',
'{>>' : 'comment',
'{~~' : 'subdelete'
}
spanend = {
'insertion' : '++}',
'deletion' : '--}',
'hilite' : '==}',
# 'comment' : '<<}',
}
spancomment = {
'insertion' : '++}{>>',
'deletion' : '--}{>>',
'hilite' : '==}{>>',
'subadd' : '~~}{>>',
}
def makeSpan (contents, classes = "", author = "", date = ""):
    """Build a pandoc Span wrapping *contents* with CriticMarkup attrs.

    *classes* is a space-separated string (e.g. "insertion deletion");
    author/date are carried so the docx writer can emit tracked changes.
    """
    attrs = {'classes' : classes.split(), 'author' : author, 'date' : date}
    return Span (attributes(attrs), contents)
def findAuthor (comment):
    """Extract an author name from a CriticMarkup comment string.

    Accepts either "author: Some Name" or "@SomeName"; returns "" when
    neither form is present.
    """
    match = re.search(r"(author:|@)\s*([\w\s]+)", comment)
    return match.group(2) if match else ""
def findDate (comment):
    """Return the whitespace-free token after "date:" in *comment*.

    Returns "" when no date marker is present.
    """
    match = re.search(r"date:\s*(\S+)", comment)
    if match is None:
        return ""
    return match.group(1)
inspan = False
spantype = None
lasttype = None
spancontents = []
priorspan = []
def spanify (key, value, format, meta):
global inspan
global spantype
global lasttype
global spancontents
global priorspan
if inspan:
# pass
if key == 'RawInline' and value[0] == 'critic':
if value[1] == spanend.get(spantype, ""):
newspan = makeSpan(spancontents, spantype)
inspan = False
spantype = None
spancontents = []
return walk([newspan], spanify, format, meta)
elif spantype == 'subdelete' and value[1] == '~>':
priorspan.append({'type': 'deletion', 'contents': spancontents})
spancontents = []
spantype = 'subadd'
return []
elif spantype == 'subadd' and value[1] == '~~}':
delspan = makeSpan(priorspan[0]['contents'], 'deletion')
addspan = makeSpan(spancontents, 'insertion')
inspan = False
spantype = None
priorspan = []
spancontents = []
return walk([delspan, addspan], spanify, format, meta)
elif value[1] == spancomment.get(spantype, ""):
thistype = spantype
if thistype == 'subadd': thistype = 'insertion'
priorspan.append({'type': thistype, 'contents': spancontents})
spancontents = []
spantype = 'comment'
return []
elif value[1] == '<<}' and spantype == 'comment':
commentstring = stringify(spancontents)
result = []
# if len(priorspan) > 0:
author = findAuthor(commentstring)
date = findDate(commentstring)
for item in priorspan:
result.append(makeSpan(item['contents'], item['type'], author, date))
comment = "<!-- %s -->" % commentstring
result.append(RawInline('html', comment))
priorspan = []
spancontents = []
spantype = None
inspan = False
return walk(result, spanify, format, meta)
else:
spancontents.append({'t': key, 'c': value})
return []
else:
spancontents.append({'t': key, 'c': value})
return []
else:
if key == 'RawInline' and value[0] == 'critic':
thetype = spanstart.get(value[1], "")
if thetype:
spantype = thetype
inspan = True
spancontents = []
return []
else:
#this is a user error, do not parse
pass
else:
pass
if __name__ == "__main__":
doc = json.loads(sys.stdin.read())
if len(sys.argv) > 1:
format = sys.argv[1]
else:
format = ""
meta = doc[0]['unMeta']
parsed = walk(doc, parseMarks, format, meta)
altered = walk(parsed, spanify, format, meta)
json.dump(altered, sys.stdout)
|
[
"[email protected]"
] | |
4f388037513dc7157edd78c95a929b1b7d5c1ed8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/6/usersdata/131/2399/submittedfiles/investimento.py
|
b734cf524df522049516f8e80f2ef98958d66a91
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 774 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
# COMECE SEU CODIGO AQUI
# Projects a savings balance forward one year at a time at a fixed 4.5%
# annual interest rate (Python 2 script: `input` evaluates the typed number).

# ENTRADA
a = input('digite seu saldo 2016: ')

# PROCESSAMENTO + SAIDA
# Bug fix: the original unrolled this into ten copy-pasted assignments and
# the 2022 step called `flaot(...)` (typo), raising a NameError at runtime.
saldo = a
for ano in range(2017, 2027):
    saldo = float(saldo * 0.045 + saldo)  # add one year of interest
    print('seu saldo em %d %.2f' % (ano, saldo))
|
[
"[email protected]"
] | |
b8bb4ff270fd0aea8e827648cbb85767f97aa6e5
|
8761d98a4eba4ef828a8161c48a89856ab20b1d7
|
/oo/carro_enunciado.py
|
e666dca5c96f0b9173dc38f611b15a87aa14e850
|
[
"MIT"
] |
permissive
|
euzivamjunior/pythonbirds
|
c5ddbbb671cc7f0276a917fb8b46ab00745b1581
|
ae9ecc1821f19a4fcb7bb44b9285c49184d7f23c
|
refs/heads/master
| 2023-02-20T02:10:26.063243 | 2020-12-21T16:55:24 | 2020-12-21T16:55:24 | 318,506,330 | 0 | 0 |
MIT
| 2020-12-04T12:14:11 | 2020-12-04T12:14:11 | null |
UTF-8
|
Python
| false | false | 2,204 |
py
|
"""Você deve criar uma classe carro que vai possuir 2 atributos compostos por outras duas classes:
1 - Motor
2 - Direção
O motor terá a responsabilidade de controlar a velocidade.
Ele oferece os seguintes atributos:
1 - Atributo de dado: velocidade
2 - Método acelerar, que deverá incrementar a velocidade de uma velocidade
3 - Método frear que deverá decrementar a velocidade em duas unidades
A direção terá a responsabilidade de controlar a direção. Ela oferece os seguintes atributos:
1 - Valor de direção com valores possíveis: Norte, Sul, Leste, Oeste.
2 - Método girar a direita
2 - Método girar a esquerda
N
O L - Virar a direita significa a mudança na seguinte sequência N-L-S-O-N
S - Virar a esquerda: N-O-S-L
Exemplo:
#Testando motor
>>> motor = Motor()
>>> motor.velocidade
0
>>> motor.acelerar()
>>> motor.velocidade
1
>>> motor.acelerar()
>>> motor.velocidade
2
>>> motor.acelerar()
>>> motor.velocidade
3
>>> motor.frear()
>>> motor.velocidade
1
>>> motor.frear()
>>> motor.velocidade
0
>>> # Testando Direção()
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Norte'
>>> carro = Carro(direcao, motor)
>>> carro.calcular_velocidade()
0
>>> carro.acelerar()
>>> carro.calcular_velocidade()
1
>>> carro.acelerar()
>>> carro.calcular_velocidade()
2
>>> carro.acelerar()
>>> carro.calcular_velocidade()
0
>>> carro.calcular.direcao()
'Norte'
>>> carro.girar_a_direita()
>>> carro.calcular.direcao()
'Leste'
>>> carro.girar_a_esquerda()
>>> carro.calcular.direcao()
'Norte'
>>> carro.girar_a_esquerda()
>>> carro.calcular.direcao()
'Oeste'
"""
# Solução Instrutor
|
[
"[email protected]"
] | |
ff8233385b14ba5f9bbf38133c2e520d54c876da
|
58c6905e164ec982f7ae89e67a6692196ea7aaff
|
/Lesson 6/ex02.py
|
f139b1f86f073969e2fca213a3b6c314c29060f4
|
[] |
no_license
|
kovasa/academy
|
970a8aa8b5b4be2db0914708d1fc26b175118935
|
a4babd6e791e7453c403de9b78cd7999e809b58e
|
refs/heads/master
| 2023-05-27T05:46:14.495013 | 2020-02-26T10:25:38 | 2020-02-26T10:25:38 | 236,598,853 | 0 | 0 | null | 2023-05-22T22:40:10 | 2020-01-27T21:27:02 |
Python
|
UTF-8
|
Python
| false | false | 354 |
py
|
""" Lesson 6 ex.2 """
def gen_atr(obj):
    """Yield the public attribute names of *obj*, one at a time.

    "Public" here means the name does not start with a double
    underscore; order follows dir(), i.e. alphabetical.
    """
    yield from (name for name in dir(obj) if not name.startswith('__'))
def _test():
    """Smoke-test gen_atr by printing every public attribute of a str."""
    # Pull the first two names explicitly to exercise next(), then drain
    # the rest of the generator -- same output as one plain loop.
    attrs = gen_atr("string")
    print(next(attrs))
    print(next(attrs))
    while True:
        try:
            name = next(attrs)
        except StopIteration:
            break
        print(name)


_test()
|
[
"[email protected]"
] | |
8b47e68e06661efee88f6eb194e54f3b99a71a55
|
fbb9dd2328a00df7d2522b736b6a6e5f2dc42157
|
/Qtwindow/pageFour.py
|
111fc383df70d954ac6b1f1fee7405bf569377ed
|
[] |
no_license
|
llemontea/hyperOpt
|
01d5685e31ac53e4d4b56882bd048fce40995bb6
|
576e84ba643566b029af2c82d3b7285d1b587443
|
refs/heads/master
| 2022-08-30T23:04:35.660173 | 2020-05-28T10:01:00 | 2020-05-28T10:01:00 | 266,337,927 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,190 |
py
|
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import chardet
# 自定义编辑区域.附加行号显示,高亮文本等效果.
class CodeEditor(QPlainTextEdit):
class NumberBar(QWidget):
def __init__(self, editor):
QWidget.__init__(self, editor)
self.editor = editor
self.editor.blockCountChanged.connect(self.updateWidth)
self.editor.updateRequest.connect(self.updateContents)
self.font = QFont()
self.numberBarColor = QColor("#e8e8e8")
def paintEvent(self, event):
painter = QPainter(self)
painter.fillRect(event.rect(), self.numberBarColor)
block = self.editor.firstVisibleBlock()
while block.isValid():
blockNumber = block.blockNumber()
block_top = self.editor.blockBoundingGeometry(block).translated(self.editor.contentOffset()).top()
if blockNumber == self.editor.textCursor().blockNumber():
self.font.setBold(True)
painter.setPen(QColor("#000000"))
else:
self.font.setBold(False)
painter.setPen(QColor("#717171"))
paint_rect = QRect(0, block_top, self.width(), self.editor.fontMetrics().height())
painter.drawText(paint_rect, Qt.AlignCenter, str(blockNumber + 1))
block = block.next()
def getWidth(self):
count = self.editor.blockCount()
if 0 <= count < 99999:
width = self.fontMetrics().width('99999')
else:
width = self.fontMetrics().width(str(count))
return width
def updateWidth(self):
width = self.getWidth()
self.editor.setViewportMargins(width, 0, 0, 0)
def updateContents(self, rect, dy):
if dy:
self.scroll(0, dy)
else:
self.update(0, rect.y(), self.width(), rect.height())
if rect.contains(self.editor.viewport().rect()):
fontSize = self.editor.currentCharFormat().font().pointSize()
self.font.setPointSize(fontSize)
self.font.setStyle(QFont.StyleNormal)
self.updateWidth()
def __init__(self):
super(CodeEditor, self).__init__()
self.setFont(QFont("Console", 12))
# 非自动换行.超出屏幕显示范围的部分会出现滚动条.
self.setLineWrapMode(QPlainTextEdit.NoWrap)
self.number_bar = self.NumberBar(self)
self.currentLineNumber = None
self.cursorPositionChanged.connect(self.highligtCurrentLine)
self.setViewportMargins(50, 0, 0, 0)
self.highligtCurrentLine()
def resizeEvent(self, *e):
cr = self.contentsRect()
rec = QRect(cr.left(), cr.top(), self.number_bar.getWidth(), cr.height())
self.number_bar.setGeometry(rec)
def highligtCurrentLine(self):
newCurrentLineNumber = self.textCursor().blockNumber()
if newCurrentLineNumber != self.currentLineNumber:
lineColor = QColor(Qt.lightGray).lighter(120)
self.currentLineNumber = newCurrentLineNumber
hi_selection = QTextEdit.ExtraSelection()
hi_selection.format.setBackground(lineColor)
hi_selection.format.setProperty(QTextFormat.FullWidthSelection, True)
hi_selection.cursor = self.textCursor()
hi_selection.cursor.clearSelection()
self.setExtraSelections([hi_selection])
class fourWidget(QWidget):
def __init__(self):
super(fourWidget, self).__init__()
self.main_widget = QWidget()
self.main_layout = QVBoxLayout()
self.headerLabel = QLabel('简易文本编辑区域.适用于.txt和.py等简单文本类文件的编辑.请勿打开.doc等复杂文本文件或者无法转换为文本内容的文件.')
self.firstRow_layout = QHBoxLayout()
self.fileButton = QPushButton('打开文件')
self.newButton = QPushButton('新建文件')
self.tipLabel = QLabel('当前文件:')
self.fileLabel = QLabel('当前没有打开或新建的可编辑文件.')
self.textEdit = CodeEditor()
self.lastRow_layout = QHBoxLayout()
self.clearButton = QPushButton('清空内容')
self.saveButton = QPushButton('保存')
self.saveasButton = QPushButton('另存为')
self.code = 'utf-8'
self.init_widget()
def getWidget(self):
return self.main_widget
def init_widget(self):
self.fileButton.setFixedWidth(150)
self.newButton.setFixedWidth(150)
self.fileButton.clicked.connect(lambda: self.openfile(self.textEdit))
self.newButton.clicked.connect(lambda: self.newfile(self.textEdit))
self.firstRow_layout.addWidget(self.fileButton)
self.firstRow_layout.addWidget(self.newButton)
self.firstRow_layout.addWidget(self.tipLabel)
self.firstRow_layout.addWidget(self.fileLabel)
self.firstRow_layout.addStretch()
self.newButton.setContentsMargins(10, 0, 0, 0)
self.tipLabel.setContentsMargins(20, 0, 0, 0)
self.clearButton.clicked.connect(lambda: self.clearEdit(self.textEdit))
self.saveButton.clicked.connect(lambda: self.savefile(self.textEdit))
self.saveasButton.clicked.connect(lambda: self.saveasfile(self.textEdit))
self.saveButton.setEnabled(False)
self.saveasButton.setEnabled(False)
self.textEdit.setEnabled(False)
self.clearButton.setFixedWidth(150)
self.saveButton.setFixedWidth(150)
self.saveasButton.setFixedWidth(150)
self.lastRow_layout.addWidget(self.saveasButton)
self.lastRow_layout.addWidget(self.saveButton)
self.lastRow_layout.addWidget(self.clearButton)
self.lastRow_layout.setDirection(1)
self.lastRow_layout.addStretch()
self.clearButton.setContentsMargins(0, 0, 10, 0)
self.saveButton.setContentsMargins(0, 0, 10, 0)
self.setwidgetStyle()
self.main_layout.addWidget(self.headerLabel)
self.main_layout.addLayout(self.firstRow_layout)
self.main_layout.addWidget(self.textEdit)
self.main_layout.addLayout(self.lastRow_layout)
self.main_widget.setLayout(self.main_layout)
def openfile(self, edit):
dialog = QFileDialog()
dialog.setFileMode(QFileDialog.AnyFile)
dialog.setFilter(QDir.Files)
if dialog.exec_():
try:
print(dialog.selectedFiles())
filenames = dialog.selectedFiles()
with open(filenames[0], 'rb') as fp:
# 自适应编码格式读取.有些.txt文件和.py文件是不同的编码,所以不解码的话就无法打开,进而卡死.
data = fp.read()
f_charinfo = chardet.detect(data)
self.code = f_charinfo['encoding']
edit.setPlainText(str(data.decode(f_charinfo['encoding'])))
edit.setEnabled(True)
self.fileLabel.setText(str(filenames[0]))
self.saveButton.setEnabled(True)
self.saveasButton.setEnabled(True)
except Exception:
message = QMessageBox()
message.setWindowIcon(QIcon('icon/tip.png'))
message.setWindowTitle('打开文件失败')
message.setText('编解码及读取过程中出现问题,打开失败.')
message.addButton(QPushButton("确定"), QMessageBox.YesRole)
message.exec_()
def newfile(self, edit):
if len(edit.toPlainText()) != 0:
message = QMessageBox()
message.setWindowIcon(QIcon('icon/tip.png'))
message.setWindowTitle('新建编辑区域')
message.setText('新创建编辑区会导致正在编辑的内容清空!\n如果需要保存现有内容,请先保存后再新建编辑区.')
yes = message.addButton("确定", QMessageBox.YesRole)
no = message.addButton("取消", QMessageBox.NoRole)
message.exec_()
if message.clickedButton() == yes:
self.code = 'utf-8'
edit.clear()
self.fileLabel.setText('新建文件')
self.saveButton.setEnabled(False)
else:
pass
else:
self.code = 'utf-8'
self.fileLabel.setText('新建文件')
self.saveButton.setEnabled(False)
self.saveasButton.setEnabled(True)
edit.setEnabled(True)
def clearEdit(self, edit):
edit.clear()
# 仅在打开文件,或者新建文件已经完成另存为确定位置的情况下,保存文件的按钮才是可以使用的.换言之:右上角显示文件路径的时候.
def savefile(self, edit):
aim_file = str(self.fileLabel.text())
print('save', aim_file)
fp = open(aim_file, 'w+', encoding = self.code)
with fp:
fp.write(str(edit.toPlainText()))
# 在新建文件时,想要让这个新建的文件成为一个可保存文件的唯一途径就是另存为.
# 新建文件的另存为操作会改变右上角的文件路径,打开文件的另存为则不会改变右上角的文件路径.
def saveasfile(self, edit):
fileName, ok= QFileDialog.getSaveFileName(self, '文件另存为', 'C:/', 'Text Files (*.txt)')
if ok:
print('save as', fileName)
with open(fileName, 'w', encoding = self.code) as fp:
fp.write(str(edit.toPlainText()))
if str(self.fileLabel.text()) == '新建文件':
self.fileLabel.setText(fileName)
self.saveButton.setEnabled(True)
def setwidgetStyle(self):
self.main_widget.setStyleSheet('''
QLabel {
font-family: "Dengxian";
font: 16px;
vertical-align: middle;
}
''')
|
[
"[email protected]"
] | |
42ec63637ac766889e253c31e4c9cd29bdf63e4e
|
795b9190c580d96498288b4e9b48dd1c29fa54ca
|
/TFtrain.py
|
225a011c421ad9d80e08f944c616898a006d3501
|
[] |
no_license
|
shen338/Obfuscated-Face-Reconstruction
|
2702fbf9eaa884ce5121b0009a87177238cadd74
|
1f3e67cf0a54da57d37a4d20a512d52efde2be85
|
refs/heads/master
| 2021-05-08T06:33:39.772659 | 2018-01-08T21:52:03 | 2018-01-08T21:52:03 | 106,633,982 | 6 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,803 |
py
|
import tensorflow as tf
import numpy as np
import cv2
import glob
import random
import sys
def load_image(addr):
    """Read the image at *addr* and return it as an RGB uint8 array.

    cv2 loads images in BGR order, so the channels are swapped to RGB.
    (The resize step is currently disabled; the stale "(224, 224)"
    comment from the original did not match the code.)
    """
    img = cv2.imread(addr)
    #img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.astype(np.uint8)
    return img
def _int64_feature(value):
    """Wrap a single int in a tf.train.Feature (int64 list of length 1)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrap a single bytes value in a tf.train.Feature (bytes list of length 1)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
shuffle_data = True
image_path_small = './origin/small/*.png'
address_small = glob.glob(image_path_small)
print(len(address_small))
image_path_origin = './origin/origin/*.png'
address_origin = glob.glob(image_path_origin)
if shuffle_data:
c = list(zip(address_small, address_origin))
random.shuffle(c)
address_small, address_origin= zip(*c)
train_filename = 'train_espcn.tfrecords'
# create new TFrecord file
writer = tf.python_io.TFRecordWriter(train_filename)
for i in range(len(address_small)):
if not i % 1000:
print('Train data: {}/{}'.format(i, len(address_small)))
sys.stdout.flush()
img_small = load_image(address_small[i])
img_origin = load_image(address_origin[i])
feature = {'train/image_small': _bytes_feature(tf.compat.as_bytes(img_small.tostring())),
'train/image_origin': _bytes_feature(tf.compat.as_bytes(img_origin.tostring()))}
# Create an example protocol buffer
example = tf.train.Example(features=tf.train.Features(feature=feature))
# Serialize to string and write on the file
writer.write(example.SerializeToString())
writer.close()
|
[
"[email protected]"
] | |
ebae63b152ff453d136f25c77bd60094c14bec8c
|
b3d951acf68f12e7d295824b52173d4391ad57c3
|
/lesson4/crawler_record_user_click.py
|
1f80dc59e57412a513cb9df86bee93f9e0724e17
|
[] |
no_license
|
rafi80/UdacityIntroToComputerScience
|
ab6e7c7279b0760edda9669228dd2d0c0c10b67a
|
c2d702b9ca6878d17ac72fb39489b4a850644c1a
|
refs/heads/master
| 2021-01-20T18:39:55.356079 | 2016-12-07T15:13:44 | 2016-12-07T15:13:44 | 60,169,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,534 |
py
|
# 2 Gold Stars
# One way search engines rank pages
# is to count the number of times a
# searcher clicks on a returned link.
# This indicates that the person doing
# the query thought this was a useful
# link for the query, so it should be
# higher in the rankings next time.
# (In Unit 6, we will look at a different
# way of ranking pages that does not depend
# on user clicks.)
# Modify the index such that for each url in a
# list for a keyword, there is also a number
# that counts the number of times a user
# clicks on that link for this keyword.
# The result of lookup(index,keyword) should
# now be a list of url entries, where each url
# entry is a list of a url and a number
# indicating the number of times that url
# was clicked for this query keyword.
# You should define a new procedure to simulate
# user clicks for a given link:
# record_user_click(index,word,url)
# that modifies the entry in the index for
# the input word by increasing the count associated
# with the url by 1.
# You also will have to modify add_to_index
# in order to correctly create the new data
# structure, and to prevent the repetition of
# entries as in homework 4-5.
def record_user_click(index, keyword, url):
    """Bump the click count stored next to *url* under *keyword*.

    Silently does nothing when the keyword (or url) is not indexed.
    """
    url_entries = lookup(index, keyword)
    if not url_entries:
        return
    for url_entry in url_entries:
        if url_entry[0] == url:
            url_entry[1] += 1
def add_to_index(index, keyword, url):
    """Register *url* under *keyword* with a zero click count.

    The index is a list of [keyword, [[url, clicks], ...]] entries.
    A url already recorded for the keyword is left untouched, so no
    duplicate entries are created.
    """
    for word, url_entries in index:
        if word != keyword:
            continue
        if any(stored_url == url for stored_url, _ in url_entries):
            return  # already indexed for this keyword
        url_entries.append([url, 0])
        return
    # keyword not seen before: start a fresh entry
    index.append([keyword, [[url, 0]]])
def get_page(url):
    """Return canned HTML for one of four known test urls, else "".

    Offline stand-in for an HTTP fetch, used by crawl_web.
    NOTE(review): the bare `except` maps any error to "" -- acceptable
    for this exercise, but it would hide real bugs in production code.
    """
    try:
        if url == "http://www.udacity.com/cs101x/index.html":
            return '''<html> <body> This is a test page for learning to crawl!
<p> It is a good idea to
<a href="http://www.udacity.com/cs101x/crawling.html">
learn to crawl</a> before you try to
<a href="http://www.udacity.com/cs101x/walking.html">walk</a> or
<a href="http://www.udacity.com/cs101x/flying.html">fly</a>.</p></body></html>'''
        elif url == "http://www.udacity.com/cs101x/crawling.html":
            return '''<html> <body> I have not learned to crawl yet, but I am
quite good at <a href="http://www.udacity.com/cs101x/kicking.html">kicking</a>.
</body> </html>'''
        elif url == "http://www.udacity.com/cs101x/walking.html":
            return '''<html> <body> I cant get enough
<a href="http://www.udacity.com/cs101x/index.html">crawling</a>!</body></html>'''
        elif url == "http://www.udacity.com/cs101x/flying.html":
            return '<html><body>The magic words are Squeamish Ossifrage!</body></html>'
    except:
        return ""
    return ""
def union(a, b):
    """Append to *a*, in place, every element of *b* not already in *a*."""
    for candidate in b:
        if candidate in a:
            continue
        a.append(candidate)
def get_next_target(page):
    """Locate the first '<a href=' link in *page*.

    Returns (url, index_of_closing_quote); (None, 0) when the page
    contains no anchor tag.
    """
    anchor = page.find('<a href=')
    if anchor < 0:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote


def get_all_links(page):
    """Return every href target in *page*, in order of appearance."""
    found = []
    url, stop = get_next_target(page)
    while url:
        found.append(url)
        page = page[stop:]
        url, stop = get_next_target(page)
    return found
def crawl_web(seed):
    """Crawl every page reachable from *seed* and return the word index.

    Pages are taken from the end of the frontier (depth-first flavour)
    and each url is fetched at most once.
    """
    frontier = [seed]
    visited = []
    index = []
    while frontier:
        url = frontier.pop()
        if url in visited:
            continue
        content = get_page(url)
        add_page_to_index(index, url, content)
        union(frontier, get_all_links(content))
        visited.append(url)
    return index
def add_page_to_index(index, url, content):
    """Register *url* under every whitespace-separated word of *content*."""
    for token in content.split():
        add_to_index(index, token, url)
def lookup(index, keyword):
    """Return the [url, clicks] entry list stored under *keyword*, else None."""
    for word, url_entries in index:
        if word == keyword:
            return url_entries
    return None
#Here is an example showing a sequence of interactions:
index = crawl_web('http://www.udacity.com/cs101x/index.html')
print lookup(index, 'good')
#>>> [['http://www.udacity.com/cs101x/index.html', 0],
#>>> ['http://www.udacity.com/cs101x/crawling.html', 0]]
record_user_click(index, 'good', 'http://www.udacity.com/cs101x/crawling.html')
print lookup(index, 'good')
#>>> [['http://www.udacity.com/cs101x/index.html', 0],
#>>> ['http://www.udacity.com/cs101x/crawling.html', 1]]
index =crawl_web('http://other.page/morestuff.html')
print lookup(index, 'Good')
|
[
"[email protected]"
] | |
cd0024be46f1fc0cd047791143d3781a9383b34e
|
90f2652b64aabdce12292fc3ebd0d44bedf3c8d3
|
/src/predict_song.py
|
8e5fe573accef865787599da6559e2e016c23630
|
[] |
no_license
|
wsonguga/scg_code
|
c62837cc62f628087142b253fc25013522cd3bc0
|
1b4d43111facf385b524ff26b1b0fdc5d3ecb0ab
|
refs/heads/main
| 2023-05-01T22:21:26.583762 | 2021-05-19T23:43:32 | 2021-05-19T23:43:32 | 368,540,206 | 0 | 0 | null | 2021-05-18T13:28:00 | 2021-05-18T13:27:59 | null |
UTF-8
|
Python
| false | false | 2,112 |
py
|
import torch
import sys, os
from utils import get_data, Model, test_model, sort_dataset
# Model hyper-parameters -- must match the values used during training,
# since the saved state dict is loaded into this architecture.
NUM_LABELS = 4
HIDDEN_SIZE = 1024
NUM_LAYERS = 3

if __name__ == "__main__":
    # Defaults used when no command-line arguments are supplied.
    file_name = "real_test_timesorted"
    data_path = "../data/real_regression_data"
    out_path = "../outputs/real_regression_data"
    if(len(sys.argv) > 2):
        # argv[1]: directory containing model.pt; argv[2]: test data file.
        out_path = sys.argv[1]
        file_path = sys.argv[2]
        file_name = os.path.splitext(os.path.basename(file_path))[0]
        data_path = os.path.dirname(file_path)
    else:
        print(f"Usage: {sys.argv[0]} model_directory test_data_file")
        print(f"Example: {sys.argv[0]} ../outputs/song ../data/real_regression_data/real_test_truesorted.npy")
        print(f"Example: {sys.argv[0]} ../outputs/real_regression_data ../data/real_regression_data/real_test_timesorted.npy")
        exit()
    print(f"Input: {data_path+file_name}")
    print(f"Output: {out_path}")
    # Inference runs on CPU; swap in the commented line to prefer CUDA.
    device = torch.device('cpu')
    # device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # test_x, test_y, test_Y, min_v, max_v = get_data(
    #     data_path=data_path,
    #     out_path=out_path,
    #     name="real_test_timesorted",
    #     load_values=True,
    #     device=device,
    #     num_labels=NUM_LABELS,
    #     return_extra=True,
    #     drop_extra=2)
    # Load the test set plus the min/max values used to de-normalize
    # predictions inside test_model (see utils.get_data).
    test_x, test_y, test_Y, min_v, max_v = get_data(
        data_path=data_path,
        out_path=out_path,
        name=file_name,
        load_values=True,
        device=device,
        num_labels=NUM_LABELS,
        return_extra=True,
        drop_extra=2)
    model = Model(
        input_size=test_x.shape[1],
        num_labels=NUM_LABELS,
        hidden_size=HIDDEN_SIZE,
        num_layers=NUM_LAYERS)
    model.load_state_dict(torch.load(f"{out_path}/model.pt"))
    model = model.to(device)
    model.eval()
    # Mean absolute error per output label; the H/R/S/D label meanings come
    # from the dataset -- presumably vital-sign channels, TODO confirm.
    losses = test_model(model, test_x, test_Y, min_v, max_v)
    print(f"MAE of H = {losses[0]:.3f}")
    print(f"MAE of R = {losses[1]:.3f}")
    print(f"MAE of S = {losses[2]:.3f}")
    print(f"MAE of D = {losses[3]:.3f}")

# %%
# sort_dataset()
# %%
|
[
"[email protected]"
] | |
d759b98c5650d0cca0a203e76dcbc5a4f8ac7c81
|
efad0649e168b777daffe227cd7d1def854404ed
|
/text/symbols.py
|
56acd92a3acaa33e302facdf3a725c62a531e135
|
[
"MIT"
] |
permissive
|
yiebo/tts_tranformer
|
7b91a4cd84ddde4b70bc62a9536bb921ff9ad6de
|
5ee4c1f07c6934821ec172b40ed6e1d708a66a18
|
refs/heads/master
| 2022-09-19T11:56:23.187160 | 2020-05-24T22:54:45 | 2020-05-24T22:54:45 | 266,638,423 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 381 |
py
|
from text import cmudict
# Inventory of text symbols understood by the model, in a fixed order:
# pad token first, then the special char, punctuation, and letters.
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'

# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as
# uppercase letters):
# _arpabet = ['@' + s for s in cmudict.valid_symbols]

# Export all symbols:
symbols = [_pad, *_special, *_punctuation, *_letters]  # + _arpabet
|
[
"[email protected]"
] | |
3ea9cf0003db433310684c1fd7d1ee820391fe5f
|
b689e2f1abfbd330f1a6ff7532615513cf85ae2f
|
/base/__init__.py
|
8f3df09783ee0f87aad50598e8ddfe5baa356a1d
|
[] |
no_license
|
taohu918/datahouse
|
182dceb6969bef9f943b9ba537d9e895dd607628
|
66a24e059091e55f314ad820fc3ca8adb612c746
|
refs/heads/master
| 2021-04-12T07:51:41.228416 | 2018-03-19T06:46:38 | 2018-03-19T06:46:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 106 |
py
|
# __author__: taohu
from flask import Blueprint
# Blueprint grouping the app's base routes under the name 'base'.
root = Blueprint('base', __name__)
# Imported after `root` exists so views.py can attach routes to it
# (placing the import last avoids a circular import).
from . import views
|
[
"[email protected]"
] | |
fbb8f99928fd1c1ea8d14ab0c5742d65aa2d568d
|
3d2d6448735d6480823e1cb2872c09a53b461897
|
/scrapr.py
|
83c1c558ccb5c6850e3a208bcdc0e5177c5c11a4
|
[] |
no_license
|
miclowe/scrapr
|
ce8c750352818cba9da61b04844c5a42d00c2b61
|
3a535f0f0abe5e61c4381203f53a162f0b470377
|
refs/heads/master
| 2021-05-11T04:47:13.933866 | 2018-04-20T15:58:05 | 2018-04-20T15:58:05 | 117,947,396 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,022 |
py
|
from urllib.request import urlopen
from bs4 import BeautifulSoup as soup

# Scrape the golf leaderboard table and dump it to leaderboard.csv.
my_url = "http://egolferz.com/html/leaderboard.htm"

page = urlopen(my_url)
try:
    html = page.read()
finally:
    # Always release the connection, even if read() fails.
    page.close()

page_soup = soup(html, "html.parser")
table = page_soup.find('table')
table_header = table.find('thead')
table_body = table.find('tbody')

# Get list of tournaments: skip the first column (name/rank) and the
# last one (total points) -- only per-tournament columns are wanted.
headings = table_header.findAll('th', {'scope' : 'col'})
tournaments = [h.text for i, h in enumerate(headings)
               if 0 < i < len(headings) - 1]

filename = "leaderboard.csv"
# `with` guarantees the file is closed even if parsing a row raises.
with open(filename, "w") as f:
    f.write(", ".join(["Name"] + tournaments) + "\n")

    # Get each member's stats, one CSV row per table row.
    for r in table_body.findAll('tr'):
        member = []
        # Some rows link the name (<a>), others put it in the row header (<th>).
        name = r.a.text if r.a else r.th.text
        member.append(name)
        # Exclude ranking and total points (cells carrying a CSS class).
        cols = r.findAll('td', {'class' : None})
        member += [ele.text for ele in cols]
        f.write(", ".join(member) + "\n")
|
[
"[email protected]"
] | |
cbcd7950fddfd0dafb705df1c66a6d7d300a2b0f
|
714354bb550f1e7a46507be9ff211b9f83e7811c
|
/PythEnv/Scripts/pipwin-script.py
|
c4e5d8c8698d9564b5be71306f3ae52bd43ba710
|
[] |
no_license
|
sachin801/offline-virtual-assistant
|
40b83ce3a4a73b7a496924207ff2516d010877a1
|
33eb57cf70bc5dafb2ccdc097a98daa329da7423
|
refs/heads/main
| 2023-08-11T15:56:08.407528 | 2021-10-05T15:03:08 | 2021-10-05T15:03:08 | 413,783,446 | 0 | 0 | null | 2021-10-05T12:56:50 | 2021-10-05T11:09:46 |
Python
|
UTF-8
|
Python
| false | false | 1,006 |
py
|
#!c:\users\sachi\pycharmprojects\speechrecog\pythenv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pipwin==0.5.1','console_scripts','pipwin'
# NOTE(review): setuptools-generated console-script shim; it is regenerated
# on reinstall, so manual edits here will be lost.
import re
import sys

# for compatibility with easy_install; see #2198
__requires__ = 'pipwin==0.5.1'

try:
    # Python 3.8+: stdlib entry-point metadata.
    from importlib.metadata import distribution
except ImportError:
    try:
        # Backport package for older interpreters.
        from importlib_metadata import distribution
    except ImportError:
        # Legacy fallback: pkg_resources provides load_entry_point directly.
        from pkg_resources import load_entry_point


def importlib_load_entry_point(spec, group, name):
    """Resolve and load the (group, name) entry point of *spec*'s distribution."""
    dist_name, _, _ = spec.partition('==')
    matches = (
        entry_point
        for entry_point in distribution(dist_name).entry_points
        if entry_point.group == group and entry_point.name == name
    )
    return next(matches).load()


# Only used when pkg_resources did not already define load_entry_point.
globals().setdefault('load_entry_point', importlib_load_entry_point)


if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] reads as "pipwin".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('pipwin==0.5.1', 'console_scripts', 'pipwin')())
|
[
"[email protected]"
] | |
e9f53d472593fba30c21cd52106df23b0220e3dd
|
c0abec5fa649a5d3c4fd50604abd53d416f73229
|
/data_table_experian.py
|
1a5bc747bce0898f15aff624b340858afec31fa0
|
[] |
no_license
|
gitter-badger/city_comparison
|
7413ce66015bf9cd7ed3b07a3ee712c1ecfe013a
|
3b550cf2a2549b68ed28a52c68d6084ac19da72e
|
refs/heads/master
| 2020-05-31T11:56:57.480970 | 2019-06-04T07:52:54 | 2019-06-04T07:52:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 685 |
py
|
"""
Module for parsing any Experian related data in data/experian.
"""
import pandas
from data_table import DataTable
class Experian(DataTable):
    """Table of Experian data.

    Reads the raw CSV and normalizes the join keys: state and city names
    are lower-cased so they line up with the other data tables.
    """

    @staticmethod
    def read(file_path):
        """Load the Experian CSV with lower-cased 'State'/'City' columns."""
        frame = pandas.read_csv(file_path)
        for column in ('State', 'City'):
            frame[column] = frame[column].str.lower()
        return frame

    @staticmethod
    def get_exact_matching_key():
        # By returning `None` as key, we use `index` as key.
        # return None
        return 'index'

    @staticmethod
    def get_state_key():
        return 'State'

    @staticmethod
    def get_city_key():
        return 'City'

    @staticmethod
    def get_population_key():
        """This table carries no population column."""
        return None
|
[
"[email protected]"
] | |
d436717586b2fba2b0f93f97db6e7f5564a9fea3
|
c0c0f64c1a543ab42ac0e81cfbda27f7c3a51c9b
|
/parse_iptables.py
|
cd8fcddaaa9dc8dd504974d20dd5dbc965549a63
|
[] |
no_license
|
superekcah/utils
|
c74d437bee2f3cf3d9589c0f86f4bbf43305f014
|
acb5d1f11312509427e8e702aa02d83dd66b8853
|
refs/heads/master
| 2021-01-21T14:02:01.590118 | 2016-03-21T13:27:58 | 2016-03-21T13:27:58 | 9,028,392 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,684 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import subprocess
cur_table = None
all_chains = {}
top_chains = {}
tables = {}
class Chain(object):
    """One iptables chain: its policy, packet/byte counters and rule list."""

    def __init__(self, name, table, policy, packet, byte):
        self.name = name
        self.table = table
        self.policy = policy
        # Counters arrive as strings from the "[pkts:bytes]" prefix.
        self.packet = int(packet)
        self.byte = int(byte)
        self.rules = []

    def __repr__(self):
        return "%s(%s)" % (self.__class__, self.name)

    def __str__(self):
        return ("%s %s [%d:%d]"
                % (self.name, self.policy, self.packet, self.byte))

    def add_rule(self, cond, target, pkt, byte):
        """Append one rule; *target* is an action string or a nested Chain."""
        self.rules.append((cond, target, pkt, byte))

    def pretty_print(self, indent=''):
        """Recursively print rules, indenting jumps into user-defined chains."""
        for cond, target, pkt, byte in self.rules:
            jumps_to_chain = isinstance(target, Chain)
            label = target.name if jumps_to_chain else target
            print(indent + "-A %s %s -j %s\t[%d:%d]"
                  % (self.name, cond, label, pkt, byte))
            if jumps_to_chain:
                target.pretty_print(indent + ' ')
def parse_line(line):
    """Parse one line of `iptables-save -c` output, updating module state.

    Recognized line kinds: '#' comment, '*table', ':CHAIN POLICY [p:b]'
    chain header, and '[p:b] -A CHAIN ... -j TARGET' rule.  Populates the
    module-level all_chains/top_chains dicts and tracks the current table
    name in the cur_table global.
    """
    if not line:
        return
    c = line[0:1]
    if c == '#':
        # comment
        pass
    elif c == '*':
        # table
        global cur_table
        cur_table = line[1:].strip()
    elif c == ':':
        # chain
        parts = line[1:].split()
        chain = parts[0]
        policy = parts[1]
        counts = parts[2][1:-1].split(':')  # "[p:b]" -> ["p", "b"]
        chain_obj = Chain(**{'name': chain, 'policy': policy,
                             'table': cur_table,
                             'packet': counts[0], 'byte': counts[1]})
        # Chains are keyed "table:chain"; every chain starts as top-level
        # and is demoted once some rule jumps into it.
        key = cur_table + ":" + chain
        all_chains[key] = chain_obj
        top_chains[key] = chain_obj
    elif c == '[':
        # rules
        parts = line.split()
        counts = parts[0][1:-1].split(':')
        chain_name = parts[2]
        key = cur_table + ":" + chain_name
        if key not in all_chains:
            print("Error: unknown chain %s" % chain_name)
            return
        chain = all_chains[key]
        # Everything between the chain name and "-j" is the match condition.
        remain_parts = ' '.join(parts[3:]).split('-j')
        cond = remain_parts[0].strip()
        target = remain_parts[1].strip()
        if target.split()[0] in ['SNAT', 'DNAT', 'ACCEPT', 'DROP',
                                 'MASQUERADE', 'CHECKSUM', 'QUEUE',
                                 'MARK', 'RETURN', 'REJECT', 'LOG',
                                 'REDIRECT']:
            # Builtin/terminal target: store the action string itself.
            chain.add_rule(cond, target, int(counts[0]), int(counts[1]))
        else:
            # Jump into a user-defined chain: link the Chain object and
            # remove it from top_chains since it is now referenced.
            key = cur_table + ":" + target.split()[0]
            chain.add_rule(cond, all_chains[key],
                           int(counts[0]), int(counts[1]))
            if key in top_chains:
                del top_chains[key]
def group_results():
    """Bucket every top-level chain into the `tables` dict, keyed by table name."""
    for key in top_chains:
        table_name = key.split(":")[0]
        tables.setdefault(table_name, []).append(top_chains[key])
if __name__ == '__main__':
    # Either parse a saved dump given on the command line, or capture the
    # live ruleset via `sudo iptables-save -c`.
    if len(sys.argv) == 2:
        with open(sys.argv[1]) as f:
            for line in f.readlines():
                parse_line(line)
    else:
        popen = subprocess.Popen("sudo iptables-save -c",
                                 shell=True, stdout=subprocess.PIPE)
        content = popen.stdout.readlines()
        # NOTE(review): on Python 3 these lines are bytes while parse_line
        # compares against str literals -- confirm the intended interpreter
        # (shebang suggests the system python).
        for line in content:
            parse_line(line)
    group_results()
    # Print each table with its top-level chains and their rule trees.
    for tbl in tables:
        print("-t %s" % tbl)
        for chain in tables[tbl]:
            print("%s\t[%d:%d]" % (chain.name, chain.packet, chain.byte))
            chain.pretty_print(' ')
            print('')
|
[
"[email protected]"
] | |
421f03f6b2917a41a6a98a1194a47ef03c84abae
|
1c5e7bdc574bb1ce66ff4382838ea3dbc0e290b6
|
/api.py
|
f6216c1f21a91cf91aeece58bd4e8660b60260f8
|
[] |
no_license
|
MatthewHowitt/SwoopDeveloperAssessment
|
bd442d00f02226263fd532ae61e214a27f3c303e
|
f9391be0f02c85a1e1dbaf7206e6c34a7d27326c
|
refs/heads/master
| 2020-05-14T10:30:56.999265 | 2019-04-16T20:42:20 | 2019-04-16T20:42:20 | 181,763,411 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,495 |
py
|
import json
import requests
from flask import Flask, request, Response
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
parser = reqparse.RequestParser()
# This function takes some value as a string: if it can be converted to an
# int the converted value is returned, otherwise (falsy or unparsable) 0.
def convertToInt(string):
    if not string:
        return 0
    try:
        return int(string)
    except ValueError:
        return 0
class Math(Resource):
    """REST resource exposing integer addition at /math/add."""

    # Adds two arguments passed as part of the url as n1 and n2
    def get(self):
        n1 = convertToInt(request.args.get('n1'))
        n2 = convertToInt(request.args.get('n2'))
        result = n1 + n2
        return {'result': result}

    # Adds two arguments passed as part of a form as n1 and n2
    def post(self):
        # Arguments are registered on the shared module-level parser; the
        # try/except swallows the error raised when they have already been
        # added by an earlier request.
        try:
            parser.add_argument('n1', location='form')
            parser.add_argument('n2', location='form')
        except:
            pass
        args = parser.parse_args()
        n1 = convertToInt(args['n1'])
        n2 = convertToInt(args['n2'])
        result = n1 + n2
        return {'result':result}
class Airports(Resource):
    """Proxy resource for the iatacodes.org airport list."""

    # Get a list of airports from a 3rd party service, return error message if call fails
    def get(self):
        # SECURITY(review): the API key is hard-coded in the URL and
        # verify=False disables TLS certificate checking -- move the key to
        # configuration and re-enable verification.
        try:
            response = requests.get(
                "https://iatacodes.org/api/v6/airports?api_key=472dcce9-ed45-43ec-8492-9cdc10122de9",
                verify=False)
            return response.json()
        except requests.exceptions.RequestException as e:
            # Bug fix: the exception object itself is not JSON-serializable,
            # so returning it made flask-restful blow up with a 500.  Return
            # a plain error payload with a 502 Bad Gateway status instead.
            return {'error': str(e)}, 502
api.add_resource(Math, '/math/add')
api.add_resource(Airports, '/airports')
if __name__ == '__main__':
app.run(port='5002')
|
[
"[email protected]"
] | |
aba5bc122a4b99dc9819faa4500dbd67585597b0
|
e535f59053b545b493c93c9945aa054ad1335178
|
/linkml/generators/javagen.py
|
7183d3d6c2625bd023d3550244e7b7c94f099acf
|
[
"CC0-1.0"
] |
permissive
|
pabloalarconm/linkml
|
9308669d5baba2a2c60fe79f31f737e87ed59295
|
5ef4b2f0e89698ffc0db693fdba68d1306438749
|
refs/heads/main
| 2023-08-25T14:41:58.419628 | 2021-10-02T02:04:06 | 2021-10-02T02:04:06 | 411,990,387 | 0 | 0 |
CC0-1.0
| 2021-09-30T08:49:50 | 2021-09-30T08:49:50 | null |
UTF-8
|
Python
| false | false | 3,382 |
py
|
import os
from typing import Optional, Tuple, List, Union, TextIO, Callable, Dict, Iterator, Set
import click
from jinja2 import Template
from linkml_runtime.utils.schemaview import SchemaView
from linkml.generators import JAVA_GEN_VERSION
from linkml_runtime.linkml_model.meta import SchemaDefinition, TypeDefinition
from linkml.generators.oocodegen import OOCodeGenerator
from linkml.utils.generator import shared_arguments
# Jinja2 template used when no --template_file is supplied.  `doc` is the
# rendered document and `cls` the single class it contains (see serialize()).
default_template = """
{#-
Jinja2 Template for a Java class with Lombok @Data annotation
Annotation details at https://projectlombok.org
-#}
package {{ doc.package }};
import java.util.List;
import lombok.*;
/**
{{ cls.source_class.description }}
**/
@Data
@EqualsAndHashCode(callSuper=false)
public class {{ cls.name }} {% if cls.is_a -%} extends {{ cls.is_a }} {%- endif %} {
{% for f in cls.fields %}
private {{f.range}} {{ f.name }};
{%- endfor %}
}"""

# LinkML base type -> Java type.  Bases with no entry pass through unchanged
# (see JavaGenerator.map_type).
TYPEMAP = {
    "str": "String",
    "int": "Integer",
    "float": "Float",
    "Bool": "Boolean",
    "XSDDate": "String",
    "URIorCURIE": "String"
}
class JavaGenerator(OOCodeGenerator):
    """Generates one Lombok-annotated Java class file per LinkML class."""

    generatorname = os.path.basename(__file__)
    generatorversion = JAVA_GEN_VERSION
    valid_formats = ['java']
    visit_all_class_slots = False

    def __init__(self, schema: Union[str, TextIO, SchemaDefinition],
                 package: str = None,
                 template_file: str = None,
                 format: str = valid_formats[0],
                 genmeta: bool=False, gen_classvars: bool=True, gen_slots: bool=True, **kwargs) -> None:
        # NOTE(review): `format`, `genmeta`, `gen_classvars`, `gen_slots` and
        # **kwargs are accepted but never used here -- confirm whether the
        # base class consumes them or they can be dropped.
        self.sourcefile = schema
        self.schemaview = SchemaView(schema)
        self.schema = self.schemaview.schema
        self.package = package
        self.template_file = template_file

    def map_type(self, t: TypeDefinition) -> str:
        """Map a LinkML base type to its Java equivalent (unknowns pass through)."""
        return TYPEMAP.get(t.base, t.base)

    def serialize(self, directory: str) -> None:
        """Render every class document to <directory>/<ClassName>.java."""
        sv = self.schemaview  # NOTE(review): unused local
        # Caller-supplied template wins over the module default.
        if self.template_file is not None:
            with open(self.template_file) as template_file:
                template_obj = Template(template_file.read())
        else:
            template_obj = Template(default_template)
        oodocs = self.create_documents()
        self.directory = directory
        for oodoc in oodocs:
            # Each document holds exactly one class for Java output.
            cls = oodoc.classes[0]
            code = template_obj.render(doc=oodoc, cls=cls)
            os.makedirs(directory, exist_ok=True)
            filename = f'{oodoc.name}.java'
            path = os.path.join(directory, filename)
            with open(path, 'w') as stream:
                stream.write(code)
@shared_arguments(JavaGenerator)
@click.option("--output_directory", default="output", help="Output directory for individually generated class files")
@click.option("--package", help="Package name where relevant for generated class files")
@click.option("--template_file", help="Optional jinja2 template to use for class generation")
@click.command()
def cli(yamlfile, output_directory=None, package=None, template_file=None, head=True, emit_metadata=False, genmeta=False, classvars=True, slots=True, **args):
    """Generate java classes to represent a LinkML model"""
    # The docstring above doubles as the click command's help text.
    JavaGenerator(yamlfile, package=package, template_file=template_file, emit_metadata=head, genmeta=genmeta, gen_classvars=classvars, gen_slots=slots, **args).serialize(output_directory)


if __name__ == '__main__':
    cli()
|
[
"[email protected]"
] | |
064ac56fc601ce0a60b6556fb119e8c841de5580
|
9bca04376f3c34801f4a79127d5c6e2d5599d0de
|
/src/posts/models.py
|
3809c2509430c6a7add94df4996171e0b4713188
|
[] |
no_license
|
kasin-ua/prach
|
ec99880b6f3dee13ad97343ca4f39a1489dfc036
|
fa2d1c2875b5c47d80c79cb050e5ee08e8d3f683
|
refs/heads/master
| 2021-01-10T09:40:08.736328 | 2016-03-11T08:46:18 | 2016-03-11T08:46:18 | 53,467,775 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 945 |
py
|
from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
def upload_location(instance, filename):
    """Place each upload in a directory named after the owning instance's id."""
    return "{}/{}".format(instance.id, filename)
class Post(models.Model):
    """Blog post with an optional image stored under <id>/<filename>."""

    title = models.CharField(max_length=120)
    image = models.ImageField(upload_to=upload_location,
                              null=True,
                              blank=True,
                              #width_field="url_width",
                              #height_field="url_height"
                              )
    content = models.TextField()
    # NOTE(review): auto_now/auto_now_add look swapped -- `timestamp`
    # refreshes on every save while `update` is set only at creation.
    # Confirm which field is meant to be the creation time.
    timestamp = models.DateTimeField(auto_now=True, auto_now_add=False)
    update = models.DateTimeField(auto_now=False, auto_now_add=True )

    def __unicode__(self):
        # Python 2 string representation (mirrors __str__).
        return self.title

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Reverse the namespaced detail route for this post.
        return reverse("posts:detail", kwargs={"id": self.id})

    #class Meta:
    #    ordering = ["-timestamp", "-updated"]
|
[
"[email protected]"
] | |
8c1f361b5057c52de52bc84be3613208359ed794
|
18754ca7a4d5ac749ad29a1294cc2eefb1eb5442
|
/gen_code_for_structure_reader/ggdefs_h_specialize.py
|
3191367349fc858a1f7e97190c8871008e8d9ed2
|
[] |
no_license
|
abc19899/gen_code_for_structure_reader
|
dbe00ed33a2341bc031fa5f85cb74dd12a2d8087
|
a3116a3eca4bff65c42d99c23d6e1ad28394674e
|
refs/heads/master
| 2021-01-14T08:17:53.168001 | 2017-02-15T07:51:54 | 2017-02-15T07:51:54 | 82,034,369 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,527 |
py
|
# encoding=utf-8
""" ggdefs.h有一些逻辑不能通过代码分析来获得, 这里手动处理
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Manually maintained macro values that cannot be recovered by parsing
# ggdefs.h (array bounds, country ids, award counts, ...).
macro_dict = {
    'MAX_PLAYER_NAME_LEN': 16,
    'MAX_INSET_JEWEL_NUM': 4,
    'MAX_SUBATTR_NUM': 15,
    'MAX_EQUIP_NUM': 15,
    'MAX_MERIDIAN_TYPE': 7,
    'MAX_EXTRA_ATTR_NUM': 4,
    'MAX_SERVER_NAME_LEN': 16,
    'MAX_XIANLING_ZIZHI_NUM': 5,
    'QIN_COUNTRY_ID': 0,
    'CHU_COUNTRY_ID': 2,
    'LOGIN_AWARD_NUM': 7,
    'num_size': 999999999,  # presumably a sentinel for "unbounded" -- TODO confirm
    'MAX_PLAYER_RANK_NUM': 25,
}
from structure_reader.structure_reader import *
from ctypes import *
structure_addons = {
    # key: structure_name; value: tuple(add_field, add_after_column).
    # add_field is a Field; add_after_column names the column to insert
    # after (None means insert at the beginning).  When a structure needs
    # several additions, the value is a list of such tuples.
    'ret_get_black_list': (Field(('BlackRoleData', 'mSize'), 'black_role_data'), 'mSize'),
    'ReqUpdateTask': (Field((c_byte, 'size'), 'script'), 'size'),
    'ReqEquipSkill': (Field(('_Equip_Skill_', 'change_num'), 'equipskill'), 'change_num'),
    'TeamRev': (Field(('TeamItem', 'num'), 'team_item'), 'num'),
    'RetRaid': [
        (Field((c_byte, 'contentSize'), 'battle_content'), 'contentSize'),
        (Field(('Package_item', 'loot_item_num'), 'loot_item'), 'battle_content'),
        (Field(('Package_item', 'complete_raid_item_num'), 'complete_raid_item'), 'loot_item'),
    ],
    'RetJjcChallengeList': (Field((c_int, 'luck_rank_num'), 'luck_rank'), 'luck_rank_num'),
    'RetJjcChallengeRank': (Field((c_byte, 'battleSize'), 'battle_content'), 'rongyu'),
    'RetJjcFightReplay': (Field((c_byte, 'contentSize'), 'battle_content'), 'contentSize'),
    # todo: complete it. (hard work...)
    # todo: can use A::B in Field?
    'PushChangeMap': (Field(('RetScenePlayerlist::OtherPlayer', 'num'), 'other_player'), 'num'),
    'RetScenePlayerlist.OtherPlayer': [
        (Field(('RetScenePlayerlist::BonusBuff', 'bonus_buff_num'), 'bonus_buff'), 'string_bonus_buf_num'),
        (Field(('RetScenePlayerlist::StringBonusBuff', 'string_bonus_buf_num'), 'string_bonus_buf'), 'bonus_buff'),
    ],
    'RetScenePlayerlist.StringBonusBuff': (Field((c_char, 'value_len'), 'value'), 'value_len'),
    'RetScenePlayerlist': (Field(('OtherPlayer', 'player_num'), 'other_player'), 'player_num'),
}
|
[
"[email protected]"
] | |
c3d88ba20ee2807adfc29cc28181217a65828687
|
afd5cdcf13b5c940f52e2070253c6c7a60972479
|
/votes/admin.py
|
fc08d108923533ec0848aff6d7a418388047aec3
|
[] |
no_license
|
abiola814/voting-system
|
215c035e907c0b379d4fd0a06243de1ca983d15d
|
0025de00156f6b989df98639b05876bd5a05cad1
|
refs/heads/main
| 2023-07-10T13:23:39.571809 | 2021-08-10T19:01:26 | 2021-08-10T19:01:26 | 393,693,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 384 |
py
|
from django.contrib.admin import AdminSite
from django.contrib import admin
from .models import Poll, Choice, Vote
class Myadminsite(AdminSite):
    # Custom admin site with a voting-specific page header.
    site_header = 'voting system'

# Register the voting models on the custom site...
adminsite= Myadminsite(name = 'my-site-admin')
adminsite.register(Poll)
adminsite.register(Choice)
adminsite.register(Vote)
# ...and also on the default Django admin so both UIs can manage them.
admin.site.register(Poll)
admin.site.register(Choice)
admin.site.register(Vote)
|
[
"[email protected]"
] | |
880e71b019209d46ac553383a0934d5d77bf65a0
|
6c4cd337bc19ba0a7f0f863645ba45275cea122d
|
/thispersondoesnotexist/__init__.py
|
267dc98cdf2f91bd3d35cbc648ce1c69cbda1006
|
[
"Apache-2.0"
] |
permissive
|
David-Lor/ThisPersonDoesNotExistAPI
|
152c094fe9dfa3bdc2a382d49d10c5ee1c884618
|
6f299de6d000e6df63125ea553be675c6d4d835d
|
refs/heads/master
| 2023-08-09T18:29:12.822580 | 2023-07-31T21:48:24 | 2023-07-31T21:48:24 | 171,178,270 | 106 | 20 |
Apache-2.0
| 2023-08-03T07:42:09 | 2019-02-17T22:10:11 |
Python
|
UTF-8
|
Python
| false | false | 74 |
py
|
from .online_getter import *
from .helpers import *
from .assets import *
|
[
"[email protected]"
] | |
70455a839efde333033f93f0bb5305f4ad5201a0
|
f447d350a98c8f2e303ff9ac5bc4d37b7922d45e
|
/run_viz.py
|
253824947a7d62ef99a739d1ffa7026b2f7fce40
|
[] |
no_license
|
Nrikolo/UCAFleet
|
c2866f26b1995290f0306c0ea7f39e91b5b571e5
|
0692634ae45788eebff3784a3579150d31d324f5
|
refs/heads/master
| 2021-04-15T09:46:08.652473 | 2018-07-13T20:47:55 | 2018-07-13T20:47:55 | 126,915,398 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,016 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 16 15:47:22 2018
@author: Riko
"""
# TODO: Visualization of parcels state. A pie chart where each sector indicates
# either {awaiting, onroute, delivered}
# TODO: Visualization of parcels state. A line chart where each line indicates
# the total number of parcels {awaiting, onroute} as a function of time
# --> should indicate steady-state if reached
# TODO: Payload utilization (how much of the payload on the uav is being used)
# Compute average, min, max, stdev payload utilization (%) for all flights
# TODO: In preparation for batch running, compute the following metrics to qualify a sim:
# Percent parcels delivered,
# {min, max, average, stdev} age of parcels,
# {min, max, average, stdev} % utilization of uavs
# TODO: visualization should be only done for airports and uavs agent types,
# would probably require a null function for portrayal of parcels

# run.py -- launch the visualization server configured in server.py.
from server import server
server.port = 8521 # The default
server.launch()
|
[
"[email protected]"
] | |
5c3f2491ddd794ce77ac1c0e00010185f159421b
|
520af1b4cef46dd77cdf20026a47b987bdd6746b
|
/Project 5/1_3.py
|
b841a6e79de9222ad3c1945c3ca7fe4fca26bdb3
|
[] |
no_license
|
Laura9505/EE219
|
1736047e4a19917ab80781c83ee38dae4527b0ac
|
215c46dea9016826d1a8dc0bba50ace2fe74488e
|
refs/heads/master
| 2020-03-11T18:48:09.200345 | 2019-01-06T01:07:55 | 2019-01-06T01:07:55 | 130,179,136 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,857 |
py
|
import json
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import statsmodels.api as sm
import matplotlib.pyplot as plt
tweet_file = ['#gohawks', '#gopatriots', '#nfl', '#patriots', '#sb49', '#superbowl']
features_3 = ['tweets_num', 'retweets_num', 'sum_followers', 'max_followers', 'URLs_num',
'authors_num', 'mentions_num', 'ranking_score', 'hashtags_num']
def plot_feature(fea_val, pred, hashtag, feature_name):
    """Scatter predictions against one feature and save as q1.3_<hashtag>_<feature>.png."""
    plt.scatter(fea_val, pred, color='blue')
    plt.xlabel(feature_name)
    plt.ylabel('predictant')
    plt.title('# tweet for next hour vs. {} (tweets_{})'.format(feature_name, hashtag))
    plt.grid(True)
    plt.savefig('q1.3_{}_{}.png'.format(hashtag, feature_name))
for hashtag in tweet_file:
    # extract_feature: one pass over the raw tweet JSON.
    # label maps hashtag -> [file name, number of tweets in that file].
    label = {'#gohawks' : ['tweets_#gohawks.txt', 188136], '#gopatriots' : ['tweets_#gopatriots.txt', 26232],
             '#nfl' : ['tweets_#nfl.txt', 259024], '#patriots' : ['tweets_#patriots.txt', 489713],
             '#sb49' : ['tweets_#sb49.txt', 826951], '#superbowl' : ['tweets_#superbowl.txt', 1348767]}
    time_stamps, author_names, user_followers, retweet, url_citation_num, mention_num, ranking_scores, hashtag_num = [],[],[],[],[],[],[],[]
    input_file = open('./tweet_data/' + label[hashtag][0], encoding = 'utf-8')
    for (line, index) in zip(input_file, range(0, label[hashtag][1])):
        data = json.loads(line)
        time_stamps.append(data['citation_date'])
        author_name = data['author']['nick']
        original_author_name = data['original_author']['nick']
        user_followers.append(data['author']['followers'])
        # A tweet whose author differs from the original author is a retweet.
        if author_name != original_author_name:
            retweet.append(1)
        else:
            retweet.append(0)
        url_citation_num.append(len(data['tweet']['entities']['urls']))
        author_names.append(author_name)
        mention_num.append(len(data['tweet']['entities']['user_mentions']))
        ranking_scores.append(data['metrics']['ranking_score'])
        hashtag_num.append(data['title'].count('#'))
    input_file.close()
    # Aggregate the per-tweet lists into hourly buckets.
    prev_hour = int((max(time_stamps)-min(time_stamps))/3600)+1
    hour_tweet_num = [0] * prev_hour
    hour_retweet_num = [0] * prev_hour
    hour_follower_sum = [0] * prev_hour
    max_hour_followers_num = [0] * prev_hour
    hour_time_of_the_day = [0] * prev_hour
    hour_url_citation_num = [0] * prev_hour
    hour_author_num = [0] * prev_hour
    hour_author_set = [0] * prev_hour
    # Distinct sets per hour (a [set()]*n literal would alias one set).
    for i in range(0, prev_hour):
        hour_author_set[i] = set([])
    hour_mention_num = [0] * prev_hour
    hour_ranking_scores_tot = [0.0] * prev_hour
    hour_hashtag_num = [0] * prev_hour
    start_time = min(time_stamps)
    for i in range(0, len(hour_time_of_the_day)):
        hour_time_of_the_day[i] = i % 24
    for i in range(0, label[hashtag][1]):
        pres_hour = int((time_stamps[i]-start_time)/3600)
        hour_tweet_num[pres_hour] += 1
        if retweet[i] == 1:
            hour_retweet_num[pres_hour] += 1
        hour_follower_sum[pres_hour] += user_followers[i]
        if user_followers[i] > max_hour_followers_num[pres_hour]:
            max_hour_followers_num[pres_hour] = user_followers[i]
        hour_url_citation_num[pres_hour] += url_citation_num[i]
        hour_author_set[pres_hour].add(author_names[i])
        hour_mention_num[pres_hour] += mention_num[i]
        hour_ranking_scores_tot[pres_hour] += ranking_scores[i]
        hour_hashtag_num[pres_hour] += hashtag_num[i]
    for i in range(0, len(hour_author_set)):
        hour_author_num[i] = len(hour_author_set[i])
    # Predictant: tweet count of the NEXT hour (0 appended for the final hour).
    target_value = hour_tweet_num[1:]
    target_value.append(0)
    data = np.array([hour_tweet_num, hour_retweet_num, hour_follower_sum, max_hour_followers_num,hour_time_of_the_day,
                     hour_url_citation_num, hour_author_num,hour_mention_num, hour_ranking_scores_tot, hour_hashtag_num, target_value])
    data = np.transpose(data)
    df = DataFrame(data)
    df.columns = ['tweets_num', 'retweets_num', 'sum_followers', 'max_followers', 'time_of_day',
                  'URLs_num', 'authors_num', 'mentions_num', 'ranking_score', 'hashtags_num', 'target_value']
    training_data = df
    # one-hot encoding of the hour-of-day feature into 24 indicator columns
    time_of_day_set = range(0,24)
    for time_of_day in time_of_day_set:
        time_of_day_to_add = []
        for time_of_day_item in training_data['time_of_day']:
            if time_of_day_item == time_of_day:
                time_of_day_to_add.append(1)
            else:
                time_of_day_to_add.append(0)
        # Insert before the last column so target_value stays rightmost.
        training_data.insert(training_data.shape[1]-1, str(time_of_day)+'th_hour', time_of_day_to_add)
    # linear regression on all remaining features
    training_data.drop('time_of_day', axis = 1, inplace = True)
    target_data = training_data.pop('target_value')
    lr = LinearRegression()
    lr_result = lr.fit(training_data, target_data)
    lr_pred = lr.predict(training_data)
    print ('rmse for tweets_' +hashtag+ ': ' + str(np.sqrt(mean_squared_error(target_data, lr_pred))))
    # perform t-test via statsmodels OLS to obtain per-feature p-values
    model = sm.OLS(target_data, training_data)
    result = model.fit()
    print (result.summary())
    # Only the 9 original (non-one-hot) features are ranked.
    p_val = result.pvalues[0:9]
    print ('P-values for each feature of tweets_' + hashtag + ' are: ')
    print (p_val)
    # Pick the 3 features with the smallest p-values and plot each of them.
    index = sorted(range(len(p_val)), key = lambda i: p_val[i])[0:3]
    print('top 3 features are:')
    print(features_3[index[0]], features_3[index[1]], features_3[index[2]])
    plot_feature(training_data[features_3[index[0]]], lr_pred, hashtag, features_3[index[0]])
    plot_feature(training_data[features_3[index[1]]], lr_pred, hashtag, features_3[index[1]])
    plot_feature(training_data[features_3[index[2]]], lr_pred, hashtag, features_3[index[2]])
    print ('='*50)
|
[
"[email protected]"
] | |
cfb5f2ac07b77f57fd488fc4de2d5de1a653a443
|
eac59df52c001e4f549c1fe6a5d1676d1b183140
|
/setup.py
|
e1422878c3d7ec5600b572ce37b424dfa500e964
|
[
"Apache-2.0"
] |
permissive
|
aleksrgarkusha/pcs
|
0558ddf2dc98fead2d05f0dc5844f2ec9a2e3a4b
|
597a2aa020a60473307ef09a8939db1d93657f8a
|
refs/heads/main
| 2023-08-12T05:27:12.195924 | 2021-09-09T22:34:17 | 2021-09-09T22:34:17 | 401,155,101 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,559 |
py
|
import os
import sys
import pathlib
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
    """Placeholder Extension with no sources; building is delegated to CMake."""

    def __init__(self, name):
        super().__init__(name, sources=[])
class CMakeBuild(build_ext):
    """build_ext command that configures and compiles the extension with CMake."""

    def build_extension(self, ext):
        """Run cmake configure + build for *ext* inside self.build_temp."""
        cwd = pathlib.Path().absolute()
        build_temp = pathlib.Path(self.build_temp)
        build_temp.mkdir(parents=True, exist_ok=True)
        extdir = pathlib.Path(self.get_ext_fullpath(ext.name))

        # Honour DEBUG=1 from the environment unless --debug was given.
        debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
        cfg = "Debug" if debug else "Release"

        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(str(extdir.parent.absolute())),
            "-DPYTHON_EXECUTABLE={}".format(sys.executable),
            "-DCMAKE_BUILD_TYPE={}".format(cfg),
            "-DBUILD_PCS_TESTS=OFF",
            "-DBUILD_PCS_COVERAGE=OFF",
            "-DBUILD_PCS_PYTHON_BINDINGS=ON",
        ]
        build_args = ["--"]
        if self.parallel:
            build_args += ["-j{}".format(self.parallel)]

        # Bug fix: restore the original working directory even when a CMake
        # step fails, so an aborted build does not leave the process
        # chdir'd into the temporary build directory.
        os.chdir(str(build_temp))
        try:
            self.spawn(["cmake", str(cwd)] + cmake_args)
            if not self.dry_run:
                self.spawn(["cmake", "--build", "."] + build_args)
        finally:
            os.chdir(str(cwd))
# Package metadata; the single CMakeExtension triggers the CMake build above.
setup(
    name="pypcs",
    version="1.0",
    author="Alexandr Garkusha",
    description="Point cloud semantic segmentation library",
    ext_modules=[CMakeExtension("pypcs")],
    cmdclass={
        "build_ext": CMakeBuild,
    },
)
|
[
"[email protected]"
] | |
026253c8bee620e52af59fce738b1fa370a3c900
|
86955630eebd7a46ba31ae1ac9ccd2100974a65b
|
/scripts/generate_board_depth_ir_image.py
|
2affe9b58d3ca7af93d6dde0f05a19a6acb5f469
|
[] |
no_license
|
yuki-inaho/bundle_adjustment_with_apriltag
|
c503b72b1c77d28064e80c6d240d1631bcb4d6a6
|
a8fe2fa24446d0c42f5713be13f65ef55196eab4
|
refs/heads/main
| 2022-12-28T01:42:25.963749 | 2020-10-08T08:40:50 | 2020-10-08T08:40:50 | 301,601,189 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,126 |
py
|
import numpy as np
import cv2
from pathlib import Path
from utils import (
set_camera_parameter,
set_ir_camera_parameter,
colorize_depth_img,
get_undistortion_module,
undistortion
)
import toml
import argparse
import pdb
# Directory of this script and its parent (used for default CLI paths).
SCRIPT_DIR = str(Path(__file__).resolve().parent)
PARENT_DIR = str(Path(SCRIPT_DIR).parent)

# Named colors for OpenCV drawing calls.  NOTE(review): tuples appear to be
# BGR; 'light_blue' -> (64, 255, 0) renders greenish -- confirm intended.
Color = {
    "red": (0, 0, 255),
    "orange": (0, 64, 255),
    "blue": (255, 0, 0),
    "light_blue": (64, 255, 0),
    "green": (0, 255, 0),
    "black": (0, 0, 0)
}
def parse_args():
    """Parse CLI options: camera config file, color/depth input dirs, output dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg-file', '-c', type=str,
                        default=f'{PARENT_DIR}/cfg/camera_parameter.toml',
                        help='location of a camera parameter file')
    parser.add_argument('--color-input-dir', '-ic', type=str,
                        default=f'{PARENT_DIR}/data',
                        help='location of captured color images')
    parser.add_argument('--depth-input-dir', '-id', type=str,
                        default=f'{PARENT_DIR}/depth',
                        help='location of captured depth images')
    parser.add_argument('--output-dir', '-o', type=str,
                        default=f'{PARENT_DIR}/projected',
                        help='location to save projection images')
    return parser.parse_args()
def read_images(input_dir, frame_name_ary, is_uc16=False):
    """Load one image per frame name from *input_dir*.

    Frame names ending in ``.csv`` are mapped to their matching ``.png``
    files.  With *is_uc16* true the files are read as 16-bit depth images
    (``IMREAD_ANYDEPTH``); otherwise as ordinary color images.
    """
    flags = cv2.IMREAD_ANYDEPTH if is_uc16 else cv2.IMREAD_COLOR
    paths = (
        str(Path(input_dir, frm_name.replace(".csv", ".png")).resolve())
        for frm_name in frame_name_ary
    )
    return [cv2.imread(path, flags) for path in paths]
def draw_points(image, points, color, radius=10):
    """Draw a filled circle at each (x, y) row of *points* on *image*.

    *color* is a key into the module-level ``Color`` BGR table.
    """
    bgr = Color[color]
    for row_idx in range(points.shape[0]):
        center = (points[row_idx, 0], points[row_idx, 1])
        image = cv2.circle(image, center, radius, bgr, -1)  # -1 == filled
    return image
# Total-least-squares plane fit; derivation:
# http://staff.www.ltu.se/~jove/courses/c0002m/least_squares.pdf
def calculate_plane_coefficients(points):
    """Fit a plane to an (N, 3) point set.

    Returns ``(normal, centroid)``: *normal* is the right-singular vector
    of the centered points with the smallest singular value (the plane
    normal, up to sign), *centroid* is the mean of the points.
    """
    centroid = points.mean(0)
    centered = points - centroid
    _, _, vt = np.linalg.svd(centered)
    normal = vt.T[:, -1]
    return normal, centroid
def projection(color_image, marker_points, camera_pose, camera_pose_pre, camera_param):
    """Render a synthetic depth map of the calibration-board plane.

    Transforms the 3-D marker points into the current camera frame, fits a
    plane to them, and fills every pixel inside the markers' convex hull
    with the depth of that plane.  Returns ``(plane_depth, colorized)``
    where *plane_depth* is an int16 depth image in millimeters.
    """
    K = camera_param.intrinsic_matrix
    projected_image = color_image.copy()
    # Compose the incremental pose with the accumulated previous pose.
    camera_pose = camera_pose_pre @ camera_pose
    # Homogeneous coordinates -> rigid transform into the camera frame.
    points_extd = np.c_[marker_points, np.repeat(1, marker_points.shape[0])]
    points_tfm = points_extd @ camera_pose.T[:, :3]
    # NOTE(review): empirical scale corrections (1.2075 in x/y, then a
    # global 0.039/0.03206 tag-size ratio, presumably) -- confirm origin.
    points_tfm[:,[0,1]] *= 1.2075
    points_tfm *= 0.039/0.03206
    #D = np.sqrt(np.square(points_tfm[..., np.newaxis, :] - points_tfm).sum(axis=-1))
    #np.sort(D[:, 0])[:10]
    plane_coefficients, plane_origin = calculate_plane_coefficients(points_tfm)
    # Pinhole projection of the markers into pixel coordinates.
    pts_3d_prj = points_tfm @ K.T
    x_pos = np.int16(pts_3d_prj[:,0]/pts_3d_prj[:,2])
    y_pos = np.int16(pts_3d_prj[:,1]/pts_3d_prj[:,2])
    pts_2d = np.c_[x_pos, y_pos]
    projected_image = draw_points(projected_image, pts_2d, color="orange", radius=7)
    # Rasterize the convex hull of the markers to get the board mask.
    hull = cv2.convexHull(pts_2d.reshape(-1,1,2).astype(np.int32))
    mask = np.zeros(color_image.shape, dtype=np.uint8)
    mask = cv2.cvtColor(cv2.drawContours(mask,[hull], 0, (255, 255, 255), -1), cv2.COLOR_RGB2GRAY)
    y_idx, x_idx = np.where(mask > 0)
    fx, fy = camera_param.focal
    cx, cy = camera_param.center
    # Back-project each masked pixel as a normalized ray (u, v, 1).
    # NOTE(review): np.float is deprecated/removed in modern NumPy.
    u_idx = (x_idx - cx).astype(np.float)/fx
    v_idx = (y_idx - cy).astype(np.float)/fy
    uve = np.c_[u_idx, v_idx, np.repeat(1, u_idx.shape[0])]
    # Ray-plane intersection depth; +0.032 looks like a board-thickness
    # offset in meters -- TODO confirm.
    z_var = (plane_coefficients @ plane_origin)/(uve @ plane_coefficients) + 0.032
    # residual = (np.c_[u_idx*z_var, v_idx*z_var, z_var] - plane_origin) @ plane_coefficients
    # Meters -> millimeters, stored as int16 like the sensor's depth maps.
    z_var_int16 = (z_var * 1000).astype(np.int16)
    plane_depth = np.zeros([color_image.shape[0], color_image.shape[1]], dtype=np.int16)
    plane_depth[y_idx, x_idx] = z_var_int16
    # Otsu threshold on the color image removes dark (non-board) pixels.
    gray_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
    ret2,th_image = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    plane_depth[th_image==0] = 0
    plane_depth_colorized = colorize_depth_img(plane_depth)
    return plane_depth, plane_depth_colorized
def main(cfg_file_path, color_input_dir, depth_input_dir, output_dir):
    """For every captured frame, compare measured depth with the board plane.

    Loads camera parameters, per-frame poses and marker positions, renders
    the expected board-plane depth for each frame, and writes diff/board/raw
    visualizations under PARENT_DIR.  NOTE(review): *output_dir* is accepted
    but unused -- outputs go to hard-coded PARENT_DIR subfolders.
    """
    toml_dict = toml.load(open(cfg_file_path))
    camera_param = set_ir_camera_parameter(toml_dict)
    # Per-frame 4x4 poses flattened one per row, plus marker 3-D positions.
    camera_pose_ary = np.loadtxt(f"{PARENT_DIR}/camera_pose_ary.csv")
    marker_points = np.loadtxt(f"{PARENT_DIR}/markers.csv")
    frame_name_ary = np.loadtxt(f"{PARENT_DIR}/frame_name_list.csv", dtype = "unicode")
    color_images = read_images(color_input_dir, frame_name_ary)
    depth_images = read_images(depth_input_dir, frame_name_ary, is_uc16=True)
    undistorter = get_undistortion_module(toml_dict, str(Path(cfg_file_path).parent))
    camera_pose_pre = np.eye(4)
    n_images = len(color_images)
    diffs = []
    for i, image in enumerate(color_images):
        print(f"{i}/{n_images}")
        camera_pose = camera_pose_ary[i, :].reshape(4,4)
        plane_depth, plane_depth_colorized = projection(
            image, marker_points, camera_pose, camera_pose_pre, camera_param
        )
        cv2.imwrite(f"{PARENT_DIR}/test.png", plane_depth_colorized)
        depth_input = depth_images[i]
        # 65535 is the sensor's invalid-depth sentinel; zero it out.
        depth_input[depth_input == 65535] = 0
        depth_image = undistortion(depth_input, camera_param, undistorter)
        #depth_image = depth_input
        # Signed error between measured depth and the fitted plane (mm).
        diff = depth_image.astype(float) - plane_depth.astype(float)
        diff_abs = np.abs(diff).astype(np.int16)
        diff_colorized = colorize_depth_img(diff_abs)
        diff_colorized[plane_depth==0] = 0
        diff_ext = diff[plane_depth!=0]
        depth_image[plane_depth==0] = 0
        cv2.imwrite(f"{PARENT_DIR}/diff/{i}.png", diff_colorized)
        cv2.imwrite(f"{PARENT_DIR}/board/{i}.png", colorize_depth_img(plane_depth))
        cv2.imwrite(f"{PARENT_DIR}/raw/{i}.png", colorize_depth_img(depth_image))
        cv2.waitKey(10)
        #diffs.extend(diff_ext)
    #hist = np.histogram(diffs)
if __name__ == '__main__':
    # Script entry point: wire the parsed CLI options into main().
    args = parse_args()
    main(args.cfg_file, args.color_input_dir, args.depth_input_dir, args.output_dir)
|
[
"[email protected]"
] | |
638667bb30ef0006c74f375cd417ef7d6e0e0f88
|
52aafe9fe70015ac331727a04d249c5d80779f5c
|
/exer_1_maratona_program.py
|
b39de3bf7fd1d160db32ae7ff3d46ba810f290d3
|
[] |
no_license
|
levi5/exer_progamacao
|
2b782c4c446d81d6d43b74dd55d6861b6050d50e
|
51236f1c221eafa9212cf15d6d4016f9ad63dff4
|
refs/heads/master
| 2021-04-30T05:35:40.918495 | 2018-02-13T18:32:44 | 2018-02-13T18:32:44 | 121,420,213 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,948 |
py
|
import math
class num_min_pg():
    """Minimum-pages contest exercise (kept verbatim; logic is suspect).

    NOTE(review): ``num_palav`` rebinds the loop variable ``x`` inside the
    loop and rebuilds ``texto_1`` on every word, and the ``num1 ==
    len(self.texto)`` case leaves ``texto_1`` untouched -- verify against
    the original problem statement before relying on this class.
    """
    def num_palav(self, num1):
        # Read the text line, remember its raw character count, then split
        # it into words on single spaces.
        self.texto = str (input(''))
        self.tamanho = len(self.texto)
        self.texto = self.texto.split(' ')
        for x in self.texto:
            # Words must be 1..70 characters long per the exercise limits.
            if len(x) >= 1 and len(x) <= 70:
                self.texto_1 = []
                if num1 < len(self.texto):
                    for x in range(0 , num1):
                        self.texto_1.append(self.texto[x])
                elif num1 > len(self.texto):
                    for x in range(0 , len(self.texto)):
                        self.texto_1.append(self.texto[x])
            elif len(x) < 1 or len(x) > 70:
                # Out-of-range word: reset to a placeholder and complain.
                self.texto_1 = []
                y = 'default value'
                self.texto_1.append(y)
                print('erro palavra grande!!')
                continue
    def caract_linha(self, num2, num3):
        # Pages = characters / chars-per-line / lines-per-page, rounded up
        # (the modulo test below approximates a ceiling).
        self.tam = self.tamanho / num3
        self.num_pg = self.tam / num2
        if (self.num_pg % 2) != 0:
            self.num_pg = int(self.num_pg + 1)
    def printer(self):
        # Emit the computed page count.
        print(self.num_pg)
# Driver: read "N L C" (word count, lines per page, chars per line) from one
# input line, validate the contest ranges, then run the page computation.
list_num = []
num1,num2,num3 = 0,0,0
nums = str(input(''))
list_num = nums.split(' ')
if len(list_num) == 3:
    for x in range(0,3):
        if x == 0:
            num1 = int (list_num[x])
        elif x == 1:
            num2 = int (list_num[x])
        elif x == 2:
            num3 = int(list_num[x])
# Range checks per the exercise: 2<=N<=1000, 1<=L<=30, 1<=C<=70.
if num1 >= 2 and num1 <= 1000:
    if num2 >= 1 and num2 <= 30:
        if num3 >= 1 and num3 <= 70:
            x = num_min_pg()
            x.num_palav(num1)
            x.caract_linha(num2, num3)
            x.printer()
        else:
            print('Condição inválida!!! C')
    else:
        print('Condição inválida!!! L')
else:
    print('Condição inválida!!! N')
|
[
"[email protected]"
] | |
b20d17916565894c0ad9d4c6695c25d8b0ded9b1
|
5b5d46b4a47ab365688af03afdbec24e885a2c90
|
/21/21.py
|
19a6901a33b382a6d732eace82edb63fc3f53e03
|
[] |
no_license
|
CA2528357431/python-base--Data-Structures
|
e9e24717ae016c4ca4a15805f261fd48f377ac6b
|
dccbcb27d82f2264947458686900addf2b83faad
|
refs/heads/main
| 2023-07-04T08:32:52.551200 | 2021-07-30T16:21:31 | 2021-07-30T16:21:31 | 386,671,623 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,829 |
py
|
# Binary tree with recursive depth-first traversals and one
# breadth-first (level-order) traversal.
class tree:
    """Binary tree node.

    *root* is the node's key (used as the ordering/sort value); *data* is a
    payload slot left None here.  The later examples carry no payloads.
    """

    def __init__(self, root, left=None, right=None):
        self.nodes = []
        self.root = root
        self.left = left
        self.right = right
        self.data = None

    @property
    def lisp(self):
        """Nested-list (lisp-style) form: [root, left-subtree, right-subtree]."""
        rep = [self.root, None, None]
        if self.left is not None:
            rep[1] = self.left.lisp
        if self.right is not None:
            rep[2] = self.right.lisp
        return rep

    def __str__(self):
        return str(self.lisp)

    # The three depth-first orders differ only in where the root is
    # spliced relative to the left and right subtree walks.
    def first(self):
        """Pre-order traversal: root, then left subtree, then right."""
        left_walk = self.left.first() if self.left is not None else []
        right_walk = self.right.first() if self.right is not None else []
        return [self.root] + left_walk + right_walk

    def middle(self):
        """In-order traversal: left subtree, then root, then right."""
        left_walk = self.left.middle() if self.left is not None else []
        right_walk = self.right.middle() if self.right is not None else []
        return left_walk + [self.root] + right_walk

    def last(self):
        """Post-order traversal: left subtree, then right, then root."""
        left_walk = self.left.last() if self.left is not None else []
        right_walk = self.right.last() if self.right is not None else []
        return left_walk + right_walk + [self.root]

    def layer(self):
        """Breadth-first traversal: visit nodes level by level, left to right."""
        order = []
        pending = [self]  # nodes of the same level sit adjacent in the queue
        while pending:
            node = pending.pop(0)
            order.append(node.root)
            pending.extend(
                child for child in (node.left, node.right) if child is not None
            )
        return order
# Demo: build this tree and print all four traversal orders.
#         13
#        /  \
#       3    10
#      / \   / \
#     1   2 6   4
a = tree(1)
b = tree(2)
c = tree(3, a, b)
d = tree(6)
e = tree(4)
f = tree(10, d, e)
g = tree(13, c, f)
print(g.first())
print(g.middle())
print(g.last())
print(g.layer())
|
[
"[email protected]"
] | |
3f31aed1d2535080b22aec25c04f7b53f426c9ec
|
088641f914b3865a8352e52eb2a48b2244506a2c
|
/MainTesting/IMUProcess.py
|
14e2ffb2ee2d2cf9e58ee752f6ad212f68f667ab
|
[] |
no_license
|
DavidAmison/thread_test
|
49ee75efbab1ccce68a2eba086c241a13adf3e21
|
4c04823bb2819ff346b53ab3e4e46d5d7cd302de
|
refs/heads/master
| 2021-01-22T18:33:40.816012 | 2017-03-17T19:40:27 | 2017-03-17T19:40:27 | 85,092,804 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 317 |
py
|
"""
Created on Fri Mar 17 16:49:16 2017
@author: David
"""
import timeit
import IMU
import multiprocessing
class IMUProcess(multiprocessing.Process):
    """Worker process that collects 100 IMU samples and prints the elapsed time."""
    def run(self):
        # Time the acquisition with a high-resolution wall-clock timer.
        st = timeit.default_timer()
        IMU.collect_data(100)
        tm = timeit.default_timer() - st
        print('IMU took:',tm,'s')
|
[
"[email protected]"
] | |
fe4091648705793506b512a3db8c66523e047ce9
|
a957c31e7a09c507debcc793f39db04f457ad966
|
/backend/storm/manage.py
|
14f674679dea3870b89cabf7fcc829728399f944
|
[] |
no_license
|
TheFuzzy/NTU-CZ3003-CMS
|
bbc6a6d3c2bce887e12101e56995ca415ede768a
|
4409a623b248b937d4a92b5b7bd7c3471c2bb7e3
|
refs/heads/master
| 2019-01-02T03:23:34.210023 | 2014-04-17T06:40:33 | 2014-04-17T06:40:33 | 17,104,639 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: select this project's settings
    # module (unless the environment already chose one) and hand argv off.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "storm.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
3c327c89f0de7bec82025164c968faf2df12d343
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4191/codes/1716_2497.py
|
08cd8e7141262da535ce7f98751d1c4b82b7ce4d
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 379 |
py
|
# Instituto de Computacao - UFAM
# Lab 04 - Ex 04
# 20 / 06 / 2016
# Compound-interest simulation: applies a fixed 4% rate per period.
qi = float(input("Quantia inicial: "))
tempo = int(input("Tempo de investimento: "))
juros = 4.0
saldo = qi # accumulator: running balance
# loop counter starts at zero
t = 0
rend=0
# one compounding step per period
while(t<tempo):
    rend = saldo * (juros/100)
    saldo = saldo + rend
    t =t+1
print(round(saldo, 2))
|
[
"[email protected]"
] | |
32b42c8cc585711b17c8bd705bef71998e2e8323
|
ac1644a3e6d0209562fd21ce43bb9c422137b1e9
|
/AnalysisCI/GitWikiFormatter.py
|
a6a819eaf2d792a556893df4b4c4bfbb5927d838
|
[
"Apache-2.0"
] |
permissive
|
aa-software2112/SOEN390_SimpleCamera
|
a18d53334fc233bff4fc88927aae4cf28a322718
|
ffd1b9216f81f40b8b09588402438ff6bebc25d0
|
refs/heads/develop
| 2020-04-15T06:22:27.500475 | 2019-04-15T16:22:16 | 2019-04-15T16:22:16 | 164,458,621 | 5 | 1 |
Apache-2.0
| 2019-04-15T16:29:00 | 2019-01-07T16:17:24 |
Kotlin
|
UTF-8
|
Python
| false | false | 4,862 |
py
|
import os
import subprocess
from TravisEnvirExtractor import TravisEnvirExtractor
class GitApiHelper():
    """Thin static wrappers around the git CLI used by the CI wiki updater."""

    @staticmethod
    def config(email, username):
        """Set the global git identity for CI commits.

        BUG FIX: the original accepted *username* but never used it (only
        user.email was configured); it now sets user.name as well.
        """
        subprocess.Popen(['git', 'config', '--global', 'user.email', email]).communicate()
        subprocess.Popen(['git', 'config', '--global', 'user.name', username]).communicate()

    @staticmethod
    def push(use_token=False):
        """Push master to the wiki repo; with *use_token*, authenticate via $GH_TOKEN."""
        if not use_token:
            subprocess.Popen(['git', 'push', '--quiet', 'https://github.com/aa-software2112/SOEN390_SimpleCamera.wiki.git/', 'refs/heads/master:refs/heads/master']).communicate()
        else:
            subprocess.Popen(['git', 'push', '--quiet', 'https://' + str(os.environ.get("GH_TOKEN")) + '@github.com/aa-software2112/SOEN390_SimpleCamera.wiki.git/' ,'refs/heads/master:refs/heads/master']).communicate()

    @staticmethod
    def commit():
        """Commit all tracked changes with a fixed "travis" message."""
        subprocess.Popen(['git', 'commit', '-am', '\"travis\"']).communicate()

    @staticmethod
    def clone():
        """Clone the wiki repository into the current working directory."""
        subprocess.Popen(['git', 'clone', 'https://github.com/aa-software2112/SOEN390_SimpleCamera.wiki.git']).communicate()
class GitWikiFormatter():
    """Accumulates GitHub-wiki Markdown as a list of lines.

    Most mutators return ``self`` so calls can be chained.  Each appended
    line gets a trailing space plus newline (Markdown soft-break handling
    in ``add_line``).
    """
    # Markdown heading levels (number of leading '#').
    HEADER1 = 1
    HEADER2 = 2
    HEADER3 = 3
    HEADER4 = 4
    HEADER5 = 5
    HEADER6 = 6
    def __init__(self):
        self.lines = []
    def add_file(self, filename):
        # Load an existing wiki page line by line into this formatter.
        # NOTE(review): the file handle is never closed explicitly.
        f = open(filename, 'r')
        for line in f.readlines():
            self.add_line(line)
        return self
    def add_line(self, string):
        # Trim surrounding spaces, then ensure the stored line ends with a
        # trailing space + newline (keeps Markdown line breaks intact).
        string = string.strip(" ")
        self.lines.append((string + " ") if "\n" in string else (string + " ") + "\n")
        return self
    def add_lines(self, lines):
        # Empty input is rendered as a literal "n/a" placeholder line.
        if len(lines) == 0:
            lines.append("n/a")
        for line in lines:
            self.add_line(line)
        return self
    def add_header(self, header_level, string):
        # '#' repeated header_level times, e.g. "### title".
        self.add_line(header_level*"#" + " " + string)
        return self
    def add_key_value_pair(self, key, value):
        # Bold key, italic value -- unless the value contains '_', which
        # would clash with Markdown's italics marker.
        if value == "":
            value = "n/a"
        if "_" in value:
            self.add_line(self.surr_bold(key) + ": " + value)
        else:
            self.add_line(self.surr_bold(key) + ": " + self.surr_italics(value))
        return self
    def add_link(self, text, url):
        # Markdown link: [text](url)
        self.add_line(self.surr_sqbrack(text) + self.surr_brackets(url))
        return self
    def surr(self, text, surr_char):
        # Wrap *text* with *surr_char* on both sides.
        return surr_char + text + surr_char
    def surr_sqbrack(self, string):
        return "[" + string + "]"
    def surr_italics(self, string):
        return self.surr(string, "_")
    def surr_bold(self, string):
        return self.surr(string, "**")
    def surr_brackets(self, string):
        return "(" + string + ")"
    def add_horiz_rule(self):
        # Markdown horizontal rule.
        self.add_line("***")
        return self
    def add_underline(self):
        # Setext-style underline (renders the previous line as a heading).
        self.add_line("======")
        return self
    def get_markdown_after(self, delimiter):
        # Everything after the first occurrence of *delimiter*, split into
        # non-empty lines.  NOTE(review): returns a list on success but the
        # empty *string* when the delimiter is absent -- inconsistent types.
        tokens = "".join(self.lines).split(delimiter)
        if len(tokens) > 1:
            return [line for line in tokens[1].split("\n") if line != '']
        return ""
    def write_to_file(self, filename):
        # Persist the accumulated lines.  NOTE(review): handle not closed.
        f = open(filename, 'w')
        f.writelines(self.lines)
    def add_table(self, headers, rows):
        # Markdown table: header row, alignment row, then data rows.
        # NOTE(review): unlike the other mutators, does not return self.
        header_text = "| " + " | ".join(headers) + " |"
        header_data_split = "| ---: " + "| :---: "*(len(headers) -1) + " |"
        self.add_line(header_text)
        self.add_line(header_data_split)
        # Expects a list of rows (strings)
        for row in rows:
            # Row is a string; its whitespace-separated fields become cells.
            row = " | ".join(row.split())
            self.add_line(row)
    def skip_line(self):
        # Blank spacer line.
        self.add_line(" ")
    def __add__(self, other):
        # Concatenation produces a fresh formatter with both line lists.
        return GitWikiFormatter().add_lines(self.lines + other.lines)
    def __str__(self):
        # NOTE(review): prints the lines as a side effect and returns the
        # empty string rather than building a representation.
        for l in self.lines:
            print(l,"")
        return ""
if __name__ == "__main__":
PATH_TO_WIKI = "../SOEN390_SimpleCamera.wiki/CI-Analysis.md"
travis_output = TravisEnvirExtractor.get_travis_variables()
old_wiki = GitWikiFormatter().add_file(PATH_TO_WIKI)
# Setup the new wiki
new_wiki = GitWikiFormatter().add_header(GitWikiFormatter.HEADER1, "CI Analysis")
new_wiki.add_header(GitWikiFormatter.HEADER3,
str(travis_output["TRAVIS_PULL_REQUEST"]) + "-" + str(travis_output["TRAVIS_JOB_NAME"]))
new_wiki.add_header(GitWikiFormatter.HEADER4, "Build Metadata")
for k, v in travis_output.items():
new_wiki.add_key_value_pair(k, str(v))
new_wiki.add_header(GitWikiFormatter.HEADER4, "ERROR(S)").add_lines(["t", "c", "d"])
new_wiki.add_header(GitWikiFormatter.HEADER4, "SUCCESS(ES)").add_lines(["t", "c", "d"])
new_wiki.add_header(GitWikiFormatter.HEADER5, "INFO").add_lines(["t", "c", "d"])
new_wiki.add_horiz_rule()
new_wiki.add_lines(old_wiki.get_markdown_after("CI Analysis"))
new_wiki.write_to_file(PATH_TO_WIKI)
print(new_wiki)
|
[
"[email protected]"
] | |
cd0242dbd00c5b49c79a07dfb842bee733bde983
|
3e6935bf8b1b14bf897f230ffe90dcba745b3534
|
/pañol/views.py
|
b316113070c81d6c7601a4bb9a32ef0f5220e374
|
[] |
no_license
|
pipeordenes/pa-ol
|
0902e7d7ca9befc7cd9d411f71f8489129a1cccb
|
c6447058199b71ea6bb3c3c909b3bdd5767acfc8
|
refs/heads/master
| 2020-09-08T22:54:20.924722 | 2019-12-04T20:20:51 | 2019-12-04T20:20:51 | 221,267,714 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 436 |
py
|
from django.shortcuts import render
from .models import Pañol,Persona
from django.views import generic
def index(request):
    """Render the landing page (static, no template context)."""
    return render(request, 'index.html')
def info(request):
    """Render the info page (static, no template context)."""
    return render(request, 'info.html')
def formulario(request):
    """Render the form page (static, no template context)."""
    return render(request, 'formulario.html')
def galeria(request):
    """Render the gallery page (static, no template context)."""
    return render(request, 'galeria.html')
def we(request):
    """Render the "we"/about page (static, no template context)."""
    return render(request, 'we.html')
|
[
"[email protected]"
] | |
379beb1e77ac436d3ed27c8285063c25a9983ae2
|
ebe8e6a885fe38aa5b4f153ae7e97183b114371f
|
/todos/migrations/0002_auto_20180809_0222.py
|
9193f1879b8bb5e5358472937c52836840ac588c
|
[] |
no_license
|
ahmed-zubair-1998/todos-app
|
1ca0bd5ccac245c46d158ba471262a967202fc45
|
23c82f1f396758560d1332fbb094468130800bdb
|
refs/heads/master
| 2020-03-25T16:22:25.072170 | 2018-08-09T21:21:16 | 2018-08-09T21:21:16 | 143,927,425 | 0 | 0 | null | 2018-08-11T09:03:44 | 2018-08-07T21:13:17 |
Python
|
UTF-8
|
Python
| false | false | 720 |
py
|
# Generated by Django 2.1 on 2018-08-08 21:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds `author` (FK to the user model) and `status` fields to Todo.

    The author default of -1 is a sentinel so pre-existing rows can be
    migrated without a real user; status defaults to 0.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('todos', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='todo',
            name='author',
            field=models.ForeignKey(default=-1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='todo',
            name='status',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"[email protected]"
] | |
074e0c1bd228b44b107f1584fe6cd68117e95268
|
9fcc13b9556f57b362867ee6e06f257d5af5e812
|
/PythonProjects/scriptExample.py
|
5347361524ae14c15035630e58da83b5147cf0d6
|
[] |
no_license
|
rlyyah/1st_2nd_week
|
daab4028ba0ceb436059224f2ee1ccd2302275f1
|
825f116182b9db84190bea18ee1aaa92e108fed1
|
refs/heads/master
| 2020-09-14T01:22:26.598043 | 2019-11-20T15:16:20 | 2019-11-20T15:16:20 | 222,965,822 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 221 |
py
|
# Greet the user by name, falling back to "stranger" on empty input.
print('Please enter your name')
answer = str(input(' '))
# NOTE(review): input() already returns a str, so this condition is never
# true -- the loop body is dead code.
while type(answer) != str:
    answer = str(input('Please enter your name'))
if len(answer) < 1:
    print('Hello stranger!')
else:
    print('Hello ' + answer)
|
[
"[email protected]"
] | |
c224a0a67f24b636dd0b650907a86aefdd1418de
|
bc7092c337428dfc8398a6108d30ffa44e3a168e
|
/app/models/book.py
|
fce8a3821323be11efe90718b4523725799d83e7
|
[] |
no_license
|
Choco-x/fisher
|
8f43cf88c9212d17831c7ab8fce8babf42492898
|
389305a7104c3f21ceda8f039e2899f0fae6215a
|
refs/heads/master
| 2022-12-22T08:14:42.790352 | 2020-02-25T05:46:02 | 2020-02-25T05:46:02 | 242,916,357 | 0 | 0 | null | 2021-03-20T03:19:32 | 2020-02-25T05:17:50 |
Python
|
UTF-8
|
Python
| false | false | 784 |
py
|
"""
模型层
模型自动化映射生成表 sqlalchemy
Flask_SQLAlchemy
"""
from sqlalchemy import Column, Integer, String
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# 需要和app核心对象绑定
# db = SQLALchemy(app) 需要引入
class Book(db.Model):
    """Book record; Flask-SQLAlchemy maps this model to a table automatically."""
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Column() declares a new mapped column
    title = Column(String(50), nullable=False)
    author = Column(String(30), default='未名')  # default literally means "unnamed"
    price = Column(String(20))
    binding = Column(String(20))
    publisher = Column(String(50))
    pages = Column(Integer)
    pubdate = Column(String(20))
    isbn = Column(String(15), nullable=False, unique=True)
    summary = Column(String(1000))
    image = Column(String(50))
    def sample(self):
        # Placeholder -- not implemented yet.
        pass
|
[
"[email protected]"
] | |
bd5155e5950eb069a38d2fce0df786350afdbebd
|
71788a22dcaeb2fbde56b87fabf7ee21df3a770f
|
/students/eric_gosnell/lesson_08/circle.py
|
5adf1b2c248ded0714aebaa51ae31f4252e8837c
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/Python210_Fall2019
|
5bdfc1c919666eccb42ee07a1d7e385b21f11652
|
e45481671684a3cc8a469461a15cd9f660752ee0
|
refs/heads/master
| 2020-08-05T16:33:53.068983 | 2019-12-29T09:57:59 | 2019-12-29T09:57:59 | 212,615,940 | 4 | 34 | null | 2019-12-29T09:58:00 | 2019-10-03T15:38:40 |
Python
|
UTF-8
|
Python
| false | false | 3,332 |
py
|
"""
Eric Gosnell
Lesson 08 - Circle Class
11.25.2019
"""
from functools import total_ordering
from math import pi
@total_ordering
class Circle:
    """Circle shape with radius/diameter/area properties.

    Arithmetic operators accept either another Circle or a plain number
    and return a new Circle whose radius is the combination of the two
    radii.  Comparisons (via ``total_ordering``) compare radii.

    Fixes over the previous version:
      * ``__rsub__`` / ``__rtruediv__`` used the wrong operand order
        (they computed ``self - other`` / ``self / other`` instead of
        ``other - self`` / ``other / self``).
      * ``__init__`` now routes through the ``radius`` setter so a
        non-positive radius is rejected at construction time too.
    """

    @staticmethod
    def _radius_of(other):
        """Radius of *other* if it is a Circle, else *other* itself (a number)."""
        try:
            return other.radius
        except AttributeError:
            return other

    @classmethod
    def from_diameter(cls, diameter):
        """Alternate constructor: build a Circle from its diameter."""
        return cls(diameter / 2)

    def __init__(self, radius):
        # Use the property setter so validation applies at construction.
        self.radius = radius

    def __repr__(self):
        # Uses the dynamic class name so subclasses (e.g. Sphere) repr correctly.
        return f'{self.__class__.__name__}({self.radius})'

    def __eq__(self, other):
        return self.radius == other.radius

    def __lt__(self, other):
        return self.radius < other.radius

    def __add__(self, other):
        return Circle(self.radius + self._radius_of(other))

    # Addition/multiplication are commutative; the augmented forms also
    # returned a fresh Circle in the original, so aliasing is equivalent.
    __radd__ = __add__
    __iadd__ = __add__

    def __sub__(self, other):
        return Circle(self.radius - self._radius_of(other))

    def __rsub__(self, other):
        # Reflected subtraction: other - self (operand order fixed).
        return Circle(self._radius_of(other) - self.radius)

    __isub__ = __sub__

    def __mul__(self, other):
        return Circle(self.radius * self._radius_of(other))

    __rmul__ = __mul__
    __imul__ = __mul__

    def __truediv__(self, other):
        return Circle(self.radius / self._radius_of(other))

    def __rtruediv__(self, other):
        # Reflected division: other / self (operand order fixed).
        return Circle(self._radius_of(other) / self.radius)

    @property
    def radius(self):
        """The circle's radius (always > 0)."""
        return self._radius

    @radius.setter
    def radius(self, val):
        if val > 0:
            self._radius = val
        else:
            raise ValueError("radius must be > 0")

    @property
    def diameter(self):
        """Diameter, derived from radius; settable (sets radius = val / 2)."""
        return self.radius * 2

    @diameter.setter
    def diameter(self, val):
        self.radius = val / 2

    @property
    def area(self):
        """Disc area, pi * r**2."""
        return self.radius ** 2 * pi

    def sort_key(self):
        """Key usable with sorted(..., key=Circle.sort_key)."""
        return self.radius
class Sphere(Circle):
    """Sphere: a Circle extended with volume and a surface-area override."""

    @property
    def volume(self):
        """Sphere volume, r**3 * pi * (4/3)."""
        return self.radius ** 3 * pi * (4/3)

    @property
    def area(self):
        """Surface area, 4 * pi * r**2 (overrides Circle's disc area)."""
        return 4 * pi * self.radius ** 2
|
[
"[email protected]"
] | |
3406d55446f31b11a840b6274b4a1d052921988b
|
55987c6d7516477d65b7e8d1d366cba1c609af5d
|
/merge_sort.py
|
7b86d3654944cec54c69be2964ef66023cb70e3c
|
[] |
no_license
|
vafajardo/CourseraAlgorithmsPart1
|
080414ba85f0807122913f8c5110bab36fc727bb
|
9bd87d0cfaa4ac6e78daf4c048718bddd4a46520
|
refs/heads/master
| 2021-01-10T02:07:43.243167 | 2016-04-02T19:54:41 | 2016-04-02T19:54:41 | 54,615,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 931 |
py
|
#!/bin/python
# My merge sort algorithm (can handle non-even arrays)
def combine(x, y):
    """Merge two individually sorted lists into one sorted list.

    Generalized to inputs of any (possibly very unequal) lengths.  The
    previous tail test (``if j <= i``) silently dropped elements whenever
    ``len(y) > len(x) + 1`` -- safe for merge_sort's near-equal halves but
    wrong for standalone use; we now check which list was exhausted.
    """
    merged = []
    nx, ny = len(x), len(y)
    i = j = 0
    # Walk both lists, always taking the smaller head element (ties go to y,
    # matching the original's `<` comparison, which keeps the sort stable).
    while i < nx and j < ny:
        if x[i] < y[j]:
            merged.append(x[i])
            i += 1
        else:
            merged.append(y[j])
            j += 1
    # Exactly one list has elements left; append its tail directly.
    if i < nx:
        merged.extend(x[i:])
    else:
        merged.extend(y[j:])
    return merged
def merge_sort(x):
    """Return a sorted copy of sequence *x* via top-down merge sort.

    BUG FIX: the original recursed forever on the empty sequence (n == 0
    gave half == 0 and an identical recursive call); ``n <= 1`` is now the
    base case and returns a fresh list either way.
    """
    n = len(x)
    if n <= 1:
        # Zero or one element is already sorted; copy keeps the contract
        # that a new list is always returned.
        return list(x)
    half = n // 2
    left = merge_sort(x[:half])
    right = merge_sort(x[half:])
    return combine(left, right)
|
[
"[email protected]"
] | |
e9e6b193ada49c07eeba439047839ed6c513a166
|
7a31597f1359be11d2cc05d8107963f3dbe9e204
|
/Image_recognition/utils/model_dict.py
|
c4a7e7c37d00cc8aae670e50e091d41bc1d6d1b9
|
[] |
no_license
|
LIMr1209/machine-learn
|
9aac2b51a928a864ac3cf82368b3fe9694644cb2
|
56453dce6ae8ba5e7298dab99d5e6a6d114e4860
|
refs/heads/master
| 2022-07-12T14:17:07.536535 | 2021-12-20T06:57:54 | 2021-12-20T06:57:54 | 163,064,915 | 5 | 2 | null | 2020-08-31T03:09:10 | 2018-12-25T08:48:00 |
Python
|
UTF-8
|
Python
| false | false | 252 |
py
|
import torch as t
def save_oplaus():
    # Re-save only the model weights from a full training checkpoint:
    # drops optimizer state etc. to produce a slimmer deployment file.
    # NOTE(review): paths are hard-coded; assumes the script runs next to
    # ../checkpoint and that /opt/checkpoint exists -- confirm.
    state_dict = {}
    checkpoint = t.load('../checkpoint/EfficientNet.pth.tar')
    state_dict['state_dict'] = checkpoint['state_dict']
    t.save(state_dict, '/opt/checkpoint/EfficientNet.pth')
save_oplaus()
|
[
"[email protected]"
] | |
4a2b37ae2ac53caae2f1788898826989d070b4f5
|
1bde114a847c629701e3acd004be5788594e0ef1
|
/residual/code/FunctionObjects/ChainOfResponsibility.py
|
bcb44c3267c355ce4a9eb632900d8283f24c1743
|
[] |
no_license
|
BruceEckel/ThinkingInPython
|
0b234cad088ee144bb8511e1e7db9fd5bba78877
|
76a1310deaa51e02e9f83ab74520b8269aac6fff
|
refs/heads/master
| 2022-02-21T23:01:40.544505 | 2022-02-08T22:26:52 | 2022-02-08T22:26:52 | 97,673,620 | 106 | 33 | null | 2022-02-08T22:26:53 | 2017-07-19T04:43:50 |
Python
|
UTF-8
|
Python
| false | false | 2,733 |
py
|
# FunctionObjects/ChainOfResponsibility.py
# Carry the information into the strategy:
# Marker base class -- anything handed to a Strategy plays the Messenger role.
class Messenger: pass
# The Result object carries the result data and
# whether the strategy was successful:
class Result:
    """Success flag holder: 0 until a strategy calls setSuccessful()."""

    def __init__(self):
        self.succeeded = 0

    def isSuccessful(self):
        """Return the stored success flag."""
        return self.succeeded

    def setSuccessful(self, succeeded):
        """Record whether the producing strategy succeeded."""
        self.succeeded = succeeded
class Strategy:
    """Base function-object strategy; subclasses override __call__.

    BUG FIX: the original declared ``__call__(messenger)`` without ``self``,
    so calling any Strategy *instance* raised TypeError.
    """

    def __call__(self, messenger):
        pass

    def __str__(self):
        return "Trying " + self.__class__.__name__ \
            + " algorithm"
# Manage the movement through the chain and
# find a successful result:
class ChainLink:
    """One link of a chain-of-responsibility; self-registers in *chain*."""
    def __init__(self, chain, strategy):
        self.strategy = strategy
        self.chain = chain
        self.chain.append(self)
    def next(self):
        # Where this link is in the chain:
        location = self.chain.index(self)
        if not self.end():
            return self.chain[location + 1]
    def end(self):
        # True when this link is last and has no successor to delegate to.
        return (self.chain.index(self) + 1 >=
                len(self.chain))
    def __call__(self, messenger):
        # Run this link's strategy; on failure, delegate down the chain.
        # The last link's result is returned even if unsuccessful.
        r = self.strategy(messenger)
        if r.isSuccessful() or self.end(): return r
        return self.next()(messenger)
# For this example, the Messenger
# and Result can be the same type:
class LineData(Result, Messenger):
    """Carries the input line data and doubles as the strategies' Result."""

    def __init__(self, data):
        # BUG FIX: initialize Result so isSuccessful() is safe even before
        # a strategy has called setSuccessful().
        super().__init__()
        self.data = data

    def __str__(self):
        # BUG FIX: Python 2 backquote repr (`self.data`) is a SyntaxError
        # in Python 3; repr() is the portable spelling.
        return repr(self.data)
# Four concrete strategies.  Each prints which algorithm is being tried and
# returns dummy LineData; the first two mark themselves unsuccessful so the
# chain falls through to Bisection, which succeeds.
class LeastSquares(Strategy):
    """Always 'fails' (dummy), forcing the chain to continue."""
    def __call__(self, messenger):
        print(self)
        linedata = messenger
        # [ Actual test/calculation here ]
        result = LineData([1.1, 2.2]) # Dummy data
        result.setSuccessful(0)
        return result
class NewtonsMethod(Strategy):
    """Always 'fails' (dummy), forcing the chain to continue."""
    def __call__(self, messenger):
        print(self)
        linedata = messenger
        # [ Actual test/calculation here ]
        result = LineData([3.3, 4.4]) # Dummy data
        result.setSuccessful(0)
        return result
class Bisection(Strategy):
    """First 'successful' strategy -- the chain stops here."""
    def __call__(self, messenger):
        print(self)
        linedata = messenger
        # [ Actual test/calculation here ]
        result = LineData([5.5, 6.6]) # Dummy data
        result.setSuccessful(1)
        return result
class ConjugateGradient(Strategy):
    """Also 'successful', but never reached in the demo below."""
    def __call__(self, messenger):
        print(self)
        linedata = messenger
        # [ Actual test/calculation here ]
        result = LineData([7.7, 8.8]) # Dummy data
        result.setSuccessful(1)
        return result
# Demo: build the chain and run it on one input line.  NOTE(review): the
# trailing commas turn the first two statements into one-element tuple
# expressions -- harmless, since ChainLink's constructor side effect
# (appending itself to `solutions`) is all that matters.
solutions = []
ChainLink(solutions, LeastSquares()),
ChainLink(solutions, NewtonsMethod()),
ChainLink(solutions, Bisection()),
ChainLink(solutions, ConjugateGradient())
line = LineData([
    1.0, 2.0, 1.0, 2.0, -1.0,
    3.0, 4.0, 5.0, 4.0
])
# Invoke the first link; it delegates until a strategy succeeds.
print(solutions[0](line))
|
[
"[email protected]"
] | |
e380de69418da9be88cb3a88a31e87d7ecac0918
|
b84981a1c862b3a5ccd7d215532847192a6773d8
|
/test/scenarios/kusto/output/ext_default_folder/src/kusto/azext_kusto/generated/commands.py
|
dc1174f122a6d21228b91db2b60d238500db8132
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
qiaozha/autorest.az
|
d0a0427b4414cabf2cc69a5f67204a34d0b7dea7
|
70265eda62ff6d4ec4ae978693f7b7cc46304b09
|
refs/heads/master
| 2023-03-17T22:16:24.207128 | 2021-03-08T06:53:00 | 2021-03-08T06:53:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,226 |
py
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
from azext_kusto.generated._client_factory import cf_cluster
kusto_cluster = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._clusters_operations#ClustersOperations.{}',
client_factory=cf_cluster,
)
with self.command_group('kusto cluster', kusto_cluster, client_factory=cf_cluster, is_experimental=True) as g:
g.custom_command('list', 'kusto_cluster_list')
g.custom_show_command('show', 'kusto_cluster_show')
g.custom_command('create', 'kusto_cluster_create', supports_no_wait=True)
g.custom_command('update', 'kusto_cluster_update', supports_no_wait=True)
g.custom_command('delete', 'kusto_cluster_delete', supports_no_wait=True, confirmation=True)
g.custom_command(
'add-language-extension', 'kusto_cluster_add_language_extension', is_preview=True, supports_no_wait=True
)
g.custom_command('detach-follower-database', 'kusto_cluster_detach_follower_database', supports_no_wait=True)
g.custom_command('diagnose-virtual-network', 'kusto_cluster_diagnose_virtual_network', supports_no_wait=True)
g.custom_command('list-follower-database', 'kusto_cluster_list_follower_database')
g.custom_command('list-language-extension', 'kusto_cluster_list_language_extension')
g.custom_command('list-sku', 'kusto_cluster_list_sku')
g.custom_command('remove-language-extension', 'kusto_cluster_remove_language_extension', supports_no_wait=True)
g.custom_command('start', 'kusto_cluster_start', supports_no_wait=True)
g.custom_command('stop', 'kusto_cluster_stop', supports_no_wait=True)
g.custom_wait_command('wait', 'kusto_cluster_show')
from azext_kusto.generated._client_factory import cf_cluster_principal_assignment
kusto_cluster_principal_assignment = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._cluster_principal_assignments_operations#ClusterPrincipalAssignmentsOperations.{}',
client_factory=cf_cluster_principal_assignment,
)
with self.command_group(
'kusto cluster-principal-assignment',
kusto_cluster_principal_assignment,
client_factory=cf_cluster_principal_assignment,
) as g:
g.custom_command('list', 'kusto_cluster_principal_assignment_list')
g.custom_show_command('show', 'kusto_cluster_principal_assignment_show')
g.custom_command('create', 'kusto_cluster_principal_assignment_create', supports_no_wait=True)
g.generic_update_command(
'update',
supports_no_wait=True,
custom_func_name='kusto_cluster_principal_assignment_update',
setter_name='begin_create_or_update',
)
g.custom_command(
'delete', 'kusto_cluster_principal_assignment_delete', supports_no_wait=True, confirmation=True
)
g.custom_wait_command('wait', 'kusto_cluster_principal_assignment_show')
from azext_kusto.generated._client_factory import cf_database
kusto_database = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._databases_operations#DatabasesOperations.{}',
client_factory=cf_database,
)
with self.command_group('kusto database', kusto_database, client_factory=cf_database) as g:
g.custom_command('list', 'kusto_database_list')
g.custom_show_command('show', 'kusto_database_show')
g.custom_command('create', 'kusto_database_create', supports_no_wait=True)
g.custom_command('update', 'kusto_database_update', supports_no_wait=True)
g.custom_command('delete', 'kusto_database_delete', supports_no_wait=True, confirmation=True)
g.custom_command('add-principal', 'kusto_database_add_principal')
g.custom_command('list-principal', 'kusto_database_list_principal')
g.custom_command('remove-principal', 'kusto_database_remove_principal')
g.custom_wait_command('wait', 'kusto_database_show')
from azext_kusto.generated._client_factory import cf_database_principal_assignment
kusto_database_principal_assignment = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._database_principal_assignments_operations#DatabasePrincipalAssignmentsOperations.{}',
client_factory=cf_database_principal_assignment,
)
with self.command_group(
'kusto database-principal-assignment',
kusto_database_principal_assignment,
client_factory=cf_database_principal_assignment,
) as g:
g.custom_command('list', 'kusto_database_principal_assignment_list')
g.custom_show_command('show', 'kusto_database_principal_assignment_show')
g.custom_command('create', 'kusto_database_principal_assignment_create', supports_no_wait=True)
g.generic_update_command(
'update',
supports_no_wait=True,
custom_func_name='kusto_database_principal_assignment_update',
setter_name='begin_create_or_update',
)
g.custom_command(
'delete', 'kusto_database_principal_assignment_delete', supports_no_wait=True, confirmation=True
)
g.custom_wait_command('wait', 'kusto_database_principal_assignment_show')
from azext_kusto.generated._client_factory import cf_attached_database_configuration
kusto_attached_database_configuration = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._attached_database_configurations_operations#AttachedDatabaseConfigurationsOperations.{}',
client_factory=cf_attached_database_configuration,
)
with self.command_group(
'kusto attached-database-configuration',
kusto_attached_database_configuration,
client_factory=cf_attached_database_configuration,
) as g:
g.custom_command('list', 'kusto_attached_database_configuration_list')
g.custom_show_command('show', 'kusto_attached_database_configuration_show')
g.custom_command('create', 'kusto_attached_database_configuration_create', supports_no_wait=True)
g.generic_update_command(
'update',
supports_no_wait=True,
custom_func_name='kusto_attached_database_configuration_update',
setter_name='begin_create_or_update',
)
g.custom_command(
'delete', 'kusto_attached_database_configuration_delete', supports_no_wait=True, confirmation=True
)
g.custom_wait_command('wait', 'kusto_attached_database_configuration_show')
from azext_kusto.generated._client_factory import cf_data_connection
kusto_data_connection = CliCommandType(
operations_tmpl=(
'azext_kusto.vendored_sdks.kusto.operations._data_connections_operations#DataConnectionsOperations.{}'
),
client_factory=cf_data_connection,
)
with self.command_group('kusto data-connection', kusto_data_connection, client_factory=cf_data_connection) as g:
g.custom_command('list', 'kusto_data_connection_list')
g.custom_show_command('show', 'kusto_data_connection_show')
g.custom_command('event-grid create', 'kusto_data_connection_event_grid_create', supports_no_wait=True)
g.custom_command('event-hub create', 'kusto_data_connection_event_hub_create', supports_no_wait=True)
g.custom_command('iot-hub create', 'kusto_data_connection_iot_hub_create', supports_no_wait=True)
g.custom_command('event-grid update', 'kusto_data_connection_event_grid_update', supports_no_wait=True)
g.custom_command('event-hub update', 'kusto_data_connection_event_hub_update', supports_no_wait=True)
g.custom_command('iot-hub update', 'kusto_data_connection_iot_hub_update', supports_no_wait=True)
g.custom_command('delete', 'kusto_data_connection_delete', supports_no_wait=True, confirmation=True)
g.custom_command(
'event-grid data-connection-validation',
'kusto_data_connection_event_grid_data_connection_validation',
supports_no_wait=True,
)
g.custom_command(
'event-hub data-connection-validation',
'kusto_data_connection_event_hub_data_connection_validation',
supports_no_wait=True,
)
g.custom_command(
'iot-hub data-connection-validation',
'kusto_data_connection_iot_hub_data_connection_validation',
supports_no_wait=True,
)
g.custom_wait_command('wait', 'kusto_data_connection_show')
|
[
"[email protected]"
] | |
addcaf23d559f565c76be58dd26d6a5b76857526
|
ed065fd97cdfc9b3396bdaee7ccfb17dc0557803
|
/HP Codewars 2017/prob13.py
|
b003eb39db60418af050d040c229a483bd764393
|
[
"Apache-2.0"
] |
permissive
|
NaveenGop/projects
|
25f626f38fee6f0d9f05648c4b1c463c9a7fc515
|
b4c63f7b7030288b384ff780f078bcd9504769b0
|
refs/heads/master
| 2021-06-30T04:24:06.448031 | 2017-09-20T04:04:32 | 2017-09-20T04:04:32 | 104,162,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 903 |
py
|
# HP Codewars 2017 problem 13: characters of the input "snake" across a grid.
# Direction letters (u/d/l/r in any case) are placed like normal characters
# but also change the direction of travel for the characters after them.
with open('prob13-2-in.txt', 'r') as f:
    a = f.read()[:-1]  # drop the trailing newline
# Recognized direction letters, and the cursor state: position (x, y) plus the
# current heading e (travel starts moving right).
d = ['u', 'd', 'l', 'r']
x, y, e = 0, 0, 'r'
word = []  # list of [character, x, y] placements
for z in a:
    word.append([z, x, y])
    char = z.lower()
    if char in d:
        # A direction letter updates the heading, then steps in the NEW direction.
        e = char
        if char == 'r':
            x += 1
        if char == 'l':
            x -= 1
        if char == 'u':
            y += 1
        if char == 'd':
            y -= 1
    else:
        # Any other character steps once in the current heading.
        if e == 'r':
            x += 1
        if e == 'l':
            x -= 1
        if e == 'u':
            y += 1
        if e == 'd':
            y -= 1
del x, y, e, d
# Shift every placement so the minimum x and y become 0 (non-negative grid).
left, down = min([z[1] for z in word]), min([z[2] for z in word])
for z in word:
    z[1] -= left
    z[2] -= down
# Key (q[2], -q[1]) with reverse=True sorts by y descending (top row first)
# and, within a row, by x ascending (left to right).
word = sorted(word, key=lambda q: (q[2], -q[1]), reverse=True)
# Bucket placements by row, from the top row (largest y) down to y == 0.
x = [[z for z in word if z[2] == x] for x in range(word[0][2], -1, -1)]
s = ''
for z in x:
    # Walk columns up to the rightmost occupied one (z[-1] has the largest x);
    # emit the character at each column, or a space when the column is empty.
    for y in range(z[-1][1]+1):
        for g in z:
            if g[1] == y:
                s += g[0]
                break
        else:
            s += ' '
    s += '\n'
print(s)
|
[
"[email protected]"
] | |
c5023ecc348a5f6d754ae717b924597515d9e466
|
c24fa89450cccb48fcd481c3cfa475ee0e412e09
|
/PythonTools/accToMatAcc.py
|
9b41f081bd69b214a00fd824ead8d6cca2702378
|
[] |
no_license
|
PhoenixYanrongLi/CareEcoSystem_ServerCodeNew
|
e95d1c552cdcc70aac09482dfda63e253e01fcb0
|
b627484694863c425483a04391eedc2ec2ec1098
|
refs/heads/master
| 2021-01-01T04:34:51.858543 | 2016-04-14T17:57:30 | 2016-04-14T17:57:30 | 56,258,674 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,039 |
py
|
__author__ = 'Brad'
import csv
import datetime
import scipy.io
import numpy
def writeFile(filename):
    """Convert a whitespace-delimited accelerometer log to a MATLAB .mat file.

    Each input row is: timestamp x y z azimuth pitch roll, with the timestamp
    formatted '%y-%m-%dT%H:%M:%S.%f'. Writes <filename>_AccelerometerData.mat
    containing:
      AccData     -- N x 3 array of (x, y, z) floats
      UnixTime_ms -- N x 1 array of epoch milliseconds

    Fixes over the original: the stray `f = open(filename, 'r')` handle that
    was never closed is gone (the `with` block is the only open), rows are
    accumulated in lists instead of repeated O(N^2) numpy.vstack calls, and
    the Python-2 print statement is now a function call.
    """
    epoch = datetime.datetime.utcfromtimestamp(0)
    time_rows = []
    acc_rows = []
    with open(filename, 'r') as f:
        reader = csv.reader(f, delimiter=" ")
        for time, x, y, z, azimuth, pitch, roll in reader:
            formatStr = "%y-%m-%dT%H:%M:%S.%f"
            timeC = datetime.datetime.strptime(time, formatStr)
            # Milliseconds since the Unix epoch (timestamps are treated as UTC).
            delta = (timeC - epoch).total_seconds() * 1000
            time_rows.append([delta])
            acc_rows.append([float(x), float(y), float(z)])
    accAr = numpy.array(acc_rows)
    writeDict = {'AccData': accAr, 'UnixTime_ms': numpy.array(time_rows)}
    print(accAr)
    scipy.io.savemat(filename + '_AccelerometerData.mat', writeDict)
# Example conversion of one capture file; runs on import of this module.
filename='99000213875160_20141113-193740_MM_ACC_1103.txt'
writeFile(filename)
|
[
"[email protected]"
] | |
1ee47960a9622eda41d4240873eadcb969bff091
|
f80b74334058d3fdf6ddf3aa9931d07272b07492
|
/mysite_venv/mysite/read_count/migrations/0001_initial.py
|
5ef99eb29709767aae7c5f004c945f2f6f1480dc
|
[] |
no_license
|
LittleSheep213/ls_blog_site
|
ad7258ca8a75ce8d3ae836c2cc5ea44ac2521e8d
|
b716bff21d621bee1d60520353e725f929133354
|
refs/heads/master
| 2022-11-11T21:26:26.528404 | 2019-03-19T05:41:53 | 2019-03-19T05:41:53 | 176,430,919 | 0 | 1 | null | 2022-11-02T11:46:39 | 2019-03-19T05:27:48 |
Python
|
UTF-8
|
Python
| false | false | 775 |
py
|
# Generated by Django 2.1.7 on 2019-03-14 09:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ReadNum model, which
    attaches a read counter to any model instance via a generic
    (content_type, object_id) reference. Do not edit by hand."""
    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        migrations.CreateModel(
            name='ReadNum',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('read_num', models.IntegerField(default=0)),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
cefbf0a0fdf8d99aa2a71fe16307acce46c29f21
|
bde54b463633bb4f4a3a15bca70801e481274bf4
|
/FP driver.py
|
e43731ab2f75fad07fb891f486405a1979e83679
|
[] |
no_license
|
kwujciak/Final-Project-ES2
|
b5985b4671a00cf8d7f12ccc5d0152bec97349f7
|
7c529e13119046b94dc8eafa47077dbbdb772daf
|
refs/heads/master
| 2022-06-21T11:33:38.045140 | 2020-05-01T15:07:26 | 2020-05-01T15:07:26 | 260,487,327 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 633 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 09:59:51 2020
@author: katewujciak
"""
import FP_time as time
import FP_currency as cur
import FP_attractions as at
import FP_FlightTime as ft
print("Welcome to your personal travel tool!")
print("Compatible destinations are:")
print("London, Rome, Tokyo, New York, Chicago, Denver, and Los Angeles")
destination = input("Where are you travelling?")
europe = ["London", "Rome"]
US = ["New York", "Chicago", "Denver", "Los Angeles"]
if destination in europe:
time.Europe()
if destination == "Tokyo":
time.Japan()
if destination in US:
time.US()
|
[
"[email protected]"
] | |
096d0d8288be255279da6b0333f78ba7b7ba5d4a
|
48e907ceff4620d93353985d502bd5c56489422d
|
/brochure/views.py
|
41d14388d9f950a62460ea1cb01b76ae414b422f
|
[] |
no_license
|
pinealan/stemandbeyond
|
7b49d44f36b7d7724eb5df3c60be22b81f1f1955
|
49c4cb62a8a5a10fa0501801874e03e3d61aedd8
|
refs/heads/master
| 2020-03-23T04:58:47.092453 | 2018-08-11T21:00:27 | 2018-08-11T21:00:27 | 141,117,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 827 |
py
|
from django.shortcuts import render, get_object_or_404
from django.views import generic
from .models import Speaker, Metatag, Content, Affiliate
def _page_context():
    """Assemble the template context shared by the English and Chinese pages.

    Returns a dict with the site copy keyed by field name, plus the speaker,
    metatag, and affiliate querysets materialized as lists.
    """
    return {
        'content': {blob.field: blob.text for blob in Content.objects.all()},
        'speakers': list(Speaker.objects.all()),
        'metas': list(Metatag.objects.all()),
        'affiliates': list(Affiliate.objects.all()),
    }


def index(req):
    """Render the English brochure page."""
    return render(req, 'brochure/en.html', context=_page_context())


def zh(req):
    """Render the Chinese brochure page."""
    return render(req, 'brochure/zh.html', context=_page_context())
|
[
"[email protected]"
] | |
d490e410f392b2ae592f904be4753a8223b0cb08
|
27cfd0d22e9cdb2bed18f55788332191bcbb973a
|
/armstrong-numbers/armstrong_numbers.py
|
30e6c9fca283975d5dd190064638295c1c380b38
|
[] |
no_license
|
NishantUpadhyay-BTC/python-exercism-exercises
|
151b9a2b14cc5f28baf761a1fe6d46c1b3827e39
|
60a57ae1de8a8bf74242b754db02af5b8099c479
|
refs/heads/master
| 2020-07-01T16:39:30.075982 | 2019-08-08T09:55:35 | 2019-08-08T09:55:35 | 201,228,404 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 199 |
py
|
def is_armstrong_number(number):
    """Return True if *number* equals the sum of its digits, each raised to
    the power of the digit count (an Armstrong/narcissistic number)."""
    text = str(number)
    width = len(text)
    return number == sum(int(ch) ** width for ch in text)
|
[
"[email protected]"
] | |
67e14d9dd0180f4e2182ddaef7cbe3dad2f06327
|
0ef9b5db33cf121d7a089c139894f6c918bd770a
|
/Events_Api_Interface/settings.py
|
254e7b569001d423bf273b75d21c789ffb6216ac
|
[] |
no_license
|
dnaport22/events_recommandation_api
|
f16dab14bc1a4483ad88fdf4ad7cb64c61c839d2
|
c7afe3d68cbd7ca062bbc3dbfded001ba433b94f
|
refs/heads/master
| 2021-01-15T10:42:22.179881 | 2017-08-07T15:55:31 | 2017-08-07T15:55:31 | 99,594,893 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,212 |
py
|
"""
Django settings for Events_Api_Interface project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source control; load it from an
# environment variable before deploying.
SECRET_KEY = 'f_e^72t5+6(w^1i4k%xjyp-ug2gaeme3t)*rxq%xtnpk=9qt03'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework_swagger',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Events_Api_Interface.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR + '/templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Events_Api_Interface.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file in the project root (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
4b244650d7abe40c2752c34c73b3f8b6dd3fb81d
|
1833612f35d8ebc0249e882fead7b593d58ae911
|
/ground-core/scripts/cassandra/cassandra_setup.py
|
3346d2d7ef8a3f0ac6b09dd9401f56e4d9b1a55d
|
[] |
no_license
|
jegonzal/ground
|
6b8eb797f0e602aeac9c7107ae0b80c44e9719e3
|
78efff8e41cc21b72f427202318a650563d2017e
|
refs/heads/master
| 2023-08-30T18:07:37.926637 | 2016-07-09T21:49:06 | 2016-07-09T21:49:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 158 |
py
|
"""Load cassandra.sql into the keyspace named on the command line via cqlsh."""
import sys
import subprocess

# Validate the argument count explicitly: the original used `assert`, which
# is stripped when Python runs with -O.
if len(sys.argv) != 2:
    sys.exit("usage: cassandra_setup.py <keyspace>")
dbname = sys.argv[1]
# Run cqlsh with an argument list and no shell, so a keyspace name cannot
# inject shell syntax (the original interpolated it into an os.system string).
subprocess.run(["cqlsh", "-k", str(dbname), "-f", "cassandra.sql"])
|
[
"[email protected]"
] | |
20df727211d4c56d675fffedbca49041fd906653
|
bb156f9916f9fe8530306cd2e97d23ed06428c0a
|
/.wttd/bin/pyreverse
|
0da3ae4accd62c6c7200d36dda8db2c6a2e3162f
|
[] |
no_license
|
wilharlley/wttd
|
dcda740f3c31a5b96e1bbec3eafccc92ddb7ed9c
|
1926032de18e969129ac78ebc20a204ff6e605ae
|
refs/heads/master
| 2020-06-03T15:22:24.309374 | 2019-06-12T14:07:00 | 2019-06-12T14:07:00 | 191,625,632 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
#!/home/wilharlley/Desenvolvimento/Pessoal/wttd/.wttd/bin/python3
# -*- coding: utf-8 -*-
# Virtualenv console-script shim: normalizes argv[0], then delegates to
# pylint's pyreverse entry point.
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
    # Strip a setuptools "-script.py"/".exe" suffix from argv[0] so the tool
    # reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pyreverse())
|
[
"[email protected]"
] | ||
1eb4ea943bb10ccda036a8f2bbafcef91c5855ed
|
efd6a277c2d5bffdfba6ccb4d5efd555e652d29e
|
/chap2/2.12.py
|
f427f3f0b66eeba917d8798655d76ae107eb82bf
|
[] |
no_license
|
CavalcanteLucas/cookbook
|
dd57583c8b5271879bb086783c12795d1c0a7ee8
|
09ac71e291571e3add8d23d79b1684b356702a40
|
refs/heads/master
| 2020-03-25T03:09:39.608599 | 2019-09-13T04:43:23 | 2019-09-13T04:43:23 | 143,325,952 | 0 | 0 | null | 2020-09-25T05:46:30 | 2018-08-02T17:32:08 |
Python
|
UTF-8
|
Python
| false | false | 885 |
py
|
# Sanitizing and Cleaning Up Text
# (Recipe walk-through; the bare expressions such as `s`, `a`, `b` below are
# REPL echoes and have no effect when this file is run as a script.)
s = 'pýtĥöñ\fis\tawesome\r\n'
s
# Translation table: tab and form-feed become spaces; mapping '\r' to None
# deletes it entirely.
remap = {
    ord('\t') : ' ',
    ord('\f') : ' ',
    ord('\r') : None # Deleted
}
a = s.translate(remap)
a
import unicodedata
import sys
sys.maxunicode
# Map every Unicode combining character to None so translate() strips accents
# from NFD-decomposed text.
cmb_chrs = dict.fromkeys(c for c in range(sys.maxunicode) if unicodedata.combining(chr(c)))
b = unicodedata.normalize('NFD', a)
b
b.translate(cmb_chrs)
# Map every Unicode decimal digit (category 'Nd') to the equivalent ASCII digit.
digitmap = { c: ord('0') + unicodedata.digit(chr(c))
        for c in range(sys.maxunicode)
        if unicodedata.category(chr(c)) == 'Nd'}
len(digitmap)
# Arabic digits
x = '\u0661\u0662\u0663'
x
x.translate(digitmap)
a
# Alternative accent removal: decompose, then encode to ASCII ignoring
# whatever does not fit.
b = unicodedata.normalize('NFD', a)
b
b.encode('ascii', 'ignore').decode('ascii')
# Discussion
# on text processing; the simpler, the faster.
def clean_space(s):
    """Normalize whitespace: drop carriage returns and turn tabs and
    form-feeds into single spaces."""
    for old, new in (('\r', ''), ('\t', ' '), ('\f', ' ')):
        s = s.replace(old, new)
    return s
|
[
"[email protected]"
] | |
abec0a4a92dc068a00f9f27d0c21709406b6641f
|
e47b87905872d92458512b0eda435f53f90b19cf
|
/movies/migrations/0003_alter_movie_author.py
|
f15bf19bee735f007ed42db65755c2622c2f495c
|
[] |
no_license
|
ephremworkeye/drf_demo
|
e08e2f2049b427497bad815e51247e27784b1f29
|
9f5ce84edd7841fd0456107d99485d2af44e1c49
|
refs/heads/master
| 2023-07-31T16:24:12.400218 | 2021-09-25T05:56:05 | 2021-09-25T05:56:05 | 409,107,635 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 577 |
py
|
# Generated by Django 3.2.7 on 2021-09-23 00:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: repoints Movie.author at the configured
    user model with CASCADE deletion. Do not edit by hand."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('movies', '0002_alter_movie_author'),
    ]
    operations = [
        migrations.AlterField(
            model_name='movie',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"[email protected]"
] | |
2179a4aeda2724c9cc76fb486c75f79bf594baab
|
76dfa1c7d83a66f8e57ff058cea13541c8590785
|
/process_create.py
|
9ada85511df8ecd7d67428428190c6c377ca5bef
|
[] |
no_license
|
seyo-you/web2_python
|
8fecc3c911116ce4ca572f4c2a21ae2248974c6d
|
457522091113d899894a5b8fa349b89b248ad59f
|
refs/heads/main
| 2023-02-05T10:06:14.179743 | 2020-12-21T07:16:35 | 2020-12-21T07:16:35 | 322,902,575 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
#!/usr/bin/env python3
"""CGI handler: create a data file named after the submitted title, then
redirect back to the page for the new entry."""
import cgi

form = cgi.FieldStorage()
title = form["title"].value
description = form["description"].value
# NOTE(review): `title` comes straight from the client and is used as a file
# name -- sanitize it to block path traversal (e.g. "../../etc") before
# exposing this handler.
# `with` guarantees the handle is closed and the content flushed (the
# original left the file object open).
with open('data/' + title, 'w') as opened_file:
    opened_file.write(description)
# Redirection
print("Location: index.py?id=" + title)
print()
|
[
"[email protected]"
] | |
86e530ad2926e76a695d7a7e9bce92cafb6d33fe
|
1a861389682ffae24cf37591a56ef49f2a2cd25c
|
/My_Social_Project/App_Posts/admin.py
|
ed2650dba5eae3c646e9f4d3ed11ef8a92796d8f
|
[] |
no_license
|
tanviredu/Social
|
2a180f72a4a006d21e662101c3a2b441d960c30a
|
f52dc31358adf36701fd56c688cd1c6c947799b2
|
refs/heads/master
| 2022-12-11T00:58:36.386143 | 2020-09-04T17:30:30 | 2020-09-04T17:30:30 | 290,747,285 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 116 |
py
|
from django.contrib import admin
from .models import Post,Like
# Expose the Post and Like models on the Django admin site.
admin.site.register(Post)
admin.site.register(Like)
|
[
"[email protected]"
] | |
6a44b292eddc09c6e7fd4688ff22c55ddc9a8820
|
1265e6cef320aad5691e63077b16432d6dbf9d54
|
/main.py
|
0250cf777648325f7faf33152f446446a8be19e2
|
[] |
no_license
|
ullikappu/MySite
|
9d8734eb41ac2efbd987cf92b2ed33c3b5eb9ba6
|
70c09f68e99f5868038ec7aab4b9a7e7c2242da1
|
refs/heads/master
| 2023-05-14T20:10:33.940971 | 2019-07-22T16:29:50 | 2019-07-22T16:29:50 | 198,260,499 | 0 | 0 | null | 2023-05-02T18:28:17 | 2019-07-22T16:13:06 |
HTML
|
UTF-8
|
Python
| false | false | 1,498 |
py
|
from flask import Flask, render_template, request, make_response, Response, Request
import datetime
import random
import uuid
SECRETS_DB = {}
app = Flask(__name__)
@app.route("/")
def index():
    """Render the landing page with a demo message, the current year, and a
    fixed list of cities."""
    context = {
        "some_text": "Message from the handler.",
        "current_year": datetime.datetime.now().year,
        "cities": ["Boston", "Vienna", "Paris", "Berlin"],
    }
    return render_template("index.html", **context)
@app.route("/about-me")
def about():
    """Render the static about page."""
    return render_template("about.html")
@app.route("/guessing-game", methods=["GET", "POST"])
def guessing_game():
    """Number-guessing game keyed on a per-client user_id cookie.

    GET: ensure the client has a user_id cookie and a secret in SECRETS_DB,
    then render the page. POST: compare the submitted number against the
    client's secret; on a win, draw a new secret.

    Fixes over the original: a POST with a missing/stale cookie no longer
    raises KeyError (HTTP 500), non-numeric input no longer raises
    ValueError, and the failure message no longer leaks the secret number
    (a debug leftover that defeated the game).
    """
    user_id = request.cookies.get("user_id")

    if request.method == "GET":
        if not user_id:
            user_id = str(uuid.uuid4())
        if user_id not in SECRETS_DB:
            SECRETS_DB[user_id] = random.randint(1, 10)
        resp: Response = make_response(render_template("guessing_game.html"))
        resp.set_cookie("user_id", user_id)
        return resp

    # POST
    if user_id not in SECRETS_DB:
        # No active game for this client: tell the player instead of crashing.
        return render_template(
            "guessing_game.html",
            text="Please load the page first to start a game.")
    number = request.form.get("number")
    try:
        guess = int(number) if number else None
    except ValueError:
        guess = None  # non-numeric input counts as a wrong guess
    if guess == SECRETS_DB[user_id]:
        SECRETS_DB[user_id] = random.randint(1, 10)
        text = "You won"
    else:
        text = "Try Again"
    return render_template("guessing_game.html", text=text)
if __name__ == '__main__':
app.run()
|
[
"[email protected]"
] | |
0c2344ad81e759cfc904c110fd2b6691d39439c6
|
5a52c8cfa4268135316b9688c8f72a7e7dd2dd1b
|
/14.py
|
fe455b5970dc69d39754b4d0eae9baba27a7e352
|
[] |
no_license
|
joketeng/LeetCode
|
4f7fdf45ddc55fe9eec9708f2e27d9c3d898368d
|
49c342fd884702bc4fca386b8b51d5c464cc6191
|
refs/heads/master
| 2020-03-31T18:02:07.777952 | 2018-12-17T16:04:00 | 2018-12-17T16:04:00 | 152,443,941 | 1 | 0 | null | null | null | null |
GB18030
|
Python
| false | false | 711 |
py
|
class Solution:
    def longestCommonPrefix(self, strs):
        """Return the longest common prefix of the strings in *strs*
        ('' when there is none, including for an empty list).

        zip(*strs) yields tuples of the i-th character of every string and
        stops at the shortest string, so the scan never runs past any input.
        (The original bound the accumulator to the name `str`, shadowing the
        builtin, and carried an unused enumerate index.)
        """
        prefix = ''
        for chars in zip(*strs):
            if len(set(chars)) > 1:  # characters diverge at this position
                return prefix
            prefix += chars[0]
        return prefix
|
[
"[email protected]"
] | |
f5a3685cbdf15c80e00b6ef7d6df4403c70af4dc
|
af0f8b9af4c15da99c31f0bee8064a7ca7302385
|
/task2.py
|
f379bc07d1d7404fe87244d039c64e9702ae200e
|
[] |
no_license
|
brittanylindberg98/Homework7
|
ddb25aa985c372952dd9468880f794d57b7d9f0a
|
3807da0bb56827b6e8b94a4cb8d473f5c3d8111d
|
refs/heads/main
| 2023-02-01T02:32:25.067614 | 2020-12-12T05:17:04 | 2020-12-12T05:17:04 | 320,753,926 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,325 |
py
|
def change_working_dir(new_directory='', verbose=False):
    """Change the process working directory.

    new_directory: target directory; when empty, fall back to the directory
        containing this module. (The original passed the *function*
        os.path.realpath to dirname without calling it, raising TypeError.)
    verbose: print the working directory before and after the change.
    """
    import os
    if verbose:
        print('Working directory was: {0}'.format(os.getcwd()))
    target = new_directory
    if target == '':
        # BUG FIX: call realpath on this file instead of passing the function.
        target = os.path.dirname(os.path.realpath(__file__))
    os.chdir(target)
    if verbose:
        print('Working directory is now: {0}'.format(os.getcwd()))
def get_letter_grade(grade):
    """Map a numeric score to a letter grade (90+: A, 80+: B, 70+: C,
    60+: D, otherwise F).

    The original rebound the parameter to the cutoff table and fell off the
    end, returning None for every score; this walks the table instead.
    """
    cutoffs = [
        (90, 'A'),
        (80, 'B'),
        (70, 'C'),
        (60, 'D'),
        (00, 'F'),
    ]
    for minimum, letter in cutoffs:
        if grade >= minimum:
            return letter
    return 'F'  # negative scores
def get_grade_points(score):
    """Map a letter grade to grade points (A=4.0 ... F=0.0).

    Per the pipeline in main() -- applymap(get_letter_grade) followed by
    applymap(get_grade_points) -- this function receives letter grades, so it
    keys on letters. (The original built a malformed set literal and returned
    None for every input.) Unknown grades score 0.0.
    """
    points = {
        'A': 4.00,
        'B': 3.00,
        'C': 2.00,
        'D': 1.00,
        'F': 0.00,
    }
    return points.get(score, 0.00)
def main(testing=False):
    """Read scores.csv, convert scores to letters and then grade points, and
    print each student's GPA (plus the class GPA row).

    testing: accepted for interface compatibility; unused here.
    """
    import pandas as pd
    FILE = 'scores.csv'
    change_working_dir()
    # BUG FIX: the pandas keyword is index_col, not indexy_col (TypeError).
    df_scores = pd.read_csv(FILE, delimiter=',', index_col=0, header=0)
    df_letter_grades = df_scores.applymap(get_letter_grade)
    df_grade_points = df_letter_grades.applymap(get_grade_points)
    df_grade_points['The class GPA is'] = df_grade_points.mean(axis=1)
    df_grade_points = df_grade_points.transpose()
    df_grade_points['GPA'] = df_grade_points.mean(axis=1).round(2)
    series = df_grade_points['GPA']
    print(series.to_string())
|
[
"[email protected]"
] | |
6e7a6db5c0b4633c4d25fb61e1f4d9f2e9b0664f
|
f9ad3e5082181bf1e5a72cc36ab0d7d6aced16fa
|
/detection/dataset/constant.py
|
4363b9a7489c35ca602a0b5f3d1a1c5e14ad83a3
|
[] |
no_license
|
Yaser-wyx/classicNerualNetWork
|
2d1af05b5248d76912696ca34df4825ae846878f
|
f9445457d13206460e3ab03258be8f77801c0a2a
|
refs/heads/master
| 2023-05-31T02:33:46.180581 | 2021-06-11T08:44:18 | 2021-06-11T08:44:18 | 375,714,542 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 755 |
py
|
import os


# Path helpers for the Pascal VOC directory layout. Defined as functions
# rather than name-bound lambdas (PEP 8 E731); the uppercase names are kept
# so existing callers are unaffected.
def ANNOTATIONS(root):
    """Return the 'Annotations' directory under *root*."""
    return os.path.join(root, "Annotations")


def TRAIN_PATH(root):
    """Return the train split file under *root*."""
    return os.path.join(root, "ImageSets", "Main", "train.txt")


def VAL_PATH(root):
    """Return the validation split file under *root*."""
    return os.path.join(root, "ImageSets", "Main", "val.txt")


def TEST_PATH(root):
    """Return the test split file under *root*."""
    return os.path.join(root, "ImageSets", "Main", "test.txt")


def IMAGE_DIR_PATH(root):
    """Return the JPEG image directory under *root*."""
    return os.path.join(root, "JPEGImages")


# The 20 Pascal VOC object classes, in canonical order.
VOC_BBOX_LABEL_NAMES = [
    'aeroplane',
    'bicycle',
    'bird',
    'boat',
    'bottle',
    'bus',
    'car',
    'cat',
    'chair',
    'cow',
    'diningtable',
    'dog',
    'horse',
    'motorbike',
    'person',
    'pottedplant',
    'sheep',
    'sofa',
    'train',
    'tvmonitor']
# Reverse lookup: class name -> index in VOC_BBOX_LABEL_NAMES.
VOC_BBOX_LABEL_NAMES2IDX = {name: idx for idx, name in enumerate(VOC_BBOX_LABEL_NAMES)}
|
[
"[email protected]"
] | |
0669e1b87913ae9f847aa13336e31fbccb0f8b56
|
38af0f486b44b15ef3b748b96e54136c3b072ecf
|
/Project2/Project2/OOAD_Project2_p1c/ZooKeeperInterface.py
|
4b1b04d8ff4cbdd57a73ff440f9614cbceaed907
|
[] |
no_license
|
sankar77/OOAD_Project2
|
9f378e16d0f1f34979e04d4fbb58288735bbf4e5
|
910c1a90c0334d1e51a2061d06e67f68459e2edb
|
refs/heads/master
| 2021-01-04T14:50:14.032528 | 2020-02-14T21:09:32 | 2020-02-14T21:09:32 | 240,597,700 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 192 |
py
|
class ZooKeeperInterface:
    """Interface of no-op stubs for concrete zoo keepers to override --
    looks like an Observer-pattern contract (register/unregister/notify of
    ZooAnnouncer objects); confirm against the implementing classes."""
    def registerZooAnnouncer(self,za):
        """Subscribe *za* to announcements. No-op stub."""
        pass
    def unRegisterZooAnnouncer(self,za):
        """Remove *za* from the subscribers. No-op stub."""
        pass
    def notifyZooAnnouncer(self,annoucement):
        """Forward *annoucement* to registered announcers. No-op stub."""
        pass
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.