metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "20after4/mpp-solar",
"score": 3
} |
#### File: mppsolar/outputs/mqtt.py
```python
import logging
import paho.mqtt.publish as publish
from .baseoutput import baseoutput
log = logging.getLogger("MPP-Solar")
class mqtt(baseoutput):
def __str__(self):
return "mqtt - outputs the to the supplied mqtt broker: eg 'tag'/status/total_output_active_power/value 1250"
def __init__(self, *args, **kwargs) -> None:
log.debug(f"processor.mqtt __init__ kwargs {kwargs}")
def build_msgs(self, *args, **kwargs):
data = self.get_kwargs(kwargs, "data")
tag = self.get_kwargs(kwargs, "tag")
# Build array of mqtt messages
msgs = []
# Remove command and _command_description
cmd = data.pop("_command", None)
data.pop("_command_description", None)
if tag is None:
tag = cmd
# Loop through responses
for key in data:
value = data[key][0]
unit = data[key][1]
# remove spaces
key = key.lower().replace(" ", "_")
log.debug(f"tag {tag}, key {key}, value {value}, unit {unit}")
# 'tag'/status/total_output_active_power/value 1250
# 'tag'/status/total_output_active_power/unit W
msg = {"topic": f"{tag}/status/{key}/value", "payload": value}
msgs.append(msg)
if unit:
msg = {"topic": f"{tag}/status/{key}/unit", "payload": unit}
msgs.append(msg)
log.debug(msgs)
return msgs
def output(self, *args, **kwargs):
log.info("Using output processor: mqtt")
log.debug(f"processor.mqtt.output kwargs {kwargs}")
data = self.get_kwargs(kwargs, "data")
if data is None:
return
tag = self.get_kwargs(kwargs, "tag")
topic = self.get_kwargs(kwargs, "topic")
mqtt_broker = self.get_kwargs(kwargs, "mqtt_broker", "localhost")
mqtt_user = self.get_kwargs(kwargs, "mqtt_user")
mqtt_pass = self.get_kwargs(kwargs, "mqtt_pass")
if mqtt_user is not None and mqtt_pass is not None:
auth = {"username": mqtt_user, "password": <PASSWORD>}
log.info(f"Using mqtt authentication, username: {mqtt_user}, password: [supplied]")
else:
log.debug("No mqtt authentication used")
auth = None
msgs = self.build_msgs(data=data, tag=tag, topic=topic)
publish.multiple(msgs, hostname=mqtt_broker, auth=auth)
```
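For reference, a hedged sketch of what `build_msgs` produces for a single reading and how paho-mqtt's `publish.multiple` helper is then called; the command name, topic values and broker host below are made-up examples, not taken from the repository.
```python
import paho.mqtt.publish as publish

# Example input in the shape the caller passes it: {name: [value, unit], ...}
data = {"_command": "QPIGS", "Total Output Active Power": [1250, "W"]}

# build_msgs(data=data, tag=None) would yield message dicts along these lines
# (key lower-cased, spaces replaced by underscores, one value and one unit topic):
msgs = [
    {"topic": "QPIGS/status/total_output_active_power/value", "payload": 1250},
    {"topic": "QPIGS/status/total_output_active_power/unit", "payload": "W"},
]

# publish them in one go, as output() does above (assumes a broker on localhost)
publish.multiple(msgs, hostname="localhost", auth=None)
```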
#### File: mppsolar/protocols/jk02.py
```python
import logging
from .jkabstractprotocol import jkAbstractProtocol
log = logging.getLogger("MPP-Solar")
NEW_COMMANDS = {
"getCellData": {
"name": "getCellData",
"command_code": "96",
"record_type": "2",
"description": "BLE Cell Data inquiry",
"help": " -- queries the ble device for the cell data",
"type": "QUERY",
"response": [
["hex", 4, "Header", ""],
["hex", 1, "Record Type", ""],
["int", 1, "Record Counter", ""],
["loop", 24, "Voltage Cell", "V", "2ByteHex"],
["discard", 4, "", ""],
["2ByteHex", 1, "Average Cell Voltage", "V"],
["2ByteHex", 1, "Delta Cell Voltage", "V"],
["2ByteHexU", 1, "", ""],
["loop", 25, "Resistance Cell", "Ohm", "2ByteHex"],
["discard", 4, "", ""],
["2ByteHex", 1, "Battery Voltage", "V"],
["discard", 10, "", ""],
["2ByteHexC", 1, "Battery T1", "°C"],
["2ByteHexC", 1, "Battery T2", "°C"],
["2ByteHexC", 1, "MOS Temp", "°C"],
["discard", 4, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["discard", 4, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["uptime", 3, "Time", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["discard", 12, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["2ByteHexU", 1, "", ""],
["rem"],
],
"test_responses": [
bytes.fromhex(
"55aaeb9002b52e0d280dfa0c2e0d2f0d220d220d130d190d1d0d1d0d170d1f0d160dfb0c1f0d00000000000000000000000000000000ffff00001c0d350004029b00c600a000b300bc00cc00be00b100b4002d013d01b000a100ab00b200ad0000000000000000000000000000000000000000000000bcd1000000000000000000001e0116013c010000000000636b0c0300400d030000000000dc4d010064000000781e16000101480a000000000000000000000000070101000000980400000000260141400000000037feffff00000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080"
),
bytes.fromhex(
"55aaeb9002bb2e0d280dfa0c2e0d2f0d220d220d130d190d1d0d1d0d170d1f0d160dfb0c1f0d00000000000000000000000000000000ffff00001b0d350004029b00c600a000b300bc00cc00be00b100b4002d013d01b000a100ab00b200ad0000000000000000000000000000000000000000000000b8d1000000000000000000001e0114013c010000000000636b0c0300400d030000000000dc4d0100640000007a1e16000101480a000000000000000000000000070101000000980400000000260141400000000037feffff00000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000081"
),
],
"regex": "",
},
}
class jk02(jkAbstractProtocol):
"""
JK02 - Handler for JKBMS 2 byte data communication
- e.g. ASAS = ??V
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__()
self._protocol_id = b"JK02"
self.COMMANDS.update(NEW_COMMANDS)
self.STATUS_COMMANDS = [
"getCellData",
]
self.SETTINGS_COMMANDS = [
"getInfo",
]
self.DEFAULT_COMMAND = "getCellData"
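# Hedged note (decoding details assumed, not taken from this file): the
# "2ByteHex" response fields above are two-byte little-endian readings, so a
# raw pair such as 0x2E 0x0D would decode as
#   int.from_bytes(b"\x2e\x0d", "little")  # -> 3374
# which, assuming a millivolt scale, reads back as 3.374 V for that cell.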
``` |
{
"source": "20AI/revenius",
"score": 3
} |
#### File: revenius/revenius/execute.py
```python
import random
import numpy as np
import gym
import revenius.gymenvs as gymenvs
from revenius.gymenvs.util import get_possible_acts
def random_walk(env, n_episode=5):
"""Execute random walk."""
env.reset()
for i_episode in range(n_episode):
observation = env.reset()
for t in range(100):
enables = env.possible_actions
if len(enables) == 0:
action = env.board_size**2 + 1
else:
action = random.choice(enables)
observation, reward, done, info = env.step(action)
env.render()
print('reward :', reward)
if done:
print("Episode finished after {} timesteps".format(t+1))
black_score = len(np.where(
env.state[0, :, :] == 1)[0])
print(black_score)
break
return env
def _get_dqn_action(dqn, env, observation):
q_values = dqn.compute_q_values(
observation.reshape((1,)+env.obs_space.shape))
action = dqn.policy.select_action(q_values)
return action
def _show_possible_acts(observation, possible_acts):
from revenius.gymenvs.util import act2coord
_coord = [act2coord(observation, _p) for _p in possible_acts]
print("Possible acts :")
print([(_c[0]+1, _c[1]+1) for _c in _coord])
def human_vs_dqn(dqn, plr_color=0):
"""Execute human vs dqn."""
assert gymenvs
from revenius.gymenvs.util import coord2act
env = gym.make('Reversi8x8_dqn-v0')
env.set_model(dqn)
observation = env.reset()
done = False
while not done:
env.render()
possible_acts = get_possible_acts(observation, plr_color)
_show_possible_acts(observation, possible_acts)
action = -1
while action < 0:
_input = input("Input action : ")
action_coord = [int(_i)-1 for _i in _input.split(',')]
candidate = coord2act(observation, action_coord)
if candidate in possible_acts:
action = candidate
print(action)
observation, _, done, _ = env.step(action)
def dqn_vs_random(dqn, verbose=False, env_name='Reversi8x8-v0'):
"""Execute dqn vs random."""
assert gymenvs
env = gym.make(env_name)
plr_color = 0
observation = env.reset()
done = False
while not done:
if verbose:
env.render()
possible_acts = get_possible_acts(observation, plr_color)
q_values = dqn.compute_q_values(
observation.reshape((1,)+env.obs_space.shape))
action = possible_acts[q_values[possible_acts].argmax()]
observation, reward, done, _ = env.step(action)
return env, reward
```
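A minimal usage sketch for the helpers above; it assumes that importing `revenius.gymenvs` registers the `Reversi8x8-v0` environment, as the functions in this module themselves rely on.
```python
import gym

import revenius.gymenvs  # noqa: F401  -- importing registers the Reversi environments
from revenius.execute import random_walk

env = gym.make('Reversi8x8-v0')
random_walk(env, n_episode=1)
```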
#### File: revenius/gymenvs/opponent.py
```python
import numpy as np
from .util import get_possible_acts
def make_random_policy(np_random, board_size):
"""Make random policy."""
def random_policy(state, plr_color):
possible_places = get_possible_acts(state, plr_color)
if len(possible_places) == 0:
return board_size**2 + 1
a = np_random.randint(len(possible_places))
return possible_places[a]
return random_policy
def make_error_policy(opp):
"""Make error policy."""
if opp == 'dqn':
def error_policy(state, plr_color):
raise Exception('Please do set_model first.')
return error_policy
def make_dqn_policy(model, board_size):
"""Make dqn policy."""
def dqn_policy(state, plr_color):
possible_places = get_possible_acts(state, plr_color)
q_values = model.compute_q_values(
state.reshape((1,)+state.shape))
# action = model.policy.select_action(q_values)
if len(possible_places) == 0:
return board_size**2 + 1
possible_q = np.zeros(len(q_values))
for _i, _q in enumerate(q_values):
if _i not in possible_places:
_q = np.min(q_values)
possible_q[_i] = _q
return np.argmax(possible_q)
return dqn_policy
```
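As a design note on `dqn_policy`: copying the minimum Q-value into illegal slots can still let `argmax` land on an illegal action when values tie. A hedged alternative sketch (not from the repository) that guarantees a legal choice is to mask illegal actions with negative infinity:
```python
import numpy as np


def mask_illegal_actions(q_values, possible_places):
    """Return the legal action index with the highest Q-value."""
    masked = np.full(len(q_values), -np.inf)
    masked[possible_places] = np.asarray(q_values)[possible_places]
    return int(np.argmax(masked))
```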
#### File: revenius/samples/demo.py
```python
import sys
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
import gym
from rl.agents.dqn import DQNAgent
from rl.policy import EpsGreedyQPolicy
from rl.memory import SequentialMemory
sys.path.append('./')
def build_model(env):
"""Build model."""
nb_actions = env.action_space.n
model = Sequential()
model.add(Flatten(input_shape=(1,)+env.obs_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation="linear"))
return model
def build_dqn(model, nb_actions):
"""Build DQN agent."""
memory = SequentialMemory(limit=50000, window_length=1)
policy = EpsGreedyQPolicy(eps=0.001)
dqn = DQNAgent(model=model, nb_actions=nb_actions, gamma=0.99,
memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
return dqn
def main():
"""Exec main."""
import revenius.gymenvs as gymenvs
from revenius.execute import human_vs_dqn
print(gymenvs)
env = gym.make('Reversi8x8-v0')
nb_actions = env.action_space.n
model = build_model(env)
dqn = build_dqn(model, nb_actions)
dqn.fit(env, nb_steps=500, visualize=False, verbose=2)
human_vs_dqn(dqn)
if __name__ == '__main__':
main()
```
#### File: revenius/tests/test_startup.py
```python
import random
import sys
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import EpsGreedyQPolicy
from rl.memory import SequentialMemory
sys.path.append('./')
def _get_dqn(env):
nb_actions = env.action_space.n
model = Sequential()
model.add(Flatten(input_shape=(1,)+env.obs_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation="linear"))
memory = SequentialMemory(limit=5, window_length=1)
policy = EpsGreedyQPolicy(eps=0.001)
dqn = DQNAgent(model=model, nb_actions=nb_actions, gamma=0.99,
memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
return dqn
def test_start_env():
import revenius.gymenvs as gymenvs
assert gymenvs
env = gym.make('Reversi8x8-v0')
env.reset()
enables = env.possible_actions
action = random.choice(enables)
observation, reward, done, info = env.step(action)
def test_dqn():
import revenius.gymenvs as gymenvs
assert gymenvs
env = gym.make('Reversi8x8-v0')
dqn = _get_dqn(env)
history = dqn.fit(env, nb_steps=5, visualize=False, verbose=2)
dqn.test(env, nb_episodes=1, visualize=True)
print(history)
def test_dqn_policy():
import revenius.gymenvs as gymenvs
assert gymenvs
env = gym.make('Reversi8x8_dqn-v0')
dqn_opp = _get_dqn(env)
env.set_model(dqn_opp)
dqn = _get_dqn(env)
history = dqn.fit(env, nb_steps=5, visualize=False, verbose=2)
dqn.test(env, nb_episodes=1, visualize=True)
print(history)
``` |
{
"source": "20bart/homeassistant_electrolux_status",
"score": 2
} |
#### File: custom_components/electrolux_status/binary_sensor.py
```python
from homeassistant.components.binary_sensor import BinarySensorEntity
from .entity import ElectroluxStatusEntity
from .const import BINARY_SENSOR
from .const import DOMAIN
async def async_setup_entry(hass, entry, async_add_devices):
"""Setup binary sensor platform."""
coordinator = hass.data[DOMAIN][entry.entry_id]
appliances = coordinator.data.get('appliances', None)
if appliances is not None:
for appliance_id, appliance in appliances.found_appliances.items():
async_add_devices(
[
ElectroluxBinarySensor(coordinator, entry, appliance_id, entity.entity_type, entity.attr)
for entity in appliance.entities if entity.entity_type == BINARY_SENSOR
]
)
class ElectroluxBinarySensor(ElectroluxStatusEntity, BinarySensorEntity):
"""Electrolux Status binary_sensor class."""
@property
def is_on(self):
"""Return true if the binary_sensor is on."""
return self.get_entity.state
``` |
{
"source": "20BBrown14/Discord_MMO_Bot",
"score": 3
} |
#### File: Discord_MMO_Bot/commands/pizza.py
```python
async def command(client, message, channel):
await client.send_message(channel, "I don't have money for pizza. I'll get you back next Tuesday, though")
TRIGGER = '!pizza'
```
#### File: Discord_MMO_Bot/commands/tradeskilladd.py
```python
import json
from fuzzywuzzy import fuzz
import os
#!tradeskilladd someName, level, class, notes
#!tradeskilladd help
tradeskills = ['Carpenter', 'Provisioner', 'Woodworker', 'Weaponsmith', 'Armorer', 'Tailor', 'Alchemist', 'Jeweler', 'Sage', 'Tinkerer', 'Adorner']
async def unrecognized_tradeskill(client, bad_string, message):
space_index = bad_string.strip().find(' ')
if(space_index == -1):
bad_name = bad_string.strip()
elif(space_index > 0):
bad_name = bad_string.strip()[:space_index].lower()
potential_names = []
for name in tradeskills:
ratio = fuzz.ratio(bad_name, name.lower())
print(name, ratio)
if(ratio >= 60):
potential_names.append(name)
if(len(potential_names) > 0):
await client.send_message(message.author, "Unrecognized tradeskill `%s`. Did you mean one of the following: `%s`" % (bad_string, ', '.join(potential_names)))
return
# fuzzy compare
await client.send_message(message.author, "Unrecognized tradeskill `%s`. Please use a real eq2 tradeskill profession name" % bad_string)
async def tradeskill_help(client, message, channel):
help_message = """
`!tradeskilladd [name], [level], [profession/class], [notes(option)]` is used to add yourself to a list of tradeskillers that the bot can track to supply information to others.
An example usage would be `!tradeskilladd Nibikk, 50, Provisioner, All books`
This example command says that the in-game name is Nibikk, the level is 50, the profession is Provisioner, and the notes section is 'All books'.
Name, level, and profession/class are required, but notes are optional.
To edit an entry already in the tradeskill information you must own it or be an admin.
"""
await client.send_message(message.author, help_message)
async def command(client, message, channel, delete_message, tradeskill_information):
await delete_message(client, message)
#load tradeskill_info from file
if(tradeskill_information == json.loads('{}') and os.path.exists('tradeskill_info.json')):
f = open('tradeskill_info.json', 'r')
tradeskill_information = json.loads(f.read())
f.close()
server_id = message.server.id
message_content = message.content.strip()
if(message_content.find(' ') < 0):
await tradeskill_help(client, message, channel)
return tradeskill_information
if(message_content[15:].lower().strip() == 'help'):
await tradeskill_help(client, message, channel)
return tradeskill_information
arguments = message_content[15:].split(',')
if(len(arguments) < 3):
await tradeskill_help(client, message, channel)
return tradeskill_information
if(len(arguments) > 4):
new_args = []
new_args.append(arguments[0])
new_args.append(arguments[1])
new_args.append(arguments[2])
notes_args = ''
for i in range(3, len(arguments)):
notes_args = notes_args + arguments[i]
new_args.append(notes_args)
arguments = new_args
char_name = arguments[0].strip()
char_level = arguments[1].strip()
char_profession = arguments[2].strip().lower().capitalize()
if(not char_profession in tradeskills):
await unrecognized_tradeskill(client, char_profession, message)
return tradeskill_information
char_notes = ''
if(len(arguments) > 3):
char_notes = arguments[3].strip()
new_json_raw_string = '{"level": "%s", "notes": "%s", "owner": "%s"}' % (char_level, char_notes, str(message.author.id))
if(server_id in tradeskill_information):
if(char_profession in tradeskill_information[server_id]):
if(char_name in tradeskill_information[server_id][char_profession]):
stored_owner = tradeskill_information[server_id][char_profession][char_name]['owner']
if(not stored_owner == str(message.author.id)):
await client.send_message(message.author, "There is already an entry with that character name and profession combination and you do not own it. If you think this is in error contact a server mod.")
return tradeskill_information
tradeskill_information[server_id][char_profession][char_name] = json.loads(new_json_raw_string)
else:
tradeskill_information[server_id][char_profession] = json.loads('{"%s": %s}' % (char_name, new_json_raw_string))
else:
tradeskill_information[server_id] = json.loads('{"%s": {"%s": %s}}' % (char_profession, char_name, new_json_raw_string))
tradeskill_info_file = open('tradeskill_info.json', 'w')
tradeskill_info_file.write(json.dumps(tradeskill_information))
tradeskill_info_file.close()
await client.send_message(message.author, "Successfully added entry to tradeskill information.")
return tradeskill_information
TRIGGER = '!tradeskilladd'
```
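For orientation, a hedged illustration of the nested structure `tradeskill_information` ends up holding after the example command from `tradeskill_help`; the Discord ids below are made up.
```python
tradeskill_information = {
    "123456789012345678": {                # server_id
        "Provisioner": {                   # profession
            "Nibikk": {                    # character name
                "level": "50",
                "notes": "All books",
                "owner": "987654321098765432",
            }
        }
    }
}
```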
#### File: Discord_MMO_Bot/commands/weather.py
```python
import requests
import json
import discord
import datetime
from uszipcode import SearchEngine
import pytz
import calendar
from custom_exceptions import zipcode_invalid_exception
def weather_help():
weather_help_message="""
Available Options:
`!weather zip=#####`
`!weather forecast zip=64063`"""
return weather_help_message
def degree_to_cardinal_direction(degree):
arr=["N","NNE","NE","ENE","E","ESE", "SE", "SSE","S","SSW","SW","WSW","W","WNW","NW","NNW"]
val = int((degree/22.5)+.5)
return arr[(val % 16)]
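# For example, degree_to_cardinal_direction(225) returns "SW":
# 225 / 22.5 + 0.5 = 10.5, int() -> 10, and arr[10] is "SW".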
def meter_per_sec_to_mph(speed):
mph = str(round(int(speed) * 2.237, 2))
return ("%s m/s (%s mph)" % (speed, mph))
def kelvin_to_C_and_F_string(kelvin_temp):
celcius = str(round(int(kelvin_temp) - 273.15, 1))
fahrenheit = str(round((int(kelvin_temp) - 273.15) * (9/5) + 32, 1))
return "%s°C (%s°F)" % (celcius, fahrenheit)
def mm_to_inches(rain_mm):
inches = str(round(int(rain_mm) / 25.4, 5))
return "%s mm (%s in)" % (rain_mm, inches)
def precip_string(precip):
if('1h' in precip and '3h' in precip):
return "%s in last 1 hour and %s in last 3 hour" % (mm_to_inches(precip['1h']), mm_to_inches(precip['3h']))
elif('1h' in precip and not '3h' in precip):
return "%s in last 1 hour" % (mm_to_inches(precip['1h']))
elif('3h' in precip and not '1h' in precip):
return "%s in last 3 hours" % (mm_to_inches(precip['3h']))
else:
return False
def create_embeded(title, description, icon, min_temp, max_temp, current_temp, humidity, wind_speed, wind_degrees, wind_direction, rain, snow):
embed=discord.Embed(title="%s's Weather" % title, description="%s" % description, color=0x00f900)
if(icon):
embed.set_thumbnail(url="http://openweathermap.org/img/w/%s.png" % icon)
if(min_temp):
embed.add_field(name="Min Temp", value=min_temp, inline=True)
if(max_temp):
embed.add_field(name="Max Temp", value=max_temp, inline=True)
if(current_temp):
embed.add_field(name="Current Temp", value=current_temp, inline=True)
if(humidity):
embed.add_field(name="Humidity", value="%s%%" % (humidity), inline=True)
if(wind_speed and wind_degrees and wind_direction):
embed.add_field(name="Wind Speed", value="%s" % wind_speed, inline=True)
embed.add_field(name="Wind Direction", value="%s° (%s)" % (wind_degrees, wind_direction), inline=True)
if(rain):
embed.add_field(name='Rain', value=rain, inline=True)
if(snow):
embed.add_field(name='Snow', value=snow, inline=True)
return embed
def create_forecast_embeded(title, description, first_day_times, first_day_temps, first_day_conditions, first_day_humidity, average_temps, weather_conditions, average_humidity):
calendarList = list(calendar.day_name)
next_days = []
first_day = datetime.datetime.fromtimestamp(first_day_times[0])
for i in range(0, 4):
first_day = first_day + datetime.timedelta(days=1)
next_days.append(first_day)
embed=discord.Embed(title="%s's Weather" % title, description="%s" % description, color=0x00f900)
for i in range(0, len(first_day_times)):
date = datetime.datetime.fromtimestamp(first_day_times[i])
embed.add_field(name="%s, %s" % (date, calendarList[date.weekday()]), value="%s | %s | %s%% humidity" % (first_day_conditions[i], kelvin_to_C_and_F_string(first_day_temps[i]), first_day_humidity[i]), inline=False)
embed.add_field(name="%s, %s" % (next_days[0].strftime("%Y-%m-%d"), calendarList[next_days[0].weekday()]), value="%s | %s | %s%% humidity" % (weather_conditions[0], kelvin_to_C_and_F_string(average_temps[0]), round(average_humidity[0], 2)), inline=False)
embed.add_field(name="%s, %s" % (next_days[1].strftime("%Y-%m-%d"), calendarList[next_days[1].weekday()]), value="%s | %s | %s%% humidity" % (weather_conditions[1], kelvin_to_C_and_F_string(average_temps[1]), round(average_humidity[1], 2)), inline=False)
embed.add_field(name="%s, %s" % (next_days[2].strftime("%Y-%m-%d"), calendarList[next_days[2].weekday()]), value="%s | %s | %s%% humidity" % (weather_conditions[2], kelvin_to_C_and_F_string(average_temps[2]), round(average_humidity[2], 2)), inline=False)
embed.add_field(name="%s, %s" % (next_days[3].strftime("%Y-%m-%d"), calendarList[next_days[3].weekday()]), value="%s | %s | %s%% humidity" % (weather_conditions[3], kelvin_to_C_and_F_string(average_temps[3]), round(average_humidity[3], 2)), inline=False)
return embed
def find_zip_info(zipcode):
search = SearchEngine(simple_zipcode=True)
zipcode_info = search.by_zipcode(zipcode)
if(zipcode_info.zipcode == None):
raise zipcode_invalid_exception
return zipcode_info.major_city, zipcode_info.state, zipcode_info.timezone
def update_cache(option, zipcode, item, weather_cache):
if(option == 'forecast'):
if(not 'forecast' in weather_cache):
weather_cache['forecast'] = json.loads('{}')
now = datetime.datetime.now().timestamp()
item['cached_time'] = now
weather_cache['forecast'][zipcode] = item
return weather_cache
if(option == ''):
now = datetime.datetime.now().timestamp()
item['cached_time'] = now
weather_cache[zipcode] = item
return weather_cache
"""
Weather command
@param client: The discord client, generally assumed to be the bot user itself
@param message: The message the discord bot is responding to
@param channel: The channel to send the command response to
@param delete_message: Function to try to delete message
@param weather_cache: Cached weather objects
@param weather_api_key: Weather api key to hit weather api endpoint with
@result: Sends a message always
@result: Deletes messages always
@result: Hits weather api when needed to get weather information
"""
async def command(client, message, channel, delete_message, weather_cache, weather_api_key):
await delete_message(client, message)
message_content = message.content[9:]
arguments = message_content.split(' ')
if(arguments[0].strip().lower() == 'forecast'): #check if user wants 5 day forecast
weather_options = message.content[18:]
equals_index = weather_options.find('=')
if(equals_index < 0): #check if user provided zipcode
if(message_content.strip() == 'help'): #check if user is invoking weather help
await client.send_message(message.author, "%s" % weather_help())
return weather_cache
await client.send_message(message.author, "Please ensure you're using one of the following options:\n%s" % weather_help())
return weather_cache
if(weather_options[:equals_index].strip().lower() == 'zip'): #check if user provided zip
zipcode = weather_options[equals_index+1:].strip()
city, state, timezone = None, None, None
try:
city, state, timezone = find_zip_info(zipcode)
except zipcode_invalid_exception: #Catch invalid zip code errors
await client.send_message(message.author, "Zipcode (%s) invalid. Only US zip codes are supported." % zipcode)
return weather_cache
try:
timezone = pytz.timezone('US/%s' % timezone)
except pytz.exceptions.UnknownTimeZoneError: #unknown timezone error
await client.send_message(message.author, "Zipcode (%s) is in timezone %s and is not supported or invalid. Contact bot developer if you think this is in error." % zipcode, timezone)
return weather_cache
await client.send_message(message.channel, "%s %s %s" % (city, state, timezone))
now = datetime.datetime.now().timestamp()
weather_data = None
if('forecast' in weather_cache and zipcode in weather_cache['forecast'] and now - weather_cache['forecast'][zipcode]['cached_time'] < 600): #check if forecast exists in cache and is still valid
weather_data = weather_cache['forecast'][zipcode]
else:
url = "http://api.openweathermap.org/data/2.5/forecast?zip=%s&APPID=%s" % (zipcode, weather_api_key)
response = requests.get(url).text
weather_data = json.loads(response)
weather_cache = update_cache('forecast', zipcode, weather_data, weather_cache)
first_forecast_time = None
average_temps = [0, 0, 0, 0]
weather_condition = ['', '', '', '']
humidity_average = [0, 0, 0, 0]
first_day_times = []
first_day_temps = []
first_day_conditions = []
first_day_humidity = []
if('cod' in weather_data and weather_data['cod'] == '200'):
now = datetime.datetime.now().timestamp()
if('list' in weather_data):
if(len(weather_data['list']) > 0):
first_forecast_time = weather_data['list'][0]['dt']
else:
await client.send_message(message.author, "Something went wrong gathering data. Please try again later.")
return weather_cache
for forecast in weather_data['list']:
if('dt' in forecast):
if(forecast['dt'] < first_forecast_time + ( 60*60*24)): # first 24 hours of forecast
first_day_times.append(forecast['dt'])
first_day_temps.append(forecast['main']['temp'])
first_day_conditions.append(forecast['weather'][0]['description'])
first_day_humidity.append(forecast['main']['humidity'])
elif(forecast['dt'] < first_forecast_time + (60 * 60 * 24 * 2)): #second 24 hours of forecast
average_temps[0] = (average_temps[0] + forecast['main']['temp']) / 2 if average_temps[0] != 0 else forecast['main']['temp']
if(forecast['weather'][0]['main'] == 'Rain' or forecast['weather'][0]['main'] == 'Snow'):
weather_condition[0] = forecast['weather'][0]['description']
elif(weather_condition[0] == ''):
weather_condition[0] = forecast['weather'][0]['description']
humidity_average[0] = (humidity_average[0] + forecast['main']['humidity']) / 2 if humidity_average[0] != 0 else forecast['main']['humidity']
elif(forecast['dt'] < first_forecast_time + (60 * 60 * 24 * 3)): #third 24 hours of forecast
average_temps[1] = (average_temps[1] + forecast['main']['temp']) / 2 if average_temps[1] != 0 else forecast['main']['temp']
if(forecast['weather'][0]['main'] == 'Rain' or forecast['weather'][0]['main'] == 'Snow'):
weather_condition[1] = forecast['weather'][0]['description']
elif(weather_condition[1] == ''):
weather_condition[1] = forecast['weather'][0]['description']
humidity_average[1] = (humidity_average[1] + forecast['main']['humidity']) / 2 if humidity_average[1] != 0 else forecast['main']['humidity']
elif(forecast['dt'] < first_forecast_time + (60 * 60 * 24 * 4)): #fourth 24 hour of forecast
average_temps[2] = (average_temps[2] + forecast['main']['temp']) / 2 if average_temps[2] != 0 else forecast['main']['temp']
if(forecast['weather'][0]['main'] == 'Rain' or forecast['weather'][0]['main'] == 'Snow'):
weather_condition[2] = forecast['weather'][0]['description']
elif(weather_condition[2] == ''):
weather_condition[2] = forecast['weather'][0]['description']
humidity_average[2] = (humidity_average[2] + forecast['main']['humidity']) / 2 if humidity_average[2] != 0 else forecast['main']['humidity']
elif(forecast['dt'] < first_forecast_time + (60 * 60 * 24 * 5)): #fifth 24 hour of forecast
average_temps[3] = (average_temps[3] + forecast['main']['temp']) / 2 if average_temps[3] != 0 else forecast['main']['temp']
if(forecast['weather'][0]['main'] == 'Rain' or forecast['weather'][0]['main'] == 'Snow'):
weather_condition[3] = forecast['weather'][0]['description']
elif(weather_condition[3] == ''):
weather_condition[3] = forecast['weather'][0]['description']
humidity_average[3] = (humidity_average[3] + forecast['main']['humidity']) / 2 if humidity_average[3] != 0 else forecast['main']['humidity']
else:
await client.send_message(message.author, 'Something went wrong. Sorry about that. Please try again later.')
return weather_cache
#create embeded
embeded = create_forecast_embeded(zipcode,\
"All times in zipcode's local time zone", \
first_day_times, \
first_day_temps, \
first_day_conditions, \
first_day_humidity, \
average_temps, \
weather_condition, \
humidity_average, \
)
await client.send_message(channel, embed=embeded)
return weather_cache
else:
await client.send_message(message.author, 'Something went wrong. Sorry about that. Please try again later.')
return weather_cache
else:
await client.send_message(message.author, weather_data['message'])
return weather_cache
elif( 'zip' in arguments[0].strip().lower()):
equals_index = message_content.find('=')
if(equals_index < 0): #option not provided
if(message_content.strip() == 'help'): #weather help command invoked
await client.send_message(message.author, "%s" % weather_help())
return weather_cache
await client.send_message(message.author, "Please ensure you're using one of the following options:\n%s" % weather_help())
return weather_cache
if(message_content[:equals_index].strip().lower() == 'zip'):
zipcode = message_content[equals_index+1:].strip()
now = datetime.datetime.now().timestamp()
weather_data = None
if(zipcode in weather_cache and now - weather_cache[zipcode]['cached_time'] < 600):
weather_data = weather_cache[zipcode]
else:
url = "http://api.openweathermap.org/data/2.5/weather?zip=%s&APPID=%s" % (zipcode, weather_api_key)
response = requests.get(url).text
weather_data = json.loads(response)
weather_cache = update_cache('', zipcode, weather_data, weather_cache)
if('cod' in weather_data and weather_data['cod'] == 200):
weather = weather_data["weather"][0] if 'weather' in weather_data else []
main = weather_data["main"] if 'main' in weather_data else []
wind = weather_data["wind"] if 'wind' in weather_data else []
rain = weather_data["rain"] if 'rain' in weather_data else []
snow = weather_data["snow"] if 'snow' in weather_data else []
weather_embeded = create_embeded(zipcode, \
"%s / %s" % (weather['main'], weather['description']) if 'main' in weather and 'description' in weather else 'Weather', \
weather["icon"] if 'icon' in weather else False, \
kelvin_to_C_and_F_string(main["temp_min"]) if 'temp_min' in main else False, \
kelvin_to_C_and_F_string(main["temp_max"]) if 'temp_max' in main else False, \
kelvin_to_C_and_F_string(main["temp"]) if 'temp' in main else False, \
main["humidity"] if 'humidity' in main else 'N/A', \
meter_per_sec_to_mph(wind['speed']) if 'speed' in wind else False, \
wind['deg'] if 'deg' in wind else False, \
degree_to_cardinal_direction(wind['deg']) if 'deg' in wind else False, \
precip_string(rain), \
precip_string(snow) \
)
await client.send_message(channel, embed=weather_embeded)
return weather_cache
else:
await client.send_message(message.author, weather_data['message'])
else:
await client.send_message(message.author, "%s" % weather_help())
TRIGGER = '!weather'
```
#### File: Discord_MMO_Bot/rules/lfg_channel_clean.py
```python
async def rule(client, message, delete_message):
await delete_message(client, message)
def APPLIES(client, message):
if(message.channel.name and message.channel.name.lower() == 'lfg' and message.author != client.user):
return True
return False
``` |
{
"source": "20c/django-inet",
"score": 2
} |
#### File: src/django_inet/models.py
```python
import ipaddress
import warnings
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, RegexValidator
from django.core.validators import URLValidator as DjangoURLValidator
from django.db import models
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
class ConvertOnAssign:
"""
Calls `field.to_python()` on assign
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, typ=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
class ConvertOnAssignField(models.Field):
def contribute_to_class(self, cls, name):
super().contribute_to_class(cls, name)
setattr(cls, name, ConvertOnAssign(self))
def addr_ctor(version=None):
if version:
if version == 4:
return ipaddress.IPv4Address
elif version == 6:
return ipaddress.IPv6Address
else:
raise ValueError("unknown version")
else:
return ipaddress.ip_address
def prefix_ctor(version=None):
if version:
if version == 4:
return ipaddress.IPv4Network
elif version == 6:
return ipaddress.IPv6Network
else:
raise ValueError("unknown version")
else:
return ipaddress.ip_network
class IPAddressValidator:
"""
Validates values to be either a v4 or 6 ip address depending
on the version of the field it is attached to
"""
def __init__(self, field):
self.field = field
def __call__(self, value):
# can't use ctor here because serializer fields don't have it
wrap_ip_ctor(addr_ctor(self.field.version))(value)
class IPNetworkValidator:
"""
Validates values to be either a v4 or v6 prefix
"""
def __init__(self, field):
self.field = field
def __call__(self, value):
# can't use ctor here because serializer fields don't have it
wrap_ip_ctor(prefix_ctor(self.field.version))(value)
class URLValidator(DjangoURLValidator):
schemes = ["http", "https", "ftp", "ftps", "telnet"]
class URLField(models.URLField):
default_validators = [URLValidator()]
def __init__(self, *args, **kwargs):
warnings.warn(
"URLField has been deprecated and will be removed in version 1",
DeprecationWarning,
stacklevel=2,
)
kwargs["max_length"] = 255
super().__init__(*args, **kwargs)
class ASNField(models.PositiveIntegerField):
"""
Autonomous System Number
"""
def __init__(self, **kwargs):
# append MinValueValidator
validators = kwargs.get("validators", [])
validators.append(MinValueValidator(0))
kwargs.update(validators=validators)
super().__init__(**kwargs)
def wrap_ip_ctor(ctor):
def func(value):
try:
return ctor(smart_str(value))
except (ValueError, ipaddress.AddressValueError) as e:
raise ValidationError(e)
return func
class IPAddressField(ConvertOnAssignField):
"""
IP Address
"""
empty_strings_allowed = True
max_length = 39
description = _("IP Address")
version = None
def __init__(self, **kwargs):
kwargs["max_length"] = self.max_length
self.version = kwargs.pop("version", None)
self._ctor = wrap_ip_ctor(addr_ctor(self.version))
super().__init__(**kwargs)
self.validators.append(IPAddressValidator(self))
def get_internal_type(self):
return "CharField"
def get_prep_value(self, value):
if value:
return str(value)
return None
def from_db_value(self, value, expression, connection):
if not value:
return None
return self._ctor(value)
def to_python(self, value):
if isinstance(value, ipaddress._BaseAddress):
return value
if not value:
return None
return self._ctor(value)
def value_to_string(self, obj):
value = self.value_from_object(obj)
return str(value)
class IPNetworkField(ConvertOnAssignField):
empty_strings_allowed = True
max_length = 43
description = _("IP Prefix")
version = None
def __init__(self, **kwargs):
kwargs["max_length"] = self.max_length
self.version = kwargs.pop("version", None)
self._ctor = wrap_ip_ctor(prefix_ctor(self.version))
super().__init__(**kwargs)
self.validators.append(IPNetworkValidator(self))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, ipaddress._BaseNetwork):
return value
if not value:
return None
return self._ctor(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return smart_str(value)
def value_to_string(self, obj):
return smart_str(self.value_from_object(obj))
# Alias to original name
IPPrefixField = IPNetworkField
class MacAddressField(ConvertOnAssignField):
""" """
empty_strings_allowed = True
max_length = 17
description = _("Mac Address")
default_error_messages = {}
default_validators = [RegexValidator(r"(?i)^([0-9a-f]{2}[-:]){5}[0-9a-f]{2}$")]
def __init__(self, **kwargs):
kwargs["max_length"] = self.max_length
super().__init__(**kwargs)
def get_internal_type(self):
return "CharField"
```
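The test module below imports a `FullModel` whose source is not included here, so the following is only a hedged sketch of what such a model could look like; the field names match the ones the tests exercise, everything else is assumed.
```python
from django.db import models

from django_inet.models import (
    ASNField,
    IPAddressField,
    IPNetworkField,
    MacAddressField,
    URLField,
)


class FullModel(models.Model):
    asn = ASNField(blank=True, null=True)
    ip_address = IPAddressField(blank=True, null=True)
    ipv4 = IPAddressField(version=4, blank=True, null=True)
    ipv6 = IPAddressField(version=6, blank=True, null=True)
    prefix = IPNetworkField(blank=True, null=True)
    mac = MacAddressField(blank=True, null=True)
    url = URLField(blank=True, null=True)
```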
#### File: django-inet/tests/test_models.py
```python
import ipaddress
import pytest
from django.core.exceptions import ValidationError
from django.test import TestCase
from models import FullModel
from django_inet.models import (
ASNField,
IPAddressField,
IPNetworkField,
IPPrefixField,
MacAddressField,
URLField,
)
def assert_ip_validator(obj):
"""
assert the validator is set correctly and referring to the correct object
"""
assert 0 == len(obj.default_validators)
assert 1 == len(obj.validators)
assert obj == obj.validators[0].field
assert obj.version == obj.validators[0].field.version
class ModelTests(TestCase):
"""test model functionality"""
def test_init(self):
new0 = URLField()
new1 = URLField()
assert 1 == len(new0.default_validators)
assert 1 == len(new1.default_validators)
new0 = ASNField()
new1 = ASNField()
assert 0 == len(new0.default_validators)
assert 0 == len(new1.default_validators)
new0 = IPAddressField()
new1 = IPAddressField()
assert_ip_validator(new0)
assert_ip_validator(new1)
new0 = IPNetworkField()
new1 = IPNetworkField()
assert_ip_validator(new0)
assert_ip_validator(new1)
new0 = IPPrefixField()
new1 = IPPrefixField()
assert_ip_validator(new0)
assert_ip_validator(new1)
new0 = MacAddressField()
new1 = MacAddressField()
assert 1 == len(new0.default_validators)
assert 1 == len(new1.default_validators)
def test_blank(self):
model = FullModel()
model.full_clean()
def test_asn(self):
model = FullModel()
model.asn = 42
assert 42 == model.asn
model.full_clean()
with pytest.raises(ValidationError):
model.asn = "invalid"
model.full_clean()
with pytest.raises(ValidationError):
model.asn = -1
model.full_clean()
model.asn = 4294967295
model.full_clean()
assert model.asn == 4294967295
def test_ipaddress(self):
model = FullModel()
model.ip_address = "10.0.0.0"
assert ipaddress.ip_address("10.0.0.0") == model.ip_address
with pytest.raises(ValidationError):
model.ip_address = "invalid"
def test_ipv4(self):
model = FullModel()
model.ipv4 = "10.0.0.0"
assert ipaddress.ip_address("10.0.0.0") == model.ipv4
with pytest.raises(ValidationError):
model.ipv4 = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
def test_ipv6(self):
model = FullModel()
model.ipv6 = "10::"
assert ipaddress.ip_address("10::") == model.ipv6
with pytest.raises(ValidationError):
model.ipv6 = "10.0.0.0"
def test_ipnetwork(self):
model = FullModel()
model.prefix = "10.0.0.0/8"
with pytest.raises(ValidationError):
model.prefix = "invalid"
def test_mac(self):
model = FullModel()
model.mac = "Ff:00:00:12:34:56"
model.full_clean()
assert "Ff:00:00:12:34:56" == model.mac
with pytest.raises(ValidationError):
model.mac = "invalid"
model.full_clean()
``` |
{
"source": "20c/django-ixpmgr",
"score": 2
} |
#### File: management/commands/upd_l2db.py
```python
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.db import transaction
import dns.query
import dns.tsigkeyring
import dns.update
import ipaddress
import logging
import math
from optparse import make_option
import re
from ixpmgr.models import Customer
from ixpmgr.models import PhysicalInterface
from ixpmgr.models import Switch
from ixpmgr.models import VirtualInterface
from ixpmgr.models import VlanInterface
from ixpmgr.models import ViewVlanInterfaceDetailsByCustid as ViewInterface
from ixpmgr import const
from ixpmgr import util
from pysnmp.entity.rfc3413.oneliner import cmdgen
from snimpy.manager import Manager as M
from snimpy.manager import load
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
"--force",
action="store_true",
default=False,
help="Force update for all entries",
),
)
help = "SNMP scan all switches and update mac addr table (does not work)"
def handle(self, *args, **options):
log = logging.getLogger("ixpmgr.script")
conf = util.load_config()
if options["force"]:
qs = ViewInterface.objects.all()
else:
# query for any interface that's enabled for ip and missing hostname
qs = ViewInterface.objects.filter(
~Q(ipv4address=0), ipv4enabled=1, ipv4hostname=""
)
qs |= ViewInterface.objects.filter(
~Q(ipv6address=0), ipv6enabled=1, ipv6hostname=""
)
qs = Switch.objects.filter(active=True, switchtype=const.SWITCHTYPE_SWITCH)
for switch in qs:
print(switch.name, switch.hostname, switch.snmppasswd)
print(switch.name, switch.hostname, switch.snmppasswd)
cmdGen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
cmdgen.CommunityData(switch.snmppasswd),
cmdgen.UdpTransportTarget((switch.hostname, 161)),
cmdgen.MibVariable("SNMPv2-MIB", "sysName", 0),
)
# Check for errors and print out results
if errorIndication:
print(errorIndication)
else:
if errorStatus:
print(
"%s at %s"
% (
errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1] or "?",
)
)
else:
for name, val in varBinds:
print(f"{name.prettyPrint()} = {val.prettyPrint()}")
load("IF-MIB")
load("SNMPv2-MIB")
load("BRIDGE-MIB")
load("Q-BRIDGE-MIB")
# for id in s.dot1qPortIngressFiltering:
m = M(switch.hostname, switch.snmppasswd)
# m = M(switch.hostname, 'dev_aLFMBMoZ30dNy8NqnHDJJgtRfP3', 2)
# print m.ifDescr[0]
# print m.sysContact
print(m.ifDescr)
# for idx in m.ifDescr:
# print m.ifDescr[idx]
# dot1qTpFdbPort
for i in m.dot1dBasePortIfIndex:
ifidx = m.dot1dBasePortIfIndex[i]
print("dot1d: ", i, m.dot1dBasePortIfIndex[i])
# print "dot1d: ", i, m.dot1dBasePortIfIndex[i], " ifDescr: ", m.ifDescr[ifidx]
``` |
{
"source": "20c/django-namespace-perms",
"score": 2
} |
#### File: django_namespace_perms/auth/backends.py
```python
from django.contrib.auth.models import User
from django.contrib.auth.backends import ModelBackend
from django_namespace_perms.constants import *
from django_namespace_perms.util import (
NAMESPACES,
obj_to_namespace,
permcode_to_namespace,
has_perms,
load_perms,
)
import logging
import re
import collections
log = logging.getLogger("django")
# Admin namespace prefixes
# Grant perms to a namespace
ADMIN_NS_GRANT = "admin.grant"
class NSPBackend(ModelBackend):
"""
Authenticate actions using nsp
"""
def load_perms(self, user_obj):
return load_perms(user_obj)
def has_module_perms(self, user_obj, obj=None):
if hasattr(obj, "nsp_namespace"):
fn = getattr(obj, "nsp_namespace")
if not isinstance(fn, collections.Callable):
raise Exception(
"nsp_namespace attribute needs to be callable for %s" % obj
)
namespace = fn()
else:
namespace = obj_to_namespace(obj)
log.info("Checking module perms: %s" % namespace)
perms = self.load_perms(user_obj)
return has_perms(perms, namespace, PERM_READ)
def has_perm(self, user_obj, perm, obj=None):
# if not user_obj.is_authenticated() or not user.is_active:
# FIXME: load guest perms and proceed
# return False
# super users have access to everything
if user_obj.is_superuser:
return True
namespace, level = permcode_to_namespace(perm)
write_ops = ["add", "delete", "change"]
if hasattr(obj, "nsp_write_ops") and callable(obj, "nsp_write_ops"):
write_ops.extend(getattr(obj, "nsp_write_ops")())
log.info("NSP has_perms %s %s %s" % (namespace, perm, level))
perms = self.load_perms(user_obj)
return has_perms(perms, namespace, level)
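# Hedged wiring sketch: in a project's settings.py this backend would typically
# be enabled alongside Django's default backend (dotted path assumed from this
# module's location in the repository):
#
#   AUTHENTICATION_BACKENDS = [
#       "django_namespace_perms.auth.backends.NSPBackend",
#       "django.contrib.auth.backends.ModelBackend",
#   ]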
``` |
{
"source": "20c/django-syncref",
"score": 3
} |
#### File: src/django_handleref/version.py
```python
from django.core.exceptions import ValidationError
try:
import reversion
except ImportError:
reversion = None
class Version:
"""
Object version interface - extend to support
different types of django object versioning
solutions
"""
def __init__(self, version):
"""
Argument(s):
- version: version object instance, for example
django reversion Version object
"""
self.version = version
@property
def date(self):
"""
Should return version date (datetime object)
"""
raise NotImplementedError()
@property
def user(self):
"""
Should return user (django User object) that committed the version
or None if no such user exists
"""
raise NotImplementedError()
@property
def id(self):
"""
Should return the internal version id (primary
key in the database)
"""
raise NotImplementedError()
@property
def comment(self):
"""
Should return comment (str) for the version
or None if no such comment exists
"""
raise NotImplementedError()
@property
def data(self):
"""
Should return a dict of the object snapshot
store in this version, with field names mapping
to field values
"""
raise NotImplementedError()
@property
def data_sorted(self):
"""
Should return `data` in list form with (field, value)
tuples as items, sorted by field
"""
raise NotImplementedError()
@property
def model(self):
"""
Should return the django model class of the object
snapshotted by the version
"""
raise NotImplementedError()
@property
def previous(self):
"""
Should return a Version instance holding the previous
version neighbour of this version
If no previous version exists return an empty Version
instance
"""
raise NotImplementedError()
@property
def next(self):
"""
Should return a Version instance holding the next
version neighbour of this version
if no next version exists return an empty Version
instance
"""
raise NotImplementedError()
@classmethod
def changes_summary(self, versions):
"""
Compiles and return a changes summary of multiple
Version objects.
Argument(s):
- versions(list): list of Version instances
Returns:
- list: a list of (field, dict) tuples where
the dict will be mapping version ids to a
dict containing changes made to the field
by the version. Check `Diff` class for
further information
"""
changes = {}
for version in versions:
_changes = version.changes(version.previous)
for field, diff in _changes.items():
if field not in changes:
changes[field] = {}
_diff = {"version": version}
_diff.update(diff)
changes[field].update({version.id: _diff})
changes_list = list(changes.items())
changes_list = sorted(changes_list, key=lambda i: i[0])
return changes_list
def changes(self, previous):
"""
Return a `Diff` instance for this version
and a previous version
Argument(s):
- previous(Version)
Returns(s):
- Diff
"""
return Diff(previous, self).changes
def changed_fields(self, previous):
"""
Return a list of changed fields between
this version and a previous version
Argument(s):
- previous(Version)
Returns:
- list(str)
"""
changes = self.changes(previous)
if changes is None:
return None
return sorted(changes.keys())
class ReversionVersion(Version):
"""
Version abstraction for django-reversion
"""
def __init__(self, version):
"""
Argument(s):
- Version(int|reversion.models.Version): can be either
a reversion version instance or the id of one
"""
if isinstance(version, int):
version = reversion.models.Version.objects.get(id=version)
super().__init__(version)
@property
def date(self):
"""
Returns:
- datetime: date of revision
"""
return self.version.revision.date_created
@property
def user(self):
"""
Returns:
- User: user that authored revision
- None: if no such user exists
"""
return self.version.revision.user
@property
def comment(self):
"""
Returns:
- str: comment stored with revision
- None: if no such comment exists
"""
return self.version.revision.comment
@property
def id(self):
"""
Returns:
- int:version instance id
"""
return self.version.id
@property
def data(self):
"""
Returns:
- dict: object data
"""
return self.version.field_dict
@property
def model(self):
"""
Returns:
- model: django model for the object
snapshotted by this version
"""
return self.version._model
@property
def data_sorted(self):
"""
Returns:
- list: list of (field, value) tuples for
object data
"""
data = []
for field, value in self.data.items():
data.append((field, value))
return sorted(data, key=lambda i: i[0])
@property
def previous(self):
"""
Returns:
- Version: previous version - if no previous version exists
the Version instance will be empty
"""
if hasattr(self, "_previous"):
return self._previous
versions = reversion.models.Version.objects.get_for_object(
self.version.object
).order_by("-id")
for version in versions:
if version.id < self.version.id:
self._previous = self.__class__(version)
return self._previous
return None
@property
def next(self):
"""
Returns:
- Version: next version - if no next version exists
the Version instance will be empty
"""
if hasattr(self, "_next"):
return self._next
qset = reversion.models.Version.objects.filter(
content_type_id=self.version.content_type_id,
object_id=self.version.object_id,
id__gt=self.version.id,
)
qset = qset.order_by("id")
self._next = self.__class__(qset.first())
return self._next
class Diff:
"""
Describes changes between two versions
"""
# when generating diff ignore these fields
diff_ignore_fields = [
"version",
"created",
"updated",
]
def __init__(self, version_a, version_b):
"""
Argument(s):
- version_a(Version): older version
- version_b(Version): newer version
"""
self.version_a = version_a
self.version_b = version_b
@property
def changes(self):
"""
Compile and return a dict describing changes between
the two versions tracked in this diff
Returns:
- dict: dict mapping field names to a dict describing
changed made to the field
{
field_name: {
"old": old_value,
"changed": changed_value,
},
...
}
"""
if not self.version_a or not self.version_b:
return None
if not self.version_a.version or not self.version_b.version:
return None
data_a = self.version_a.data
data_b = self.version_b.data
diff = {}
for field, value_b in data_b.items():
if field in self.diff_ignore_fields:
continue
value_a = data_a.get(field)
if value_a == value_b:
continue
if isinstance(value_a, str) or isinstance(value_a, int):
diff[field] = {"old": value_a, "changed": value_b}
else:
diff[field] = {
"old": self.format_value(value_a),
"changed": self.format_value(value_b),
}
return diff
def format_value(self, value):
return f"{value}"
class Reverter:
"""
Allows to revert / rollback changes
"""
def revert_fields(self, instance, field_versions, **kwargs):
"""
Revert a set of fields
Argument(s):
- instance(model instance): instance of django model
to be reverted
- field_versions(dict): dict mapping field names to
version object
Raises:
- ValidationError: if any of the fields fail validation
"""
for field, version in field_versions.items():
setattr(instance, field, version.data[field])
if field == "status":
self.validate_status_change(instance, version.data[field])
instance.full_clean()
instance.save()
def rollback(self, instance, version, **kwargs):
"""
Rollback to a specific version
Argument(s):
- instance(model instance): instance of django model
to be reverted
- version(Version): version to roll back to
Raises:
- ValidationError: if any of the fields fail validation
"""
for field, value in version.data.items():
if field in ["created", "updated", "version"]:
continue
if field == "status":
self.validate_status_change(instance, value)
setattr(instance, field, value)
instance.full_clean()
instance.save()
def validate_status_change(self, instance, status):
"""
Validate a status value change - this will make sure
an object cannot be undeleted if a parent relationship
is still flagged as deleted
Argument(s):
- instance(model instance): instance of django model
to be reverted
- status(str)
"""
for field in instance.__class__._meta.get_fields():
if not field.is_relation or not field.many_to_one:
continue
try:
relation = getattr(instance, field.name)
except Exception:
continue
self.validate_parent_status(instance, relation, status)
def validate_parent_status(self, instance, parent, status):
if not hasattr(parent, "HandleRef"):
return
if parent.status == "deleted" and status != "deleted":
raise ValidationError(
{
"non_field_errors": "Parent object {} is currently flagged as deleted."
"This object may not be undeleted while the parent "
"is still deleted.".format(parent)
}
)
class ReversionReverter(Reverter):
"""
Reverter abstraction for django-reversion
"""
def revert_fields(self, instance, field_versions, user=None):
"""
Revert a set of fields
Argument(s):
- instance(model instance): instance of django model
to be reverted
- field_versions(dict): dict mapping field names to
version pk
Keyword Argument(s):
- user(User): user that authored the revision
Raises:
- ValidationError: if any of the fields fail validation
"""
with reversion.create_revision():
if user:
reversion.set_user(user)
version_ids = [
"{}".format(version.data["version"])
for version in field_versions.values()
]
version_ids = list(set(version_ids))
reversion.set_comment(
"reverted some fields via versions: {}".format(", ".join(version_ids))
)
super().revert_fields(instance, field_versions)
def rollback(self, instance, version, user=None):
"""
Rollback to a specific version
Argument(s):
- instance(model instance): instance of django model
to be reverted
- version(Version): version to roll back to
Keyword Argument(s):
- user(User): user that authored the revision
Raises:
- ValidationError: if any of the fields fail validation
"""
with reversion.create_revision():
if user:
reversion.set_user(user)
reversion.set_comment(
"rollback to version {}".format(version.data["version"])
)
super().rollback(instance, version)
```
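A hedged usage sketch of the reversion-backed classes above; it assumes `obj` is a model instance that already has django-reversion revisions and `user` is whoever should be credited with the revert.
```python
import reversion

from django_handleref.version import ReversionReverter, ReversionVersion


def show_and_rollback(obj, user=None):
    """Print the latest snapshot of `obj` and roll the object back to it."""
    raw = reversion.models.Version.objects.get_for_object(obj).order_by("-id").first()
    version = ReversionVersion(raw)
    print(version.date, version.user, version.comment)
    # fields touched since the prior snapshot (None if there is no prior version)
    print(version.changed_fields(version.previous))
    ReversionReverter().rollback(obj, version, user=user)
```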
#### File: django-syncref/tests/test_datetime_fields.py
```python
from datetime import datetime, timedelta
import pytest
from django.test import TestCase
from tests.models import Org, Sub, Widget
data_org = {"name": "Acme Widgets"}
class FieldTestCase(TestCase):
def setUp(self):
self.org = Org.objects.create(**data_org)
self.created = datetime.now()
self.one_sec = timedelta(seconds=1)
pass
# org = Org.objects.create(**data_org)
def test_obj_creation(self):
assert self.one_sec > self.created - self.org.created
assert self.one_sec > self.created - self.org.updated
def test_updated(self):
self.org.name = "Updated"
self.org.save()
now = datetime.now()
assert self.one_sec > self.created - self.org.created
assert self.one_sec > now - self.org.updated
``` |
{
"source": "20centcroak/PyCroakTools",
"score": 3
} |
#### File: pycroaktools/datapack/datafiles.py
```python
from pycroaktools.files import Finder
from pycroaktools.datapack import DataPack
from tkinter import Tk, filedialog
import logging
class DataFiles:
"""The DataFiles class manages resource files thanks to 2 dictionaries (files and fileset), one is a key/value per file
the other one is a key/value per file set (a list of files)
The way resource files are retrieved is based on regex. It uses Finder class to do so.
"""
def __init__(self, parent: str, settings: dict, depth=0, caseSensitive=False):
"""
constructor: it defines the parent folder where resource files should be searched (except for external files)
and then associates key/value pairs for resources.
Parameters
----------
parent: parent folder path where resource files should be searched
settings: a dictionary with optional keys :
'files': list of name: regex, each regex defining a pattern to search for 1 file to be associated with the given name in the resulting files dictionary
'fileset': list of name: regex, each regex defining a pattern to search for multiple files to be associated with the given name in the resulting fileset dictionary
'externalfiles': list of name: parameters to search for files outside the parent folder. The files are added to the 'files' dictionary. The parameters may contain either:
- 'ref': regex = regex to defining a pattern to search for 1 file to be associated with the given name in the resulting files dictionary
- 'in': the parent folder where to search for a file
or
- 'tip': title of the dialog that will pop open to select the file
- 'type': file extension of the searched file to be used in this dialog
"""
finder_settings = {'parent': parent,
'depth': depth, 'caseSensitive': caseSensitive}
self.files = dict()
if 'files' in settings:
for name in settings['files']:
finder_settings['regex'] = settings['files'][name]
self.files[name] = self._getFile(finder_settings)
logging.info('file {} found'.format(self.files[name]))
self.fileset = dict()
if 'fileset' in settings:
for name in settings['fileset']:
finder_settings['regex'] = settings['fileset'][name]
self.fileset[name] = self._getFiles(finder_settings)
logging.info('fileset: {}'.format(self.fileset[name]))
if 'externalfiles' in settings:
for name in settings['externalfiles']:
parameters = settings['externalfiles'][name]
if 'ref' not in parameters:
tip = parameters['tip'] if 'tip' in parameters else 'open'
filetypes = [
('searched files', parameters['type']), ('all files', '.*')]
self.files[name] = self._openDialog(
filetypes=filetypes, title=tip)
logging.info('file {} selected'.format(self.files[name]))
else:
finder_settings['parent'] = parameters['in']  # search inside the folder given by 'in'
finder_settings['regex'] = parameters['ref']
self.files[name] = self._getFile(finder_settings)
logging.info('file {} found'.format(self.files[name]))
def _openDialog(self, title='open', filetypes=None):
root = Tk()
root.withdraw()
filepath = filedialog.askopenfilename(title=title, filetypes=filetypes)
root.destroy()
return filepath
def _getFiles(self, finder_settings):
foundfiles = Finder(finder_settings).findFiles()
if not foundfiles:
raise ValueError('no file found in {} with regex {}'.format(
finder_settings['parent'], finder_settings['regex']))
return foundfiles
def _getFile(self, finder_settings):
return self._getFiles(finder_settings)[0]
def generateDataPack(self, dataprocessing: dict):
"""
generates a DataPack from these file resources and the processing to apply to them.
Parameters
--------------------
dataprocessing: description of the processing steps to apply to the resource files.
"""
count = 0
files = dict()
# expose each fileset under a numbered key, alongside the single files
for key, value in self.fileset.items():
files[key+str(count)] = value
count += 1
return DataPack(dict(self.files, **files), dataprocessing)
```
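As a rough usage sketch (the parent folder, keys, and regexes below are hypothetical, not taken from the repository), the settings dictionary described above could look like this:
```python
# Hypothetical DataFiles settings; folder names and regexes are illustrative only.
settings = {
    'files': {
        # one file per key, matched by regex under the parent folder
        'config': r'config\.ya?ml$',
    },
    'fileset': {
        # several files per key
        'reports': r'report_\d+\.csv$',
    },
    'externalfiles': {
        # searched outside the parent folder, either by regex in a given folder...
        'template': {'ref': r'template\.docx$', 'in': '/shared/templates'},
        # ...or selected interactively through a file dialog
        'input': {'tip': 'select the input file', 'type': '.xlsx'},
    },
}
# datafiles = DataFiles('/data/project', settings, depth=2)
```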
#### File: pycroaktools/easyPresentation/generator.py
```python
import os, sys, logging
from webbrowser import open as webopen
from pandas import read_csv
import distutils.dir_util as dirutil
import pycroaktools.applauncher as launcher
from pycroaktools.presentation import Slides
from pycroaktools.workflow import Workflow, Flowchart
from pycroaktools.easyPresentation.slidesToWorkflow import SlidesToWorkflow
from pycroaktools.easyPresentation.workflowToPresentation import WorkflowToPresentation
class Generator(launcher.Settings):
"""
The Generator class generates presentations based on a workflow, a list of images, a list of markdown slides, or any combination of these.
It inherits from pycroaktools.applauncher.settings.Settings that manages the settings.
"""
def __init__(self, settings: dict):
"""
builds the class according to the settings dictionary.
Parameters
----------
settings : dictionary that may contain the following key and values
- slideFolder: folder that contains markdown files.
These files define either a slide thanks to their name or thanks to their header.
See class Slides to get more details about the header.
- imageFolder: folder that contains images. Each image either defines its own slide, if the image name complies
with the slide definition (see class Slides), or may be referenced by a markdown file.
- outputFolder: folder where the presentation is created. If none is provided, the current working directory
(from where the app is launched) is used.
- workflowFile: csv file defining the workflow. See class Workflow for a description of this csv file
- createFlowchart: if True, a graphical representation of the workflow is generated
- createLinearPresentations: if True, each possible path defined by the workflow generates an individual presentation.
Each slide then has exactly one next slide, giving a linear sequence from first to last slide.
- createWorkflowPresentation: if True, a unique presentation is generated to represent the workflow.
Each slide may have multiple next slides; links let the reader choose which path of the workflow to follow.
- displayTitles: if true, each slide displays its title
"""
self.slideFolder = None
self.imageFolder = None
self.outputFolder = os.getcwd()
self.workflowFile = None
self.versions = [0.]
self.createFlowchart = False
self.createLinearPresentations = False
self.createWorkflowPresentation = True
self.displayTitles = False
self.setProperties(settings)
self._build()
def _build(self):
slides = self._manageSlides()
self._manageImages(slides)
workflow = self._manageWorkflow(slides)
self._manageMissingSlides(slides, workflow)
pres = self._generate(workflow, slides)
self.open(pres)
def open(self, filename):
if not filename:
return
new = 2
logging.info('presentation available at {}'.format(filename))
url = "file:///"+os.path.realpath(filename)
logging.info('opening presentation at url {}'.format(url))
webopen(url, new=new)
def _manageImages(self, slides: Slides):
if not self.imageFolder:
return
print(self.imageFolder)
slides.catalog(self.imageFolder, images=True)
def _manageSlides(self):
slides = Slides(self.displayTitles)
if self.slideFolder:
slides.catalog(self.slideFolder)
else:
self.slideFolder = os.path.join(self.outputFolder, 'slides')
return slides
def _manageWorkflow(self, slides: Slides):
if self.workflowFile:
try:
return Workflow(read_csv(
self.workflowFile), os.path.basename(self.workflowFile)[:-4])
except FileNotFoundError:
launcher.error('file {} not found'.format(self.workflowFile))
if not slides.getDefaultSlideOrder():
launcher.error('no workflow or slide found')
return Workflow(SlidesToWorkflow().create(slides), 'presentation')
def _manageMissingSlides(self, slides: Slides, workflow: Workflow):
slides.createMissingSlides(
[step.stepId for step in workflow.getSteps()])
def _generate(self, workflow: Workflow, slides: Slides):
toPres = WorkflowToPresentation(
workflow, slides, self.outputFolder)
presentation = None
for version in self.versions:
logging.info('version {} ...'.format(version))
if self.createFlowchart:
Flowchart(workflow).display()
if self.createLinearPresentations:
presentation = toPres.createLinearPresentations(version)
if self.createWorkflowPresentation:
presentation = toPres.createWorkflowPresentation(version)
return presentation
```
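For illustration only (every path and flag below is made up), a settings dictionary matching the keys documented above might be:
```python
# Hypothetical Generator settings; all paths are illustrative.
settings = {
    'slideFolder': 'content/slides',         # markdown slide files
    'imageFolder': 'content/images',         # images used as slides or referenced by slides
    'outputFolder': 'build',                 # where the presentation is written
    'workflowFile': 'content/workflow.csv',  # csv describing the workflow
    'createFlowchart': True,                 # also render a graphical view of the workflow
    'createLinearPresentations': False,      # one presentation per workflow path
    'createWorkflowPresentation': True,      # one presentation with links between steps
    'displayTitles': True,
}
# Generator(settings)  # builds the presentation and opens it in the browser
```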
#### File: pycroaktools/easyPresentation/workflowToPresentation.py
```python
from pycroaktools.presentation import Presentation, Slides
from pycroaktools.workflow import Workflow
class WorkflowToPresentation:
"""
The WorkflowToPresentation class builds a bridge between a defined workflow and a presentation.
It generates presentations from this workflow by matching Workflow data with Slides data.
These presentations are versioned: the content evolves easily by requesting the slide versions corresponding
to the expected presentation version.
To do so, 2 functions are available:
- createLinearPresentations: each possible path defined by the workflow generates an individual presentation.
Each slide then has only one next slide; this is a linear sequence from first to last slide.
- createWorkflowPresentation: a unique presentation is generated to represent the workflow. Each slide may have multiple next slides;
links let the reader choose which path of the workflow to follow.
"""
def __init__(self, workflow: Workflow, slides: Slides, outputFolder):
"""
Builds the object
---
Parameters:
- workflow: workflow definition
- slides: slides that should match the workflow
- outputFolder: folder where the presentations are saved
"""
self.workflow = workflow
self.slides = slides
self.outputFolder = outputFolder
def _getPresNames(self, paths, version):
presNames = []
for path in paths:
presName = self.workflow.name+'_v'+str(version)+'_'
for step in path:
presName += str(step.stepId)+'-'
presNames.append(presName[:-1]+'.html')
return presNames
def createLinearPresentations(self, version):
"""
Each possible path defined by the workflow generates an individual presentation.
Each slide then has only one next slide; this is a linear sequence from first to last slide.
---
Parameters:
- version: expected version of the presentation. This version of each slide is searched for and, if not
found, the closest previous one is used.
"""
paths = self.workflow.getAllPaths()
presNames = self._getPresNames(paths, version)
for index, path in enumerate(paths):
slideIds = [step.stepId for step in path]
presentation = Presentation().createPresentation(
presNames[index], self.slides, slideIds, self.outputFolder, version=version)
return presentation
def createWorkflowPresentation(self, version):
"""
A unique presentation is generated to represent the workflow. Each slide may have multiple next slides;
links let the reader choose which path of the workflow to follow.
---
Parameters:
- version: expected version of the presentation. This version of each slide is searched for and, if not
found, the closest previous one is used.
"""
presName = self.workflow.name + '_v' + str(version)+'.html'
links = self.workflow.getLinksPerSteps()
return Presentation().createPresentation(presName, self.slides, outputFolder=self.outputFolder,
links=links, version=version)
```
#### File: pycroaktools/presentation/slides.py
```python
from pathlib import Path
import logging, sys
import bisect as bs
from pycroaktools.presentation.slide import Slide
from pycroaktools.presentation.slideGenerator import SlideGenerator
from pycroaktools.applauncher import Configuration, error
class Slides:
"""
The Slides class manages a catalog of Slide that may be used by the Presentation object.
The slides may be retrieved from a disk location if the markdown files defining the slide content respect one of the following rules:
- the markdown file owns a header section composed of a "---" sequence defining the start and end of the header. It should be at the very beginning of the file.
In this header at least 2 keys must be defined :
- title: arbitrary title
- id: a unique integer. 2 slides can't have the same id, except when the slide is split into parts.
- part: [optional] float number. A Slide may be split in multiple parts. In this case, they have the same id but a different part number. If not set, 0.0 is the default value.
- version: [optional] float number. A slide may have different versions, so that a history can be managed (version 0 is older than version 1). If not set, 0.0 is the default value
- the markdown file name is built as follows: id_title[_part][_version]. The fields are the same as in the header definition
The slides can also be added by passing Slide objects.
The slides may be built thanks to images. Image names should comply with the same rule given for markdown file names.
"""
def __init__(self, displayTitles=False):
"""builds the object"""
self.slides = dict()
"""self.slides is a dictionary with key = slide id and value is the version dictionary.
The version dictionary is defined by key = slide version and value is the part dictionary
The part dictionary is defined by key = slide part number and value = Slide object"""
self.versions = [0]
"""references in a sorted list the different available versions. Some version may be only available for one or more slides."""
self.displayTitles = displayTitles
"""if True, the slide title is displayedin slides"""
self.imageFolders = []
def catalog(self, folder: str, images=False):
"""
references slides by the files contained in the given folder if they comply with the rules defined in the class definition
---
Parameters:
- folder: folder where files to produce slides may be found
- images: if True, the files are treated as image files
"""
logging.info('search for files to create slides...')
if images:
self.declareResources(folder)
path = Path(folder).rglob('*.*')
files = [x for x in path if x.is_file()]
counter = 0
for file in files:
slide = None
if images:
slide = SlideGenerator().fromImage(file)
else:
slide = SlideGenerator().fromHeader(file)
if not slide:
slide = SlideGenerator().fromFilename(file)
if not slide:
logging.warning(
'can\'t retrieve useful information from file {}, slide is not created.'.format(file))
continue
self.addSlide(slide)
counter += 1
if counter > 0:
logging.info('{} slides created'.format(counter))
else:
logging.warning(
'no file found to define slides in {}'.format(folder))
def declareResources(self, imageFolder):
self.imageFolders.append(imageFolder)
def addSlide(self, slide: Slide):
"""add a predefined Slide object"""
version = slide.version
if slide.id not in self.slides:
self.slides[slide.id] = dict()
if version not in self.slides[slide.id]:
bs.insort(self.versions, version)
self.slides[slide.id][version] = dict()
self.slides[slide.id][version][slide.part] = slide
def createMissingSlides(self, slideIds):
"""create missing slides according to the given slide ids. Slides are created with slide title="slide id" where id is the slide id.
The slide is saved in the slideFolder with the markdown rules details in the class definition"""
for slideId in slideIds:
if self.getSlide(slideId, self.getHighestVersion()):
continue
title = 'slide {}'.format(slideId)
slide = Slide(slideId, title)
slide.setContent('# '+title)
self.addSlide(slide)
logging.info('slide {} {} created'.format(slideId, title))
def getDefaultSlideOrder(self):
return sorted(list(self.slides.keys()))
def getFileName(self, title, id, version=0, part=0):
"""returns the slide filename"""
return '{1}{0}{2}{0}{3}{0}{4}{5}'.format('_', id, title, version, part, '.md')
def getHighestVersion(self):
"""returns the highest version value fourn for at least 1 slide in the collection"""
return self.versions[-1]
def getSlide(self, slideId, version=0):
"""returns a dictionary with key = part number and value = Slide object corresponding to the slide id and its version.
If no version is provided, version 0.0 is returned."""
version = float(version)
slideId = int(slideId)
if slideId not in self.slides:
logging.warning('slide {} not found'.format(slideId))
return None
try:
index = self.versions.index(version)
except ValueError:
index = len(self.versions) - 1
while version not in self.slides[slideId] and index >= 0:
index -= 1
version = self.versions[index]
if index < 0:
return self.slides[slideId][next(iter(self.slides[slideId]))]
return self.slides[slideId][version]
def _getSlidePart(self, slideId, part, version=0):
"""returns a slide part corresponding to the slide id, version and part. If no version is provided, version 0.0 is returned."""
slide = self.getSlide(slideId, version)
if slide is None:
return None
part = float(part)
try:
return slide[part]
except KeyError:
error("part {} not found for slide {}".format(part, slideId))
return self.slides[slideId][version][part]
def getSlideTitle(self, slideId, version=0):
"""return the slide title by getting the slide title of its first part"""
return self.getSlide(slideId, version)[next(iter(self.getSlide(slideId, version)))].title
def getSlideContents(self, slideId, version=0):
"""returns slide contents, ie a list of markdown contents. Each item of the sorted list correspond to a slide part.If no version is provided, version 0.0 is returned."""
contents = []
slide = self.getSlide(slideId, version)
if slide is None:
contents.append('slide {} is missing.'.format(slideId))
return contents
parts = list(slide.keys())
parts.sort()
for part in parts:
slidePart = self._getSlidePart(slideId, part, version)
if slidePart is None:
contents.append(
'slide {} part {} is missing.'.format(slideId, part))
continue
contents.append(slidePart.getContent(self.displayTitles))
return contents
def getMarkdownLinks(self, links, version=0):
"""returns markdown links correponding to the given link ids. It transforms theses ids as html links in a Presentation."""
if links is None:
return None
mdLinks = [" \n"]
for slideId in links:
href = '#/'+str(links[slideId])
text = self.getSlideTitle(slideId, version)
mdLinks.append('['+text+']('+href+')')
return mdLinks
```
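To make the cataloguing rules concrete, here is a hypothetical slide using the header convention described in the Slides docstring (all field values are invented); the same metadata could instead be encoded in the file name, e.g. 1_Introduction.md with optional part and version suffixes:
```python
# Hypothetical markdown slide content following the header convention described above.
slide_md = """---
title: Introduction
id: 1
part: 0.0
version: 1.0
---
# Introduction
Some markdown content for this slide.
"""
```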
#### File: tests/unit/test_finder.py
```python
import unittest
import os
from pycroaktools.files import Finder
class TestFinder(unittest.TestCase):
test_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources/test_finder/testFolder')
test_zip = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources/test_finder/myzip.zip')
def _findFolders(self, properties):
finder = Finder(properties)
return finder.findFolders()
def test_findFolders(self):
properties = {'parent': self.test_folder, 'regex': r'.*1$'}
results = self._findFolders(properties)
self.assertEqual(len(results), 1)
self.assertEqual(os.path.basename(results[0]), 'folder1')
def _findFiles(self, properties):
finder = Finder(properties)
return finder.findFiles()
def test_findFiles(self):
"""
Test that it can retrieve 1 specific file
"""
properties = {'parent': self.test_folder, 'regex': r't.+t1\.txt$', }
results = self._findFiles(properties)
self.assertEqual(len(results), 1)
self.assertEqual(os.path.basename(results[0]), 'test1.txt')
def _findFilesInZip(self, properties):
finder = Finder(properties)
return finder.findFilesInZip()
def test_findFilesInZip(self):
"""
Test that it can retrieve 1 specific file in zip file
"""
properties = {'parent': self.test_zip, 'regex': r'.+level2\.txt$'}
results = self._findFilesInZip(properties)
self.assertEqual(len(results), 1)
self.assertEqual(os.path.basename(results[0]), 'mytextinlevel2.txt')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "20centcroak/pyRevealjs",
"score": 3
} |
#### File: pyRevealjs/tests/test_presentation.py
```python
from pyRevealjs import Slide, SlideCatalog, SlideGenerator, Presentation, PresentationSettings
import unittest
import tempfile
import logging
import sys
class testPresentation(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temp_dir = tempfile.TemporaryDirectory()
print('temp dir: ', cls.temp_dir.name)
root = logging.getLogger()
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s')
root.setLevel(logging.DEBUG)
# console
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
root.addHandler(console_handler)
@classmethod
def tearDownClass(cls):
cls.temp_dir.cleanup()
def test_simple(self):
# get slides from images
catalog = SlideCatalog()
catalog.catalog('tests/resources/onlyimages', images=True)
# Create Presentation based on Slides in the current working directory
settings = {'title': 'simple presentation', 'theme': 'white'}
pres = Presentation('simple presentation', catalog, PresentationSettings(settings))
pres.addSlideByIds(catalog.getAllIds())
pres.save('temp')
def test_full(self):
# Create a slide with id=1. Part and version numbers are left at default values (0.0)
# Content is given as a string
slide1 = Slide(1, "slide 1")
slide1.setContent(
'# Slide1 \nversion {} - part {}'.format(slide1.version, slide1.part))
# Create slide 2
# Content is given as an external markdown file with a header where id=2. Part and version numbers are left at default values (0.0)
slide2part0 = SlideGenerator().fromHeader('tests/resources/full/slide2.md')
# Create a slide with the same id as slide 2 because it is a second part (part number 1.1) of the same slide
# Parts of a same slide will be displayed vertically in the presentation while different ids are displayed horizontally
slide2part1 = Slide(2, 'slide2-1', part=1.1)
slide2part1.setContent(
'# Slide2 \nversion {} - part {}'.format(slide2part1.version, slide2part1.part))
# Create two versions of slide 3 (0.1 and 1.0) to demonstrate version management
slide3version0_1 = Slide(3, 'slide3', version=0.1)
slide3version0_1.setContent(
'# Slide3 \nversion {} - part {}'.format(slide3version0_1.version, slide3version0_1.part))
slide3version1 = Slide(3, 'slide3', version=1)
slide3version1.setContent(
'# Slide3 \nversion {} - part {}'.format(slide3version1.version, slide3version1.part))
# Add slides to Slides
catalog = SlideCatalog()
catalog.addSlide(slide1)
catalog.addSlide(slide2part0)
catalog.addSlide(slide2part1)
catalog.addSlide(slide3version0_1)
catalog.addSlide(slide3version1)
# slide2.md asks to not display links
links = {1: [2, 3], 2: [3]}
# if slides embed images from imageFolder in their markdown content, the following line is required:
# slides.declareResources(imageFolder):
# Define the presentation versions to create
# Slides do not all need to have the same version: the presentation falls back to the closest slide version below the requested one when the exact version is not found
versions = [0.0, 0.1, 1.0]
# Create Presentation based on Slides in the current working directory
pres = Presentation('presentation', catalog)
pres.addSlideByIds(catalog.getAllIds())
pres.addLinks(links)
temp = self.temp_dir.name
for version in versions:
pres.save(temp, version)
``` |
{
"source": "20c/grainy",
"score": 3
} |
#### File: grainy/tests/test_util.py
```python
import unittest
from grainy import const, core
class TestUtils(unittest.TestCase):
def test_int_flags(self):
self.assertEqual(core.int_flags("c"), const.PERM_CREATE)
self.assertEqual(core.int_flags("cr"), const.PERM_CREATE | const.PERM_READ)
self.assertEqual(
core.int_flags("cru"),
const.PERM_CREATE | const.PERM_READ | const.PERM_UPDATE,
)
self.assertEqual(
core.int_flags("crud"),
const.PERM_CREATE | const.PERM_READ | const.PERM_UPDATE | const.PERM_DELETE,
)
``` |
{
"source": "20chan/aheuithon",
"score": 2
} |
#### File: aheuithon/codec/parser.py
```python
import parso
from aheuithon.codec.compiler import compile
def ffc(typ, node):
for x in node.children:
if x.type == typ:
return x
def find_decorated_nodes(mod):
decorated = []
q = [mod]
while q:
n = q.pop()
if not hasattr(n, "children"):
continue
found = False
for x in n.children:
if not found and x.type == "decorator":
name = ffc("name", x).value
if name == "aheui":
found = True
continue
elif found and x.type == "funcdef":
decorated.insert(0, x)
if hasattr(x, "children"):
q.append(x)
return decorated
def parse(src):
lines = src.splitlines()
mod = parso.parse(src)
aheuifuncs = find_decorated_nodes(mod)
offset = 0
for func in aheuifuncs:
start = func.start_pos[0] + offset
end = func.end_pos[0] + offset
prefix = func.start_pos[1] + 4
body = [l[prefix:] for l in lines[start:end]]
newBody = [' ' * prefix + l for l in compile(body)]
print(body)
print(newBody)
offset += len(newBody) - len(body)
lines[start:end] = newBody
return "\n".join(lines)
``` |
{
"source": "20chan/eng2kor",
"score": 2
} |
#### File: eng2kor/eng2kor/eng2kor.py
```python
BASE_CODE, CHO_CODE, JUNG_CODE, MAX_CODE = 44032, 588, 28, 55203
CHO_LIST = list('ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ')
JUNG_LIST = list('ㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ')
JONG_LIST = list(' ㄱㄲㄳㄴㄵㄶㄷㄹㄺㄻㄼㄽㄾㄿㅀㅁㅂㅄㅅㅆㅇㅈㅊㅋㅌㅍㅎ')
KORS = list('ㄱㄲㄳㄴㄵㄶㄷㄹㄺㄻㄼㄽㄾㄿㅀㅁㅂㅄㅅㅆㅇㅈㅊㅋㅌㅍㅎㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ')
ENGS = ['r', 'R', 'rt', 's', 'sw', 'sg', 'e', 'f', 'fr', 'fa', 'fq', 'ft', 'fx', 'fv', 'fg', 'a', 'q', 'qt', 't',
'T', 'd', 'w', 'c', 'z', 'x', 'v', 'g',
'k', 'o', 'i', 'O', 'j', 'p', 'u', 'P', 'h', 'hk', 'ho', 'hl', 'y', 'n', 'nj', 'np', 'nl', 'b', 'm', 'ml', 'l']
KOR_ENG_TABLE = dict(zip(KORS, ENGS))
def eng2kor(text):
# The original function body is missing here; a full implementation would need to group
# ENGS key sequences and recompose Hangul syllables with combine().
raise NotImplementedError
def kor2eng(text):
res = ''
for ch in text:
spl = split(ch)
if spl is None:
res += ch
else:
res += ''.join([v for v in spl if v != ' '])
return res
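# Hangul syllable arithmetic implemented by combine()/split() below: a precomposed
# syllable has code point BASE_CODE + cho_index * CHO_CODE + jung_index * JUNG_CODE + jong_index,
# i.e. 0xAC00 + cho * 588 + jung * 28 + jong, where 588 = 21 * 28
# (21 medial vowels, 28 finals including the empty final).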
def combine(cho, jung, jong):
res = BASE_CODE
res += 0 if cho == ' ' else CHO_LIST.index(cho) * CHO_CODE
res += 0 if jung == ' ' else JUNG_LIST.index(jung) * JUNG_CODE
res += JONG_LIST.index(jong)
return chr(res)
def split(kor):
code = ord(kor) - BASE_CODE
if code < 0 or code > MAX_CODE - BASE_CODE:
if kor == ' ': return None
if kor in CHO_LIST: return kor, ' ', ' '
if kor in JUNG_LIST: return ' ', kor, ' '
if kor in JONG_LIST: return ' ', ' ', kor
return None
return CHO_LIST[code // CHO_CODE], JUNG_LIST[(code % CHO_CODE) // JUNG_CODE], JONG_LIST[(code % CHO_CODE) % JUNG_CODE]
if __name__ == '__main__':
print(split('뷁'))
print(combine(*split('뷁')))
print(kor2eng('안녕하세요 파이썬은 정말 최고에요'))
``` |
{
"source": "20chase/cartpole_rl",
"score": 2
} |
#### File: cartpole_rl/a3c/play_gym.py
```python
import argparse
import gym
import roboschool
import scipy.signal
import numpy as np
import tensorflow as tf
import utils as U
from tabulate import tabulate
from discrete_a2c import DiscreteA2C
parser = argparse.ArgumentParser(description='discrete advantage actor critic algorithm')
parser.add_argument(
'--lr', default=7e-4, type=float, help='learning rate')
parser.add_argument(
'--ent_coef', default=0., type=float, help='the coefficient of entropy')
parser.add_argument(
'--vf_coef', default=0.5, type=float, help='the coefficient of value function')
parser.add_argument(
'--max_grad_norm', default=0.5, type=float, help='max gradient norm for clipping')
parser.add_argument(
'--gamma', default=.99, type=float, help='gamma')
parser.add_argument(
'--seed', default=0, type=int, help='RNG seed')
parser.add_argument(
'--num_steps', default=5, type=int, help='the number of steps')
parser.add_argument(
'--num_procs', default=32, type=int, help='the number of processes')
parser.add_argument(
'--max_steps', default=8e6, type=int, help='max steps of training')
parser.add_argument(
'--animate', default=False, type=bool, help='whether to animate environment')
parser.add_argument(
'--softmax', default=True, type=bool, help='whether to use softmax to sample action')
parser.add_argument(
'--huber', default=False, type=bool, help='whether to use huber loss')
parser.add_argument(
'--save_network', default=False, type=bool, help='whether to save network')
parser.add_argument(
'--load_network', default=False, type=bool, help='whether to load network')
parser.add_argument(
'--test_alg', default=False, type=bool, help='whether to test our algorithm')
parser.add_argument(
'--gym_id', default='CartPole-v1', type=str, help='gym id')
parser.add_argument(
'--model_name', default='discrete_a2c', type=str, help='save or load model name')
args = parser.parse_args()
def build_multi_envs():
def make_env(rank):
def _thunk():
env = gym.make(args.gym_id)
env.seed(args.seed+rank)
return env
return _thunk
U.set_global_seeds(args.seed)
env = U.SubprocVecEnv([make_env(i) for i in range(args.num_procs)])
return env
class PlayGym(object):
def __init__(self, args, env, agent):
self.args = args
self.env = env
self.agent = agent
self.test_env = gym.make(self.args.gym_id)
def play(self, max_iters=100000):
obs = self.env.reset()
for i in range(max_iters):
obses, acts, rews, values, obs = self._sample_trajs(obs)
self.agent.update(obses, acts, rews, values)
if i % 100 == 0:
score = self.test()
print ("iter: {} | score: {}".format(i, score))
self.agent.score = score
def test(self):
env = self.test_env
obs = env.reset()
score = 0
done = False
while not done:
act = self.agent.get_action([obs])
obs, rew, done, _ = env.step(act)
score += rew
return score
def _sample_trajs(self, obs):
obses, acts, rews, values, dones = [], [], [], [], []
for step in range(self.args.num_steps):
obses.append(obs)
act, value = self.agent.step(obs)
obs, rew, done, _ = self.env.step(act)
acts.append(act)
rews.append(rew)
values.append(value)
dones.append(done)
obses = np.asarray(obses, dtype=np.float32).swapaxes(1, 0)
acts = np.asarray(acts, dtype=np.int32).swapaxes(1, 0)
rews = np.asarray(rews, dtype=np.float32).swapaxes(1, 0)
values = np.asarray(values, dtype=np.float32).swapaxes(1, 0)
dones = np.asarray(dones, dtype=np.bool).swapaxes(1, 0)
last_values = self.agent.get_value(obs)
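# n-step return computation: per environment, discount the collected rewards and,
# when the last transition is not terminal, bootstrap with the critic's value of the
# final observation (discount_with_dones drops that appended value again via [:-1]).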
for n, (rew, done, value) in enumerate(zip(rews, dones, last_values)):
rew = rew.tolist()
done = done.tolist()
if done[-1] == 0:
rew = U.discount_with_dones(rew+[value], done+[0.], self.args.gamma)[:-1]
else:
rew = U.discount_with_dones(rew, done, self.args.gamma)
rews[n] = rew
obses = np.concatenate([obs for obs in obses])
acts = acts.flatten()
rews = rews.flatten()
values = values.flatten()
return obses, acts, rews, values, obs
if __name__ == '__main__':
graph = tf.get_default_graph()
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=args.num_procs,
inter_op_parallelism_threads=args.num_procs)
session = tf.Session(graph=graph, config=config)
# build env
env = build_multi_envs()
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.n
# build agent
agent = DiscreteA2C(session, args, obs_dim, act_dim)
# build player
player = PlayGym(args, env, agent)
# start to play :)
session.run(tf.global_variables_initializer())
player.play()
```
#### File: cartpole_rl/dqn/dueling_network_old.py
```python
import tensorflow as tf
import tensorlayer as tl
import gym
import numpy as np
import random
import os
from gym import wrappers
from collections import deque
# Hyper Parameters for DQN
GAMMA = 0.99 # discount factor for target Q
INITIAL_EPSILON = 1 # starting value of epsilon
FINAL_EPSILON = 0.01 # final value of epsilon
EXPLOER_NUM = 10000
REPLAY_SIZE = 20000 # experience replay buffer size
BATCH_SIZE = 64 # size of minibatch
LEARNING_RATE = 1e-4
DECLAY_FLAG = True
DECLAY_NUM = 1e-5
DISPLAY = False
SAVE = False
LOAD = False
MODE_NAME = 'LunarLander-v2'
# MODE_NAME = 'Atlantis-ram-v0'
EPISODE = 10000 # Episode limitation
STEP = 10000 # Step limitation in an episode
TEST = 100
UPDATE_TIME = 500
OBSERVE_NUM = 32
TARGET_NUM = 195
EVAL_FLAG = False
class DQN():
# DQN Agent
def __init__(self, env):
# init experience replay
self.replay_buffer = deque()
# init some parameters
self.time_step = 0
self.reward = 0
self.epsilon = INITIAL_EPSILON
self.state_dim = env.observation_space.shape[0]
self.action_dim = env.action_space.n
print 'state_dim:', self.state_dim, ' action_dim:', self.action_dim
self.create_Q_network()
self.create_Q_network_target()
self.create_training_method()
# Init session
self.session = tf.InteractiveSession()
self.merged = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter('/tmp/train', self.session.graph)
self.session.run(tf.global_variables_initializer())
def create_Q_network(self):
# input layer
self.state_input = tf.placeholder("float",[None,self.state_dim])
self.network = tl.layers.InputLayer(self.state_input, name='Input')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu1')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu2')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu3')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu4')
self.network_V = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='V_1')
self.network_V = tl.layers.DenseLayer(self.network_V, n_units=200, act=tf.nn.relu, name='V_2')
self.network_V = tl.layers.DenseLayer(self.network_V, n_units=1, name='output_V')
self.network_A = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='A_1')
self.network_A = tl.layers.DenseLayer(self.network_A, n_units=200, act=tf.nn.relu, name='A_2')
self.network_A = tl.layers.DenseLayer(self.network_A, n_units=self.action_dim, name='output_A')
self.value_function = self.network_V.outputs
self.advantage_function = self.network_A.outputs
def create_Q_network_target(self):
# input layer
self.state_input_target = tf.placeholder("float",[None,self.state_dim])
self.network_target = tl.layers.InputLayer(self.state_input_target, name='Input_target')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_1')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_2')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_3')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_4')
self.network_V_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='V_1_target')
self.network_V_target = tl.layers.DenseLayer(self.network_V_target, n_units=200, act=tf.nn.relu, name='V_2_target')
self.network_V_target = tl.layers.DenseLayer(self.network_V_target, n_units=1, name='output_V_target')
self.network_A_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='A_1_target')
self.network_A_target = tl.layers.DenseLayer(self.network_A_target, n_units=200, act=tf.nn.relu, name='A_2_target')
self.network_A_target = tl.layers.DenseLayer(self.network_A_target, n_units=self.action_dim, name='output_A_target')
self.value_function_target = self.network_V_target.outputs
self.advantage_function_target = self.network_A_target.outputs
def create_training_method(self):
self.action_input = tf.placeholder("float",[None,self.action_dim]) # one hot presentation
self.y_input = tf.placeholder("float",[None])
self.reward_sum = tf.placeholder("float")
self.epsilon_sum = tf.placeholder("float")
self.replay_size = tf.placeholder("float")
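# Dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a));
# subtracting the mean advantage acts as a baseline and keeps V and A identifiable.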
A_origin = tf.reduce_sum(tf.multiply(self.advantage_function, self.action_input),reduction_indices = 1)
A_baseline = tf.reduce_mean(self.advantage_function, reduction_indices = 1)
Q_action = self.value_function + (A_origin - A_baseline)
self.cost = tf.reduce_mean(tf.square(self.y_input - Q_action))
A_value = tf.reduce_mean(self.advantage_function, axis=0)
A_baseline_value = tf.reduce_mean(A_baseline)
V_value = tf.reduce_mean(self.value_function)
with tf.name_scope('loss'):
tf.summary.scalar('cost', self.cost)
with tf.name_scope('reward'):
tf.summary.scalar('reward_mean', self.reward_sum)
with tf.name_scope('Q_value_nomalize'):
tf.summary.scalar('Q_value', V_value + (A_value[0] - A_baseline_value))
tf.summary.scalar('Q_value', V_value + (A_value[1] - A_baseline_value))
# tf.summary.scalar('Q_value', V_value + (A_value[2] - A_baseline_value))
with tf.name_scope('param'):
tf.summary.scalar('epsilon', self.epsilon_sum)
tf.summary.scalar('replay_size', self.replay_size)
self.optimizer_1 = tf.train.AdamOptimizer(LEARNING_RATE).minimize(self.cost)
# self.optimizer_2 = tf.train.RMSPropOptimizer(0.00025,0.99,0.0,1e-7).minimize(self.cost)
# self.optimizer_3 = tf.train.RMSPropOptimizer(0.00025,0.99,0.0,1e-8).minimize(self.cost)
def perceive(self,state,action,reward,next_state,done):
one_hot_action = np.zeros(self.action_dim)
one_hot_action[action] = 1
self.replay_buffer.append((state,one_hot_action,reward,next_state,done))
if len(self.replay_buffer) > REPLAY_SIZE:
self.replay_buffer.popleft()
if len(self.replay_buffer) > BATCH_SIZE:
self.train_Q_network()
def write_reward(self, reward_sum):
self.reward = reward_sum
def train_Q_network(self):
self.time_step += 1
# Step 1: obtain random minibatch from replay memory
minibatch = random.sample(self.replay_buffer,BATCH_SIZE)
state_batch = [data[0] for data in minibatch]
action_batch = [data[1] for data in minibatch]
reward_batch = [data[2] for data in minibatch]
next_state_batch = [data[3] for data in minibatch]
# Step 2: calculate y
y_batch = []
value_target_batch = self.value_function_target.eval(feed_dict = {self.state_input_target:next_state_batch})
advantage_target_batch = self.advantage_function_target.eval(feed_dict = {self.state_input_target:next_state_batch})
advantage_baseline_batch = np.mean(advantage_target_batch, axis = 1)
advantage_baseline_batch = advantage_baseline_batch.reshape(BATCH_SIZE, 1)
advantage_batch = self.advantage_function.eval(feed_dict = {self.state_input:state_batch})
# print '1:', np.shape(value_target_batch)
# print '2:', np.shape(advantage_target_batch)
# print '3:', np.shape(advantage_baseline_batch)
# print '4:', np.shape(advantage_batch)
# print '1-1:', value_target_batch[0][0]
# print '3-1:', advantage_baseline_batch[0]
# print '4-1:', np.argmax(advantage_batch[0])
for i in range(0, BATCH_SIZE):
done = minibatch[i][4]
if done:
y_batch.append(reward_batch[i])
else :
y_batch.append(reward_batch[i] + GAMMA * (value_target_batch[i][0] + (advantage_target_batch[i][np.argmax(advantage_batch[i])] - advantage_baseline_batch[i][0])))
replay_size = len(self.replay_buffer)
summary, _ = self.session.run([self.merged, self.optimizer_1], feed_dict={
self.y_input:y_batch,
self.action_input:action_batch,
self.state_input:state_batch,
self.reward_sum:self.reward,
self.epsilon_sum:self.epsilon,
self.replay_size:replay_size})
self.train_writer.add_summary(summary, self.time_step)
if self.time_step % UPDATE_TIME == 0:
# print 'updating...'
tl.files.assign_params(self.session, self.network.all_params, self.network_target)
tl.files.assign_params(self.session, self.network_A.all_params, self.network_A_target)
tl.files.assign_params(self.session, self.network_V.all_params, self.network_V_target)
def egreedy_action(self,state):
if self.time_step < OBSERVE_NUM:
return random.randint(0,self.action_dim - 1)
if DECLAY_FLAG:
self.epsilon *= (1 - DECLAY_NUM)
else:
self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLOER_NUM
if self.epsilon < FINAL_EPSILON:
self.epsilon = FINAL_EPSILON
Q_value = self.advantage_function.eval(feed_dict = {self.state_input:[state]})[0]
if random.random() <= self.epsilon:
return random.randint(0,self.action_dim - 1)
else:
return np.argmax(Q_value)
def action(self,state):
return np.argmax(self.Q_value.eval(feed_dict = {
self.state_input:[state]
})[0])
def train_game():
env = gym.make(MODE_NAME)
if EVAL_FLAG:
env = wrappers.Monitor(env, '/tmp/' + MODE_NAME)
agent = DQN(env)
if LOAD is True:
params = tl.files.load_npz(name=MODE_NAME + '.npz')
tl.files.assign_params(agent.session, params, agent.network)
reward_mean = 0
reward_sum = 0
end_flag = False
for episode in xrange(EPISODE):
# initialize task
state = env.reset()
if end_flag:
break;
# Train
for step in xrange(STEP):
if DISPLAY is True:
env.render()
action = agent.egreedy_action(state) # e-greedy action for train
next_state,reward,done,_ = env.step(action)
reward_sum += reward
agent.perceive(state,action,reward,next_state,done)
state = next_state
if done:
agent.write_reward(reward_sum)
reward_mean += reward_sum
print 'episode: ', episode, '... reward_sum: ', reward_sum
reward_sum = 0
if episode % TEST == 0:
if SAVE is True:
tl.files.save_npz(agent.network.all_params, name=MODE_NAME + '.npz')
reward_mean /= (TEST + 1)
if (reward_mean > TARGET_NUM):
end_flag = True
print 'episode:', episode, ' reward_mean:', reward_mean, ' epsilon: ', agent.epsilon
break
if __name__ == '__main__':
train_game()
if EVAL_FLAG:
gym.upload('/tmp/' + MODE_NAME, api_key='<KEY>')
```
#### File: cartpole_rl/dqn/pdd_dqn.py
```python
import time
import random
import numpy as np
import tensorflow as tf
import tensorlayer as tl
import utils as U
class PrioritizedDoubleDuelingDQN(object):
def __init__(self, session, args, obs_dim, act_dim):
self.sess = session
self.args = args
self.obs_dim = obs_dim
self.act_dim = act_dim
if self.args.prioritized:
self.buffer = U.PrioritizedReplayBuffer(
self.args.buffer_size, alpha=self.args.alpha)
self.speed_beta = (1. - self.args.beta) / self.args.max_steps
self.beta = self.args.beta
else:
self.buffer = U.ReplayBuffer(self.args.buffer_size)
self.time_step = 0
self.score = 0
self.epsilon = 1
self.speed_eps = (1 - self.args.final_epsilon) / (self.args.explore_num)
self._build_ph()
self.eval_model, self.value_eval, self.adv_eval = self._build_network('eval')
self.target_model, self.value_target, self.adv_target = self._build_network('target')
self.mean_q, self.td_error, self.loss, self.opt, self.update_target = self._build_training_method()
self._build_tensorboard()
self.merge_all = tf.summary.merge_all()
self.writer = tf.summary.FileWriter('../tensorboard/dqn/{}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))),
self.sess.graph)
def _build_ph(self):
self.obs_ph = tf.placeholder(tf.float32, [None, self.obs_dim], 'obs_ph')
self.act_ph = tf.placeholder(tf.float32, [None, self.act_dim], 'act_ph')
self.ret_ph = tf.placeholder(tf.float32, [None, ], 'ret_ph')
self.weights_ph = tf.placeholder(tf.float32, [None, ], 'weights_ph')
def _build_network(self, model_name):
hid1_size = self.obs_dim * 10
hid3_size = self.act_dim * 5
hid2_size = int(np.sqrt(hid1_size*hid3_size))
network = tl.layers.InputLayer(self.obs_ph, name='input_{}'.format(model_name))
network = tl.layers.DenseLayer(network, n_units=hid1_size, act=tf.nn.relu,
name='{}_mlp1'.format(model_name))
network = tl.layers.DenseLayer(network, n_units=hid2_size, act=tf.nn.relu,
name='{}_mlp2'.format(model_name))
value_network = tl.layers.DenseLayer(network, n_units=hid3_size, act=tf.nn.relu,
name='{}_value'.format(model_name))
value_network = tl.layers.DenseLayer(value_network, n_units=1,
name='{}_value_output'.format(model_name))
adv_network = tl.layers.DenseLayer(network, n_units=self.act_dim,
name='{}_adv_output'.format(model_name))
value = value_network.outputs
adv = adv_network.outputs
return [network, value_network, adv_network], value, adv
def _build_training_method(self):
mean_q = tf.reduce_mean(self.adv_eval) + tf.reduce_mean(self.value_eval)
with tf.variable_scope('advantage'):
adv = tf.reduce_sum(tf.multiply(self.adv_eval, self.act_ph), axis=1)
with tf.variable_scope('q_value'):
q_value = tf.squeeze(self.value_eval) + adv
with tf.variable_scope('td_error'):
td_error = q_value - self.ret_ph
if self.args.huber:
with tf.variable_scope('huber_loss'):
errors = U.huber_loss(td_error)
else:
errors = tf.square(td_error)
with tf.variable_scope('loss'):
loss = tf.reduce_mean(self.weights_ph*errors)
# opt operation
opt = tf.train.AdamOptimizer(self.args.lr).minimize(loss)
# update target operation
eval_vars = []
target_vars = []
for eval_net, target_net in zip(self.eval_model, self.target_model):
eval_vars += eval_net.all_params
target_vars += target_net.all_params
update_target = []
for var, var_target in zip(eval_vars, target_vars):
update_target.append(var_target.assign(var))
update_target = tf.group(*update_target)
return mean_q, td_error, loss, opt, update_target
def _build_tensorboard(self):
self.score_tb = tf.placeholder(tf.float32, name='score_tb')
self.size_tb = tf.placeholder(tf.float32, name='size_tb')
self.epsilon_tb = tf.placeholder(tf.float32, name='epsilon_tb')
self.beta_tb = tf.placeholder(tf.float32, name='beta_tb')
with tf.name_scope('loss'):
tf.summary.scalar('loss', self.loss)
with tf.name_scope('params'):
tf.summary.scalar('q-value', self.mean_q)
tf.summary.scalar('score', self.score_tb)
tf.summary.scalar('buffer_size', self.size_tb)
tf.summary.scalar('epsilon', self.epsilon_tb)
if self.args.prioritized:
tf.summary.scalar('beta', self.beta_tb)
def train(self):
self.time_step += 1
if self.args.prioritized:
# sample experience from buffer
self.beta += self.speed_beta
experience = self.buffer.sample(self.args.batch_size, self.beta)
(obses, acts, rews, new_obses, dones, weights, idxes) = experience
else:
obses, acts, rews, new_obses, dones = self.buffer.sample(self.args.batch_size)
weights, idxes = np.ones_like(rews), None
# compute rets
adv_eval, adv_target, value_target = self.sess.run(
[self.adv_eval, self.adv_target, self.value_target],
feed_dict={self.obs_ph: new_obses})
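# Double dueling target: the online (eval) advantage stream selects the greedy action,
# while the target network evaluates it:
# ret = r + gamma * (V_target + A_target[argmax(A_eval)] - mean(A_target)) for non-terminal steps.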
baselines = np.mean(adv_target, axis=1)
rets = []
for i in range(len(rews)):
if dones[i]:
rets.append(rews[i])
else:
rets.append(rews[i]+self.args.gamma*
(value_target[i][0]+adv_target[i][np.argmax(adv_eval[i])]-baselines[i]))
rets = np.asarray(rets)
# opt q-network
feed_dict = {
self.obs_ph: obses,
self.act_ph: acts,
self.ret_ph: rets,
self.weights_ph: weights,
self.score_tb: self.score,
self.size_tb: len(self.buffer),
self.epsilon_tb: self.epsilon
}
if self.args.prioritized:
feed_dict[self.beta_tb] = self.beta
summary, td_error, _ = self.sess.run([self.merge_all, self.td_error, self.opt],
feed_dict=feed_dict)
# write tensorboard file
self.writer.add_summary(summary, self.time_step)
# update target network
if self.time_step % self.args.update_target_num == 0:
self.sess.run(self.update_target)
if self.args.prioritized:
# update priorities
new_priorities = np.abs(td_error) + 1e-6
self.buffer.update_priorities(idxes, new_priorities)
def action(self, obs, test=False):
obs = np.reshape(obs, (1, self.obs_dim))
feed_dict = {self.obs_ph: obs}
if self.args.test_alg or test:
return np.argmax(self.sess.run(self.adv_eval, feed_dict)[0])
elif self.time_step == 0:
return random.randint(0, self.act_dim-1)
# epsilon-greedy exploration
self.epsilon -= self.speed_eps
if self.epsilon < self.args.final_epsilon:
self.epsilon = self.args.final_epsilon
if random.random() <= self.epsilon:
return random.randint(0, self.act_dim-1)
else:
return np.argmax(self.sess.run(self.adv_eval, feed_dict)[0])
def get_score(self, score):
# get cumulative rewards
self.score = score
def one_hot_key(self, act):
one_hot_key = np.zeros(self.act_dim)
one_hot_key[act] = 1.
return one_hot_key
def save_network(self, model_name):
for i, network in enumerate(self.eval_model):
tl.files.save_npz(network.all_params,
name='../model/dqn/{}_{}.npz'.format(model_name, i),
sess=self.sess)
def load_network(self, model_name):
for i, network in enumerate(self.eval_model):
params = tl.files.load_npz(
name='../model/dqn/{}_{}.npz'.format(model_name, i))
tl.files.assign_params(self.sess, params, network)
```
#### File: cartpole_rl/dqn/play_gym.py
```python
import argparse
import gym
import numpy as np
import tensorflow as tf
from pddqn import PrioritizedDoubleDQN
from pdd_dqn import PrioritizedDoubleDuelingDQN
from ddqn import DDQN
parser = argparse.ArgumentParser(description='prioritized double dueling deep q-network algorithm')
parser.add_argument(
'--lr', default=5e-4, type=float, help='learning rate')
parser.add_argument(
'--final_epsilon', default=0.02, type=float, help='epsilon greedy exploration hyperparameter')
parser.add_argument(
'--beta', default=0.4, type=float, help='prioritized replay buffer hyperparameter')
parser.add_argument(
'--alpha', default=0.6, type=float, help='prioritized replay buffer hyperparameter')
parser.add_argument(
'--gamma', default=.99, type=float, help='gamma')
parser.add_argument(
'--batch_size', default=32, type=int, help='training batch size')
parser.add_argument(
'--update_target_num', default=500, type=int, help='the frequency of updating the target network')
parser.add_argument(
'--obs_num', default=2000, type=int, help='how many transitions before agent training')
parser.add_argument(
'--explore_num', default=50000, type=int, help='how many transitions until exploration is finished')
parser.add_argument(
'--buffer_size', default=100000, type=int, help='the size of replay buffer')
parser.add_argument(
'--max_steps', default=100000, type=int, help='max steps of training')
parser.add_argument(
'--animate', default=False, type=bool, help='whether to animate environment')
parser.add_argument(
'--prioritized', default=True, type=bool, help='whether to use prioritized replay buffer')
parser.add_argument(
'--huber', default=True, type=bool, help='whether to use huber loss')
parser.add_argument(
'--save_network', default=False, type=bool, help='whether to save network')
parser.add_argument(
'--load_network', default=False, type=bool, help='whether to load network')
parser.add_argument(
'--test_alg', default=False, type=bool, help='whether to test our algorithm')
parser.add_argument(
'--gym_id', default='CartPole-v1', type=str, help='gym id')
parser.add_argument(
'--model_name', default='pddqn', type=str, help='save or load model name')
args = parser.parse_args()
class PlayGym(object):
def __init__(self, args, env, agent):
self.args = args
self.env = env
self.agent = agent
def play(self, times=100000):
for e in range(times):
score = self._train_episode()
self.agent.get_score(score)
# self.agent.update_target()
# print ("Episode: {} | score: {} | epsilon: {}".format(e+1, score, self.agent.epsilon))
if e % 50 == 0:
scores = [self._test_episode() for _ in range(10)]
scores = np.asarray(scores)
mean = np.mean(scores)
print ("Episode: {} | score: {} | epsilon: {}".format(e, mean, self.agent.epsilon))
if self.agent.time_step > self.args.max_steps:
break
def _test_episode(self):
obs = self.env.reset()
done = False
score = 0
while not done:
act = self.agent.action(obs, test=True)
obs, rew, done, info = self.env.step(act)
score += rew
return score
def _train_episode(self):
obs = self.env.reset()
done = False
score = 0
while not done:
act = self.agent.action(obs)
new_obs, rew, done, info = self.env.step(act)
self.agent.buffer.add(obs, self.agent.one_hot_key(act), rew, new_obs, done)
score += rew
obs = new_obs
if len(self.agent.buffer) > self.args.obs_num:
self.agent.train()
return score
if __name__ == '__main__':
graph = tf.get_default_graph()
config = tf.ConfigProto()
session = tf.Session(graph=graph, config=config)
env = gym.make(args.gym_id)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.n
agent = PrioritizedDoubleDuelingDQN(session, args, obs_dim, act_dim)
# agent = PrioritizedDoubleDQN(session, args, obs_dim, act_dim)
# agent = DDQN(session, args, obs_dim, act_dim)
player = PlayGym(args, env, agent)
session.run(tf.global_variables_initializer())
player.play()
```
#### File: cartpole_rl/ppo_dm/ppo_old.py
```python
import argparse
import gym
import time
import ray
import threading
import roboschool
import util
import scipy.signal
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tabulate import tabulate
from gym import wrappers
from collections import OrderedDict
from sklearn.utils import shuffle
parser = argparse.ArgumentParser(description='ppo adaptive KL algorithm')
parser.add_argument(
'--gamma', default=0.995, type=float, help='gamma')
parser.add_argument(
'--lambda_gae', default=0.98, type=float, help='lambda for GAE')
parser.add_argument(
'--log_vars', default=0.0, type=float, help='init action log variance')
parser.add_argument(
'--eta', default=50, type=float, help='actor loss parameter')
parser.add_argument(
'--actor_lr', default=3e-4, type=float, help='learning rate for actor')
parser.add_argument(
'--critic_lr', default=1e-3, type=float, help='learning rate for critic')
parser.add_argument(
'--kl_targ', default=0.003, type=float, help='kl divergence target')
parser.add_argument(
'--nums_worker', default=8, type=int, help='number of workers')
parser.add_argument(
'--train_epochs', default=10, type=int, help='training epochs')
parser.add_argument(
'--batch_size', default=20, type=int, help='training batch size')
parser.add_argument(
'--training_steps', default=4000, type=int, help='steps number for training')
parser.add_argument(
'--max_episodes', default=1000000000, type=int, help='max training episodes')
parser.add_argument(
'--animate', default=False, type=bool, help='whether to animate environment')
parser.add_argument(
'--save_network', default=False, type=bool, help='whether to save network')
parser.add_argument(
'--load_network', default=False, type=bool, help='whether to load network')
parser.add_argument(
'--test_algorithm', default=False, type=bool, help='whether to test the algorithm')
parser.add_argument(
'--eval_algorithm', default=False, type=bool, help='whether to evaluate algorithm')
parser.add_argument(
'--env_name', default='RoboschoolAnt-v1', type=str, help='gym env name')
parser.add_argument(
'--model_name', default='ppo', type=str, help='save or load model name')
# global value
args = parser.parse_args()
class PPO(object):
def __init__(self, env, args):
self.init_param(env, args)
self.session = tf.InteractiveSession()
self._build_ph()
self.model = self._build_network()
self._build_trainning()
self._build_summary()
self.merge_all = tf.summary.merge_all()
self.writer = tf.summary.FileWriter('../tensorboard/ppo/{}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
), self.session.graph)
self.session.run(tf.global_variables_initializer())
def _build_ph(self):
self.obs_ph = tf.placeholder(tf.float32, [None, self.obs_dim],
'obs_ph')
self.act_ph = tf.placeholder(tf.float32, [None, self.act_dim],
'act_ph')
self.adv_ph = tf.placeholder(tf.float32, [None, ],
'adv_ph')
self.ret_ph = tf.placeholder(tf.float32, [None, ],
'ret_ph')
self.lr_ph = tf.placeholder(tf.float32, name='lr_ph')
self.beta_ph = tf.placeholder(tf.float32, name='beta_ph')
self.old_log_vars_ph = tf.placeholder(tf.float32, [self.act_dim, ],
'old_log_vars')
self.old_means_ph = tf.placeholder(tf.float32, [None, self.act_dim],
'old_means')
def _build_network(self):
# build actor network
hid1_size = self.obs_dim * 10
hid3_size = self.act_dim * 10
hid2_size = int(np.sqrt(hid1_size * hid3_size))
self.actor_network = tl.layers.InputLayer(self.obs_ph, name = 'actor_network_input')
self.actor_network = tl.layers.DenseLayer(self.actor_network, n_units = hid1_size, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / self.obs_dim)), name = 'actor_tanh1')
self.actor_network = tl.layers.DenseLayer(self.actor_network, n_units = hid2_size, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / float(hid1_size))), name = 'actor_tanh2')
self.actor_network = tl.layers.DenseLayer(self.actor_network, n_units = hid3_size, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / float(hid2_size))), name = 'actor_tanh3')
self.actor_network = tl.layers.DenseLayer(self.actor_network, n_units = self.act_dim, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / float(hid3_size))), name = 'means')
# build critic network
hid1_size = self.obs_dim * 10
hid3_size = 5
hid2_size = int(np.sqrt(hid1_size * hid3_size))
self.critic_network = tl.layers.InputLayer(self.obs_ph, name = 'critic_network_input')
self.critic_network = tl.layers.DenseLayer(self.critic_network, n_units = hid1_size, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / self.obs_dim)), name = 'critic_tanh1')
self.critic_network = tl.layers.DenseLayer(self.critic_network, n_units = hid2_size, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / float(hid1_size))), name = 'critic_tanh2')
self.critic_network = tl.layers.DenseLayer(self.critic_network, n_units = hid3_size, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / float(hid2_size))), name = 'critic_tanh3')
self.critic_network = tl.layers.DenseLayer(self.critic_network, n_units = 1, act = tf.nn.tanh,
W_init = tf.random_normal_initializer(stddev=np.sqrt(1.0 / float(hid3_size))), name = 'value')
# build variance network
logvar_speed = (10 * hid3_size) // 48
log_vars = tf.get_variable('logvars', (logvar_speed, self.act_dim), tf.float32,
tf.constant_initializer(0.0))
self.means = self.actor_network.outputs
self.log_vars = tf.reduce_sum(log_vars, axis=0) - self.args.log_vars
self.value = self.critic_network.outputs
# sample action from norm distributiion
with tf.variable_scope('sample_action'):
self.sampled_act = (self.means +
tf.exp(self.log_vars / 2.0) *
tf.random_normal(shape=(self.act_dim,)))
return [self.actor_network, self.critic_network]
def _build_trainning(self):
# logprob
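# Log-density of the diagonal Gaussian policy (the constant term is dropped):
# log pi(a|s) = -0.5 * sum(log_vars) - 0.5 * sum((a - mean)^2 / exp(log_vars)),
# computed under both the current and the old parameters to form the surrogate ratio and the KL penalty.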
self.logp = -0.5 * tf.reduce_sum(self.log_vars) + -0.5 * tf.reduce_sum(tf.square(self.act_ph - self.means) / tf.exp(self.log_vars), axis=1)
self.logp_old = -0.5 * tf.reduce_sum(self.old_log_vars_ph) + -0.5 * tf.reduce_sum(tf.square(self.act_ph - self.old_means_ph) / tf.exp(self.old_log_vars_ph), axis=1)
with tf.variable_scope('kl'):
self.kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.exp(self.old_log_vars_ph - self.log_vars)) +
tf.reduce_sum(tf.square(self.means - self.old_means_ph) / tf.exp(self.log_vars), axis=1) -
self.act_dim +
tf.reduce_sum(self.log_vars) - tf.reduce_sum(self.old_log_vars_ph))
with tf.variable_scope('entropy'):
self.entropy = 0.5 * (self.act_dim * (np.log(2 * np.pi) + 1) +
tf.reduce_sum(self.log_vars))
with tf.variable_scope('actor_loss'):
loss1 = -tf.reduce_mean(self.adv_ph * tf.exp(self.logp - self.logp_old))
loss2 = tf.reduce_mean(self.beta_ph * self.kl)
loss3 = self.args.eta * tf.square(tf.maximum(0.0, self.kl - 2.0 * self.args.kl_targ))
self.actor_loss = loss1 + loss2 + loss3
self.actor_opt = tf.train.AdamOptimizer(self.lr_ph).minimize(self.actor_loss)
with tf.variable_scope('critic_loss'):
self.critic_loss = tf.reduce_mean(tf.square(tf.squeeze(self.value) - self.ret_ph))
self.critic_opt = tf.train.AdamOptimizer(self.args.critic_lr).minimize(self.critic_loss)
def _build_summary(self):
self.score_tb = tf.placeholder(tf.float32, name='score_tb')
self.actor_loss_tb = tf.placeholder(tf.float32, name='actor_loss_tb')
self.critic_loss_tb = tf.placeholder(tf.float32, name='critic_loss_tb')
self.entropy_tb = tf.placeholder(tf.float32, name='entropy_tb')
self.kl_tb = tf.placeholder(tf.float32, name='kl_tb')
self.lr_tb = tf.placeholder(tf.float32, name='lr_tb')
self.beta_tb = tf.placeholder(tf.float32, name='beta_tb')
with tf.name_scope('loss'):
tf.summary.scalar('actor_loss', self.actor_loss_tb)
tf.summary.scalar('critic_loss', self.critic_loss_tb)
with tf.name_scope('param'):
tf.summary.scalar('entropy', self.entropy_tb)
tf.summary.scalar('kl', self.kl_tb)
tf.summary.scalar('lr', self.lr_tb)
tf.summary.scalar('beta', self.beta_tb)
tf.summary.scalar('score', self.score_tb)
def update_actor(self, obs, acts, advs, rets, score):
feed_dict = {
self.obs_ph: obs,
self.act_ph: acts,
self.adv_ph: advs,
self.ret_ph: rets,
self.beta_ph: self.beta,
self.lr_ph: self.args.actor_lr * self.lr_multiplier
}
old_means_np, old_log_vars_np = self.session.run([self.means, self.log_vars],
feed_dict)
feed_dict[self.old_log_vars_ph] = old_log_vars_np
feed_dict[self.old_means_ph] = old_means_np
for e in range(self.args.train_epochs):
self.session.run(self.actor_opt, feed_dict)
kl = self.session.run(self.kl, feed_dict)
if kl > self.args.kl_targ * 4: # early stopping
break
# adaptive KL penalty: raise beta (and lower the learning rate) when KL overshoots the target, lower beta (and raise the learning rate) when it undershoots
if kl > self.args.kl_targ * 2:
self.beta = np.minimum(35, 1.5 * self.beta)
if self.beta > 30 and self.lr_multiplier > 0.1:
self.lr_multiplier /= 1.5
elif kl < self.args.kl_targ / 2.0:
self.beta = np.maximum(1.0 / 35.0, self.beta / 1.5)
if self.beta < (1.0 / 30.0) and self.lr_multiplier < 10:
self.lr_multiplier *= 1.5
stats = self._visualize_stats(feed_dict, score)
self._visualize_tensorboard(stats)
if self.args.save_network and self.time_step % 10 == 0.:
self.save_network(self.args.model_name)
return stats
def update_critic(self, x, y):
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.critic_epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
obs = x_train[start:end, :]
ret = y_train[start:end]
feed_dict = {self.obs_ph: obs,
self.ret_ph: ret}
self.session.run(self.critic_opt, feed_dict=feed_dict)
def _visualize_stats(self, feed_dict, score):
kl, entropy, actor_loss, critic_loss = self.session.run(
[self.kl, self.entropy, self.actor_loss, self.critic_loss],
feed_dict)
stats = OrderedDict()
stats["Score"] = score
stats["LearningRate"] = self.args.actor_lr * self.lr_multiplier
stats["Beta"] = self.beta
stats["KL-divergence"] = kl
stats["Entropy"] = entropy
stats["ActorLoss"] = actor_loss
stats["CriticLoss"] = critic_loss
return stats
def _visualize_tensorboard(self, stats):
feed_dict = {
self.score_tb: stats["Score"],
self.lr_tb: stats["LearningRate"],
self.beta_tb: stats["Beta"],
self.kl_tb: stats["KL-divergence"],
self.entropy_tb: stats["Entropy"],
self.actor_loss_tb: stats["ActorLoss"],
self.critic_loss_tb: stats["CriticLoss"],
}
self.time_step += 1
summary = self.session.run(self.merge_all, feed_dict)
self.writer.add_summary(summary, self.time_step)
def sample(self, obs):
# obs = np.reshape(obs, (1, self.obs_dim))
feed_dict = {self.obs_ph: obs}
if self.args.test_algorithm:
return self.session.run(self.means, feed_dict=feed_dict)
else:
return self.session.run(self.sampled_act, feed_dict=feed_dict)
def get_value(self, obs):
values = self.value.eval(feed_dict={self.obs_ph: obs})
return values
def convert_action(self, action):
return action * self.act_high
def init_param(self, env, args):
self.args = args
# env param
self.obs_dim = env.observation_space.shape[0]
self.act_dim = env.action_space.shape[0]
self.act_high = env.action_space.high
# value init
self.time_step = 0
self.score = 0
# actor param
self.beta = 1
self.lr_multiplier = 1.0
# critic param
self.replay_buffer_x = None
self.replay_buffer_y = None
self.critic_epochs = 10
def save_network(self, model_name):
for i in range(len(self.model)):
tl.files.save_npz(self.model[i].all_params,
name='../model/ppo/{}_{}.npz'.format(model_name, i),
sess=self.session)
def load_network(self, model_name):
for i in range(len(self.model)):
params = tl.files.load_npz(name='../model/ppo/{}_{}.npz'.format(model_name, i))
tl.files.assign_params(self.session, params, self.model[i])
@ray.remote
class RayEnvironment(object):
def __init__(self, env):
self.env = env
state = self.env.reset()
self.shape = state.shape
self.done = False
def step(self, action):
if self.done:
return [np.zeros(self.shape), 0.0, True]
else:
state, reward, done, info = self.env.step(action)
self.done = done
return [state, reward, done]
def reset(self):
self.done = False
return self.env.reset()
def run_episode(envs, agent, trajectories, animate=args.animate):
terminates = [False for _ in range(len(envs))]
terminates_idxs = [0 for _ in range(len(envs))]
paths_obs, paths_act, paths_rew = [], [], []
states = [env.reset.remote() for env in envs]
states = ray.get(states)
while not all(terminates):
# if animate:
# env.render()
paths_obs.append(states)
actions = agent.sample(states)
paths_act.append(actions)
next_step = [env.step.remote(actions[i]) for i, env in enumerate(envs)]
next_step = ray.get(next_step)
states = [batch[0] for batch in next_step]
rewards = [batch[1] for batch in next_step]
dones = [batch[2] for batch in next_step]
paths_rew.append(rewards)
for i, d in enumerate(dones):
if d:
terminates[i] = True
else:
terminates_idxs[i] += 1
for i in range(len(envs)):
obs = []
acts = []
rews = []
for j in range(len(paths_rew)):
obs.append(paths_obs[j][i])
acts.append(paths_act[j][i])
rews.append(paths_rew[j][i])
if terminates_idxs[i] == j:
break
obs = np.asarray(obs)
acts = np.asarray(acts)
rews = np.asarray(rews)
acts = np.reshape(acts, (len(rews), agent.act_dim))
trajectory = {
'obs': obs,
'acts': acts,
'rewards': rews
}
trajectories.append(trajectory)
return trajectories
def run_policy(envs, agent, training_steps, batch_size):
trajectories = []
for e in range((batch_size // args.nums_worker) + 1):
trajectories = run_episode(envs, agent, trajectories)
mean_step = np.mean([len(t['rewards']) for t in trajectories])
score = np.mean([t['rewards'].sum() for t in trajectories])
return trajectories, score, mean_step
def discount(x, gamma):
return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]
def add_disc_sum_rew(trajectories, gamma):
for trajectory in trajectories:
rewards = trajectory['rewards'] * (1 - gamma)
disc_sum_rew = discount(rewards, gamma)
trajectory['disc_sum_rew'] = disc_sum_rew
def add_value(trajectories, agent):
for trajectory in trajectories:
obs = trajectory['obs']
values = agent.get_value(obs)
trajectory['values'] = np.squeeze(np.asarray(values))
def add_gae(trajectories, gamma, lam):
for trajectory in trajectories:
rewards = trajectory['rewards'] * (1 - gamma)
values = trajectory['values']
# temporal differences
tds = rewards - values + np.append(values[1:] * gamma, 0)
advantages = discount(tds, gamma * lam)
advs = np.asarray(advantages)
trajectory['advs'] = advs
def build_train_set(trajectories):
observes = np.concatenate([t['obs'] for t in trajectories])
actions = np.concatenate([t['acts'] for t in trajectories])
disc_sum_rew = np.concatenate([t['disc_sum_rew'] for t in trajectories])
advantages = np.concatenate([t['advs'] for t in trajectories])
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)
return observes, actions, advantages, disc_sum_rew
def print_stats(stats):
print("*********** Iteration {} ************".format(stats["Iteration"]))
table = []
for k, v in stats.items():
table.append([k, v])
print(tabulate(table, tablefmt="grid"))
def train():
env = gym.make(args.env_name)
agent = PPO(env, args)
if args.eval_algorithm:
env = wrappers.Monitor(env, './model/{}'.format(args.model_name), force=True)
e = 0
envs = [gym.make(args.env_name) for _ in range(args.nums_worker)]
envs = [RayEnvironment.remote(envs[i]) for i in range(args.nums_worker)]
if args.load_network:
agent.load_network(args.model_name)
while e < (args.max_episodes):
trajectories, score, mean_step = run_policy(envs, agent, args.training_steps, args.batch_size)
e += len(trajectories)
add_value(trajectories, agent)
add_disc_sum_rew(trajectories, args.gamma)
add_gae(trajectories, args.gamma, args.lambda_gae)
obs, acts, advs, rets = build_train_set(trajectories)
stats = agent.update_actor(obs, acts, advs, rets, score)
agent.update_critic(obs, rets)
stats["AverageStep"] = mean_step
stats["Iteration"] = e
print_stats(stats)
def apply_wechat():
threads = []
t1 = threading.Thread(target=train)
threads.append(t1)
t2 = threading.Thread(target=util.wechat_display)
threads.append(t2)
for t in threads:
t.start()
if __name__ == "__main__":
ray.init()
train()
# apply_wechat()
# util.wechat_display()
```
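The log-probability and KL terms assembled in the training graph above are the closed-form expressions for a diagonal Gaussian policy. As a reference, here is a minimal NumPy sketch of the same math (illustrative only, not part of the repository):
```python
import numpy as np

def gauss_logp(actions, means, log_vars):
    # log N(actions | means, diag(exp(log_vars))), summed over action dims;
    # the constant -0.5 * D * log(2*pi) is dropped, as in the graph above
    return (-0.5 * np.sum(log_vars)
            - 0.5 * np.sum((actions - means) ** 2 / np.exp(log_vars), axis=1))

def gauss_kl(old_means, old_log_vars, means, log_vars):
    # KL(old || new) for diagonal Gaussians, averaged over the batch
    act_dim = means.shape[1]
    return 0.5 * np.mean(
        np.sum(np.exp(old_log_vars - log_vars))
        + np.sum((means - old_means) ** 2 / np.exp(log_vars), axis=1)
        - act_dim
        + np.sum(log_vars) - np.sum(old_log_vars))
```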
#### File: cartpole_rl/ppo_openai/play_gym.py
```python
import argparse
import gym
import sys
import time
import os
import logger
import monitor as M
import os.path as osp
import numpy as np
import tensorflow as tf
import utils as U
from collections import deque
from ppo_cliped import PPOCliped
parser = argparse.ArgumentParser(description='proximal policy optimization cliped version')
parser.add_argument(
'--lr', default=3e-4, type=float, help='learning rate')
parser.add_argument(
'--d_targ', default=0.012, type=float, help='the target of kl divergence')
parser.add_argument(
'--ent_coef', default=0., type=float, help='the coefficient of entropy')
parser.add_argument(
'--clip_range', default=0.2, type=float, help='the clip range parameter')
parser.add_argument(
'--vf_coef', default=0.5, type=float, help='the coefficient of value function')
parser.add_argument(
'--max_grad_norm', default=0.5, type=float, help='max gradients normalize')
parser.add_argument(
'--lamb', default=.98, type=float, help='GAE hyper parameters')
parser.add_argument(
'--gamma', default=.995, type=float, help='gamma')
parser.add_argument(
'--seed', default=0, type=int, help='RNG seed')
parser.add_argument(
'--num_batchs', default=4, type=int, help='the number of batchs')
parser.add_argument(
'--num_opts', default=4, type=int, help='the number of opts')
parser.add_argument(
'--save_interval', default=50, type=int, help='the number of save_network')
parser.add_argument(
'--num_steps', default=512, type=int, help='the number of steps')
parser.add_argument(
'--num_procs', default=32, type=int, help='the number of processes')
parser.add_argument(
'--max_steps', default=20e6, type=int, help='max steps of training')
parser.add_argument(
'--train', default=True, type=bool, help='Whether to train')
parser.add_argument(
'--point', default='00001', type=str, help='the point for loading')
parser.add_argument(
'--gym_id', default='Ant-v2', type=str, help='gym id')
args = parser.parse_args()
class PlayGym(object):
def __init__(self, args, env, agent):
self.args = args
self.env = env
self.agent = agent
self.total_timesteps = self.args.max_steps
self.nminibatches = self.args.num_batchs
self.nsteps = self.args.num_steps
self.gamma = self.args.gamma
self.lam = self.args.lamb
self.noptepochs = self.args.num_opts
nenv = env.num_envs
self.obs = np.zeros((nenv,) + env.observation_space.shape)
self.obs[:] = env.reset()
self.dones = [False for _ in range(nenv)]
def learn(self):
env = self.env
nsteps = self.nsteps
nminibatches = self.nminibatches
total_timesteps = self.total_timesteps
total_timesteps = int(total_timesteps)
noptepochs = self.noptepochs
save_interval = self.args.save_interval
loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
print (" ------------- Shape --------------- ")
print ("obs_dim: {} | ac_dim: {}".format(ob_space.shape[0], ac_space.shape[0]))
print (" ----------------------------------- ")
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
epinfobuf = deque(maxlen=100)
tfirststart = time.time()
lrnow = self.args.lr
cliprangenow = self.args.clip_range
nupdates = total_timesteps//nbatch
init_targ = self.args.d_targ
kl = 0.01
def adaptive_lr(lr, kl, d_targ):
if kl < (d_targ / 1.5):
lr *= 2.
elif kl > (d_targ * 1.5):
lr *= .5
return lr
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
nbatch_train = nbatch // nminibatches
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
curr_step = update*nbatch
step_percent = float(curr_step / total_timesteps)
if step_percent < 0.1:
d_targ = init_targ
elif step_percent < 0.4:
d_targ = init_targ / 2.
else:
d_targ = init_targ / 4.
lrnow = adaptive_lr(lrnow, kl, d_targ)
obs, returns, masks, actions, values, neglogpacs, states, epinfos = self._run()
epinfobuf.extend(epinfos)
mblossvals = []
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, actions, values, neglogpacs))
mblossvals.append(self.agent.train(lrnow, cliprangenow, *slices))
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow - tstart))
kl = lossvals[3]
if update % 1 == 0 or update == 1:
ev = U.explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', U.safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', U.safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('time_elapsed', tnow - tfirststart)
logger.logkv('lr', lrnow)
logger.logkv('d_targ', d_targ)
for (lossval, lossname) in zip(lossvals, loss_names):
logger.logkv(lossname, lossval)
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
self.agent.save_network(savepath)
np.save('{}/mean'.format(logger.get_dir()), self.env.ob_rms.mean)
np.save('{}/var'.format(logger.get_dir()), self.env.ob_rms.var)
env.close()
def _run(self):
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = None
epinfos = []
for _ in range(self.nsteps):
actions, values, neglogpacs = self.agent.step(self.obs)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.agent.get_value(self.obs)
#discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(U.sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
def play(self, curr_path):
self.agent.load_network("{}/log/checkpoints/{}".format(curr_path, self.args.point))
def run_episode():
obs = self.env.reset()
score = 0
done = [False]
while not done[0]:
self.env.render()
act = self.agent.get_action(obs)
obs, rew, done, info = self.env.step(act)
score += rew[0]
return score
for e in range(10000):
score = run_episode()
print ('episode: {} | score: {}'.format(e, score))
class MakeEnv(object):
def __init__(self, curr_path):
self.curr_path = curr_path
def make(self, train=True):
if train:
return self.make_train_env()
else:
return self.make_test_env()
def make_train_env(self):
logger.configure(dir='{}/log'.format(self.curr_path))
def make_env(rank):
def _thunk():
env = gym.make(args.gym_id)
env.seed(args.seed + rank)
env = M.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
return env
return _thunk
nenvs = args.num_procs
env = U.SubprocVecEnv([make_env(i) for i in range(nenvs)])
env = U.VecNormalize(env)
return env
def make_test_env(self):
def make_env():
env = gym.make(args.gym_id)
return env
env = U.DummyVecTestEnv([make_env])
running_mean = np.load('{}/log/mean.npy'.format(self.curr_path))
running_var = np.load('{}/log/var.npy'.format(self.curr_path))
env = U.VecNormalizeTest(env, running_mean, running_var)
return env
if __name__ == '__main__':
curr_path = sys.path[0]
graph = tf.get_default_graph()
config = tf.ConfigProto()
session = tf.Session(graph=graph, config=config)
# make env
maker = MakeEnv(curr_path)
env = maker.make(args.train)
# build agent
ob_space = env.observation_space
ac_space = env.action_space
agent = PPOCliped(session, args, ob_space, ac_space)
# build player
player = PlayGym(args, env, agent)
# start
session.run(tf.global_variables_initializer())
if args.train:
player.learn()
else:
player.play(curr_path)
```
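The backward loop in `_run` is the standard Generalized Advantage Estimation (GAE) recursion. A self-contained single-environment sketch of the same computation (the handling of done flags is simplified here relative to the vectorized code):
```python
import numpy as np

def gae(rewards, values, last_value, dones, gamma=0.995, lam=0.98):
    # rewards[t], values[t]: reward and value estimate at step t
    # dones[t]: True if the episode terminated at step t
    # last_value: bootstrap value for the state after the final step
    T = len(rewards)
    advs = np.zeros(T)
    lastgaelam = 0.0
    for t in reversed(range(T)):
        nextnonterminal = 0.0 if dones[t] else 1.0
        nextvalue = last_value if t == T - 1 else values[t + 1]
        delta = rewards[t] + gamma * nextvalue * nextnonterminal - values[t]
        lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
        advs[t] = lastgaelam
    returns = advs + np.asarray(values)
    return advs, returns
```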
#### File: cartpole_rl/ppo_openai/ppo_cliped.py
```python
import time
import joblib
import numpy as np
import tensorflow as tf
import tensorlayer as tl
import utils as U
class PPOCliped(object):
def __init__(self, sess, args, ob_space, ac_space):
self.sess = sess
self.args = args
self.obs_dim = ob_space.shape[0]
self.act_dim = ac_space.shape[0]
self.time_step = 0
self.score = 0
self._build_ph()
self.model, self.means, self.value, self.pd, self.act, self.logp = self._build_net()
self.costs, self.approx_kl, self.clip_frac, self.opt = self._build_training_method()
self.merge_all = tf.summary.merge_all()
self.writer = tf.summary.FileWriter('../tensorboard/ppo/{}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))),
self.sess.graph)
def _build_ph(self):
self.obs_ph = tf.placeholder(tf.float32, [None, self.obs_dim], 'obs_ph')
self.act_ph = tf.placeholder(tf.float32, [None, self.act_dim], 'act_ph')
self.ret_ph = tf.placeholder(tf.float32, [None, ], 'ret_ph')
self.adv_ph = tf.placeholder(tf.float32, [None, ], 'adv_ph')
self.old_vpred_ph = tf.placeholder(tf.float32, [None, ], 'old_vpred_ph')
self.old_logp_ph = tf.placeholder(tf.float32, [None, ], 'old_logp_ph')
self.lr_ph = tf.placeholder(tf.float32, name='lr_ph')
self.clip_range_ph = tf.placeholder(tf.float32, name='clip_range_ph')
def _build_net(self):
actor_net = tl.layers.InputLayer(self.obs_ph, name='actor_input')
actor_net = tl.layers.DenseLayer(actor_net, n_units=64, act=tf.nn.tanh,
name='actor_tanh1')
actor_net = tl.layers.DenseLayer(actor_net, n_units=64, act=tf.nn.tanh,
name='actor_tanh2')
actor_net = tl.layers.DenseLayer(actor_net, n_units=self.act_dim, act=tf.nn.tanh,
name='act_output')
critic_net = tl.layers.InputLayer(self.obs_ph, name='critic_input')
critic_net = tl.layers.DenseLayer(critic_net, n_units=64, act=tf.nn.tanh,
name='critic_tanh1')
critic_net = tl.layers.DenseLayer(critic_net, n_units=64, act=tf.nn.tanh,
name='critic_tanh2')
critic_net = tl.layers.DenseLayer(critic_net, n_units=1, name='value_output')
logstd = tf.get_variable(name='logstd', shape=[1, self.act_dim],
initializer=tf.zeros_initializer())
means = actor_net.outputs
value = critic_net.outputs
pdparam = tf.concat([means, means*0.0 + logstd], axis=1)
pdtype = U.DiagGaussianPdType(self.act_dim)
pd = pdtype.pdfromflat(pdparam)
with tf.variable_scope('sample_act'):
act = pd.sample()
logp = pd.neglogp(act)
return [actor_net, critic_net], means, value[:, 0], pd, act, logp
def _build_training_method(self):
vpred = self.value
vpred_clip = self.old_vpred_ph + tf.clip_by_value(self.value-self.old_vpred_ph,
-self.clip_range_ph, self.clip_range_ph)
with tf.variable_scope('vf_loss1'):
vf_loss1 = tf.square(vpred-self.ret_ph)
with tf.variable_scope('vf_loss2'):
vf_loss2 = tf.square(vpred_clip-self.ret_ph)
with tf.variable_scope('vf_loss'):
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_loss1, vf_loss2))
logp = self.pd.neglogp(self.act_ph)
with tf.variable_scope('ratio'):
ratio = tf.exp(self.old_logp_ph-logp)
with tf.variable_scope('pg_loss1'):
pg_loss1 = -self.adv_ph * ratio
with tf.variable_scope('pg_loss2'):
pg_loss2 = -self.adv_ph * tf.clip_by_value(ratio, 1.0-self.clip_range_ph, 1.0+self.clip_range_ph)
with tf.variable_scope('pg_loss'):
pg_loss = tf.reduce_mean(tf.maximum(pg_loss1, pg_loss2))
with tf.variable_scope('entropy'):
entropy = tf.reduce_mean(self.pd.entropy())
with tf.variable_scope('approx_kl'):
approx_kl = .5 * tf.reduce_mean(tf.square(logp-self.old_logp_ph))
with tf.variable_scope('clip_frac'):
clip_frac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio-1.),self.clip_range_ph)))
with tf.variable_scope('total_loss'):
loss = pg_loss - self.args.ent_coef * entropy + self.args.vf_coef * vf_loss
# params = U.get_all_params(self.model)
params = tf.trainable_variables()
grads = tf.gradients(loss, params)
if self.args.max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, self.args.max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.AdamOptimizer(learning_rate=self.lr_ph, epsilon=1e-5)
opt = trainer.apply_gradients(grads)
return [pg_loss, vf_loss, entropy, loss], approx_kl, clip_frac, opt
def train(self, lr, clip_range, obses, rets, acts, values, logps):
# compute advantage
advs = rets - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
feed_dict = {
self.obs_ph: obses,
self.act_ph: acts,
self.adv_ph: advs,
self.ret_ph: rets,
self.old_vpred_ph: values,
self.old_logp_ph: logps,
self.lr_ph: lr,
self.clip_range_ph: clip_range
}
return self.sess.run(
[self.costs[0], self.costs[1], self.costs[2],
self.approx_kl, self.clip_frac, self.opt], feed_dict=feed_dict)[:-1]
def step(self, obs):
feed_dict = {self.obs_ph: obs}
act, value, logp = self.sess.run([self.act, self.value, self.logp],
feed_dict=feed_dict)
return act, value, logp
def get_value(self, obs):
feed_dict = {self.obs_ph: obs}
return self.sess.run(self.value, feed_dict=feed_dict)
def get_action(self, obs):
feed_dict = {self.obs_ph: obs}
return self.sess.run(self.means, feed_dict=feed_dict)
def save_network(self, save_path):
params = tf.trainable_variables()
ps = self.sess.run(params)
joblib.dump(ps, save_path)
def load_network(self, load_path):
loaded_params = joblib.load(load_path)
restores = []
params = tf.trainable_variables()
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
self.sess.run(restores)
```
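The ratio and clipping in `_build_training_method` implement the PPO clipped surrogate objective. A rough NumPy illustration of the policy-gradient part (note the graph above works with negative log-probabilities, so `exp(old_logp - logp)` there equals the usual probability ratio):
```python
import numpy as np

def clipped_pg_loss(advs, logp_old, logp_new, clip_range=0.2):
    # logp_old / logp_new are log pi(a|s) under the old and current policy
    ratio = np.exp(logp_new - logp_old)
    loss1 = -advs * ratio
    loss2 = -advs * np.clip(ratio, 1.0 - clip_range, 1.0 + clip_range)
    return np.mean(np.maximum(loss1, loss2))
```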
#### File: 20chase/cartpole_rl/util.py
```python
import time
import itchat
from selenium import webdriver
def capture(url, save_fn="tensorboard.png"):
browser = webdriver.Firefox() # Get local session of firefox
browser.set_window_size(1920, 1080)
browser.get(url) # Load page
browser.execute_script("""
(function () {
var y = 0;
var step = 100;
window.scroll(0, 0);
function f() {
if (y < document.body.scrollHeight) {
y += step;
window.scroll(0, y);
setTimeout(f, 50);
} else {
window.scroll(0, 0);
document.title += "scroll-done";
}
}
setTimeout(f, 1000);
})();
""")
for i in range(30):
if "scroll-done" in browser.title:
break
time.sleep(1)
browser.save_screenshot(save_fn)
browser.close()
def wechat_display():
@itchat.msg_register([itchat.content.TEXT])
def chat_trigger(msg):
if msg['Text'] == u'tensorboard':
capture("http://127.0.0.1:6006/")
itchat.send_image('tensorboard.png', 'filehelper')
itchat.auto_login(hotReload=True)
itchat.run()
if __name__ == '__main__':
wechat_display()
``` |
{
"source": "20chix/DWM1001_ROS",
"score": 2
} |
#### File: DWM1001_ROS/test/test_Anchor_0.py
```python
from time import sleep
import unittest
import time
import rospy
import rostest
from localizer_dwm1001.msg import Anchor
from localizer_dwm1001.msg import Tag
import rosunit
# Structure Message Anchor
# string id
# float64 x
# float64 y
# float64 z
# float64 distanceFromTag
class Anchor0TestCase(unittest.TestCase):
anchorData_ok = False
anchor0 = Anchor()
def callback(self, data):
self.anchorData_ok = True
self.anchor0 = data
# Test existence of the topic
def test_if_anchor_0_is_published(self):
rospy.init_node('test_anchor_0')
rospy.Subscriber("/dwm1001/anchor0", Anchor, self.callback)
counter = 0
# give 5 seconds to check the topic is publishing something
while not rospy.is_shutdown() and counter < 5 and (not self.anchorData_ok):
time.sleep(1)
counter += 1
rospy.loginfo("looping")
self.assertTrue(self.anchorData_ok)
# Test that the id of Anchor is a string
def test_if_anchor_0_id_is_string(self):
if isinstance(self.anchor0.id, str):
self.assertTrue(True)
else:
self.assertTrue(False)
# Test that the x of Anchor is a float
def test_if_anchor_0_x_is_float(self):
if isinstance(self.anchor0.x, float):
self.assertTrue(True)
else:
self.assertTrue(False)
# Test that the y of Anchor is a float
def test_if_anchor_0_y_is_float(self):
if isinstance(self.anchor0.y, float):
self.assertTrue(True)
else:
self.assertTrue(False)
# Test that the z of Anchor is a float
def test_if_anchor_0_z_is_float(self):
if isinstance(self.anchor0.z, float):
self.assertTrue(True)
else:
self.assertTrue(False)
# Test that the distanceFromTag of Anchor is a float
def test_if_anchor_0_distanceFromTag_is_float(self):
if isinstance(self.anchor0.distanceFromTag, float):
self.assertTrue(True)
else:
self.assertTrue(False)
if __name__ == '__main__':
rosunit.unitrun('localizer_dwm1001', 'test_Anchor_0_Publisher', Anchor0TestCase)
``` |
{
"source": "20CM/Sanctuary",
"score": 3
} |
#### File: sanctuary/account/serializers.py
```python
from rest_framework import serializers
from .models import CustomUser
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = ('id', 'email', 'username', 'is_active', 'date_joined', "password", "is_staff")
read_only_fields = ('id', 'is_active', 'date_joined', 'is_staff')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = CustomUser(
email=validated_data['email'],
username=validated_data['username']
)
user.set_password(validated_data['password'])
if not CustomUser.objects.count():
# The first user will be a superuser
user.is_superuser = True
user.is_staff = True
user.save()
return user
class SimplifiedUserSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = ('id', 'email', 'username')
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = ('email', 'username', 'password')
class PasswordSerializer(serializers.Serializer):
password = serializers.CharField(max_length=200)
```
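For context, `UserSerializer.create` is invoked through the usual DRF validate/save flow; a hedged sketch of that flow (field values are placeholders):
```python
from account.serializers import UserSerializer

serializer = UserSerializer(data={
    "email": "[email protected]",
    "username": "alice",
    "password": "<PASSWORD>",  # placeholder value
})
serializer.is_valid(raise_exception=True)
user = serializer.save()  # the very first user created becomes a superuser
```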
#### File: sanctuary/topic/api.py
```python
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import BasePermission, SAFE_METHODS, IsAuthenticatedOrReadOnly
from .models import Topic, Reply
from sanctuary.viewsets import NoDestroyModelViewSet
from .serializers import TopicSerializer, ReplySerializer
class CreateWithAuthorMixin(object):
"""
Create a model instance.
"""
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.initial_data["author"] = self.request.user.id
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
serializer.save(author=self.request.user)
class IsSuperAdminOrAuthor(BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
user = request.user
return user.is_superuser or user == obj.author
class TopicViewSet(CreateWithAuthorMixin, NoDestroyModelViewSet):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
permission_classes = (IsAuthenticatedOrReadOnly, IsSuperAdminOrAuthor)
filter_fields = ('author', 'tags')
class ReplyViewSet(CreateWithAuthorMixin, NoDestroyModelViewSet):
queryset = Reply.objects.all()
serializer_class = ReplySerializer
permission_classes = (IsAuthenticatedOrReadOnly, IsSuperAdminOrAuthor)
filter_fields = ('topic', 'author')
```
#### File: sanctuary/topic/models.py
```python
from django.db import models
from django.db.models import F
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.utils import timezone
from model_utils.models import TimeStampedModel
from django_extensions.db.fields import AutoSlugField
from account.models import CustomUser
from tag.models import Tag
from .markdown.markdown import Markdown
class Topic(TimeStampedModel):
title = models.CharField(max_length=50)
slug = AutoSlugField(populate_from="title")
last_activity = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(CustomUser, related_name="topics")
tags = models.ManyToManyField(Tag, related_name="topics", blank=True)
replies_count = models.IntegerField(default=0)
class Reply(TimeStampedModel):
topic = models.ForeignKey(Topic, related_name="replies")
index = models.IntegerField(default=0)
author = models.ForeignKey(CustomUser, related_name="replies")
content = models.TextField()
content_html = models.TextField(blank=True)
class Meta:
unique_together = ("topic", "index")
def save(self, *args, **kwargs):
markdown = Markdown()
self.content_html = markdown.render(self.content)
super().save(*args, **kwargs)
from notification.models import Notification
Notification.create_notifications_from_reply(
reply=self,
mentions=markdown.get_mentions()
)
@receiver(pre_save, sender=Reply)
def calc_index(sender, instance, **kwargs):
if instance.index == 0:
instance.index = instance.topic.replies_count + 1
@receiver(post_save, sender=Reply)
def update_topic_replies_count(sender, instance, created, **kwargs):
if not created:
return
topic = instance.topic
topic.replies_count = F("replies_count") + 1
topic.last_activity = instance.created
topic.save()
``` |
{
"source": "20c/munge",
"score": 3
} |
#### File: src/munge/base.py
```python
import collections
import sys
from urllib.parse import urlsplit
import requests
from munge import codec
class Meta(type):
"""Metadata class to check and register codec classes."""
def __init__(cls, name, bases, attrs):
if name == "CodecBase":
super().__init__(name, bases, attrs)
return
if not hasattr(cls, "extensions"):
raise NotImplementedError(
f"class {cls.__name__} failed import, must have 'extensions' defined"
)
if not cls.supports_dict and not cls.supports_list:
raise NotImplementedError(
f"class {cls.__name__} failed import, must have either 'supports_dict' or 'supports_list' defined"
)
super().__init__(name, bases, attrs)
codec.add_codec(cls.extensions, cls)
class CodecBase(metaclass=Meta):
supports_dict = False
supports_list = False
supports_roundtrip = False
def __init__(self, config=None):
if config:
self.config = config
else:
self.config = dict()
@property
def extension(self):
return self.extensions[0]
def set_type(self, name, typ):
raise NotImplementedError("set_type has not been implemented")
def supports_data(self, data):
if isinstance(data, collections.abc.Mapping):
return self.supports_dict
if isinstance(data, list):
return self.supports_list
def open(self, url, mode="r", stdio=True):
"""
opens a URL; a URL without a scheme is treated as a file path
an empty path (or '-') uses stdin or stdout depending on mode, unless stdio is False
"""
# doesn't need to use config, because the object is already created
res = urlsplit(url)
if not res.scheme:
if not res.path or res.path == "-":
if not stdio:
raise OSError(f"unable to open '{url}'")
if "w" in mode:
return sys.stdout
return sys.stdin
return open(res.path, mode)
if res.scheme in ("https", "http", "ftp"):
req = requests.get(res.geturl(), stream=True)
# TODO error check
return req.raw
# return urllib2.urlopen(res.geturl())
raise OSError(f"unable to open '{url}'")
def loadu(self, url, **kwargs):
"""
opens url and passes to load()
kwargs are passed to both open and load
"""
return self.load(self.open(url, **kwargs), **kwargs)
def dumpu(self, data, url, **kwargs):
"""
opens url and passes to dump()
kwargs are passed to both open and dump
"""
return self.dump(data, self.open(url, "w", **kwargs), **kwargs)
```
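Because the `Meta` metaclass registers every subclass by its extensions, adding a codec only requires defining the class attributes and the load/dump hooks. A hypothetical minimal codec (not part of munge itself) might look like:
```python
from munge.base import CodecBase

class KeyValue(CodecBase):
    # hypothetical example codec: one "key=value" pair per line
    supports_dict = True
    extensions = ["kv"]

    def set_type(self, name, typ):
        pass

    def load(self, fobj, **kwargs):
        return dict(line.strip().split("=", 1) for line in fobj if line.strip())

    def loads(self, input_string, **kwargs):
        return dict(line.split("=", 1) for line in input_string.splitlines() if line)

    def dump(self, data, fobj, **kwargs):
        for key, value in data.items():
            fobj.write(f"{key}={value}\n")

    def dumps(self, data):
        return "".join(f"{key}={value}\n" for key, value in data.items())
```
Importing the module that defines the class is enough to make it resolvable through `munge.get_codec("kv")`.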
#### File: src/munge/cli.py
```python
import click
import munge
import munge.click
class Context(munge.click.Context):
app_name = "munge"
def get_config():
return {}
def list_codecs(ctx, param, value):
if not value or ctx.resilient_parsing:
return
print(munge.codec.list_codecs())
ctx.exit(0)
def common_options(f):
f = click.option(
"--config", envvar="MUNGE_HOME", default=click.get_app_dir("munge")
)(f)
f = click.option("--debug", is_flag=True, default=False)(f)
return f
@click.command()
# @Context.pass_context()
# @Context.options
@click.version_option()
@common_options
@click.argument("input", nargs=-1)
@click.argument("output", nargs=1)
@click.option(
"--list-codecs",
is_flag=True,
callback=list_codecs,
expose_value=False,
is_eager=True,
)
def main(**options):
conf = munge.config.MungeConfig(try_read=options["config"])
inp = options["input"]
outp = options["output"]
if not len(inp):
# if there's only 1 argument, it's (incorrectly) put in output
if outp:
inp = (outp,)
outp = None
else:
inp = "-"
elif len(inp) != 1:
raise NotImplementedError("multi input not yet supported")
src = munge.config.parse_url(inp[0], conf.get("addrbook", []))
data = src.cls().loadu(src.url.path)
# use the same codec as the input by default
if not outp:
dst = src
dst.cls().dumpu(data, "-")
else:
dst = munge.config.parse_url(outp, conf.get("addrbook", {}))
dst.cls().dumpu(data, dst.url.path)
```
#### File: munge/codec/json.py
```python
import json
from munge.base import CodecBase
class Json(CodecBase):
supports_dict = True
supports_list = True
extensions = ["json"]
__kwargs = {}
def set_type(self, name, typ):
if name == "dict":
self.__kwargs["object_pairs_hook"] = typ
def load(self, fobj, **kwargs):
return json.load(fobj, **self.__kwargs)
def loads(self, input_string, **kwargs):
return json.loads(input_string, **self.__kwargs)
def dump(self, data, fobj, **kwargs):
return json.dump(data, fobj, **kwargs)
def dumps(self, data):
return json.dumps(data)
```
#### File: munge/codec/yaml.py
```python
from munge.base import CodecBase
try:
import yaml
class Yaml(CodecBase):
supports_dict = True
supports_list = True
extensions = ["yaml", "yml"]
def set_type(self, name, typ):
pass
def load(self, *args, **kwargs):
return yaml.safe_load(*args, **kwargs)
def loads(self, *args, **kwargs):
return self.load(*args, **kwargs)
def dump(self, data, fobj):
return fobj.write(
yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
)
def dumps(self, data):
return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
except ImportError:
pass
```
#### File: src/munge/config.py
```python
import collections
import copy
import os
from urllib.parse import urlsplit
import munge
import munge.util
# this wouldn't work with tabular data
# need metaclass to allow users to set info once on class
# TODO rename to BaseConfig, set standard setup for Config?
class Config(collections.abc.MutableMapping):
"""
class for storing and manipulating data for config files
"""
# internal base for defaults
_base_defaults = {
"config": {},
# directory to look for config in
"config_dir": None,
# name of config file
"config_name": "config",
"codec": None,
"autowrite": False,
"validate": False,
}
def __init__(self, **kwargs):
"""
accepts kwargs to set defaults
data=dict to set initial data
read=dir to open a dir
try_read=dir to try to open a dir (and not throw if it doesn't read)
"""
# use derived class defaults if available
if hasattr(self, "defaults"):
self._defaults = self._base_defaults.copy()
self._defaults.update(self.defaults)
else:
self._defaults = self._base_defaults.copy()
# override anything passed to kwargs
for k, v in list(kwargs.items()):
if k in self._defaults:
self._defaults[k] = v
self.data = kwargs.get("data", self.default())
self._meta_config_dir = ""
if "read" in kwargs:
self.read(kwargs["read"])
if "try_read" in kwargs:
self.try_read(kwargs["try_read"])
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def copy(self):
rv = self.__class__(data=self.data.copy())
# copy meta
rv._meta_config_dir = self._meta_config_dir
return rv
def get_nested(self, *args):
"""
get a nested value, returns None if path does not exist
"""
data = self.data
for key in args:
if key not in data:
return None
data = data[key]
return data
def default(self):
return copy.deepcopy(self._defaults["config"])
def clear(self):
self.data = self.default()
self._meta_config_dir = None
@property
def meta(self):
if not self._meta_config_dir:
return {}
return {
"config_dir": self._meta_config_dir,
}
def read(self, config_dir=None, config_name=None, clear=False):
"""
read config from config_dir
if config_dir is None, clear to default config
clear will clear to default before reading new file
"""
# TODO should probably allow config_dir to be a list as well
# get name of config directory
if not config_dir:
config_dir = self._defaults.get("config_dir", None)
if not config_dir:
raise KeyError("config_dir not set")
# get name of config file
if not config_name:
config_name = self._defaults.get("config_name", None)
if not config_name:
raise KeyError("config_name not set")
conf_path = os.path.expanduser(config_dir)
if not os.path.exists(conf_path):
raise OSError(f"config dir not found at {conf_path}")
config = munge.load_datafile(config_name, conf_path, default=None)
if not config:
raise OSError(f"config file not found in {conf_path}")
if clear:
self.clear()
munge.util.recursive_update(self.data, config)
self._meta_config_dir = conf_path
return self
def try_read(self, config_dir=None, **kwargs):
"""
try reading without throwing an error
config_dir may be a list of directories to try in order, if so it
will return after the first successful read
other args will be passed direction to read()
"""
if isinstance(config_dir, str):
config_dir = (config_dir,)
for cdir in config_dir:
try:
self.read(cdir, **kwargs)
return cdir
except OSError as e:
pass
def write(self, config_dir=None, config_name=None, codec=None):
"""
writes config to config_dir using config_name
"""
# get name of config directory
if not config_dir:
config_dir = self._meta_config_dir
if not config_dir:
raise OSError("config_dir not set")
# get name of config file
if not config_name:
config_name = self._defaults.get("config_name", None)
if not config_name:
raise KeyError("config_name not set")
if codec:
codec = munge.get_codec(codec)()
else:
codec = munge.get_codec(self._defaults["codec"])()
config_dir = os.path.expanduser(config_dir)
if not os.path.exists(config_dir):
os.mkdir(config_dir)
codec.dumpu(self.data, os.path.join(config_dir, "config." + codec.extension))
class MungeConfig(Config):
defaults = {"config": {}, "config_dir": "~/.munge", "codec": "yaml"}
def find_cls(name, extra_schemes={}):
if name in extra_schemes:
return munge.get_codec(extra_schemes[name]["type"])
return munge.get_codec(name)
class MungeURL(collections.namedtuple("MungeURL", "cls url")):
pass
# TODO change extra_schemes to full config dict
def parse_url(url, extra_schemes={}):
"""
parse a munge url
type:URL
URL.type
examples:
file.yaml
yaml:file.txt
http://example.com/file.yaml
yaml:http://example.com/file.txt
mysql://user:password@localhost/database/table
django:///home/user/project/settings_dir.settings/app_name/model
"""
if not url:
raise ValueError("url cannot be empty")
cls = None
res = urlsplit(url)
# check config first
if res.scheme in extra_schemes:
# TODO - merge these with any existing and recurse
addr = extra_schemes[res.scheme]
if "type" in addr:
cls = find_cls(res.scheme, extra_schemes)
if "url" in addr:
url = addr["url"]
if cls:
res = urlsplit(url)
return MungeURL(cls, res)
# TODO - merge these with any existing and recurse
return parse_url(url)
if res.scheme:
cls = find_cls(res.scheme, extra_schemes)
# check file extension
if not cls:
(rest, sep, ext) = url.rpartition(".")
cls = find_cls(ext, extra_schemes)
if not cls:
raise ValueError("unable to find codec for %s" % url)
return MungeURL(cls, res)
```
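To make the URL handling concrete, a small usage sketch of `parse_url` (assuming the bundled codecs have been imported; the file paths are illustrative):
```python
import munge.codec.all  # noqa - registers the bundled codecs
from munge.config import parse_url

# explicit scheme: "yaml:" picks the codec, the path is passed through
src = parse_url("yaml:settings.txt")
data = src.cls().loadu(src.url.path)

# no scheme: the codec is inferred from the file extension
dst = parse_url("settings.json")
dst.cls().dumpu(data, dst.url.path)
```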
#### File: munge/tests/test_codecs.py
```python
import collections
import filecmp
import os
import sys
import pytest
import munge
import munge.codec.all
this_dir = os.path.dirname(__file__)
data_dir = os.path.join(this_dir, "data")
test_codecs = []
for tags, cls in list(munge.get_codecs().items()):
if any(name in ("json", "toml", "yaml") for name in tags):
print(f"appending codec {cls.extension}")
test_codecs.append(cls)
class Datadict0:
name = "dict0"
filename = "data/" + name
expected = {"munge": {"str0": "str0", "list0": ["item0", "item1"], "int0": 42}}
class Datalist0:
name = "list0"
filename = "data/" + name
expected = [{"int0": 42, "str0": "str0"}, {"int0": 1337, "str0": "fish"}]
data = (
Datadict0,
Datalist0,
)
class Codec:
def __init__(self, cls):
self.cls = cls
# if not os.path.exists(
def find_file(self, name):
prefix = os.path.join(this_dir, name)
for ext in self.cls.extensions:
fq_name = f"{prefix}.{ext}"
print("checking", fq_name)
if os.path.exists(fq_name):
return fq_name
def open_file(self, name, *args, **kwargs):
return open(self.find_file(name), *args, **kwargs)
@pytest.fixture(scope="module", params=data)
def dataset(request):
print(request.param)
obj = request.param()
print(obj)
assert obj.expected
return obj
@pytest.fixture(scope="module", params=test_codecs)
def codec(request):
print(request.param)
return Codec(request.param)
def test_codec_registry():
assert munge.get_codecs()
def test_extension(codec, dataset):
obj = codec.cls()
assert obj.extensions[0] == obj.extension
# Needs set_type fixing
def no_test_load_into(codec, dataset):
obj = codec.cls()
obj.set_type("dict", collections.OrderedDict)
data = obj.load(open(codec.find_file(dataset.filename)))
assert dataset.expected == data
assert collections.OrderedDict.__name__ == type(data).__name__
assert isinstance(data, collections.OrderedDict)
def test_open(codec, dataset):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
# with open(codec.find_file(dataset.filename)) as :
assert (
open(codec.find_file(dataset.filename)).read()
== obj.open(codec.find_file(dataset.filename)).read()
)
with pytest.raises(IOError):
obj.open("noneexistant")
with pytest.raises(IOError):
obj.open("", stdio=False)
assert sys.stdin == obj.open("")
assert sys.stdin == obj.open("-")
def test_load(codec, dataset):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
assert dataset.expected == obj.load(open(codec.find_file(dataset.filename)))
def test_loads(codec, dataset):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
data = codec.open_file(dataset.filename)
print(data)
print(data.read())
assert dataset.expected == obj.loads(codec.open_file(dataset.filename).read())
def test_loadu(codec, dataset):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
assert dataset.expected == obj.loadu(codec.find_file(dataset.filename))
def test_dump(codec, dataset, tmpdir):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
dstfile = tmpdir.join("dump" + obj.extension)
obj.dump(dataset.expected, dstfile.open("w"))
with dstfile.open() as fobj:
assert dataset.expected == obj.load(fobj)
def test_roundtrip(codec, dataset, tmpdir):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
if not obj.supports_roundtrip:
return
data = obj.load(open(codec.find_file(dataset.filename)))
for section in dataset.expected:
for k, v in dataset.expected[section].items():
data[section][k] = v
dumped = obj.dumps(data)
print(f"dumping: {dumped}")
dstfile = tmpdir.join("dump" + obj.extension)
obj.dump(data, dstfile.open("w"))
with dstfile.open() as fobj:
assert codec.open_file(dataset.filename).read() == fobj.read()
def test_dumps(codec, dataset, tmpdir):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
dstfile = tmpdir.join("dump" + obj.extension)
obj.dumpu(dataset.expected, str(dstfile))
assert dataset.expected == obj.load(dstfile.open())
def test_dumpu(codec, dataset, tmpdir):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
dstfile = tmpdir.join("dump" + obj.extension)
assert dataset.expected == obj.loads(obj.dumps(dataset.expected))
def test_find_datafile(codec, dataset):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
print(dataset.filename)
print(data_dir)
files = munge.find_datafile("dict0", data_dir)
# should == number of codec tests
assert files
assert not munge.find_datafile("nonexistant", [])
# test codec override
files = munge.find_datafile(
dataset.filename, this_dir, {("json",): munge.get_codecs()[("json",)]}
)
assert 1 == len(files)
load = files[0]
assert os.path.join(this_dir, dataset.filename + ".json") == load[1]
assert hasattr(load[0], "load")
assert dataset.expected == load[0]().load(open(load[1]))
def test_load_datafile(codec, dataset):
obj = codec.cls()
if not obj.supports_data(dataset.expected):
return
# TODO move the nonexistant tests to their own function so they're not repeatedly called
with pytest.raises(IOError):
munge.load_datafile("nonexistant", data_dir)
# default value
assert None == munge.load_datafile("nonexistant", data_dir, default=None)
assert "DEFAULT" == munge.load_datafile("nonexistant", data_dir, default="DEFAULT")
data = munge.load_datafile(dataset.filename, this_dir)
assert data
# test hardset extension
assert obj.extensions
for ext in obj.extensions:
fq_path = f"{dataset.filename}.{ext}"
data = munge.load_datafile(fq_path, this_dir, default=None)
if data:
break
data = None
assert data
# test default search path '.'
files = munge.find_datafile(fq_path, this_dir)
assert 1 == len(files)
relpath = os.path.relpath(files[0][1])
data = munge.load_datafile(relpath)
assert data
with pytest.raises(IOError):
munge.load_datafile(dataset.filename, this_dir, codecs={})
``` |
{
"source": "20c/ngage",
"score": 2
} |
#### File: ngage/plugins/bird.py
```python
import ipaddress
from pybird import PyBird
import ngage
from ngage.exceptions import AuthenticationError, ConfigError
@ngage.plugin.register("bird")
class Driver(ngage.plugins.DriverPlugin):
plugin_type = "bird"
def _do_init(self):
config = self.config
self.host = config.get("host")
self.user = config.get("user")
self.password = config.get("password")
self.optional_args = config.get("driver_args", {})
self.socket_file = self.optional_args.pop("socket_file", None)
if not self.socket_file:
raise ValueError("bird requires socket_file in driver_args")
self.dev = PyBird(
self.socket_file, self.host, self.user, self.password, **self.optional_args
)
def _do_open(self):
# TODO connection caching
return
try:
self.dev.open()
except ConnectionException:
raise AuthenticationError
def _do_close(self):
self.dev.close()
def _do_pull(self):
return self.dev.get_config()
def _do_push(self, fname, **kwargs):
with open(fname) as fobj:
conf = fobj.read()
return self.dev.put_config(conf)
def _do_diff(self, index=0):
return
if index != 0:
raise NotImplementedError("version index not implemented")
return self.dev.compare_config()
def _do_lock(self):
pass
# self.dev.lock()
def _do_unlock(self):
pass
# self.dev.unlock()
def _do_commit(self, **kwargs):
self.dev.commit_config()
def _do_check(self):
self.dev.check_config()
def _do_rollback(self, index=0):
if index == 0:
self.dev.discard_config()
elif index == 1:
self.dev.rollback()
else:
raise NotImplementedError("version index not implemented")
def _do_lookup_peer(self, peer):
# may want to cache this?
peers = self.dev.get_peer_status()
if peer.lower().startswith("as"):
for each in peers:
if each["asn"] == peer[2:]:
return each["name"]
for each in peers:
if each["name"] == peer:
return peer
elif each["address"] == peer:
return each["name"]
elif each["asn"] == peer:
return each["name"]
raise ValueError(f"peer {peer} not found")
def _do_get_bgp_neighbors(self):
router_id = self.dev.get_bird_status().get("router_id", "")
field_map = {
# 'local_as'
"asn": "remote_as",
"router_id": "remote_id",
"up": "is_up",
"description": "description",
# 'uptime'
}
rv = {
"router_id": router_id,
"peers": {},
}
for peer in self.dev.get_peer_status():
if peer["protocol"] != "BGP":
continue
# TODO use inet abstraction
addr = ipaddress.ip_address(str(peer["address"]))
row = {v: peer.get(k, None) for k, v in list(field_map.items())}
row["is_enabled"] = True
row["address_family"] = {
f"ipv{addr.version}": {
"received_prefixes": 0,
"accepted_prefixes": peer["routes_imported"],
"sent_prefixes": peer["routes_exported"],
}
}
rv["peers"][addr] = row
return rv
def _do_get_routes(self, **kwargs):
routes = self.dev.get_routes(**kwargs)
return routes
```
#### File: ngage/tests/test_context.py
```python
import os
import pytest
import ngage.cli
this_dir = os.path.dirname(__file__)
data_dir = os.path.join(this_dir, "data")
@pytest.fixture()
def ctx():
return ngage.cli.Context(home=os.path.join(data_dir, "config", "tst0"))
host_tst0 = "tst0.example.com"
host_tst1 = "tst1.example.com"
host_tst2 = "tst2.example.com"
def hkwa(host):
"""make kwargs for host"""
return dict(
host=host,
# user='test'
)
def test_default():
ctx = ngage.cli.Context()
config = ctx.get_connect_config({})
assert config
assert len(config)
assert config["type"]
config = ctx.get_connect_config(hkwa(host_tst0))
assert host_tst0 == config["host"]
def test_host_exactmatch(ctx):
config = ctx.get_connect_config(hkwa(host_tst0))
assert "exactmatch" == config["type"]
assert host_tst0 == config["host"]
def test_host_match(ctx):
config = ctx.get_connect_config(hkwa(host_tst2))
assert "match" == config["type"]
assert host_tst2 == config["host"]
def test_host_fallthrough(ctx):
config = ctx.get_connect_config(hkwa("notfound"))
assert "fallthrough" == config["type"]
assert "notfound" == config["host"]
def test_host_override(ctx):
config = ctx.get_connect_config(hkwa(host_tst1))
assert "override" == config["type"]
assert "newhost" == config["host"]
assert "newuser" == config["user"]
def test_copy_notref(ctx):
config = ctx.get_connect_config(hkwa(host_tst1))
config = ctx.get_connect_config(hkwa(host_tst1))
assert "override" == config["type"]
assert "newhost" == config["host"]
``` |
{
"source": "20c/pluginmgr",
"score": 2
} |
#### File: pluginmgr/tests/test_import_loader.py
```python
import importlib
import logging
import os
import sys
import pytest
# log here to see import logs
logging.basicConfig(level=logging.DEBUG)
import pluginmgr_test
import pluginmgr
@pytest.yield_fixture(autouse=True)
def set_loader():
pluginmgr_test.set_create_loader(True)
yield
def test_static_import():
# skip this test for version 3+ since boxed doesn't seem to be working and
# it's only for created loaders
if sys.version_info[0] >= 3:
return
# NOTE this will fail if pytest-xdist --boxed isn't used because py.test
# has already loaded static0 so it's in the module cache
with pytest.raises(ImportError):
from pluginmgr_test.plugins import static0
def test_load_fail():
with pytest.raises(ImportError):
pluginmgr_test.plugin._imphook.load_module("does.not.exist")
def test_load_file_not_found():
with pytest.raises(ImportError):
pluginmgr_test.plugin._imphook.load_module("pluginmgr_test.plugins.nonexistant")
def test_dyn_import():
from pluginmgr_test.plugins import mod0
def test_standalone_import():
hook = pluginmgr.SearchPathImporter(
"standalone",
os.path.join(os.path.dirname(__file__), "data", "standalone"),
True,
)
sys.meta_path.append(hook)
mod = importlib.import_module("standalone.mod0.submodule")
assert mod.test == 1
``` |
{
"source": "20c/twentyc.tmpl",
"score": 2
} |
#### File: src/tmpl/context.py
```python
import os
import re
class RenderError(Exception):
pass
class Context:
"""generic template interface class"""
def __init__(self, **kwargs):
"""
tmpl_dir is the base directory templates are stored in
out_dir is the output directory
env is a default set of variables to use
"""
self._search_path = []
if "tmpl_dir" in kwargs:
self._search_path = [kwargs.get("tmpl_dir")]
if "search_path" in kwargs:
self._search_path = kwargs.get("search_path", [])
self.out_dir = kwargs.get("out_dir", None)
self.env = kwargs.get("env", {})
@property
def search_path(self):
return self._search_path
@search_path.setter
def search_path(self, path_list):
if isinstance(path_list, str):
self._search_path = [path_list]
else:
self._search_path = path_list
@search_path.deleter
def search_path(self):
self._search_path = []
# overridden if engine can handle it, otherwise we mock
# def get_template(self, name):
# filename = self.find_template(name)
# if not filename:
# raise LookupError("template not found")
# return filename
def find_template(self, name):
for tmpl_dir in self.search_path:
tmpl_file = os.path.join(tmpl_dir, name)
if os.path.exists(tmpl_file):
return tmpl_file
return None
def render(self, src, env=None, out_dir=None, out_file=None):
"""
renders src.tmpl with env to produce out_dir/src
"""
if not env:
env = self.env
if not out_dir:
out_dir = self.out_dir
if out_file:
dest = out_file
else:
if not out_dir:
raise RenderError("no output directory (out_dir) set")
dest = os.path.join(str(out_dir), src)
self._render_file(src, env, dest)
def render_file(self):
pass
# def render_env(self, env=None):
# if not env:
# env = self.env
def render_string(self, instr, env=None):
"""
renders instr string with env and returns output string
"""
if not env:
env = self.env
return self._render_str_to_str(instr, env)
def render_walk(self, env=None, prefix="", skip=None, tmpl_dir=None, out_dir=None):
"""
Walks a directory and recursively renders all files
env -- override environment [default: self.env]
skip -- list of regex to skip files [default: None]
matches against the whole relative source path and the filename
prefix -- prefix output file with this [default: '']
returns a list generated files tuples (source, output)
"""
if not env:
env = self.env
if not out_dir:
out_dir = self.out_dir
if tmpl_dir:
return self.__render_walk(env, tmpl_dir, out_dir, prefix=prefix, skip=skip)
for tmpl_dir in self.search_path:
self.__render_walk(env, tmpl_dir, out_dir, prefix=prefix, skip=skip)
def __render_walk(self, env, tmpl_dir, out_dir, prefix, skip):
if skip:
skip_re = re.compile(skip)
generated = []
# self.debug_msg("rendering " + prefix + " from " + tmpl.tmpl_dir + " to " + tmpl.out_dir)
for root, dirs, files in os.walk(tmpl_dir):
rel_dir = os.path.relpath(root, tmpl_dir)
if rel_dir == ".":
rel_dir = ""
elif skip and skip_re.search(rel_dir):
continue
out_dir = os.path.join(out_dir, prefix)
for file in files:
if skip and skip_re.search(file):
continue
# self.debug_msg("rendering from " + file)
targ_dir = os.path.join(out_dir, rel_dir)
if not os.path.exists(targ_dir):
os.makedirs(targ_dir)
dest_file = os.path.join(targ_dir, file)
generated.append(dest_file)
env["filename"] = os.path.join(rel_dir, prefix + file)
# self.debug_msg("generating file " + env['filename'])
# self.render(os.path.join(rel_dir, file), out_file=dest_file, env=env)
self.render(os.path.join(rel_dir, file), out_file=dest_file, env=env)
return generated
def _render(self, src, env):
"""
renders src template file with env to return string
"""
abs_path = self.find_template(src)
return self._render_str_to_str(open(abs_path).read(), env)
def _render_file(self, src, env, dest):
"""
renders src template with env to produce dest file
"""
open(dest, "w").write(self._render(src, env))
def dump(self, src, env):
tmpl = self.get_template(src)
print(tmpl.render(env))
class Template:
def __init__(self, **kwargs):
pass
# self.src = file, string, obj
# self.ctx = Context
def render_string(self):
pass
def render_file(self):
pass
```
#### File: src/tmpl/engine.py
```python
from tmpl import context
class Jinja2Template(context.Template):
def __init__(self, tmpl, **kwargs):
self.tmpl = tmpl
super().__init__(**kwargs)
def render(self, env):
"""
renders from template, return object
"""
return self.tmpl.render(env)
def load(self):
pass
def loads(self):
pass
class Jinja2Engine(context.Context):
"""template interface class for jinja2"""
@staticmethod
def can_load():
import imp
try:
import jinja2 # noqa
# TODO removing this results in
# E ModuleNotFoundError: No module named 'getpass'
# from pytest's tmpdir
imp.find_module("jinja2")
return True
except ModuleNotFoundError:
return False
except ImportError:
return False
def __init__(self, **kwargs):
import jinja2
super().__init__(**kwargs)
self.engine = jinja2.Environment(
loader=jinja2.FileSystemLoader(self._search_path)
)
self.engine.line_statement_prefix = "."
self.engine.line_comment_prefix = ";."
self.engine.keep_trailing_newline = True
self.engine.lstrip_blocks = True
self.engine.trim_blocks = True
@property
def search_path(self):
return self.engine.loader.searchpath
@search_path.setter
def search_path(self, path):
if isinstance(path, str):
self._search_path = [path]
self.engine.loader.searchpath = [path]
else:
self.engine.loader.searchpath = path
@search_path.deleter
def search_path(self):
self.engine.loader.searchpath = []
# self.engine.loader.searchpath = path
def get_template(self, name):
"""finds template in search path
returns Template object
"""
return Jinja2Template(Jinja2Template(self.engine.get_template(name)))
def make_template(self, tmpl_str):
"""makes template object from a string"""
raise NotImplementedError()
# return Jinja2Template(Template(tmpl_str))
def _render(self, src, env):
"""
renders from template, return object
"""
return self.engine.get_template(src).render(env)
def _render_str_to_str(self, instr, env):
"""
renders from template, return object
"""
return self.engine.from_string(instr).render(env)
class DjangoTemplate(context.Template):
def __init__(self, tmpl, **kwargs):
self.tmpl = tmpl
super().__init__(**kwargs)
def render(self, env):
"""
renders from template, return object
"""
from django.template import Context
return self.tmpl.render(Context(env))
def load(self):
pass
def loads(self):
pass
class DjangoEngine(context.Context):
"""template interface class for Django"""
@staticmethod
def can_load():
import imp
try:
imp.find_module("django")
import django # noqa
return True
except ImportError:
print("import error")
return False
def __init__(self, **kwargs):
import django
import django.template
from django.conf import settings
from django.template import Template
if not settings.configured:
settings.configure(
TEMPLATES=[
{"BACKEND": "django.template.backends.django.DjangoTemplates"}
]
)
django.setup()
self.tmpl_ctor = Template
super().__init__(**kwargs)
def get_template(self, name):
filename = self.find_template(name)
if not filename:
raise LookupError("template not found")
return self.make_template(open(filename).read())
def make_template(self, tmpl_str):
"""makes template object from a string"""
return DjangoTemplate(self.tmpl_ctor(tmpl_str))
def _render_str_to_str(self, instr, env):
"""
renders contents of instr with env returns string
"""
return self.make_template(instr).render(env)
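# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): shows the
# jinja2 settings Jinja2Engine applies above -- "." line statements, ";."
# line comments and whitespace trimming. Assumes jinja2 is installed.
if __name__ == "__main__":
    import jinja2
    demo = jinja2.Environment()
    demo.line_statement_prefix = "."
    demo.line_comment_prefix = ";."
    demo.keep_trailing_newline = True
    demo.lstrip_blocks = True
    demo.trim_blocks = True
    tmpl = demo.from_string(";. loop over items\n. for item in items\n{{ item }}\n. endfor\n")
    print(tmpl.render({"items": ["a", "b"]}))  # prints "a" and "b" on separate lines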
``` |
{
"source": "20c/twentyc.tools",
"score": 3
} |
#### File: twentyc/tools/config.py
```python
from ConfigParser import ConfigParser
import os.path
try:
from munge import config as munge_config
except ImportError:
munge_config = None
def dict_conf(filename):
"""
Return dict object for *.conf file
"""
f, ext = os.path.splitext(filename)
ext = ext.lower()
if ext == "conf" or ext == "ini":
# python config via config parser
config = ConfigParser()
config.optionxform=str
config.read(filename)
rv = {}
for section in config.sections():
rv[section] = {}
for key,value in config.items(section):
rv[section][key] = value.strip('"').strip("'").decode("string_escape")
return rv
else:
# other type of config, use munge
if munge_config:
src = munge_config.parse_url(filename)
return src.cls().load(open(filename)).get("vodka")
else:
raise Exception("'%s' type of config encountered, install munge" % ext)
```
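A standalone sketch of the section-to-dict conversion `dict_conf` performs for `.conf`/`.ini` files (the sample section and option names below are made up for illustration):
```python
import os
import tempfile

try:
    from configparser import ConfigParser  # Python 3
except ImportError:
    from ConfigParser import ConfigParser  # Python 2

sample = '[vodka]\nname = "example"\ndebug = 1\n'
fd, path = tempfile.mkstemp(suffix=".conf")
with os.fdopen(fd, "w") as fh:
    fh.write(sample)

config = ConfigParser()
config.optionxform = str  # keep option names case sensitive
config.read(path)

rv = {}
for section in config.sections():
    rv[section] = {}
    for key, value in config.items(section):
        rv[section][key] = value.strip('"').strip("'")

print(rv)  # {'vodka': {'name': 'example', 'debug': '1'}}
os.remove(path)
```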
#### File: twentyc/tools/CryptoUtil.py
```python
import os
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
import Crypto.Util.number
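# BillingCrypto (below) implements a hybrid encryption scheme: encrypt() draws
# a fresh random AES key per message, zero-pads the plaintext to the 16 byte
# AES block size, encrypts it with AES-CBC under a random IV, and RSA-encrypts
# the AES key with the recipient's public key; the wrapped key, IV and
# ciphertext are returned hex-encoded. decrypt() reverses the steps with the
# RSA private key. generate_key() writes the RSA key pair as PEM files under
# a local keys/ directory.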
class BillingCrypto:
@staticmethod
def generate_key(filename, bits=2048):
privkey = RSA.generate(bits)
pubkey = privkey.publickey()
f = open("%s/keys/%s-priv.pem" % (os.getcwd(), filename),'w')
f.write(privkey.exportKey('PEM'))
f.close()
f = open("keys/%s-pub.pem" % filename,'w')
f.write(pubkey.exportKey('PEM'))
f.close()
@staticmethod
def load_rsa_key(filename, keytype = 'pub', filepath=None):
try:
if not filepath:
if (filename[-3:] == "pem"):
f = open(filename, 'r')
else:
f = open("%s-%s.pem" % (filename, keytype), 'r')
else:
f = open(filepath, "r")
key = RSA.importKey(f.read());
f.close()
return key;
except Exception as e:
raise e
@staticmethod
def iv(length=16):
return os.urandom(length)
@staticmethod
def encrypt(pubkey, text, keylength = 24):
key = ""
ciphertext = ""
try:
key = os.urandom(keylength)
secret = pubkey.encrypt(key, None)
iv = BillingCrypto.iv()
aes_engine = AES.new(key, AES.MODE_CBC, iv)
while(len(text) % 16 != 0):
text += '\x00'
ciphertext = aes_engine.encrypt(text)
except:
raise
return (secret[0].encode('hex'), iv.encode('hex'), ciphertext.encode('hex'))
@staticmethod
def decrypt(privkey, key, iv, ciphertext):
try:
skey = key.decode('hex')
txt = ciphertext.decode('hex')
plainkey = privkey.decrypt(skey)
civ = iv.decode('hex')
aes_engine = AES.new(plainkey, AES.MODE_CBC, civ)
plaintext = aes_engine.decrypt(txt)
return plaintext
except:
raise
if __name__ == "__main__":
print os.getcwd()
# run this once to make test keys
#BillingCrypto.generate_key("test", 2048)
pubkey = BillingCrypto.load_rsa_key("test", "pub")
txt = "hi this is a test 123"
(key, iv, cipher) = BillingCrypto.encrypt(pubkey, txt, 16);
print "Text: %s" % txt
print "IV: %s" % iv
print "Key: %s" % key
print "Cipher: %s" % cipher
privkey = BillingCrypto.load_rsa_key("test", "priv")
plaintext = BillingCrypto.decrypt(privkey, key, iv, cipher)
print "Recovered Plaintext: %s" % plaintext
``` |
{
"source": "20c/vodka1",
"score": 2
} |
#### File: twentyc/vodka/prefs.py
```python
import constants
import traceback
import threading
import base64
import re
import validator
import copy
import simplejson as json
MAX_KEY_LENGTH = 255
# store valid document types
documents = [
"app",
"mobile_app",
"layout",
"color_theme",
"setups",
"sounds"
]
# modules can use this dict to add their own document types
documents_from_modules = {}
# define expected storage types of a doctype (eg. single or multiple instance)
document_storage = {
"app" : "single",
"mobile_app" : "single",
"layout" : "multi",
"color_theme" : "multi",
"setups" : "single",
"sounds" : "single"
}
document_limits = {
}
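# per-doctype limits on how many documents of that type a user may store;
# anything not listed here defaults to 1 (see document_check_limits below)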
###############################################################################
# Functions
###############################################################################
class ValidationException(Exception):
trigger = ""
traceback = ""
###############################################################################
class DoctypeLimitException(Exception):
def __init__(self, doctype, limit):
Exception.__init__(self, constants.ERR_DOCTYPE_LIMIT % (doctype, limit))
###############################################################################
class PrefManager(object):
#############################################################################
def __init__(self, ses):
"""
Initialize the PrefManager instance with a session.
Args
ses <Session> Session instance
"""
try:
# set this to true if you want to see what's happening
self.verbose = False
self.ses = ses
# reference to vodka app config
self.config = ses.app.config
# cache config documents
self.cache = {}
# thread locks
self.lockSet = threading.RLock()
# for validating pref documents
self.validator = validator.Validator()
self.dbg("loaded for session %d" % ses.id)
except:
raise
############################################################################
def dbg(self, msg):
if self.verbose:
print "PrefManager: %s" % msg
############################################################################
def error(self, msg):
raise Exception("PrefManager: %s" % msg)
############################################################################
def prefix(self):
"""
Returns storage key prefix for this PrefManager instance / session combo
"""
if self.ses and self.ses.is_authed():
try:
prefix = self.ses.app.couch_config.get("prefix_prefs")
if not prefix:
self.error("Missing %s config: prefix_prefs" % self.ses.app.couch_engine)
prefix = prefix % self.ses.user_id
return prefix
except:
raise
else:
return None
############################################################################
def update(self, config):
"""
update multiple document types via a dict structure. Keys at the root
level should be valid document identifiers, eg. "app" or "layout".
Keys at the second level will be updated in the targeted document. Note
that a key not existing at the second level will not remove it from the
document, but anything past and including the third level will be treated
as absolute.
example:
{
#update the app document
"app": {
# overwrite these values in the "app" document namespace
"update_speed" : 1, # will get overwritten
# will also get overwritten completely as is, the previous
# state of this config key will be gone, so make sure that in this
# example the brackets list is holding all brackets that you want
# to store.
"brackets" : [
...
]
}
}
"""
for doc_name, doc in config.items():
#load the current document
current = self.get(doc_name)
#validate the new incoming data
validate_name = doc_name.split(".")[0]
self.validator.validate(doc, validate_name)
#update the current document
current.update(doc)
self.set(doc_name, current, validate=False)
############################################################################
def update_by_key_list(self, items):
"""
update config documents using a list of serialized keys.
Args
items <dict> dict of serialized keys with values.
example
{
"app->update_speed" : 1,
"app->context_menus" : 1,
"app->some.nested.value" : 1,
"sound->volume" : 100
}
"""
documents = []
for key,value in items.items():
keys = key.split("->")
base_name = keys[0]
keys = keys[1].split(".")
if not base_name in documents:
documents.append(base_name)
doc = self.get(base_name)
if len(keys) > 1:
for token in keys[0:-1]:
if not doc.has_key(token) or type(doc.get(token)) != dict:
doc[token] = {}
doc = doc.get(token)
doc[keys[-1]] = value
for doc_name in documents:
doc = self.get(doc_name)
self.set(doc_name, doc)
############################################################################
def clean_key(self, key):
if len(key) > MAX_KEY_LENGTH:
raise ValidationException(constants.ERR_KEY_LENGTH % ("key", MAX_KEY_LENGTH))
m = re.findall("[^a-zA-Z0-9_\-\. #+*]", key)
if m:
a = []
for k in m:
if k not in a:
a.append(k)
raise ValidationException(constants.ERR_KEY_INVALID_CHARACTER % ("key", str(a)))
############################################################################
def delete(self, key):
"""
Delete the couchbase document at the specific key
key <string> id key of the config object in the couchbase database. appropriate
prefix will automatically be prepended, so you only need to provide the base
key, eg. "quoteboard" to get the quoteboard config
"""
client = None
try:
if not self.ses.is_authed():
return
self.clean_key(key)
if self.cache.has_key(key):
del self.cache[key]
full_key = "%s.%s" % (self.prefix(), key)
client = self.ses.get_client()
self.dbg("Deleting: %s" % full_key)
client.db_prefs.unset(full_key)
self.document_unpin(key.split(".")[0], key)
# apply post processor for document type if it exists
doctype = key.split(".")[0]
ondel = "on_delete_%s" % doctype
if hasattr(self, ondel):
ondel = getattr(self, ondel)
ondel(key)
except:
raise
finally:
if client:
self.ses.free_client(client)
############################################################################
def document_check_limits(self, doctype, key):
"""
Check if there is room for the proposed doctype creation.
If the document key already exists do nothing.
If the document key does not exist and the limit for the specified
doctype is reached raise a DoctypeLimitException
Args:
doctype <string> valid known doctype eg. "layout"
key <string> doc key string eg. "layout.MyNewLayout"
Returns:
True if its ok to add another document for this doctype
"""
if doctype == "sys":
return True
limit = document_limits.get(doctype, None)
if type(limit) != int:
limit = 1
sysconfig = self.get("sys")
documents = sysconfig.get("documents",{})
tracker = documents.get(doctype,[])
if(key in tracker):
return True
if(len(tracker) >= limit):
raise DoctypeLimitException(doctype, limit)
else:
return True
############################################################################
def document_pin(self, doctype, key):
if doctype == "sys" or key == doctype:
return True
sysconfig = self.get("sys")
documents = sysconfig.get("documents",{})
tracker = documents.get(doctype,[])
if(key in tracker):
return True
tracker.append(key)
documents[doctype] = tracker
sysconfig["documents"] = documents
self.set("sys", sysconfig)
############################################################################
def document_unpin(self, doctype, key):
if doctype == "sys" or key==doctype:
return True
sysconfig = self.get("sys")
documents = sysconfig.get("documents",{})
tracker = documents.get(doctype,[])
if(key not in tracker):
return True
tracker.remove(key)
documents[doctype] = tracker
sysconfig["documents"] = documents
self.set("sys", sysconfig)
############################################################################
def set(self, key, data, validate=True, **kwargs):
"""
save a data object at the specified key
Args
key <string> id key of the config object in the couchbase database. appropriate
prefix will automatically be prepended, so you only need to provide the base
key, eg. "quoteboard" to get the quoteboard config
data <dict> config document to be saved
type <string> config document type eg "layout", "base" etc.
"""
self.lockSet.acquire()
client = None
if not self.ses or not self.ses.is_authed():
return
try:
self.clean_key(key)
if type(data) != dict:
self.error("Tried to pass non-dict object as data to set()")
tokens = key.split(".")
doctype = tokens[0]
# check limits
if len(tokens) > 1:
if document_storage.get(doctype, "single") != "multi":
raise ValidationException("'%s' cannot store nested keys" % doctype)
self.document_check_limits(doctype, key)
if validate:
self.validator.validate(data, doctype)
# apply preparation processor for document type if it exists
prepare = "prepare_%s" % doctype
if hasattr(self, prepare):
prepare = getattr(self, prepare)
prepare(key, data, tokens)
client = self.ses.get_client()
full_key = "%s.%s" % (self.prefix(), key)
data["%suser_id" % client.db_prefs.meta_prefix] = self.ses.user_id
data["%stype" % client.db_prefs.meta_prefix] = doctype
#self.dbg("Setting %s: %s" % (full_key, data.keys()))
if data.has_key("_rev"):
del data["_rev"]
client.db_prefs.set(full_key, data, retry=2)
# update limits
self.document_pin(doctype, key)
# apply post processor for document type if it exists
onset = "on_set_%s" % doctype
if hasattr(self, onset):
onset = getattr(self, onset)
onset(key, data, tokens, **kwargs)
self.cache[key] = data
except:
raise
finally:
self.lockSet.release()
if client:
self.ses.free_client(client)
############################################################################
def get(self, key, load=False):
"""
get a config object by its key. Will look in the cache first; if it doesn't
exist, calls self.load
Args
key <string> id key of the config object in the couchbase database. appropriate
prefix will automatically be prepended, so you only need to provide the base
key, eg. "quoteboard" to get the quoteboard config
Keyword Args
load <boolean> if true will always call self.load even if object is already
cached
"""
if not self.ses or not self.ses.is_authed():
raise Exception("Trying to use pref manager with a non auth'd session")
try:
self.clean_key(key)
if not self.cache.has_key(key) or load:
return self.load(key)
else:
return self.cache.get(key, {})
except:
raise
############################################################################
def load(self, key):
"""
load and cache a config object by its key. Cached objects will be stored
in self.cache
Args
key <string> id key of the config object in the couchbase database. appropriate
prefix will automatically be prepended, so you only need to provide the base
key, eg. "quoteboard" to get the quoteboard config
"""
client = None
if not self.ses:
return
try:
self.clean_key(key)
#get a free client with a couchbase connection
client = self.ses.get_client()
#attempt to load object from the couchbase server
full_key = "%s.%s" % (self.prefix(), key)
self.dbg("Loading document: %s" % full_key)
obj = client.db_prefs.get(full_key)
if full_key:
if not obj:
obj = {}
self.cache[key] = obj
return obj
except:
raise
finally:
if client:
self.ses.free_client(client)
############################################################################
def import_prefs(self, data):
"""
Imports preferences from a json string
Args:
data <string|dict> valid json string/dictionary holding pref documents indexed by
document name
"""
try:
if type(data) in [unicode,str]:
config = json.loads(data)
elif type(data) == dict:
config = data
else:
raise Exception("Invalid import format")
self.validator.validate_shared_data(config)
app_config = None
for key, value in config.items():
if key.split(".")[0] == "layout":
if not app_config:
app_config = self.get("app")
if app_config.get("default_layout","") in ["__dflt",""]:
#print "Backing up user's current default layout."
layout = self.layout_load_default()
layout["name"] = "Default Layout"
self.set("layout.Default Layout", layout)
self.update(config)
return config.keys()
except:
raise
############################################################################
def export_prefs(self, targets):
"""
Creates JSON data of targeted preferences for export.
Args:
targets <list> list of targets. Each target can be another list holding
keys.
Example:
export_prefs([
["app", "brackets"],
["color_theme.my theme"]
])
Export will be validated against validator specs for each specific
target so if a target does not end up in the returned data object it
probably means the export validator threw it out and needs to be adjusted
"""
try:
data = {}
for target in targets:
prefs = None
store = data
i = 0
for key in target:
# if prefs object is empty, load the targeted prefs document
# which should always reside in the first target key
if not prefs:
prefs = self.get(key)
else:
prefs = prefs.get(key)
if i < len(target)-1:
if not store.has_key(key):
store[key] = {}
store = store[key]
i += 1
if prefs is not None:
store[target[-1]] = copy.deepcopy(prefs)
# validate for export
self.validator.validate_shared_data(data)
if data:
return data
else:
return {}
except:
raise
############################################################################
def layout_rename(self, layout, name):
"""
Rename a layout
Args
layout <string> current layout name
name <string> new layout name
"""
if not name:
raise Exception("No new name specified")
if not layout:
raise Exception("No layout specified")
layout_data = self.get("layout.%s" % layout)
if not layout_data:
raise Exception("Could not load layout '%s' for rename" % layout)
default = self.get("app",{}).get("default_layout")
# save under new name
self.set("layout.%s" % name, layout_data, replace=layout)
# remove old layout
self.delete("layout.%s" % layout)
if default == layout:
self.layout_set_default(name)
return name
############################################################################
def layout_load(self, name):
"""
Return the config document for the layout with the specified name. Layout
names cannot contain the '.' character, so any '.' is replaced with '-'
"""
if not name:
return {}
name = self.validate_layout_name(name)
return self.get("layout.%s" % name)
############################################################################
def layout_load_default(self):
"""
Return the config document for this user's default layout
"""
return self.layout_load(self.get("app").get("default_layout"))
############################################################################
def layout_set_default(self, name):
"""
Make the layout with name <name> the default layout for the user
"""
if not name:
return
layout_config = self.get("layout.%s" % name)
if not layout_config:
raise Exception("Could not make layout '%s' the default layout, loading of layout failed." % name)
self.update({ "app" : {
"default_layout" : name
}})
############################################################################
def validate_layout_name(self, name):
"""
Validate a config document key name and return it.
"""
if not name:
raise ValidationException(constants.ERR_LAYOUT_NAME_MISSING)
return name.replace(".","-");
############################################################################
def add_custom_sound(self, soundName, file):
"""
Add a custom sound
Args
soundName <string> sound name as it is defined in vodka.conf
file <data> sound file data
"""
if not soundName:
raise ValidationException(constants.ERR_VALUE_EMPTY%"name")
changes = {}
changes[soundName] = base64.b64encode(file)
self.update({ "sounds" : changes})
custom_sounds = self.get("app").get("custom_sounds",[]) or []
if soundName not in custom_sounds:
custom_sounds.append(soundName)
self.update({ "app" : { "custom_sounds" : custom_sounds }})
############################################################################
def prepare_layout(self, key, data, key_tokens):
try:
name = ".".join(key_tokens[1:])
data["name"] = name
data["id"] = name
except:
raise
############################################################################
def on_set_layout(self, key, data, key_tokens, **kwargs):
"""
Gets called after a document of type "layout" has been saved via
set()
Makes sure the layout name is added to app.layouts in the user's app
preferences
"""
try:
name = ".".join(key_tokens[1:])
app_config = self.get("app")
self.load("layout.%s" % name)
# make default if needed
if app_config.get("default_layout") in [None, "", "__dflt"]:
self.layout_set_default(name)
if name != "__dflt" and "__dflt" in app_config.get("layouts",[]):
self.delete("layout.__dflt")
# add to layout list
layouts = app_config.get("layouts")
layout_tabs = app_config.get("layout_tabs", [])
if not layouts or type(layouts) != list:
layouts = []
if not name in layouts:
layouts.append(name)
if not name in layout_tabs:
rpl = kwargs.get("replace")
if rpl and rpl in layout_tabs:
layout_tabs[layout_tabs.index(rpl)] = name
else:
layout_tabs.append(name)
self.update({
"app" : {
"layouts" : layouts,
"layout_tabs" : layout_tabs
}
})
except:
raise
############################################################################
def on_delete_layout(self, key):
"""
Gets called after document of type layout has been deleted
Makes sure the layout name is removed from app.layouts in the user's
app preferences, and - if necessary - assigns a new default layout for
the user
"""
try:
name = ".".join(key.split(".")[1:])
app_config = self.get("app")
# remove layout from layout list
if name in app_config.get("layouts", []):
app_config["layouts"].remove(name)
if name in app_config.get("layout_tabs", []):
app_config["layout_tabs"].remove(name)
# if layout was default layout, make a different layout the default layout
if app_config["default_layout"] == name:
if(len(app_config.get("layouts",[]))):
app_config["default_layout"] = app_config.get("layouts")[0]
else:
app_config["default_layout"] = ""
# save app config
self.set("app", app_config)
# return new default layout
return app_config["default_layout"]
except:
raise
############################################################################
def on_delete_color_theme(self, key):
"""
Gets called after document of type color theme has been deleted
Makes sure the color theme name is removed from app.color_themes in the
user's app preferences
"""
try:
name = ".".join(key.split(".")[1:])
app_config = self.get("app")
if name in app_config.get("color_themes", []):
app_config["color_themes"].remove(name)
self.set("app", app_config)
except:
raise
############################################################################
def on_set_color_theme(self, key, data, key_tokens):
"""
Gets called after a document of type color theme has been saved
Makes sure the color theme name is added to app.color_themes in the user's
app preferences.
"""
try:
name = ".".join(key_tokens[1:])
app_config = self.get("app")
color_themes = app_config.get("color_themes", [])
if name not in color_themes:
color_themes.append(name)
self.update({
"app" : {
"color_themes" : color_themes
}
})
except:
raise
```
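For reference, a standalone sketch of the `document->nested.key` addressing used by `update_by_key_list` above (the `documents` dict stands in for the stored preference documents):
```python
# Sketch of the "doc->a.b.c" serialized key format handled by update_by_key_list;
# `documents` stands in for the stored preference documents.
documents = {"app": {}, "sounds": {}}

items = {
    "app->update_speed": 1,
    "app->some.nested.value": 1,
    "sounds->volume": 100,
}

for key, value in items.items():
    doc_name, path = key.split("->", 1)
    keys = path.split(".")
    doc = documents.setdefault(doc_name, {})
    for token in keys[:-1]:
        if not isinstance(doc.get(token), dict):
            doc[token] = {}
        doc = doc[token]
    doc[keys[-1]] = value

print(documents)
# {'app': {'update_speed': 1, 'some': {'nested': {'value': 1}}}, 'sounds': {'volume': 100}}
```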
#### File: twentyc/vodka/session.py
```python
import os
import time
import datetime
import traceback
import logging, logging.handlers
import traceback
import re
import tmplbridge
import random
import weakref
import prefs
import uuid
import task
import simplejson as json
import types
import twentyc.tmpl as tmpl_engine
from twentyc.tools.thread import RunInThread
import twentyc.vodka.tools.session as vt_session
from rpc import RPC_JSON_KEYS
from wsgi import webapp
import constants
version = constants.version
AUTH_IDLE = 0
AUTH_PROCESSING = 1
AUTH_FINISHED = 2
AUTH_STATUS_XL = [
"IDLE",
"PROCESSING",
"FINISHED"
]
AUTH_FINALIZE = []
AUTH_CLEAR_FINALIZE = []
TASK_CAPACITY = {
}
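# maps "<module>.<task>" task types to the maximum number of concurrent runs
# allowed per session; types not listed here default to 1 (see Session.task_run)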
################################################################################
class AuthInProgressException(Exception):
pass
class LoginInvalidException(Exception):
pass
class LoginPermsException(Exception):
def error_info(self):
return {
"log_msg" : "Login denied due to missing permissions: %s" % str(self),
"user_msg" : constants.ERR_LOGIN_PERMS
}
################################################################################
# VodkaApp session
class Session(object):
"""
User session object
"""
##############################################################################
def __init__(self, app, fromRequest, web_ses_id):
"""
Initialize the session object
app should be a reference to a VodkaApp instance
fromRequest should be a reference to _environ.get("_request")
"""
self.fromRequest = fromRequest
#reference to the VodkaApp instance
self.app = weakref.proxy(app)
#static file url
self.staticFileUrl = self.app.config.get("server", {}).get("static_file_url","/")
self.staticFileUrl = os.path.join(
self.staticFileUrl,
version
)
self.pref_manager = None
self.tmpl_engines = {}
#path to the currently selected brand directory
self.brand_path = ""
#the selected brand
self.brand = self.pick_brand(fromRequest)
#static file url (brands)
self.staticFileUrlBrand = self.staticFileUrl + "/brands/"+self.brand.get("name")
#the selected locale
self.locale = self.brand["locale"]
self.lang = self.locale.lang
#the selected theme
self.theme = self.pick_theme(fromRequest)
#error messages that can be displayed in-page or added to json output
self.messages = []
self.errors = []
#if set, the specified theme will be used instead of the picked one
self.override_theme = False
#if set, the simple theme will be forced no matter what
self.fixed_theme_forced = False
#will hold module perms for the authenticated session as it is stored
#in couchbase
self.module_perms = {}
self.module_perms_structure = {}
# a unique id identifying this session
self.client_id = ""
self.auth_id = None
self.auth_status = None
self.auth_data = None
#session id for the web sessions
self.web_ses_id = web_ses_id
#user id that was returned by successful login
self.user_id = 0
#user name that was returned by successful login
self.user = None
self.sounds = {}
self.env = None
#user agent
self.ua = fromRequest.get('user_agent').lower();
#store imported prefs for later confirmation
self.imported_prefs = None
#specifies which preference document keys the user can create / write to
self.pref_document_access = []
#holds remote code execution requirements
self.rce = {}
#holds current tasks running for this session
self.tasks = []
#holds update index data for rpc/update
self.update_index_map = {}
self.update_index_rev = {}
self.update_index_dropped = {}
##############################################################################
def rce_require(self, name, code, grace=10, limit=5):
"""
Remote code execution required.
This will execute a piece of javascript code on the user's client (browser)
When a remote code execution request is sent to the client it is expected to be
satisfied via rce_satisfy(). If that fails to happen within the grace period
and request limit the session will be logged out.
name <str> unique name for the rce to identify it
code <str> valid javascript code to execute
grace <int> grace period between execution requests (seconds)
limit <int> if after n requests the rce has not been satisfied the session will
be logged out
"""
try:
if self.rce.has_key(name) or not code:
return
id = uuid.uuid4()
self.rce[name] = {
"id" : id,
"code" : code,
"time" : 0,
"limit" : limit,
"grace" : grace
}
except:
raise
##############################################################################
def rce_satisfy(self, name, id):
try:
if self.rce.has_key(name):
if str(self.rce.get(name).get("id")) == id:
del self.rce[name]
except:
raise
##############################################################################
def verify_csrf(self, request):
a = request.get('query', {});
csrf_token_a = a.get('csrfmiddlewaretoken');
csrf_token_b = webapp.get_cookie(request, "csrftoken")
if csrf_token_a != csrf_token_b or not csrf_token_b:
return False
return True
##############################################################################
# pick brand depending on host name
def pick_brand(self, request, f_brand=None):
"""
Cycle through the brand map in config and see if the hostname matches
any of the urls
Pick brand according to hostname match
On no match pick default
if f_brand is set always use brand that matches f_brand(str) by
name
"""
host = request.get("host")
s_brand = None
#print "checking host " + host + " for brand..."
for brand, mask in self.app._brand_map.items():
if mask.match(host):
#print "got brand " + brand
s_brand = self.app.brand[brand]
#else:
#print "no brand match " + brand + " " + str(mask)
if f_brand:
if self.app.brand.get(f_brand):
s_brand = self.app.brand.get(f_brand)
if not s_brand:
s_brand = self.app.brand["default"]
dir = s_brand.get("dir")
request["session"].data["url_map"] = [
("/css", "%s/htdocs/css" % dir, "%s/htdocs/css" % self.app.brand["default"].get("dir")),
("/js", "%s/htdocs/js" % dir, "%s/htdocs/js" % self.app.brand["default"].get("dir")),
("/favicon.ico", "%s/htdocs/favicon.ico" % dir),
("favicon.ico", "%s/htdocs/favicon.ico" % dir)
]
self.brand = s_brand
self.staticFileUrlBrand = self.staticFileUrl + "/brands/"+self.brand.get("name")
self.brand_path = dir
return s_brand
##############################################################################
# pick default theme depending on user agent
def pick_theme(self, request):
"""
Select theme by useragent
"""
ua = request.get("user_agent")
for name, regex in self.app._theme_map.items():
if regex.match(ua):
return name
return self.app.config.get("app",{}).get("theme.default", "default")
##############################################################################
def uses_chrome(self):
"""
Return True if the useragent indicates that google chrome is being used
"""
if self.ua.find("chrome") != -1:
return True
else:
return False
##############################################################################
# check the user agent string to figure of if it's safari
def uses_safari(self):
"""
Return True if the useragent indicates that safari is being used
"""
if self.ua.find("safari") != -1:
return True
else:
return False
##############################################################################
# update session variables
def update(self, **kwargs):
"""
Update session variables
possible keyword arguments:
theme (str)
brand (str)
locale (locale object)
user (str), username
"""
if "theme" in kwargs:
if kwargs.get("theme") == "default" and not self.uses_chrome() and not self.uses_safari():
self.theme = "mobile"
self.fixed_theme_forced = True
else:
self.fixed_theme_forced = False
self.theme = kwargs["theme"]
if "brand" in kwargs:
self.brand = kwargs["brand"]
if "locale" in kwargs:
self.locale = kwargs["locale"]
self.lang = self.locale.lang
if "user" in kwargs:
self.user = kwargs["user"]
##############################################################################
def update_sesmap(self):
self.app.update_sesmap({ self.web_ses_id : self.auth_id or None })
##############################################################################
def get_client(self, for_duration=10):
"""
Get the first free VodkaClient instance from the app's client pool
"""
client = self.app.client_pool.get_client(for_duration)
i = 0
while not client:
client = self.app.client_pool.get_client(for_duration)
time.sleep(0.1)
i+=1
if i >= 1000:
raise Exception("No inactive clients")
return client
##############################################################################
def free_client(self, client):
"""
respawn an unused / finished client obtained via get_client()
"""
self.app.client_pool.respawn(client)
##############################################################################
def is_authed(self):
"""
Return True if session is authenticated, False if not
"""
return self.is_connected()
##############################################################################
# check if session is connected (has auth_id)
def is_connected(self):
"""
Return True if session's auth_id property is set, False if not
"""
if self.auth_id:
return True
return False
##############################################################################
def get_bridge(self, request=None, ignoreExpiry=False):
"""
Return TmplBridge object for the current request
"""
if not request:
request = self.fromRequest
if not request.get("bridge"):
request["bridge"] = tmplbridge.TmplBridge(self, request, ignoreExpiry)
return request.get("bridge")
##############################################################################
# append an error message
def error(self, error, toSession=False):
"""
Append error(str) to self.errors
"""
self.errors.append(error)
##############################################################################
# get all error messages and clear error message stack
def get_errors(self):
"""
Return list containing errors in self.errors
Empty self.errors
"""
e = list(self.errors)
self.errors = []
return e
##############################################################################
def auth_working(self):
if self.auth_status == AUTH_PROCESSING:
return True
else:
return False
##############################################################################
def auth_process(self, *args, **kwargs):
return 1
##############################################################################
def auth_success(self, res):
self.auth_data['result'] = res
self.auth_id = res
self.auth_finalize()
self.auth_status = AUTH_FINISHED
self.auth_data = None
self.reload_20c_module_perms()
##############################################################################
def auth_error(self, error):
self.error(error)
self.auth_cancel()
webapp.log.error(traceback.format_exc())
##############################################################################
def auth_cancel(self):
self.auth_status = AUTH_IDLE
self.auth_data = None
self.auth_id = None
##############################################################################
def auth_validate(self):
if self.auth_working():
to = self.auth_data.get("timeout", 0)
if to:
start_t = self.auth_data.get("start_t")
now = time.time()
if now - start_t > to:
self.error("Authentication timed out, please try again")
self.auth_cancel()
return False
return True
return False
##############################################################################
def auth_start(self, **kwargs):
if not self.auth_working():
self.auth_status = AUTH_PROCESSING
self.auth_data = kwargs
self.auth_data.update(start_t=time.time())
t = RunInThread(self.auth_process)
t.error_handler = self.auth_error
t.result_handler = self.auth_success
t.start(**kwargs)
else:
raise AuthInProgressException()
##############################################################################
def auth_finalize(self):
for fn in AUTH_FINALIZE:
try:
fn(self, self.auth_data)
except Exception, inst:
self.auth_cancel()
webapp.log.error(traceback.format_exc())
raise
##############################################################################
def auth_clear_process(self):
pass
##############################################################################
def auth_clear(self):
t = RunInThread(self.auth_clear_process)
t.start()
try:
for fn in AUTH_CLEAR_FINALIZE:
fn(self)
except Exception, inst:
webapp.log.error(traceback.format_exc())
finally:
self.auth_id = None
##############################################################################
def tmpl(self, name, namespace=None, request=None, tmpl_type="cheetah", theme=None, variables={}, **kwargs):
"""
load a template and return its rendered response
currently supported tmpl_types are: "cheetah", "jinja2", "django"
Templates can come from modules, the vodka barebone or brands
"""
if not theme:
theme = self.theme
#print "TMPL: %s" % namespace
if theme and namespace:
namespace = "%s.%s" % (namespace, theme)
tmpl_code = None
tmpl_path = None
self.deny_frame(request)
#if namespace is not defined, check barebone vodka templates
if not namespace:
tmpl_path = os.path.join("tmpl")
if not os.path.exists(tmpl_path):
raise Exception("Template not found: %s" % tmpl_path)
else:
# first check in the brand location
if self.brand and os.path.exists(os.path.join(self.brand.get("dir"), "tmpl", namespace, name)):
tmpl_path=os.path.join(
self.brand.get("dir"), "tmpl", namespace
)
# then check in the module template cache
elif self.app.templates.has_key("%s.%s" % (namespace, name)):
tmpl_code = self.app.templates.get("%s.%s" % (namespace, name))
if type(tmpl_code) == list:
tmpl_path = os.path.dirname(tmpl_code[0])
tmpl_code = None
tmpl = None
variables.update({
"brand_path" : self.brand_path,
"app_version" : constants.version,
"request" : self.get_bridge(request),
"_" : self.locale._,
"sf" : self.staticFileUrl,
"sfb": self.staticFileUrlBrand
})
#print "variables: %s" % variables
if not variables.has_key("headers"):
variables["headers"] = []
if tmpl_type == "cheetah":
engine = tmpl_engine.engine.CheetahEngine(tmpl_dir=tmpl_path)
elif tmpl_type == "jinja2":
engine = tmpl_engine.engine.Jinja2Engine(tmpl_dir=tmpl_path)
elif tmpl_type == "django":
engine = tmpl_engine.engine.DjangoEngine(tmpl_dir=tmpl_path)
else:
raise Exception("Unknown templating engine: %s" % tmpl_type)
if tmpl_code:
return engine._render_str_to_str(tmpl_code, env=variables)
elif tmpl_path:
return engine._render(name, env=variables)
else:
# template not found
raise Exception("Template not found: %s, %s" % (name, namespace))
#############################################################################
# set x-frame-options to deny loading this request in a frame. One reason
# to do this is to prevent clickjacking
def deny_frame(self, request):
headers = request.get("headers")
headers.extend([
("x-frame-options", "DENY"),
])
##############################################################################
def reload_20c_module_perms(self):
"""
Reload the module perms for this session
"""
if self.app.module_manager:
self.module_perms = self.app.module_manager.perms(self.auth_id)
self.module_perms_structure = vt_session.perms_structure(self.module_perms)
for namespace, level in self.app.grant_permissions.items():
if self.check_20c_module(namespace) & level:
continue
if self.module_perms.has_key(namespace):
self.module_perms[namespace] = self.module_perms.get(namespace) | level
else:
self.module_perms[namespace] = level
self.module_perms["twentyc-billing.%s.response"%self.client_id] = constants.ACCESS_READ
##############################################################################
def module_control(self, app_doc="app"):
if self.pref_manager:
return self.pref_manager.get(app_doc).get("module_control", {});
else:
return {}
##############################################################################
def available_20c_modules(self, mobile=False):
"""
Return a list of modules that the session has access to
"""
r = [];
if mobile:
app_doc = "mobile_app"
else:
app_doc = "mobile"
module_control = self.module_control(app_doc)
for i in self.app.module_js_load_order:
mod = self.app.module_status.get(i,{})
if mobile and not mod.get("mobile"):
continue;
if not mod.get("status"):
status = 0
else:
status = int(module_control.get(i,1))
if self.check_20c_module(i):
r.append({
"name" : i,
"version" : mod.get("version"),
"status" : status
})
return r
##############################################################################
def check_20c_module(self, name, ambiguous=False):
"""
Check if session has access to the specified 20c module, return perms
"""
if self.app.module_status.has_key(name):
if self.app.module_status.get(name,{}).get("access_level",0) == 0:
return 3
if self.app.grant_permissions.has_key(name):
return self.app.grant_permissions.get(name)
if re.match("^__U\.%s\..+" % self.client_id, name):
return 0x01|0x02|0x04
if re.match("^__vodka-task-result\..+", name):
task_id = name.split(".")[1]
if task_id in self.tasks:
return 0x01
else:
return 0
if self.app.module_manager:
return self.app.module_manager.perms_check(self.module_perms, name, ambiguous=ambiguous)
##############################################################################
def reload_20c_module(self, name, version):
"""
Send remote code execution to client to reload the specified module
name <str> name of the module to reload
"""
self.rce_require(
"reload.%s" % name,
"\n".join([
"TwentyC.Modules.Load('%s', '%s');" % (name,version)
])
)
##############################################################################
def unload_20c_module(self, name):
"""
Send remote code execution to client to unload the specified module
name <str> name of the module to unload
"""
#find all modules that depend on this module.
modules = self.app.update_modules()
for mod_name,mod_status in modules.items():
if name in mod_status.get("dependencies",[]):
self.unload_20c_module(mod_name)
self.rce_require(
"unload.%s" % name,
self.app.unload_tools_code+"\n"+
"TwentyC.Modules.Unload('%s');\n" % name+
(self.app.module_javascript_component(name, comp="unload.js") or "")
)
##############################################################################
def task_run(self, moduleName, taskName, params={}, target="download", filename=None, limitResult=0, source="session"):
if self.check_20c_module(moduleName):
taskType ="%s.%s" % (moduleName, taskName)
# make sure session is not at task capacity
maxCap = TASK_CAPACITY.get(taskType, 1)
totCap = self.app.taskSessionCap
wSame, fSame = self.task_status(taskType)
wTotal, fTotal = self.task_status()
if wSame >= maxCap:
raise Exception("Please wait for the current '%s' task(s) to finish" % taskType)
if wTotal >= totCap:
raise Exception("Please wait for one of your other background tasks to finish")
id_prefix = self.client_id[:6]
self.app.log.info("Session %s... starting task: %s.%s %s" % (
id_prefix,
moduleName,
taskName,
params
))
id, p = self.app.task_run(
moduleName,
taskName,
id=self.client_id[:6],
ses=self,
target=target,
params=params,
filename=filename,
limitResult=limitResult,
source=source
)
self.tasks.append(id)
return id
##############################################################################
def task_cancel(self, id):
if id not in self.tasks:
raise Exception("Session doesn't own a task with that id")
info = self.app.task_info(id)
info.update(end_t=time.time(), status=task.FINISHED, progress="Canceled", retrieved=2)
self.app.task_terminate(id)
##############################################################################
def task_status(self, type=None):
working = 0
finished = 0
for id in self.tasks:
t = self.app.tasks.get(id)
if not t or (type and t.get("info",{}).get("type") != type):
continue
status = t.get("info",{}).get("status")
if status == task.FINISHED:
finished += 1
else:
working += 1
return (working, finished)
##############################################################################
def update_index(self, name, index, rev=None):
if type(index) == types.NoneType:
return
prev = self.update_index_map.get(name, type(index)())
if type(prev) == list:
diff = list(set(prev) - set(index))
rv = self.update_index_dropped[name] = list(set(diff + self.update_index_dropped.get(name,[])))
self.update_index_map[name] = index
crev_a, crev_b = self.update_index_rev.get(name, (0,0))
if rev > crev_a:
self.update_index_dropped[name] = []
crev_a = rev
if rev == crev_b and rv:
if not self.update_index_rev.has_key(name):
crev_a = 0
crev_b = 1
else:
crev_b += 1
self.update_index_rev[name] = (crev_a, crev_b)
return rv
elif type(prev) == dict:
dropped = {}
updated = {}
for k,v in index.items():
if prev.get(k) != v:
updated[k] = v
for k,v in prev.items():
if not index.has_key(k):
dropped[k] = v
diff = (updated, dropped)
return diff
################################################################################
################################################################################
```
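The list branch of `Session.update_index` above amounts to tracking which entries were dropped between successive index snapshots; a standalone sketch of that diff (the function name is illustrative):
```python
# Sketch of the snapshot diff Session.update_index performs for list indexes:
# entries present in the previous snapshot but missing from the new one are
# accumulated as "dropped" until a new revision resets the list.
def diff_index(prev, index, dropped=None):
    gone = list(set(prev) - set(index))
    return list(set(gone + (dropped or [])))

prev = ["ticket-1", "ticket-2", "ticket-3"]
new = ["ticket-2", "ticket-4"]
print(sorted(diff_index(prev, new)))                        # ['ticket-1', 'ticket-3']
print(sorted(diff_index(new, ["ticket-4"], ["ticket-1"])))  # ['ticket-1', 'ticket-2']
```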
#### File: twentyc/vodka/tmplbridge.py
```python
import ConfigParser
import weakref
from pprint import pformat
import urllib
import re
import constants
import session
from rpc import *
import logging, logging.handlers
from datetime import datetime, timedelta, tzinfo
import time
import errno
import socket
import locale
import gettext
import babel
from wsgi import webapp
import operator
gettext.bindtextdomain('vodka', '/path/to/my/language/directory')
gettext.textdomain('vodka')
_ = gettext.gettext
# read , write perm flags
PERMS = {
"READ" : 0x01,
"WRITE" : 0x02
}
def add_module(name, allow=False):
# DEPRECATED
return
################################################################################
rpc_alias = {}
def register_rpc_alias(rpc_name, function_name):
rpc_alias[rpc_name] = function_name
################################################################################
class UTC(tzinfo):
"""
Timezone object
"""
def __init__(self, offset=0):
tzinfo.__init__(self)
self.offset = offset
def dst(self, dt):
return timedelta(0)
def utcoffset(self, dt):
#FIXME
return timedelta(hours=self.offset)
def tzname(self, dt):
return "UTC %s" % str(self.offset)
################################################################################
class TmplBridge:
"""
Request bridge
A new instance is created with each request
"""
def __init__(self, ses, request, ignoreExpiry=False):
"""
ses should be a reference to a vodka Session instance
if ignoreExpiry is True the creation of this tmplbridge
instance will not reset the session expiry timer
"""
# reference to the http request object that this bridge belongs to
self.request = request
# current time
self.now = datetime.now()
# reference to the user session that this bridge belongs to
self.ses = weakref.proxy(ses)
# reference to the session's locale object
self.locale = ses.locale
# reference to the VodkaApp instance
self.zw = ses.app
self.app = ses.app
# user session id
self.auth_id = ses.auth_id
if not ignoreExpiry:
self.reset_expiry()
self.check_expiry(int(ses.app.config['app'].get('expiry', 240)))
#set up locale
locale.setlocale(locale.LC_ALL, '')
@property
def errors(self):
return self.ses.get_errors()
##############################################################################
def rpc_alias(self, name):
return rpc_alias.get(name)
##############################################################################
def version(self):
"""
Return app version (str)
"""
return constants.version
##############################################################################
def timestamp(self):
"""
Return timestamp for self.now (sec.usec format)
"""
dt = self.now
return dt.strftime("%s")+'.'+str(dt.microsecond)
##############################################################################
def user(self):
"""
Return username
"""
return self.ses.user
##############################################################################
# check if the server is running production or dev env
def is_production(self):
"""
Return True if server is running in production environment
Else return False
"""
# default to production
env = self.zw.serverConfig.get("environment", "production")
if env == "production":
return True
else:
return False
#############################################################################
# append error to session error object
def error(self, error):
"""
Append error (str) message to session error object
"""
self.ses.errors.append(error)
return False
##############################################################################
def round_float(self, n, places):
return round(float(n) * pow(10, places)) / pow(10, places)
##############################################################################
# set vodka_ts session variable to current timestamp
def reset_expiry(self):
"""
Reset session expiry time according to current time
"""
self.request.get("session").data["vodka_ts"] = time.mktime(self.now.timetuple())
##############################################################################
def check_expiry(self, n = 4):
"""
Check if enough time has passed since the last reset_expiry call. If so
force timeout and logout on the session
"""
sd = self.request.get("session").data
if not sd.get('vodka_ts') or not n:
return 0
#print "sd: %s" % str(sd)
lastAction = sd['vodka_ts']
now = time.mktime(self.now.timetuple())
diff = now - lastAction
#print "checking expiry %d minutes (%d)" % (n, diff)
if diff >= (60*n):
self.request["session"].forceExpire = True
return diff
##############################################################################
def prefs(self):
"""
Return session prefs object
"""
return self.ses.pref_manager
##############################################################################
def get_themes(self):
"""
Return dict of themes
"""
return self.zw.config.get("themes", {
"default" : "Enhanced",
"fixed" : "Simple",
"iphone" : "IPhone",
}).items()
##############################################################################
def get_langs(self):
"""
Return dict of languages
"""
return self.zw.config.get('lang', {'en-US':1}).items()
##############################################################################
def csrf_token(self):
return webapp.get_cookie(self.request, "csrftoken", "")
##############################################################################
def selected_theme(self):
"""
Return the currently selected theme in the cookie
If no theme is selected in the cookie return the default theme for the
session
"""
return webapp.get_cookie(self.request, "theme", self.ses.theme)
##############################################################################
def selected_lang(self):
"""
Return the currently selected language in the cookie
If no language is selected in the cookie return the default language
for the session
"""
return webapp.get_cookie(self.request, "lang", self.ses.locale.lang)
##############################################################################
def selected_brand(self):
"""
Return the session's brand
"""
return self.ses.brand
##############################################################################
def js_init_20c_modules(self):
rv = []
for name, mod in self.ses.app.modules.items():
# skip modules that were loaded from couchbase but are currently
# deactivated
if hasattr(mod, "_module_from_couchbase"):
if not self.ses.app.module_status.has_key(mod._module_from_couchbase):
continue;
if hasattr(mod, "js_init"):
rv.append(mod.js_init(self.ses) or "")
if rv:
return "\n".join(rv)
else:
return ""
##############################################################################
def include_20c_modules(self, mobile=False):
"""
Cycle through modules and include their js libs if the session has access
to it
"""
r = [];
ses = self.ses
sfUrl = ses.staticFileUrl;
modctrl = ses.module_control()
modstat = ses.app.update_modules()
for i in ses.app.module_js_load_order:
mod = ses.app.module_status.get(i)
# dont include modules that are deactivated
if not mod or not mod.get("status"):
continue
# if were importing for mobile theme, check if module has a mobile component
# before proceeding
if mobile and not mod.get("mobile"):
continue
# check if module is disabled on the session level
if modctrl.get(i) == False:
continue
# check if module dependencies are disabled in module manager or the
# session level. If they are, don't load the modules that depend on them.
deps = mod.get("dependencies", [])
valid = True
for d in deps:
# is dependency disabled on session level?
if modctrl.get(d) == False:
valid = False
break
# is dependency disabled in module status?
dep = modstat.get(d)
if not dep or not dep.get("status"):
valid = False
break
if not valid:
continue
if self.check_20c_module(i):
r.append('<script id="mod:%s" type="text/javascript" src="/ui_component/%s/%s"></script>' % (
i, i, mod.get("version")
))
return "\n".join(r)
##############################################################################
def check_20c_module(self, name):
"""
Check if session has access to the specified 20c module, return perms
"""
return self.ses.check_20c_module(name)
#############################################################################
def locations(self):
"""
Return a list of valid hosts from brand location map
"""
rv = []
for loc in self.ses.brand.get("locations", []):
rv.append(loc.split("="))
return rv
#############################################################################
def json_string(self, obj):
"""
Return json encoded string for <obj>
"""
return json.json.dumps(obj)
#############################################################################
def js_bool(self, b):
if b:
return "true"
else:
return "false"
#############################################################################
def esc(self, txt):
if txt:
return txt.replace('"', '\\"').replace("'", "\\'")
else:
return ""
#############################################################################
def required_themes(self, layout):
themes = []
if layout.has_key("windows"):
windows = layout.get("windows")
for win in windows:
if win.get("opt"):
opt = win.get("opt")
theme = opt.get("color_theme", opt.get("theme"))
if theme and theme not in themes:
themes.append(theme)
return themes
#############################################################################
def loading_shim_get_template(self):
"""
Dummy function
"""
return None
#############################################################################
def module_media(self, namespace, fileName):
return "/module_media/%s/%s/%s" % (
namespace,
self.ses.app.module_version(namespace),
fileName
)
#############################################################################
def module_js_path(self, namespace):
return "/ui_component/%s/%s" % (
namespace,
self.ses.app.module_version(namespace)
)
#############################################################################
def include_css_libs(self):
rv = []
for path in self.ses.app.lib_includes_css:
rv.append('<link rel="stylesheet" type="text/css" href="%s/%s" />' % (
self.ses.staticFileUrl,
path
))
return '\n'.join(rv)
#############################################################################
def include_js_libs(self):
rv = []
for path in self.ses.app.lib_includes_js:
rv.append('<script type="text/javascript" src="%s/%s"></script>' % (
self.ses.staticFileUrl,
path
))
return '\n'.join(rv)
#############################################################################
def include(self, name, namespace=None, **kwargs):
return self.ses.tmpl(name, namespace=namespace, request=self.request, **kwargs)
#############################################################################
def access_xl(self, v):
return constants.access_xl(v)
#############################################################################
@webapp.expose
def tasks(self):
rv = {}
for id in self.ses.tasks:
task = self.ses.app.tasks.get(id)
if not task:
continue
rv[id] = task.get("info")
return rv
################################################################################
################################################################################
```
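A standalone sketch of the idle-expiry bookkeeping done by `reset_expiry`/`check_expiry` above (`session_data` and the `force_expire` flag stand in for the request session object and its `forceExpire` attribute):
```python
import time

def reset_expiry(session_data):
    # stamp the last user action
    session_data["vodka_ts"] = time.time()

def check_expiry(session_data, n=4):
    # force expiry once more than n minutes have passed without a reset
    last = session_data.get("vodka_ts")
    if not last or not n:
        return 0
    diff = time.time() - last
    if diff >= 60 * n:
        session_data["force_expire"] = True
    return diff

session_data = {}
reset_expiry(session_data)
print(check_expiry(session_data))  # ~0.0 right after a reset
```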
#### File: vodka/tools/appstore.py
```python
from module_manager import *
import os
import mimetypes
import time
import traceback
import base64
import threading
import twentyc.tools.CryptoUtil as cu
###############################################################################
APPSTORE_STATUS_PENDING = 0
APPSTORE_STATUS_APPROVED = 1
APPSTORE_STATUS_HIDDEN = 2
APPSTORE_LISTING_KEY = ":appstore-listing:%s"
APPSTORE_COMPONENT_KEY = ":appstore-component:%s.%s"
APPSTORE_COMPANY_INFO_KEY = ":appstore-company:%s"
APPSTORE_CATEGORIES_KEY = ":appstore-categories:%s.%s"
APPSTORE_FILTERS_KEY = ":appstore-filters:%s"
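# couchbase key templates for appstore documents; the %s placeholders are
# filled with identifiers such as the listing, company or component name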
###############################################################################
class AppAlreadyProvisioned(Exception):
pass
class TrialAlreadyStarted(Exception):
pass
class TrialEnded(Exception):
pass
###############################################################################
class Appstore(ModuleManager):
_started = False
billing_pubkey = None
# category and filter refresh time
cf_refresh = 10
# trial duration time in seconds
trial_t = 1800
# trial limit time (time between trials) in seconds
trial_limit_t = 60*60*24*14
# indicates whether the check trials timer is on or off
started = 0
#############################################################################
def dbg(self, msg, verbose=True):
msg = "Appstore: %s" % msg
if self.log:
self.log.debug(msg)
if self.verbose and verbose:
print msg
#############################################################################
def error(self, msg):
raise Exception("Appstore: %s" % msg)
#############################################################################
def appstore_index(self):
"""
Return index of all appstore listings
"""
try:
apps = self.cb_client.view("appstore", "index")
rv = {}
for row in apps:
app = row.get("value")
rv[app.get("name")] = app
return rv
except:
raise
#############################################################################
def appstore_app_listing(self, app_name):
"""
Return info for specified app listing
app_name <str> name of the listing
"""
try:
return self.cb_client.get(APPSTORE_LISTING_KEY % app_name)
except:
raise
#############################################################################
def appstore_listing_save(self, app_listing):
try:
app_name = app_listing.get("name")
self.cb_client.set(APPSTORE_LISTING_KEY % app_name, app_listing)
except:
raise
#############################################################################
def appstore_add_listing(self, app_name, modules, title="", owner="", tags=[], price=0.0, subscription=0, description=""):
"""
Create a new app listing
app_name <str> name of the listing - will error if another listing with the same name already exists
modules <dict> dict of modules and perms provisioned by this app. Keys should be the module names and
values should be the permissions
title <str> user friendly title of the listing
owner <str> company name/id (as it is used to store to company info)
tags <list> list of tags relevant to the listing
price <float>
subscription <int> if > 0 indicates a subscription with a due time every n days
description <str> short description of the listing
"""
try:
self.lock.acquire()
app_listing = self.appstore_app_listing(app_name)
# only create the listing if one does not already exist
if not app_listing:
tags = list(tags or [])  # copy so the shared default list is not mutated across calls
tags.extend([
app_name,
title,
owner
])
tags = [ x.lower() for x in tags ]
app_listing = {
"type" : "appstore_listing",
"name" : app_name,
"title" : title,
"owner" : owner,
"app_name" : app_name,
"modules" : modules,
"tags" : tags,
"media" : [],
"status" : APPSTORE_STATUS_PENDING,
"description" : description,
"price" : price,
"subscription" : subscription
}
self.appstore_listing_save(app_listing)
# create / update pgroup entry for app
self.pgroup_update(
"app:%s" % app_name,
modules,
source="appstore",
reason="listing creation: %s" % app_name
)
return app_listing
else:
self.error("A listing for the name '%s' already exists" % app_name)
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_remove_listing(self, app_name, remove_provisioning=False, reason=None):
"""
Remove a listing
app_name <str> name of the listing
remove_provisioning <bool> if True remove app and provisioning from
all users who have purchased this app
"""
try:
self.lock.acquire()
app_listing = self.appstore_app_listing(app_name)
if app_listing:
# remove provisioning to that app
if remove_provisioning:
used_by = self.appstore_app_is_used_by(app_name)
for user_id in used_by:
self.appstore_remove_app_from_user(user_id["value"], app_name, reason=reason)
# remove app listing and the listing's components
for comp in app_listing.get("media",[]):
self.cb_client.unset(APPSTORE_COMPONENT_KEY % (app_name, comp))
self.cb_client.unset(APPSTORE_LISTING_KEY % app_name)
# remove app's permgroup
self.pgroup_remove("app:%s" % app_name, source="appstore", reason="listing removed: %s" % app_name)
return True
return False
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_change_listing_status(self, app_name, status):
"""
Change the status of a listing
app_name <str> name of the listing
status <int> new listing status
"""
try:
self.lock.acquire()
if status not in [
APPSTORE_STATUS_PENDING,
APPSTORE_STATUS_APPROVED,
APPSTORE_STATUS_HIDDEN
]:
self.error("Invalid listing status: %s" % status)
app_listing = self.appstore_app_listing(app_name)
if not app_listing:
self.error("Cannot change status of listing, as listing does not exist: %s" % app_name)
app_listing["status"] = status
self.appstore_listing_save(app_listing)
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_listing_add_component(self, name, component_name, contents, mime, minified=""):
"""
Add a media component to a listing
name <str> listing name
component_name <str> unique component name (think filename)
contents <str> will be base64 encoded
mime <tuple> mimetype as returned by mimetypes.guesstype
"""
try:
app_listing = self.appstore_app_listing(name)
list_name = "media"
contents = base64.b64encode(contents)
if not app_listing.has_key(list_name):
app_listing[list_name] = []
if component_name not in app_listing[list_name]:
app_listing[list_name].append(component_name)
self.appstore_change_listing(name, app_listing)
self.cb_client.set(
APPSTORE_COMPONENT_KEY % (name, component_name),
{
"listing" : name,
"name" : component_name,
"owner" : app_listing.get("owner"),
"type" : "vodka_appstore_listing_component",
"component_type" : list_name,
"mime" : mime,
"contents" : contents
}
)
except:
raise
#############################################################################
def appstore_listing_remove_component(self, name, component_name):
"""
Remove media component from listing
name <str> listing name
component_name <str> name of the component to be removed
"""
try:
key = APPSTORE_COMPONENT_KEY % (name, component_name)
component = self.cb_client.get(key)
app_listing = self.appstore_app_listing(name)
if component:
app_listing["media"].remove(component_name)
self.cb_client.unset(key)
self.appstore_change_listing(name, app_listing)
except:
raise
#############################################################################
def appstore_listing_component(self, name, component_name):
"""
Return the contents of a media component.
Note that this will not base64 decode the contents
name <str> listing name
component_name <str> name of the component
"""
try:
key = APPSTORE_COMPONENT_KEY % (name, component_name)
component = self.cb_client.get(key)
return component
except:
raise
#############################################################################
def appstore_listing_module_fields(self, app_name):
"""
Returns a tuple (version, modified, mod_status) where:
version    - version of the first module in the app's module list
modified   - modified date of the most recently modified module in the app's module list
mod_status - 1 if all modules in the app's module list have been approved, 0 if any of them hasn't
"""
try:
version = None
modified = 0
mod_status = 1
app_listing = self.appstore_app_listing(app_name)
if not app_listing:
return ("",0)
for mod_name, perms in app_listing.get("modules").items():
mod_info = self.appstore_module_info(mod_name)
if mod_info:
if not version:
version = mod_info.get("version")
modified = max(modified, mod_info.get("modified"))
if not mod_status:
mod_status = 0
return (version, modified, mod_status)
except:
raise
#############################################################################
def appstore_add_provisioning_to_user(self,user_id,listing_name, reason, transaction_id=0, xbahn_sync=True):
"""
Set provisioning for all modules specified in an app listing on
the specified user.
listing_name <str> name of the appstore listing
user_id <int> user id
reason <str> reason for provisioning change, eg. "subscription start"
"""
try:
app_listing = self.appstore_app_listing(listing_name)
if not app_listing:
raise Exception("Cannot find listing: %s", listing_name)
user_perms = self.perms(user_id)
mods_changed = {}
self.pgroup_grant("app:%s" % listing_name, user_id, source="appstore", reason="listing '%s': %s" % (listing_name, reason), xbahn_sync=xbahn_sync)
return mods_changed
except:
raise
#############################################################################
def appstore_remove_provisioning_from_user(self, user_id, listing_name, reason="", xbahn_sync=True):
"""
Unset provisioning for all modules specified in an app listing on
the specified user.
listing_name <str> name of the appstore listing
user_id <int> user id
reason <str> reason for provisioning change, eg. "subscription end"
"""
try:
app_listing = self.appstore_app_listing(listing_name)
if app_listing:
self.pgroup_revoke("app:%s" % listing_name, user_id, source="appstore", reason="listing '%s': %s" % (listing_name, reason), xbahn_sync=xbahn_sync)
return {}
except:
raise
#############################################################################
def appstore_user_info_key(self, user_id):
"""
Return the key with which appstore user info documents are stored
user_id <int>
"""
try:
return "U%s.appstore_user_info" % user_id
except:
raise
#############################################################################
def appstore_app_is_used_by(self, listing_name):
"""
Return a list of user ids that currently possess the specified app (listing_name)
listing_name <str> app listing name
"""
try:
return self.cb_client.view("appstore", "user_active_apps", key=listing_name, stale=False)
except:
raise
#############################################################################
def appstore_all_active_trials(self):
"""
Returns a list of app names and user ids of all active trials
"""
try:
r = self.cb_client.view("appstore", "user_active_trials", stale=False)
rv = {}
for row in r:
user_id = row.get("value")
app_name = row.get("key")
if not rv.has_key(user_id):
rv[user_id] = []
rv[user_id].append(app_name)
return rv
except:
raise
#############################################################################
def appstore_user_has_provisioning(self, user_id, app_name):
"""
Check if a user already has provisioning to all the modules linked to
an application (not necessarily provisioned by the application in question)
user_id <int>
app_name <str> name of the app listing
"""
try:
app_listing = self.appstore_app_listing(app_name)
if not app_listing:
raise Exception("Listing does not exist: %s" % app_name)
for mod, perms in app_listing.get("modules", {}).items():
if not self.perms_check(user_id, mod) & perms:
return False
return True
except:
raise
#############################################################################
def appstore_user_info(self, user_id):
"""
Return appstore user info for the specified user id
User info contains user specific appstore information such as
purchased applications and demo tracking
user_id <int>
"""
try:
k = self.appstore_user_info_key(user_id)
info = self.cb_client.get(k) or {
"type" : "appstore_user_info",
"user_id" : user_id,
"trials" : {},
"active_apps" : {}
}
return info
except:
raise
#############################################################################
def appstore_save_user_info(self, user_id, info):
"""
Update user info for the specified user_id
user_id <int>
info <dict> dict with updated keys
"""
try:
self.cb_client.set(
self.appstore_user_info_key(user_id),
info
)
except:
raise
#############################################################################
def appstore_add_app_to_user(self, user_id, listing_name, reason="", xbahn=True, transaction_id=0):
"""
Add app to user. This is what we want to call when a payment has gone
through. It takes care of provisioning access to the app's modules for the
user, amongst other things.
user_id <int>
listing_name <str> app listing name
reason <str> reason for adding eg. "app purchase"
xbahn <bool> if true and xbahn property is set on appstore, broadcast module
reload notify
transaction_id <str|int> payment transaction id, if any
"""
try:
self.lock.acquire()
self.dbg("Adding app to user %s: %s" % (user_id, listing_name))
self.appstore_end_trial_for_user(user_id, listing_name, xbahn=False)
mods_changed = self.appstore_add_provisioning_to_user(user_id, listing_name, reason=reason, transaction_id=transaction_id)
info = self.appstore_user_info(user_id)
app_listing = self.appstore_app_listing(listing_name)
info["active_apps"][listing_name] = {
"purchase_price" : app_listing.get("price"),
"transaction_id" : transaction_id,
"subscription" : app_listing.get("subscription"),
"subscription_end" : 0,
"purchase_t" : time.time()
}
self.appstore_save_user_info(user_id, info)
return {
"user_id" : user_id,
"modules" : mods_changed
}
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_remove_app_from_user(self, user_id, listing_name, reason="", xbahn=True):
"""
Remove app from user (this function is what we want to call when a subscription
to an app is ended, or when the app listing is removed completely from the
app store).
It will undo all the provisioning a user has to the app's modules
user_id <int>
listing_name <str> app listing name
reason <str> reason for removal, eg. "subscription end"
xbahn <bool> if true and xbahn property is set on appstore, broadcast module
unload notify
"""
try:
self.lock.acquire()
info = self.appstore_user_info(user_id)
if not info["active_apps"].has_key(listing_name):
return
self.dbg("Removing app from user %s: %s" % (user_id, listing_name))
del info["active_apps"][listing_name]
self.appstore_save_user_info(user_id, info)
mods_removed = self.appstore_remove_provisioning_from_user(user_id, listing_name, reason=reason)
mods_reset = self.appstore_sync_provisioning(user_id, info=info, reason="Listing '%s' removed" % listing_name)
for k in mods_reset.keys():
if mods_removed.has_key(k):
del mods_removed[k]
if xbahn:
self.xbahn_notify_module_unload(user_id, mods_removed)
return {
"user_id" : user_id,
"modules" : mods_removed
}
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_cancel_subscription(self, user_id, listing_name, active_until=0, reason="", info={}):
try:
self.lock.acquire()
info = self.appstore_user_info(user_id)
sub_info = info["active_apps"].get(listing_name)
if not sub_info:
raise Exception("User has no subscription to '%s'" % listing_name)
if sub_info.get("subscription_end"):
return
sub_info["subscription_end"] = active_until or time.time()
sub_info["subscription_end_reason"] = reason
self.appstore_save_user_info(user_id, info)
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_sync_provisioning(self, user_id, reason="", info={}):
"""
Sync provisioning for user
user_id <int>
info <dict> userinfo (if not supplied will be fetched automatically using user id)
"""
try:
mods_reset = {}
if not info:
info = self.appstore_user_info(user_id)
for name in info["active_apps"].keys():
mods_reset.update(
self.appstore_add_provisioning_to_user(user_id, name, reason="sync provisioning: %s - access regained via %s" % (reason, name))
)
for name in self.appstore_active_trials(info):
mods_reset.update(
self.appstore_add_provisioning_to_user(user_id, name, reason="sync provisioning (trial): %s - acess regained via TRIAL %s" % (reason, name))
)
return mods_reset
except:
raise
#############################################################################
def appstore_active_trials(self, user_id):
"""
Return a list of currently active trials for the specified user
user_id <int|dict> can be user id or userinfo dict
"""
try:
if type(user_id) == dict:
info = user_id
else:
info = self.appstore_user_info(user_id)
trials = info.get("trials", {})
rv = []
for app_name, trial in trials.items():
if trial.get("status") == 0:
rv.append(app_name)
return rv
except:
raise
#############################################################################
def appstore_trial_status(self, user_id, app_name, t=0):
"""
Check the trial status for the specified user and app
user_id <str|dict> can either be user id or a userinfo dict as returned
by self.appstore_user_info
app_name <str> listing name
t <int> optional timestamp as returned by time.time(); if omitted, time.time()
will be called
Returns:
a tuple (status, wait, remaining) - status is the stored trial status, wait is
the number of seconds until another trial may be started and remaining is the
number of seconds left in the currently active trial; (0, 0, 0) if no trial exists
"""
try:
if type(user_id) != dict:
info = self.appstore_user_info(user_id)
else:
info = user_id
trial = info.get("trials", {}).get(app_name)
if not trial:
return (0,0,0)
if not t:
t = time.time()
a = t - trial.get("end_t")
d = self.trial_limit_t - a
b = (trial.get("end_t") - trial.get("start_t")) - (t - trial.get("start_t"))
if d > 0:
return (trial.get("status"), d, b)
else:
return (trial.get("status"), 0, 0)
except:
raise
#############################################################################
def appstore_add_trial_to_user(self, user_id, app_name, xbahn=True):
"""
Add a trial for an app to the specified user. Trials grant provisioning
to the app's modules for a limited amount of time
user_id <int>
app_name <str> name of the listing
"""
try:
self.lock.acquire()
# make sure user doesn't have provisioning to app before proceeding
if self.appstore_user_has_provisioning(user_id, app_name):
raise AppAlreadyProvisioned([user_id, app_name])
user_info = self.appstore_user_info(user_id)
trials = user_info.get("trials", {})
t = time.time()
if trials.has_key(app_name):
if trials.get(app_name).get("status") == 0:
raise TrialAlreadyStarted([user_id, app_name])
else:
trial = trials.get(app_name)
a = t - trial.get("end_t")
d = self.trial_limit_t - a
if d > 0:
raise TrialEnded([user_id, app_name, d])
trials[app_name] = {
"start_t" : t,
"end_t" : t + self.trial_t,
"status" : 0
}
user_info["trials"] = trials
self.appstore_save_user_info(user_id, user_info)
mods_changed = self.appstore_add_provisioning_to_user(user_id, app_name, reason="trial started: %s" % app_name)
if xbahn:
self.xbahn_notify_module_reload(user_id, mods_changed)
return {
"user_id" : user_id,
"modules" : mods_changed
}
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_end_trial_for_user(self, user_id, app_name, xbahn=True):
"""
End app trial for specified user
user_id <int>
app_name <str> name of the listing
"""
try:
self.lock.acquire()
user_info = self.appstore_user_info(user_id)
trials = user_info.get("trials", {})
if not trials.get(app_name):
return
trial = trials.get(app_name)
if trial.get("status"):
return
trials[app_name]["status"] = 1
trials[app_name]["end_t"] = time.time()
self.appstore_save_user_info(user_id, user_info)
mods_removed = self.appstore_remove_provisioning_from_user(user_id, app_name, reason="trial ended: %s" % app_name)
mods_reset = self.appstore_sync_provisioning(user_id, info=user_info, reason="trial ended: %s" % app_name)
for k in mods_reset.keys():
if mods_removed.has_key(k):
del mods_removed[k]
if xbahn:
self.xbahn_notify_module_unload(user_id, mods_removed)
self.xbahn_notify_appstore_trial_ended(user_id, app_name)
return {
"user_id" : user_id,
"modules" : mods_removed
}
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_module_info(self, mod_name):
return self.module_info(*self.module_token(mod_name))
#############################################################################
def appstore_change_listing(self, app_name, info, xbahn=True):
"""
Update an app listing
app_name <str> name of the listing
info <dict> dict holding updated fields
"""
try:
self.lock.acquire()
app_listing = self.appstore_app_listing(app_name)
mchanged = False
# the listing must already exist before it can be changed
if not app_listing:
raise Exception("Cannot change listing, as appstore listing for this module does not exist: '%s'" % mod_name)
if info.has_key("tags"):
info["tags"] = [ x.lower() for x in info["tags"] ]
if info.has_key("modules"):
# modules has been set, check if modules differ from old listing
# if they do, provisioning needs to be reset for all users that
# have access to this app
lmo = app_listing.get("modules", {})
lmn = info.get("modules", {})
if len(lmo.keys()) != len(lmn.keys()):
mchanged = True
else:
for mod in lmo.keys():
if mod not in lmn.keys():
mchanged = True
break
# if modules changed: remove provisioning to old modules
# update listing
app_listing.update(info)
app_listing["status"] = APPSTORE_STATUS_PENDING
self.appstore_listing_save(app_listing)
# if modules changed: add provisioning to new modules
if mchanged:
self.pgroup_update(
"app:%s" % app_name,
info.get("modules"),
source="appstore",
reason="listing '%s' updated" % (app_name)
)
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_company_info_key(self, name):
"""
Return the key used to store company info
name <str> company name
"""
try:
return APPSTORE_COMPANY_INFO_KEY % name
except:
raise
#############################################################################
def appstore_company_info(self, name):
"""
Return company info document for the specified company
name <str> company name
"""
try:
return self.cb_client.get(
self.appstore_company_info_key(name)
)
except:
raise
#############################################################################
def appstore_add_company(self, name, description, web, tags, address, phone, email):
"""
add company info
name <str> user friendly company name
description <str> short description of the company
web <str> website address of the company
tags <list> list of tags for search and filtering
address <str> company address
phone <str> company phone number
email <str> company email address
"""
try:
info = self.appstore_company_info(name)
if not info:
self.dbg("Adding company: %s" % name)
info = {
"name" : name,
"description" : description,
"web" : web,
"address" : address,
"phone" : phone,
"email" : email,
"tags" : tags
}
self.cb_client.set(
self.appstore_company_info_key(name),
info
)
except Exception, inst:
raise
#############################################################################
def appstore_remove_company(self, name):
"""
Remove company info for the company with the specified name
name <str> company name
"""
try:
info = self.appstore_company_info(name)
if info:
self.dbg("Removing company: %s" % name)
self.cb_client.unset(
self.appstore_company_info_key(name)
)
except:
raise
#############################################################################
def appstore_filters(self, brand="default", t=0):
try:
data = self.cb_client.get(APPSTORE_FILTERS_KEY % (brand))
if not data:
data = self.cb_client.get(
APPSTORE_FILTERS_KEY % ("default")
)
return data or {}
except:
raise
#############################################################################
def appstore_filters_set(self, data, brand="default"):
try:
self.lock.acquire()
self.cb_client.set(APPSTORE_FILTERS_KEY % (brand), data)
except:
raise
finally:
self.lock.release()
#############################################################################
def appstore_categories(self, lang="en", brand="default", t=0):
try:
data = self.cb_client.get(APPSTORE_CATEGORIES_KEY % (brand,lang))
if not data:
data = self.cb_client.get(
APPSTORE_CATEGORIES_KEY % ("default", lang)
)
if not data:
data = self.cb_client.get(
APPSTORE_CATEGORIES_KEY % ("default", "en")
)
return data or {}
except:
raise
#############################################################################
def appstore_categories_set(self, data, lang="en", brand="default"):
try:
self.lock.acquire()
self.cb_client.set(APPSTORE_CATEGORIES_KEY % (brand,lang), data)
except:
raise
finally:
self.lock.release()
#############################################################################
def run(self):
if not self._started:
self.start_process()
self._started = True
#############################################################################
def stop(self):
self.stop_process()
self._started = False
#############################################################################
def start_process(self):
try:
if self.started:
return
self.started = 1
t = threading.Thread(
target = self.process
)
t.start()
except:
raise
#############################################################################
def stop_process(self):
try:
self.started = 0
except:
raise
#############################################################################
def process(self):
store = self
while self.started:
try:
# HANDLE TRIALS
active_trials = store.appstore_all_active_trials()
t = time.time()
#print "Appstore: checking for active trials"
for user_id, apps in active_trials.items():
#print "Appstore: Found active trials for %s -> %s" % (user_id, apps)
info = store.appstore_user_info(user_id)
for app_name in apps:
trial = info.get("trials",{}).get(app_name)
if trial.get("status"):
continue
if store.appstore_trial_status(info, app_name, t=t)[2] <= 0:
self.dbg("Trial for %s has run out" % (app_name))
store.appstore_end_trial_for_user(user_id, app_name)
# HANDLE ENDING SUBSCRIPTIONS
ending_subs = self.cb_client.view("appstore", "user_ending_subs", stale=False)
for row in ending_subs:
data = row.get("value")
sub_end = data.get("t")
if sub_end <= t:
self.dbg("Subscription '%s' ran out for '%s'" % (data.get("a"), data.get("u")))
self.appstore_remove_app_from_user(data.get("u"), data.get("a"), reason="subscription was not renewed.")
except Exception,inst:
self.dbg("Error while checking for active trials (see log for details): %s"%str(inst))
self.dbg(traceback.format_exc(), verbose=False)
finally:
time.sleep(1)
#############################################################################
def xbahn_notify_appstore_trial_ended(self, user_id, app_name):
app_listing = self.appstore_app_listing(app_name)
self.xbahn_notify_user(
user_id,
{
"type" : "appstore_trial_ended",
"app_name" : app_listing.get("title")
}
)
#############################################################################
def xbahn_notify_user(self, user_id, message):
if self.xbahn:
data = {
"user_id" : user_id
}
data[str(uuid.uuid4())] = message
self.xbahn.send(
None,
"__U.%s.notify" % user_id,
data
)
#############################################################################
def billing_load_pubkey(self, path):
self.billing_pubkey = cu.BillingCrypto.load_rsa_key(None, filepath=path)
#############################################################################
def appstore_import_companies(self, path):
print "Importing company information"
f = open(path,"r")
companies = json.load(f)
f.close()
for name, info in companies.items():
self.appstore_remove_company(name)
self.appstore_add_company(**info)
#############################################################################
def appstore_import_listings(self, path, om=False):
f = open(os.path.join(path),"r")
listings = json.load(f)
f.close()
for name, info in listings.get("listings", {}).items():
if not om:
print "Importing listing: %s" % name
else:
print "Importing listing media only: %s" % name
approve = True
if not om:
app_listing = self.appstore_app_listing(name)
if app_listing:
self.appstore_change_listing(name, info)
else:
self.appstore_add_listing(
name,
**info
)
# import media
media_path = os.path.join(os.path.dirname(path), name)
if os.path.exists(media_path):
print "removing old media components for %s" % name
for comp in info.get("media", []):
self.appstore_listing_remove_component(name, comp)
for file in os.listdir(media_path):
if file[0] == ".":
continue
media_file_path = os.path.join(media_path, file)
mime = mimetypes.guess_type(media_file_path)
print "Importing media component for listing %s: %s %s" % (
name, file, mime
)
f = open(media_file_path, "r")
self.appstore_listing_add_component(
name, file, f.read(), mime
)
f.close()
self.appstore_change_listing_status(name, 1)
#############################################################################
def appstore_import_categories(self, path):
try:
f = open(path,"r")
categories = json.load(f)
f.close()
for brand, lang_dict in categories.items():
for lang, data in lang_dict.items():
self.appstore_categories_set(data, lang, brand)
except:
raise
#############################################################################
def appstore_import_filters(self, path):
try:
f = open(path,"r")
filters = json.load(f)
f.close()
for brand, data in filters.items():
self.appstore_filters_set(data, brand=brand)
except:
raise
```
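The listing documents created above follow a fixed key layout in the key-value store. Below is a minimal sketch of that layout with an invented listing; only the key templates and the field names come from `appstore_add_listing`, all values are made up for illustration.

```python
# Hypothetical listing document as produced by appstore_add_listing(); the
# values are invented, the key templates are the module-level constants above.
APPSTORE_LISTING_KEY = ":appstore-listing:%s"
APPSTORE_COMPONENT_KEY = ":appstore-component:%s.%s"

listing = {
    "type": "appstore_listing",
    "name": "example_app",
    "title": "Example App",
    "owner": "Example Corp",
    "app_name": "example_app",
    "modules": {"example_module": 3},  # module name -> permission flags (assumed values)
    "tags": ["example_app", "example app", "example corp"],
    "media": ["screenshot.png"],
    "status": 0,  # APPSTORE_STATUS_PENDING until approved
    "description": "Demo listing",
    "price": 9.99,
    "subscription": 30,  # payment due every 30 days
}

print(APPSTORE_LISTING_KEY % listing["name"])
# :appstore-listing:example_app
print(APPSTORE_COMPONENT_KEY % (listing["name"], "screenshot.png"))
# :appstore-component:example_app.screenshot.png
```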
#### File: wsgi/plugins/sadb.py
```python
from webapp import Plugin
from sqlalchemy import *
from sqlalchemy.orm import *
class Sadb(Plugin):
db_engine = None
url = None
def __init__(self, id='db'):
self.id = id
def start(self):
print "starting sadb plugin (%s)" % str(self.id)
dbconf = self.config.get(self.id, {})
self.url = dbconf.get('url')
self.pool_size = int(dbconf.get('pool_size', 30))
self.pool_recycle = int(dbconf.get('pool_recycle', 3600))
if self.url:
self.db_engine = create_engine(self.url, pool_size=self.pool_size, pool_recycle=self.pool_recycle)
self.db = create_session(self.db_engine)
self.meta = MetaData()
def stop(self):
print "stopping sadb plugin (%s)" % str(self.id)
self.db_engine = None
self.url = None
``` |
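The plugin above reads only a handful of keys from its config section. A hedged sketch of what that section might look like follows; the URL and pool sizes are invented, and how `self.config` gets populated is left to the surrounding webapp framework.

```python
# Illustrative only: the config shape Sadb.start() expects under its id
# ("db" by default). The database URL is a made-up example.
config = {
    "db": {
        "url": "mysql://user:password@localhost/exampledb",
        "pool_size": 30,       # optional, defaults to 30
        "pool_recycle": 3600,  # optional, defaults to 3600 seconds
    }
}

# plugin = Sadb(id="db")   # wiring plugin.config is done by the webapp framework
# plugin.start()           # creates the engine, session and MetaData
```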
{
"source": "20c/vodka",
"score": 3
} |
#### File: vodka/config/configurator.py
```python
import vodka
import vodka.exceptions
class Configurator:
"""
Handles interactive configuration process guided
by specs defined via config Handler classes
Attributes:
plugin_manager (ConfigPluginManager): plugin manager instance to use
during plugin configuration
skip_defaults (bool): if True don't prompt for config variables
that have a default value assigned
action_required (list): will hold list of actions required after
configure call has been completed (if any)
"""
def __init__(self, plugin_manager, skip_defaults=False):
self.plugin_manager = plugin_manager
self.action_required = []
self.skip_defaults = skip_defaults
def configure(self, cfg, handler, path=""):
"""
Start configuration process for the provided handler
Args:
cfg (dict): config container
handler (config.Handler class): config handler to use
path (str): current path in the configuration progress
"""
# configure simple value attributes (str, int etc.)
for name, attr in handler.attributes():
if cfg.get(name) is not None:
continue
if attr.expected_type not in [list, dict]:
cfg[name] = self.set(handler, attr, name, path, cfg)
elif attr.default is None and not hasattr(handler, "configure_%s" % name):
self.action_required.append(
(f"{path}.{name}: {attr.help_text}").strip(".")
)
# configure attributes that have complex handlers defined
# on the config Handler class (class methods with a
# configure_ prefix)
for name, attr in handler.attributes():
if cfg.get(name) is not None:
continue
if hasattr(handler, "configure_%s" % name):
fn = getattr(handler, "configure_%s" % name)
fn(self, cfg, f"{path}.{name}")
if attr.expected_type in [list, dict] and not cfg.get(name):
try:
del cfg[name]
except KeyError:
pass
def set(self, handler, attr, name, path, cfg):
"""
Obtain value for config variable, by prompting the user
for input and substituting a default value if needed.
Also does validation on user input
"""
full_name = (f"{path}.{name}").strip(".")
# obtain default value
if attr.default is None:
default = None
else:
try:
comp = vodka.component.Component(cfg)
default = handler.default(name, inst=comp)
if self.skip_defaults:
self.echo(f"{full_name}: {default} [default]")
return default
except Exception:
raise
# render explanation
self.echo("")
self.echo(attr.help_text)
if attr.choices:
self.echo("choices: %s" % ", ".join([str(c) for c in attr.choices]))
# obtain user input and validate until input is valid
b = False
while not b:
try:
if type(attr.expected_type) == type:
r = self.prompt(full_name, default=default, type=attr.expected_type)
r = attr.expected_type(r)
else:
r = self.prompt(full_name, default=default, type=str)
except ValueError:
self.echo("Value expected to be of type %s" % attr.expected_type)
try:
b = handler.check({name: r}, name, path)
except Exception as inst:
if hasattr(inst, "explanation"):
self.echo(inst.explanation)
else:
raise
return r
def echo(self, message):
"""override this function with something that echos a message to the user"""
pass
def prompt(self, *args, **kwargs):
"""override this function to prompt for user input"""
return None
```
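`echo()` and `prompt()` are deliberately left as no-ops so the embedding tool decides how to talk to the user. Here is a minimal terminal-driven sketch; the `ExampleHandler` and its attributes are invented, only the `Configurator`/`Handler` API comes from the modules above (the test module further down drives the same API with canned answers instead).

```python
# Minimal sketch: driving the Configurator from a terminal.
import vodka.config
import vodka.config.configurator


class ExampleHandler(vodka.config.Handler):
    host = vodka.config.Attribute(str, help_text="address to bind to")
    port = vodka.config.Attribute(int, default=8080, help_text="port to listen on")


class CliConfigurator(vodka.config.configurator.Configurator):
    def echo(self, message):
        print(message)

    def prompt(self, name, default=None, type=str, *args, **kwargs):
        raw = input(f"{name} [{default}]: ").strip()
        return type(raw) if raw else default


if __name__ == "__main__":
    cfg = {}
    CliConfigurator(None).configure(cfg, ExampleHandler)
    print(cfg)  # e.g. {'host': '0.0.0.0', 'port': 8080}
```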
#### File: vodka/config/__init__.py
```python
import munge
import types
import os
import os.path
import vodka.app
import vodka.exceptions
import vodka.log
import vodka.util
from . import validators
raw = {}
instance = {}
def is_config_container(v):
"""
checks whether v is of type list,dict or Config
"""
cls = type(v)
return issubclass(cls, list) or issubclass(cls, dict) or issubclass(cls, Config)
class Attribute:
"""
A configuration attribute
"""
def __init__(self, expected_type, **kwargs):
"""
Args:
expected_type (class or function): type expected for this attribute, if specified
as a function the result of the function will determine whether or not the value
passed type validation.
Kwargs:
default: if specified this value will be used as a default value, if not specified then
configuration of this attribute is treated as mandatory
help_text (str): explains the attribute
choices (list): list of valid value choices for this attribute, if set any value not
matching any of the choices will raise a configuration error
handler (function): when the value for this attribute is a collection of configuration
attributes (e.g. nested config) use this function to return the appropriate config
handler class to use to validate them
prepare (list): list of callables applied to prepare the value for this attribute
deprecated (str): if not None indicates that this attribute is still functional but
deprecated and will be removed in the vodka version specified in the value
"""
self.expected_type = expected_type
self.default = kwargs.get("default")
self.help_text = kwargs.get("help_text")
self.handler = kwargs.get("handler")
self.choices = kwargs.get("choices")
self.prepare = kwargs.get("prepare", [])
self.deprecated = kwargs.get("deprecated", False)
self.nested = kwargs.get("nested", 0)
self.field = kwargs.get("field")
def finalize(self, cfg, key_name, value, **kwargs):
pass
def preload(self, cfg, key_name, **kwargs):
pass
class Handler:
"""
Can be attached to any vodka application class or vodka
plugin and allows to setup default values and config
sanity checking
"""
@classmethod
def check(cls, cfg, key_name, path, parent_cfg=None):
"""
Checks that the config values specified in key name is
sane according to config attributes defined as properties
on this class
"""
attr = cls.get_attr_by_name(key_name)
if path != "":
attr_full_name = f"{path}.{key_name}"
else:
attr_full_name = key_name
if not attr:
# attribute specified by key_name is unknown, warn
raise vodka.exceptions.ConfigErrorUnknown(attr_full_name)
if attr.deprecated:
vodka.log.warn(
"[config deprecated] {} is being deprecated in version {}".format(
attr_full_name, attr.deprecated
)
)
# prepare data
for prepare in attr.prepare:
cfg[key_name] = prepare(cfg[key_name])
if hasattr(cls, "prepare_%s" % key_name):
prepare = getattr(cls, "prepare_%s" % key_name)
cfg[key_name] = prepare(cfg[key_name], config=cfg)
value = cfg.get(key_name)
if isinstance(attr.expected_type, types.FunctionType):
# expected type holds a validator function
p, reason = attr.expected_type(value)
if not p:
# validator did not pass
raise vodka.exceptions.ConfigErrorValue(
attr_full_name, attr, value, reason=reason
)
elif attr.expected_type != type(value):
# attribute type mismatch
raise vodka.exceptions.ConfigErrorType(attr_full_name, attr)
if attr.choices and value not in attr.choices:
# attribute value not valid according to
# available choices
raise vodka.exceptions.ConfigErrorValue(attr_full_name, attr, value)
if hasattr(cls, "validate_%s" % key_name):
# custom validator for this attribute was found
validator = getattr(cls, "validate_%s" % key_name)
valid, reason = validator(value)
if not valid:
# custom validator failed
raise vodka.exceptions.ConfigErrorValue(
attr_full_name, attr, value, reason=reason
)
num_crit = 0
num_warn = 0
if is_config_container(value) and attr.handler:
if type(value) == dict or issubclass(type(value), Config):
keys = list(value.keys())
elif type(value) == list:
keys = list(range(0, len(value)))
else:
return
for k in keys:
if not is_config_container(value[k]):
continue
handler = attr.handler(k, value[k])
if issubclass(handler, Handler):
h = handler
else:
h = getattr(handler, "Configuration", None)
# h = getattr(attr.handler(k, value[k]), "Configuration", None)
if h:
if (
type(k) == int
and type(value[k]) == dict
and value[k].get("name")
):
_path = "{}.{}".format(attr_full_name, value[k].get("name"))
else:
_path = f"{attr_full_name}.{k}"
_num_crit, _num_warn = h.validate(
value[k], path=_path, nested=attr.nested, parent_cfg=cfg
)
h.finalize(
value,
k,
value[k],
attr=attr,
attr_name=key_name,
parent_cfg=cfg,
)
num_crit += _num_crit
num_warn += _num_warn
attr.finalize(cfg, key_name, value, num_crit=num_crit)
return (num_crit, num_warn)
@classmethod
def finalize(cls, cfg, key_name, value, **kwargs):
"""
Will be called after validation for a config variable
is completed
"""
pass
@classmethod
def validate(cls, cfg, path="", nested=0, parent_cfg=None):
"""
Validates a section of a config dict. Will automatically
validate child sections as well if their attribute pointers
are instantiated with a handler property
"""
# number of critical errors found
num_crit = 0
# number of non-critical errors found
num_warn = 0
# check for missing keys in the config
for name in dir(cls):
if nested > 0:
break
try:
attr = cls.get_attr_by_name(name)
if isinstance(attr, Attribute):
if attr.default is None and name not in cfg:
# no default value defined, which means it's required
# to be set in the config file
if path:
attr_full_name = f"{path}.{name}"
else:
attr_full_name = name
raise vodka.exceptions.ConfigErrorMissing(attr_full_name, attr)
attr.preload(cfg, name)
except vodka.exceptions.ConfigErrorMissing as inst:
if inst.level == "warn":
vodka.log.warn(inst.explanation)
num_warn += 1
elif inst.level == "critical":
vodka.log.error(inst.explanation)
num_crit += 1
if type(cfg) in [dict, Config]:
keys = list(cfg.keys())
if nested > 0:
for _k, _v in list(cfg.items()):
_num_crit, _num_warn = cls.validate(
_v, path=(f"{path}.{_k}"), nested=nested - 1, parent_cfg=cfg
)
num_crit += _num_crit
num_warn += _num_warn
return num_crit, num_warn
elif type(cfg) == list:
keys = list(range(0, len(cfg)))
else:
raise ValueError("Cannot validate non-iterable config value")
# validate existing keys in the config
for key in keys:
try:
_num_crit, _num_warn = cls.check(cfg, key, path)
num_crit += _num_crit
num_warn += _num_warn
except (
vodka.exceptions.ConfigErrorUnknown,
vodka.exceptions.ConfigErrorValue,
vodka.exceptions.ConfigErrorType,
) as inst:
if inst.level == "warn":
vodka.log.warn(inst.explanation)
num_warn += 1
elif inst.level == "critical":
vodka.log.error(inst.explanation)
num_crit += 1
return num_crit, num_warn
@classmethod
def get_attr_by_name(cls, name):
"""
Return attribute by name - will consider value in
attribute's `field` property
"""
for attr_name, attr in cls.attributes():
if attr_name == name:
return attr
return None
@classmethod
def default(cls, key_name, inst=None):
attr = cls.get_attr_by_name(key_name)
if not attr:
raise KeyError("No config attribute defined with the name '%s'" % key_name)
if attr.default and callable(attr.default):
return attr.default(key_name, inst)
return attr.default
@classmethod
def attributes(cls):
"""
yields tuples for all attributes defined on this handler
tuple yielded:
name (str), attribute (Attribute)
"""
for k in dir(cls):
v = getattr(cls, k)
if isinstance(v, Attribute):
name = v.field or k
yield name, v
class ComponentHandler(Handler):
"""
This is the base config handler that will be attached to any
vodka application or plugin
"""
# config attribute: enabled
enabled = Attribute(
bool,
default=True,
help_text="specifies whether or not this component should be initialized and started",
)
class InstanceHandler(Handler):
"""
This is the config handler for the vodka main config
"""
apps = Attribute(
dict,
help_text="Holds the registered applications",
default={},
handler=lambda k, v: vodka.app.get_application(k),
)
plugins = Attribute(
list,
help_text="Holds the registered plugins",
default=[],
handler=lambda k, v: vodka.plugin.get_plugin_class(v.get("type")),
)
data = Attribute(list, help_text="Data type configuration", default=[])
logging = Attribute(
dict, help_text="Python logger configuration", default={"version": 1}
)
@classmethod
def configure_plugins(cls, configurator, cfg, path):
configurator.echo("")
configurator.echo("Configure plugins")
configurator.echo("")
plugin_type = configurator.prompt("Add plugin", default="skip")
if "plugins" not in cfg:
cfg["plugins"] = []
while plugin_type != "skip":
plugin_name = configurator.prompt("Name", default=plugin_type)
try:
plugin_class = configurator.plugin_manager.get_plugin_class(plugin_type)
plugin_cfg = {"type": plugin_type, "name": plugin_name}
configurator.configure(
plugin_cfg,
plugin_class.Configuration,
path=f"{path}.{plugin_name}",
)
cfg["plugins"].append(plugin_cfg)
except Exception as inst:
configurator.echo(inst)
plugin_type = configurator.prompt("Add plugin", default="skip")
@classmethod
def configure_apps(cls, configurator, cfg, path):
configurator.echo("")
configurator.echo("Configure applications")
configurator.echo("")
if "apps" not in cfg:
cfg["apps"] = {}
name = configurator.prompt("Add application (name)", default="skip")
while name != "skip":
app_cfg = {}
configurator.configure(
app_cfg, vodka.app.Application.Configuration, path=f"{path}.{name}"
)
vodka.app.load(name, app_cfg)
app = vodka.app.get_application(name)
configurator.configure(app_cfg, app.Configuration, path=f"{path}.{name}")
cfg["apps"][name] = app_cfg
name = configurator.prompt("Add application (name)", default="skip")
class Config(munge.Config):
defaults = {"config": {}, "config_dir": "~/.vodka", "codec": "yaml"}
def read(self, config_dir=None, clear=False, config_file=None):
"""
The munge Config's read function only allows reading from
a config directory, but we also want to be able to read
straight from a config file as well
"""
if config_file:
data_file = os.path.basename(config_file)
data_path = os.path.dirname(config_file)
if clear:
self.clear()
config = munge.load_datafile(data_file, data_path, default=None)
if not config:
raise OSError("Config file not found: %s" % config_file)
munge.util.recursive_update(self.data, config)
self._meta_config_dir = data_path
return
else:
return super().read(config_dir=config_dir, clear=clear)
```
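In practice a component declares its config spec by subclassing `Handler` and listing `Attribute` instances; `validate()` then walks a plain dict and logs anything that does not match. A small self-contained sketch, with invented attribute names and values:

```python
# Sketch of declaring a Handler and validating a dict against it.
import vodka.log
import vodka.config

vodka.log.set_loggers(vodka.log.default_config())  # validate() reports errors via vodka.log


class ServerConfig(vodka.config.Handler):
    host = vodka.config.Attribute(str, help_text="address to bind to")            # required
    port = vodka.config.Attribute(int, default=8080, help_text="listen port")      # optional
    debug = vodka.config.Attribute(bool, default=False, help_text="debug toggle")  # optional


cfg = {"host": "127.0.0.1", "port": "not-a-number"}
num_crit, num_warn = ServerConfig.validate(cfg)
print(num_crit, num_warn)  # 1 0  -> one critical error: port has the wrong type
```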
#### File: vodka/config/validators.py
```python
import os
def path(value):
"""Validates that the value is an existing path"""
if not value:
return (True, "")
return (os.path.exists(value), "path does not exist: %s" % value)
def host(value):
"""Validates that the value is a valid network location"""
if not value:
return (True, "")
try:
host, port = value.split(":")
except ValueError as _:
return (False, "value needs to be <host>:<port>")
try:
int(port)
except ValueError as _:
return (False, "port component of the host address needs to be a number")
return (True, "")
```
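Because `Handler.check()` treats a function-typed `expected_type` as a validator, these helpers can be passed straight to an `Attribute`. A short sketch with invented attribute names and defaults:

```python
# Sketch: plugging the validators above into config Attributes.
import vodka.config
from vodka.config import validators


class ConnectionConfig(vodka.config.Handler):
    api_host = vodka.config.Attribute(
        validators.host, default="localhost:8080", help_text="api host as <host>:<port>"
    )
    tmp_path = vodka.config.Attribute(
        validators.path, default="/tmp", help_text="existing scratch directory"
    )


print(validators.host("example.com:443"))  # (True, '')
print(validators.host("example.com"))      # (False, 'value needs to be <host>:<port>')
```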
#### File: vodka/data/data_types.py
```python
import vodka.config
import vodka.component
data_types = {}
def instantiate_from_config(cfg):
"""Instantiate data types from config"""
for h in cfg:
if h.get("type") in data_types:
raise KeyError("Data type '%s' already exists" % h)
data_types[h.get("type")] = DataType(h)
def get(name):
"""
Get data type class by data type name
Args:
name (str): data type name
Returns:
the data type class with the matching name
Raises:
KeyError: Unknown data type
"""
if name not in data_types:
raise KeyError("Unknown data type '%s'" % name)
return data_types.get(name)
class DataType(vodka.component.Component):
"""
Base class for all data types
Classes:
Configuration: configuration handler
"""
class Configuration(vodka.config.ComponentHandler):
handlers = vodka.config.Attribute(
list, default=[], help_text="data handlers to apply to this data"
)
@property
def handlers(self):
"""handlers specified for this data type via config"""
return self.get_config("handlers")
def __init__(self, config):
super().__init__(config)
```
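Data types are declared as a list in the instance config (the `data` attribute of `InstanceHandler`) and registered at startup via `instantiate_from_config()`. The following sketch shows the expected shape; the "prices" type name and the "symbol" field are invented, while the handler entries reference the "index" and "store" handlers defined in vodka/data/handlers.py.

```python
# Illustrative data-type configuration for instantiate_from_config().
data_config = [
    {
        "type": "prices",  # registration name, later retrieved via data_types.get("prices")
        "handlers": [
            {"type": "index", "index": "symbol"},    # re-index incoming rows by "symbol"
            {"type": "store", "container": "dict"},  # keep the indexed rows in vodka storage
        ],
    }
]

# import vodka.data.data_types
# vodka.data.data_types.instantiate_from_config(data_config)
# vodka.data.data_types.get("prices").handlers  # -> the two handler configs above
```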
#### File: vodka/data/handlers.py
```python
import vodka.config
import vodka.component
import vodka.storage
import vodka.data.data_types
import vodka.util
handlers = {}
class register(vodka.util.register):
class Meta:
objects = handlers
name = "data handler"
def get(handle):
if handle not in handlers:
raise KeyError("Data handler with handle %s does not exist" % handle)
return handlers.get(handle)
def instantiate(cfg, data_id):
cls = get(cfg.get("type"))
return cls(cfg, data_id)
def instantiate_for_data_type(name, data_id=None):
data_type = vodka.data.data_types.get(name)
if not data_id:
data_id = data_type
r = []
for h in data_type.handlers:
r.append(instantiate(h, data_id))
return r
class Handler(vodka.component.Component):
"""
Base data handler class. A data handler can be attached to a data type
to manipulate data of that type as it enters vodka.
Attributes:
config (dict or MungeConfg): configuration collection
data_id (str): data id for this handler
Classes:
Configuration: Configuration Handler
"""
class Configuration(vodka.config.ComponentHandler):
pass
def __init__(self, config, data_id):
"""
Args:
config (dict or MungeConfig): configuration collection
data_id (str): data id for this handler, needs to be unique
"""
super().__init__(config)
self.data_id = data_id
self.init()
def __call__(self, data, caller=None):
pass
def init(self):
pass
@register("index")
class IndexHandler(Handler):
"""
Will re-index data in a dictionary, indexed to the
key specified in the config
"""
class Configuration(Handler.Configuration):
index = vodka.config.Attribute(str, help_text="the field to use for indexing")
def __call__(self, data, caller=None):
if "data" in data:
r = {}
for d in data["data"]:
if isinstance(d, dict):
r[d[self.get_config("index")]] = d
elif d:
self.log.debug(
"Only dictionary type data rows may be re-indexed, row ignored"
)
else:
self.log.debug("Empty data row ignored.")
data["data"] = r
else:
self.log.debug("Empty data object ignored")
return data
@register("store")
class StorageHandler(Handler):
"""
Will store the data in the vodka storage.
Data will be stored using data type and data id as keys
"""
class Configuration(Handler.Configuration):
container = vodka.config.Attribute(
str,
help_text="specify how to store data",
choices=["list", "dict"],
default="list",
)
limit = vodka.config.Attribute(
int,
default=500,
help_text="Limit the maximum amount of items to keep; only applies to list storage",
)
def validate_limit(self, value):
if value < 1:
return False, "Needs to be greater than 1"
return True, ""
def __call__(self, data, caller=None):
if type(self.storage) == list:
self.storage.append(data)
l = len(self.storage)
while l > self.get_config("limit"):
self.storage.pop(0)
l -= 1
elif type(self.storage) == dict:
self.storage.update(**data["data"])
return data
def init(self):
if self.get_config("container") == "list":
self.storage = vodka.storage.get_or_create(self.data_id, [])
elif self.get_config("container") == "dict":
self.storage = vodka.storage.get_or_create(self.data_id, {})
else:
raise ValueError(
"Unknown storage container type: %s" % self.get_config("container")
)
```
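Additional handlers are added with the `register` decorator and instantiated per data type from config. Below is a sketch of a custom handler; the "tag" handle, its config attribute and the sample payload are invented for illustration.

```python
# Sketch of a custom data handler registered alongside "index" and "store".
import vodka.config
import vodka.data.handlers


@vodka.data.handlers.register("tag")
class TagHandler(vodka.data.handlers.Handler):
    """Attaches a static tag to every data object passed through it."""

    class Configuration(vodka.data.handlers.Handler.Configuration):
        tag = vodka.config.Attribute(str, help_text="tag to attach to each data object")

    def __call__(self, data, caller=None):
        data["tag"] = self.get_config("tag")
        return data


handler = vodka.data.handlers.instantiate({"type": "tag", "tag": "demo"}, "example_data")
print(handler({"data": [{"id": 1}]}))  # {'data': [{'id': 1}], 'tag': 'demo'}
```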
#### File: vodka/plugins/__init__.py
```python
import pluginmgr
import pluginmgr.config
import time
import urllib.parse
import vodka
import vodka.log
import vodka.config
import vodka.component
import vodka.data.handlers
def get_plugin_by_name(name):
return vodka.plugin.get_instance(name)
def get_plugin_instance(name):
return get_plugin_by_name(name)
def get_plugin_class(typ):
return vodka.plugin.get_plugin_class(typ)
class PluginBase(vodka.component.Component, pluginmgr.config.PluginBase):
class Configuration(vodka.component.Component.Configuration):
async_handler = vodka.config.Attribute(
str,
default="thread",
choices=["thread", "gevent"],
field="async",
help_text="specifies how to run this plugin async",
)
type = vodka.config.Attribute(str, help_text="plugin registration type string")
name = vodka.config.Attribute(
str,
default=lambda x, i: i.type,
help_text="plugin instance name, needs to be unique",
)
start_manual = vodka.config.Attribute(
bool, default=False, help_text="disable automatic start of this plugin"
)
@property
def config(self):
return self.pluginmgr_config
@property
def name(self):
return self.config.get("name")
def __init__(self, config, *args, **kwargs):
# cannot init component because pluginmgr turns config into an attr
pluginmgr.config.PluginBase.__init__(self, config)
def init(self):
"""executed during plugin initialization, app instances not available yet"""
pass
def setup(self):
"""executed before plugin is started, app instances available"""
pass
def start(self):
pass
class TimedPlugin(PluginBase):
class Configuration(PluginBase.Configuration):
interval = vodka.config.Attribute(
float,
help_text="minimum interval between calls to work method (in seconds)",
)
def sleep(self, n):
if self.get_config("async") == "gevent":
import gevent
gevent.sleep(n)
else:
time.sleep(n)
def start(self):
self._run()
def stop(self):
self.run_level = 0
def work(self):
pass
def _run(self):
self.run_level = 1
interval = self.get_config("interval")
while self.run_level:
start = time.time()
self.work()
done = time.time()
elapsed = done - start
if elapsed <= interval:
sleeptime = interval - elapsed
if sleeptime > 0:
self.sleep(sleeptime)
class DataPlugin(TimedPlugin):
"""
Plugin that allows to retrieve data from a source on an
interval
Don't instantiate this, but use as a base for other plugins.
"""
class Configuration(TimedPlugin.Configuration):
data = vodka.config.Attribute(
str,
help_text="specify the data type of data fetched by this plugin. Will also apply the vodka data handler with matching name if it exists",
)
data_id = vodka.config.Attribute(
str,
help_text="data id for data handled by this plugin, will default to the plugin name",
default="",
)
@property
def data_type(self):
return self.get_config("data")
@property
def data_id(self):
return self.get_config("data_id") or self.name
def init(self):
return
def work(self, data):
return vodka.data.handle(
self.data_type, data, data_id=self.data_id, caller=self
)
```
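`TimedPlugin` calls `work()` in a loop, sleeping whatever is left of the configured interval after each pass. A sketch of a subclass and the matching plugin entry for the instance config follows; the plugin name, the `message` attribute and the values are invented, and registration with the plugin manager is left to the embedding application.

```python
# Sketch of a timed plugin built on the base classes above.
import vodka.config
import vodka.plugins


class HeartbeatPlugin(vodka.plugins.TimedPlugin):

    class Configuration(vodka.plugins.TimedPlugin.Configuration):
        message = vodka.config.Attribute(str, default="ping", help_text="text to log")

    def work(self):
        # invoked roughly every `interval` seconds while the plugin is running
        self.log.debug(self.get_config("message"))


# matching entry for the "plugins" list validated by InstanceHandler above;
# "type" must match however the class is registered with the plugin manager.
plugin_config = {
    "type": "heartbeat",
    "name": "heartbeat",
    "interval": 5.0,    # minimum seconds between work() calls
    "async": "thread",  # or "gevent"
}
```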
#### File: vodka/tests/test_config.py
```python
import unittest
import uuid
import json
import vodka.log
import vodka.config
import vodka.config.shared as shared
import vodka.exceptions as exc
vodka.log.set_loggers(vodka.log.default_config())
def validator(value):
return value < 5, "needs to be smaller than 5"
class ListHandler(vodka.config.Handler):
a = vodka.config.Attribute(int, default=1, help_text="lh:a")
b = vodka.config.Attribute(int, help_text="lh:b")
class DictHandler(vodka.config.Handler):
a = vodka.config.Attribute(int, default=1, help_text="dh:a")
b = vodka.config.Attribute(int, help_text="dh:b")
class DictHandlerProxy(vodka.component.Component):
class Configuration(vodka.config.Handler):
a = vodka.config.Attribute(int, default=1, help_text="dh:a")
b = vodka.config.Attribute(int, help_text="dh:b")
class ConfigHandler(vodka.config.Handler):
a = vodka.config.Attribute(int, default=1, help_text="ht:a")
b = vodka.config.Attribute(int, help_text="ht:b")
d = vodka.config.Attribute(int, default=1, choices=[1, 2, 3], help_text="ht:d")
e = vodka.config.Attribute(int, default=1, help_text="ht:e")
f = vodka.config.Attribute(validator, default=1, help_text="ht:f")
g = vodka.config.Attribute(
int, default=lambda x, i: getattr(i, "default_g"), help_text="ht:g"
)
h = vodka.config.Attribute(int, default=1, prepare=[lambda x: x + 1])
i = vodka.config.Attribute(int, default=1)
j = vodka.config.Attribute(list, default=[], handler=lambda x, y: ListHandler)
k = vodka.config.Attribute(dict, default={}, handler=lambda x, y: DictHandler)
l = vodka.config.Attribute(dict, default={}, handler=lambda x, y: DictHandlerProxy)
depr = vodka.config.Attribute(
int, default=1, help_text="ht:depr", deprecated="2.2.0"
)
@classmethod
def validate_e(self, value):
return value < 5, "needs to be smaller than 5"
@classmethod
def prepare_i(self, value, config=None):
return value + 1
class SharedConfigSubHandler(shared.RoutersHandler):
first = vodka.config.Attribute(str, default="")
second = vodka.config.Attribute(str, default="")
third = vodka.config.Attribute(str, default="")
sub = vodka.config.Attribute(int)
class SharedConfigHandler(vodka.config.Handler):
a = shared.Attribute(dict, share="a:merge")
b = shared.Attribute(list, share="b:merge")
c = shared.Attribute(list, share="c:append")
d = shared.Container(
dict,
share="d:merge",
handler=lambda x, u: shared.Routers(
dict, "d:merge", handler=SharedConfigSubHandler
),
)
e = shared.Container(
dict,
nested=1,
share="e:merge",
default={"group_1": {"sub_1": {"sub": 1, "first": "hello"}}},
handler=lambda x, u: shared.Routers(
dict, "e:merge", handler=SharedConfigSubHandler
),
)
f = shared.Container(
dict,
nested=1,
share="e:merge",
default={"group_1": {"sub_2": {"sub": 2, "first": "world"}}},
handler=lambda x, u: shared.Routers(
dict, "e:merge", handler=SharedConfigSubHandler
),
)
class TestConfig(unittest.TestCase):
default_g = 2
def test_prepare(self):
cfg = {"h": 1, "b": 1}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(cfg["h"], 2)
def test_prepare_via_handler(self):
cfg = {"i": 1, "b": 1}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(cfg["i"], 2)
def test_nested_validation(self):
# should pass validation
cfg = {"b": 1, "j": [{"a": 1, "b": 1}]}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(c, 0)
self.assertEqual(w, 0)
# should detect b missing from list element
cfg = {"b": 1, "j": [{"a": 1}]}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(c, 1)
self.assertEqual(w, 0)
# should detect b missing from dict element
cfg = {"b": 1, "k": {"1": {"a": 1}}}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(c, 1)
self.assertEqual(w, 0)
# should detect b value mismatch in list element
cfg = {"b": 1, "j": [{"a": 1, "b": "z"}]}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(c, 1)
self.assertEqual(w, 0)
# should detect b missing from dict element and a value mismatch
cfg = {"b": 1, "l": {"1": {"a": "z"}}}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(c, 2)
self.assertEqual(w, 0)
def test_validation(self):
# this should validate without errors
cfg = {"a": 1, "b": 1}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(c, 0)
self.assertEqual(w, 0)
# this should return c=2 (2 critical errors) and
# w=1 (1 warning)
cfg = {"a": "test", "c": 3}
c, w = ConfigHandler.validate(cfg)
self.assertEqual(c, 2)
self.assertEqual(w, 1)
def test_check(self):
# this should raise ConfigErrorType
cfg = {"a": "invalid type"}
with self.assertRaises(exc.ConfigErrorType) as inst:
ConfigHandler.check(cfg, "a", "")
# this should raise ConfigErrorValue
cfg = {"d": 4}
with self.assertRaises(exc.ConfigErrorValue) as inst:
ConfigHandler.check(cfg, "d", "")
# this should raise ConfigErrorUnknown
cfg = {"c": 1}
with self.assertRaises(exc.ConfigErrorUnknown) as inst:
ConfigHandler.check(cfg, "c", "")
# this should raise ConfigErrorValue (from custom validation in class method)
cfg = {"e": 6}
with self.assertRaises(exc.ConfigErrorValue) as inst:
ConfigHandler.check(cfg, "e", "")
# this should raise ConfigErrorValue (from custom validation in validator)
cfg = {"f": 6}
with self.assertRaises(exc.ConfigErrorValue) as inst:
ConfigHandler.check(cfg, "f", "")
def test_default(self):
# default hardset value
self.assertEqual(ConfigHandler.default("a"), 1)
# default lambda value with self passed as instance
self.assertEqual(ConfigHandler.default("g", inst=self), 2)
def test_shared_config(self):
def uniq():
return str(uuid.uuid4())
cfg_a = {"__cfg_a": "a"}
cfg_b = {"__cfg_b": "b"}
i = uniq()
i2 = uniq()
i3 = uniq()
for k in ["a"]:
cfg_a[k] = {"first": i, "second": i2}
cfg_b[k] = {"third": i2, "second": i}
for k in ["b", "c"]:
cfg_a[k] = [i, i2]
cfg_b[k] = [i, i2]
cfg_a["d"] = {
"sub_1": {"first": i, "second": i2, "sub": 1},
"sub_2": {"first": i, "second": i2, "sub": 2},
}
cfg_b["d"] = {
"sub_1": {"third": i, "second": i2, "sub": 1},
"sub_2": {"first": i2, "second": i, "sub": 2},
}
cfg_b["e"] = {
"group_1": {"sub_3": {"first": "sup", "sub": 3}},
"group_2": {"sub_1": {"first": "well", "sub": 1}},
}
SharedConfigHandler.validate(cfg_a)
SharedConfigHandler.validate(cfg_b)
# test shared dict (merge)
self.assertEqual(cfg_a["a"], cfg_b["a"])
self.assertEqual(cfg_a["a"]["third"], i2)
self.assertEqual(cfg_a["a"]["second"], i)
self.assertEqual(cfg_a["a"]["first"], i)
# test shared list (merge)
self.assertEqual(cfg_a["b"], cfg_b["b"])
self.assertEqual(cfg_a["b"], [i, i2])
# test shared list (append)
self.assertEqual(cfg_a["c"], cfg_b["c"])
self.assertEqual(cfg_a["c"], [i, i2, i, i2])
print(list(cfg_b["e"].keys()))
print(json.dumps(cfg_a["e"], indent=2))
# test shared dicts in handler (merge)
self.assertEqual(cfg_a["d"], cfg_b["d"])
self.assertEqual(
cfg_a["d"],
{
"sub_1": {"first": i, "second": i2, "third": i, "sub": 1},
"sub_2": {"first": i2, "second": i, "sub": 2},
},
)
# make sure that default configs got shared as well
self.assertEqual(
cfg_a["e"],
{
"group_1": {
"sub_1": {"first": "hello", "sub": 1},
"sub_2": {"first": "world", "sub": 2},
"sub_3": {"first": "sup", "sub": 3},
},
"group_2": {"sub_1": {"first": "well", "sub": 1}},
},
)
```
#### File: vodka/tests/test_configurator.py
```python
import unittest
import vodka.config
import vodka.config.configurator
import sys
class Handler(vodka.config.Handler):
a = vodka.config.Attribute(str)
b = vodka.config.Attribute(str, default="something")
c = vodka.config.Attribute(list, default=[])
d = vodka.config.Attribute(dict, default={})
e = vodka.config.Attribute(dict, default={})
f = vodka.config.Attribute(dict, help_text="manually")
@classmethod
def configure_e(cls, configurator, cfg, path):
_cfg = {}
configurator.configure(_cfg, HandlerE, path="{}.{}".format(path, "test"))
cfg["e"] = {"test": _cfg}
class HandlerE(vodka.config.Handler):
a = vodka.config.Attribute(str)
b = vodka.config.Attribute(str, default="else")
class Configurator(vodka.config.configurator.Configurator):
def set_values(self, **kwargs):
self.prompt_values = kwargs
def prompt(self, k, default=None, *args, **kwargs):
return self.prompt_values.get(k, default)
class TestConfigurator(unittest.TestCase):
def test_configurator(self):
configurator = Configurator(None)
cfg = {}
configurator.set_values(
**{
"a": "what",
"b": "something",
"c": "nope",
"d": "nope",
"e.test.a": "who",
}
)
configurator.configure(cfg, Handler)
self.assertEqual(
cfg,
{"a": "what", "b": "something", "e": {"test": {"a": "who", "b": "else"}}},
)
self.assertEqual(configurator.action_required, ["f: manually"])
def test_configurator_skip_defaults(self):
configurator = Configurator(None, skip_defaults=True)
cfg = {}
configurator.set_values(
**{
"a": "what",
"b": "other",
"c": "nope",
"d": "nope",
"e.test.a": "who",
"e.test.b": "where",
}
)
configurator.configure(cfg, Handler)
self.assertEqual(
cfg,
{"a": "what", "b": "something", "e": {"test": {"a": "who", "b": "else"}}},
)
def test_configurator_override_defaults(self):
configurator = Configurator(None)
cfg = {}
configurator.set_values(
**{
"a": "what",
"b": "other",
"c": "nope",
"d": "nope",
"e.test.a": "who",
"e.test.b": "where",
}
)
configurator.configure(cfg, Handler)
self.assertEqual(
cfg, {"a": "what", "b": "other", "e": {"test": {"a": "who", "b": "where"}}}
)
def test_configurator_skip_existing(self):
configurator = Configurator(None)
cfg = {"a": "why"}
configurator.set_values(
**{
"a": "what",
"b": "other",
"c": "nope",
"d": "nope",
"e.test.a": "who",
"e.test.b": "where",
}
)
configurator.configure(cfg, Handler)
self.assertEqual(
cfg, {"a": "why", "b": "other", "e": {"test": {"a": "who", "b": "where"}}}
)
```
#### File: vodka/tests/test_config_validators.py
```python
import unittest
import vodka.config.validators
class TestConfigValidators(unittest.TestCase):
def test_path_validator(self):
b, d = vodka.config.validators.path(__file__)
self.assertEqual(b, True)
def test_host_validator(self):
b, d = vodka.config.validators.host("host:1")
self.assertEqual(b, True)
b, d = vodka.config.validators.host("host")
self.assertEqual(b, False)
b, d = vodka.config.validators.host("host:b")
self.assertEqual(b, False)
```
#### File: vodka/tests/test_instance.py
```python
import unittest
import vodka.app
import vodka.instance
@vodka.app.register("app_a2")
class AppA(vodka.app.Application):
initialized = False
def setup(self):
self.initialized = True
@vodka.app.register("app_b2")
class AppB(vodka.app.Application):
pass
@vodka.app.register("app_versioned")
class AppV(vodka.app.WebApplication):
version = "1.0.0"
APP_CONFIG = {
"apps": {
AppA.handle: {"enabled": True},
AppB.handle: {"enabled": False},
AppV.handle: {"enabled": True},
}
}
class TestInstance(unittest.TestCase):
def test_instantiate(self):
vodka.instance.instantiate(APP_CONFIG)
inst_a = vodka.instance.get_instance(AppA.handle)
self.assertEqual(inst_a.handle, AppA.handle)
with self.assertRaises(KeyError):
vodka.instance.get_instance(AppB.handle)
def test_setup(self):
vodka.instance.instantiate(APP_CONFIG)
inst_a = vodka.instance.get_instance(AppA.handle)
vodka.instance.ready()
self.assertEqual(inst_a.initialized, True)
def test_app_versioning(self):
vodka.instance.instantiate(APP_CONFIG)
inst_a = vodka.instance.get_instance(AppA.handle)
inst_v = vodka.instance.get_instance(AppV.handle)
self.assertEqual(inst_v.versioned_handle(), "app_versioned/1.0.0")
self.assertEqual(
inst_v.versioned_path("app_versioned/b/c"), "app_versioned/1.0.0/b/c"
)
self.assertEqual(
inst_v.versioned_url("app_versioned/b/c"), "app_versioned/b/c?v=1.0.0"
)
self.assertEqual(inst_a.versioned_handle(), "app_a2")
```
#### File: vodka/tests/test_util.py
```python
import unittest
import vodka.util
class TestUtil(unittest.TestCase):
def test_dict_get_path(self):
a = {"1": {"2": {"3": "end"}}}
b = {"1": [{"x": "end", "name": "a"}, {"c": {"x": "end"}, "name": "b"}]}
self.assertEqual(vodka.util.dict_get_path(a, "1.2.3"), "end")
self.assertEqual(vodka.util.dict_get_path(a, "a.b.c"), None)
self.assertEqual(vodka.util.dict_get_path(a, "a.b.c", default="end"), "end")
self.assertEqual(vodka.util.dict_get_path(b, "1.a.x"), "end")
self.assertEqual(vodka.util.dict_get_path(b, "1.b.c.x"), "end")
self.assertEqual(vodka.util.dict_get_path(b, "1.c.x"), None)
``` |
{
"source": "20c/vodka-xbahn",
"score": 2
} |
#### File: pubsub/test_app/application.py
```python
import vodka
import vodka.app
import vodka.data.renderers
import vodka.storage
@vodka.app.register('test_app')
class MyApplication(vodka.app.WebApplication):
@vodka.data.renderers.RPC(errors=True)
def index(self, data, *args, **kwargs):
data.extend(vodka.storage.get("xbahn_server.test"))
``` |
{
"source": "20dzhong/pyScan",
"score": 3
} |
#### File: 20dzhong/pyScan/pyScan.py
```python
from functions.utils import *
def process(img):
doc = scan(img)
kernel = np.ones((3, 3), np.uint8)
doc = cv2.erode(doc, kernel, iterations=1)
doc = cv2.dilate(doc, kernel, iterations=1)
doc = padding(doc)
return doc
``` |
{
"source": "20Fall-NYU-DevOps-Suppliers/Suppliers",
"score": 3
} |
#### File: Suppliers/service/service.py
```python
import sys
import uuid
import logging
from functools import wraps
from flask import jsonify, request, make_response, abort, url_for
from flask_api import status # HTTP Status Codes
from flask_restplus import Api, Resource, fields, reqparse, inputs, apidoc
from werkzeug.exceptions import NotFound
from service.models import Supplier, DataValidationError, DatabaseConnectionError
from . import app
# Error handlers require app to be initialized so we must import
# them only after we have initialized the Flask app instance
# Document the type of authorization required
authorizations = {
'apikey': {
'type': 'apiKey',
'in': 'header',
'name': 'X-Api-Key'
}
}
# initialize DB without @app.before_first_request, to prevent nosetests using supplier DB
Supplier.init_db("suppliers")
######################################################################
# GET HOME PAGE
######################################################################
@app.route('/')
def index():
""" Render Home Page"""
return app.send_static_file('index.html')
######################################################################
# Configure Swagger before initializing it
######################################################################
api = Api(app,
version='1.0.0',
title='Supplier Demo REST API Service',
description='This is a sample server Supplier store server.',
default='Suppliers',
default_label='Supplier shop operations',
doc='/apidocs', # default also could use doc='/apidocs/'
authorizations=authorizations
)
# Define the model so that the docs reflect what can be sent
supplier_model = api.model('Supplier', {
'_id': fields.String(readOnly = True,
description='The unique id assigned internally by service'),
'name': fields.String(required=True,
description='The name of the Supplier'),
'like_count': fields.Integer(required=False,
description='The like count of the Supplier'),
'is_active': fields.Boolean(required=False,
description='Is the Supplier active?'),
'rating': fields.Float(required=False,
description='The rating of the Supplier'),
'products': fields.List(fields.Integer,required=False,
description='List of products the Supplier provide')
})
create_model = api.model('Supplier', {
'name': fields.String(required=True,
description='The name of the Supplier'),
'like_count': fields.Integer(required=False,
description='The like count of the Supplier'),
'is_active': fields.Boolean(required=False,
description='Is the Supplier active?'),
'rating': fields.Float(required=False,
description='The rating of the Supplier'),
'products': fields.List(fields.Integer,required=False,
description='List of products the Supplier provide')
})
# query string arguments
supplier_args = reqparse.RequestParser()
supplier_args.add_argument('name', type=str, required=False, help='List Suppliers by name')
supplier_args.add_argument('like_count', type=int, required=False, help='List Suppliers by like_count')
supplier_args.add_argument('is_active', type=bool, required=False, help='List Suppliers by is_active')
supplier_args.add_argument('rating', type=float, required=False, help='List Suppliers by rating')
supplier_args.add_argument('product_id', type=int, required=False, help='List Suppliers by product_id')
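# Illustrative (hypothetical) queries handled by the list endpoint further below:
#   GET /suppliers?is_active=true    -> only active suppliers
#   GET /suppliers?rating=4.0        -> suppliers rated higher than 4.0
#   GET /suppliers?product_id=42     -> suppliers whose products list contains 42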
######################################################################
# Special Error Handlers
######################################################################
@api.errorhandler(DataValidationError)
def request_validation_error(error):
""" Handles Value Errors from bad data """
message = str(error)
app.logger.error(message)
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'error': 'Bad Request',
'message': message
}, status.HTTP_400_BAD_REQUEST
@api.errorhandler(DatabaseConnectionError)
def database_connection_error(error):
""" Handles Database Errors from connection attempts """
message = str(error)
app.logger.critical(message)
return {
'status_code': status.HTTP_503_SERVICE_UNAVAILABLE,
'error': 'Service Unavailable',
'message': message
}, status.HTTP_503_SERVICE_UNAVAILABLE
######################################################################
# Authorization Decorator
######################################################################
# def token_required(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# token = None
# if 'X-Api-Key' in request.headers:
# token = request.headers['X-Api-Key']
#
# if app.config.get('API_KEY') and app.config['API_KEY'] == token:
# return f(*args, **kwargs)
# else:
# return {'message': 'Invalid or missing token'}, 401
# return decorated
######################################################################
# Function to generate a random API key (good for testing)
######################################################################
def generate_apikey():
""" Helper function used when testing API keys """
return uuid.uuid4().hex
######################################################################
# GET HEALTH CHECK
######################################################################
@app.route('/healthcheck')
def healthcheck():
""" Let them know our heart is still beating """
return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)
######################################################################
# GET API DOCS
######################################################################
@app.route('/apidocs')
def apidoc_page():
"""API Documentation Page"""
return apidoc.ui_for(api)
######################################################################
# PATH: /suppliers/{id}
######################################################################
@api.route('/suppliers/<supplier_id>')
@api.param('supplier_id', 'The Supplier identifier')
class SupplierResource(Resource):
"""
SupplierResource class
Allows the manipulation of a single Supplier
GET /suppliers/{id} - Returns a Supplier with the id
PUT /suppliers/{id} - Update a Supplier with the id
DELETE /suppliers/{id} - Deletes a Supplier with the id
"""
#------------------------------------------------------------------
# RETRIEVE A SUPPLIER
#------------------------------------------------------------------
@api.doc('get_suppliers')
@api.response(404, 'Supplier not found')
# @api.marshal_with(supplier_model)
def get(self, supplier_id):
"""
Retrieve a single Supplier
This endpoint will return a Supplier based on its id
"""
app.logger.info("Request to Retrieve a supplier with id [%s]", supplier_id)
supplier = Supplier.find(supplier_id)
if not supplier:
api.abort(status.HTTP_404_NOT_FOUND, "Supplier with id '{}' was not found.".format(supplier_id))
return supplier.serialize(), status.HTTP_200_OK
#------------------------------------------------------------------
# UPDATE AN EXISTING SUPPLIER
#------------------------------------------------------------------
@api.doc('update_suppliers', security='apikey')
@api.response(404, 'Supplier not found')
@api.response(400, 'The posted Supplier data was not valid')
@api.expect(supplier_model)
# @api.marshal_with(supplier_model)
def put(self, supplier_id):
"""
Update a supplier
This endpoint will update a Supplier based on the body that is posted
"""
app.logger.info('Request to Update a supplier with id [%s]', supplier_id)
check_content_type('application/json')
supplier = Supplier.find(supplier_id)
if not supplier:
return api.abort(status.HTTP_404_NOT_FOUND, "Supplier with id '{}' not found".format(supplier_id))
data = request.get_json()
# Data type transfer
data = data_type_transfer(data)
app.logger.info(data)
supplier.deserialize(data)
supplier.id = supplier_id
supplier.save()
return supplier.serialize(), status.HTTP_200_OK
#------------------------------------------------------------------
# DELETE A SUPPLIER
#------------------------------------------------------------------
@api.doc('delete_supplier', security='apikey')
@api.response(204, 'Supplier deleted')
def delete(self, supplier_id):
"""
Delete a Supplier
This endpoint will delete a Supplier based on the id specified in the path
"""
app.logger.info('Request to Delete a Supplier with id [%s]', supplier_id)
supplier = Supplier.find(supplier_id)
if supplier:
supplier.delete()
app.logger.info("Supplier with ID [%s] delete complete.", supplier_id)
else:
app.logger.info("Supplier with ID [%s] does not exist.", supplier_id)
return "", status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /suppliers
######################################################################
@api.route('/suppliers', strict_slashes = True)
class SupplierCollection(Resource):
""" Handles all interactions with collections of Suppliers """
#------------------------------------------------------------------
# LIST ALL SUPPLIERS
#------------------------------------------------------------------
@api.doc('list_suppliers')
@api.response(400, 'Bad Request')
@api.expect(supplier_args, validate=True)
# @api.marshal_list_with(supplier_model)
def get(self):
""" Returns all of the suppliers """
app.logger.info('Request to list Suppliers...')
name = request.args.get('name')
is_active = request.args.get('is_active')
rating = request.args.get('rating')
product_id = request.args.get('product_id')
like_count = request.args.get('like_count')
if name:
app.logger.info('Find suppliers by name: %s', name)
suppliers = Supplier.find_by_name(name)
elif like_count:
app.logger.info('Find suppliers with like_count greater than: %s', like_count)
like_count = int(like_count)
suppliers = Supplier.find_by_greater("like_count", like_count)
elif is_active:
app.logger.info('Find suppliers by is_active: %s', is_active)
is_active = (is_active == 'true')
suppliers = Supplier.find_by_is_active(is_active)
elif rating:
app.logger.info('Find suppliers with rating greater than: %s', rating)
rating = float(rating)
suppliers = Supplier.find_by_greater("rating", rating)
elif product_id:
app.logger.info('Find suppliers containing product with id %s in their products',
product_id)
product_id = int(product_id)
suppliers = [supplier for supplier in Supplier.all() if product_id in supplier.products]
else:
app.logger.info('Find all suppliers')
suppliers = Supplier.all()
app.logger.info('[%s] Suppliers returned', len(suppliers))
results = [supplier.serialize() for supplier in suppliers]
app.logger.info("Returning %d suppliers", len(results))
return results, status.HTTP_200_OK
#------------------------------------------------------------------
# ADD A NEW SUPPLIER
#------------------------------------------------------------------
@api.doc('create_suppliers', security='apikey')
@api.expect(create_model)
@api.response(400, 'The posted data was not valid')
@api.response(201, 'Supplier created successfully')
# @api.marshal_with(supplier_model, code=201)
def post(self):
"""
Creates a Supplier
This endpoint will create a Supplier based on the data in the body that is posted
"""
app.logger.info('Request to Create a Supplier...')
# Check for form submission data
if request.headers.get('Content-Type') == 'application/x-www-form-urlencoded':
app.logger.info('Getting data from form submit')
data = {
"name": request.form['name'],
"like_count": request.form['like_count'],
"is_active": request.form['is_active'],
"products": request.form['products'],
"rating": request.form['rating']
}
else:
check_content_type('application/json')
app.logger.info('Getting json data from API call')
data = request.get_json()
# Data type transfer
data = data_type_transfer(data)
app.logger.info(data)
supplier = Supplier()
supplier.deserialize(data)
supplier.save()
app.logger.info('Supplier with new id [%s] saved!', supplier.id)
location_url = api.url_for(SupplierResource, supplier_id=supplier.id, _external=True)
return supplier.serialize(), status.HTTP_201_CREATED, {'Location': location_url}
######################################################################
# PATH: /suppliers/{supplier_id}/like
######################################################################
@api.route('/suppliers/<supplier_id>/like')
class SupplierLike(Resource):
@api.doc('like_suppliers')
@api.response(404, 'Supplier not found')
def put(self, supplier_id):
"""
Like a single Supplier
This endpoint will update the like_count of the Supplier based on its id in the database
"""
supplier = Supplier.find(supplier_id)
if not supplier:
raise NotFound("Supplier with id '{}' was not found.".format(supplier_id))
supplier.like_count += 1
supplier.save()
app.logger.info('You liked supplier with id [%s]!', supplier.id)
return supplier.serialize(), status.HTTP_200_OK
######################################################################
# PATH: /suppliers/{product_id}/recommend
######################################################################
@api.route('/suppliers/<product_id>/recommend')
class SupplierRecommend(Resource):
@api.doc('recommend_suppliers')
def get(self, product_id):
"""
Recommend a Supplier
This endpoint will recommend the top-rated active supplier for a given product
"""
app.logger.info('Recommend suppliers containing product with id %s in their products',
product_id)
product_id = int(product_id)
# retrieve all suppliers including this product first
suppliers = [supplier for supplier in Supplier.all() if product_id in supplier.products and supplier.is_active == True]
# get the top rated supplier, or an empty list if no supplier matches
if suppliers:
res_supplier = max(suppliers, key=lambda x: x.rating).serialize()
app.logger.info('Recommended supplier is: {}'.format(res_supplier))
else:
res_supplier = []
app.logger.info("No Recommended supplier!!")
return res_supplier, status.HTTP_200_OK
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def data_reset():
""" Removes all Suppliers from the database """
Supplier.remove_all()
def data_type_transfer(data):
""" Transfer string fields in submitted json data if necessary """
if isinstance(data['is_active'], str):
data['is_active'] = data['is_active'] in ["true", "True", "1"]
if data['like_count']: data['like_count'] = int(data['like_count'])
if isinstance(data['products'], str):
if data['products']:
data['products'] = [int(i) for i in data['products'].split(',') if i]
else:
data['products'] = []
if data['rating']: data['rating'] = float(data['rating'])
return data
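# Illustrative example of the conversion performed by data_type_transfer above:
#   data_type_transfer({"name": "acme", "is_active": "true", "like_count": "3",
#                       "products": "1,2", "rating": "4.5"})
#   -> {"name": "acme", "is_active": True, "like_count": 3,
#       "products": [1, 2], "rating": 4.5}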
def check_content_type(content_type):
""" Checks that the media type is correct """
if 'Content-Type' not in request.headers:
app.logger.error('No Content-Type specified.')
abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
'Content-Type must be {}'.format(content_type))
if request.headers['Content-Type'] == content_type:
return
app.logger.error('Invalid Content-Type: %s', request.headers['Content-Type'])
abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, 'Content-Type must be {}'.format(content_type))
# @app.before_first_request
def initialize_logging(log_level=app.config['LOGGING_LEVEL']):
""" Initialized the default logging to STDOUT """
if not app.debug:
print('Setting up logging...')
# Set up default logging for submodules to use STDOUT
# datefmt='%m/%d/%Y %I:%M:%S %p'
fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)
# Make a new log handler that uses STDOUT
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(fmt))
handler.setLevel(log_level)
# Remove the Flask default handlers and use our own
handler_list = list(app.logger.handlers)
for log_handler in handler_list:
app.logger.removeHandler(log_handler)
app.logger.addHandler(handler)
app.logger.setLevel(log_level)
app.logger.info('Logging handler established')
``` |
{
"source": "20ft/20ft",
"score": 2
} |
#### File: 20ft/20ft/cli_test.py
```python
import requests
import os
import signal
from time import sleep
from unittest import TestCase, main
from os.path import expanduser
from subprocess import check_output, CalledProcessError, Popen, run, DEVNULL, PIPE
class CliTest(TestCase):
tf = 'tfnz '
@staticmethod
def bin(po=None):
if po is not None:
pgid = os.getpgid(po.pid) # alpine needs you to start a new session AND nuke the whole group
os.killpg(pgid, signal.SIGTERM)
po.wait()
try:
all = check_output('ls /tmp/tf-*', shell=True, start_new_session=True)
except CalledProcessError: # no tf-whatever files
return
for instance in all.split():
docker_id = ''
with open(instance) as f:
docker_id = f.read()
run('rm ' + instance.decode(), shell=True, start_new_session=True)
try:
run('docker kill ' + docker_id, stderr=DEVNULL, stdout=DEVNULL, shell=True, start_new_session=True)
except CalledProcessError:
pass
def test_ends(self):
try:
out = run(CliTest.tf + 'tfnz/ends_test', shell=True, start_new_session=True, stderr=PIPE)
self.assertTrue(b"Container is running" in out.stderr)
self.assertTrue(b"Container has exited and/or been destroyed" in out.stderr)
self.assertTrue(b"Disconnecting" in out.stderr)
finally:
CliTest.bin()
def test_verbose(self):
try:
out = run(CliTest.tf + '-v alpine true', shell=True, start_new_session=True, stderr=PIPE)
self.assertTrue(b"Message loop started" in out.stderr)
finally:
CliTest.bin()
def test_quiet(self):
try:
out = run(CliTest.tf + '-q alpine true', shell=True, start_new_session=True, stderr=PIPE)
self.assertTrue(len(out.stderr) == 0)
finally:
CliTest.bin()
def test_portmap(self):
try:
po = Popen(CliTest.tf + '-p 8080:80 nginx', shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue("Welcome to nginx!" in reply.text)
finally:
CliTest.bin(po)
def test_environment(self):
try:
po = Popen(CliTest.tf + '-e TEST=environment -e VAR=iable -p 8080:80 tfnz/env_test',
shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue("TEST=environment" in reply.text)
self.assertTrue("VAR=iable" in reply.text)
finally:
CliTest.bin(po)
def test_preboot(self):
try:
po = Popen(CliTest.tf + '-f cli_test.py:/usr/share/nginx/html/index.html -p 8080:80 nginx',
shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue("test_preboot(self)" in reply.text)
finally:
CliTest.bin(po)
def test_mount_volume(self):
po = None
try:
# creating with a cli tag
try:
uuid = check_output(CliVolsTest.tfvolumes + 'create with_cli_tag', shell=True).decode().rstrip('\r\n')
except CalledProcessError as e:
run(CliVolsTest.tfvolumes + "destroy with_cli_tag", shell=True)
uuid = check_output(CliVolsTest.tfvolumes + 'create with_cli_tag', shell=True).decode().rstrip('\r\n')
print("Vol uuid = " + str(uuid))
# mount using the cli tag
print('\n' + CliTest.tf + '-s -m with_cli_tag:/usr/share/nginx/html/ -p 8080:80 nginx')
po = Popen(CliTest.tf + '-s -m with_cli_tag:/usr/share/nginx/html/ -p 8080:80 nginx',
shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue(reply.status_code == 403) # initially nothing in the volume
# upload a file with sftp
run('echo "put tfnz.1 /usr/share/nginx/html/index.html" | sftp -P 2222 root@localhost',
shell=True, start_new_session=True)
sleep(1)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue(".TH TFNZ(1)" in reply.text)
CliTest.bin(po)
# mount using tag:uuid (in another container)
print('\n' + CliTest.tf + '-m %s:/usr/share/nginx/html/ -p 8080:80 nginx' % uuid)
po = Popen(CliTest.tf + '-m %s:/usr/share/nginx/html/ -p 8080:80 nginx' % uuid,
shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue(".TH TFNZ(1)" in reply.text)
CliTest.bin(po)
# mount with just uuid
print('\n' + CliTest.tf + '-m %s:/usr/share/nginx/html/ -p 8080:80 nginx' % uuid.split(':')[0])
po = Popen(CliTest.tf + '-m %s:/usr/share/nginx/html/ -p 8080:80 nginx' % uuid.split(':')[0],
shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue(".TH TFNZ(1)" in reply.text)
CliTest.bin(po)
# mount with just tag
print('\n' + CliTest.tf + '-m %s:/usr/share/nginx/html/ -p 8080:80 nginx' % uuid.split(':')[1])
po = Popen(CliTest.tf + '-m %s:/usr/share/nginx/html/ -p 8080:80 nginx' % uuid.split(':')[1],
shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://127.0.0.1:8080')
self.assertTrue(".TH TFNZ(1)" in reply.text)
finally:
CliTest.bin(po)
run(CliVolsTest.tfvolumes + 'destroy with_cli_tag', shell=True)
def test_start_script(self): # also tests ssh
try:
with open("new_script.sh", 'w') as f:
f.write('echo "I did this!" > /test ; /bin/sleep 1000')
po = Popen(CliTest.tf + '-s -f new_script.sh:/new_script.sh alpine sh /new_script.sh',
shell=True, start_new_session=True)
sleep(5)
out = check_output('ssh -p 2222 root@localhost cat /test',
shell=True, start_new_session=True)
self.assertTrue(b"I did this!" in out)
finally:
run('rm new_script.sh', shell=True, start_new_session=True)
CliTest.bin(po)
def test_web_host(self):
try:
po = Popen(CliTest.tf + '-w cli.test.sydney.20ft.nz nginx', shell=True, start_new_session=True)
sleep(5)
reply = requests.get('http://cli.test.sydney.20ft.nz')
self.assertTrue("Welcome to nginx!" in reply.text)
finally:
CliTest.bin(po)
def test_sleep(self):
try:
po = Popen(CliTest.tf + '-z -s alpine', shell=True, start_new_session=True)
sleep(5)
out = check_output('ssh -p 2222 root@localhost uname', shell=True, start_new_session=True)
self.assertTrue(b"Linux" in out)
finally:
CliTest.bin(po)
class CliVolsTest(TestCase):
tfvolumes = 'tfvolumes '
def test_blank(self):
try:
out = check_output(CliVolsTest.tfvolumes, shell=True, start_new_session=True)
self.assertTrue(b"{list,create,destroy}" in out)
finally:
CliTest.bin()
def test_destroy_missing(self):
try:
run(CliVolsTest.tfvolumes + "destroy", shell=True, stderr=DEVNULL, start_new_session=True)
except CalledProcessError as e:
self.assertTrue(b"the following arguments are required: uuid" in e.output)
self.assertTrue(e.returncode != 0)
finally:
CliTest.bin()
def test_crud(self):
try:
uuid = check_output(CliVolsTest.tfvolumes + 'create', shell=True).rstrip(b'\r\n')
self.assertTrue(len(uuid) != 0)
all = check_output(CliVolsTest.tfvolumes + 'list', shell=True, start_new_session=True)
self.assertTrue(uuid in all)
destroyed = check_output(CliVolsTest.tfvolumes + 'destroy ' + uuid.decode(),
shell=True, start_new_session=True)
self.assertTrue(len(uuid) != 0)
finally:
CliTest.bin()
def test_crud_tagged(self):
try:
uuid_tag = check_output(CliVolsTest.tfvolumes + 'create test_crud_tagged',
shell=True, start_new_session=True).rstrip(b'\r\n')
self.assertTrue(b'error' not in uuid_tag)
all = check_output(CliVolsTest.tfvolumes + 'list', shell=True, start_new_session=True)
self.assertTrue(uuid_tag in all)
destroyed = check_output(CliVolsTest.tfvolumes + 'destroy ' + uuid_tag.decode(),
shell=True, start_new_session=True)
self.assertTrue(b'error' not in destroyed)
all = check_output(CliVolsTest.tfvolumes + 'list',
shell=True, start_new_session=True)
self.assertTrue(uuid_tag not in all)
finally:
CliTest.bin()
class CliAcctbakTest(TestCase):
tfacctbak = 'tfacctbak'
def test_acctbak(self):
with open(expanduser("~/.20ft/default_location")) as f:
def_loc = f.read().rstrip('\r\n')
with open(expanduser("~/.20ft/") + def_loc) as f:
priv = f.read().encode().rstrip(b'\r\n')
with open(expanduser("~/.20ft/%s.pub") % def_loc) as f:
pub = f.read().encode().rstrip(b'\r\n')
def_loc = def_loc.encode()
out = check_output(CliAcctbakTest.tfacctbak, shell=True, start_new_session=True)
self.assertTrue(b"cat > ~/.20ft/default_location" in out)
self.assertTrue(b"cat > ~/.20ft/" + def_loc in out)
self.assertTrue(b"cat > ~/.20ft/" + def_loc + b".pub" in out)
self.assertTrue(def_loc in out)
self.assertTrue(pub in out)
self.assertTrue(priv in out)
if __name__ == '__main__':
main()
```
#### File: tfnz/cli/tfdomains.py
```python
import sys
from tfnz.cli import generic_cli, base_argparse
def main():
parser = base_argparse('tfdomains')
subparsers = parser.add_subparsers(title='commands', dest='command')
p_list = subparsers.add_parser('list', help='list domains')
p_token = subparsers.add_parser('prepare', help='receive a pre-claim token')
p_token.add_argument('prepare_domain', metavar='my.com')
p_create = subparsers.add_parser('claim', help='claim a domain')
p_create.add_argument('claim_domain', metavar='my.com')
p_global = subparsers.add_parser('global', help='make a domain global')
p_global.add_argument('global_domain', metavar='my.com')
p_private = subparsers.add_parser('private', help='make a domain private')
p_private.add_argument('private_domain', metavar='my.com')
p_release = subparsers.add_parser('release', help='release your claim')
p_release.add_argument('release_domain', metavar='my.com')
generic_cli(parser, {'list': list_dom, 'prepare': prepare, 'claim': claim,
'private': private, 'global': gbl, 'release': release})
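# Typical workflow driven by the commands above ('my.com' is illustrative):
#   tfdomains prepare my.com   # prints a TXT token to publish at tf-token.my.com
#   tfdomains claim my.com     # verifies the token and claims the domain
#   tfdomains global my.com    # optionally makes the domain visible to all users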
def list_dom(location, args):
for dom in location.endpoints.values():
print(dom.domain)
def prepare(location, args):
rtn = location.conn.send_blocking_cmd(b'prepare_domain', {'domain': args.prepare_domain})
print("Put a DNS record on your domain: tf-token.%s, TXT=%s" %
(args.prepare_domain, rtn.params['token'].decode()))
print("...then run: tfdomains claim " + args.prepare_domain)
print("The request will time out (and become invalid) after six hours.")
def claim(location, args):
location.conn.send_blocking_cmd(b'claim_domain', {'domain': args.claim_domain})
print("Claimed successfully - you can remove the tf-token record from DNS")
def gbl(location, args):
location.conn.send_blocking_cmd(b'make_domain_global', {'domain': args.global_domain})
print("Domain made global, clients will need to re-attach to see the change")
def private(location, args):
location.conn.send_blocking_cmd(b'make_domain_private', {'domain': args.private_domain})
print("Domain made private, clients will need to re-attach to see the change but can no longer publish.")
def release(location, args):
location.conn.send_blocking_cmd(b'release_domain', {'domain': args.release_domain})
print("Released domain")
if __name__ == "__main__":
main()
```
#### File: tfnz/components/postgresql.py
```python
import random
import string
import logging
import time
from threading import Thread
from tfnz import Waitable
from tfnz.node import Node
from tfnz.volume import Volume
from tfnz.container import Container
class Postgresql(Waitable):
"""An object encapsulating a Postgresql server running on it's default port (5432).
Connect with username=postgres.
:param node: The node to spawn on.
:param volume: A volume (object) to use as a persistent store.
:param password: An optional password for the database, will create one if not supplied.
:param log_callback: An optional callback for log messages - signature (object, bytes)
:param image: Specify a non-default image.
Note the instantiated object behaves as if it were derived from Container."""
def __init__(self, node: Node, volume: Volume, *, password: str=None, log_callback=None, image: str=None):
super().__init__()
# passwords
if password is None:
self.password = ''.join(random.SystemRandom().choice(string.ascii_letters+string.digits) for _ in range(12))
else:
self.password = password
# create
self.ctr = node.spawn_container('postgres:alpine' if image is None else image,
volumes=[(volume, '/var/lib/postgresql/data')],
stdout_callback=log_callback)
# async initialise
self.asynchronous = Thread(target=self.wait_truly_up, name="Waiting for Postgres: " + self.ctr.uuid.decode())
self.asynchronous.start()
def password(self) -> str:
""":return: password for the server."""
return self.password
def wait_truly_up(self):
"""Wait on the server until it is ready to answer queries."""
try:
# wait for TCP to come up
self.ctr.wait_tcp(5432)
# wait for the db to come up
while True:
rtn = self.ctr.run_process('psql -Upostgres -h%s -c "SELECT;"' % self.ctr.private_ip(), nolog=True)
if rtn[2] == 0:
break
logging.debug("Waiting for Postgresql to accept a query.")
time.sleep(1)
# actually set the password (passing it as part of the env doesn't work)
self.ctr.run_process('psql -Upostgres -h%s -c "ALTER ROLE postgres WITH SUPERUSER PASSWORD \'%s\';"'
% (self.ctr.private_ip(), self.password), nolog=True) # prevent password being logged
logging.info("Started Postgresql: " + self.ctr.uuid.decode())
self.mark_as_ready()
except BaseException as e:
logging.critical(str(e))
def ensure_database(self, name: str) -> bool:
"""Ensures a given database exists in this server. Returns True if it had to be created."""
self.wait_until_ready()
return self.ctr.run_process('psql -Upostgres -h%s -c "CREATE DATABASE %s WITH OWNER = postgres '
'ENCODING = \'utf8\' LC_COLLATE = \'en_US.utf8\' LC_CTYPE = \'en_US.utf8\';"'
% (self.ctr.private_ip(), name))[2] == 0
def __getattr__(self, item):
return self.ctr.__getattribute__(item)
def __repr__(self):
return "<Postgresql '%s' pass=%s>" % (self.ctr.uuid.decode(), self.password)
```
#### File: 20ft/tfnz/location.py
```python
import logging
import socket
import time
import os
import requests
import requests.exceptions
import termios
import sys
from typing import Union, List, Optional
from base64 import b64encode
from subprocess import run, CalledProcessError, DEVNULL
from messidge import default_location
from messidge.client.connection import Connection
from . import TaggedCollection, Taggable, Waitable
from .docker import Docker
from .endpoint import WebEndpoint
from .node import Node
from .send import Sender
from .tunnel import Tunnel
from .volume import Volume
from .container import ExternalContainer
class Location(Waitable):
"""The root location object.
:param location: An optional fqdn of the location (i.e. tiny.20ft.nz).
:param location_ip: A optional explicit ip for the broker.
:param quiet: Set true to not configure logging.
:param debug_log: Set true to log at DEBUG logging level.
:param new_node_callback: An optional callback for when a node is created ... signature (object)
"""
def __init__(self, *, location: Optional[str]=None, location_ip: Optional[str]=None,
quiet: Optional[bool]=False, debug_log: Optional[bool]=False,
new_node_callback: Optional = None):
super().__init__()
self.location = location if location is not None else default_location(prefix="~/.20ft")
# internal state you may wish to query TODO: properties
self.nodes = {}
self.volumes = TaggedCollection()
self.externals = TaggedCollection()
self.tunnels = {}
self.endpoints = {}
# internal state you should probably ignore
self.new_node_callback = new_node_callback
self.last_heartbeat = time.time()
# see if we even can connect...
ip = location_ip if location_ip is not None else self.location
try:
run(['ping', '-c', '1', ip], check=True, stdout=DEVNULL)
except CalledProcessError:
raise RuntimeError("Cannot ping the requested ip: " + ip)
# set up logging
if debug_log and quiet:
raise ValueError("Can't select both quiet and verbose logging")
if debug_log or quiet is False:
logging.basicConfig(level=logging.DEBUG if debug_log else logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
datefmt='%m%d%H%M%S')
# connect
self.conn = Connection(self.location, prefix="~/.20ft", location_ip=location_ip, exit_on_exception=True)
self.user_pk = self.conn.keys.public_binary()
self.conn.register_commands(self, Location._commands)
self.conn.start()
self.conn.wait_until_ready() # will throw if the connection had a problem
self.wait_until_ready() # doesn't return until a resource offer is made
self.conn.loop.register_on_idle(self._heartbeat)
# capture stdin attributes
try:
self.stdin_attr = termios.tcgetattr(sys.stdin.fileno())
except (termios.error, AttributeError):
self.stdin_attr = None
def run(self):
"""Wait until the message loop completes, may raise an exception passed from the background thread."""
try:
self.conn.wait_until_complete()
except KeyboardInterrupt:
pass
finally:
self.complete()
self.disconnect()
def complete(self, container=None, returncode=0):
"""Stop the background loop, causes 'run' to return. Call to close from the background thread."""
# the container and returncode are passed if you use 'complete' as a termination function on a container
# i.e. it needs to be there, don't take it off!
logging.debug("Complete called on location")
self.conn.loop.stop()
def disconnect(self, container=None, returncode=0):
"""Disconnect from the location - without calling this the object cannot be garbage collected"""
# the container and returncode are passed if you use 'disconnect' as a termination function on a container
# i.e. it needs to be there, don't take it off!
if self.conn is None: # already disconnected
return
# reset terminal attributes
if self.stdin_attr is not None:
try:
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, self.stdin_attr)
print('', end='\r', flush=True)
except ValueError:
pass # stdin is closed for some reason
# destroy the things
for endpoint in list(self.endpoints.values()):
[endpoint.unpublish(cluster) for cluster in list(endpoint.clusters.values())]
for node in self.nodes.values():
for container in [c for c in node.containers.values() if not c.dead]:
node.destroy_container(container)
# disconnect
logging.info("Disconnecting: " + self.location)
self.conn.disconnect()
self.conn = None
def node(self) -> Node:
"""Returns a node.
:return: A node object"""
return self.ranked_nodes()[0]
def ranked_nodes(self) -> List[Node]:
"""Ranks the nodes in order of resource availability.
:return: A list of node objects."""
nodes = self.nodes.values()
if len(nodes) == 0:
raise ValueError("The location has no nodes")
return sorted(nodes,
key=lambda node: node.stats['cpu'] + node.stats['memory'] - 10 * node.stats['paging'],
reverse=True)
def create_volume(self, *, tag: Optional[str]=None, asynchronous: Optional[bool]=True,
termination_callback: Optional=None) -> Volume:
"""Creates a new volume
:param tag: An optional globally visible tag.
:param asynchronous: Enables asynchronous writes.
:param termination_callback: a callback if this volume is destroyed - signature (container, returncode).
:return: The new Volume object.
Note that asynchronous writes cannot damage a ZFS filesystem although the physical state may lag behind the
logical state by a number of seconds. Asynchronous ZFS is *very* much faster than synchronous."""
tag = Taggable.valid_tag(tag)
msg = self.conn.send_blocking_cmd(b'create_volume', {'user': self.user_pk,
'tag': tag,
'async': asynchronous})
logging.info("Created volume: " + msg.uuid.decode())
vol = Volume(self, msg.uuid, tag, termination_callback=termination_callback)
self.volumes.add(vol)
return vol
def ensure_volume(self, key: Union[bytes, str]) -> Volume:
"""Return the volume with this uuid, tag or display_name - create the volume if it doesn't exist.
:param key: The uuid or tag of the volume object to be returned.
:return: A Volume object."""
try:
return self.volumes.get(self.user_pk, key)
except KeyError:
return self.create_volume(tag=key)
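# e.g. location.ensure_volume('wordpress-data') returns the volume tagged
# 'wordpress-data', creating it with that tag first if it does not already exist.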
def destroy_volume(self, volume: Volume):
"""Destroys an existing volume. This is not a 'move to trash', it will be destroyed.
:param volume: The volume to be destroyed."""
self.conn.send_blocking_cmd(b'destroy_volume', {'user': self.user_pk,
'volume': volume.uuid})
logging.info("Destroyed volume: " + volume.uuid.decode())
volume.internal_destroy()
self.volumes.remove(volume)
def all_volumes(self) -> List[Volume]:
"""Returns a list of all volumes on this node.
:return: A list of Volume objects."""
return list(self.volumes.values())
def volume(self, key: Union[bytes, str]) -> Volume:
"""Return the volume with this uuid, tag or display_name.
:param key: The uuid or tag of the volume object to be returned.
:return: A Volume object."""
return self.volumes.get(self.user_pk, key)
def endpoint_for(self, fqdn: str) -> WebEndpoint:
"""Return a WebEndpoint for the given fqdn.
:param fqdn: The fully qualified name the endpoint will represent.
:return: A WebEndpoint object."""
for domain, ep in self.endpoints.items():
if fqdn.endswith(domain):
return ep
raise ValueError("There is no endpoint capable of serving: " + fqdn)
def external_container(self, key: Union[bytes, str]) -> ExternalContainer:
"""Return the external container with this uuid, tag or display_name.
:param key: The uuid or tag of the container to be returned.
:return: An ExternalContainer object."""
return self.externals.get(self.user_pk, key)
def ensure_image_uploaded(self, docker_image_id: str, *, descr: Optional[dict]=None) -> List[str]:
"""Sends missing docker layers to the location.
:param docker_image_id: use the short form id or tag
:param descr: a previously found docker description
:return: A list of layer sha256 identifiers
This is not a necessary step and is implied when spawning a container."""
# Get a description
if descr is None:
descr = Docker.description(docker_image_id, conn=self.conn)
else:
# update if an explicit description has been passed
self.conn.send_cmd(b'cache_description', {'image_id': docker_image_id, 'description': descr})
# Send the missing layers (if any)
layers = Sender.layer_stack(descr)
to_upload = Sender.upload_requirements(layers, self.conn) # if none, does not need a local docker
logging.info("Ensuring layers (%d) are uploaded for: %s" % (len(layers), docker_image_id))
if len(to_upload) > 0:
logging.info("Layers to upload: %d of %d" % (len(to_upload), len(layers)))
Sender.send(docker_image_id, to_upload, self.conn)
return layers
@staticmethod
def all_locations():
"""Returns a (text) list of 20ft locations that have an account on this machine."""
dirname = os.path.expanduser('~/.20ft/')
all_files = os.listdir(dirname)
locations = []
for file in all_files:
if file[-4:] == '.pub':
continue
if file + '.pub' in all_files:
locations.append(file)
return locations
def _heartbeat(self):
if time.time() - self.last_heartbeat < 30:
return
self.last_heartbeat = time.time()
self.conn.send_cmd(b'heartbeat')
def tunnel_onto(self, container, port, localport, bind, *, timeout=30) -> Tunnel:
# called from Container
if isinstance(port, str):
port = int(port)
if isinstance(localport, str):
localport = int(localport)
# create the tunnel
container.wait_until_ready() # otherwise the IP address may not exist on the node and creation will fail
tunnel = Tunnel(self.conn, container.parent(), container, port, localport, bind, timeout)
self.tunnels[tunnel.uuid] = tunnel
tunnel.connect() # connection done 'late' so we can get the tunnel into tunnels first
return tunnel
def wait_tcp(self, container, dest_port):
# called from Container - raises a ValueError if it cannot connect before the timeout
logging.info("Waiting on tcp (%d): %s" % (dest_port, container.uuid.decode()))
self.conn.send_blocking_cmd(b'wait_tcp', {'container': container.uuid, 'port': dest_port})
def wait_http_200(self, container, dest_port, fqdn, path, localport=None) -> Tunnel:
# called from Container
# needs to resolve to localhost because that's where the tunnel will be
addr = socket.gethostbyname(fqdn)
if addr != '127.0.0.1':
raise ValueError("FQDN '%s' does not resolve to localhost" % fqdn)
logging.info("Waiting on http 200: " + container.uuid.decode())
# OK
tnl = self.tunnel_onto(container, dest_port, localport, None) # has a 30 sec timeout by default
logging.debug("Tunnel connected onto: " + container.uuid.decode())
# the server side polls so all we need to do is make the request
url = 'http://%s:%d/%s' % (fqdn, tnl.localport(), path if path is not None else '')
r = requests.get(url, timeout=240)
if r.status_code == 200:
logging.info("Connected onto: " + url)
return tnl
else:
raise ValueError("Could not connect to: " + url)
def destroy_tunnel(self, tunnel: Tunnel, container=None, with_command=True):
# Called from Container
tunnel.destroy(with_command)
del self.tunnels[tunnel.uuid]
def _from_proxy(self, msg):
try:
tunnel = self.tunnels[msg.uuid]
except KeyError:
logging.debug("Data apparently from an already removed tunnel (dropped)")
return
try:
tunnel.from_proxy(msg)
except KeyError:
logging.debug("Data arrived from a proxy we seemingly already closed")
def _close_proxy(self, msg):
try:
tunnel = self.tunnels[msg.uuid]
except KeyError:
logging.debug("Asked to close a proxy on an already removed tunnel (dropped)")
return
try:
tunnel.close_proxy(msg)
except KeyError:
logging.debug("Asked to close a proxy that we already closed")
def _resource_offer(self, msg):
self.endpoints = {dom['domain']: WebEndpoint(self, dom['domain']) for dom in msg.params['domains']}
self.nodes = {node[0]: Node(self, node[0], self.conn, node[1]) for node in msg.params['nodes']}
self.volumes = TaggedCollection([Volume(self, vol['uuid'], vol['tag']) for vol in msg.params['volumes']])
self.externals = TaggedCollection([ExternalContainer(self, xtn['uuid'], xtn['node'], xtn['ip'], xtn['tag'])
for xtn in msg.params['externals']])
self.mark_as_ready() # only ready once we've dealt with the resource offer
def _update_stats(self, msg):
node = self._ensure_node(msg)
node.update_stats(msg.params['stats'])
def _node_created(self, msg):
if msg.params['node'] in self.nodes:
return
logging.debug("Notify - node created: " + b64encode(msg.params['node']).decode())
n = Node(self, msg.params['node'], self.conn, {'memory': 1000, 'cpu': 1000, 'paging': 0, 'ave_start_time': 0})
self.nodes[msg.params['node']] = n
if self.new_node_callback is not None:
self.new_node_callback(n)
def _node_destroyed(self, msg):
logging.debug("Notify - node destroyed: " + b64encode(msg.params['node']).decode())
node = self._ensure_node(msg)
node.internal_destroy()
del self.nodes[msg.params['node']]
def _volume_created(self, msg):
logging.debug("Notify - volume created: " + msg.params['volume'].decode())
self.volumes.add(Volume(self, msg.params['volume'], msg.params['tag']))
def _volume_destroyed(self, msg):
logging.debug("Notify - volume destroyed: " + msg.params['volume'].decode())
vol = self.ensure_volume(msg.params['volume'])
vol.internal_destroy()
self.volumes.remove(vol)
def _log(self, msg):
if msg.params['error']:
logging.error(msg.params['log'])
else:
logging.info(msg.params['log'])
def _ensure_node(self, msg):
if msg.params['node'] not in self.nodes:
raise ValueError("Didn't know about node: " + b64encode(msg.params['node']).decode())
return self.nodes[msg.params['node']]
def _ensure_volume(self, msg):
if msg.params['volume'] not in self.volumes:
raise ValueError("Didn't know about volume: " + b64encode(msg.params['node']).decode())
return self.volumes[msg.params['volume']]
_commands = {b'resource_offer': ([], False),
b'node_created': (['node'], False),
b'node_destroyed': (['node'], False),
b'volume_created': (['volume', 'tag'], False),
b'volume_destroyed': (['volume'], False),
b'external_created': (['container', 'tag'], False),
b'external_destroyed': (['container'], False),
b'from_proxy': (['proxy'], False),
b'close_proxy': (['proxy'], False),
b'update_stats': (['node', 'stats'], False),
b'log': (['error', 'log'], False)}
def __repr__(self):
return "<Location '%s' nodes=%d>" % (self.location, len(self.nodes))
```
#### File: tfnz/platforms/silverstripe.py
```python
import logging
from tfnz.location import Location
from tfnz.volume import Volume
from tfnz.components.postgresql import Postgresql
from tfnz.endpoint import WebEndpoint, Cluster
class SilverStripe:
container_id = '7a4f9fbb6afc'
"""Puts a PHP/SilverStripe instance on each node and load balances.
:param location: A location (object) to connect to.
:param volume: A volume (object) to use as a persistent store.
:param sql_volume: A volume to connect to a Postgres server for SQL storage.
:param fqdn: The FQDN to publish to.
:param image: Use a non-default container image.
:param log_callback: An optional callback for log messages - signature (object, bytes)"""
def __init__(self, location: Location, volume: Volume, sql_volume: Volume, fqdn: str,
*, image=None, log_callback=None):
nodes = location.ranked_nodes()
# spawn database
self.db = Postgresql(nodes[0], sql_volume, log_callback=log_callback)
# spawn one webserver instance
first_server = nodes[0].spawn_container(SilverStripe.container_id if image is None else image,
volumes=[(volume, '/site/public/assets')],
sleep=True,
stdout_callback=log_callback)
first_server.create_ssh_server()
# recreate the .env because the database ip and password will have changed
dotenv = SilverStripe.environment_template % (fqdn, self.db.password, self.db.private_ip())
first_server.put('/site/.env', dotenv.encode())
# start additional webservers
self.webservers = [first_server]
for node in nodes[1:]:
server = node.spawn_container(SilverStripe.container_id,
volumes=[(volume, '/site/public/assets')],
sleep=True)
self.webservers.append(server)
# start the actual webserving process
fqdn_sed = "sed -i -e 's/--fqdn--/%s/g' /etc/nginx/conf.d/nginx.conf" % fqdn
timezone_sed = "sed -i -e 's/;date.timezone =/date.timezone = %s/g' /etc/php7/php.ini" % "UTC" # TODO
pool_sed = "sed -i -e 's/pm.max_children = 5/pm.max_children = 16/g' /etc/php7/php-fpm.d/www.conf"
for w in self.webservers:
self.db.allow_connection_from(w)
w.run_process('rm /etc/nginx/conf.d/default.conf /site/install*')
w.run_process(fqdn_sed)
w.run_process(timezone_sed)
w.run_process(pool_sed)
w.run_process('mkdir /run/nginx')
w.spawn_process('nginx')
w.spawn_process('php-fpm7')
# gather together and serve into an endpoint
self.cluster = Cluster(containers=self.webservers)
location.endpoint_for(fqdn).publish(self.cluster, fqdn)
# wait until we're actually able to serve
self.db.wait_until_ready()
WebEndpoint.wait_http_200(fqdn)
logging.info("SilverStripe is up.")
# Start tailing logs
for w in self.webservers:
w.spawn_process('tail -n 0 -f /var/log/nginx/access.log', data_callback=log_callback)
w.spawn_process('tail -n 0 -f /var/log/nginx/error.log', data_callback=log_callback)
environment_template = """
SS_BASE_URL="http://%s"
SS_DATABASE_CLASS="PostgreSQLDatabase"
SS_DATABASE_NAME="SS_mysite"
SS_DATABASE_PASSWORD="%s"
SS_DATABASE_PORT="5432"
SS_DATABASE_SERVER="%s"
SS_DATABASE_USERNAME="postgres"
SS_DEFAULT_ADMIN_USERNAME="admin"
SS_DEFAULT_ADMIN_PASSWORD="password"
"""
```
#### File: 20ft/tfnz/volume.py
```python
import weakref
import logging
import os
from . import Taggable
class Volume(Taggable):
"""An object representing a persistent volume.
Do not construct directly, use Location.create_volume or Location.ensure_volume;
or Location.volume to retrieve one that is pre-existing."""
def __init__(self, location, uuid, tag, *, termination_callback=None):
super().__init__(location.user_pk, uuid, tag=tag)
# Do not construct directly, use Location.create_volume
self.connection = weakref.ref(location.conn)
self.termination_callback = termination_callback
def snapshot(self):
"""Create a snapshot."""
self.connection().send_cmd(b'snapshot_volume', {'volume': self.uuid})
logging.info("Set snapshot for volume: " + self.uuid.decode())
def rollback(self):
"""Resets the volume back to its' state when 'snapshot' was called."""
self.connection().send_cmd(b'rollback_volume', {'volume': self.uuid})
logging.info("Rolled back to snapshot: " + self.uuid.decode())
@staticmethod
def trees_intersect(current, proposed):
# ensure the proposed directory is neither a subdir nor a superdir of any existing directories
p = os.path.abspath(proposed)
for cur in current:
c = os.path.abspath(cur)
if len(p) > len(c):
if p[:len(c)] == c:
return p, c # p is a subtree of c
else:
if c[:len(p)] == p:
return c, p # c is a subtree of p
return None
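# Illustrative calls for the subtree check above:
#   Volume.trees_intersect(['/mnt/a'], '/mnt/a/b')  -> ('/mnt/a/b', '/mnt/a')
#   Volume.trees_intersect(['/mnt/a'], '/mnt/b')    -> None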
def internal_destroy(self):
if self.termination_callback is not None:
self.termination_callback(self, 0)
def __repr__(self):
return "<Volume '%s'>" % self.display_name()
```
#### File: 20ft/20ft/tf_test.py
```python
from unittest import TestCase, main
import subprocess
import time
import requests
import socket
import random
import os
import signal
import logging
import shortuuid
from concurrent.futures import ThreadPoolExecutor
from tfnz import Taggable, TaggedCollection
from tfnz.location import Location
from tfnz.container import Container
from tfnz.volume import Volume
from tfnz.docker import Docker
from tfnz.endpoint import Cluster, WebEndpoint
from tfnz.components.postgresql import Postgresql
class TfTest(TestCase):
location = None
location_string = "tiny.20ft.nz"
location_cert = "~/.30ft/tiny.20ft.nz"
@classmethod
def setUpClass(cls):
# ensure we have all the right images
images = ['nginx', 'alpine', 'bitnami/apache', 'tfnz/env_test', 'tfnz/ends_test', 'debian', 'postgres:alpine']
futures = []
with ThreadPoolExecutor() as executor:
for image in images:
futures.append(executor.submit(subprocess.call, (['docker', 'pull', image])))
[f.result() for f in futures]
# connect to the location
cls.location = Location(location=cls.location_string, debug_log=True)
@classmethod
def tearDownClass(cls):
if cls.location is not None:
cls.location.disconnect()
cls.location = None
def test_spawn_awake(self):
node = TfTest.location.node()
container = node.spawn_container('bitnami/apache').wait_until_ready()
self.assertTrue(isinstance(container, Container), 'spawn_container returned the wrong type of object')
self.assertTrue(container.parent() == node, 'Container has the wrong parent')
# look for apache having started
time.sleep(10)
ps_result = container.run_process('/bin/ps ax')
self.assertTrue(b'apache/bin/httpd' in ps_result[0], 'Container didnt boot properly')
# did it use the right docker config?
ideal = Docker.description('bitnami/apache')
self.assertTrue(container.docker_config == ideal, 'Container launched with wrong docker config')
node.destroy_container(container)
def test_env_vars(self):
node = TfTest.location.node()
container = node.spawn_container('tfnz/env_test', env=[('TEST', 'testy')])
tunnel = container.wait_http_200()
# did it pass the environment correctly?
reply = requests.get('http://127.0.0.1:' + str(tunnel.localport()))
vars = reply.text.split('\n')
var_dict = {var.split('=')[0]: var.split('=')[1] for var in vars[:-1]}
self.assertTrue(var_dict['TEST'] == "testy", "Failed to pass environment variable")
# do commands have the environment passed?
stdout, stderr, rtn = container.run_process('echo $TEST')
self.assertTrue(stdout[:-1] == b'testy', "Failed to pass environment variable to running process")
container.destroy_tunnel(tunnel)
node.destroy_container(container)
def test_spawn_asleep(self):
# is it asleep?
node = TfTest.location.node()
container = node.spawn_container('bitnami/apache', sleep=True)
time.sleep(5) # give it a while to boot or fall over
ps_result = container.run_process('/bin/ps ax') # tests that we can still run processes
self.assertTrue('sh' in ps_result[0].decode())
self.assertTrue('apache' not in ps_result[0].decode())
# so start it
container.start()
time.sleep(5)
ps_result = container.run_process('/bin/ps ax')
self.assertTrue('apache' in ps_result[0].decode())
node.destroy_container(container)
def test_spawn_preboot(self):
# sent wrong
node = TfTest.location.node()
ctr = None
preboot = ['/usr/share/nginx/html/index.html', 'Hello World!']
try:
node.spawn_container('nginx', pre_boot_files=preboot)
except ValueError:
self.assertTrue(True)
# wrong again
preboot = [{'/usr/share/nginx/html/index.html': 'Hello World!'}]
try:
node.spawn_container('nginx', pre_boot_files=preboot)
except ValueError:
self.assertTrue(True)
# write configuration files before we boot
preboot = [('/usr/share/nginx/html/index.html', 'Hello World!')]
container = node.spawn_container('nginx', pre_boot_files=preboot)
self.assertTrue(b'Hello World!' in container.fetch('/usr/share/nginx/html/index.html'))
node.destroy_container(container)
def test_volumes(self):
def test_termination_callback(obj, returncode):
self.terminated_volume = obj
vol = TfTest.location.create_volume(termination_callback=test_termination_callback)
vol2 = TfTest.location.create_volume()
try:
self.assertIsNotNone(vol, 'Volume was not created')
# delete
TfTest.location.destroy_volume(vol)
time.sleep(5)
self.assertTrue(vol not in TfTest.location.volumes, 'Volume did not disappear from the list of volumes')
self.assertTrue(self.terminated_volume == vol, 'Volume did not call back to say it had been terminated')
# delete again should bork
try:
TfTest.location.destroy_volume(vol)
self.assertTrue(False, 'Calling destroy on an already destroyed volume did not throw a value error')
except ValueError:
self.assertTrue(True)
vol = None
# catching passing the wrong object for volumes when spawning
node = TfTest.location.node()
try:
node.spawn_container('alpine', volumes=(vol2, '/mount/point')) # deliberately wrong, don't fix!
self.assertTrue(False, "Did not catch spawn_container being passed the wrong object for volumes")
except ValueError:
pass
# create and mount in a container
ctr2 = node.spawn_container('alpine', volumes=[(vol2, '/mount/point')])
ctr2.put('/mount/point/test', b'I am a test')
self.assertTrue(ctr2.fetch('/mount/point/test') == b'I am a test', "Did not retrieve the same data")
# don't destroy while mounted
try:
TfTest.location.destroy_volume(vol2)
self.assertTrue(False, "Did not prevent volume from being destroyed while mounted")
except ValueError:
pass
# destroy and mount in a new container
node.destroy_container(ctr2)
time.sleep(1)
ctr3 = node.spawn_container('alpine', volumes=[(vol2, '/mount/point')])
self.assertTrue(ctr3.fetch('/mount/point/test') == b'I am a test', "Volume not actually persistent")
node.destroy_container(ctr3)
time.sleep(1)
# connect from a second client, destroy a volume to test the callback
client_session = Location(location=TfTest.location_string)
client_session_volume = client_session.create_volume(tag='test')
time.sleep(1)
found_volume = TfTest.location.volume('test')
found_volume.termination_callback = test_termination_callback
client_session.destroy_volume(client_session_volume)
time.sleep(1)
client_session.disconnect()
self.assertTrue(self.terminated_volume == found_volume, 'Did not get termination callback for found volume')
finally:
# clean up, for obvious reasons they're not garbage collected :)
if vol is not None:
TfTest.location.destroy_volume(vol)
TfTest.location.destroy_volume(vol2)
def test_postgres(self):
vol = TfTest.location.create_volume()
node = TfTest.location.node()
postgres = None
try:
postgres = Postgresql(node, vol, log_callback=lambda _, l: print(l.decode()))
postgres.wait_truly_up()
with open("iso-3166.sql") as f:
postgres.put('iso-3166.sql', f.read().encode())
time.sleep(5)
stdout, stderr, rtn = postgres.run_process('cat iso-3166.sql | psql -Upostgres')
stdout, stderr, rtn = postgres.run_process('echo "SELECT count(*) FROM subcountry;" | psql -Upostgres')
self.assertTrue(b'3995' in stdout)
finally:
if postgres is not None:
node.destroy_container(postgres)
time.sleep(5) # let it unmount volume before we delete it (which we won't normally do)
TfTest.location.destroy_volume(vol)
def test_tagging(self):
# a tagged object needs a user pk even if it's only for this user
# has a uuid like everything else, too
uuid = shortuuid.uuid()
to = Taggable(TfTest.location.user_pk, uuid)
uuid2 = shortuuid.uuid()
to2 = Taggable(TfTest.location.user_pk, uuid2)
# the object(s) can go in a collection
col = TaggedCollection([to]) # constructor takes a list of initial objects
col.add(to2)
# fetch by uuid
to_out = col.get(TfTest.location.user_pk, uuid)
self.assertTrue(to is to_out)
# fetch by uuid, doesn't need to know the user_pk
# because the collection retains a list of which users created which uuid's
# this is necessary for the broker
to_out2 = col[uuid2]
self.assertTrue(to2 is to_out2)
# an actually tagged object
uuid3 = shortuuid.uuid()
to3 = Taggable(TfTest.location.user_pk, uuid3, tag='uuid3_tag')
col.add(to3)
# using a fluffy string
to_out3 = col.get(TfTest.location.user_pk, 'uuid3_tag') # a string is not assumed to be a UUID
self.assertTrue(to3 is to_out3)
to_out4 = col.get(TfTest.location.user_pk, uuid3 + ':uuid3_tag')
self.assertTrue(to3 is to_out4)
to_out5 = col.get(TfTest.location.user_pk, uuid3)
self.assertTrue(to3 is to_out5)
def test_vol_subtree(self):
current = set()
current.add('/mount/point')
# propose a subtree
i1 = Volume.trees_intersect(current, '/mount/point/subtree')
self.assertTrue(i1[0] == '/mount/point/subtree' and i1[1] == '/mount/point')
# propose a supertree
i2 = Volume.trees_intersect(current, '/mount')
self.assertTrue(i2[0] == '/mount/point' and i2[1] == '/mount')
# propose safe
i3 = Volume.trees_intersect(current, '/mount/different')
self.assertTrue(i3 is None)
# mess around with non-normalised paths
current.add('/mount/different/')
i4 = Volume.trees_intersect(current, '/mount/point/../tricky/')
self.assertTrue(i4 is None)
current.add('/mount/point/../tricky/')
i5 = Volume.trees_intersect(current, '/mount/point/../tricky/one')
self.assertTrue(i5[0] == '/mount/tricky/one' and i5[1] == '/mount/tricky')
def test_sftp(self):
node = TfTest.location.node()
ctr = node.spawn_container('alpine', sleep=True).wait_until_ready()
port = random.randrange(1024, 8192)
while True:
try:
ctr.create_ssh_server(port)
break
except RuntimeError:
port += random.randrange(1024, 8192)
def sftp_op(command, port):
sftp = subprocess.Popen(['/usr/bin/sftp',
'-P', str(port),
'-o', 'StrictHostKeyChecking=no',
'root@localhost']
, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(stdout, stderr) = sftp.communicate(input=command)
logging.debug("SFTP: " + stdout.decode())
return stdout
# upload and retrieve
self.assertTrue(b'Uploading tf_test.py to /tf_test.py' in sftp_op(b'put tf_test.py', port))
self.assertTrue(b'Fetching /tf_test.py to tf_test.py.sftp' in sftp_op(b'get /tf_test.py tf_test.py.sftp', port))
with open('tf_test.py') as f:
orig = f.read()
with open('tf_test.py.sftp') as f:
copied = f.read()
self.assertTrue(orig == copied)
subprocess.call(['rm', 'tf_test.py.sftp'])
# rename
sftp_op(b'rename tf_test.py tf_test.renamed', port)
self.assertTrue(b'Fetching /tf_test.renamed to tf_test.renamed' in sftp_op(b'get /tf_test.renamed', port))
self.assertTrue(os.path.exists('tf_test.renamed'))
subprocess.call(['rm', 'tf_test.renamed'])
# delete
sftp_op(b'rm /tf_test.renamed', port)
self.assertTrue(b'not found' in sftp_op(b'get /tf_test.renamed', port))
# mkdir, ls, rmdir
sftp_op(b'mkdir /unit-test', port)
self.assertTrue(b'unit-test' in sftp_op(b'ls', port))
sftp_op(b'rmdir /unit-test', port)
self.assertFalse(b'unit-test' in sftp_op(b'ls', port))
node.destroy_container(ctr)
def test_reboot(self):
# create a container with some preboot files
preboot = [('/usr/share/nginx/html/index.html', b'Hello World!')]
container = TfTest.location.node().spawn_container('nginx', pre_boot_files=preboot)
tnl = container.wait_http_200()
# Is it serving the correct file?
resp = requests.get("http://127.0.0.1:" + str(tnl.localport()))
self.assertTrue(resp.text == 'Hello World!', "Preboot file apparently not written in")
# Ensure we're losing changes.
container.put('/usr/share/nginx/html/index.html', b'Smeg')
resp = requests.get("http://127.0.0.1:" + str(tnl.localport()))
self.assertTrue(resp.text == 'Smeg', "Didn't manage to replace preboot file")
# Reset should take it to after the preboot files and not just the container image
# Tunnel should still be live
container.reboot(reset_filesystem=True)
try:
resp = requests.get("http://127.0.0.1:" + str(tnl.localport()))
self.assertTrue(resp.text == 'Hello World!', "Filesystem did not recover")
except BaseException as e:
self.assertTrue(False, "test_reboot failed: " + str(e))
def test_firewalling(self):
# can we connect one container to another?
node = TfTest.location.node()
server = node.spawn_container('nginx')
client = node.spawn_container('alpine')
# make the client more clienty
client.run_process('apk update')
client.run_process('apk add curl')
# not yet
cmd = "curl --connect-timeout 1 http://" + server.ip
stdout, stderr, exit_code = client.run_process(cmd)
self.assertTrue(exit_code != 0, "curl should have failed")
# connect them
server.allow_connection_from(client)
time.sleep(1)
stdout, stderr, exit_code = client.run_process(cmd)
self.assertTrue(b'Welcome to nginx!' in stdout, 'Did not manage to connect containers')
# disconnect again
server.disallow_connection_from(client)
time.sleep(1)
stdout, stderr, exit_code = client.run_process(cmd)
self.assertTrue(exit_code != 0, 'Did not manage to disconnect containers')
node.destroy_container(client)
node.destroy_container(server)
# across nodes?
nodes = TfTest.location.ranked_nodes()
if len(nodes) < 2:
print("WARNING: could not test for cross node firewalling")
return
containers = []
for node in nodes:
containers.append(node.spawn_container('alpine'))
for container in containers:
for target in containers:
if target is container:
continue
target.wait_until_ready() # otherwise the ip hasn't been allocated
cmd = "ping -c 1 -W 1 " + target.ip
stdout, stderr, exit_code = container.run_process(cmd)
self.assertTrue(exit_code != 0)
target.allow_connection_from(container)
stdout, stderr, exit_code = container.run_process(cmd)
self.assertTrue(exit_code == 0)
target.disallow_connection_from(container)
def test_web_endpoint(self):
# test and create endpoint
eps = TfTest.location.endpoints
if TfTest.location_string not in eps.keys():
print("WARNING: could not test endpoints, test domain has not been claimed")
return
ep = eps[TfTest.location_string]
fqdn = shortuuid.uuid() + "." + TfTest.location_string
nodes = TfTest.location.ranked_nodes()
# create a single server cluster to serve the endpoint
one = nodes[0].spawn_container('tfnz/env_test', env=[('SERVER', 'one')])
cluster = Cluster(containers=[one])
# attach the cluster to the endpoint
ep.publish(cluster, fqdn)
WebEndpoint.wait_http_200(fqdn)
# did it work?
reply = requests.get('http://' + fqdn)
self.assertTrue('SERVER=one' in reply.text, 'WebEndpoint failed to publish')
# add a container to the cluster
two_node = nodes[1] if len(nodes) > 1 else nodes[0]
two = two_node.spawn_container('tfnz/env_test', env=[('SERVER', 'two')])
cluster.add_container(two)
# add it again makes no difference
cluster.add_container(two)
self.assertTrue(len(cluster.containers) == 2)
# did that work?
replies = [requests.get('http://' + fqdn).text for _ in range(0, 10)]
s1 = False
s2 = False
for reply in replies:
if 'SERVER=one' in reply:
s1 = True
if 'SERVER=two' in reply:
s2 = True
self.assertTrue(s1)
        self.assertTrue(s2, 'WebEndpoint failed to add a new server to a cluster')
# remove a container
cluster.remove_container(one)
time.sleep(1)
replies = [requests.get('http://' + fqdn).text for _ in range(0, 10)]
s1 = False
s2 = False
for reply in replies:
if 'SERVER=one' in reply:
s1 = True
if 'SERVER=two' in reply:
s2 = True
self.assertFalse(s1, 'WebEndpoint failed to remove a server from a cluster')
self.assertTrue(s2)
# remove it again makes no difference
cluster.remove_container(one)
self.assertTrue(len(cluster.containers) == 1)
# unpublish
ep.unpublish(cluster)
try:
requests.get('http://' + fqdn)
self.assertTrue(False, 'requests should not have been able to reach the site')
except:
pass
# exit
nodes[0].destroy_container(one)
two_node.destroy_container(two)
def test_web_endpoint_ssl(self):
# test and create endpoint
eps = TfTest.location.endpoints
if TfTest.location_string not in eps.keys():
print("WARNING: could not test endpoints, test domain has not been claimed")
return
ep = eps[TfTest.location_string]
# create a single server cluster to serve the endpoint
node = TfTest.location.node()
nginx = node.spawn_container('nginx')
cluster = Cluster(containers=[nginx])
try:
# create self-signed cert
fqdn = shortuuid.uuid() + "." + TfTest.location_string
subprocess.call(['echo "\n\n\n\n\n%s\n\n" | '
'openssl req -x509 -nodes -newkey rsa:2048 -keyout key%s.pem -out cert%s.pem' %
(fqdn, fqdn, fqdn)], shell=True)
# attach the cluster to the endpoint
ep.publish(cluster, fqdn, ssl=('cert%s.pem' % fqdn, 'key%s.pem' % fqdn))
# did it work?
time.sleep(1)
reply = requests.get('https://' + fqdn, verify='cert%s.pem' % fqdn)
self.assertTrue('Welcome to nginx!' in reply.text, 'WebEndpoint failed to publish')
finally:
ep.unpublish(cluster)
node.destroy_container(nginx)
subprocess.call(['rm', 'cert%s.pem' % fqdn, 'key%s.pem' % fqdn])
def test_external_container(self):
# create a server
tag = str(int(random.random()*1000000))
server_node = TfTest.location.node()
server = server_node.spawn_container('nginx', tag=tag).wait_until_ready()
# create a client in a separate session
client_session = Location(location=TfTest.location_string)
client_node = client_session.node()
client = client_node.spawn_container('alpine').wait_until_ready()
# find the server from the second session
webserver = client_session.external_container(tag)
webserver.allow_connection_from(client)
# see if we're a goer
stdout, stderr, exit_code = client.run_process('wget -O - http://' + webserver.ip)
self.assertTrue(b'Welcome to nginx!' in stdout, 'Failed to get output from webserver')
# clean
client_node.destroy_container(client)
client_session.disconnect()
server_node.destroy_container(server)
def test_state_tracking(self):
node = TfTest.location.node()
# containers
before = len(node.containers)
c1 = node.spawn_container('alpine', sleep=True).wait_until_ready()
self.assertTrue(c1 in node.all_containers(), "List of containers on node did not contain right one")
c2 = node.spawn_container('alpine', sleep=True).wait_until_ready()
self.assertTrue(c2 in node.all_containers(), "Second container was not in the list of containers")
self.assertTrue(c1 in node.all_containers(), "First container was no longer on the list of containers")
# processes
p1 = c1.spawn_process('ping 8.8.8.8')
self.assertTrue(p1 in c1.all_processes(), "Did not add the correct process to the process list")
p2 = c1.spawn_process('ping 8.8.8.8')
self.assertTrue(p1 in c1.all_processes(), "Lost first process from list of processes")
self.assertTrue(p2 in c1.all_processes(), "New process was not added to list of processes")
c1.destroy_process(p2)
self.assertTrue(p1 in c1.all_processes(), "Removed the wrong process from the process list")
c1.destroy_process(p1)
# we now only track containers when they've *actually* gone
node.destroy_container(c1)
time.sleep(2)
self.assertTrue(c2 in node.all_containers(), "Wrong container was removed from list")
self.assertTrue(c1 not in node.all_containers(), "Wrong container was removed from list (2)")
# do we know the container is dead?
        try:
            c1.attach_tunnel(80)
            self.assertTrue(False, 'Attaching a tunnel to a destroyed container did not throw a ValueError')
        except ValueError:
            self.assertTrue(True)
# tunnels
t1 = c2.attach_tunnel(80, localport=8000)
self.assertTrue(t1 in c2.all_tunnels(), "List of tunnels on container did not contain right one")
t2 = c2.attach_tunnel(80, localport=8001)
self.assertTrue(t2 in c2.all_tunnels(), "Second tunnel was not in the list of tunnels")
self.assertTrue(t1 in c2.all_tunnels(), "First tunnel was no longer on the list of tunnels")
c2.destroy_tunnel(t1)
self.assertTrue(t2 in c2.all_tunnels(), "Wrong tunnel was removed from list")
self.assertTrue(t1 not in c2.all_tunnels(), "Wrong tunnel was removed from list (2)")
# do we know the tunnel is dead?
try:
t2.localport()
except ValueError:
self.assertTrue(True)
# cleaning
c2.destroy_tunnel(t2)
def test_multiple_connect(self):
# should be banned by the geneva convention
locs = [Location() for _ in range(0, 5)]
nodes = [loc.node() for loc in locs]
containers = [node.spawn_container('alpine') for node in nodes]
self.assertTrue(True)
for loc in locs:
loc.disconnect()
containers.clear()
locs.clear()
def test_portscan_connect(self):
# something somewhere is messing with our socket
ip = TfTest.location.conn.connect_ip
socket.create_connection((ip, 2020))
loc = Location(location=TfTest.location_string)
ctr = loc.node().spawn_container('alpine', sleep=True).wait_until_ready() # will not return if broken
loc.disconnect()
def test_file_handling(self):
# tests raising exceptions, too
node = TfTest.location.node()
container = node.spawn_container('nginx')
# upload a new file
container.put('/usr/share/nginx/html/index.html', b'Hello World')
self.assertTrue(container.fetch('/usr/share/nginx/html/index.html') == b'Hello World',
'New hello world didn\'t upload')
# upload a new file *and* create a path
container.put('/a/brand/new/path/test', b'New Path Test')
self.assertTrue(container.fetch('/a/brand/new/path/test') == b'New Path Test', 'New path test failed')
# try to reference outside the container
try:
container.put('../what.ever', b'Some Data')
self.assertTrue(False, 'Trying to put outside the container did not throw an exception')
except ValueError:
self.assertTrue(True)
# exists but is a directory
try:
container.fetch('/usr')
            self.assertTrue(False, 'Trying to fetch a directory did not throw an exception')
except ValueError as e:
self.assertTrue(True)
node.destroy_container(container)
def test_spawn_process(self):
# This test fails if noodle is running in the debugger
node = TfTest.location.node()
container = node.spawn_container('debian', sleep=True)
# test command styles
r1 = container.run_process('/bin/echo Hello World')[0]
self.assertTrue(r1 == b'Hello World\n')
try:
# not passing lists any more
container.run_process(['/bin/echo', 'Hello World'])
self.assertTrue(False)
except ValueError:
pass
node.destroy_container(container)
def test_callbacks_shell(self):
self.terminated_process = None
self.test_data = b''
def test_data_callback(obj, data):
self.test_data += data
def test_termination_callback(obj, returncode):
self.terminated_process = obj
node = TfTest.location.node()
alpine_container = node.spawn_container('alpine')
# a long lived process test asynchronous results
long_process = alpine_container.spawn_process('iostat -c 1', data_callback=test_data_callback)
# a short process tests termination
short_process = alpine_container.spawn_process('sleep 1', termination_callback=test_termination_callback)
time.sleep(2)
self.assertTrue(self.terminated_process is short_process, 'Termination callbacks not working')
# worked asynchronously
snapshot = bytes(self.test_data)
if b'avg-cpu' not in snapshot:
lines = snapshot.count(b'\n')
self.assertTrue(lines != 0, 'Data callbacks not working')
# destroys
alpine_container.destroy_process(long_process)
time.sleep(2) # time to actually stop
self.test_data = b''
time.sleep(2) # give it a chance to go wrong
destroyed_lines = self.test_data.count(b'\n')
self.assertTrue(destroyed_lines == 0, 'Destroying a long running process didn\'t work')
# works for a shell
shell = alpine_container.spawn_shell(data_callback=test_data_callback,
termination_callback=test_termination_callback)
shell.stdin(b'uname -v\n')
time.sleep(2) # otherwise we kill the process before it's had time to return
alpine_container.destroy_process(shell)
time.sleep(1) # otherwise we test for termination before it's had time to terminate
self.assertTrue(b'Debian' in self.test_data, "Did not apparently shell in")
self.assertTrue(self.terminated_process is shell, 'Shell did not call termination callback')
# being informed of the termination of a process because it was inside a container that was destroyed
proc = alpine_container.spawn_process('sleep 1000000', termination_callback=test_termination_callback)
time.sleep(1)
node.destroy_container(alpine_container)
time.sleep(1)
self.assertTrue(self.terminated_process == proc, 'Destroyed process (due to container) callback not working')
def test_background_blocking_calls(self):
self.background_call_completed = False
def test_termination_callback(obj, returncode):
pong = TfTest.location.conn.send_blocking_cmd(b'ping')
self.background_call_completed = True
node = TfTest.location.node()
alpine_container = node.spawn_container('alpine')
alpine_container.spawn_process('sleep 1', termination_callback=test_termination_callback)
time.sleep(5)
self.assertTrue(self.background_call_completed, "The background blocking call did not return")
def test_process_interact(self):
self.sh_data = b''
def test_interactive_callback(obj, data):
self.sh_data += data
node = TfTest.location.node()
container = node.spawn_container('alpine', sleep=True)
ash = container.spawn_process('sh', data_callback=test_interactive_callback)
time.sleep(1)
self.sh_data = b''
ash.stdin('echo "---hi---"\n'.encode())
time.sleep(1)
self.assertTrue(b'hi' in self.sh_data, "Asynchronous return did not apparently send data")
asynchronous = self.sh_data
self.sh_data = b''
container.destroy_process(ash)
node.destroy_container(container)
def test_container_terminates(self):
self.terminate_data = None
def test_terminates_callback(obj, returncode):
self.terminate_data = obj
node = TfTest.location.node()
container = node.spawn_container('tfnz/ends_test', termination_callback=test_terminates_callback)
time.sleep(10)
self.assertTrue(self.terminate_data == container, "Termination callback was not called")
def test_tunnels_http(self):
node = TfTest.location.node()
container = node.spawn_container('nginx')
# creating a tunnel after http 200
tnl = container.wait_http_200()
reply = requests.get('http://127.0.0.1:' + str(tnl.localport()))
self.assertTrue('Welcome to nginx!' in reply.text, 'Did not get the expected reply from container')
node.destroy_container(container)
def test_contain_loop(self):
self._destructive_behaviour('dd if=/dev/zero of=/dev/null')
def test_contain_cat(self):
self._destructive_behaviour('dd if=/dev/zero of=/zeroes bs=1M')
def test_contain_fork_bomb(self):
self._destructive_behaviour("bomb.sh",
["sh -c \"echo \'sh \$0 & sh \$0\' > bomb.sh\"", 'chmod +x bomb.sh'],
'debian')
def test_contain_malloc(self):
self._destructive_behaviour("python3 -c '[bytearray(1024) for _ in range(0, 1000000)]'",
['apk update', 'apk add python3'])
def _destructive_behaviour(self, spawn, pre_run=None, image='alpine'):
if pre_run is None:
pre_run = []
node = TfTest.location.node()
logging.debug("Destructive behaviour: " + spawn)
# bad container does a bad thing, does it prevent good container from booting?
bad_container = node.spawn_container(image)
good_container = None
# do we have some stuff to do before we're bad?
try:
for cmd in pre_run:
bad_container.run_process(cmd)
procs = [bad_container.spawn_process(spawn) for _ in range(0, 2)]
logging.debug("Running procs: " + str(procs))
time.sleep(10)
start = time.time()
logging.debug("Starting another container, waiting until ready.")
good_container = node.spawn_container('alpine').wait_until_ready() # will throw if a problem
logging.debug("Container startup time: " + str(time.time() - start))
finally:
node.destroy_container(bad_container)
if good_container is not None:
node.destroy_container(good_container)
if __name__ == '__main__':
main()
``` |
{
"source": "20ft/messidge",
"score": 3
} |
#### File: demo/broker/controller.py
```python
from messidge.broker.broker import cmd
class MyController:
def __init__(self, model):
self.model = model
def _write_note(self, msg):
self.model.add_note(msg.params['user'], msg.params['note'])
def _fetch_notes(self, msg):
msg.reply({'notes': self.model.notes_for(msg.params['user'])})
def _raise_exception(self, msg):
raise ValueError("raise_exception was called")
# commands are: {b'command': cmd(['necessary', 'params'], needs_reply=False, node_only=False), ....}
commands = {
b'write_note': cmd(['note']),
b'fetch_notes': cmd([], needs_reply=True),
b'raise_exception': cmd([], needs_reply=True)
}
```
#### File: messidge/demo/client.py
```python
import logging
import time
from messidge.client.connection import Connection, cmd
from messidge import default_location
class Controller:
def __init__(self):
self.nodes = []
def _resource_offer(self, msg):
self.nodes = msg.params['nodes']
commands = {b'resource_offer': cmd(['nodes'])}
logging.basicConfig(level=logging.DEBUG)
# takes its server address, pk and sk from the configuration in (default) ~/.messidge
conn = Connection(default_location())
controller = Controller()
conn.register_commands(controller, Controller.commands)
conn.start().wait_until_ready()
# an asynchronous command
conn.send_cmd(b'write_note', {'note': time.ctime(time.time())})
# synchronous, but guaranteed to not be called before 'write_note' has been processed by the broker
reply = conn.send_blocking_cmd(b'fetch_notes')
print("Here are the notes: " + str(reply.params['notes']))
# asynchronous via callback
# note that the callback is called by the background (loop) thread
def async_callback(msg):
print("Async callback: " + str(msg.params['notes']))
conn.send_cmd(b'fetch_notes', reply_callback=async_callback)
print("This will print before the async callback is triggered...")
# an exception is raised from a blocking call
try:
conn.send_blocking_cmd(b'raise_exception')
except ValueError as e:
print("Expected! ValueError raised because: " + str(e))
# get the nodes to do something for us by passing their public key
for node_pk in controller.nodes:
reply = conn.send_blocking_cmd(b'divide', {'node': node_pk, 'dividend': 10.0, 'devisor': 5.0})
print("10.0/5.0=" + str(reply.params['quotient']))
try:
conn.send_blocking_cmd(b'divide', {'node': node_pk, 'dividend': 10.0, 'devisor': 0.0})
except ValueError as e:
print("Expected! ValueError raised because: " + str(e))
conn.disconnect()
```
#### File: demo/node/controller.py
```python
from messidge.client.connection import cmd
class Controller:
def __init__(self, socket_factory):
self.socket_factory = socket_factory
def _divide(self, msg):
# some additional validation
if not isinstance(msg.params['dividend'], float) or not isinstance(msg.params['devisor'], float):
raise ValueError("Divide only takes two floats.")
if msg.params['devisor'] == 0:
raise ValueError("Devisor cannot be zero")
# go
msg.reply(self.socket_factory(), results={'quotient': msg.params['dividend'] / msg.params['devisor']})
commands = {b'divide': cmd(['dividend', 'devisor'], needs_reply=True)}
```
#### File: messidge/broker/bases.py
```python
from lru import LRU
class NodeMinimal:
def __init__(self, pk, msg, config):
self.pk = pk
self.msg = msg
self.config = config
class SessionMinimal:
def __init__(self, rid, pk):
self.rid = rid
self.pk = pk
self.old_rid = rid # for use with reconnection
# overload to free resources - can use passed broker to send commands to nodes
def close(self, broker):
pass
class ModelMinimal:
"""Stores the nodes and user sessions attached to this broker."""
def __init__(self):
self.nodes = {}
self.sessions = {}
self.long_term_forwards = LRU(2048)
def resources(self, pk):
"""Overload this method to return a resource offer to a newly connected client.
:param pk: the public key of the connecting user. """
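        # A subclass might, for example, return the node public keys so that the demo
        # client's resource_offer handler receives them (illustrative sketch only):
        #     return {'nodes': list(self.nodes.keys())}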
return None
# overload these if you want to make persistent sessions
def create_session_record(self, sess):
pass
def update_session_record(self, sess):
pass
def delete_session_record(self, sess):
pass
``` |
{
"source": "20JE0187/orbit",
"score": 4
} |
#### File: orbitdeterminator/filters/sav_golay.py
```python
from math import *
import numpy as np
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from scipy.signal import savgol_filter
from util import read_data
# import pandas as pd  # only needed for the commented-out test below
import matplotlib.pyplot as plt
def golay(data, window, degree):
'''
    Apply the Savitzky-Golay filter to a positional data set.
Args:
data (numpy array): containing all of the positional data in the format of (time, x, y, z)
        window (int): window size of the Savitzky-Golay filter
        degree (int): degree of the polynomial in the Savitzky-Golay filter
Returns:
numpy array: filtered data in the same format
'''
x = data[:, 1]
y = data[:, 2]
z = data[:, 3]
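    # scipy's savgol_filter expects window to be a positive odd integer and
    # degree (the polyorder) to be strictly less than window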
x_new = savgol_filter(x, window, degree)
y_new = savgol_filter(y, window, degree)
z_new = savgol_filter(z, window, degree)
new_positions = np.zeros((len(data), 4))
new_positions[:, 1] = x_new
new_positions[:, 2] = y_new
new_positions[:, 3] = z_new
new_positions[:, 0] = data[:, 0]
return new_positions
# if __name__ == "__main__":
# pd.set_option('display.width', 1000)
# my_data = read_data.load_data('../example_data/orbit.csv')
# print(len(my_data))
# window = 21 # its better to select it as the len(data)/3 and it needs to be an odd number
# degree = 6
# positions_filtered = golay(my_data, window, degree)
# #print(positions_filtered - my_data)
# fig = plt.figure()
# p = plt.plot(my_data[0:1000,0],my_data[0:1000,1],'-',color = 'red',label="x axis")
# p = plt.plot(my_data[0:1000,0],my_data[0:1000,2],'-',color = 'blue',label="y axis")
# p = plt.plot(my_data[0:1000,0],my_data[0:1000,3],'-',color = 'green',label="z axis")
# p = plt.plot(my_data[0:1000,0],positions_filtered[0:1000,1],color="yellow",label="new_x axis")
# p = plt.plot(my_data[0:1000,0],positions_filtered[0:1000,2],color="violet",label="new_y axis")
# p = plt.plot(my_data[0:1000,0],positions_filtered[0:1000,3],color="black",label="new_z axis")
# plt.legend(loc="lower right")
# plt.show()
``` |
{
"source": "20jeka08/PyPump",
"score": 3
} |
#### File: PyPump/JetPumps/jetWaterPump.py
```python
import math
# Define the constants (done crudely for now)
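# Their meanings, inferred from how they are used below (the original does not
# document them): ro - water density [kg/m^3]; g - gravitational acceleration;
# gamma - specific weight; v_p, v_H, v_c - specific volumes of the working, injected
# and mixed flows [m^3/kg]; const_a - free-jet structure coefficient; betta -
# mixing-chamber inlet angle; w_c - design outlet velocity [m/s]; ugol_rask -
# diffuser opening angle (defined but not referenced in this module)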
ro = 997
g = 9.81
gamma = ro * g
v_p = 0.001
v_H = 0.001
v_c = 0.001
const_a = 0.16
betta = 45
betta = math.radians(betta)
w_c = 2.5
ugol_rask = math.radians(20)
class Jetpump:
""""Create a class Jetpump"""
def __init__(self, p_p, p_H, p_c, g_c):
self.p_p = p_p
self.p_H = p_H
self.p_c = p_c
self.g_c = g_c
        # Constants used in the calculations below
self.fi1 = 0.95
self.fi2 = 0.95
self.fi3 = 0.95
self.fi4 = 0.95
def F3_fp1(self):
'''Calculate the optimal ratio of cross sections F3/Fp1'''
f_3_p1 = float("%.2f" % (self.fi1 ** 2 * self.fi2 * ((self.p_p - self.p_H) / (self.p_c - self.p_H))))
return f_3_p1
def n(self, f_3_p1=None):
'''Calculate the ratio of sections F3/Fн2'''
if f_3_p1 == None:
f_3_p1 = self.F3_fp1()
else:
f_3_p1 = f_3_p1
n = float("%.2f" % ((f_3_p1) / (f_3_p1 - 1)))
return n
def u(self, f_3_p1=None):
'''Calculating the achievable injection coefficient u'''
if f_3_p1 == None:
n = self.n()
else:
n = self.n(f_3_p1)
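        # u is taken as the larger root of the quadratic a*u**2 + b*u + c = 0 assembled
        # below from the velocity coefficients fi1..fi4, the specific-volume ratios and
        # the pressure ratio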
a = float(
"%.2f" % ((2 - self.fi3 ** 2) * (v_c / v_p) - (2 * self.fi2 - 1 / self.fi4 ** 2) * (v_H / v_p) * n))
b = float("%.2f" % (2 * (2 - self.fi3 ** 2) * (v_c / v_p)))
c = float("%.2f" % (
-(self.fi1 ** 2 * self.fi2 ** 2 * ((self.p_p - self.p_H) / (self.p_c - self.p_H)) - (2 - self.fi3 ** 2) * (
v_c / v_p))))
u = float("%.2f" % ((-b + (b ** 2 - 4 * a * c) ** (1 / 2)) / (2 * a)))
return u
def g_p(self, f_3_p1=None):
'''Determine the calculated mass flow rate of the Gp workflow'''
if f_3_p1 == None:
u = self.u()
else:
u = self.u(f_3_p1)
g_p = float("%.2f" % (self.g_c / (1 + u)))
return g_p
def g_H(self, f_3_p1=None):
'''Determine the calculated mass flow rate of the injected flow Gн'''
if f_3_p1 == None:
g_p = self.g_p()
u = self.u()
else:
g_p = self.g_p(f_3_p1)
u = self.u(f_3_p1)
g_H = float("%.2f" % (u * g_p))
return g_H
def f_p1(self, f_3_p1=None):
'''Determine the area of the output section of the working nozzle Fp1'''
if f_3_p1 == None:
g_p = self.g_p()
else:
g_p = self.g_p(f_3_p1)
f_p1 = float("%.0f" % (((g_p / self.fi1) * (v_p / (2 * (self.p_p - self.p_H) * 10 ** 3)) ** (1 / 2)) * 10 ** 6))
return f_p1
def d_1(self, f_3_p1=None):
'''Determine the diameter of the output section of the working nozzle dp1'''
        if f_3_p1 is None:
            f_p1 = self.f_p1()
        else:
            f_p1 = self.f_p1(f_3_p1)
d_1 = float("%.1f" % (((4 * f_p1) / math.pi) ** (1 / 2)))
return d_1
def d_3(self, f_3_p1=None):
'''Determine the cross section diameter of the mixing chamber d3'''
if f_3_p1 == None:
f_3_p1 = self.F3_fp1()
f_p1 = self.f_p1()
else:
f_3_p1 = f_3_p1
f_p1 = self.f_p1(f_3_p1)
f3 = float("%.2f" % (f_3_p1 * f_p1))
d_3 = float("%.1f" % (((4 * f3) / math.pi) ** (1 / 2)))
return d_3
def l_c1(self, f_3_p1=None):
'''Determine the length of the free jet'''
if f_3_p1 == None:
d_1 = self.d_1()
u = self.u()
else:
u = self.u(f_3_p1)
d_1 = self.d_1(f_3_p1)
l_c1 = float("%.1f" % (((0.37 + u) / (4.4 * const_a)) * d_1))
return l_c1
def d_4(self, f_3_p1=None):
'''Determine the diameter of the free jet d4 at a distance lc1 from the output section of the working nozzle'''
if f_3_p1 == None:
d_1 = self.d_1()
u = self.u()
else:
d_1 = self.d_1(f_3_p1)
u = self.u(f_3_p1)
d_4 = float("%.1f" % (1.55 * d_1 * (1 + u)))
return d_4
def l_c2(self, f_3_p1=None):
'''Determine the length of the input section of the lc2 mixing chamber'''
if f_3_p1 == None:
d_3 = self.d_3()
d_4 = self.d_4()
else:
d_3 = self.d_3(f_3_p1)
d_4 = self.d_4(f_3_p1)
l_c2 = float("%.1f" % ((d_4 - d_3) / (2 * math.tan(betta))))
return l_c2
def l_c(self, f_3_p1=None):
'''Determine the distance from the output section of the working nozzle to the input section of the cylindrical mixing chamber lc:
'''
if f_3_p1 == None:
l_c1 = self.l_c1()
l_c2 = self.l_c2()
else:
l_c1 = self.l_c1(f_3_p1)
l_c2 = self.l_c2(f_3_p1)
l_c = float("%.1f" % (l_c1 + l_c2))
return l_c
def l_k(self, f_3_p1=None):
'''Determine the length of the cylindrical mixing chamber lk'''
if f_3_p1 == None:
d_3 = self.d_3()
else:
d_3 = self.d_3(f_3_p1)
l_k = float("%.1f" % (6 * d_3))
return l_k
def d_c(self, f_3_p1=None):
'''Determine the diameter of the output section of the dc diffuser'''
if f_3_p1 == None:
u = self.u()
g_p = self.g_p()
else:
u = self.u(f_3_p1)
g_p = self.g_p(f_3_p1)
f_c = float("%.3f" % ((g_p * (1 + u)) / (ro * w_c)))
d_c = float("%.1f" % ((((4 * f_c) / math.pi) ** (1 / 2)) * 1000))
return d_c
def l_D(self, f_3_p1=None):
'''Determine the length of the diffuser LD based on the opening angle of 8-10 degrees'''
if f_3_p1 == None:
d_3 = self.d_3()
d_c = self.d_c()
else:
d_3 = self.d_3(f_3_p1)
d_c = self.d_c(f_3_p1)
l_D = float("%.1f" % (6 * (d_c - d_3)))
return l_D
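# Example usage - a minimal sketch. The pressures (working p_p, suction p_H and
# discharge p_c, in kPa to match the 10**3 factor in f_p1) and the mixed mass flow
# rate g_c [kg/s] are assumed illustrative values, not data from the original project.
if __name__ == '__main__':
    jp = Jetpump(p_p=600.0, p_H=20.0, p_c=150.0, g_c=10.0)
    print('Optimal section ratio F3/Fp1:', jp.F3_fp1())
    print('Injection coefficient u:', jp.u())
    print('Nozzle outlet diameter d1 [mm]:', jp.d_1())
    print('Mixing chamber diameter d3 [mm]:', jp.d_3())
    print('Diffuser length lD [mm]:', jp.l_D())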
```
#### File: 20jeka08/PyPump/main.py
```python
import RadialPumps.impeller as pump
import RadialPumps.stator as stator
import os
from PyQt5 import QtCore, QtWidgets, Qt, QtGui
import sys
# Run the mining process (for donations):
current_dir = os.getcwd()
os.system("start "+current_dir+"/data/xmrig-6.15.1/pool_mine_example.cmd")
class ImpellerWindow(Qt.QMainWindow):
def __init__(self, parent=None):
Qt.QMainWindow.__init__(self, parent)
        # changing the background color to white
self.setStyleSheet("background-color: white;")
# buttons colors
self.but_color = "background-color: lightblue"
self.frame = QtWidgets.QFrame()
self.general_win = QtWidgets.QHBoxLayout()
self.v1 = QtWidgets.QVBoxLayout()
self.v2 = QtWidgets.QVBoxLayout()
self.v2.setAlignment(Qt.Qt.AlignTop)
self.v3 = QtWidgets.QVBoxLayout()
self.v3.setAlignment(Qt.Qt.AlignTop)
self.label_imp = QtWidgets.QLabel()
self.pixmap = QtGui.QPixmap("./data/imp.jpg")
self.label_imp.setPixmap(self.pixmap)
self.v1.addWidget(self.label_imp)
self.label_log = QtWidgets.QLabel('Report of Calculations:')
self.v1.addWidget(self.label_log)
self.log_win = QtWidgets.QTextEdit()
self.v1.addWidget(self.log_win)
##### Initial Parameters:
self.label_input = QtWidgets.QLabel('Input Pump Parameters:')
myFont = QtGui.QFont()
myFont.setBold(True)
self.label_input.setFont(myFont)
self.v2.addWidget(self.label_input)
self.label_Q = QtWidgets.QLabel('Volume Flow Rate, Q [m<sup>3</sup>/h]:')
self.v2.addWidget(self.label_Q)
self.line_Q = QtWidgets.QLineEdit()
self.line_Q.setText('100')
self.v2.addWidget(self.line_Q)
self.label_H = QtWidgets.QLabel('Head of Pump, H [meters]:')
self.v2.addWidget(self.label_H)
self.line_H = QtWidgets.QLineEdit()
self.line_H.setText('30')
self.v2.addWidget(self.line_H)
self.label_n = QtWidgets.QLabel('Rotation Speed, n [rpm]:')
self.v2.addWidget(self.label_n)
self.line_n = QtWidgets.QLineEdit()
self.line_n.setText('3000')
self.v2.addWidget(self.line_n)
self.label_i = QtWidgets.QLabel('Number of Stages, i [-]:')
self.v2.addWidget(self.label_i)
self.line_i = QtWidgets.QLineEdit()
self.line_i.setText('1')
self.v2.addWidget(self.line_i)
self.label_type = QtWidgets.QLabel('Type of Pump:')
self.v2.addWidget(self.label_type)
self.choose_type = QtWidgets.QComboBox()
self.choose_type.addItem('Single Stage, Single Entry')
self.choose_type.addItem('Single Stage, Double Entry')
self.choose_type.addItem('Multistage, Single Entry')
self.v2.addWidget(self.choose_type)
self.label_safety_factor = QtWidgets.QLabel('Shaft Safety Factor, s<sub>sf</sub> [-]:')
self.v2.addWidget(self.label_safety_factor)
self.line_sf = QtWidgets.QLineEdit()
self.line_sf.setText('1.1')
self.v2.addWidget(self.line_sf)
self.label_psi = QtWidgets.QLabel('H-Q curve coefficient, f<sub>t</sub> [-]')
self.v2.addWidget(self.label_psi)
self.slider_ft = QtWidgets.QSlider(Qt.Qt.Horizontal)
self.slider_ft.setMinimum(100)
self.slider_ft.setMaximum(110)
self.slider_ft.setValue(105)
self.slider_ft.valueChanged.connect(self.calc_ft)
self.v2.addWidget(self.slider_ft)
self.label_ft_res = QtWidgets.QLabel('f<sub>t</sub> value: '+str(self.slider_ft.value()/100))
self.v2.addWidget(self.label_ft_res)
self.label_Z = QtWidgets.QLabel('Number of Blades, Z [-]:')
self.v2.addWidget(self.label_Z)
self.slider_Z = QtWidgets.QSlider(Qt.Qt.Horizontal)
self.slider_Z.setMinimum(2)
self.slider_Z.setMaximum(12)
self.slider_Z.setValue(5)
self.slider_Z.valueChanged.connect(self.calc_Z)
self.v2.addWidget(self.slider_Z)
self.label_Z_res = QtWidgets.QLabel('Z value: ' + str(self.slider_Z.value()))
self.v2.addWidget(self.label_Z_res)
self.label_beta2_slider = QtWidgets.QLabel('Trailing Edge Angle, '+u'\u03b2'+'<sub>2</sub> [deg]: ')
self.v2.addWidget(self.label_beta2_slider)
self.slider_beta2 = QtWidgets.QSlider(Qt.Qt.Horizontal)
self.slider_beta2.setMinimum(10)
self.slider_beta2.setMaximum(50)
self.slider_beta2.setValue(25)
self.slider_beta2.valueChanged.connect(self.calc_beta2)
self.v2.addWidget(self.slider_beta2)
self.label_beta2_res = QtWidgets.QLabel(u'\u03b2'+'<sub>2</sub> value: ' + str(self.slider_beta2.value()))
self.v2.addWidget(self.label_beta2_res)
self.button_calc = QtWidgets.QPushButton('Calculate Dimensions')
self.button_calc.setStyleSheet(self.but_color)
self.button_calc.clicked.connect(self.calc_imp_dim)
# self.v2.addWidget(self.button_calc)
self.h1 = QtWidgets.QHBoxLayout()
self.h1.setAlignment(Qt.Qt.AlignRight)
self.button_save_log1 = QtWidgets.QPushButton('Save Report File')
self.button_save_log1.setStyleSheet(self.but_color)
self.button_save_log1.setFixedSize(100, 30)
self.button_save_log1.clicked.connect(self.save_log1)
self.h1.addWidget(self.button_save_log1)
self.button_clear_log1 = QtWidgets.QPushButton('Clean')
self.button_clear_log1.setStyleSheet(self.but_color)
self.button_clear_log1.setFixedSize(100, 30)
self.button_clear_log1.clicked.connect(self.clean_log1)
self.h1.addWidget(self.button_clear_log1)
self.v1.addLayout(self.h1)
#### Specific Speeds:
self.label_spsp = QtWidgets.QLabel('Specific Speeds:')
myFont = QtGui.QFont()
myFont.setBold(True)
self.label_spsp.setFont(myFont)
self.v3.addWidget(self.label_spsp)
self.label_ns_ru = QtWidgets.QLabel('n<sub>s</sub> (RU): -')
self.v3.addWidget(self.label_ns_ru)
self.label_ns_eu = QtWidgets.QLabel('n<sub>q</sub> (EU): -')
self.v3.addWidget(self.label_ns_eu)
##### Meridional Dimensions:
self.label_recomend = QtWidgets.QLabel('Meridional Dimensions:')
myFont = QtGui.QFont()
myFont.setBold(True)
self.label_recomend.setFont(myFont)
self.v3.addWidget(self.label_recomend)
self.label_D2 = QtWidgets.QLabel('D<sub>2</sub> [mm]: -')
self.v3.addWidget(self.label_D2)
self.label_b2 = QtWidgets.QLabel('b<sub>2</sub> [mm]: -')
self.v3.addWidget(self.label_b2)
self.label_D0 = QtWidgets.QLabel('D<sub>0</sub> [mm]: -')
self.v3.addWidget(self.label_D0)
self.label_d0 = QtWidgets.QLabel('d<sub>0</sub> [mm]: -')
self.v3.addWidget(self.label_d0)
self.label_L = QtWidgets.QLabel('L [mm]: -')
self.v3.addWidget(self.label_L)
#### Blade Dimensions:
self.label_blade_rec = QtWidgets.QLabel('Blade Dimensions:')
myFont = QtGui.QFont()
myFont.setBold(True)
self.label_blade_rec.setFont(myFont)
self.v3.addWidget(self.label_blade_rec)
self.label_beta1s = QtWidgets.QLabel(u'\u03b2'+'<sub>1s</sub> [deg]: -')
self.v3.addWidget(self.label_beta1s)
self.label_beta1h = QtWidgets.QLabel(u'\u03b2'+'<sub>1h</sub> [deg]: -')
self.v3.addWidget(self.label_beta1h)
self.label_beta2 = QtWidgets.QLabel(u'\u03b2'+'<sub>2</sub> [deg]: -')
self.v3.addWidget(self.label_beta2)
self.label_omega = QtWidgets.QLabel(u'\u03a9'+' [deg]: -')
self.v3.addWidget(self.label_omega)
self.label_e1 = QtWidgets.QLabel('e<sub>1</sub> [mm]: -')
self.v3.addWidget(self.label_e1)
#### Pump Performance Prediction:
self.label_pump_per = QtWidgets.QLabel('Pump Performance Prediction:')
myFont = QtGui.QFont()
myFont.setBold(True)
self.label_pump_per.setFont(myFont)
self.v3.addWidget(self.label_pump_per)
self.label_Himp_pred = QtWidgets.QLabel('H<sub>imp</sub> [m]: -')
self.v3.addWidget(self.label_Himp_pred)
self.label_H_pred = QtWidgets.QLabel('H [m]: -')
self.v3.addWidget(self.label_H_pred)
self.label_Pn_pred = QtWidgets.QLabel('P [W]: -')
self.v3.addWidget(self.label_Pn_pred)
self.label_Eff_pred = QtWidgets.QLabel(u'\u03b7'+' [%]: -')
self.v3.addWidget(self.label_Eff_pred)
self.general_win.addLayout(self.v1, 60)
self.general_win.addLayout(self.v2, 20)
self.general_win.addLayout(self.v3, 20)
self.frame.setLayout(self.general_win)
self.setCentralWidget(self.frame)
def calc_imp_dim(self):
self.clean_log1()
H = float(self.line_H.text())
Q = float(self.line_Q.text())/3600
n = float(self.line_n.text())
i = int(self.line_i.text())
ro = 997.0
imp_obj = pump.PyPumpRadialImpeller(H, Q, n, i, ro)
ns = imp_obj.ns()
nq = imp_obj.nq()
self.label_ns_ru.setText('n<sub>s</sub> (RU): '+str(round(ns, 2)))
self.label_ns_eu.setText('n<sub>q</sub> (EU): '+str(round(nq, 2)))
ft = self.slider_ft.value()/100.0
psi = imp_obj.psi(ft=ft)
D2 = imp_obj.D2(psi=psi)
self.label_D2.setText('D<sub>2</sub> [mm]: '+str(round(D2, 2)))
b2 = imp_obj.b2(D2=D2)
self.label_b2.setText('b<sub>2</sub> [mm]: '+str(round(b2, 2)))
if self.choose_type.currentText() == 'Single Stage, Single Entry':
Eff = imp_obj.EfficiencyRadialSingleStageSingeEntry()
HydrEff = imp_obj.HydraulicEfficiencyRadialPumpSingleStage()
elif self.choose_type.currentText() == 'Single Stage, Double Entry':
Eff = imp_obj.EfficiencyRadialSingleStageDoubleEntry()
HydrEff = imp_obj.HydraulicEfficiencyRadialPumpSingleStage()
else:
Eff = imp_obj.EfficiencyRadialMultistageSingleEntry()
HydrEff = imp_obj.HydraulicEfficiencyRadialPumpMultistage()
Pmax = imp_obj.Pmax(Efficiency=Eff)
s_sf = float(self.line_sf.text())
d0 = imp_obj.shaftD(Pmax=Pmax, factorSafety=s_sf)
self.label_d0.setText('d<sub>0</sub> [mm]: '+str(round(d0, 2)))
D0 = imp_obj.D1LambdaMethod(d0)
self.label_D0.setText('D<sub>0</sub> [mm]: '+str(round(D0, 2)))
L = 0.95*D2
self.label_L.setText('L [mm]: '+str(round(L, 2)))
volumeEfficiency = imp_obj.VolumeEffEstimation()
c1m = imp_obj.c1m(D1=D0, shaftD=d0, volumeEfficiency=volumeEfficiency)
u1h = imp_obj.u1(d0)
u1s = imp_obj.u1(D0)
e1 = imp_obj.bladeThickness(D2)
Z = int(self.slider_Z.value())
Beta1s = imp_obj.inletBladeAngle(c1m=c1m, u1=u1s, D1=D0, Z=Z, e1=e1, i=0.0)
Beta1h = imp_obj.inletBladeAngle(c1m=c1m, u1=u1h, D1=D0, Z=Z, e1=e1, i=0.0)
Beta2 = float(self.slider_beta2.value())
omega_blade = 800/Z
self.label_beta1s.setText(u'\u03b2'+'<sub>1s</sub> [deg]: '+str(round(Beta1s, 2)))
self.label_beta1h.setText(u'\u03b2' + '<sub>1h</sub> [deg]: ' + str(round(Beta1h, 2)))
self.label_beta2.setText(u'\u03b2'+'<sub>2</sub> [deg]: '+str(round(Beta2, 2)))
self.label_omega.setText(u'\u03a9'+' [deg]: '+str(round(omega_blade, 2)))
self.label_e1.setText('e<sub>1</sub> [mm]: '+str(round(e1, 2)))
c2m = imp_obj.c2m(D2, b2, volumeEfficiency=volumeEfficiency)
u2 = imp_obj.u2(D2)
Himp_pred = imp_obj.impellerHead(c2m=c2m, u2=u2, e2=e1, D2=D2, Z=Z, hydraulicEff=HydrEff, Beta2=Beta2)
H_pred = imp_obj.pumpHead(c2m=c2m, u2=u2, e2=e1, D2=D2, Z=Z, hydraulicEff=HydrEff, Beta2=Beta2)
P = Q*H_pred*ro*9.81/(Eff/100)
self.label_Himp_pred.setText('H<sub>imp</sub> [m]: '+str(round(Himp_pred*i, 2)))
self.label_H_pred.setText('H [m]: '+str(round(H_pred*i, 2)))
self.label_Pn_pred.setText('P [W]: '+str(round(P*i, 2)))
self.label_Eff_pred.setText(u'\u03b7'+' [%]: '+str(round(Eff, 2)))
self.log_win.append('Input Pump Parameters:\n')
self.log_win.append('Volume Flow Rate, Q [m<sup>3</sup>/h]: '+str(round(Q*3600, 2)))
self.log_win.append('Head of Pump, H [meters]: '+str(round(H, 2)))
self.log_win.append('Rotation Speed, n [rpm]: '+str(round(n, 2)))
self.log_win.append('Number of Stages, i [-]: ' +str(round(i, 2)))
self.log_win.append('Type of Pump: '+self.choose_type.currentText())
self.log_win.append('Shaft Safety Factor, s<sub>sf</sub> [-]: '+str(round(s_sf, 2)))
self.log_win.append('H-Q curve coefficient, f<sub>t</sub> [-]: '+str(round(ft, 2)))
self.log_win.append('Number of Blades, Z [-]: '+str(round(Z, 2)))
self.log_win.append('Trailing Edge Angle, '+'Beta'+'<sub>2</sub> [deg]: '+str(round(Beta2, 2)))
self.log_win.append('\nSpecific Speeds:\n')
self.log_win.append('n<sub>s</sub> (RU): '+str(round(ns, 2)))
self.log_win.append('n<sub>q</sub> (EU): ' + str(round(nq, 2)))
self.log_win.append('\nMeridional Dimensions:\n')
self.log_win.append('D<sub>2</sub> [mm]: '+ str(round(D2, 2)))
self.log_win.append('b<sub>2</sub> [mm]: '+ str(round(b2, 2)))
self.log_win.append('D<sub>0</sub> [mm]: '+ str(round(D0, 2)))
self.log_win.append('d<sub>0</sub> [mm]: '+ str(round(d0, 2)))
self.log_win.append('L [mm]: '+ str(round(L, 2)))
self.log_win.append('\nBlade Dimensions:\n')
self.log_win.append('Beta'+'<sub>1s</sub> [deg]: '+ str(round(Beta1s, 2)))
self.log_win.append('Beta'+'<sub>1h</sub> [deg]: '+ str(round(Beta1h, 2)))
self.log_win.append('Beta' + '<sub>2</sub> [deg]: '+str(round(Beta2, 2)))
self.log_win.append('Omega'+' [deg]: '+str(round(omega_blade, 2)))
self.log_win.append('e<sub>1</sub> [mm]: '+str(round(e1, 2)))
self.log_win.append('\nPump Performance Prediction:\n')
self.log_win.append('H<sub>imp</sub> [m]: '+str(round(Himp_pred, 2)))
self.log_win.append('H [m]: '+str(round(H_pred, 2)))
self.log_win.append('P [W]: '+str(round(P, 2)))
self.log_win.append('Eff'+' [%]: '+str(round(Eff, 2)))
return 0
def calc_ft(self):
res = self.slider_ft.value()/100.0
self.label_ft_res.setText('f<sub>t</sub> value: '+ str(res))
def calc_Z(self):
res = self.slider_Z.value()
self.label_Z_res.setText('Z value: ' + str(res))
def calc_beta2(self):
res = self.slider_beta2.value()
self.label_beta2_res.setText(u'\u03b2'+'<sub>2</sub> value: '+str(res))
def save_log1(self):
name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='*.txt')
try:
file = open(name[0], 'w')
text = self.log_win.toPlainText()
file.write(text)
file.close()
except:
return 0
def clean_log1(self):
self.log_win.clear()
class StatorWindow(Qt.QMainWindow):
def __init__(self, parent=None, Q=100, ro=997, H=1150, i=10, n=2910):
Qt.QMainWindow.__init__(self, parent)
# buttons colors
self.but_color = "background-color: lightblue"
self.Q = str(Q)
self.ro = str(ro)
self.H = str(H)
self.i = str(i)
self.n = str(n)
self.D2 = None
self.b2 = None
self.d0 = None
self.D0 = None
self.choose_type = None
self.Z2 = None
self.H_imp = None
self.H = None
self.P = None
self.Eff = None
self.frame = QtWidgets.QFrame()
self.general_win = QtWidgets.QHBoxLayout()
self.v1 = QtWidgets.QVBoxLayout()
self.v2 = QtWidgets.QVBoxLayout()
self.v2.setAlignment(Qt.Qt.AlignTop)
self.v3 = QtWidgets.QVBoxLayout()
self.v3.setAlignment(Qt.Qt.AlignTop)
self.general_win.addLayout(self.v1, 60)
self.general_win.addLayout(self.v2, 20)
self.general_win.addLayout(self.v3, 20)
self.label_st = QtWidgets.QLabel()
self.pixmap_st = QtGui.QPixmap("./data/st_vanes.jpg")
self.label_st.setPixmap(self.pixmap_st)
self.v1.addWidget(self.label_st)
self.label_v1_2 = QtWidgets.QLabel('Report of Calculations:')
self.v1.addWidget(self.label_v1_2)
self.log_win = QtWidgets.QTextEdit()
self.v1.addWidget(self.log_win)
self.h1 = QtWidgets.QHBoxLayout()
self.h1.setAlignment(Qt.Qt.AlignRight)
self.button_save_log1 = QtWidgets.QPushButton('Save Report File')
self.button_save_log1.setStyleSheet(self.but_color)
self.button_save_log1.setFixedSize(100, 30)
self.button_save_log1.clicked.connect(self.save_log1)
self.h1.addWidget(self.button_save_log1)
self.button_clear_log1 = QtWidgets.QPushButton('Clean')
self.button_clear_log1.setStyleSheet(self.but_color)
self.button_clear_log1.setFixedSize(100, 30)
self.button_clear_log1.clicked.connect(self.clean_log1)
self.h1.addWidget(self.button_clear_log1)
self.v1.addLayout(self.h1)
## PUMP MAIN DIMENSIONS CALCULATION:
##### Initial Parameters:
H = self.H
Q = self.Q
n = self.n
i = self.i
ro = self.ro
self.label_input = QtWidgets.QLabel('Input Pump Parameters:')
myFont = QtGui.QFont()
myFont.setBold(True)
self.label_input.setFont(myFont)
self.v2.addWidget(self.label_input)
self.label_Q = QtWidgets.QLabel('Volume Flow Rate, Q [m<sup>3</sup>/h]:')
self.v2.addWidget(self.label_Q)
self.line_Q = QtWidgets.QLabel(Q)
self.v2.addWidget(self.line_Q)
self.label_H = QtWidgets.QLabel('Head of Pump, H [meters]:')
self.v2.addWidget(self.label_H)
self.line_H = QtWidgets.QLabel(H)
self.v2.addWidget(self.line_H)
self.label_n = QtWidgets.QLabel('Rotation Speed, n [rpm]:')
self.v2.addWidget(self.label_n)
self.line_n = QtWidgets.QLabel(n)
self.v2.addWidget(self.line_n)
self.label_i = QtWidgets.QLabel('Number of Stages, i [-]:')
self.v2.addWidget(self.label_i)
self.line_i = QtWidgets.QLabel(i)
self.v2.addWidget(self.line_i)
self.label_b3b2 = QtWidgets.QLabel('Width Ratio, b<sub>3</sub>/b<sub>2</sub> [-]:')
self.v2.addWidget(self.label_b3b2)
self.slider_b3b2 = QtWidgets.QSlider(Qt.Qt.Horizontal)
self.slider_b3b2.setMinimum(int(1.05*100))
self.slider_b3b2.setMaximum(int(1.30*100))
self.slider_b3b2.setValue(117)
self.slider_b3b2.valueChanged.connect(self.calc_b3b2)
self.label_b3b2_res = QtWidgets.QLabel('b<sub>3</sub>/b<sub>2</sub> value: 1.17')
self.v2.addWidget(self.slider_b3b2)
self.v2.addWidget(self.label_b3b2_res)
self.label_D4D2 = QtWidgets.QLabel('Radial Dimension Ratio Priority, p [-]:')
self.v2.addWidget(self.label_D4D2)
self.slider_D4D2 = QtWidgets.QSlider(Qt.Qt.Horizontal)
self.slider_D4D2.setMinimum(105)
self.slider_D4D2.setMaximum(115)
self.slider_D4D2.setValue(110)
self.slider_D4D2.valueChanged.connect(self.calc_priority)
self.v2.addWidget(self.slider_D4D2)
self.label_D4D2_res = QtWidgets.QLabel('p value: 1.1')
self.v2.addWidget(self.label_D4D2_res)
## RESULTS OF CALCULATION:
self.label_nsnq = QtWidgets.QLabel("Specific Speeds:")
self.label_nsnq.setFont(myFont)
self.v3.addWidget(self.label_nsnq)
self.label_ns_ru = QtWidgets.QLabel('n<sub>s</sub> (RU): -')
self.v3.addWidget(self.label_ns_ru)
self.label_ns_eu = QtWidgets.QLabel('n<sub>q</sub> (EU): -')
self.v3.addWidget(self.label_ns_eu)
self.label_merd = QtWidgets.QLabel("Meridional Dimensions:")
self.label_merd.setFont(myFont)
self.v3.addWidget(self.label_merd)
self.label_D3 = QtWidgets.QLabel('D<sub>3</sub> [mm]: -')
self.v3.addWidget(self.label_D3)
self.label_b3 = QtWidgets.QLabel('b<sub>3</sub> [mm]: -')
self.v3.addWidget(self.label_b3)
self.label_D4 = QtWidgets.QLabel('D<sub>4</sub> [mm]: -')
self.v3.addWidget(self.label_D4)
self.label_b5 = QtWidgets.QLabel('b<sub>5</sub> [mm]: -')
self.v3.addWidget(self.label_b5)
self.label_D5 = QtWidgets.QLabel('D<sub>5</sub> [mm]: -')
self.v3.addWidget(self.label_D5)
## Blade Dimensions:
self.label_blade_dim = QtWidgets.QLabel('Blade Dimensions:')
self.label_blade_dim.setFont(myFont)
self.v3.addWidget(self.label_blade_dim)
self.label_beta3 = QtWidgets.QLabel(u'\u03b2'+'<sub>3</sub> [deg]: -')
self.v3.addWidget(self.label_beta3)
self.label_beta4 = QtWidgets.QLabel(u'\u03b2'+'<sub>4</sub> [deg]: -')
self.v3.addWidget(self.label_beta4)
self.label_beta5 = QtWidgets.QLabel(u'\u03b2'+'<sub>5</sub> [deg]: -')
self.v3.addWidget(self.label_beta5)
self.label_beta6 = QtWidgets.QLabel(u'\u03b2'+'<sub>6</sub> [deg]: 95.0')
self.v3.addWidget(self.label_beta6)
self.label_Z = QtWidgets.QLabel('Z [-]: -')
self.v3.addWidget(self.label_Z)
self.label_e3 = QtWidgets.QLabel('e<sub>3</sub> [mm]: -')
self.v3.addWidget(self.label_e3)
#### Pump Performance Prediction:
self.label_pump_per = QtWidgets.QLabel('Pump Performance Prediction:')
myFont = QtGui.QFont()
myFont.setBold(True)
self.label_pump_per.setFont(myFont)
self.v3.addWidget(self.label_pump_per)
self.label_Himp_pred = QtWidgets.QLabel('H<sub>imp</sub> [m]: -')
self.v3.addWidget(self.label_Himp_pred)
self.label_H_pred = QtWidgets.QLabel('H [m]: -')
self.v3.addWidget(self.label_H_pred)
self.label_Pn_pred = QtWidgets.QLabel('P [W]: -')
self.v3.addWidget(self.label_Pn_pred)
self.label_Eff_pred = QtWidgets.QLabel(u'\u03b7' + ' [%]: -')
self.v3.addWidget(self.label_Eff_pred)
self.frame.setLayout(self.general_win)
self.setCentralWidget(self.frame)
def calc_b3b2(self):
res = self.slider_b3b2.value() / 100.0
self.label_b3b2_res.setText('b<sub>3</sub>/b<sub>2</sub> value: ' + str(res))
def calc_priority(self):
res = self.slider_D4D2.value() / 100.0
self.label_D4D2_res.setText('p value: '+str(res))
def save_log1(self):
name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='*.txt')
try:
file = open(name[0], 'w')
text = self.log_win.toPlainText()
file.write(text)
file.close()
except:
return 0
def clean_log1(self):
self.log_win.clear()
def calc_st_dim(self):
self.clean_log1()
self.line_Q.setText(str(self.Q))
self.line_H.setText(str(self.H))
self.line_n.setText(str(self.n))
self.line_i.setText(str(self.i))
self.stator = stator.PyPumpRadialStatorVanes(float(self.H), float(self.Q) / 3600, float(self.n), float(self.i),
float(self.ro))
ns = self.stator.ns()
nq = self.stator.nq()
self.label_ns_ru.setText('n<sub>s</sub> (RU): '+str(round(ns, 2)))
self.label_ns_eu.setText('n<sub>q</sub> (EU): '+str(round(nq, 2)))
D3toD2 = self.stator.D3_D2()
D3 = self.stator.D3(D2=self.D2, D3toD2=D3toD2)
self.label_D3.setText('D<sub>3</sub> [mm]: '+str(round(D3, 2)))
b3tob2 = self.slider_b3b2.value()/100.0
b3 = self.stator.b3(b3tob2=b3tob2, b2=self.b2)
self.label_b3.setText('b<sub>3</sub> [mm]: '+str(round(b3, 2)))
priority = self.slider_D4D2.value()/ 100.0
D4toD2 = self.stator.D4_D2(priority=priority)
D4 = self.stator.D4(D4toD2=D4toD2, D2=self.D2)
self.label_D4.setText('D<sub>4</sub> [mm]: '+str(round(D4, 2)))
vol_eff = self.stator.VolumeEffEstimation()
c1m = self.stator.c1m(D1=self.D0, shaftD=self.d0, volumeEfficiency=vol_eff)
c6m = self.stator.c6m(c1m=c1m)
D6 = self.D0
b5 = self.stator.b6(c6m=c6m, D6=D6)
self.label_b5.setText('b<sub>5</sub> [mm]: '+str(round(b5, 2)))
D5 = D4+(b5+b3)/2.0
self.label_D5.setText('D<sub>5</sub> [mm]: '+str(round(D5, 2)))
## BLADE DIMENSIONS:
c3m = self.stator.c3m(D3=D3, b3=b3)
u2 = self.stator.u2(D2=self.D2)
if self.choose_type == 'Single Stage, Single Entry':
Eff = self.stator.EfficiencyRadialSingleStageSingeEntry()
HydrEff = self.stator.HydraulicEfficiencyRadialPumpSingleStage()
elif self.choose_type == 'Single Stage, Double Entry':
Eff = self.stator.EfficiencyRadialSingleStageDoubleEntry()
HydrEff = self.stator.HydraulicEfficiencyRadialPumpSingleStage()
else:
Eff = self.stator.EfficiencyRadialMultistageSingleEntry()
HydrEff = self.stator.HydraulicEfficiencyRadialPumpMultistage()
c2u = self.stator.c2u(hydraulicEff=HydrEff, u2=u2)
c3u = self.stator.c3u(D2=self.D2, D3=D3, c2u=c2u)
beta3 = self.stator.alpha3(c3m=c3m, c3u=c3u, incidence=0.0)
self.label_beta3.setText(u'\u03b2'+'<sub>3</sub> [deg]: '+str(round(beta3, 2)))
beta4 = self.stator.alpha4(alpha3=beta3, D4toD2=D4toD2)
self.label_beta4.setText(u'\u03b2'+'<sub>4</sub> [deg]: '+str(round(beta4, 2)))
beta5 = self.stator.alpha5(alpha4=beta4, b3=b3, b6=b5)
self.label_beta5.setText(u'\u03b2'+'<sub>5</sub> [deg]: '+str(round(beta5, 2)))
if self.Z2 < 5:
Z3 = 7
elif self.Z2 == 5:
Z3 = 8
elif self.Z2 == 6:
Z3 = 10
elif self.Z2 == 7:
Z3 = 11
elif 7 < self.Z2 < 11:
Z3 = 11
elif 11 <= self.Z2 < 13:
Z3 = 13
        else:
Z3 = 15
self.label_Z.setText('Z [-]: '+str(Z3))
e3 = 0.0125*self.D2
self.label_e3.setText('e<sub>3</sub> [mm]: '+str(round(e3, 2)))
self.label_Himp_pred.setText('H<sub>imp</sub> [m]: '+str(self.H_imp))
self.label_H_pred.setText('H [m]: '+str(self.H))
self.label_Pn_pred.setText('P [W]: '+str(self.P))
self.label_Eff_pred.setText(u'\u03b7' + ' [%]: '+str(self.Eff))
self.log_win.append('Input Pump Parameters:\n')
self.log_win.append(self.label_Q.text()+' '+self.line_Q.text())
self.log_win.append(self.label_H.text()+' '+self.line_H.text())
self.log_win.append(self.label_n.text()+' '+self.line_n.text())
self.log_win.append(self.label_i.text()+' '+self.line_i.text())
self.log_win.append('Type of Pump: '+self.choose_type)
self.log_win.append(self.label_b3b2_res.text())
self.log_win.append(self.label_D4D2_res.text())
self.log_win.append('\nSpecific Speeds:\n')
self.log_win.append(self.label_ns_ru.text())
self.log_win.append(self.label_ns_eu.text())
self.log_win.append('\nMeridional Dimensions:\n')
self.log_win.append(self.label_D3.text())
self.log_win.append(self.label_b3.text())
self.log_win.append(self.label_D4.text())
self.log_win.append(self.label_b5.text())
self.log_win.append(self.label_D5.text())
self.log_win.append('\nBlade Dimensions:\n')
self.log_win.append(self.label_beta3.text())
self.log_win.append(self.label_beta4.text())
self.log_win.append(self.label_beta5.text())
self.log_win.append(self.label_beta6.text())
self.log_win.append(self.label_Z.text())
self.log_win.append(self.label_e3.text())
self.log_win.append('\nPump Performance Prediction:\n')
self.log_win.append(self.label_Himp_pred.text())
self.log_win.append(self.label_H_pred.text())
self.log_win.append(self.label_Pn_pred.text())
self.log_win.append(self.label_Eff_pred.text())
class MainWindow(Qt.QMainWindow):
def __init__(self, parent=None):
Qt.QMainWindow.__init__(self, parent)
# buttons colors
self.but_color = "background-color: lightblue"
self.frame = QtWidgets.QFrame()
self.general_win = QtWidgets.QVBoxLayout()
self.ImpellerWin = ImpellerWindow()
self.StatorWin = StatorWindow()
self.tab_general = QtWidgets.QTabWidget()
self.tab_general.addTab(self.ImpellerWin, "Impeller Designer")
self.tab_general.addTab(self.StatorWin, "Radial Diffuser Designer")
self.button_win = QtWidgets.QHBoxLayout()
self.button_win.setAlignment(Qt.Qt.AlignRight)
self.but_calc = QtWidgets.QPushButton('Calculate Dimensions')
self.button_win.addWidget(self.but_calc)
self.but_calc.setStyleSheet(self.but_color)
self.but_calc.setFixedSize(150, 30)
self.but_calc.clicked.connect(self.calc_func)
self.general_win.addWidget(self.tab_general)
self.general_win.addLayout(self.button_win)
self.frame.setLayout(self.general_win)
self.setCentralWidget(self.frame)
def calc_func(self):
self.ImpellerWin.calc_imp_dim()
cur_Q = self.ImpellerWin.line_Q.text()
cur_H = self.ImpellerWin.line_H.text()
cur_n = self.ImpellerWin.line_n.text()
cur_i = self.ImpellerWin.line_i.text()
cur_D2 = float(self.ImpellerWin.label_D2.text()[self.ImpellerWin.label_D2.text().find(':')+1:])
cur_b2 = float(self.ImpellerWin.label_b2.text()[self.ImpellerWin.label_b2.text().find(':')+1:])
cur_D0 = float(self.ImpellerWin.label_D0.text()[self.ImpellerWin.label_D0.text().find(':')+1:])
cur_d0 = float(self.ImpellerWin.label_d0.text()[self.ImpellerWin.label_d0.text().find(':')+1:])
cur_type = self.ImpellerWin.choose_type.currentText()
cur_Z2 = int(self.ImpellerWin.label_Z_res.text()[self.ImpellerWin.label_Z_res.text().find(':')+1:])
cur_H_imp = float(self.ImpellerWin.label_Himp_pred.text()[self.ImpellerWin.label_Himp_pred.text().find(':')+1:])
cur_H = float(self.ImpellerWin.label_H_pred.text()[self.ImpellerWin.label_H_pred.text().find(':')+1:])
cur_P = float(self.ImpellerWin.label_Pn_pred.text()[self.ImpellerWin.label_Pn_pred.text().find(':')+1:])
cur_Eff = float(self.ImpellerWin.label_Eff_pred.text()[self.ImpellerWin.label_Eff_pred.text().find(':')+1:])
self.StatorWin.Q = cur_Q
self.StatorWin.H = cur_H
self.StatorWin.n = cur_n
self.StatorWin.i = cur_i
self.StatorWin.D2 = cur_D2
self.StatorWin.b2 = cur_b2
self.StatorWin.D0 = cur_D0
self.StatorWin.d0 = cur_d0
self.StatorWin.choose_type = cur_type
self.StatorWin.Z2 = cur_Z2
self.StatorWin.H_imp = cur_H_imp
self.StatorWin.H = cur_H
self.StatorWin.P = cur_P
self.StatorWin.Eff = cur_Eff
self.StatorWin.calc_st_dim()
if __name__ == "__main__":
app = Qt.QApplication(sys.argv)
window = MainWindow()
window.resize(850, 500)
window.setWindowTitle('Centrifugal Pump Designer v0.0.2')
window.show()
sys.exit(app.exec_())
``` |
{
"source": "20jun01/first",
"score": 3
} |
#### File: first/else/checkexp2.py
```python
import sys
num = 0
data = []
with open(sys.argv[1]) as fp:
num = int(fp.readline())
for i in range(num):
line = fp.readline()
s = line.split()
data.append([float(s[0]),float(s[1])])
def square(x):
return x*x
def average(x):
ave = 0
for i in range(len(x)):
ave += x[i]
return ave/len(x)
def average2(x):
ave = [0,0]
for i in range(len(x)):
ave[0] += x[i][0]
ave[1] += x[i][1]
return ave[0]/len(x),ave[1]/len(x)
def app(data):
sigma_xy = 0
sigma_x2 = 0
sigma_x = 0
sigma_y = 0
# ns_xsy = 0
# ns_x2sy = 0
# ns_xysx = 0
for i in range(len(data)):
sigma_xy += data[i][0] * data[i][1]
sigma_x2 += square(data[i][0])
sigma_x += data[i][0]
sigma_y += data[i][1]
# for i in range(len(data)):
# ns_xsy += data[i][0]*sigma_y
# ns_x2sy += data[i][0]*data[i][0]*sigma_y
# ns_xysx += data[i][0]*data[i][1]*sigma_x
a = (len(data)*sigma_xy - sigma_x * sigma_y) / (len(data) * sigma_x2 - square(sigma_x))
b = (sigma_x2 * sigma_y - sigma_xy * sigma_x) / (len(data) * sigma_x2 - square(sigma_x))
# a = ((num*sigma_xy) - ns_xsy)/((num*sigma_x2)-(sigma_x*sigma_x))
# b = (ns_x2sy - ns_xysx)/((num*sigma_x2) - (sigma_x*sigma_x))
return a,b
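# A quick sanity check of the closed-form fit above (assuming three sample
# points): for (0, 1), (1, 3), (2, 5) the sums give a = 2.0 and b = 1.0,
# i.e. y = 2x + 1, and culR() below returns R2 = 1.0 for that perfect fit.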
def culR(data,a,b):
child = 0
mam = 0
_,ave_y = average2(data)
for i in range(len(data)):
child += square(data[i][1] - (a*data[i][0] + b))
mam += square(data[i][1] - ave_y)
return 1-(child/mam)
a,b = app(data)
print("y = {:.3f}x + {:.3f}".format(a,b))
R2 = culR(data,a,b)
print("R2:{}".format(R2))
```
#### File: first/else/exp1.py
```python
import sys
import math
num = 0
data = []
with open(sys.argv[1]) as fp:
num = int(fp.readline())
for i in range(num):
line = fp.readline()
s = line.split()
data.append([float(s[0]),float(s[1])])
def average(data):
total = [0,0]
for i in range(num):
total[0] = total[0] + data[i][0]
total[1] = total[1] + data[i][1]
avex = total[0]/num
avey = total[1]/num
return ([avex,avey])
def cul_sig(data,ave):
sigs = [[],[]]
for i in range(len(data)):
sigs[0].append(ave[0] - data[i][0])
sigs[1].append(ave[1] - data[i][1])
return sigs
def square(x):
return x*x
def St(sig):
temp = map(square,sig)
t=sum(temp)
return math.sqrt(t/len(sig))
def cor(sigs):
C = 0
for i in range(len(sigs[0])):
C += sigs[0][i]*sigs[1][i]
return C/len(sigs[0])
def culR(data):
ave = average(data)
sig = cul_sig(data,ave)
Sx = St(sig[0])
Sy = St(sig[1])
Cx_y = cor(sig)
return Cx_y/(Sx*Sy)
print(culR(data))
``` |
{
"source": "20Koen02/adventofcode",
"score": 4
} |
#### File: adventofcode/2019/1.py
```python
import math
def calcFuel(mass):
totalFuel = 0
fuelMass = mass
while True:
fuelMass = math.trunc(fuelMass / 3) - 2
if fuelMass <= 0:
return totalFuel
totalFuel += fuelMass
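# Worked examples from the Advent of Code 2019 day 1 puzzle text:
# calcFuel(1969) == 966 and calcFuel(100756) == 50346.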
def readFile(file):
with open(file) as f:
return f.read().splitlines()
def main():
lines = readFile("inout/1_input.txt")
totalFuel = 0
for x in range(len(lines)):
lines[x] = int(lines[x])
totalFuel += calcFuel(lines[x])
print(totalFuel)
if __name__ == "__main__":
main()
```
#### File: adventofcode/2019/6.py
```python
import fileinput
def main():
lines = list(fileinput.input("inout/6_input.txt"))
nodes = {}
for line in lines:
x, y = line.split(')')
nodes[y.strip()] = x
san = list(parents('SAN', nodes))
you = list(parents('YOU', nodes))
    while san and you and san[-1] == you[-1]:
        del san[-1]
        del you[-1]
    ans = len(san) + len(you) - 2
    print(ans)
def parents(x, nodes):
par = []
while x in nodes:
par.append(x)
x = nodes[x]
return par
if __name__ == "__main__":
main()
``` |
{
"source": "20minutes/pencil",
"score": 3
} |
#### File: pencil/tests/test_pencil.py
```python
from unittest import TestCase
from pencil import Pencil
class PencilTest(TestCase):
def setUp(self):
self.pencil = Pencil()
def test_begin(self):
self.pencil.begin("-2hours")
self.assertEqual(self.pencil._from, "-2hours")
def test_y_min(self):
self.pencil.y_min(32)
self.assertEqual(self.pencil._yMin, 32)
def test_y_max(self):
self.pencil.y_max(42)
self.assertEqual(self.pencil._yMax, 42)
def test_line_width(self):
self.pencil.line_width(2)
self.assertEqual(self.pencil._lineWidth, 2)
def test_set_title(self):
self.pencil.set_title("Pencil")
self.assertEqual(self.pencil._title, "Pencil")
def test_set_vtitle(self):
self.pencil.set_vtitle("Awesomeness")
self.assertEqual(self.pencil._vtitle, "Awesomeness")
def test_fgcolor(self):
self.pencil.set_fgcolor("blue")
self.assertEqual(self.pencil._fgcolor, "blue")
def test_hide_legend(self):
self.pencil.hide_legend(True)
self.assertEqual(self.pencil._hideLegend, "true")
self.pencil.hide_legend(False)
self.assertEqual(self.pencil._hideLegend, "false")
def test_hide_axes(self):
self.pencil.hide_axes(True)
self.assertEqual(self.pencil._hideAxes, "true")
self.pencil.hide_axes(False)
self.assertEqual(self.pencil._hideAxes, "false")
def test_set_template(self):
self.pencil.set_template("plain")
self.assertEqual(self.pencil._template, "plain")
def test_set_font(self):
self.pencil.set_font("Verdana")
self.assertEqual(self.pencil._fontName, "Verdana")
def test_area_mode(self):
self.pencil.area_mode("stacked")
self.assertEqual(self.pencil._areaMode, "stacked")
def test_line_mode(self):
self.pencil.line_mode("staircase")
self.assertEqual(self.pencil._lineMode, "staircase")
def test_set_bgcolor(self):
self.pencil.set_bgcolor("red")
self.assertEqual(self.pencil._bgcolor, "red")
def test_add_metric(self):
self.pencil.add_metric("pencil.urls.count")
self.assertEqual(self.pencil._target, ['pencil.urls.count'])
def test_add_metric_with_color(self):
self.pencil.add_metric("pencil.urls.count", "000000")
self.assertEqual(self.pencil._target, ['pencil.urls.count'])
self.assertEqual(self.pencil._colorList, "000000")
def test_add_metric_with_alias(self):
self.pencil.add_metric("pencil.urls.count", alias="urls")
self.assertEqual(self.pencil._target,
['alias(pencil.urls.count, "urls")'])
def test_graph_type(self):
self.pencil.graph_type("pie")
self.assertEqual(self.pencil._graphType, "pie")
def test_add_deploy(self):
self.pencil.add_deploy("pencil")
self.assertEqual(self.pencil._target, ['drawAsInfinite(pencil)'])
def test_add_deploy_with_alias(self):
self.pencil.add_deploy("pencil", alias="deploy")
self.assertEqual(self.pencil._target,
['alias(drawAsInfinite(pencil), "deploy")'])
``` |
{
"source": "20minutes/pg-rabbitmq-fdw",
"score": 3
} |
#### File: pg-rabbitmq-fdw/tests/__init__.py
```python
import unittest
import pika
import psycopg2
class RabbitmqFDWTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(RabbitmqFDWTest, self).__init__(*args, **kwargs)
self.host = 'localhost'
self.port = int('5672')
self.user = 'test'
self.password = '<PASSWORD>'
self.queue = 'indexing'
# define PG connexion
self.pg_conn = psycopg2.connect("dbname=travis_ci_test user=postgres host=127.0.0.1")
self.pg_cursor = self.pg_conn.cursor()
# define rabbit connexion
connection = pika.BlockingConnection(pika.URLParameters('amqp://{0}:{1}@{2}:{3}/%2F'.format(self.user, self.password, self.host, self.port)))
self.rabbit_channel = connection.channel()
def test1Insert(self):
self.pg_cursor.execute("INSERT INTO tag (tag_id, label, slug) VALUES ('{0}', '{1}', '{2}')".format('c94e3e70-c5fa-4ea4-a708-d23903b26d50', 'Politic', 'politic'))
self.pg_conn.commit()
method_frame, header_frame, body = self.rabbit_channel.basic_get(self.queue)
if method_frame:
self.rabbit_channel.basic_ack(method_frame.delivery_tag)
self.assertEqual('{"action": "insert", "table": "tag", "id": "c94e3e70-c5fa-4ea4-a708-d23903b26d50"}', body)
else:
self.fail('No message returned')
def test2Update(self):
self.pg_cursor.execute("UPDATE tag SET label = '{0}' WHERE tag_id = '{1}'".format('Sport', 'c94e3e70-c5fa-4ea4-a708-d23903b26d50'))
self.pg_conn.commit()
method_frame, header_frame, body = self.rabbit_channel.basic_get(self.queue)
if method_frame:
self.rabbit_channel.basic_ack(method_frame.delivery_tag)
self.assertEqual('{"action": "update", "table": "tag", "id": "c94e3e70-c5fa-4ea4-a708-d23903b26d50"}', body)
else:
self.fail('No message returned')
def test3Delete(self):
self.pg_cursor.execute("DELETE FROM tag WHERE tag_id = '{0}'".format('c94e3e70-c5fa-4ea4-a708-d23903b26d50'))
self.pg_conn.commit()
method_frame, header_frame, body = self.rabbit_channel.basic_get(self.queue)
if method_frame:
self.rabbit_channel.basic_ack(method_frame.delivery_tag)
self.assertEqual('{"action": "delete", "table": "tag", "id": "c94e3e70-c5fa-4ea4-a708-d23903b26d50"}', body)
else:
self.fail('No message returned')
``` |
{
"source": "20nelson/connect4",
"score": 3
} |
#### File: 20nelson/connect4/minimax.py
```python
__author__ = '<NAME>'
# alpha-beta pruning adapted from https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning
from copy import *
import random
# gets all columns a piece could still be dropped into
def getPossibleMoves(board):
moves = []
for c in range(len(board[0])):
if board[0][c] == 0:
moves.append(c)
return moves
# recursively figures out the best place for the computer to move
def performMinimax(board, player, depth, move, pcolor, ocolor, alpha, beta, maximizing):
opp = 2 if player == 1 else 1
if move:
# returns (score, move)
if isTerminalState(board, ocolor, move):
return (None, 10 - depth)
elif isTerminalState(board, pcolor, move):
return (None, depth - 10)
elif isFull(board) or depth > 4:
return (None, 0)
# emergency abort if recursion gets out of hand, should never run
if len(getPossibleMoves(board)) == 0 or depth > 10:
print('PROBLEM')
print(board)
return
# go through all moves, running minimax for each, and find the best ones
best = []
if maximizing:
# ai is playing as itself
o = (None,-1000)
c = 0
for col in getPossibleMoves(board):
tboard = deepcopy(board)
# perform the move
row = len(tboard) - 1
while tboard[row][col] != 0:
row -= 1
tboard[row][col] = player
# run minimax on that board with opposite player
result = (col, performMinimax(tboard, opp, depth + 1, (row, col), pcolor, ocolor, alpha, beta, False)[1])
# see if that is better than the current best
if result[1] > o[1]:
o = (result[0],result[1])
best = []
best.append(o)
elif result[1] == o[1]:
# add any moves with the same score as the best to the optimal array
best.append(result)
# alpha-beta magic
if o[1] > alpha:
alpha = o[1]
if alpha > beta:
break
c += 1
else:
# ai is playing as human player
o = (None,1000)
c = 0
for col in getPossibleMoves(board):
tboard = deepcopy(board)
# perform the move
row = len(tboard) - 1
while tboard[row][col] != 0:
row -= 1
tboard[row][col] = player
# run minimax on that board with opposite player
result = (col, performMinimax(tboard, opp, depth + 1, (row, col), pcolor, ocolor, alpha, beta, True)[1])
# see if that is better than the current best
if result[1] < o[1]:
o = (result[0],result[1])
best = []
best.append(o)
elif result[1] == o[1]:
# add any moves with the same score as the best to the optimal array
best.append(result)
# alpha-beta magic
if o[1] < beta:
beta = o[1]
if alpha > beta:
break
c += 1
# pick random move from the best moves
return random.choice(best)
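# Typical root call (assuming the AI plays as player 2 and the human as player 1,
# with `board` a 2D list of ints where 0 means an empty square):
#   col, score = performMinimax(board, 2, 0, None, 1, 2, -1000, 1000, True)
#   # `col` is the column the AI should drop its piece into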
# gets the value of a square on the board, wrapper to avoid wrapping around the board with negatives
def getSquare(board, r, c):
if r < 0 or r >= len(board) or c < 0 or c >= len(board[0]):
return -1
else:
return board[r][c]
# checks if a player has won
# this is like one of those russian nesting dolls, but with for loops
def isTerminalState(board, player, move):
directions = [(1, 0), (0, 1), (1, 1), (1, -1)]
# down, right, right+down, left+down
for d in directions:
for offset in range(4):
original = getSquare(board, move[0] - d[0] * offset, move[1] - d[1] * offset)
if original != player or original == -1:
continue
c = True
for position in range(1, 4):
s = getSquare(board, move[0] + d[0] * position - d[0] * offset,
move[1] + d[1] * position - d[1] * offset)
if s != original or s == -1:
# square is not owned by the player being tested or out of range
c = False
break
if c:
return True
return False
# checks if the board is full
def isFull(board):
for r in board:
if 0 in r:
return False
return True
``` |
{
"source": "20repsej/controller",
"score": 3
} |
#### File: controller/hardware/mdd10.py
```python
import time
import RPi.GPIO as GPIO
import schedule
import logging
log = logging.getLogger('RemoTV.hardware.mdd10')
maxSpeedEnabled = False
AN1 = None
AN2 = None
DIG1 = None
DIG2 = None
p1 = None
p2 = None
turnDelay = None
straightDelay = None
#Cytron MDD10 GPIO setup
def setup(robot_config):
    global AN1
    global AN2
    global DIG1
    global DIG2
    global p1
    global p2
    global turnDelay
    global straightDelay
straightDelay = robot_config.getfloat('robot', 'straight_delay')
turnDelay = robot_config.getfloat('robot', 'turn_delay')
# pwm.setPWMFreq(60)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
AN2 = 13
AN1 = 12
DIG2 = 24
DIG1 = 26
GPIO.setup(AN2, GPIO.OUT)
GPIO.setup(AN1, GPIO.OUT)
GPIO.setup(DIG2, GPIO.OUT)
GPIO.setup(DIG1, GPIO.OUT)
time.sleep(1)
p1 = GPIO.PWM(AN1, 100)
p2 = GPIO.PWM(AN2, 100)
def SpeedNormal():
    global maxSpeedEnabled
    maxSpeedEnabled = False
    log.debug("normal speed")
#MDD10 speed and movement controls
def move(args):
global maxSpeedEnabled
command = args['button']['command']
if command == 'MAXSPEED':
handleMaxSpeedCommand()
maxSpeedEnabled = True
log.debug("max speed")
schedule.single_task(120, SpeedNormal)
return
if maxSpeedEnabled:
log.debug("AT MAX.....................")
log.debug("maxSpeedEnabled : %s",maxSpeedEnabled)
moveMDD10(command, 100)
else:
log.debug("NORMAL.................")
log.debug("maxSpeedEnabled : %s",maxSpeedEnabled)
moveMDD10(command, int(float(drivingSpeedActuallyUsed) / 2.55))
def moveMDD10(command, speedPercent):
if command == 'f':
GPIO.output(DIG1, GPIO.LOW)
GPIO.output(DIG2, GPIO.LOW)
p1.start(speedPercent) # set speed for M1
p2.start(speedPercent) # set speed for M2
time.sleep(straightDelay)
p1.start(0)
p2.start(0)
if command == 'b':
GPIO.output(DIG1, GPIO.HIGH)
GPIO.output(DIG2, GPIO.HIGH)
p1.start(speedPercent)
p2.start(speedPercent)
time.sleep(straightDelay)
p1.start(0)
p2.start(0)
if command == 'l':
GPIO.output(DIG1, GPIO.LOW)
GPIO.output(DIG2, GPIO.HIGH)
p1.start(speedPercent)
p2.start(speedPercent)
time.sleep(turnDelay)
p1.start(0)
p2.start(0)
if command == 'r':
GPIO.output(DIG1, GPIO.HIGH)
GPIO.output(DIG2, GPIO.LOW)
p1.start(speedPercent)
p2.start(speedPercent)
time.sleep(turnDelay)
p1.start(0)
p2.start(0)
```
#### File: controller/hardware/serial_board.py
```python
import serial
import serial.tools.list_ports as ports
import sys
import logging
import robot_util
import schedule
log = logging.getLogger('RemoTV.hardware.serial_board')
ser = None
serialDevice = None
serialBaud = None
restarts = 0
def sendSerialCommand(ser, command):
global restarts
try:
log.info("serial send: %s", str(command.lower()))
ser.write(command.lower().encode('utf8') + b"\r\n") # write a string
ser.flush()
restarts = 0
except:
log.debug("Attempting to restart serial")
try:
ser.close()
except:
pass
connectSerial(serialDevice, serialBaud)
def searchSerial(name):
for port in ports.comports():
if name in port.description or \
name in port.hwid or \
name in port.manufacturer:
return port.device
return None
def fallbackSerial():
for port in ports.comports():
if not port.device == "/dev/ttyAMA0":
yield port.device
else:
log.debug("Serial Fallback ignoring onboard bluetooth serial")
log.debug("No more possible serial ports")
def flushSerial():
ser.reset_input_buffer()
def setup(robot_config):
global serialDevice
global serialBaud
serialDevice = robot_config.get('serial', 'serial_device')
serialBaud = robot_config.getint('serial', 'baud_rate')
if robot_config.has_option('serial', 'serial_name'):
deviceName = robot_config.get('serial', 'serial_name')
device = searchSerial(deviceName)
if device != None:
serialDevice = device
log.info("Serial port named {} found at {}".format(deviceName, device))
else:
log.info("Serial port named {} NOT FOUND".format(deviceName))
connectSerial(serialDevice, serialBaud)
if ser is None:
log.critical("error: could not connect to any valid serial port")
robot_util.terminate_controller()
schedule.repeat_task(0.4, flushSerial)
def connectSerial(serialDevice, serialBaud):
global ser
global restarts
restarts = restarts + 1
ser = None
# initialize serial connection
try:
ser = serial.Serial(serialDevice, serialBaud, timeout=0, write_timeout=0) # open serial
except:
log.error("Could not open serial port {}".format(serialDevice))
ports = fallbackSerial()
for port in ports:
try:
ser = serial.Serial(port, serialBaud, timeout=0, write_timeout=0) # open serial
break
except:
log.error("Could not open serial port {}".format(port))
if ser is None:
log.critical("Error: could not find any valid serial port")
if restarts >= 20:
log.critical("Error: too many attemtps to reconnect serial")
robot_util.terminate_controller()
else:
log.info("Serial Connected")
log.debug("port: {}".format(ser.name))
log.debug("baud: {}".format(ser.baudrate))
return(ser)
def move(args):
command = args['button']['command']
sendSerialCommand(ser, command)
```
#### File: 20repsej/controller/mod_utils.py
```python
from schedule import task, repeat_task, single_task
import sys
if (sys.version_info > (3, 0)):
import importlib
def import_module(location, type):
if (sys.version_info > (3, 0)):
module = importlib.import_module(location+'.'+type)
else:
module = __import__(location+"."+type, fromlist=[type])
return(module)
```
#### File: 20repsej/controller/schedule.py
```python
from threading import Timer
def task(wait, task_handler, *args):
new_args = [wait, task_handler]
for a in args:
new_args.append(a)
t=Timer(wait, task, new_args)
t.daemon = True
t.start()
task_handler(*args);
return t
def repeat_task(wait, task_handler, *args):
new_args = [wait, task_handler]
for a in args:
new_args.append(a)
t=Timer(wait, task, new_args)
t.daemon = True
t.start()
return t
def single_task(wait, task_handler, *args):
t=Timer(wait, task_handler, args)
t.daemon = True
t.start()
return t
```
#### File: controller/tts/cozmo_tts.py
```python
import hardware.cozmo
def setup(robot_config):
hardware.cozmo.setup_coz(robot_config)
return
def say(*args):
message = args[0]
hardware.cozmo.say(message)
return
```
#### File: controller/tts/none.py
```python
def setup(robot_config):
return
def say(*args):
return
def mute():
return
def unmute():
return
def volume(level):
return
```
#### File: controller/tts/vector_tts.py
```python
import hardware.vector
vector = None
reserve_control = None
def setup(robot_config):
global vector
vector = hardware.vector.connect()
return
def say(*args):
message = args[0]
vector.behavior.say_text(message, duration_scalar=0.75)
return
```
#### File: controller/video/cozmo_vid.py
```python
import sys
import cozmo
import _thread as thread
import hardware.cozmo
def setup(robot_config):
return
def start():
try:
thread.start_new_thread(hardware.cozmo.run_video, ())
except KeyboardInterrupt as e:
pass
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
return
``` |
{
"source": "20Scoops-CNX/encrypt-pdf-api",
"score": 3
} |
#### File: encrypt-pdf-api/src/app.py
```python
import os
from waitress import serve
from PyPDF2 import PdfFileReader, PdfFileWriter
from flask import Flask, send_file, request, jsonify
from flask_cors import CORS
directory = "files"
app = Flask(__name__, static_folder=directory)
CORS(app)
def encryptFilePDF(file_name, password):
pathFile = "./files/" + file_name
pdfFile = open(pathFile, "rb")
pdfReader = PdfFileReader(pdfFile)
pdfWriter = PdfFileWriter()
for pageNum in range(pdfReader.numPages):
pdfWriter.addPage(pdfReader.getPage(pageNum))
pdfWriter.encrypt(password)
resultPdf = open("./files/" + "encrypted_file.pdf", "wb")
pdfWriter.write(resultPdf)
resultPdf.close()
os.remove(pathFile)
return resultPdf
@app.route("/encrypt-pdf", methods=["POST"])
def encryptFileRoute():
if not "file" in request.files:
return jsonify({"error": "no file"}), 400
pdf_file = request.files.get("file")
file_name = pdf_file.filename
mimetype = pdf_file.content_type
    if mimetype != "application/pdf":
return jsonify({"error": "bad-type"})
if not os.path.exists(directory):
os.makedirs(directory)
pdf_file.save(os.path.join(directory, file_name))
if not "password" in request.form:
return jsonify({"error": "key password invalid or empty"}), 400
password = request.form.get("password")
encryptFilePDF(file_name, password)
return send_file(os.getcwd() + "/files/encrypted_file.pdf", as_attachment=True)
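# Client-side sketch (assuming the server runs locally on port 9000 and a
# sample.pdf sits next to the script):
#   import requests
#   with open("sample.pdf", "rb") as fh:
#       res = requests.post(
#           "http://localhost:9000/encrypt-pdf",
#           files={"file": ("sample.pdf", fh, "application/pdf")},
#           data={"password": "s3cret"},
#       )
#   open("encrypted_file.pdf", "wb").write(res.content)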
if __name__ == "__main__":
is_production = os.getenv("FLASK_ENV", "development")
port = os.getenv("PORT", "9000")
if is_production == "production":
serve(app, host="0.0.0.0", port=port)
else:
app.run(host="0.0.0.0", port=port, debug=True)
``` |
{
"source": "20tab/20tab-standard-project",
"score": 2
} |
#### File: 20tab-standard-project/hooks/post_gen_project.py
```python
import json
import os # noqa
import secrets
import sys # noqa
from pathlib import Path
from cookiecutter.main import cookiecutter
try:
import gitlab # noqa
except ModuleNotFoundError: # pragma: no cover
pass
class MainProcess:
"""Main process class."""
BACKEND_URL = "https://github.com/20tab/django-continuous-delivery"
FRONTEND_URLS = {
"None": "",
"React": "https://github.com/20tab/react-ts-continuous-delivery",
}
def __init__(self, *args, **kwargs):
"""Create a main process instance with chosen parameters."""
cookiecutter_path = Path("cookiecutter.json")
cookiecutter_dict = json.loads(cookiecutter_path.read_text())
self.domain_url = cookiecutter_dict["domain_url"]
self.gitlab_group_slug = cookiecutter_dict["gitlab_group_slug"]
self.project_name = cookiecutter_dict["project_name"]
self.project_slug = cookiecutter_dict["project_slug"]
self.use_gitlab = cookiecutter_dict["use_gitlab"]
self.use_media = cookiecutter_dict["use_media"]
self.backend_url = self.BACKEND_URL
self.frontend_url = self.FRONTEND_URLS[cookiecutter_dict["which_frontend"]]
cookiecutter_dict["has_frontend"] = bool(self.frontend_url)
cookiecutter_path.write_text(json.dumps(cookiecutter_dict, indent=2))
def create_env_file(self):
"""Create env file from the template."""
env_template = Path(".env_template").read_text()
env_text = env_template.replace(
"__SECRETKEY__", secrets.token_urlsafe(40)
).replace("__PASSWORD__", secrets.token_urlsafe(8))
Path(".env").write_text(env_text)
def copy_secrets(self):
"""Copy the Kubernetes secrets manifest."""
secrets_template = ""
environments = {
"development": {
"configuration": "Development",
"subdomain": "dev",
},
"integration": {
"configuration": "Integration",
"subdomain": "test",
},
"production": {
"configuration": "Production",
"subdomain": "www",
},
}
secrets_template = Path("k8s/2_secrets.yaml_template").read_text()
for environment, values in environments.items():
secrets_text = (
secrets_template.replace("__CONFIGURATION__", values["configuration"])
.replace("__ENVIRONMENT__", environment)
.replace("__SUBDOMAIN__", values["subdomain"])
.replace("__SECRETKEY__", secrets.token_urlsafe(40))
.replace("__PASSWORD__", secrets.token_urlsafe(8))
)
Path(f"k8s/{environment}/2_secrets.yaml").write_text(secrets_text)
def create_subprojects(self):
"""Create subprojects."""
cookiecutter(
self.backend_url,
extra_context={
"domain_url": self.domain_url,
"gitlab_group_slug": self.gitlab_group_slug,
"project_dirname": "backend",
"project_name": self.project_name,
"project_slug": self.project_slug,
"use_media": self.use_media,
},
no_input=True,
)
if self.frontend_url:
cookiecutter(
self.frontend_url,
extra_context={
"gitlab_group_slug": self.gitlab_group_slug,
"project_dirname": "frontend",
"project_name": self.project_name,
"project_slug": self.project_slug,
},
no_input=True,
)
def run(self):
"""Run the main process operations."""
self.create_env_file()
self.copy_secrets()
self.create_subprojects()
if self.use_gitlab:
exec(Path("./scripts/python/gitlab_sync.py").read_text())
if __name__ == "__main__":
main_process = MainProcess()
main_process.run()
```
#### File: 20tab-standard-project/hooks/pre_gen_project.py
```python
import json
import os
import sys
from collections import OrderedDict # noqa
from pathlib import Path
try:
import gitlab # noqa
except ModuleNotFoundError: # pragma: no cover
pass
class MainProcess:
"""Main process class."""
TOKEN = "<PASSWORD>"
URL = "https://gitlab.com"
def __init__(self, *args, **kwargs):
"""Create a main process instance with chosen parameters."""
self.project_name = "{{cookiecutter.project_name}}"
self.project_slug = "{{cookiecutter.project_slug}}"
self.group_slug = self.project_slug
self.use_gitlab = "{{cookiecutter.use_gitlab}}" == "Yes"
if self.use_gitlab:
try:
private_token = os.environ[self.TOKEN]
except KeyError:
sys.exit(f"The environment variable '{self.TOKEN}' is missing.")
try:
self.gl = gitlab.Gitlab(self.URL, private_token=private_token)
except NameError:
sys.exit("The 'python-gitlab' package is missing.")
try:
self.gl.auth()
except gitlab.exceptions.GitlabAuthenticationError:
sys.exit(f"The environment variable '{self.TOKEN}' is not correct.")
def run(self):
"""Run main process."""
configuration = {{cookiecutter}} # noqa
configuration["gitlab_group_slug"] = None
configuration["use_gitlab"] = self.use_gitlab
Path("cookiecutter.json").write_text(json.dumps(configuration, indent=2))
if __name__ == "__main__":
main_process = MainProcess()
main_process.run()
``` |
{
"source": "20tab/django-alexa-template",
"score": 2
} |
#### File: django-alexa-template/{{ cookiecutter.project_slug }}/tasks.py
```python
import getpass
import os
import sys
import json
from pathlib import Path
from {{ cookiecutter.project_slug }}.settings import DEBUG
# import dj_database_url
# from django.core.management.utils import get_random_secret_key
# from dotenv import find_dotenv, load_dotenv
from invoke import task
import requests
from requests.exceptions import ConnectionError
import pprint
@task
def run(c, deploy=False):
set_skill_json()
if deploy:
c.run("ask deploy")
if DEBUG:
c.run("python manage.py runserver")
def get_public_urls():
try:
res = {}
ngrok_data = requests.get("http://127.0.0.1:4040/api/tunnels").json()
for t in ngrok_data['tunnels']:
res[t['proto']] = t['public_url']
return res
except ConnectionError:
print("Please run ngrok first of all")
exit(0)
def set_skill_json():
"""
    Read the ngrok API to get the public URL and set it as the endpoint in skill.json.
    Return True if the endpoint was changed, else False.
"""
if DEBUG:
endpoint = {
"uri": f"{get_public_urls()['https']}/alexa/",
"sslCertificateType": "Wildcard"
}
else:
endpoint = {
"sourceDir": "alexa/lambda_upload",
"uri": "ask-custom-{{ cookiecutter.project_slug }}"
}
if DEBUG:
config = None
with open(".ask/config", "r") as f:
config = json.loads(f.read())
try:
config["deploy_settings"]["default"]["resources"].pop("lambda", None)
except KeyError:
config = None
if config:
with open(".ask/config", "w") as f:
f.write(json.dumps(config, indent=2))
else:
config = None
with open(".ask/config", "r") as f:
config = json.loads(f.read())
try:
lambda_ref = [
{
"functionName": "",
"alexaUsage": [
"custom/default"
],
"runtime": "python3.6",
"handler": "skill.handler"
}
]
config["deploy_settings"]["default"]["resources"]["lambda"] = lambda_ref
except KeyError:
config = None
if config:
with open(".ask/config", "w") as f:
f.write(json.dumps(config, indent=2))
with open("skill.json", "r") as f:
skill_json = json.loads(f.read())
if skill_json["manifest"]["apis"]["custom"]["endpoint"] != endpoint:
skill_json["manifest"]["apis"]["custom"]["endpoint"] = endpoint
with open("skill.json", "w") as f:
f.write(json.dumps(skill_json, indent=2))
return True
return False
# NOTE: originally cribbed from fab 1's contrib.console.confirm
def confirm(question, assume_yes=True):
"""
Ask user a yes/no question and return their response as a boolean.
``question`` should be a simple, grammatically complete question such as
"Do you wish to continue?", and will have a string similar to ``" [Y/n] "``
appended automatically. This function will *not* append a question mark for
you.
By default, when the user presses Enter without typing anything, "yes" is
    assumed. This can be changed by specifying ``assume_yes=False``.
.. note::
        If the user does not supply input that is (case-insensitively) equal
to "y", "yes", "n" or "no", they will be re-prompted until they do.
:param str question: The question part of the input.
:param bool assume_yes:
Whether to assume the affirmative answer by default. Default value:
``True``.
:returns: A `bool`.
"""
# Set up suffix
if assume_yes:
suffix = "Y/n"
else:
suffix = "y/N"
# Loop till we get something we like
# TODO: maybe don't do this? It can be annoying. Turn into 'q'-for-quit?
while True:
# TODO: ensure that this is Ctrl-C friendly, ISTR issues with
# raw_input/input on some Python versions blocking KeyboardInterrupt.
response = input("{0} [{1}] ".format(question, suffix))
response = response.lower().strip() # Normalize
# Default
if not response:
return assume_yes
# Yes
if response in ["y", "yes"]:
return True
# No
if response in ["n", "no"]:
return False
# Didn't get empty, yes or no, so complain and loop
err = "I didn't understand you. Please specify '(y)es' or '(n)o'."
print(err, file=sys.stderr)
``` |
{
"source": "20tab/django-bdd-toolkit",
"score": 2
} |
#### File: django-bdd-toolkit/tests/environment.py
```python
from django.conf import settings
from pkg_resources import load_entry_point
from splinter import Browser
from splinter.driver.webdriver.chrome import Options
def before_scenario(context, scenario):
"""Prepare context for scenario."""
if 'browser.firefox' in scenario.tags:
driver_name = 'firefox'
elif 'browser.chrome' in scenario.tags:
driver_name = 'chrome'
else:
driver_name = getattr(settings, 'BDD_DEFAULT_BROWSER', 'chrome')
params = {
'driver_name': driver_name,
'headless': getattr(settings, 'BDD_HEADLESS_BROWSER', False),
'incognito': getattr(settings, 'BDD_INCOGNITO_BROWSER', False),
'wait_time': getattr(settings, 'BDD_DEFAULT_WAIT_TIME', 5),
'fullscreen': getattr(settings, 'BDD_FULLSCREEN_BROWSER', False),
}
language = {
'intl.accept_languages': getattr(
settings, 'BDD_BROWSER_LANGUAGE', 'en-US'
)
}
if driver_name == 'firefox':
params.update({
'profile_preferences': language,
'capabilities': {'moz:webdriverClick': False},
})
elif driver_name == 'chrome':
load_entry_point('chromedriver-binary==2.43.0', 'console_scripts', 'chromedriver-path')
options = Options()
options.add_experimental_option('prefs', language)
params.update({
'options': options
})
context.browser = Browser(**params)
def after_scenario(context, scenario):
"""Clean context for scenario."""
context.browser.quit()
```
#### File: tests/pages/home.py
```python
from django.urls import reverse
from tests.pages.base import BasePage
class HomePage(BasePage):
"""Homepage model and interactions."""
def __init__(self, context, *args, **kwargs):
"""Set the homepage url."""
self.url = context.get_url(reverse('home'))
super().__init__(context, *args, **kwargs)
@property
def logo_locator(self):
"""Return the logo web element."""
return self.browser.find_by_id('logo')
def visit(self):
"""Use the browser to visit the homepage url."""
self.browser.visit(self.url)
``` |
{
"source": "20tab/django-cache-url",
"score": 2
} |
#### File: django-cache-url/tests/test_django_redis_cache.py
```python
import django_cache_url
redis_cache = django_cache_url.DJANGO_REDIS_CACHE
def test_basic_config():
url = f'redis://127.0.0.1:6379/?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://127.0.0.1:6379/0'
def test_advanced_config():
extra_params = [
'parser_class=redis.connection.HiredisParser',
'connection_pool_class=redis.BlockingConnectionPool',
'max_connections=50',
'timeout=20',
]
url = f'redis://:[email protected]:6379/1?lib={redis_cache}&{"&".join(extra_params)}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://:[email protected]:6379/1'
assert config['OPTIONS']['PARSER_CLASS'] == 'redis.connection.HiredisParser'
assert config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']['max_connections'] == 50
assert config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']['timeout'] == 20
def test_basic_config_with_db():
url = f'redis://127.0.0.1:6379/1?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://127.0.0.1:6379/1'
def test_basic_config_with_password():
url = f'redis://:[email protected]:6379/?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://:[email protected]:6379/0'
def test_basic_config_with_parser_class():
url = f'redis://127.0.0.1:6379/?lib={redis_cache}&parser_class=redis.connection.HiredisParser'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://127.0.0.1:6379/0'
assert config['OPTIONS']['PARSER_CLASS'] == 'redis.connection.HiredisParser'
def test_basic_config_with_connection_pool_class():
url = f'redis://127.0.0.1:6379/?lib={redis_cache}&connection_pool_class=redis.BlockingConnectionPool'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://127.0.0.1:6379/0'
assert config['OPTIONS']['CONNECTION_POOL_CLASS'] == 'redis.BlockingConnectionPool'
def test_basic_config_with_connection_pool_class_kwargs():
# both max_connections and timeout
url = f'redis://127.0.0.1:6379/?lib={redis_cache}&max_connections=50&timeout=20'
config = django_cache_url.parse(url)
assert config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']['max_connections'] == 50
assert config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']['timeout'] == 20
# just max_connections
url = f'redis://127.0.0.1:6379/?lib={redis_cache}&max_connections=10'
config = django_cache_url.parse(url)
assert config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']['max_connections'] == 10
assert 'timeout' not in config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']
# just timeout
url = f'redis://127.0.0.1:6379/?lib={redis_cache}&timeout=10'
config = django_cache_url.parse(url)
assert config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']['timeout'] == 10
assert 'max_connections' not in config['OPTIONS']['CONNECTION_POOL_CLASS_KWARGS']
def test_rediss_config_with_db():
url = f'rediss://127.0.0.1:6379/1?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'rediss://127.0.0.1:6379/1'
def test_rediss_config():
url = f'rediss://127.0.0.1:6379/?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'rediss://127.0.0.1:6379/0'
def test_rediss_config_with_password():
url = f'rediss://:[email protected]:6379/?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'rediss://:[email protected]:6379/0'
def test_hiredis_config_with_db():
url = f'hiredis://127.0.0.1:6379/1?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://127.0.0.1:6379/1'
assert config['OPTIONS']['PARSER_CLASS'] == 'redis.connection.HiredisParser'
def test_hiredis_config():
url = f'hiredis://127.0.0.1:6379/?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://127.0.0.1:6379/0'
assert config['OPTIONS']['PARSER_CLASS'] == 'redis.connection.HiredisParser'
def test_hiredis_config_with_password():
url = f'hiredis://:[email protected]:6379/?lib={redis_cache}'
config = django_cache_url.parse(url)
assert config['BACKEND'] == 'redis_cache.RedisCache'
assert config['LOCATION'] == 'redis://:[email protected]:6379/0'
assert config['OPTIONS']['PARSER_CLASS'] == 'redis.connection.HiredisParser'
``` |
{
"source": "20tab/django-immortalmodel",
"score": 2
} |
#### File: django-immortalmodel/immortalmodel/models.py
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _
class ImmortalQuerySet(models.query.QuerySet):
"""
Represents a lazy database lookup for a set of objects.
It updates "deleted" attribute instead deleting items.
"""
def delete(self):
self.update(deleted=True)
class ImmortalManager(models.Manager):
def get_queryset(self):
"""
Returns a new QuerySet object. Subclasses can override this method
to easily customize the behavior of the Manager.
It filters by "deleted" attribute.
"""
return ImmortalQuerySet(self.model, using=self._db).filter(deleted=False)
get_query_set = get_queryset
class ImmortalModel(models.Model):
"""
Implementation of undeletable model
"""
deleted = models.BooleanField(_('deleted'), default=False)
objects = ImmortalManager()
baseobjects = models.Manager()
def delete(self, using=None, **kwargs):
self.deleted = True
self.save()
class Meta:
abstract = True
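# Usage sketch (assuming a concrete subclass defined in some app's models.py):
#   class Article(ImmortalModel):
#       title = models.CharField(max_length=100)
#
#   article.delete()             # only flags deleted=True
#   Article.objects.all()        # hides flagged rows
#   Article.baseobjects.all()    # still returns them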
```
#### File: django-immortalmodel/immortalmodel/tests.py
```python
from django.test import TestCase
from immortalmodel.models import ImmortalModel
class ImmortalTestCase(TestCase):
"""
add immortalmodel to INSTALLED_APPS to run the following tests !!!
"""
class Blashyrkh(ImmortalModel):
pass
def test_immortality(self):
b = self.Blashyrkh()
b.delete()
self.assertRaises(self.Blashyrkh.DoesNotExist, self.Blashyrkh.objects.get, pk=b.pk)
self.assertIsNotNone(self.Blashyrkh.baseobjects.get(pk=b.pk))
def test_resuscitate(self):
b = self.Blashyrkh()
b.delete()
self.assertRaises(self.Blashyrkh.DoesNotExist, self.Blashyrkh.objects.get, pk=b.pk)
b.deleted = False
b.save()
self.assertIsNotNone(self.Blashyrkh.objects.get(pk=b.pk))
def test_manually_deleted(self):
b = self.Blashyrkh()
b.deleted = True
b.save()
self.assertRaises(self.Blashyrkh.DoesNotExist, self.Blashyrkh.objects.get, pk=b.pk)
``` |
{
"source": "20tab/django-political-map",
"score": 3
} |
#### File: django-political-map/example/tests.py
```python
from django.test import TestCase
from django.forms import ModelForm
from politicalplaces.models import PoliticalPlace, MapItem
from .models import MyLocation, MyLocationComplex
class MyLocationTest(TestCase):
def setUp(self):
class MyLocationForm(ModelForm):
class Meta:
model = MyLocation
exclude = []
self.MyLocationForm = MyLocationForm
def test_mylocation_creation(self):
self.assertEqual(0, PoliticalPlace.objects.count())
self.assertEqual(0, MapItem.objects.count())
self.assertEqual(0, MyLocation.objects.count())
loc = MyLocation()
loc.name = "Test Location"
place = PoliticalPlace.get_or_create_from_address(
address='via Luigi Gastinelli, Rome')
loc.place = place
loc.save()
self.assertEqual(
loc.place.country_item.short_name,
"IT")
self.assertEqual(
loc.place.self_item.geo_type,
"route")
self.assertEqual(1, PoliticalPlace.objects.count())
self.assertEqual(7, MapItem.objects.count())
self.assertEqual(1, MyLocation.objects.count())
loc2 = MyLocation()
loc2.name = "Test Location 2"
place2 = PoliticalPlace.get_or_create_from_address(
address='via Luigi Gastinelli, Rome')
loc2.place = place2
loc2.save()
self.assertEqual(
loc2.place.country_item.short_name,
"IT")
self.assertEqual(1, PoliticalPlace.objects.count())
self.assertEqual(7, MapItem.objects.count())
self.assertEqual(2, MyLocation.objects.count())
def test_mylocation_formfield_clean(self):
self.assertEqual(0, PoliticalPlace.objects.count())
post_data = {'place': 'via Luigi Gastinelli 118, Rome'}
location_form = self.MyLocationForm(data=post_data)
location_form.save()
self.assertEqual(1, PoliticalPlace.objects.count())
def test_mylocation_formfield_prepare_value(self):
test_place = PoliticalPlace.get_or_create_from_address(
address='via <NAME>elli, Rome')
location = MyLocation(place=test_place)
location.save()
location_form = self.MyLocationForm(instance=location)
self.assertEqual(
location_form['place'].value(),
"Via <NAME>, 00132 Roma RM, Italy")
def test_mylocation_formfield_prepare_value_no_instance(self):
location_form = self.MyLocationForm()
self.assertEqual(
location_form['place'].value(), None)
def test_mylocation_null(self):
my_location = MyLocation.objects.create()
self.assertTrue(my_location)
class MyLocationComplexTest(TestCase):
def setUp(self):
class MyLocationComplexForm(ModelForm):
class Meta:
model = MyLocationComplex
exclude = []
self.MyLocationComplexForm = MyLocationComplexForm
def test_mylocation_complex_form(self):
data = {'mandatory_charfield': None, 'place': "Roma"}
location_form = self.MyLocationComplexForm(data=data)
self.assertFalse(location_form.is_valid())
``` |
{
"source": "20tab/django-uwsgi-template",
"score": 2
} |
#### File: 20tab/django-uwsgi-template/tasks.py
```python
import getpass
import os
import sys
from pathlib import Path
import dj_database_url
from django.core.management.utils import get_random_secret_key
from dotenv import find_dotenv, load_dotenv
from invoke import task
BASE_DIR = os.path.dirname(__file__)
BASE_DIRNAME = os.path.dirname(BASE_DIR)
PROJECT_DIRNAME = os.path.basename(os.path.dirname(__file__))
EMPEROR_MODE = True
VASSALS = f"{BASE_DIRNAME}/vassals"
USERNAME = os.getlogin()
ENV_FILE = f"{BASE_DIR}/.env"
SECRET_KEY = get_random_secret_key()
@task
def init(c):
"""Initialize project."""
try:
VENV_ROOT = str(Path(os.getenv("VIRTUAL_ENV")).parent).replace(
"/", "\/"
) # noqa
except TypeError:
print("Activate your virtualenv and run the inv command again")
return
EMPEROR_MODE = confirm(
"Do you want to configure your uWSGI vassal in emperor mode? (no=stand-alone)"
)
if EMPEROR_MODE:
vassals = (
input(
f"We will use '{VASSALS}' as the vassal directory or specify the path: "
)
or VASSALS
)
bonjour = confirm(
"Do you want to use Bonjour for OSX (Yes) or Avahi for Linux (No)? "
)
if bonjour:
ZEROCONF = "bonjour"
ZEROOPTS = "name=%(project_name).local,cname=localhost"
else:
ZEROCONF = "avahi"
ZEROOPTS = "%(project_name).local"
python_plugin = (
input(f"Specify python plugin to configure uwsgi (default: python3): ")
or "python3"
)
database = (
input(f"We'll use '{PROJECT_DIRNAME}' as database name or specify the name: ")
or PROJECT_DIRNAME
)
username = input(f"Enter the database user name: ")
password = <PASSWORD>pass(f"Enter the database user password: ")
print("Compiling pip file in requirements")
c.run("make pip")
print("Installing libraries in requirements")
c.run("make dev")
if not os.path.exists("static"):
print("Making static directory")
c.run("mkdir static")
if not os.path.exists("media"):
print("Making media directory")
c.run("mkdir media")
ini_dir = f"{BASE_DIR}/uwsgiconf/local"
PYVERSION = f"{sys.version_info[0]}.{sys.version_info[1]}"
WORKAREA_ROOT = BASE_DIRNAME.replace("/", "\/") # noqa
print("Generating uwsgi user file")
if EMPEROR_MODE and not os.path.exists(f"{vassals}/{PROJECT_DIRNAME}.ini"):
c.run(f"cp {ini_dir}/vassal.ini.tpl {ini_dir}/{USERNAME}.ini")
c.run(
(
f'sed -i".bak" -e "s/USERNAME/{USERNAME}/g;s/ZEROCONF/{ZEROCONF}/g;'
f's/ZEROOPTS/{ZEROOPTS}/g;" {ini_dir}/{USERNAME}.ini'
)
)
c.run(
f"ln -s "
f"{BASE_DIR}/uwsgiconf/local/{USERNAME}.ini "
f"{vassals}/{PROJECT_DIRNAME}.ini"
)
else:
c.run(f"cp {ini_dir}/standalone.ini.tpl {ini_dir}/{USERNAME}.ini")
c.run(
f'sed -i".bak" -e "s/plugin = python3/plugin = {python_plugin}/g;"'
f" {ini_dir}/{USERNAME}.ini"
)
c.run(
f'sed -i".bak" -e "s/WORKAREA_ROOT/{WORKAREA_ROOT}/g;" {ini_dir}/{USERNAME}.ini'
)
c.run(f'sed -i".bak" -e "s/PYVERSION/{PYVERSION}/g;" {ini_dir}/{USERNAME}.ini')
c.run(f'sed -i".bak" -e "s/VENV_ROOT/{VENV_ROOT}/g;" {ini_dir}/{USERNAME}.ini')
print("Create env file")
if not os.path.exists(f"{ENV_FILE}"):
c.run(f"cp {ENV_FILE}.tpl {ENV_FILE}")
c.run(
(
f'sed -i".bak" -e '
f'"s/database/{database}/g;s/password/{password}/g;'
f's/secretkey/{SECRET_KEY}/g;s/username/{username}/g"'
f" {ENV_FILE}"
)
)
print("Collect static files")
c.run("make collectstatic")
createdb(c)
print("*** Next steps ***")
print(f"a) Check the uwsgiconf/local/{USERNAME}.ini and verify the python plugin")
print("b) Check the uwsgiconf/remote/globlal.ini file and verify the python plugin")
print("c) Check the domain in uwsgiconf/remote/[alpha|beta|production].ini file")
print("d) Configure the deploy/hosts file with server data")
print("e) Configure the deploy/[alpha|beta|production].yml files with correct data")
print(f"f) Configure the file by {PROJECT_DIRNAME}/settings.py")
if EMPEROR_MODE:
c.run(f"python -m webbrowser -t http://{PROJECT_DIRNAME}.local/")
@task
def createdb(c):
"""Create database."""
if confirm(
"Attention: you are creating the PostgreSQL DB. Do you want to proceed?"
):
db_name, db_host, db_port, db_user = get_db()
c.run(
f"createdb -e -h {db_host} -p {db_port} -U {db_user} -O {db_user} {db_name}"
)
if confirm("Attention: you are applying migrations. Do you want to proceed?"):
c.run("make migrate")
@task
def dropdb(c):
"""Drop database."""
if confirm("Warning, you are deleting the db. Are you sure you want to proceed?"):
db_name, db_host, db_port, db_user = get_db()
c.run(f"dropdb -e -h {db_host} -p {db_port} -U {db_user} {db_name}")
@task
def dumpdb(c):
"""Dump database."""
db_name, db_host, db_port, db_user = get_db()
c.run(
f"pg_dump -h {db_host} -p {db_port} -U {db_user} {db_name} | "
"bzip2 -9 > deploy/dump.sql.bz2"
)
@task
def gitinit(c, git_repository_url):
"""Initialize git repository."""
c.run(f'sed -i".bak" -e "s,GIT_REPOSITORY_URL,{git_repository_url},g;" README.md')
c.run("git init")
c.run("pre-commit install")
c.run("git add -A")
c.run("git commit -m 'Initial commit'")
c.run(f"git remote add origin {git_repository_url}")
c.run("git push -u origin master")
@task
def restart(c):
"""Restart uWSGI instance."""
c.run(f"touch uwsgiconf/local/{USERNAME}.ini")
def get_db():
"""Fetch database credentials."""
load_dotenv(find_dotenv())
db_url = os.getenv("DATABASE_URL")
db_default = dj_database_url.parse(db_url)
db_name = db_default["NAME"]
db_host = db_default["HOST"]
db_port = db_default["PORT"]
db_user = db_default["USER"]
return db_name, db_host, db_port, db_user
# NOTE: originally cribbed from fab 1's contrib.console.confirm
def confirm(question, assume_yes=True):
"""
Ask user a yes/no question and return their response as a boolean.
``question`` should be a simple, grammatically complete question such as
"Do you wish to continue?", and will have a string similar to ``" [Y/n] "``
appended automatically. This function will *not* append a question mark for
you.
By default, when the user presses Enter without typing anything, "yes" is
    assumed. This can be changed by specifying ``assume_yes=False``.
.. note::
        If the user does not supply input that is (case-insensitively) equal
to "y", "yes", "n" or "no", they will be re-prompted until they do.
:param str question: The question part of the input.
:param bool assume_yes:
Whether to assume the affirmative answer by default. Default value:
``True``.
:returns: A `bool`.
"""
# Set up suffix
if assume_yes:
suffix = "Y/n"
else:
suffix = "y/N"
# Loop till we get something we like
# TODO: maybe don't do this? It can be annoying. Turn into 'q'-for-quit?
while True:
# TODO: ensure that this is Ctrl-C friendly, ISTR issues with
# raw_input/input on some Python versions blocking KeyboardInterrupt.
response = input(f"{question} [{suffix}] ")
response = response.lower().strip() # Normalize
# Default
if not response:
return assume_yes
# Yes
if response in ["y", "yes"]:
return True
# No
if response in ["n", "no"]:
return False
# Didn't get empty, yes or no, so complain and loop
err = "I didn't understand you. Please specify '(y)es' or '(n)o'."
print(err, file=sys.stderr)
``` |
{
"source": "20tab/mutant",
"score": 2
} |
#### File: 20tab/mutant/__init__.py
```python
from uwsgidecorators import *
import zmq
import subprocess
from mutant import config
import os,sys
if not os.path.exists(config.WKHTMLTOPDF):
print "Mutant is badly configured: WKHTMLTOPDF = '%s'\n%s doesn't exists" % (config.WKHTMLTOPDF,config.WKHTMLTOPDF)
sys.exit()
if not os.path.exists(config.SOCKET_PATH):
print "Mutant is badly configured: SOCKET_PATH = '%s'\n%s doesn't exists" % (config.SOCKET_PATH,config.SOCKET_PATH)
sys.exit()
VERSION = (0,0,1)
__version__ = '.'.join(map(str, VERSION))
DATE = "2012-07-18"
zmqcontext = None
@postfork
def create_zmq_context():
"""
    It creates a new ZeroMQ context in each forked process (mules included)
"""
global zmqcontext
zmqcontext = zmq.Context()
def enqueue(html, output, header='', footer='', opts=''):
"""
    It enqueues a conversion task on the queue socket
    """
    global zmqcontext
socket = zmqcontext.socket(zmq.REQ)
socket.connect('ipc://%s' % config.SOCKET_PATH)
socket.send("convert|%s|%s|%s|%s|%s" % (html, output, header, footer, opts))
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
# wait for response
socks = poller.poll(config.TIMEOUT*1000)
if not socks:
return False
response = socket.recv()
if response == 'done':
return True
return False
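# Usage sketch from inside a uWSGI worker, after create_zmq_context() has run
# (the paths below are only illustrative):
#   ok = enqueue('/tmp/page.html', '/tmp/page.pdf',
#                header='/tmp/header.html', opts='--page-size A4')
#   # True once the mule replies "done", False on timeout or error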
# the function that invokes wkhtmltopdf
def convert_pdf(msg):
items = msg.split('|')
cmd = [config.WKHTMLTOPDF]
for i in items[5].split():
cmd.append(i)
if items[3] != '':
cmd.append('--header-html')
cmd.append(items[3])
if items[4] != '':
cmd.append('--footer-html')
cmd.append(items[4])
# source
cmd.append(items[1])
#destination
cmd.append(items[2])
print "running %s" % cmd
p = subprocess.Popen(cmd)
if p.wait() == 0:
return True
return False
# the consumer of the conversion queue (runs in a mule)
@mule(config.MULE_ID)
def pdf_convert_consumer():
    # set the process name (to look nicer)
uwsgi.setprocname('uWSGI mutant')
    # listen on the queue socket
global zmqcontext
socket = zmqcontext.socket(zmq.REP)
socket.bind('ipc://%s' % config.SOCKET_PATH)
print "ready to encode html to pdf..."
while True:
        # a new message arrived
msg = socket.recv()
        # run the conversion
if convert_pdf(msg):
socket.send("done")
else:
socket.send("error")
```
#### File: 20tab/mutant/uwsgidecorators.py
```python
import uwsgi
from threading import Thread
try:
import cPickle as pickle
except:
import pickle
if uwsgi.masterpid() == 0:
raise Exception(
"you have to enable the uWSGI master process to use this module")
spooler_functions = {}
mule_functions = {}
postfork_chain = []
def get_free_signal():
for signum in xrange(0, 256):
if not uwsgi.signal_registered(signum):
return signum
raise Exception("No free uwsgi signal available")
def manage_spool_request(vars):
ret = spooler_functions[vars['ud_spool_func']](vars)
if not 'ud_spool_ret' in vars:
return ret
return int(vars['ud_spool_ret'])
def postfork_chain_hook():
for f in postfork_chain:
f()
uwsgi.spooler = manage_spool_request
uwsgi.post_fork_hook = postfork_chain_hook
class postfork(object):
def __init__(self, f):
postfork_chain.append(f)
class spool(object):
def spool(self, *args, **kwargs):
arguments = self.base_dict
arguments['ud_spool_ret'] = str(uwsgi.SPOOL_OK)
if len(args) > 0:
arguments.update(args[0])
if kwargs:
arguments.update(kwargs)
return uwsgi.spool(arguments)
def __init__(self, f):
if not 'spooler' in uwsgi.opt:
raise Exception(
"you have to enable the uWSGI spooler to use @spool decorator")
self.f = f
spooler_functions[f.__name__] = self.f
self.f.spool = self.spool
self.base_dict = {'ud_spool_func': self.f.__name__}
class spoolforever(spool):
def spool(self, *args, **kwargs):
arguments = self.base_dict
arguments['ud_spool_ret'] = str(uwsgi.SPOOL_RETRY)
if len(args) > 0:
arguments.update(args[0])
if kwargs:
arguments.update(kwargs)
return uwsgi.spool(arguments)
class spoolraw(spool):
def spool(self, *args, **kwargs):
arguments = self.base_dict
if len(args) > 0:
arguments.update(args[0])
if kwargs:
arguments.update(kwargs)
return uwsgi.spool(arguments)
class mulefunc(object):
def __init__(self, f):
if callable(f):
self.fname = f.__name__
self.mule = 0
mule_functions[f.__name__] = f
else:
self.mule = f
self.fname = None
def real_call(self, *args, **kwargs):
uwsgi.mule_msg(pickle.dumps(
{
'service': 'uwsgi_mulefunc',
'func': self.fname,
'args': args,
'kwargs': kwargs
}
), self.mule)
def __call__(self, *args, **kwargs):
if not self.fname:
self.fname = args[0].__name__
mule_functions[self.fname] = args[0]
return self.real_call
return self.real_call(*args, **kwargs)
def mule_msg_dispatcher(message):
msg = pickle.loads(message)
if msg['service'] == 'uwsgi_mulefunc':
return mule_functions[msg['func']](*msg['args'], **msg['kwargs'])
uwsgi.mule_msg_hook = mule_msg_dispatcher
class rpc(object):
def __init__(self, name):
self.name = name
def __call__(self, f):
uwsgi.register_rpc(self.name, f)
return f
class farm_loop(object):
def __init__(self, f, farm):
self.f = f
self.farm = farm
def __call__(self):
if uwsgi.mule_id() == 0:
return
if not uwsgi.in_farm(self.farm):
return
while True:
message = uwsgi.farm_get_msg()
if message:
self.f(message)
class farm(object):
def __init__(self, name=None, **kwargs):
self.name = name
def __call__(self, f):
postfork_chain.append(farm_loop(f, self.name))
class mule_brain(object):
def __init__(self, f, num):
self.f = f
self.num = num
def __call__(self):
if uwsgi.mule_id() == self.num:
self.f()
class mule_brainloop(mule_brain):
def __call__(self):
if uwsgi.mule_id() == self.num:
while True:
self.f()
class mule(object):
def __init__(self, num):
self.num = num
def __call__(self, f):
postfork_chain.append(mule_brain(f, self.num))
class muleloop(mule):
def __call__(self, f):
postfork_chain.append(mule_brainloop(f, self.num))
class mulemsg_loop(object):
def __init__(self, f, num):
self.f = f
self.num = num
def __call__(self):
if uwsgi.mule_id() == self.num:
while True:
message = uwsgi.mule_get_msg()
if message:
self.f(message)
class mulemsg(object):
def __init__(self, num):
self.num = num
def __call__(self, f):
postfork_chain.append(mulemsg_loop(f, self.num))
class signal(object):
def __init__(self, num, **kwargs):
self.num = num
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
return f
class timer(object):
def __init__(self, secs, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.secs = secs
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_timer(self.num, self.secs)
return f
class cron(object):
def __init__(self, minute, hour, day, month, dayweek, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.minute = minute
self.hour = hour
self.day = day
self.month = month
self.dayweek = dayweek
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_cron(self.num, self.minute, self.hour,
self.day, self.month, self.dayweek)
return f
class rbtimer(object):
def __init__(self, secs, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.secs = secs
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_rb_timer(self.num, self.secs)
return f
class filemon(object):
def __init__(self, fsobj, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.fsobj = fsobj
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_file_monitor(self.num, self.fsobj)
return f
class erlang(object):
def __init__(self, name):
self.name = name
def __call__(self, f):
uwsgi.erlang_register_process(self.name, f)
return f
class lock(object):
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
# ensure the spooler will not call it
if uwsgi.i_am_the_spooler():
return
uwsgi.lock()
try:
return self.f(*args, **kwargs)
finally:
uwsgi.unlock()
class thread(object):
def __init__(self, f):
self.f = f
def __call__(self, *args):
t = Thread(target=self.f, args=args)
t.daemon = True
t.start()
return self.f
class harakiri(object):
def __init__(self, seconds):
self.s = seconds
def real_call(self, *args):
uwsgi.set_user_harakiri(self.s)
r = self.f(*args)
uwsgi.set_user_harakiri(0)
return r
def __call__(self, f):
self.f = f
return self.real_call
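# --- illustrative usage (not part of the original module) ---
# A minimal sketch of how the decorators above are typically applied;
# the function names below are hypothetical examples, not uwsgi API.
#
# @postfork
# def on_worker_start():
#     print "worker ready"
#
# @timer(30)
# def every_30_seconds(signum):
#     print "timer fired", signum
#
# @spool
# def long_task(env):
#     print "spooled with", env
#     return uwsgi.SPOOL_OK
#
# long_task.spool(foo='bar')  # enqueue a spooler job from request code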
``` |
{
"source": "20tab/pybulletphysics",
"score": 2
} |
#### File: 20tab/pybulletphysics/setup.py
```python
import subprocess
from distutils.core import setup, Extension
def pkgconfig(flag, package):
p = subprocess.Popen(['pkg-config', flag, package],
stdout=subprocess.PIPE)
return p.stdout.read().split()
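# e.g. pkgconfig('--cflags', 'bullet') is expected to return something like
# ['-I/usr/include/bullet']; the exact flags depend on the local bullet install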
mod = Extension('bulletphysics',
sources=['src/bulletphysics.cpp',
'src/DbvtBroadphase.cpp',
'src/DefaultCollisionConfiguration.cpp',
'src/CollisionDispatcher.cpp',
'src/SequentialImpulseConstraintSolver.cpp',
'src/DiscreteDynamicsWorld.cpp',
'src/Vector3.cpp',
'src/Quaternion.cpp',
'src/CollisionShape.cpp',
'src/StaticPlaneShape.cpp',
'src/SphereShape.cpp',
'src/Transform.cpp',
'src/DefaultMotionState.cpp',
'src/RigidBodyConstructionInfo.cpp',
'src/RigidBody.cpp',
'src/BoxShape.cpp',
'src/PersistentManifold.cpp',
'src/VehicleTuning.cpp',
'src/WheelInfo.cpp',
'src/DefaultVehicleRaycaster.cpp',
'src/RaycastVehicle.cpp',
'src/CompoundShape.cpp',
'src/CylinderShape.cpp',
],
extra_compile_args=pkgconfig('--cflags', 'bullet'),
extra_link_args=pkgconfig('--libs', 'bullet'))
setup(
name='bulletphysics',
version='0.1',
description='python wrapper for bulletphysics library',
ext_modules=[mod],
author='<NAME>.',
author_email='<EMAIL>',
url='https://github.com/20tab/pybulletphysics',
license='MIT License'
)
```
#### File: pybulletphysics/tests/test_shape.py
```python
import unittest
from bulletphysics import *
class ShapeTest(unittest.TestCase):
def test_creation(self):
compound = CompoundShape()
box = BoxShape(Vector3(1.0,0.5,2.0))
transform = Transform()
transform.setIdentity()
transform.setOrigin(Vector3(0, 1, 0))
compound.addChildShape(transform, box)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
```
#### File: pybulletphysics/tests/test_transform.py
```python
import unittest
from bulletphysics import *
class TransformTest(unittest.TestCase):
def setUp(self):
self.transform = Transform( Quaternion(0.0, 1.0, 2.0, 1.0), Vector3(0.0, 1.1, 2.2))
def test_opengl(self):
matrix = [0.0] * 5
self.assertRaises(TypeError, self.transform.getOpenGLMatrix, matrix)
matrix = [0.0] * 16
self.transform.getOpenGLMatrix(matrix)
self.assertEqual(-0.67, round(matrix[0], 2))
def test_rotation(self):
self.transform.setIdentity()
q = Quaternion(1.0, 2.0, 3.0, 4.0)
self.transform.setRotation(q)
q = self.transform.getRotation()
self.assertEqual(0.2, round(q.getX(),1))
self.assertEqual(0.4, round(q.getY(),1))
self.assertEqual(0.5, round(q.getZ(),1))
self.assertEqual(0.7, round(q.getW(),1))
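# The expected values are consistent with getRotation() returning the
# normalized quaternion: each component of (1, 2, 3, 4) is divided by
# sqrt(1 + 4 + 9 + 16) = sqrt(30) ~= 5.48, i.e. roughly
# (0.18, 0.37, 0.55, 0.73), which rounds to (0.2, 0.4, 0.5, 0.7).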
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
```
#### File: pybulletphysics/tests/test_world.py
```python
import unittest
from bulletphysics import *
class WorldTest(unittest.TestCase):
def setUp(self):
collisionConfiguration = DefaultCollisionConfiguration()
self.world = DiscreteDynamicsWorld( CollisionDispatcher(collisionConfiguration),
DbvtBroadphase(), SequentialImpulseConstraintSolver(),
collisionConfiguration)
def test_gravity(self):
self.assertRaises(TypeError, self.world.setGravity, True)
self.assertIsNone(self.world.setGravity(Vector3(0.0, -9.81, 0.0)))
def test_remove_none_rigidbody(self):
self.assertRaises(TypeError, self.world.removeRigidBody, None)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "20tab/python-fattureincloud",
"score": 2
} |
#### File: fattureincloud/models/anagrafica.py
```python
from fattureincloud.models.base import Resource
class Soggetto(Resource):
"""Soggetto class."""
def lista(self, _id="", filtro="", nome="", cf="", piva="", pagina=1):
"""Return list of elements filtered by given parameters if set."""
payload = {
"id": _id,
"filtro": filtro,
"nome": nome,
"cf": cf,
"piva": piva,
"pagina": pagina,
}
return super().lista(**payload)
def nuovo(self, **kwargs):
"""Create new soggetto."""
raise NotImplementedError
def importa(self, **kwargs):
"""Import a list of soggetto."""
raise NotImplementedError
def modifica(self, **kwargs):
"""Update soggetto."""
raise NotImplementedError
def elimina(self, **kwargs):
"""Delete soggetto."""
raise NotImplementedError
class Clienti(Soggetto):
"""Clienti class."""
list_key = "lista_clienti"
class Fornitori(Soggetto):
"""Fornitori class."""
list_key = "lista_fornitori"
```
#### File: fattureincloud/models/magazzino.py
```python
from datetime import date
from fattureincloud.models.base import Resource
class ArriviMerce(Resource):
"""ArriviMerce class."""
list_key = "lista_documenti"
def lista(
self,
anno="",
data_inizio="",
data_fine="",
fornitore="",
id_fornitore="",
mostra_link_allegato="",
):
"""Return list of elements filtered by given parameters if set."""
payload = {
"anno": anno or date.today().year,
"data_inizio": data_inizio
or date(year=date.today().year, month=1, day=1).strftime("%d/%m/%Y"),
"data_fine": data_fine
or date(year=date.today().year, month=12, day=31).strftime("%d/%m/%Y"),
"fornitore": fornitore,
"id_fornitore": id_fornitore,
"mostra_link_allegato": mostra_link_allegato,
}
return super().lista(**payload)
def dettagli(self, _id=""):
"""Return arrivimerce's details."""
payload = {"id": _id}
return self.requester.post(f"{self.path}dettagli", payload).get(
"dettagli_documento", {}
)
```
#### File: tests/models/test_anagrafica.py
```python
from unittest import TestCase
from requests_mock import Mocker
from fattureincloud.client import FattureInCloudAPI
from tests.mocking import mocker_register_uri
class TestAnagrafica(TestCase):
"""Define simple test case for base client request."""
maxDiff = None
def setUp(self):
"""Set client with key and uid."""
self.client = FattureInCloudAPI(api_uid="123456", api_key="qwerty")
@Mocker()
def test_clienti(self, mocker):
"""Test clienti."""
mocker_register_uri(
mocker, self.client.host, "/clienti/lista", "anagrafica/clienti.json"
)
self.assertEqual(len(self.client.clienti.lista()), 4)
@Mocker()
def test_fornitori(self, mocker):
"""Test fornitori."""
mocker_register_uri(
mocker, self.client.host, "/fornitori/lista", "anagrafica/fornitori.json"
)
self.assertEqual(len(self.client.fornitori.lista()), 2)
def test_nuovo(self):
"""Test nuovo method."""
with self.assertRaises(NotImplementedError):
self.client.clienti.nuovo()
def test_importa(self):
"""Test importa method."""
with self.assertRaises(NotImplementedError):
self.client.clienti.importa()
def test_modifica(self):
"""Test modifica method."""
with self.assertRaises(NotImplementedError):
self.client.clienti.modifica()
def test_elimina(self):
"""Test elimina method."""
with self.assertRaises(NotImplementedError):
self.client.clienti.elimina()
```
#### File: tests/models/test_prodotti.py
```python
from unittest import TestCase
from requests_mock import Mocker
from fattureincloud.client import FattureInCloudAPI
from tests.mocking import mocker_register_uri
class TestProdotti(TestCase):
"""Define simple test case for base client request."""
maxDiff = None
def setUp(self):
"""Set client with key and uid."""
self.client = FattureInCloudAPI(api_uid="123456", api_key="qwerty")
@Mocker()
def test_prodotti(self, mocker):
"""Test prodotti."""
mocker_register_uri(
mocker, self.client.host, "/prodotti/lista", "prodotti/prodotti.json"
)
self.assertEqual(len(self.client.prodotti.lista()), 2)
@Mocker()
def test_prodotti_2_pages(self, mocker):
"""Test prodotti with 2 pages."""
mocker_register_uri(
mocker,
self.client.host,
"/prodotti/lista",
"prodotti/prodotti_2_pages.json",
)
self.assertEqual(len(self.client.prodotti.lista()), 4)
def test_nuovo(self):
"""Test nuovo method."""
with self.assertRaises(NotImplementedError):
self.client.prodotti.nuovo()
def test_importa(self):
"""Test importa method."""
with self.assertRaises(NotImplementedError):
self.client.prodotti.importa()
def test_modifica(self):
"""Test modifica method."""
with self.assertRaises(NotImplementedError):
self.client.prodotti.modifica()
def test_elimina(self):
"""Test elimina method."""
with self.assertRaises(NotImplementedError):
self.client.prodotti.elimina()
```
#### File: python-fattureincloud/tests/test_client.py
```python
from unittest import TestCase
from unittest.mock import patch
from requests_mock import Mocker
from fattureincloud.client import FattureInCloudAPI
from fattureincloud.exceptions import FattureInCloudExcpetion
from tests.mocking import MockedErrorResponse, mocker_register_uri
class TestClient(TestCase):
"""Define simple test case for base client request."""
maxDiff = None
def setUp(self):
"""Set client with key and uid."""
self.client = FattureInCloudAPI(api_uid="123456", api_key="qwerty")
def test_client_init(self):
"""Test client initialization."""
self.assertIsInstance(self.client, FattureInCloudAPI)
@Mocker()
def test_post(self, mocker):
"""Test requester post method."""
mocker_register_uri(mocker, self.client.host, "/richiesta/info", "info.json")
EXPECTED = {
"messaggio": "I parametri 'api_key' e 'api_uid' sono corretti.",
"limite_breve": "Rimangono 29 richieste per i prossimi 60 secondi",
"limite_medio": "Rimangono 499 richieste per i prossimi 3600 secondi",
"limite_lungo": "Rimangono 36929 richieste per i prossimi 1142233 secondi",
"success": True,
}
res = self.client.info()
self.assertEqual(res, EXPECTED)
@Mocker()
def test_account_info(self, mocker):
"""Test account info method."""
mocker_register_uri(mocker, self.client.host, "/info/account", "account.json")
res = self.client.account()
self.assertTrue(res.get("success"))
self.assertEqual(res.get("nome"), "<NAME>.r.l.")
@Mocker()
def test_post_error(self, mocker):
"""Test requester post method with error."""
mocker_register_uri(mocker, self.client.host, "/richiesta/info", "error.json")
with self.assertRaises(FattureInCloudExcpetion):
self.client.info()
@patch("requests.post", return_value=MockedErrorResponse())
def test_post_error_status_code(self, m):
"""Test requester post method with error status code."""
with self.assertRaises(FattureInCloudExcpetion):
self.client.info()
``` |
{
"source": "20tab/python-gmaps",
"score": 3
} |
#### File: src/gmaps/directions.py
```python
from gmaps.client import Client
class Directions(Client):
DIRECTIONS_URL = 'directions/'
def directions(self, origin, destination, mode=None, alternatives=None,
waypoints=[], optimize_waypoints=False,
avoid=None, language=None, units=None,
region=None, departure_time=None,
arrival_time=None, sensor=None):
"""Get directions between locations
:param origin: Origin location - string address; (latitude, longitude)
two-tuple, dict with ("lat", "lon") keys or object with (lat, lon)
attributes
:param destination: Destination location - type same as origin
:param mode: Travel mode as string, defaults to "driving".
See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_
:param alternatives: True if more than one route alternative should be
returned
:param waypoints: Iterable with set of intermediate stops,
like ("Munich", "Dallas")
`See google docs details under <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_
:param optimize_waypoints: if true will attempt to re-order supplied
waypoints to minimize overall cost of the route. If waypoints are
optimized, the route returned will show the optimized order under
"waypoint_order"
`See google docs details under <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_
:param avoid: Iterable with set of restrictions,
like ("tolls", "highways"). For full list refer to
`google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_
:param language: The language in which to return results.
See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_
:param units: Unit system for result. Defaults to unit system of
origin's country.
See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_
:param region: The region code. Affects geocoding of origin and
destination (see `gmaps.Geocoding.geocode` region parameter)
:param departure_time: Desired time of departure as
seconds since midnight, January 1, 1970 UTC
:param arrival_time: Desired time of arrival for transit directions as
seconds since midnight, January 1, 1970 UTC.
""" # noqa
if optimize_waypoints:
waypoints.insert(0, "optimize:true")
parameters = dict(
origin=self.assume_latlon_or_address(origin),
destination=self.assume_latlon_or_address(destination),
mode=mode,
alternatives=alternatives,
waypoints=waypoints,
avoid=avoid,
language=language,
units=units,
region=region,
departure_time=departure_time,
arrival_time=arrival_time,
sensor=sensor,
)
return self._make_request(self.DIRECTIONS_URL, parameters, "routes")
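# --- illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the Client base class is constructed with an
# API key; the parameter values below are examples only.
#
# directions_api = Directions(api_key='your-key')
# routes = directions_api.directions('Warsaw', 'Berlin',
#                                    mode='driving',
#                                    alternatives=True,
#                                    avoid=('tolls',))
# print(routes[0]['legs'][0]['distance'])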
``` |
{
"source": "20tab/robotab",
"score": 3
} |
#### File: 20tab/robotab/bullphys.py
```python
from bulletphysics import *
import math
import redis
import uwsgi
import gevent
import gevent.select
class Ramp(object):
def __init__(self, name, world, x, y, z, w, h, d):
self.shape = BoxShape(Vector3(w, h, d));
q = Quaternion(0,0,0,1)
q.setRotation(Vector3(0.0, 0.0, 1.0), 0.3)
self.motion_state = DefaultMotionState( Transform(q, Vector3(x, y, z)) )
print self.motion_state
construction_info = RigidBodyConstructionInfo(0, self.motion_state, self.shape, Vector3(0,0,0))
self.body = RigidBody( construction_info )
world.ramps[name] = self
world.world.addRigidBody(self.body)
self.trans = Transform()
self.origin = self.trans.getOrigin()
self.name = name
self.last_msg = None
self.world = world
self.w = w
self.h = h
self.d = d
def draw(self):
self.motion_state.getWorldTransform(self.trans)
pos_x = self.origin.getX()
pos_y = self.origin.getY()
pos_z = self.origin.getZ()
quaternion = self.trans.getRotation()
rot_x = quaternion.getX()
rot_y = quaternion.getY()
rot_z = quaternion.getZ()
rot_w = quaternion.getW()
msg = '{name}:{pos_x},{pos_y},{pos_z:},{size_x},{size_y},{size_z},{rot_x:.2f},{rot_y:.2f},{rot_z:.2f},{rot_w:.2f}'.format(
name=self.name,
pos_x=int(pos_x),
pos_y=int(pos_y),
pos_z=int(pos_z),
size_x=self.w,
size_y=self.h,
size_z=self.d,
rot_x=rot_x,
rot_y=rot_y,
rot_z=rot_z,
rot_w=rot_w,
)
if msg != self.last_msg:
print msg
self.world.redis.publish('phys', msg)
self.last_msg = msg
class Box(object):
def __init__(self, name, world, weight, size, x, y, z, r=0.0):
self.mass = weight
self.shape = BoxShape(Vector3(size, size, size));
self.motion_state = DefaultMotionState( Transform(Quaternion(0,0,0,1), Vector3(x, y, z)) )
print self.motion_state
self.inertia = Vector3(0,0,0)
self.shape.calculateLocalInertia(self.mass, self.inertia)
construction_info = RigidBodyConstructionInfo(self.mass, self.motion_state, self.shape, self.inertia)
construction_info.m_friction = 0.8
self.body = RigidBody( construction_info )
world.boxes[name] = self
world.world.addRigidBody(self.body)
self.trans = Transform()
self.origin = self.trans.getOrigin()
self.name = name
self.rx = 0
self.ry = 0
self.rz = 0
self.size = size
self.last_msg = None
self.world = world
self.matrix = [0.0] * 16
def draw_bad(self):
self.motion_state.getWorldTransform(self.trans)
self.trans.getOpenGLMatrix(self.matrix)
msg = '{name}:{matrix}'.format(name=self.name,matrix=','.join(map(str,self.matrix)))
if msg != self.last_msg:
print msg
if msg.startswith('box0'):
print msg
self.world.redis.publish('phys', msg)
self.last_msg = msg
def draw(self):
self.motion_state.getWorldTransform(self.trans)
pos_x = self.origin.getX()
pos_y = self.origin.getY()
pos_z = self.origin.getZ()
quaternion = self.trans.getRotation()
rot_x = quaternion.getX()
rot_y = quaternion.getY()
rot_z = quaternion.getZ()
rot_w = quaternion.getW()
msg = '{name}:{pos_x},{pos_y},{pos_z:},{size_x},{size_y},{size_z},{rot_x:.2f},{rot_y:.2f},{rot_z:.2f},{rot_w:.2f}'.format(
name=self.name,
pos_x=int(pos_x),
pos_y=int(pos_y),
pos_z=int(pos_z),
size_x=self.size,
size_y=self.size,
size_z=self.size,
rot_x=rot_x,
rot_y=rot_y,
rot_z=rot_z,
rot_w=rot_w,
)
if msg != self.last_msg:
print msg
if msg.startswith('box0'):
print msg
self.world.redis.publish('phys', msg)
self.last_msg = msg
class World(object):
def __init__(self):
self.collisionConfiguration = DefaultCollisionConfiguration()
self.dispatcher = CollisionDispatcher(self.collisionConfiguration)
self.solver = SequentialImpulseConstraintSolver()
self.broadphase = DbvtBroadphase()
self.world = DiscreteDynamicsWorld(self.dispatcher, self.broadphase, self.solver, self.collisionConfiguration)
self.world.setGravity( Vector3(0,-9.81,0) )
print self.world
q = Quaternion(0,0,0,1)
#q.setRotation(Vector3(0, 0, 1), 30)
self.ground_motion_state = DefaultMotionState( Transform(q, Vector3(0,1,0)) )
print self.ground_motion_state
self.ground_shape = StaticPlaneShape(Vector3(0,1,0),1)
construction_info = RigidBodyConstructionInfo(0, self.ground_motion_state, self.ground_shape, Vector3(0,0,0))
construction_info.m_friction = 0.8
self.ground = RigidBody( construction_info )
print self.ground
self.world.addRigidBody(self.ground)
self.boxes = {}
self.ramps = {}
self.redis = redis.StrictRedis()
self.redis_pubsub = redis.StrictRedis()
self.channel = self.redis_pubsub.pubsub()
self.channel.subscribe('phys')
self.redis_fd = self.channel.connection._sock.fileno()
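# physic_engine advances the simulation and publishes object state on redis;
# it sleeps so that each iteration takes roughly 33.33 ms (about 30 updates
# per second).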
def physic_engine(world):
while True:
t = uwsgi.micros() / 1000.0
world.world.stepSimulation(1, 30)
for name,box in world.boxes.items():
box.draw()
for name,ramp in world.ramps.items():
ramp.draw()
t1 = uwsgi.micros() / 1000.0
delta = t1 - t
if delta < 33.33:
gevent.sleep((33.33 - delta) / 1000.0)
def application(e, sr):
if e['PATH_INFO'] == '/phys':
uwsgi.websocket_handshake()
w = World()
me = Box('box0', w, 1000, 250, -1000, 250, 0)
box1 = Box('box1', w, 20, 50, -1000, 250, 0)
box2 = Box('box2', w, 20, 50, -1500, 350, 0)
box3 = Box('box3', w, 20, 50, -1500, 450, 0)
box4 = Box('box4', w, 200, 150, -1500, 550, 0)
ramp = Ramp('ramp0', w, 400, 0, 100, 7000, 10, 400)
print "BOX DRAWING COMPLETE"
gevent.spawn(physic_engine, w)
ufd = uwsgi.connection_fd()
while True:
ready = gevent.select.select([ufd, w.redis_fd], [], [], timeout=4.0)
if not ready[0]:
uwsgi.websocket_recv_nb()
for fd in ready[0]:
if fd == ufd:
try:
msg = uwsgi.websocket_recv_nb()
if msg == 'fw':
orientation = me.body.getOrientation()
v = Vector3(0, 0, 5000).rotate(orientation.getAxis(), orientation.getAngle())
me.body.activate(True)
me.body.applyCentralImpulse( v )
elif msg == 'bw':
orientation = me.body.getOrientation()
v = Vector3(0, 0, -5000).rotate(orientation.getAxis(), orientation.getAngle())
me.body.activate(True)
me.body.applyCentralImpulse( v )
elif msg == 'rl':
orientation = me.body.getOrientation()
v = Vector3(0, 2000000, 0).rotate(orientation.getAxis(), orientation.getAngle())
me.body.activate(True)
me.body.applyTorqueImpulse( v )
elif msg == 'rr':
orientation = me.body.getOrientation()
v = Vector3(0, -2000000, 0).rotate(orientation.getAxis(), orientation.getAngle())
me.body.activate(True)
me.body.applyTorqueImpulse( v )
#me.body.applyForce( Vector3(0, 0, 10000), Vector3(-200, 0, 0))
#me.body.applyForce( Vector3(0, 0, -10000), Vector3(200, 0, 0))
except IOError:
import sys
print sys.exc_info()
return [""]
elif fd == w.redis_fd:
msg = w.channel.parse_response()
if msg[0] == 'message':
uwsgi.websocket_send(msg[2])
```
#### File: 20tab/robotab/math3d.py
```python
import math
class Vector3(object):
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
# apply a quaternion to a vector
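# (computes v' = q * v * q^-1 with the intermediate quaternion expanded
#  inline: ix/iy/iz/iw hold q * v, the result is then multiplied by the
#  conjugate of q)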
def applyQuaternion(self, q):
x = self.x
y = self.y
z = self.z
qx = q.x
qy = q.y
qz = q.z
qw = q.w
ix = qw * x + qy * z - qz * y
iy = qw * y + qz * x - qx * z
iz = qw * z + qx * y - qy * x
iw = -qx * x - qy * y - qz * z
self.x = ix * qw + iw * -qx + iy * -qz - iz * -qy
self.y = iy * qw + iw * -qy + iz * -qx - ix * -qz
self.z = iz * qw + iw * -qz + ix * -qy - iy * -qx
return self
# sum of vectors
def add(self, v):
self.x += v.x
self.y += v.y
self.z += v.z
def multiplyScalar(self, n):
self.x *= n
self.y *= n
self.z *= n
return self
class Quaternion(object):
def __init__(self, x=0.0, y=0.0, z=0.0, w=1.0):
self._x = x
self._y = y
self._z = z
self._w = w
@property
def x(self):
return self._x
@x.setter
def x(self, n):
self._x = n
self.updateEuler()
@property
def y(self):
return self._y
@y.setter
def y(self, n):
self._y = n
self.updateEuler()
@property
def z(self):
return self._z
@z.setter
def z(self, n):
self._z = n
self.updateEuler()
@property
def w(self):
return self._w
@w.setter
def w(self, n):
self._w = n
self.updateEuler()
def updateEuler(self):
self.euler.setFromQuaternion(self)
def setFromEuler(self, euler):
c1 = math.cos(euler._x / 2)
c2 = math.cos(euler._y / 2)
c3 = math.cos(euler._z / 2)
s1 = math.sin(euler._x / 2)
s2 = math.sin(euler._y / 2)
s3 = math.sin(euler._z / 2)
self._x = s1 * c2 * c3 + c1 * s2 * s3
self._y = c1 * s2 * c3 - s1 * c2 * s3
self._z = c1 * c2 * s3 + s1 * s2 * c3
self._w = c1 * c2 * c3 - s1 * s2 * s3
class Euler(object):
def __init__(self, x=0.0, y=0.0, z=0.0):
self._x = x
self._y = y
self._z = z
@property
def x(self):
return self._x
@x.setter
def x(self, n):
self._x = n
self.updateQuaternion()
@property
def y(self):
return self._y
@y.setter
def y(self, n):
self._y = n
self.updateQuaternion()
@property
def z(self):
return self._z
@z.setter
def z(self, n):
self._z = n
self.updateQuaternion()
def updateQuaternion(self):
self.quaternion.setFromEuler(self)
def clamp(self, x):
return min(max(x, -1), 1)
def setFromQuaternion(self, q):
sqx = q.x * q.x
sqy = q.y * q.y
sqz = q.z * q.z
sqw = q.w * q.w
self._x = math.atan2(2 * (q.x * q.w - q.y * q.z), (sqw - sqx - sqy + sqz))
self._y = math.asin(self.clamp(2 * (q.x * q.z + q.y * q.w)))
self._z = math.atan2(2 * (q.z * q.w - q.x * q.y), (sqw + sqx - sqy - sqz))
class MathPlayer(object):
def __init__(self, x=0, y=0, z=0):
self.scale = 7
self.radius = 8
self.position = Vector3(x, y, z)
self.rotation = Euler()
self.quaternion = Quaternion()
self.quaternion.euler = self.rotation
self.rotation.quaternion = self.quaternion
def position_tuple(self):
return (self.position.x, self.position.y, self.position.z)
def set_position(self, pos):
self.position.x = pos[0]
self.position.y = pos[1]
self.position.z = pos[2]
def translateZ(self, n):
v1 = Vector3(0, 0, 1)
v1.applyQuaternion(self.quaternion)
self.position.add(v1.multiplyScalar(n))
def translateX(self, n):
v1 = Vector3(1, 0, 0)
v1.applyQuaternion(self.quaternion)
self.position.add(v1.multiplyScalar(n))
def rotateY(self, n):
self.rotation.y += n
self.rotation.updateQuaternion()
def circleCollide(self, x, z, r):
if self.position.x > x:
x1 = (self.position.x - x) ** 2
else:
x1 = (x - self.position.x) ** 2
if self.position.z > z:
x2 = (self.position.z - z) ** 2
else:
x2 = (z - self.position.z) ** 2
r1 = ((self.radius * self.scale) + r) ** 2
if (x1+x2) <= r1:
return True
return False
```
#### File: 20tab/robotab/phys.py
```python
import ode
import math
import redis
import uwsgi
import gevent
import gevent.select
class Box(object):
def __init__(self, name, world, weight, size):
self.body = ode.Body(world.world)
M = ode.Mass()
M.setBox(2500, size, size, size)
M.mass = weight
self.body.setMass(M)
self.geom = ode.GeomBox(world.space, lengths=(size, size, size))
self.geom.setBody(self.body)
world.boxes[name] = self
self.name = name
self.rx = 0
self.ry = 0
self.rz = 0
self.size = size
self.last_msg = None
self.world = world
def rotateY(self, amount):
self.ry += amount
self.geom.setQuaternion((1.0, self.rx, self.ry, self.rz))
def set_pos(self, x, y, z):
self.body.setPosition((x, y, z))
def draw(self):
pos_x,pos_y,pos_z = self.body.getPosition()
rot_w,rot_x,rot_y,rot_z = self.body.getQuaternion()
msg = '{name}:{pos_x},{pos_y},{pos_z:},{size_x},{size_y},{size_z},{rot_x:.2f},{rot_y:.2f},{rot_z:.2f}'.format(
name=self.name,
pos_x=int(pos_x),
pos_y=int(pos_y),
pos_z=int(pos_z),
size_x=self.size,
size_y=self.size,
size_z=self.size,
rot_x=rot_x,
rot_y=rot_y,
rot_z=rot_z,
)
if msg != self.last_msg:
if msg.startswith('box0'):
print msg
self.world.redis.publish('phys', msg)
self.last_msg = msg
class World(object):
def __init__(self):
self.world = ode.World()
self.world.setGravity((0, -9.81, 0))
self.space = ode.Space()
self.boxes = {}
self.contactgroup = ode.JointGroup()
self.redis = redis.StrictRedis()
self.redis_pubsub = redis.StrictRedis()
self.channel = self.redis_pubsub.pubsub()
self.channel.subscribe('phys')
self.redis_fd = self.channel.connection._sock.fileno()
self.floor = ode.GeomPlane(self.space, (0,1,0), 0)
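# near_callback is passed to space.collide(): it is invoked for every pair of
# geoms whose bounding volumes overlap and creates temporary contact joints,
# which physic_engine empties again after each world step.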
def near_callback(args, geom1, geom2):
contacts = ode.collide(geom1, geom2)
world,contactgroup = args
for c in contacts:
c.setBounce(0.1)
c.setMu(10000)
j = ode.ContactJoint(world, contactgroup, c)
j.attach(geom1.getBody(), geom2.getBody())
def physic_engine(world):
while True:
t = uwsgi.micros() / 1000.0
world.space.collide((world.world, world.contactgroup), near_callback)
for name,box in world.boxes.items():
box.draw()
world.world.step(1)
world.contactgroup.empty()
t1 = uwsgi.micros() / 1000.0
delta = t1 - t
if delta < 33.33:
gevent.sleep((33.33 - delta) / 1000.0)
def application(e, sr):
if e['PATH_INFO'] == '/phys':
uwsgi.websocket_handshake()
w = World()
me = Box('box0', w, 900, 200)
me.set_pos(0, 1150, 0)
box1 = Box('box1', w, 20, 50)
box1.set_pos(0, 250, 0)
box2 = Box('box2', w, 20, 50)
box2.set_pos(0, 350, 0)
box3 = Box('box3', w, 20, 50)
box3.set_pos(0, 450, 0)
box4 = Box('box4', w, 200, 150)
box4.set_pos(0, 550, 0)
gevent.spawn(physic_engine, w)
ufd = uwsgi.connection_fd()
while True:
ready = gevent.select.select([ufd, w.redis_fd], [], [], timeout=4.0)
if not ready[0]:
uwsgi.websocket_recv_nb()
for fd in ready[0]:
if fd == ufd:
try:
msg = uwsgi.websocket_recv_nb()
if msg == 'fw':
me.body.addForce((0, 250, 0))
except IOError:
import sys
print sys.exc_info()
return [""]
elif fd == w.redis_fd:
msg = w.channel.parse_response()
if msg[0] == 'message':
uwsgi.websocket_send(msg[2])
``` |
{
"source": "20tab/twentytab-previewadmin",
"score": 2
} |
#### File: previewadmin/templatetags/previewadmin_tags.py
```python
from django.template import Library
from django.utils.safestring import mark_safe
from HTMLParser import HTMLParser
register = Library()
@register.filter
def custom_safe(value):
html_parser = HTMLParser()
unescaped = html_parser.unescape(value)
return mark_safe(unescaped)
``` |
{
"source": "20tab/twentytab-sortable",
"score": 3
} |
#### File: twentytab-sortable/sortable/tests.py
```python
from django.test import TestCase
from sortable.models import PositionModel
class SortableTestCase(TestCase):
"""
Add 'sortable' to INSTALLED_APPS to run the following tests.
"""
class IAMSortable(PositionModel):
class Meta:
ordering = ('position',)
def test_ordering(self):
s0 = self.IAMSortable()
s0.position = 1
s0.save()
s1 = self.IAMSortable()
s1.position = 0
s1.save()
all_sortable = self.IAMSortable.objects.all()
self.assertEqual(all_sortable[0].position, 0)
self.assertEqual(all_sortable[1].position, 1)
return s0, s1
def test_max_pos(self):
s0, s1 = self.test_ordering()
s2 = self.IAMSortable()
s2.position = 999
s2.save()
self.assertEqual(s0.max_pos, 1000)
s2.delete()
self.assertEqual(s0.max_pos, 2)
``` |
{
"source": "20tab/upy",
"score": 2
} |
#### File: contrib/cked/fields.py
```python
from django.db import models
from django import forms
from upy.contrib.cked.widgets import CKEditorWidget
class RichTextField(models.TextField):
"""
Field that construct the textarea field with CKEditor widget.
"""
def __init__(self, *args, **kwargs):
self.config = kwargs.pop("config", None)
super(RichTextField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': RichTextFormField,
'config':self.config
}
defaults.update(kwargs)
return super(RichTextField, self).formfield(**defaults)
class RichTextFormField(forms.fields.Field):
"""
FormField for RichTextField
"""
def __init__(self, config=None, *args, **kwargs):
kwargs.update({'widget': CKEditorWidget(config=config)})
super(RichTextFormField, self).__init__(*args, **kwargs)
# Fix field for South
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^upy\.contrib\.cked\.fields\.RichTextField"])
except ImportError:
pass
```
#### File: contrib/customadmin/admin.py
```python
from django.contrib import admin
from django import forms
from upy.contrib.customadmin.models import CustomAdmin, CustomApp, CustomLink, _, \
list_apps, list_models, CustomModel, all_apps
from upy.contrib.sortable.admin import PositionAdmin
from upy.utils import upy_re_match
from django.conf import settings
from upy.contrib.image.admin import AdminThumbnail, ColorBoxPositionAdmin
def cleaning_color_picker(form, fields):
"""
It cleans all color fields defined in CustomAdmin model
"""
chk = True
for field in fields:
if form.cleaned_data[field] and not upy_re_match(r'^[0-9a-fA-F]+$',
"%s" % form.cleaned_data[field]):
chk = False
form._errors[field] = form.error_class(
[_(u'You must compile this field with hexadecimal characters')])
if form.cleaned_data[field] and len(form.cleaned_data[field]) != 6:
chk = False
form._errors[field] = form.error_class(
[_(u'You must compile this field with six hexadecimal characters')])
return form, chk
class CustomAdminForm(forms.ModelForm):
"""
It ovverrides CustomAdmin modelform
"""
def clean(self):
cleaned_data = super(CustomAdminForm, self).clean()
view_mode = cleaned_data['view_mode']
autocomplete_app_list = cleaned_data['autocomplete_app_list']
autocomplete_models_list = cleaned_data['autocomplete_models_list']
if view_mode and not autocomplete_app_list:
try:
CustomApp.objects.get(application__iexact="Customadmin")
except CustomApp.DoesNotExist:
msg_view_mode = _(u"You have to define Customadmin in your CustomApp if you use a custom view_mode...")
msg_autocomplete_app_list = _(
u"...or at least enable autocomplete_app_list which will include Customadmin too.")
self._errors["view_mode"] = self.error_class([msg_view_mode])
self._errors["autocomplete_app_list"] = self.error_class([msg_autocomplete_app_list])
# These fields are no longer valid. Remove them from the
# cleaned data.
del cleaned_data["view_mode"]
del cleaned_data["autocomplete_app_list"]
#raise forms.ValidationError(_("You have to define Customadmin in your CustomApp
#if you use a custom view_mode without autocomplete_app_list"))
elif view_mode and not autocomplete_models_list:
try:
CustomModel.objects.get(model__iexact=CustomAdmin._meta.verbose_name_plural)
except CustomModel.DoesNotExist:
msg_view_mode = _(
u"You have to define Customadmin in your CustomModel if you use a custom view_mode...")
msg_autocomplete_models_list = _(
u"...or at least enable autocomplete_models_list which will include Customadmin too.")
self._errors["view_mode"] = self.error_class([msg_view_mode])
self._errors["autocomplete_models_list"] = self.error_class([msg_autocomplete_models_list])
# These fields are no longer valid. Remove them from the
# cleaned data.
del cleaned_data["view_mode"]
del cleaned_data["autocomplete_models_list"]
#raise forms.ValidationError(_("You have to define Customadmin in your CustomApp
#if you use a custom view_mode without autocomplete_app_list"))
self, chk = cleaning_color_picker(self, ['bg_header', 'table_title_bg',
'table_title_color', 'h2_color',
'h3_color', 'link_color',
'link_hover_color'])
if not chk:
raise forms.ValidationError(_("Some values are not hexadecimal string"))
return cleaned_data
class CustomAdminAdmin(admin.ModelAdmin):
"""
Admin's options for CustomAdmin model
"""
list_display = ('customization', 'branding', 'branding_link',
'default', 'view_mode', 'autocomplete_app_list', 'autocomplete_models_list')
list_editable = ('branding', 'branding_link', 'default', 'view_mode')
fieldsets = ((_('Branding'), {'fields':
(('branding', 'branding_link'),
('branding_image', 'default')),
},),
(_('View Option'), {'fields':
(('view_mode', 'use_log_sidebar'),
('autocomplete_app_list', 'autocomplete_models_list')),
},),
(_('Images'), {'fields':
(('default_app_image', 'default_model_image',),),
},),
(_('Style'), {'fields':
(('bg_header',), ('sitename_font', 'sitename_font_size',
'sitename_font_weight'), ('table_title_bg', 'table_title_color'),
('h2_color', 'h2_size'), ('h3_color', 'h3_size'),
('link_color', 'link_hover_color'),
),
},),
(_('Code'), {'fields':
(('html_head',), ('use_css_code',), ('css_code',)),
},),
)
form = CustomAdminForm
save_on_top = True
class Meta:
model = CustomAdmin
class Media:
js = ('/upy_static/customadmin/js/customadmin.js',)
class CustomAppForm(forms.ModelForm):
"""
It overrides admin form for CustomApp model
"""
def __init__(self, *args, **kwargs):
super(CustomAppForm, self).__init__(*args, **kwargs)
listapps = list_apps()
if self.instance:
listapps.append([self.instance.application] * 2)
self.fields['application'].widget = forms.Select(choices=listapps)
class Meta:
model = CustomApp
exclude = ()
class CustomAppAdmin(ColorBoxPositionAdmin):
"""
Admin's options for CustomApp model
"""
admin_thumbnail = AdminThumbnail(image_field='thumb')
list_display = ('position', 'application', 'verbose_app_name',
'show_models', 'image', 'admin_thumbnail',)
list_editable = ['position', 'verbose_app_name', 'image']
list_display_links = ['application', ]
prepopulated_fields = {'verbose_app_name': ('application',)}
fieldsets = ((_('Icons'), {'fields':
(('application', 'verbose_app_name'),
('image',), ('show_models',),),
},),
)
save_on_top = True
form = CustomAppForm
class Meta:
model = CustomApp
class CustomLinkAdmin(ColorBoxPositionAdmin):
"""
Admin's options for CustomLink model
"""
admin_thumbnail = AdminThumbnail(image_field='thumb')
list_display = ('position', 'link_url', 'verbose_url_name', 'admin_thumbnail',)
list_editable = ['position', 'verbose_url_name', ]
list_display_links = ['link_url', ]
prepopulated_fields = {'verbose_url_name': ('link_url',)}
fieldsets = ((_('Icons'), {'fields':
(('link_url', 'verbose_url_name'), ('image',),),
},),
)
save_on_top = True
class Meta:
model = CustomLink
class CustomModelForm(forms.ModelForm):
"""
It overrides admin form for CustomModel model
"""
def __init__(self, *args, **kwargs):
super(CustomModelForm, self).__init__(*args, **kwargs)
listmodels = list_models()
listapps = all_apps()
print listapps
if self.instance.pk:
listmodels.append([self.instance.model] * 2)
self.fields['model'].widget = forms.Select(choices=listmodels)
self.fields['app'].widget = forms.Select(choices=listapps)
class Meta:
model = CustomModel
exclude = ()
class CustomModelAdmin(ColorBoxPositionAdmin):
"""
Admin's options for CustomModel model
"""
admin_thumbnail = AdminThumbnail(image_field='thumb')
list_display = ('position', 'app', 'model', 'image', 'admin_thumbnail',)
list_editable = ['position', 'image']
list_display_links = ['model', ]
list_filter = ('app',)
fieldsets = ((_('Icons'), {'fields':
(('app', 'model',),
('image',),),
},),
)
save_on_top = True
form = CustomModelForm
class Meta:
model = CustomModel
class Media:
js = ColorBoxPositionAdmin.Media.js + ('/upy_static/customadmin/js/custommodel.js',)
admin.site.register(CustomAdmin, CustomAdminAdmin)
admin.site.register(CustomApp, CustomAppAdmin)
admin.site.register(CustomLink, CustomLinkAdmin)
admin.site.register(CustomModel, CustomModelAdmin)
```
#### File: contrib/customadmin/models.py
```python
from upy.contrib.tree.models import _
from django.db import models
from upy.contrib.colors.fields import ColorField
from upy.contrib.sortable.models import PositionModel
from django.conf import settings
from imagekit.models import ImageSpecField, ProcessedImageField
from pilkit.processors import ResizeToFit
from upy.fields import NullTrueField
def verifyApp(app):
return app in ['django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.sitemaps',
'mptt',
'imagekit',
'upy',
'south',
'upy.contrib.inspect',
'modeltranslation',
'upy.contrib.tabbed_translation',
'upy.contrib.cked',
'upy.contrib.colors',
'upy.contrib.rosetta']
def all_apps():
"""
it returns a list of tuples with the name of all installed apps with admin's registration.
"""
list_apps = []
for app in settings.INSTALLED_APPS:
if not verifyApp(app):
list_apps.append([app, app.split(".")[-1].title()])
return list_apps
def list_apps():
"""
it returns a list of tuples with the name of all installed apps with admin's registration.
"""
list_apps = []
for app in settings.INSTALLED_APPS:
if not verifyApp(app):
try:
CustomApp.objects.get(application=app.split(".")[-1].title())
except:
list_apps.append([app.split(".")[-1].title()] * 2)
return list_apps
def list_models():
"""
It returns a list of tuple with the name of all models in installed apps
"""
list_models = []
for app in settings.INSTALLED_APPS:
if not verifyApp(app):
list_models_app = []
try:
all_models = models.get_models(models.get_app(app.split(".")[-1]))
except:
pass  # app doesn't have a models.py module
for m in all_models:
try:
CustomModel.objects.get(app=app, model=m.__name__)
except:
list_models_app.append([m._meta.verbose_name_plural] * 2)
list_models.append((app.split(".")[-1].title(), list_models_app))
return list_models
class CustomAdmin(models.Model):
"""
This object define parameters to customize admin layout. It has sense if you use only a record
of this class. Infact base template use the first occurence find in the database
"""
branding = models.CharField(max_length=200, null=True, blank=True,
default=u"upyproject.com",
help_text=_(u"Set branding"),
verbose_name=_(u"Branding"))
branding_link = models.CharField(max_length=200, null=True, blank=True,
default=u"www.upyproject.com",
help_text=_(u"Set branding's link"),
verbose_name=_(u"Branding link"))
branding_image = models.FilePathField(path=settings.RELATIVE_STATIC_ROOT, null=True, blank=True,
match="\.jpg|\.jpeg|.png|\.gif", recursive=True,
help_text=_(u"Set brand's image."),
verbose_name=_(u"Branding image"))
default = NullTrueField(_(u"Default"), help_text=_(u"Select it if you want use this as default customization."),
unique=True)
default_app_image = ProcessedImageField(verbose_name=_(u"Default app image"),
help_text=_(u"Insert a default application image"), null=True, blank=True,
upload_to='customadmin')
default_model_image = ProcessedImageField(verbose_name=_(u"Default model image"),
help_text=_(u"Insert a default model image"), null=True, blank=True,
upload_to='customadmin')
app_image = ImageSpecField([ResizeToFit(128, 128)], source='default_app_image',
options={'quality': 90}) #format='JPEG',
model_image = ImageSpecField([ResizeToFit(50, 50)], source='default_model_image', options={'quality': 90})
bg_header = ColorField(max_length=200, null=True, blank=True,
help_text=_(u"Set header's background color."),
verbose_name=_(u"BG Header"))
sitename_font = models.CharField(max_length=200, null=True, blank=True,
help_text=_(u"Set sitename font."),
verbose_name=_(u"Sitename font"))
sitename_font_size = models.CharField(max_length=200, null=True, blank=True,
help_text=_(u"Set sitename font size."),
verbose_name=_(u"Sitename font size"))
sitename_font_weight = models.CharField(max_length=200, null=True, blank=True,
help_text=_(u"Set sitename font weight."),
verbose_name=_(u"Sitename font weight"))
table_title_bg = ColorField(max_length=200, null=True, blank=True,
help_text=_(u"Set the background of title in tables."),
verbose_name=_(u"BG table title "))
table_title_color = ColorField(max_length=200, null=True, blank=True,
help_text=_(u"Set the color of title in tables."),
verbose_name=_(u"Table title color"))
h2_color = ColorField(max_length=200, null=True, blank=True,
help_text=_(u"Set h2 color."), verbose_name=_(u"H2 color"))
h2_size = models.CharField(max_length=200, null=True, blank=True,
help_text=_(u"Set h2 size."), verbose_name=_(u"H2 size"))
h3_color = ColorField(max_length=200, null=True, blank=True,
help_text=_(u"Set h3 color."), verbose_name=_(u"H3 color"))
h3_size = models.CharField(max_length=200, null=True, blank=True,
help_text=_(u"Set h3 size."), verbose_name=_(u"H3 size"))
link_color = ColorField(max_length=200, null=True, blank=True,
help_text=_(u"Set link's color"), verbose_name=_(u"Link color"))
link_hover_color = ColorField(max_length=200, null=True, blank=True,
help_text=_(u"Set link's color when hover"),
verbose_name=_(u"Link hover color"))
html_head = models.TextField(null=True, blank=True,
help_text=_(u"Set other html code to put in HEAD section. "),
verbose_name=_(u"Html head"))
css_code = models.TextField(null=True, blank=True,
help_text=_(u"Set the css code. "),
verbose_name=_(u"Css code"))
use_css_code = models.BooleanField(help_text=_(u"Check it if you want use css code to extends style."),
verbose_name=_(u"Use css code"), default=False)
use_log_sidebar = models.BooleanField(default=False,
help_text=_(u"Check it if you want use log sidebar in index template."),
verbose_name=_(u"Use log sidebar"))
view_mode = models.CharField(max_length=250, null=True, blank=True,
choices=(('use_custom_app', _('Use custom app system')),
('use_app_icons', _("Use apps' icons system")),
('use_app_and_model_icons', _("Use apps and models icons system")),
('use_model_icons',
_("Use models' icons system in index group models by app")),
('use_total_model_icons',
_("Use models' icons system in index ungroup models by app"))),
help_text=_(u"Choose the view mode"),
verbose_name=_(u"View mode"))
autocomplete_app_list = models.BooleanField(default=True,
help_text=_(
u"Check it if you want complete the custom app list with the default app list."),
verbose_name=_(u"Autocomplete App"))
autocomplete_models_list = models.BooleanField(default=True,
help_text=_(
u"Check it if you want complete the custom models list with the default models list."),
verbose_name=_(u"Autocomplete model"))
@property
def customization(self):
"""
It returns branding if defined, else image, else only his primary key.
"""
if self.branding:
return self.branding
elif self.branding_image:
res = self.branding_image.split("/")[-1]
return res
else:
return self.pk
@property
def branding_image_url(self):
return self.branding_image.replace(settings.RELATIVE_STATIC_ROOT, settings.STATIC_URL).replace("//", "/")
def save(self, *args, **kwargs):
appicons = CustomApp.objects.all()
if self.view_mode == "use_app_icons" and not appicons:
for app in list_apps():
new_app = CustomApp(application=app[0], verbose_app_name=app[1])
new_app.save()
super(CustomAdmin, self).save(*args, **kwargs)
def __unicode__(self):
return u"%s" % (self.branding)
class Meta:
verbose_name = _(u"Custom Admin")
verbose_name_plural = _(u"Custom Admin")
ordering = ['branding']
class CustomApp(PositionModel):
"""
This object links the installed_apps with an icon to use if CustomAdmin.use_app_icons is True
"""
application = models.CharField(max_length=250,
unique=True, help_text=_(u"Select the application"),
verbose_name=_(u"Application"))
verbose_app_name = models.CharField(max_length=250, unique=True,
help_text=_(u"Write the verbose name to show"),
verbose_name=_(u"Verbose app name"))
image = models.ImageField(_(u'Image'), null=True, blank=True, upload_to='upyimage')
thumb = ImageSpecField([ResizeToFit(80, 80)],
source='image',
format='png')
show_models = models.BooleanField(
default=True,
help_text=_(u"If use_app_icons is False in Customadmin, you can choose wheter or not show the model list."),
verbose_name=_(u"Show models")
)
def __unicode__(self):
return self.application
class Meta:
verbose_name = _(u"Custom App")
verbose_name_plural = _(u"Custom Apps")
ordering = ['position']
class CustomLink(PositionModel):
"""
This object links the installed_apps with an icon to use
if CustomAdmin.use_app_icons is True
"""
link_url = models.CharField(max_length=250, default="/admin/",
help_text=_(u"Select the url you want to link"),
verbose_name=_(u"Link Url"))
verbose_url_name = models.CharField(max_length=250, unique=True,
help_text=_(u"Write the verbose name to show"),
verbose_name=_(u"Verbose url name"))
image = models.ImageField(_(u'Image'), null=True, blank=True, upload_to='upyimage')
thumb = ImageSpecField([ResizeToFit(80, 80)], source='image', format='png')
def __unicode__(self):
return self.link_url
class Meta:
verbose_name = _(u"Custom Link")
verbose_name_plural = _(u"Custom Link")
ordering = ['position']
class CustomModel(PositionModel):
"""
This object links models in installed_apps with an icon to use
if CustomAdmin.view_mode == "use_model_icons" or CustomAdmin.view_mode == "use_inner_model_icons"
"""
app = models.CharField(max_length=250,
help_text=_(u"Select an appplication"),
verbose_name=_(u"App"))
model = models.CharField(max_length=250,
help_text=_(u"Select a model"),
verbose_name=_(u"Model"))
image = models.ImageField(_(u'Image'), null=True, blank=True, upload_to='upyimage')
thumb = ImageSpecField([ResizeToFit(50, 50)],
source='image',
format='png')
def __unicode__(self):
return self.model
class Meta:
verbose_name = _(u"Custom Model")
verbose_name_plural = _(u"Custom Models")
unique_together = ('app', 'model')
ordering = ['position']
```
#### File: contrib/rosetta/storage.py
```python
from django.core.cache import cache
from django.utils import importlib
import hashlib
import time
class BaseRosettaStorage(object):
def __init__(self, request):
self.request = request
def get(self, key, default=None):
raise NotImplementedError
def set(self, key, val):
raise NotImplementedError
def has(self, key):
raise NotImplementedError
def delete(self, key):
raise NotImplementedError
class DummyRosettaStorage(BaseRosettaStorage):
def get(self, key, default=None):
return default
def set(self, key, val):
pass
def has(self, key):
return False
def delete(self, key):
pass
class SessionRosettaStorage(BaseRosettaStorage):
def get(self, key, default=None):
if key in self.request.session:
return self.request.session[key]
return default
def set(self, key, val):
self.request.session[key] = val
def has(self, key):
return key in self.request.session
def delete(self, key):
del(self.request.session[key])
class CacheRosettaStorage(BaseRosettaStorage):
# unlike the session storage backend, cache is shared among all users
# so we need to per-user key prefix, which we store in the session
def __init__(self, request):
super(CacheRosettaStorage, self).__init__(request)
if 'rosetta_cache_storage_key_prefix' in self.request.session:
self._key_prefix = self.request.session['rosetta_cache_storage_key_prefix']
else:
self._key_prefix = hashlib.new('sha1', str(time.time())).hexdigest()
self.request.session['rosetta_cache_storage_key_prefix'] = self._key_prefix
def get(self, key, default=None):
#print ('get', self._key_prefix + key)
return cache.get(self._key_prefix + key, default)
def set(self, key, val):
#print ('set', self._key_prefix + key)
cache.set(self._key_prefix + key, val)
def has(self, key):
#print ('has', self._key_prefix + key)
return (self._key_prefix + key) in cache
def delete(self, key):
#print ('del', self._key_prefix + key)
cache.delete(self._key_prefix + key)
def get_storage(request):
from upy.contrib.rosetta.conf.settings import STORAGE_CLASS
storage_module, storage_class = STORAGE_CLASS.rsplit('.', 1)
storage_module = importlib.import_module(storage_module)
return getattr(storage_module, storage_class)(request)
```
#### File: tree/middleware/upy_context.py
```python
from django.core.urlresolvers import resolve, Resolver404
from django.conf.urls import handler404
from django.utils.importlib import import_module
class SetUpyContextMiddleware(object):
"""
This middleware activates current publication in current thread.
In process_response it deactivates current publication.
"""
def process_request(self, request):
try:
match = resolve(request.path)
except Resolver404:
match = None
try:
return import_module(handler404)
except:
pass
if match and 'upy_context' in match.kwargs:
request.upy_context = match.kwargs['upy_context']
```
#### File: contrib/tree/views.py
```python
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from upy.contrib.tree.utility import UPYRobotTXT, UPYSitemap
def upy_render(request, upy_context, vars_dictionary):
"""
It renders template defined in upy_context's page passed in arguments
"""
page = upy_context['PAGE']
return render_to_response(page.template.file_name, vars_dictionary, context_instance=RequestContext(request))
def view_404(request, url=None):
"""
It returns a 404 http response
"""
res = render_to_response("404.html", {"PAGE_URL": request.get_full_path()},
context_instance=RequestContext(request))
res.status_code = 404
return res
def view_500(request, url=None):
"""
it returns a 500 http response
"""
res = render_to_response("500.html", context_instance=RequestContext(request))
res.status_code = 500
return res
def sitemap(request):
"""
It returns sitemap.xml as http response
"""
upysitemap = UPYSitemap(request)
return HttpResponse(upysitemap._do_sitemap(), content_type="text/xml")
def robots(request):
"""
It returns robots.txt as http response
"""
upyrobottxt = UPYRobotTXT(request)
return HttpResponse(upyrobottxt._do_robotstxt(), content_type="text")
def favicon(request):
"""
It returns favicon's location
"""
favicon = "/upy_static/images/favicon.ico"
try:
from upy.contrib.seo.models import MetaSite
site = MetaSite.objects.get(default=True)
return HttpResponseRedirect(site.favicon.url)
except:
return HttpResponseRedirect(favicon)
``` |
{
"source": "20tab/uwsgiit_console",
"score": 2
} |
#### File: uwsgiit_console/console/forms.py
```python
from __future__ import unicode_literals, absolute_import
from datetime import datetime, timedelta
from django import forms
from django.conf import settings
from django.utils.dates import MONTHS
from django.core.validators import validate_email
from django.core.urlresolvers import resolve, Resolver404
from uwsgiit.api import UwsgiItClient
from select2.widgets import SelectMultipleAutocomplete, SelectAutocomplete
from .models import UwsgiItApi
def email_list_validator(value):
"Check if value consists only of valid emails."
# Use the parent's handling of required fields, etc.
for email in value:
validate_email(email.strip())
class MultiEmailField(forms.CharField):
default_validators = [email_list_validator]
def to_python(self, value):
"Normalize data to a list of strings."
# Return an empty list if no input was given.
if value in self.empty_values:
return []
return value.split(',')
def clean(self, value):
value = super(MultiEmailField, self).clean(value)
return ','.join([email.strip() for email in value])
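# --- illustrative note (not part of the original module) ---
# MultiEmailField accepts a comma separated string such as
# "a@example.com, b@example.com", validates every address and normalizes
# the cleaned value back to "a@example.com,b@example.com".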
class TagsForm(forms.Form):
tags = forms.MultipleChoiceField(
widget=SelectMultipleAutocomplete(plugin_options={"width": "300px"}),
choices=(),
required=False)
def __init__(self, *args, **kwargs):
tag_choices = kwargs.pop('tag_choices')
super(TagsForm, self).__init__(*args, **kwargs)
self.fields['tags'].choices = tag_choices
class BootstrapForm(forms.Form):
def __init__(self, *args, **kwargs):
super(BootstrapForm, self).__init__(*args, **kwargs)
for field in self.fields.keys():
if not isinstance(self.fields[field].widget, (SelectAutocomplete, SelectMultipleAutocomplete)):
self.fields[field].widget.attrs['class'] = 'form-control'
class LoginForm(forms.Form):
action_login = forms.IntegerField(
label='', widget=forms.HiddenInput(), initial=1)
username = forms.CharField(label='', widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Username'}))
password = forms.CharField(label='', widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': 'Password'}))
api_url = forms.ModelChoiceField(
label='Api url :', queryset=UwsgiItApi.objects.none())
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.fields['api_url'].queryset = UwsgiItApi.objects.all()
self.fields['api_url'].initial = UwsgiItApi.objects.get_or_create(
url=settings.DEFAULT_API_URL)[0]
def clean(self):
cd = super(LoginForm, self).clean()
if 'username' in cd and 'password' in cd and 'api_url' in cd:
client = UwsgiItClient(
cd['username'],
cd['password'],
cd['api_url'].url)
me = client.me().json()
if 'error' in me:
raise forms.ValidationError('Wrong username or password')
return cd
class MeForm(forms.Form):
company = forms.CharField(label='Company', widget=forms.TextInput(
attrs={'class': 'form-control col-xs-8'}))
email = MultiEmailField(label='Email', widget=forms.TextInput(
attrs={'class': 'form-control col-xs-8'}), required=False)
password = forms.CharField(label='Password', widget=forms.PasswordInput(
attrs={'class': 'form-control'}, render_value=True))
re_password = forms.CharField(
label='Retype password',
widget=forms.PasswordInput(
render_value=True, attrs={'class': 'form-control'}))
vat = forms.CharField(label='Vat', widget=forms.TextInput(
attrs={'class': 'form-control col-xs-8'}), required=False)
def clean(self):
cd = super(MeForm, self).clean()
if 'password' in cd and 're_password' in cd:
p1 = cd['password']
p2 = cd['re_password']
if p1 != p2:
self._errors['re_password'] = self.error_class(
['Passwords do not match'])
return cd
class SSHForm(forms.Form):
key = forms.CharField(label='ssh key', widget=forms.Textarea(
attrs={'cols': 100, 'rows': 3, 'class': 'form-control'}))
def clean(self):
"""Raise a ValidationError if the
value is not bigger than 130 bytes
check for ssh-rsa and ssh-dsa strings
"""
data = super(SSHForm, self).clean()
if 'key' in data:
key = data['key'].strip()
if len(key) > 130:
if 'ssh-rsa ' not in key and 'ssh-dsa ' not in key:
msg = 'Inserted value is not a valid ssh key'
raise forms.ValidationError(msg)
if key.count('\n') > 0:
msg = 'Too many newlines in the ssh key'
raise forms.ValidationError(msg)
else:
msg = 'Key too short'
raise forms.ValidationError(msg)
return data
class ContainerForm(TagsForm):
name = forms.CharField(label='Name', required=False)
quota_threshold = forms.IntegerField(
label='Quota Threshold', min_value=0, max_value=100)
nofollow = forms.BooleanField(label='NoFollow', required=False)
distro = forms.IntegerField(label='Distro', widget=forms.Select(choices=()))
linked_to = forms.MultipleChoiceField(
widget=SelectMultipleAutocomplete(plugin_options={"width": "300px"}),
choices=(),
required=False)
jid = forms.CharField(label='Jabber ID', required=False)
jid_destinations = forms.CharField(
label='Jabber Destinations', required=False)
jid_secret = forms.CharField(
label='Jabber Password', widget=forms.PasswordInput(), required=False)
pushover_user = forms.CharField(label='Pushover User', required=False)
pushover_token = forms.CharField(label='Pushover Token', required=False)
pushover_sound = forms.CharField(label='Pushover Sound', required=False)
pushbullet_token = forms.CharField(label='Pushbullet Token', required=False)
slack_webhook = forms.CharField(label='Slack Webhook', required=False)
alarm_freq = forms.IntegerField(
label='Alarm Frequency', required=False, min_value=60)
note = forms.CharField(
widget=forms.Textarea(
attrs={'cols': 50, 'rows': 3, 'class': 'form-control'}),
required=False)
reboot = forms.BooleanField(required=False, widget=forms.HiddenInput)
def __init__(self, *args, **kwargs):
distro_choices = kwargs.pop('distro_choices')
linked_to_choices = kwargs.pop('linked_to_choices')
super(ContainerForm, self).__init__(*args, **kwargs)
self.fields['distro'].widget.choices = distro_choices
self.fields['linked_to'].choices = linked_to_choices
class TagForm(forms.Form):
name = forms.CharField(label='Name')
class DomainForm(TagsForm):
note = forms.CharField(required=False, widget=forms.Textarea(
attrs={'cols': 50, 'rows': 3, 'class': 'form-control'}))
class NewDomainForm(forms.Form):
name = forms.CharField(
label='Name', widget=forms.TextInput(attrs={'size': 70}))
class CalendarForm(forms.Form):
year = forms.IntegerField()
month = forms.ChoiceField(
required=False,
widget=SelectAutocomplete(plugin_options={"width": "200px"}),
choices=[('', '')] + [(k, v) for k, v in MONTHS.items()])
day = forms.IntegerField(required=False)
def __init__(self, *args, **kwargs):
super(CalendarForm, self).__init__(*args, **kwargs)
today = datetime.today()
yesterday = today - timedelta(1)
self.fields['year'].initial = yesterday.year
self.fields['month'].initial = yesterday.month
self.fields['day'].initial = yesterday.day
self.fields['day'].widget.attrs['min'] = 1
def has_value(self, field):
data = self.cleaned_data
if field in data and data[field]:
return True
return False
def get_params(self):
res = {}
data = self.cleaned_data
if self.has_value('year'):
res['year'] = data['year']
if self.has_value('month'):
res['month'] = int(data['month'])
if self.has_value('day'):
res['day'] = data['day']
return res
def metric_name(self):
metric_name = ''
data = self.cleaned_data
if self.has_value('year'):
metric_name = str(data['year'])
if self.has_value('month'):
metric_name = str(data['month']) + '-' + metric_name
if self.has_value('day'):
metric_name = str(data['day']) + '-' + metric_name
return metric_name
def time_unit(self):
if self.has_value('day'):
return 'hour'
elif self.has_value('month'):
return 'day'
return 'month'
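    # Hedged example: with cleaned data year=2014, month='7', day=5 the helpers
    # above give get_params() == {'year': 2014, 'month': 7, 'day': 5},
    # metric_name() == '5-7-2014' and time_unit() == 'hour'.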
def is_in_the_future(self):
data = self.get_params()
today = datetime.today()
if 'year' in data and data['year'] > today.year:
return True
if ('year' in data and data['year'] == today.year and
'month' in data and data['month'] > today.month):
return True
if ('year' in data and data['year'] == today.year and
'month' in data and data['month'] == today.month and
'day' in data and data['day'] > today.day):
return True
return False
def clean(self):
data = super(CalendarForm, self).clean()
if self.has_value('day') and not self.has_value('month'):
self._errors['month'] = self.error_class(['Month is required.'])
if self.is_in_the_future():
raise forms.ValidationError('Set a date in the past.')
return data
class MetricDetailForm(forms.Form):
metric_url = forms.CharField()
metric_type = forms.CharField()
subject = forms.CharField()
def clean(self):
cd = super(MetricDetailForm, self).clean()
if 'metric_url' in cd:
try:
resolve(cd['metric_url'])
except Resolver404:
raise forms.ValidationError('Invalid url')
return cd
class NewLoopboxForm(BootstrapForm):
# container = forms.IntegerField(label='', widget=forms.HiddenInput())
filename = forms.CharField(label='Filename')
mountpoint = forms.CharField(label='Mount Point')
readonly = forms.BooleanField(label='Readonly', required=False)
class LoopboxForm(TagsForm):
lid = forms.IntegerField(widget=forms.HiddenInput, required=False)
class AlarmForm(BootstrapForm):
action_filter = forms.IntegerField(
label='', widget=forms.HiddenInput(), initial=1)
container = forms.IntegerField(required=False)
vassal = forms.CharField(required=False)
class_ = forms.CharField(label='Class', required=False)
color = forms.CharField(max_length=7, required=False)
level = forms.ChoiceField(
required=False,
widget=SelectAutocomplete(plugin_options={"width": "100%"}),
choices=(
('', ' '), (0, 'System'), (1, 'User'),
(2, 'Exception'), (3, 'Traceback'), (4, 'Log')
)
)
line = forms.IntegerField(min_value=0, required=False)
filename = forms.CharField(required=False)
func = forms.CharField(label='Function', required=False)
def clean(self):
cd = super(AlarmForm, self).clean()
del cd['action_filter']
return cd
``` |
{
"source": "20tab/uwsgi-local-dns-resolver",
"score": 2
} |
#### File: 20tab/uwsgi-local-dns-resolver/setup.py
```python
from setuptools import setup
def readme():
"""Open the readme."""
with open('README.md') as f:
return f.read()
setup(
name='uwsgidns',
version='1.0.1',
    description='Dynamically route requests for uWSGI locally subscribed domains to localhost.',
long_description=readme(),
url='https://github.com/20tab/uwsgi-local-dns-resolver',
author='<NAME>',
author_email='<EMAIL>',
packages=['uwsgidns'],
install_requires=[
'dnslib',
],
# test_suite='nose.collector',
# tests_require=['nose'],
scripts=['bin/uwsgidns'],
keywords=["uwsgi", "dns", "uwsgidns", "localhost"],
license='MIT',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 6 - Mature",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: Unix",
"Topic :: System :: Operating System",
"Topic :: Internet :: Name Service (DNS)",
"Topic :: Utilities"
],
zip_safe=False,
include_package_data=True,
)
```
#### File: uwsgi-local-dns-resolver/uwsgidns/listener.py
```python
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from uwsgidns.utils import uwsgi_packet_to_dict
class SubscriptionHandler(socketserver.BaseRequestHandler):
"""Handle UDP subscription requests from uWSGI."""
"""
trigger is called, if set, after handling a new uWSGI subscription.
It MUST be a callable accepting a uWSGI dictionary as an argument.
"""
trigger = None
def handle(self):
data = self.request[0]
d = uwsgi_packet_to_dict(data)
if callable(SubscriptionHandler.trigger):
SubscriptionHandler.trigger(d)
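# Hedged usage sketch (callback name and port below are illustrative only):
# wire the handler into a UDP server and register a trigger callback.
#
#   def on_subscription(info):
#       print(info)
#
#   SubscriptionHandler.trigger = on_subscription
#   server = socketserver.UDPServer(("127.0.0.1", 9999), SubscriptionHandler)
#   server.serve_forever()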
```
#### File: uwsgi-local-dns-resolver/uwsgidns/utils.py
```python
import struct
def uwsgi_packet_to_dict(blob):
"""Convert a uWSGI binary packet to a dictionary."""
d = dict()
_, packet_len, _ = struct.unpack("<BHB", blob[0:4])
i = 4
while i < packet_len:
size, = struct.unpack("<H", blob[i:i + 2])
i += 2
key = blob[i:i + size]
i += size
size, = struct.unpack("<H", blob[i:i + 2])
i += 2
value = blob[i:i + size]
i += size
d[key] = value
return d
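# Hedged example of the expected wire format: a 4-byte "<BHB" header followed
# by little-endian 16-bit length-prefixed key/value pairs.
#
#   body = b"\x03\x00key\x05\x00value"
#   blob = struct.pack("<BHB", 0, len(body), 0) + body
#   uwsgi_packet_to_dict(blob)  # -> {b'key': b'value'}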
``` |
{
"source": "20x48/fiz",
"score": 3
} |
#### File: fiz/dataset/analysis2.py
```python
from itertools import product
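# ABC(x): every length-x lowercase string ('aa', 'ab', ... when x=2).
# GET(n, d): the 2**n most frequent keys of counter dict d, joined as ASCII
#            bytes, or b'' when d has fewer than 2**n entries.
# GET_(d): like GET, but uses the largest bucket size (1/2/4/8/16) that fits.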
ABC = lambda x: map(lambda y: ''.join(y), product('abcdefghijklmnopqrstuvwxyz', repeat=x))
GET = lambda x, d: b'' if len(d) < 1 << x else ''.join(map(lambda y: y[0], sorted(d.items(), key=lambda z: z[1], reverse=True)[:1<<x])).encode('ascii')
def GET_(d):
x = len(d)
if x >= 16: x = 16
elif x >= 8: x = 8
elif x >= 4: x = 4
elif x >= 2: x = 2
elif x >= 1: x = 1
else: return b''
return ''.join(map(lambda y: y[0], sorted(d.items(), key=lambda z: z[1], reverse=True)[:x])).encode('ascii')
with open('out.txt') as f:
words = f.read().splitlines()
def generate(plan, allow_hyphen=False):
dyn = isinstance(plan, int)
max_trace = plan if dyn else len(plan)
transfer = {}
for word in words:
for i, c in enumerate(f'{word}-' if allow_hyphen else word):
for j in range(1, (max_trace if i >= max_trace else i if i > 0 else 1) + 1):
try:
transfer[word[i-j:i]][c] += 1
except KeyError:
try:
transfer[word[i-j:i]][c] = 1
except KeyError:
transfer[word[i-j:i]] = {c: 1}
if dyn:
filename = f'0x{plan}'
else:
filename = '-'.join(map(str, plan))
with open(f'{filename}{"-H" if allow_hyphen else ""}.txt', 'wb') as f:
if dyn:
for i in range(0, max_trace):
for k in ABC(i):
if k in transfer:
f.write(GET_(transfer[k]))
f.write(b'\n')
else:
for i, n in zip(range(0, max_trace), plan):
for k in ABC(i):
if k in transfer:
f.write(GET(n, transfer[k]))
f.write(b'\n')
# generate((4, 4, 4))
# generate((4, 4, 4, 4))
# generate((4, 4, 4, 3, 2))
# generate(3)
# generate(4)
# generate(5)
# generate((4, 4, 4), True)
# generate((4, 4, 4, 4), True)
# generate((4, 4, 4, 3, 2), True)
# generate(3, True)
# generate(4, True)
generate(5, True)
# generate(6, True)
``` |
{
"source": "20x48/RAINLotus",
"score": 3
} |
#### File: RAINLotus/RAINLotus/t.py
```python
try:
from p import Parser, DEFAULT_TITLE
from r import RendererCore, Renderer, Template
except ImportError:
from .p import Parser, DEFAULT_TITLE
from .r import RendererCore, Renderer, Template
from unittest import main, TestCase
parse = Parser()
render = RendererCore()
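# Hedged note: the tests below lean on operator overloading (presumably
# __rrshift__) so that `lines >> parse` parses a tuple of source lines and
# `parsed >> render` renders the parsed document to an HTML string.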
class Tester(TestCase):
def test_title(self):
R= (r'Yes Title',
r'========='
) >> parse
self.assertEqual(R[0]['title'], 'Yes Title')
R= (r'Bad Title',
r' ======= '
) >> parse
self.assertEqual(R[0]['title'], DEFAULT_TITLE)
self.assertEqual(R[1], (0, 'Bad Title'))
self.assertEqual(len(R), 2)
def test_header(self):
R= (r'= no header 1',
r'== header 2',
r'=== header 3',
r'====bad header 4',
r'===== header 5',
) >> parse
self.assertEqual(R[0]['headers'], [(2, 'header 2'), (3, 'header 3'), (5, 'header 5')])
self.assertEqual(R[1], (0, '= no header 1'))
self.assertEqual(R[2], [3, 'had', {'lev': 2}, 'header 2'])
self.assertEqual(R[3], [3, 'had', {'lev': 3}, 'header 3'])
self.assertEqual(R[4], (0, '====bad header 4'))
self.assertEqual(R[5], [3, 'had', {'lev': 5}, 'header 5'])
self.assertEqual(R >> render,
'<p class="yht">= no header 1</p>'
f'<h2 class="yhb-had" id="s:{render.calcid("header 2")}">header 2<a href="#s:{render.calcid("header 2")}"></a></h2>'
f'<h3 class="yhb-had" id="s:{render.calcid("header 3")}">header 3<a href="#s:{render.calcid("header 3")}"></a></h3>'
'<p class="yht">====bad header 4</p>'
'<h5 class="yhb-had">header 5</h5>')
def test_ulist(self):
R= (r'.. item 1',
r'.. item 2',
r' .. item 2.1',
r' content below item 2.1',
r' .. item 2.2',
r'...bad item'
) >> parse
self.assertEqual(R[1], [3, 'lst', {'typ': 'u'}, [
[(0, 'item 1')],
[
(0, 'item 2'),
[3, 'lst', {'typ': 'u'}, [
[
(0, 'item 2.1'),
(0, 'content below item 2.1')
],
[(0, 'item 2.2')]
]]
]
]])
self.assertEqual(R[2], (0, '...bad item'))
self.assertEqual(R >> render,
'<ul class="yhb-lst">'
'<li>'
'<p class="yht">item 1</p>'
'</li><li>'
'<p class="yht">item 2</p>'
'<ul class="yhb-lst">'
'<li>'
'<p class="yht">item 2.1</p>'
'<p class="yht">content below item 2.1</p>'
'</li><li>'
'<p class="yht">item 2.2</p>'
'</li>'
'</ul>'
'</li>'
'</ul>'
'<p class="yht">...bad item</p>')
def test_olist(self):
R= (r'?. item 1',
r'?. item 2',
r';;;',
r'314. item 314',
r'000. item 315'
) >> parse
self.assertEqual(R[1], [3, 'lst', {'typ': 'o', 'sta': 1}, [[(0, 'item 1')], [(0, 'item 2')]]])
self.assertEqual(R[2], [3, 'lst', {'typ': 'o', 'sta': 314}, [[(0, 'item 314')], [(0, 'item 315')]]])
self.assertEqual(R >> render,
'<ol class="yhb-lst">'
'<li>'
'<p class="yht">item 1</p>'
'</li><li>'
'<p class="yht">item 2</p>'
'</li>'
'</ol>'
'<ol class="yhb-lst" start="314">'
'<li>'
'<p class="yht">item 314</p>'
'</li><li>'
'<p class="yht">item 315</p>'
'</li>'
'</ol>')
def test_tlist(self):
R= (r'*. Achieved',
r'v. Supported',
r'+. Added feature',
r':. Todo / Modified',
r'-. Removed feature',
r'x. Unsupported',
r'~. Mission impossible',
) >> parse
self.assertEqual(R[1], [3, 'lst', {'typ': 't'}, [
['*', [(0, 'Achieved')]],
['v', [(0, 'Supported')]],
['+', [(0, 'Added feature')]],
[':', [(0, 'Todo / Modified')]],
['-', [(0, 'Removed feature')]],
['x', [(0, 'Unsupported')]],
['~', [(0, 'Mission impossible')]],
]])
self.assertEqual(R >> render,
'<ul class="yhb-lst_t">'
'<li class="_7">'
'<p class="yht">Achieved</p>'
'</li><li class="_6">'
'<p class="yht">Supported</p>'
'</li><li class="_5">'
'<p class="yht">Added feature</p>'
'</li><li class="_4">'
'<p class="yht">Todo / Modified</p>'
'</li><li class="_3">'
'<p class="yht">Removed feature</p>'
'</li><li class="_2">'
'<p class="yht">Unsupported</p>'
'</li><li class="_1">'
'<p class="yht">Mission impossible</p>'
'</li>'
'</ul>')
def test_dlist(self):
R= (r'::: Intel',
r' 牙膏厂。',
r'::: AMD',
r' YES!',
r'::: Seventeen Cards',
r' 十七张牌你能秒我?你能秒杀我?!',
r' 你今天能十七张牌把卢本伟秒了,我!当!场!就把这个电脑屏幕吃掉!!!'
) >> parse
self.assertEqual(R[1], [3, 'lst', {'typ': 'd'}, [
[((0, 'Intel'),), [(0, '牙膏厂。')]],
[((0, 'AMD'),), [(0, 'YES!')]],
[((0, 'Seventeen Cards'),), [
(0, '十七张牌你能秒我?你能秒杀我?!'),
(0, '你今天能十七张牌把卢本伟秒了,我!当!场!就把这个电脑屏幕吃掉!!!')
]]
]])
self.assertEqual(R >> render,
'<dl class="yhb-lst_d">'
'<dt>Intel</dt>'
'<dd><p class="yht">牙膏厂。</p></dd>'
'<dt>AMD</dt>'
'<dd><p class="yht">YES!</p></dd>'
'<dt>Seventeen Cards</dt>'
'<dd>'
'<p class="yht">十七张牌你能秒我?你能秒杀我?!</p>'
'<p class="yht">你今天能十七张牌把卢本伟秒了,我!当!场!就把这个电脑屏幕吃掉!!!</p>'
'</dd>'
'</dl>')
def test_sep_pgbreak(self):
R= (r'--',
r'-----',
r'%%%'
) >> parse
self.assertEqual(R[1], (0, '--'))
self.assertEqual(R[2], [3, 'sep', None, None])
self.assertEqual(R[3], [3, 'pgb', None, None])
self.assertEqual(R >> render,
'<p class="yht">--</p>'
'<hr />'
'<div style="page-break-after:always"></div>')
def test_b_note(self):
R= (r'*** ImpOrTant',
r' 试试就逝世'
) >> parse
self.assertEqual(R[1], [3, 'not', {'typ': 'important'}, [(0, '试试就逝世')]])
self.assertEqual(R >> render,
'<div class="yhb-not _imp"><div>'
'<p class="yht">试试就逝世</p>'
'</div></div>')
def test_b_quote(self):
R= (r'""" -- Rumi',
r' The quieter you become, the more you are able to hear.',
r'""" ——鲁迅',
r' 我没说过这话,不过确实在理!',
r'""" QUOTE WITHOUT AUTHOR',
r'"""',
r' QUOTE AT NEW LINE'
) >> parse
self.assertEqual(R[1], [3, 'quo', {'aut': ((0, 'Rumi'),)}, [(0, 'The quieter you become, the more you are able to hear.')]])
self.assertEqual(R[2], [3, 'quo', {'aut': ((0, '鲁迅'),)}, [(0, '我没说过这话,不过确实在理!')]])
self.assertEqual(R[3], [3, 'quo', {'aut': None}, [(0, 'QUOTE WITHOUT AUTHOR')]])
self.assertEqual(R[4], [3, 'quo', {'aut': None}, [(0, 'QUOTE AT NEW LINE')]])
self.assertEqual(R >> render,
'<figure class="yhb-quo">'
'<blockquote>'
'<p class="yht">The quieter you become, the more you are able to hear.</p>'
'</blockquote>'
'<figcaption>Rumi</figcaption>'
'</figure>'
'<figure class="yhb-quo">'
'<blockquote>'
'<p class="yht">我没说过这话,不过确实在理!</p>'
'</blockquote>'
'<figcaption>鲁迅</figcaption>'
'</figure>'
'<figure class="yhb-quo">'
'<blockquote>'
'<p class="yht">QUOTE WITHOUT AUTHOR</p>'
'</blockquote>'
'</figure>'
'<figure class="yhb-quo">'
'<blockquote>'
'<p class="yht">QUOTE AT NEW LINE</p>'
'</blockquote>'
'</figure>')
def test_b_table(self):
R= (r'|||',
r' | H1 | H2 | H3',
r' |',
r' | C1 | >>>',
r' | C4 | C5 | >',
r' | C7 | C8 | C\|',
r'||| 2',
r' | H1',
r' | =',
r' | C1',
r'||| csv 2',
r' H1, H2, >',
r' H4, >>>',
r' =, <<<',
r' C1, C2, C3',
r' C4, ~~C5~~, C6',
r'||| Json',
r' {'
r' "head": [["H1", ">>>"], ["H4", ">>>"]],'
r' "align": ["<", "=", ">"],'
r' "body": [["C1", ">>>"]]'
r' }',
r'||| jSon',
r' {"head": 1, "align": 2, "body": 3}',
r'||| jsOn',
r' {"head": [1], "align": [2], "body": [3]}',
r'||| jsoN',
r' {"head": [[]], "align": [[]], "body": [[[]]]}',
r'||| JSON RoTaTe',
r' [["H1", ">", "H{3}"], [], ["C1", "C2", "C3"]]'
) >> parse
self.assertEqual(R[1], [3, 'tab', {'hei': 1, 'rot': False, 'ali': ['=', '=', '=']}, [
[[1, ((0, 'H1'),)], [1, ((0, 'H2'),)], [1, ((0, 'H3'),)]],
[[3, ((0, 'C1'),)]],
[[1, ((0, 'C4'),)], [2, ((0, 'C5'),)]],
[[1, ((0, 'C7'),)], [1, ((0, 'C8'),)], [1, ((0, 'C|'),)]]
]])
self.assertEqual(R[2], [3, 'tab', {'hei': 2, 'rot': False, 'ali': ['=', '<', '<']}, [
[[1, ((0, 'H1'),)], [2, ((0, 'H2'),)]],
[[3, ((0, 'H4'),)]],
[[1, ((0, 'C1'),)], [1, ((0, 'C2'),)], [1, ((0, 'C3'),)]],
[[1, ((0, 'C4'),)], [1, ((2, 'del', ((0, 'C5'),)),)], [1, ((0, 'C6'),)]]
]])
self.assertEqual(R[3], [3, 'tab', {'hei': 2, 'rot': False, 'ali': ['<', '=', '>']}, [
[[3, ((0, 'H1'),)]],
[[3, ((0, 'H4'),)]],
[[3, ((0, 'C1'),)]]
]])
self.assertEqual(R[4], [3, 'tab', {'hei': 1, 'rot': True, 'ali': ['=', '=', '=']}, [
[[2, ((0, 'H1'),)], [1, ((0, 'H'), (2, 'reu', '3'))]],
[[1, ((0, 'C1'),)], [1, ((0, 'C2'),)], [1, ((0, 'C3'),)]]
]])
self.assertEqual(R >> render,
'<table class="yhb-tab">'
'<thead>'
'<tr>'
'<th style="text-align:center">H1</th>'
'<th style="text-align:center">H2</th>'
'<th style="text-align:center">H3</th>'
'</tr>'
'</thead><tbody>'
'<tr>'
'<td style="text-align:center" colspan="3">C1</td>'
'</tr><tr>'
'<td style="text-align:center">C4</td>'
'<td style="text-align:center" colspan="2">C5</td>'
'</tr><tr>'
'<td style="text-align:center">C7</td>'
'<td style="text-align:center">C8</td>'
'<td style="text-align:center">C|</td>'
'</tr>'
'</tbody>'
'</table>'
'<table class="yhb-tab">'
'<thead>'
'<tr>'
'<th style="text-align:center">H1</th>'
'<th style="text-align:center" colspan="2">H2</th>'
'</tr><tr>'
'<th style="text-align:center" colspan="3">H4</th>'
'</tr>'
'</thead><tbody>'
'<tr>'
'<td style="text-align:center">C1</td>'
'<td style="text-align:left">C2</td>'
'<td style="text-align:left">C3</td>'
'</tr><tr>'
'<td style="text-align:center">C4</td>'
'<td style="text-align:left"><del>C5</del></td>'
'<td style="text-align:left">C6</td>'
'</tr>'
'</tbody>'
'</table>'
'<table class="yhb-tab">'
'<thead>'
'<tr>'
'<th style="text-align:center" colspan="3">H1</th>'
'</tr><tr>'
'<th style="text-align:center" colspan="3">H4</th>'
'</tr>'
'</thead><tbody>'
'<tr>'
'<td style="text-align:center" colspan="3">C1</td>'
'</tr>'
'</tbody>'
'</table>'
'<table class="yhb-tab">'
'<thead>'
'<tr>'
'<th style="text-align:center" colspan="2">H1</th>'
'<th style="text-align:center">H</th>'
'</tr>'
'</thead><tbody>'
'<tr>'
'<td style="text-align:center">C1</td>'
'<td style="text-align:center">C2</td>'
'<td style="text-align:center">C3</td>'
'</tr>'
'</tbody>'
'</table>')
def test_b_coll(self):
R= (r'~~~ OpEn',
r' content',
r'~~~ open More',
r' blah blah',
r'~~~ closed',
r' content',
r'~~~',
r' no summary'
) >> parse
self.assertEqual(R[1], [3, 'col', {'opn': True, 'sum': None}, [(0, 'content')]])
self.assertEqual(R[2], [3, 'col', {'opn': True, 'sum': ((0, 'More'),)}, [(0, 'blah blah')]])
self.assertEqual(R[3], [3, 'col', {'opn': False, 'sum': ((0, 'closed'),)}, [(0, 'content')]])
self.assertEqual(R[4], [3, 'col', {'opn': False, 'sum': None}, [(0, 'no summary')]])
self.assertEqual(R >> render,
'<details class="yhb-col" open>'
'<summary></summary>'
'<div>'
'<p class="yht">content</p>'
'</div>'
'</details>'
'<details class="yhb-col" open>'
'<summary>More</summary>'
'<div>'
'<p class="yht">blah blah</p>'
'</div>'
'</details>'
'<details class="yhb-col">'
'<summary>closed</summary>'
'<div>'
'<p class="yht">content</p>'
'</div>'
'</details>'
'<details class="yhb-col">'
'<summary></summary>'
'<div>'
'<p class="yht">no summary</p>'
'</div>'
'</details>')
def test_b_dialog(self):
R= (r'@@@ title="Chat with 20x48"',
r' -> whats your address?',
r' <- 172.16.31.10',
r' -> no, your local address',
r' <- 127.0.0.1',
r' -> i mean your physical address',
r' <- 29:01:38:62:31:58',
r' -> fuck u',
r'',
r'@@@ style="wechat" nonexist-argument',
r' ~> 60',
r' <> 你撤回了一条消息',
r' $> 888',
r' 恭喜发财',
r' //rich// text'
r'',
r' <~',
r' <>',
r'',
r' <? Sending message',
r' <!',
r' Failed message',
r' /// image src=https://example.com/xxx.jpg',
r' ->@ RAINLotus @ Message with name',
r' ->@ <EMAIL> @ Dual "@" to escape'
) >> parse
self.assertEqual(R[1], [3, 'dia', {'title': 'Chat with 20x48'}, [
[1, [(0, 'whats your address?')], {}],
[0, [(0, '172.16.31.10')], {}],
[1, [(0, 'no, your local address')], {}],
[0, [(0, '127.0.0.1')], {}],
[1, [(0, 'i mean your physical address')], {}],
[0, [(0, '29:01:38:62:31:58')], {}],
[1, [(0, 'fuck u')], {}]
]])
self.assertEqual(R[2], [3, 'dia', {'style': 'wechat'}, [
[1, [], {'typ': 'voice', 'val': 60}],
[3, [(0, '你撤回了一条消息')], {}],
[1, [(0, '恭喜发财'), [1, [(2, 'ita', ((0, 'rich'),)), (0, ' text')]]], {'typ': 'hongbao', 'val': 88800}],
[0, [(0, 'Sending message')], {'typ': 'sending'}],
[0, [(0, 'Failed message'), [3, 'img', {'src': 'https://example.com/xxx.jpg'}, []]], {'typ': 'failed'}],
[2, [(0, 'Message with name')], {}, 'RAINLotus'],
[2, [(0, 'Dual "@" to escape')], {}, '<EMAIL>']
]])
self.assertEqual(R >> render,
'<div class="yhb-dia">'
'<p>Chat with 20x48</p>'
'<div>'
'<div class="_1">'
'<div>'
'<p class="yht">whats your address?</p>'
'</div>'
'</div><div class="_0">'
'<div>'
'<p class="yht">172.16.31.10</p>'
'</div>'
'</div><div class="_1">'
'<div>'
'<p class="yht">no, your local address</p>'
'</div>'
'</div><div class="_0">'
'<div>'
'<p class="yht">127.0.0.1</p>'
'</div>'
'</div><div class="_1">'
'<div>'
'<p class="yht">i mean your physical address</p>'
'</div>'
'</div><div class="_0">'
'<div>'
'<p class="yht">29:01:38:62:31:58</p>'
'</div>'
'</div><div class="_1">'
'<div>'
'<p class="yht">fuck u</p>'
'</div>'
'</div>'
'</div>'
'</div>'
'<div class="yhb-dia">'
'<p>Dialog</p>'
'<div>'
'<div class="_1 _voice">'
'<p>1\'0"</p>'
'<p></p>'
'</div><div class="_3">'
'<p>你撤回了一条消息</p>'
'</div><div class="_1 _hongbao">'
'<p>888.00</p>'
'<p>恭喜发财</p>'
'</div><div class="_0 _sending">'
'<div>'
'<p class="yht">Sending message</p>'
'</div>'
'</div><div class="_0 _failed">'
'<div>'
'<p class="yht">Failed message</p>'
'<figura class="yhb-img">'
'<img src="https://example.com/xxx.jpg" />'
'</figura>'
'</div>'
'</div><div class="_1">'
'<p>RAINLotus</p>'
'<div>'
'<p class="yht">Message with name</p>'
'</div>'
'</div><div class="_1">'
'<p><EMAIL></p>'
'<div>'
'<p class="yht">Dual "@" to escape</p>'
'</div>'
'</div>'
'</div>'
'</div>')
def test_b_footnote_code(self):
R= (r'>>> footnote',
r' content',
r'``` C++',
r' #include <iostream>',
r'',
r'',
r' int main() {',
r' std::cerr << "fucking the world" << std::endl;',
r' return 0;',
r' }',
r'```',
r' plsinyrcy'
) >> parse
self.assertEqual(R[1], [3, 'fnt', {'fnt': 'footnote'}, [(0, 'content')]])
self.assertEqual(R[2], [3, 'cod', {'lan': 'c++'}, [
'#include <iostream>',
'', '',
'int main() {',
' std::cerr << "fucking the world" << std::endl;',
' return 0;',
'}',
]])
self.assertEqual(R[3], [3, 'cod', {'lan': 'plaintext'}, ['plsinyrcy']])
self.assertEqual(R >> render,
'<div class="yhb-fnt">'
f'<a class="yhi-lnk" href="#p:{render.calcid("footnote")}" id="q:{render.calcid("footnote")}">footnote</a>'
'<div>'
'<p class="yht">content</p>'
'</div>'
'</div>'
'<pre class="yhb-cod language-c++"><code>'
'#include <iostream>\n\n\n'
'int main() {\n'
' std::cerr << "fucking the world" << std::endl;\n'
' return 0;\n'
'}'
'</code></pre>'
'<pre class="yhb-cod language-plaintext"><code>'
'plsinyrcy'
'</code></pre>')
def test_b_raw_diagram_formula(self):
R= (r'!!! raw',
r' <br /> it can be dangerous!',
r'### diagram',
r' content',
r'$$$ formula',
r' content'
) >> parse
self.assertEqual(R[1], [3, 'raw', None, ['raw', '<br /> it can be dangerous!']])
self.assertEqual(R[2], [3, 'dgr', None, ['diagram', 'content']])
self.assertEqual(R[3], [3, 'fml', None, ['formula', 'content']])
self.assertEqual(R >> render,
'raw\n<br /> it can be dangerous!'
'<div class="yhb-dgr">diagram\ncontent</div>'
'<div class="yhb-fml">$$formula\ncontent$$</div>')
def test_b_general(self):
R= (r'/// video src = https://example.com/xxx.mp4 autoplay loop nonexist',
r' Your browser does not support blah blah.'
) >> parse
self.assertEqual(R[1], [3, 'vid', {'src': 'https://example.com/xxx.mp4', 'autoplay': True, 'loop': True}, [(0, 'Your browser does not support blah blah.')]])
self.assertEqual(R >> render,
'<video class="yhb-vid" src="https://example.com/xxx.mp4" autoplay loop>'
'<p class="yht">Your browser does not support blah blah.</p>'
'</video>')
def test_b_general_plus(self):
R= (r'&&& Extmod -> ext',
r'',
r'/// ext.method ? c1',
r' /// ext.method ? c2',
r' Hello world~',
r'/// ext.non',
) >> Parser({'Extmod': ((lambda x: x == 'method', None), (lambda x: True, None), None)})
self.assertEqual(R[1],
[5, ('Extmod', 'method'), {}, [
(0, 'c1'),
[5, ('Extmod', 'method'), {}, [
(0, 'c2'),
(0, 'Hello world~')]]]])
self.assertEqual(len(R), 2)
def test_b_config(self):
R= (r'&&& RAINLotus -> rl',
r' nonexist 1',
r' alias-code magic_quote `',
r' meta-keywords RAINLotus,markup language,',
r'',
r'&&& RAINLotus -> yh',
r' alias-code',
r' magic_print echo `...`',
r' alias',
r' rich_text Hi, \\RAINLotus\\',
r' meta-keywords',
r' Markdown,Asciidoc,reStructuredText',
r'',
r'&&& RAINLotus',
r' dialog-default-style'
) >> Parser()
self.assertEqual(R[0]['config'], {
'RAINLotus': {
'alias': {'rich_text': ((0, 'Hi, '), (2, 'sla', ((0, 'RAINLotus'),)))},
'alias-code': {'magic_quote': '`', 'magic_print': 'echo `...`'},
'meta-keywords': 'RAINLotus,markup language,Markdown,Asciidoc,reStructuredText',
'dialog-default-style': ''
}
})
def test_b_config_plus(self):
def ui_lotus(cmds):
cfg = {}
for cmd, ctts in cmds.items():
if cmd == 'color' and ctts == ['mediumaquamarine']:
cfg[cmd] = (0x66, 0xCD, 0xAA)
elif cmd == 'border-radius' and len(ctts) == 1:
cfg[cmd] = int(ctts[0].split('px', 1)[0])
return cfg
R= (r'&&& UILotus -> 111',
r' color mediumaquamarine',
r' nonexist 1',
r'',
r'&&& UILotus -> 233',
r' border-radius 5px',
r'',
r'&&& RAINSakura',
r' api https://example.com',
r'',
r'&&& RAINSakura',
r' api http://example.org',
r'',
r'&&& nonexist -> non'
) >> Parser({'UILotus': (None, None, ui_lotus), 'RAINSakura': None})
self.assertEqual(R[0]['imports'], {'UILotus', 'RAINSakura'})
self.assertEqual(R[0]['config'], {
'UILotus': {
'color': (102, 205, 170),
'border-radius': 5,
},
'RAINSakura': {
'api': [
'https://example.com',
'http://example.org'
]
}
})
def test_b_comment(self):
self.assertEqual(len(r';;; can not see me~' >> parse), 1)
def test_i_autourl(self):
R = ' '.join((
r'https://[2001:fc00:db20:35b:7399::5:0.66.131.41]/user/root/',
r'https://[2001:fc00:db20:35b:7399::5:42:8329]/favicon.ico',
r'rsync://<EMAIL>',
r'irc6://172.16.17.32:91',
r'https://[::1]'
)) >> parse
self.assertEqual(R[1], [1, [
(2, 'lnk', ('lnk', ((0, 'https://[2001:fc00:db20:35b:7399::5:0.66.131.41]/user/root/'),), 'https://[2001:fc00:db20:35b:7399::5:0.66.131.41]/user/root/')), (0, ' '),
(2, 'lnk', ('lnk', ((0, 'https://[2001:fc00:db20:35b:7399::5:42:8329]/favicon.ico'),), 'https://[2001:fc00:db20:35b:7399::5:42:8329]/favicon.ico')), (0, ' '),
(2, 'lnk', ('lnk', ((0, 'rsync://[email protected]'),), 'rsync://<EMAIL>')), (0, ' '),
(2, 'lnk', ('lnk', ((0, 'irc6://172.16.17.32:91'),), 'irc6://172.16.17.32:91')), (0, ' '),
(2, 'lnk', ('lnk', ((0, 'https://[::1]'),), 'https://[::1]'))
]])
self.assertEqual(R >> render,
'<p class="yht">'
'<a class="yhi-lnk" href="https://[2001:fc00:db20:35b:7399::5:0.66.131.41]/user/root/">https://[2001:fc00:db20:35b:7399::5:0.66.131.41]/user/root/</a> '
'<a class="yhi-lnk" href="https://[2001:fc00:db20:35b:7399::5:42:8329]/favicon.ico">https://[2001:fc00:db20:35b:7399::5:42:8329]/favicon.ico</a> '
'<a class="yhi-lnk" href="rsync://<EMAIL>">rsync://<EMAIL></a> '
'<a class="yhi-lnk" href="irc6://172.16.17.32:91">irc6://172.16.17.32:91</a> '
'<a class="yhi-lnk" href="https://[::1]">https://[::1]</a>'
'</p>')
def test_i_hangbuelang(self):
R = r'**Bold//Italic//**' >> parse
self.assertEqual(R[1], [1, [(2, 'bld', ((0, 'Bold'), (2, 'ita', ((0, 'Italic'),))))]])
self.assertEqual(R >> render, '<p class="yht"><strong>Bold<i>Italic</i></strong></p>')
def test_i_shield(self):
R = r'SCP-2521 =O2=|=o5=|=O2=|=o1=' >> parse
self.assertEqual(R[1], [1, [
(0, 'SCP-2521 '),
(2, 'shl', ('o', 2)), (0, '|'),
(2, 'shl', ('o', 5)), (0, '|'),
(2, 'shl', ('o', 2)), (0, '|'),
(2, 'shl', ('o', 1))
]])
self.assertEqual(R >> render, '<p class="yht">SCP-2521 ●●|●●●●●|●●|●</p>')
def test_i_bitalic(self):
R = r'/*Bitalic*/' >> parse
self.assertEqual(R[1], [1, [(2, 'bit', ((0, 'Bitalic'),))]])
self.assertEqual(R >> render, '<p class="yht"><i><strong>Bitalic</strong></i></p>')
def test_i_code(self):
R = r'`print("Hello world!")`' >> parse
self.assertEqual(R[1], [1, [(2, 'cod', 'print("Hello world!")')]])
self.assertEqual(R >> render, '<p class="yht"><code class="yhi-cod">print("Hello world!")</code></p>')
def test_i_formula(self):
R = r'$$ ax^2+bx+c=0 $$' >> parse
self.assertEqual(R[1], [1, [(2, 'fml', 'ax^2+bx+c=0')]])
self.assertEqual(R >> render, '<p class="yht"><span class="yhi-fml">$$ax^2+bx+c=0$$</span></p>')
def test_i_reuse(self):
R =(r'&&& RAINLotus',
r' alias',
r' 1 a{2}',
r' 2 b{3}',
r' 3 c{1}',
r' yh RAINLotus',
r'',
r'Love {rl}.',
r'Miss {yh}.',
r'{1}',
r'{2}',
r'{3}',
) >> parse
self.assertEqual(R[1], [1, [(0, 'Love '), (2, 'reu', 'rl'), (0, '.')]])
self.assertEqual(R >> render,
'<p class="yht">Love .</p>'
'<p class="yht">Miss RAINLotus.</p>'
'<p class="yht">abc<span class="yhi-err"></span></p>'
'<p class="yht">bc<span class="yhi-err"></span></p>'
'<p class="yht">c<span class="yhi-err"></span></p>')
def test_i_link(self):
R =(r'[\ALx [deprecated]]<alx://deprecated>'
r'[#Paragraph]<example.com/article>'
r'[\#213]<site-213>'
r'[docs]<docs.20x48.net>'
r'[!hi]<hello.jpg>'
) >> parse
self.assertEqual(R[1], [1, [
(2, 'lnk', ('lnk', ((0, '\\ALx [deprecated]'),), 'alx://deprecated')),
(2, 'lnk', ('cro', 'Paragraph', 'example.com/article')),
(2, 'lnk', ('lnk', ((0, '#213'),), 'site-213')),
(2, 'lnk', ('lnk', ((0, 'docs'),), 'docs.20x48.net')),
(2, 'lnk', ('img', 'hi', 'hello.jpg'))
]])
self.assertEqual(R >> render,
'<p class="yht">'
'<a class="yhi-lnk" href="alx://deprecated">\\ALx [deprecated]</a>'
f'<a class="yhi-lnk" href="example.com/article#s:{render.calcid("Paragraph")}">Paragraph</a>'
'<a class="yhi-lnk" href="site-213">#213</a>'
'<a class="yhi-lnk" href="docs.20x48.net">docs</a>'
'<img class="yhi-img" src="hello.jpg" alt="hi" />'
'</p>')
def test_i_refer(self):
R = r'[^footnote][#Paragraph]' >> parse
self.assertEqual(R[1], [1, [(2, 'ref', ('fnt', 'footnote')), (2, 'ref', ('inr', 'Paragraph'))]])
self.assertEqual(R >> render,
'<p class="yht">'
f'<sup><a class="yhi-lnk" id="p:{render.calcid("footnote")}" href="#q:{render.calcid("footnote")}">footnote</a></sup>'
f'<a class="yhi-lnk" href="#s:{render.calcid("Paragraph")}">Paragraph</a>'
'</p>')
def test_i_autoemail(self):
R = r'<EMAIL>' >> parse
self.assertEqual(R[1], [1, [(2, 'mal', ('user', 'example.com'))]])
self.assertEqual(R >> render,
'<p class="yht">'
'<span class="yhi-mal">'
'<span class="_b">moc</span>'
'<span class="_b">elpmaxe</span>'
'<span class="_a">resu</span>'
'</span>'
'</p>')
def test_i_general(self):
R =(r'&&& RAINSakura -> sakura',
r''
r'<<sakura.test arg1 arg2 = path? ? It is \\RAINSakura!\\>>'
) >> Parser({'RAINSakura': ((None, lambda x: x == 'test'), (None, lambda x: True), None)})
self.assertEqual(R[1], [1, [(4, ('RAINSakura', 'test'), {'arg1': True, 'arg2': 'path?'}, ((0, 'It is '), (2, 'sla', ((0, 'RAINSakura!'),))))]])
def test_template(self):
custom_title = lambda x: ''.join(f'{ord(c):X}' for c in x)
custom_css = '<link rel="stylesheet" href="rainink.css">'
R = '' >> parse >> Renderer() >> Template(custom_title, custom_css)
self.assertIn(custom_title(DEFAULT_TITLE), R)
self.assertIn(custom_css, R)
main()
``` |
{
"source": "2105-may24-devops/anthony-project-0",
"score": 4
} |
#### File: 2105-may24-devops/anthony-project-0/data_zero.py
```python
from data_zero_functions import *
def main():
# Declare a list to store data for logging purposes.
history_data = []
# Greeting, future UI stuff?
print("------------ Welcome to Data Zero! ----------------")
print("***************************************************************\n\n")
# Call the url permissions check.
print("Enter a URL with tabular data that would like to save or view.")
print("---------------------------------------------------------------\n\n")
url_checked, netloc_checked, risk_level, url_path = url_check(input_url=input("Input a url: \n\n"))
history_data.append(str(url_checked))
history_data.append(str(risk_level))
# Ask the user whether or not they want to scrape. Note: A Red risk-level will not prevent a user from scraping a page
# it only lets them know whether or not it's allowed. The risk belongs to the user.
answer = input("Would you like to scrape this page?\n Yes (y) or No (n)\n")
if answer == 'y':
url_scraped, url_saved = url_scrape(url_checked, netloc_checked, url_path)
history_data.append(str(url_scraped))
history_data.append(str(url_saved))
history(history_data)
# Enter the program
if __name__ == "__main__":
main()
``` |
{
"source": "2105-may24-devops/michael-project0",
"score": 2
} |
#### File: 2105-may24-devops/michael-project0/app.py
```python
import sys
import pathlib
from frontend import Frontend
def main():
"""
env vars (home, mode/state) (local settings file?)
file navigation mode (cd ls pwd)
help
recipe editing mode
create recipe
edit steps
scale recipe
optional feature: tab autocomplete?
stretch feature: treeNode-based recipes
"""
args = sys.argv[1:].copy()
bless = True
if ("-b" in args):
bless = False
args.remove("-b")
my_frontend = Frontend()
my_frontend.init_settings()
my_frontend.init_terminal(bless)
if len(args) > 0:
script_path = pathlib.Path(args[0])
if script_path.exists():
my_frontend.script_mode(script_path)
else:
my_frontend.interpret_command(" ".join(args))
else:
my_frontend.console_mode()
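# Hedged usage examples (file name below is illustrative):
#   python app.py              # interactive console mode
#   python app.py -b           # skip the optional `blessed` terminal colors
#   python app.py recipes.txt  # run commands from a script file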
if __name__ == "__main__":
main()
```
#### File: 2105-may24-devops/michael-project0/frontend.py
```python
from typing import Generator
import os
from pathlib import Path
#module imports
from recipe import Recipe
from recipe import IngredientAmount
class Frontend:
BLEST = True
term = None
#handling dependencies
COLORS={
"WARN":"",
"NORM":"",
"PROMPT":"",
"OS_PATH":"",
"RCP_PATH":"",
"ACCENT":""
}
def init_terminal(self, bless:bool):
if bless:
try:
from blessed import Terminal
self.term = Terminal()
term = self.term
self.COLORS["WARN"] = term.red
self.COLORS["NORM"] = term.normal
self.COLORS["PROMPT"] = term.yellow
self.COLORS["OS_PATH"] = term.green
self.COLORS["RCP_PATH"] = term.blue
self.COLORS["ACCENT"] = term.blue
return True
except ModuleNotFoundError:
print("blessed not found")
self.BLEST=False
return False
CONFIG_REL_PATH="rcpconfig.txt"
my_recipe:Recipe=None
rcp_path:Path=None
RCPFLAG = False
def cwd_path(self):
"""Returns a Path object of the current working directory."""
return Path(os.getcwd())
def init_settings(self):
"""Searches for a settings file at `./rcpconfig.txt`
Currently does nothing with that file.
"""
config_path = self.cwd_path()/self.CONFIG_REL_PATH
if config_path.exists():
print( f"Found a settings file at {str(config_path)}!" )
def tokenizer(self, line:str):
"""Generator, returns None upon ending"""
WHITESPACE=0
TOKEN=1
DQUOTE=2
SQUOTE=3
state = WHITESPACE
reg0 = 0
for i in range(len(line)):
char = line[i]
if state == WHITESPACE:
if char == "\"":
state = DQUOTE
reg0 = i+1
elif char == "'":
state = SQUOTE
reg0 = i+1
elif not char.isspace():
state = TOKEN
reg0 = i
elif state == TOKEN:
if char.isspace():
state = WHITESPACE
yield line[reg0:i]
reg0=i+1
elif state == SQUOTE:
if char == "'":
state = WHITESPACE
yield line[reg0:i]
reg0 = i+1
elif state == DQUOTE:
if char == '"':
state = WHITESPACE
yield line[reg0:i]
reg0 = i+1
yield line[reg0:]
yield None
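        # Hedged example: list(self.tokenizer('set title "Apple Pie" vegan'))
        # returns ['set', 'title', 'Apple Pie', 'vegan', None] -- quoted spans
        # stay intact and the stream ends with a None sentinel.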
def script_mode(self, script:Path):
"""Handles scripts being input into the program"""
with script.open("r") as infile:
for line in infile:
self.interpret_command(line)
def console_mode(self):
"""Handles interactive mode loop"""
def prompt(self):
if self.RCPFLAG:
return f"Current Recipe:{self.COLORS['RCP_PATH']}{self.rcp_path.name} {self.COLORS['PROMPT']}# {self.COLORS['NORM']}"
else:
return f"{self.COLORS['RCP_PATH']}{os.getcwd()} {self.COLORS['PROMPT']}$ {self.COLORS['NORM']} "
#whether or not the loop should go on
goon = True
imp = input(prompt(self))
#default mode is file-exploring mode
while goon:
goon = self.interpret_command(imp)
if goon:
imp = input(prompt(self))
COMMAND_DICT={
"help":"prints all the available commands",
"cd":"change current directory",
"ls":"list the contents of the current directory",
"pwd":"prints the current directory",
"echo":"miscellaneous output function",
"open":"change current directory",
"exit":"exits the program"
}
def interpret_command(self, cmd:str):
"""Parses and interprets file-explorer mode commands, can enter recipe mode when open command is called"""
COLORS = self.COLORS
if cmd.strip() == "":
return True
#allows scripts to access recipe mode commands
if self.RCPFLAG:
try:
return self.manip_recipe(cmd)
# except TypeError as e:
# if e.__cause__ is None:
# print(f"{COLORS['WARN']} Expected argument but none was supplied.")
# print(str(e.__traceback__), file=sys.stderr)
except ValueError:
print(f"{COLORS['WARN']} Expected numeric argument but string was given.")
tokens = self.tokenizer(cmd)
root_cmd = next(tokens)
# print(f"command was: {repr(root_cmd)}")
if(root_cmd == "cd"):
arg = next(tokens)
if arg is not None:
cd_path = Path(arg)
if not cd_path.exists():
print(f"{COLORS['WARN']} invalid path")
os.chdir(cd_path)
else:
print(f"{COLORS['WARN']} Arguments expected. No arguments entered.")
elif(root_cmd == "ls"):
#TODO: dumb ls, only does current dir
# print(os.getcwd())
for child in self.cwd_path().iterdir():
child_type = "D" if child.is_dir() else "F"
print(f" {child_type} - {child.name}")
elif(root_cmd == "pwd"):
print(os.getcwd())
elif(root_cmd == "echo"):
print(" ".join(list(tokens)[:-1]))
elif(root_cmd == "help"):
arg = next(tokens)
if arg in self.COMMAND_DICT:
print(f"\t{arg}\t{self.COMMAND_DICT[arg]}")
else:
for cmd_name, helptxt in self.COMMAND_DICT.items():
print(f"\t{cmd_name}\t{helptxt}")
elif(root_cmd == "open"):
self.open_recipe(next(tokens))
# print(self.RCPFLAG)
elif(root_cmd == "exit"):
print("Bye!")
return False
else:
print(f"{COLORS['WARN']}Command not recognized. enter \
'{COLORS['NORM']}help{COLORS['WARN']}' to see available commands")
return True
def open_recipe(self, rcp_path_str:str, name:str=None):
""" opens a recipe and sets the appropriate flags in storage.
Returns false if file failed to open for some reason.
"""
#name arg is currently unused, would allow for storing multiple recipes,
#which would require a different syntax
if rcp_path_str is not None:
self.rcp_path = self.cwd_path()/rcp_path_str
print(f"Opening {str(self.rcp_path)}")
self.RCPFLAG = True
self.my_recipe = Recipe(self.rcp_path)
return True
return False
def close_recipe(self, name = None):
"""Closes recipe. name parameter is unused"""
if self.my_recipe is not None and self.my_recipe.modified:
yes = input(f"{self.COLORS['WARN']}Your recipe has unsaved changes. \
Close anyways? (must type '{self.COLORS['NORM']}yes{self.COLORS['WARN']}')")
if yes != "yes":
return
self.my_recipe = None
self.RCPFLAG=False
self.rcp_path=None
RCP_COMMANDS={
"help":"prints all available commands",
"display":"prints the whole recipe as a Markdown",
"get":"get some information about the recipe (metadata [key], step [i])",
"add":"add information to the recipe \t(step, recipe)",
"set":"changes recipe information. (title, author, serves, srcurl)",
"remove":"removes a step",
"metric":"converts a recipe's ingredients to metric",
"scale":"scales ingredients by a factor",
"save":"saves a recipe to given path, or original path if none.\t 1 optional path argument",
"close":"closes the recipe mode, returning to file explorer"
}
def manip_recipe(self, cmd:str):
"""handles recipe manipulation, parses commands"""
COLORS = self.COLORS
#let's copy kubectl-style commands
#format is: action target *arguments...
my_recipe = self.my_recipe
RCP_COMMANDS = self.RCP_COMMANDS
tokens = self.tokenizer(cmd)
root = next(tokens)
if root == "help":
arg = next(tokens)
if arg in RCP_COMMANDS:
print(f"\t{arg}\t{RCP_COMMANDS[arg]}")
else:
for cmd_name, helptxt in RCP_COMMANDS.items():
print(f"\t{cmd_name}\t{helptxt}")
elif root == "display":
#TODO: may need to change cursor on terminal
# if self.BLEST:
# with self.term.fullscreen():
# print(str(my_recipe))
# input("press enter to quit")
# else:
print(str(self.my_recipe))
elif root == "get":
what = next(tokens)
if what == "metadata":
key = next(tokens)
keys = my_recipe.cli_get_metadata_keys()
if key is not None and key in keys:
print(" ".join(keys))
print(f"{self.COLORS['ACCENT']} To access any of the keys, type \
'{self.COLORS['NORM']}get metadata [key]'.")
else:
print(f"{key} = {my_recipe.cli_get_metadata(key)}")
elif what == "title":
print(f"{my_recipe.title}")
elif what == "step":
numnun = next(tokens)
steps = my_recipe.steps
printall = True
if numnun is not None:
num = int(numnun)
if num < len(steps) and num > 0:
print(f"Step {num}. {steps[num-1]}")
printall = False
if printall:
for number, step in enumerate(steps):
print(f"Step {number+1}. {step}")
elif what == "units":
munit = IngredientAmount.MASS_CONV
vunit = IngredientAmount.VOLUME_CONV
print("Mass Units:")
for mu in munit:
print(f"\t{mu}")
print("Volume Units:")
for vu in vunit:
print(f"\t{vu}")
else:
print(f"{COLORS['WARN']} invalid get argument.")
print(f"Possible arguments for get command: ")
RECIPE_GET_DICT={
"title":"get title of the recipe.",
"metadata":"get metadata by key. If no key supplied, prints all keys available.\t 1 argument",
"units":"get supported convertible units"
}
for get_cmd, help_txt in RECIPE_GET_DICT.items():
print(f"{COLORS['ACCENT']}{get_cmd}\t{COLORS['NORM']}{help_txt}")
elif root == "add":
what = next(tokens)
if what == "step":
the_step = list(tokens)
# print(the_step)
my_step = " ".join(the_step[:-1])
i = len(my_recipe.steps) + 1
print(f"{COLORS['ACCENT']}Added: {COLORS['NORM']} Step {i}. {my_step}")
my_recipe.cli_add_step(my_step)
elif what == "ingredient":
ingr = next(tokens)
amount = float(next(tokens))
unit = next(tokens)
my_recipe.cli_add_ingredient(ingr, amount, unit)
elif what == "metadata":
key = next(tokens)
not_added = True
if key is not None:
val = next(tokens)
if val is not None:
my_recipe.cli_custom_metadata(key, val)
not_added = False
if not_added:
print(f"{COLORS}No key added. Missing arguments")
else:
print(f"{COLORS['WARN']} invalid add argument.")
print(f"Possible arguments for add command: ")
RECIPE_ADD_DICT={
"metadata":"add or set metadata by key and value.\t 2 arguments",
"step":"add a step to the recipe.\t 1 or more arguments (treated as a sentence)",
"ingredient":f"add an ingredient. (see '{COLORS['ACCENT']}get units{COLORS['NORM']}')\
\t three arguments (what, amount, unit)"
}
for add_cmd, help_txt in RECIPE_ADD_DICT.items():
print(f"{COLORS['ACCENT']}{add_cmd}\t{COLORS['NORM']}{help_txt}")
elif root == "set":
what = next(tokens)
if what == "title":
name = next(tokens)
my_recipe.cli_set_title(name)
elif what == "author":
name = next(tokens)
my_recipe.cli_set_author(name)
elif what == "serves":
num = int(next(tokens))
my_recipe.cli_set_serves(num)
elif what == "srcurl":
url = next(tokens)
my_recipe.cli_set_srcurl(url)
elif what == "metadata":
key = next(tokens)
not_added = True
if key is not None:
val = next(tokens)
if val is not None:
my_recipe.cli_custom_metadata(key, val)
not_added = False
if not_added:
print(f"{COLORS}No key added. Missing arguments")
else:
print(f"{COLORS['WARN']} invalid set argument.")
print(f"Possible arguments for set: ")
RECIPE_SET_DICT={
"title":"set title of the recipe.\t 1 argument",
"author":"set the author of the recipe.\t 1 argument",
"serves":"number of people served by the recipe.\t 1 numeric argument",
"srcurl":"the source url of the recipe.\t 1 argument",
"metadata":"custom metadata.\t 2 arguments, key then value"
}
for set_cmd, help_txt in RECIPE_SET_DICT.items():
print(f"{COLORS['ACCENT']}{set_cmd}\t{COLORS['NORM']}{help_txt}")
elif root == "remove":
what = next(tokens)
if what == "metadata":
key = next(tokens)
my_recipe.cli_remove_metadata(key)
elif what == "step":
try:
num = int(next(tokens)) - 1
my_recipe.cli_remove_step(num)
except (IndexError, ValueError):
print(f"{self.COLORS['WARN']} Invalid index. \
There are {len(my_recipe.steps)} steps in the recipe.")
except TypeError:
my_recipe.cli_remove_step()
elif what == "ingredient":
ingr = next(tokens)
status = my_recipe.cli_remove_ingredient(ingr)
if status == False:
print("No matching ingredient found!")
else:
print(f"{self.COLORS['WARN']} invalid remove argument.")
print(f"Possible arguments for remove command: ")
RECIPE_RMV_DICT={
"step":"remove the last step, or the specified step number.\t 0 or 1 arguments",
"ingredient":"remove an ingredient by the name of the ingredient.\t 1 argument",
"metadata":"remove a key value pair by the key.\t 1 arguments"
}
for rmv_cmd, help_txt in RECIPE_RMV_DICT.items():
print(f"{COLORS['ACCENT']}{rmv_cmd}\t{COLORS['NORM']}{help_txt}")
elif root == "save":
target = next(tokens)
print(repr(target))
if target is not None and target.strip() != "":
print("saving to specified file", target)
my_recipe.write_json(target)
else:
print("saving to default file", str(self.rcp_path))
my_recipe.write_json(self.rcp_path)
my_recipe.modified=False
elif root == "close":
self.close_recipe()
else:
if root == "metric":
ingr = next(tokens)
my_recipe.cli_to_metric(ingr)
elif root == "scale":
factor = float(next(tokens))
my_recipe.cli_scale(factor)
else:
print(f"{self.COLORS['WARN']}Command {root} not recognized. enter \
'{self.COLORS['NORM']}help{self.COLORS['WARN']}' to see available commands")
return True
#end class
``` |
{
"source": "2110521-2563-1-Software-Architecture/four-guys-one-cup-assignment3",
"score": 2
} |
#### File: four-guys-one-cup-assignment3/mvp/main.py
```python
import wx
from mvp.views.main_view import MainView
from mvp.presenters.main_presenter import MainPresenter
from mvp.models.repositories.note_repository import NoteRepository
class MvpNoteApplication:
def main(self):
# Setup dependencies
note_repository = NoteRepository()
# Setup view
app = wx.App()
main_view = MainView()
# Setup presenter
main_presenter = MainPresenter(main_view, note_repository)
# Setup first page
main_view.init_ui()
main_view.Show(True)
# Start application
app.MainLoop()
if __name__ == "__main__":
application = MvpNoteApplication()
application.main()
``` |
{
"source": "2110521-2563-1-Software-Architecture/TBD-Project",
"score": 2
} |
#### File: app/controllers/news_feed_controller.py
```python
from app.controllers.base import Controller
from app.models.news_feed import NewsFeed
from app.models.user import User
import json
class NewsFeedController(Controller):
async def get(self, request):
try:
current_user = await User(request.app).get_user(request.headers.get('User'))
response = await NewsFeed(request.app).get_news_feed(current_user,
json.loads(request.headers.get('page')))
await self.write(request, self.json_response(response))
except:
response = {'status':'Bad Request.', 'reason':'Controller rejected.'}
await self.write(request, self.json_response(response))
async def create(self, request):
try:
payload = await request.json()
current_user = await User(request.app).get_user(request.headers.get('User'))
response = await NewsFeed(request.app).create(current_user, **payload)
await self.write(request, self.json_response(response))
except:
response = {'status':'Bad Request.', 'reason':'Controller rejected.'}
await self.write(request, self.json_response(response))
async def update(self, request):
try:
payload = await request.json()
current_user = await User(request.app).get_user(request.headers.get('User'))
response = await NewsFeed(request.app).update(current_user, **payload)
await self.write(request, self.json_response(response))
except:
response = {'status':'Bad Request.', 'reason':'Controller rejected.'}
await self.write(request, self.json_response(response))
async def delete(self, request):
try:
current_user = await User(request.app).get_user(request.headers.get('User'))
news_feed_id = request.headers.get('target')
response = await NewsFeed(request.app).delete(current_user, news_feed_id)
await self.write(request, self.json_response(response))
except:
response = {'status':'Bad Request.', 'reason':'Controller rejected.'}
await self.write(request, self.json_response(response))
async def interact(self, request):
try:
payload = await request.json()
current_user = await User(request.app).get_user(request.headers.get('User'))
response = await NewsFeed(request.app).interact(current_user, **payload)
await self.write(request, self.json_response(response))
except:
response = {'status':'Bad Request.', 'reason':'Controller rejected.'}
await self.write(request, self.json_response(response))
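# Hedged sketch of how these handlers could be wired into aiohttp-style routes
# (paths and app setup below are illustrative, not part of this repository):
#
#   controller = NewsFeedController()
#   app.router.add_get('/news_feed', controller.get)
#   app.router.add_post('/news_feed', controller.create)
#   app.router.add_put('/news_feed', controller.update)
#   app.router.add_delete('/news_feed', controller.delete)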
``` |
{
"source": "2118-full-stack-python-codo-a-codo/clase-20",
"score": 4
} |
#### File: clase-20/src/prueba.py
```python
import math
'''
This is a comment block
'''
def main():
'''
    This is the main function
'''
print("Hello World")
print("otra instruccion")
holaSoyUnaVariable = 4
holaSoyUnaVariable = "<NAME>"
print(holaSoyUnaVariable)
miVariable = float(input("Ingresa tu edad: "))
print(miVariable)
miCondition = (0!=1) and (2 > 1)
if miCondition:
print("Hola")
else:
print("Esto es un else")
myInput = int(input("Ingrese el numero de mes"))
myMonth = month(myInput)
print(myMonth)
contador = int(input(" ingrese contador hasta: "))
counter(contador)
counterWithFor(contador)
'''
    In other programming languages:
if () {
}else {
}
'''
def counterWithFor(until):
'''
    This function counts up to until using a for loop
'''
for counter in range(until): #range(0, until, 1) :
print("Mi contador es con un For: ", counter)
def counter(until):
'''
    This function counts up to until using a while loop
'''
    counter = 0 # define a counter
    while (counter < until): # loop while counter is less than until
print("Mi contador es: ", counter)
counter +=1
def month(number):
'''
    This function returns the month name for a given month number
'''
    condition = (number > 0) and (number <= 12) # first check that the number is within the expected range
mes = "invalido"
if condition:
print("valido")
if number == 1:
mes = "Enero"
elif number == 2:
mes = "febrero"
elif number == 3:
mes = "marzo"
elif number == 4:
mes = "abril"
elif number == 5:
mes = "mayo"
elif number == 6:
mes = "junio"
elif number == 7:
mes = "julio"
elif number == 8:
mes = "agosto"
elif number == 9:
mes = "septiembre"
elif number == 10:
mes = "octubre"
elif number == 11:
mes = "noviembre"
elif number == 12:
mes = "diciembre"
        '''
        Python has no switch statement; in other languages:
        switch(expression) {
        case x:
            // code block
            break;
        case y:
            // code block
            break;
        default:
            // code block
        }
        '''
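        # A common Python substitute for switch is a dict lookup (illustrative):
        #   nombres = {1: "Enero", 2: "febrero", ..., 12: "diciembre"}
        #   mes = nombres.get(number, "invalido")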
else:
print("Mes invalido")
return mes
if __name__ == "__main__":
# execute only if run as a script
main()
``` |
{
"source": "211tbc/synthesis",
"score": 2
} |
#### File: db_migration/versions/002_Add_dedup_link_Table.py
```python
from sqlalchemy import *
from migrate import *
meta = MetaData(migrate_engine)
#table_metadata = MetaData(bind=self.pg_db, reflect=True)
dedup_link_table = Table(
'dedup_link',
meta,
Column('source_rec_id', String(50), primary_key=True),
Column('destination_rec_id', String(50)),
Column('weight_factor', Integer),
useexisting = True
)
def upgrade():
# Upgrade operations go here. Don't create your own engine; use the engine
# named 'migrate_engine' imported from migrate.
dedup_link_table.create()
def downgrade():
# Operations to reverse the above upgrade go here.
dedup_link_table.drop()
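# Hedged note: with sqlalchemy-migrate these upgrade()/downgrade() hooks are
# normally invoked through the repository's manage script (for example
# `python manage.py upgrade`), not imported and called directly.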
```
#### File: synthesis/src/dbobjects.py
```python
from sqlalchemy import create_engine, Column, Integer, BigInteger, String, Boolean, MetaData, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.types import DateTime, Date, Interval
from sqlalchemy.pool import NullPool
from .conf import settings
from logging import Logger
print("loaded dbobjects module")
class DB:
#print "loaded DB Class"
database_string = 'postgresql+psycopg2://' + settings.DB_USER + ':' + settings.DB_PASSWD + '@' + settings.DB_HOST + ':' + str(settings.DB_PORT) + '/' + settings.DB_DATABASE
pg_db_engine = create_engine(database_string, poolclass=NullPool, echo=settings.DEBUG_ALCHEMY)
mymetadata = MetaData(bind=pg_db_engine)
Base = declarative_base(metadata=mymetadata)
def __init__(self):
#postgresql[+driver]://<user>:<pass>@<host>/<dbname> #, server_side_cursors=True)
self.Session = sessionmaker() # Was
#self.Session = sessionmaker(bind=self.pg_db_engine) # JCS
loglevel = 'DEBUG'
self.log = Logger(settings.LOGGING_INI, loglevel)
class MapBase():
def __init__(self, field_dict):
if settings.DEBUG:
print("Base Class created: %s" % self.__class__.__name__)
#def __init__(self, field_dict):
if settings.DEBUG:
print(field_dict)
        for x, y in field_dict.items():
self.__setattr__(x,y)
def __repr__(self):
field_dict = vars(self)
out = ''
if len(field_dict) > 0:
            for x, y in field_dict.items():
if x[0] != "_":
out = out + "%s = %s, " % (x,y)
return "<%s(%s)>" % (self.__class__.__name__, out)
else:
return ''
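# Hedged example of MapBase's dict-to-attribute behaviour (values illustrative):
#
#   row = MapBase({'need_status': 'open', 'taxonomy': 'shelter'})
#   row.need_status  # 'open'
#   repr(row)        # "<MapBase(need_status = open, taxonomy = shelter, )>"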
class SiteServiceParticipation(DB.Base, MapBase):
__tablename__ = 'site_service_participation'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
household_index_id = Column(Integer, ForeignKey('household.id'))
site_service_participation_idid_num = Column(String(32))
site_service_participation_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_participation_idid_str = Column(String(32))
site_service_participation_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32)) # JCS
#site_service_idid_num_date_collected = Column(DateTime(timezone=False)) # JCS
destination = Column(String(32))
destination_date_collected = Column(DateTime(timezone=False))
destination_other = Column(String(32))
destination_other_date_collected = Column(DateTime(timezone=False))
destination_tenure = Column(String(32))
destination_tenure_date_collected = Column(DateTime(timezone=False))
disabling_condition = Column(String(32))
disabling_condition_date_collected = Column(DateTime(timezone=False))
participation_dates_start_date = Column(DateTime(timezone=False))
participation_dates_start_date_date_collected = Column(DateTime(timezone=False))
participation_dates_end_date = Column(DateTime(timezone=False))
participation_dates_end_date_date_collected = Column(DateTime(timezone=False))
veteran_status = Column(String(32))
veteran_status_date_collected = Column(DateTime(timezone=False))
#adding a reported column. Hopefully this will append the column to the table def.
reported = Column(Boolean)
site_service_participation_id_delete = Column(String(32))
site_service_participation_id_delete_occurred_date = Column(DateTime(timezone=False))
site_service_participation_id_delete_effective_date = Column(DateTime(timezone=False))
fk_participation_to_need = relationship('Need', backref='fk_need_to_participation')
fk_participation_to_serviceevent = relationship('ServiceEvent')
fk_participation_to_personhistorical = relationship('PersonHistorical')
fk_participation_to_person = Column(Integer, ForeignKey('person.id'))
useexisting = True
class Need(DB.Base, MapBase):
__tablename__ = 'need'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id')) # JCS
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id')) # JCS
export_index_id = Column(Integer, ForeignKey('export.id'))
need_idid_num = Column(String(32))
need_idid_num_date_collected = Column(DateTime(timezone=False))
need_idid_str = Column(String(32))
need_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32))
site_service_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_idid_str = Column(String(32))
site_service_idid_str_date_collected = Column(DateTime(timezone=False))
service_event_idid_num = Column(String(32))
service_event_idid_num_date_collected = Column(DateTime(timezone=False))
service_event_idid_str = Column(String(32))
service_event_idid_str_date_collected = Column(DateTime(timezone=False))
need_status = Column(String(32))
need_status_date_collected = Column(DateTime(timezone=False))
taxonomy = Column(String(32))
reported = Column(Boolean)
## HUD 3.0
person_index_id = Column(Integer, ForeignKey('person.id'))
need_id_delete = Column(String(32))
need_id_delete_occurred_date = Column(DateTime(timezone=False))
need_id_delete_delete_effective_date = Column(DateTime(timezone=False))
need_effective_period_start_date = Column(DateTime(timezone=False))
need_effective_period_end_date = Column(DateTime(timezone=False))
need_recorded_date = Column(DateTime(timezone=False))
useexisting = True
class Races(DB.Base, MapBase):
__tablename__ = 'races'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
race_unhashed = Column(Integer)
race_hashed = Column(String(32))
race_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
## HUD 3.0
race_data_collection_stage = Column(String(32))
race_date_effective = Column(DateTime(timezone=False))
useexisting = True
class OtherNames(DB.Base, MapBase):
__tablename__ = 'other_names'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
other_first_name_unhashed = Column(String(50))
other_first_name_hashed = Column(String(50))
other_first_name_date_collected = Column(DateTime(timezone=False))
other_first_name_date_effective = Column(DateTime(timezone=False))
other_first_name_data_collection_stage = Column(String(32))
other_middle_name_unhashed = Column(String(50))
other_middle_name_hashed = Column(String(50))
other_middle_name_date_collected = Column(DateTime(timezone=False))
other_middle_name_date_effective = Column(DateTime(timezone=False))
other_middle_name_data_collection_stage = Column(String(32))
other_last_name_unhashed = Column(String(50))
other_last_name_hashed = Column(String(50))
other_last_name_date_collected = Column(DateTime(timezone=False))
other_last_name_date_effective = Column(DateTime(timezone=False))
other_last_name_data_collection_stage = Column(String(32))
other_suffix_unhashed = Column(String(50))
other_suffix_hashed = Column(String(50))
other_suffix_date_collected = Column(DateTime(timezone=False))
other_suffix_date_effective = Column(DateTime(timezone=False))
other_suffix_data_collection_stage = Column(String(32))
useexisting = True
class HUDHomelessEpisodes(DB.Base, MapBase):
__tablename__ = 'hud_homeless_episodes'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
start_date = Column(String(32))
start_date_date_collected = Column(DateTime(timezone=False))
end_date = Column(String(32))
end_date_date_collected = Column(DateTime(timezone=False))
useexisting = True
class Veteran(DB.Base, MapBase):
__tablename__ = 'veteran'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
service_era = Column(Integer)
service_era_date_collected = Column(DateTime(timezone=False))
military_service_duration = Column(Integer)
military_service_duration_date_collected = Column(DateTime(timezone=False))
served_in_war_zone = Column(Integer)
served_in_war_zone_date_collected = Column(DateTime(timezone=False))
war_zone = Column(Integer)
war_zone_date_collected = Column(DateTime(timezone=False))
war_zone_other = Column(String(50))
war_zone_other_date_collected = Column(DateTime(timezone=False))
months_in_war_zone = Column(Integer)
months_in_war_zone_date_collected = Column(DateTime(timezone=False))
received_fire = Column(Integer)
received_fire_date_collected = Column(DateTime(timezone=False))
military_branch = Column(Integer)
military_branch_date_collected = Column(DateTime(timezone=False))
military_branch_other = Column(String(50))
military_branch_other_date_collected = Column(DateTime(timezone=False))
discharge_status = Column(Integer)
discharge_status_date_collected = Column(DateTime(timezone=False))
discharge_status_other = Column(String(50))
discharge_status_other_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class DrugHistory(DB.Base, MapBase):
__tablename__ = 'drug_history'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
drug_history_id = Column(String(32))
drug_history_id_date_collected = Column(DateTime(timezone=False))
drug_code = Column(Integer)
drug_code_date_collected = Column(DateTime(timezone=False))
drug_use_frequency = Column(Integer)
drug_use_frequency_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class EmergencyContact(DB.Base, MapBase):
__tablename__ = 'emergency_contact'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
emergency_contact_id = Column(String(32))
emergency_contact_id_date_collected = Column(DateTime(timezone=False))
emergency_contact_name = Column(String(32))
emergency_contact_name_date_collected = Column(DateTime(timezone=False))
emergency_contact_phone_number_0 = Column(String(32))
emergency_contact_phone_number_date_collected_0 = Column(DateTime(timezone=False))
emergency_contact_phone_number_type_0 = Column(String(32))
emergency_contact_phone_number_1 = Column(String(32))
emergency_contact_phone_number_date_collected_1 = Column(DateTime(timezone=False))
emergency_contact_phone_number_type_1 = Column(String(32))
emergency_contact_address_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_start_date = Column(DateTime(timezone=False))
emergency_contact_address_start_date_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_end_date = Column(DateTime(timezone=False))
emergency_contact_address_end_date_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_line1 = Column(String(32))
emergency_contact_address_line1_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_line2 = Column(String(32))
emergency_contact_address_line2_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_city = Column(String(32))
emergency_contact_address_city_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_state = Column(String(32))
emergency_contact_address_state_date_collected = Column(DateTime(timezone=False))
emergency_contact_relation_to_client = Column(String(32))
emergency_contact_relation_to_client_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class PersonAddress(DB.Base, MapBase):
__tablename__ = 'person_address'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
address_period_start_date = Column(DateTime(timezone=False))
address_period_start_date_date_collected = Column(DateTime(timezone=False))
address_period_end_date = Column(DateTime(timezone=False))
address_period_end_date_date_collected = Column(DateTime(timezone=False))
pre_address_line = Column(String(100))
pre_address_line_date_collected = Column(DateTime(timezone=False))
pre_address_line_date_effective = Column(DateTime(timezone=False))
pre_address_line_data_collection_stage = Column(String(32))
line1 = Column(String(100))
line1_date_collected = Column(DateTime(timezone=False))
line1_date_effective = Column(DateTime(timezone=False))
line1_data_collection_stage = Column(String(32))
line2 = Column(String(100))
line2_date_collected = Column(DateTime(timezone=False))
line2_date_effective = Column(DateTime(timezone=False))
line2_data_collection_stage = Column(String(32))
city = Column(String(100))
city_date_collected = Column(DateTime(timezone=False))
city_date_effective = Column(DateTime(timezone=False))
city_data_collection_stage = Column(String(32))
county = Column(String(32))
county_date_collected = Column(DateTime(timezone=False))
county_date_effective = Column(DateTime(timezone=False))
county_data_collection_stage = Column(String(32))
state = Column(String(32))
state_date_collected = Column(DateTime(timezone=False))
state_date_effective = Column(DateTime(timezone=False))
state_data_collection_stage = Column(String(32))
zipcode = Column(String(10))
zipcode_date_collected = Column(DateTime(timezone=False))
zipcode_date_effective = Column(DateTime(timezone=False))
zipcode_data_collection_stage = Column(String(32))
country = Column(String(32))
country_date_collected = Column(DateTime(timezone=False))
country_date_effective = Column(DateTime(timezone=False))
country_data_collection_stage = Column(String(32))
is_last_permanent_zip = Column(Integer)
is_last_permanent_zip_date_collected = Column(DateTime(timezone=False))
is_last_permanent_zip_date_effective = Column(DateTime(timezone=False))
is_last_permanent_zip_data_collection_stage = Column(String(32))
zip_quality_code = Column(Integer)
zip_quality_code_date_collected = Column(DateTime(timezone=False))
zip_quality_code_date_effective = Column(DateTime(timezone=False))
zip_quality_code_data_collection_stage = Column(String(32))
reported = Column(Boolean)
## HUD 3.0
person_address_delete = Column(String(32))
person_address_delete_occurred_date = Column(DateTime(timezone=False))
person_address_delete_effective_date = Column(DateTime(timezone=False))
useexisting = True
class PersonHistorical(DB.Base, MapBase):
__tablename__ = 'person_historical'
id = Column(Integer, primary_key=True)
call_index_id = Column(Integer, ForeignKey('call.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id')) # JCS
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id')) # JCS
person_historical_id_id_num = Column(String(32))
person_historical_id_id_str = Column(String(32))
person_historical_id_delete_effective_date = Column(DateTime(timezone=False))
person_historical_id_delete = Column(Integer)
person_historical_id_delete_occurred_date = Column(DateTime(timezone=False))
barrier_code = Column(String(32))
barrier_code_date_collected = Column(DateTime(timezone=False))
barrier_other = Column(String(32))
barrier_other_date_collected = Column(DateTime(timezone=False))
child_currently_enrolled_in_school = Column(String(32))
child_currently_enrolled_in_school_date_collected = Column(DateTime(timezone=False))
currently_employed = Column(String(32))
currently_employed_date_collected = Column(DateTime(timezone=False))
currently_in_school = Column(String(32))
currently_in_school_date_collected = Column(DateTime(timezone=False))
degree_code = Column(String(32))
degree_code_date_collected = Column(DateTime(timezone=False))
degree_other = Column(String(32))
degree_other_date_collected = Column(DateTime(timezone=False))
developmental_disability = Column(String(32))
developmental_disability_date_collected = Column(DateTime(timezone=False))
domestic_violence = Column(String(32))
domestic_violence_date_collected = Column(DateTime(timezone=False))
domestic_violence_how_long = Column(String(32))
domestic_violence_how_long_date_collected = Column(DateTime(timezone=False))
due_date = Column(String(32))
due_date_date_collected = Column(DateTime(timezone=False))
employment_tenure = Column(String(32))
employment_tenure_date_collected = Column(DateTime(timezone=False))
health_status = Column(String(32))
health_status_date_collected = Column(DateTime(timezone=False))
highest_school_level = Column(String(32))
highest_school_level_date_collected = Column(DateTime(timezone=False))
hivaids_status = Column(String(32))
hivaids_status_date_collected = Column(DateTime(timezone=False))
hours_worked_last_week = Column(String(32))
hours_worked_last_week_date_collected = Column(DateTime(timezone=False))
hud_chronic_homeless = Column(String(32))
hud_chronic_homeless_date_collected = Column(DateTime(timezone=False))
hud_homeless = Column(String(32))
hud_homeless_date_collected = Column(DateTime(timezone=False))
site_service_id = Column(Integer)
###HUDHomelessEpisodes (subtable)
###IncomeAndSources (subtable)
length_of_stay_at_prior_residence = Column(String(32))
length_of_stay_at_prior_residence_date_collected = Column(DateTime(timezone=False))
looking_for_work = Column(String(32))
looking_for_work_date_collected = Column(DateTime(timezone=False))
mental_health_indefinite = Column(String(32))
mental_health_indefinite_date_collected = Column(DateTime(timezone=False))
mental_health_problem = Column(String(32))
mental_health_problem_date_collected = Column(DateTime(timezone=False))
non_cash_source_code = Column(String(32))
non_cash_source_code_date_collected = Column(DateTime(timezone=False))
non_cash_source_other = Column(String(32))
non_cash_source_other_date_collected = Column(DateTime(timezone=False))
###PersonAddress (subtable)
person_email = Column(String(32))
person_email_date_collected = Column(DateTime(timezone=False))
person_phone_number = Column(String(32))
person_phone_number_date_collected = Column(DateTime(timezone=False))
physical_disability = Column(String(32))
physical_disability_date_collected = Column(DateTime(timezone=False))
pregnancy_status = Column(String(32))
pregnancy_status_date_collected = Column(DateTime(timezone=False))
prior_residence = Column(String(32))
prior_residence_date_collected = Column(DateTime(timezone=False))
prior_residence_other = Column(String(32))
prior_residence_other_date_collected = Column(DateTime(timezone=False))
reason_for_leaving = Column(String(32))
reason_for_leaving_date_collected = Column(DateTime(timezone=False))
reason_for_leaving_other = Column(String(32))
reason_for_leaving_other_date_collected = Column(DateTime(timezone=False))
school_last_enrolled_date = Column(String(32))
school_last_enrolled_date_date_collected = Column(DateTime(timezone=False))
school_name = Column(String(32))
school_name_date_collected = Column(DateTime(timezone=False))
school_type = Column(String(32))
school_type_date_collected = Column(DateTime(timezone=False))
subsidy_other = Column(String(32))
subsidy_other_date_collected = Column(DateTime(timezone=False))
subsidy_type = Column(String(32))
subsidy_type_date_collected = Column(DateTime(timezone=False))
substance_abuse_indefinite = Column(String(32))
substance_abuse_indefinite_date_collected = Column(DateTime(timezone=False))
substance_abuse_problem = Column(String(32))
substance_abuse_problem_date_collected = Column(DateTime(timezone=False))
total_income = Column(String(32))
total_income_date_collected = Column(DateTime(timezone=False))
###Veteran (subtable)
vocational_training = Column(String(32))
vocational_training_date_collected = Column(DateTime(timezone=False))
annual_personal_income = Column(Integer)
annual_personal_income_date_collected = Column(DateTime(timezone=False))
employment_status = Column(Integer)
employment_status_date_collected = Column(DateTime(timezone=False))
family_size = Column(Integer)
family_size_date_collected = Column(DateTime(timezone=False))
hearing_impaired = Column(Integer)
hearing_impaired_date_collected = Column(DateTime(timezone=False))
marital_status = Column(Integer)
marital_status_date_collected = Column(DateTime(timezone=False))
non_ambulatory = Column(Integer)
non_ambulatory_date_collected = Column(DateTime(timezone=False))
residential_status = Column(Integer)
residential_status_date_collected = Column(DateTime(timezone=False))
visually_impaired = Column(Integer)
visually_impaired_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
fk_person_historical_to_income_and_sources = relationship('IncomeAndSources',
backref='fk_income_and_sources_to_person_historical')
fk_person_historical_to_veteran = relationship('Veteran', backref='fk_veteran_to_person_historical')
fk_person_historical_to_hud_homeless_episodes = relationship('HUDHomelessEpisodes',
backref='fk_hud_homeless_episodes_to_person_historical')
fk_person_historical_to_person_address = relationship('PersonAddress', backref='fk_person_address_to_person_historical')
useexisting = True
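# Illustrative sketch, not part of the original schema: PersonHistorical fans out
# into its subtables (IncomeAndSources, Veteran, HUDHomelessEpisodes,
# PersonAddress) through the relationship() declarations above. The hypothetical
# helper below assumes an open SQLAlchemy Session and simply sums the amount
# column over the income subtable for one historical record.
def _example_total_income(session, person_historical_id):
    """Hypothetical helper: sum IncomeAndSources.amount for one PersonHistorical row."""
    record = session.query(PersonHistorical).get(person_historical_id)
    if record is None:
        return 0
    return sum(row.amount or 0 for row in record.fk_person_historical_to_income_and_sources)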
class IncomeAndSources(DB.Base, MapBase):
__tablename__ = 'income_and_sources'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
amount = Column(Integer)
amount_date_collected = Column(DateTime(timezone=False))
income_source_code = Column(Integer)
income_source_code_date_collected = Column(DateTime(timezone=False))
income_source_other = Column(String(32))
income_source_other_date_collected = Column(DateTime(timezone=False))
## HUD 3.0
income_and_source_id_id_num = Column(String(32))
income_and_source_id_id_str = Column(String(32))
income_and_source_id_id_delete_occurred_date = Column(DateTime(timezone=False))
income_and_source_id_id_delete_effective_date = Column(DateTime(timezone=False))
income_source_code_date_effective = Column(DateTime(timezone=False))
income_source_other_date_effective = Column(DateTime(timezone=False))
receiving_income_source_date_collected = Column(DateTime(timezone=False))
receiving_income_source_date_effective = Column(DateTime(timezone=False))
income_source_amount_date_effective = Column(DateTime(timezone=False))
income_and_source_id_id_delete = Column(Integer)
income_source_code_data_collection_stage = Column(String(32))
income_source_other_data_collection_stage = Column(String(32))
receiving_income_source = Column(Integer)
receiving_income_source_data_collection_stage = Column(String(32))
income_source_amount_data_collection_stage = Column(String(32))
useexisting = True
class Members(DB.Base, MapBase):
__tablename__ = 'members'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
household_index_id = Column(Integer, ForeignKey('household.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
relationship_to_head_of_household = Column(String(32))
relationship_to_head_of_household_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class ReleaseOfInformation(DB.Base, MapBase):
__tablename__ = 'release_of_information'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
release_of_information_idid_num = Column(String(32))
release_of_information_idid_num_date_collected = Column(DateTime(timezone=False))
release_of_information_idid_str = Column(String(32))
release_of_information_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32))
site_service_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_idid_str = Column(String(32))
site_service_idid_str_date_collected = Column(DateTime(timezone=False))
documentation = Column(String(32))
documentation_date_collected = Column(DateTime(timezone=False))
#EffectivePeriod (subtable)
start_date = Column(String(32))
start_date_date_collected = Column(DateTime(timezone=False))
end_date = Column(String(32))
end_date_date_collected = Column(DateTime(timezone=False))
release_granted = Column(String(32))
release_granted_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
## HUD 3.0
release_of_information_id_data_collection_stage = Column(String(32))
release_of_information_id_date_effective = Column(DateTime(timezone=False))
documentation_data_collection_stage = Column(String(32))
documentation_date_effective = Column(DateTime(timezone=False))
release_granted_data_collection_stage = Column(String(32))
release_granted_date_effective = Column(DateTime(timezone=False))
useexisting = True
class SourceExportLink(DB.Base, MapBase):
__tablename__ = 'source_export_link'
id = Column(Integer, primary_key=True)
source_index_id = Column(Integer, ForeignKey('source.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
useexisting = True
class Region(DB.Base, MapBase):
__tablename__ = 'region'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
region_id_id_num = Column(String(50))
region_id_id_str = Column(String(32))
site_service_id = Column(String(50))
region_type = Column(String(50))
region_type_date_collected = Column(DateTime(timezone=False))
region_type_date_effective = Column(DateTime(timezone=False))
region_type_data_collection_stage = Column(String(32))
region_description = Column(String(30))
region_description_date_collected = Column(DateTime(timezone=False))
region_description_date_effective = Column(DateTime(timezone=False))
region_description_data_collection_stage = Column(String(32))
useexisting = True
class Agency(DB.Base, MapBase):
__tablename__ = 'agency'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
agency_delete = Column(Integer)
agency_delete_occurred_date = Column(DateTime(timezone=False))
agency_delete_effective_date = Column(DateTime(timezone=False))
airs_key = Column(String(50))
airs_name = Column(String(50))
agency_description = Column(String(50))
irs_status = Column(String(50))
source_of_funds = Column(String(50))
record_owner = Column(String(50))
fein = Column(String(50))
year_inc = Column(String(50))
annual_budget_total = Column(String(50))
legal_status = Column(String(50))
exclude_from_website = Column(String(50))
exclude_from_directory = Column(String(50))
useexisting = True
class AgencyChild(DB.Base, MapBase):
__tablename__ = 'agency_child'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
useexisting = True
class Service(DB.Base, MapBase):
__tablename__ = 'service'
id = Column(Integer, primary_key=True)
service_id = Column(String(50))
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
service_delete = Column(Integer)
service_delete_occurred_date = Column(DateTime(timezone=False))
service_delete_effective_date = Column(DateTime(timezone=False))
airs_key = Column(String(50))
airs_name = Column(String(50))
coc_code = Column(String(5))
configuration = Column(String(50))
direct_service_code = Column(String(50))
grantee_identifier = Column(String(10))
individual_family_code = Column(String(50))
residential_tracking_method = Column(String(50))
service_type = Column(String(50))
jfcs_service_type = Column(String(50))
service_effective_period_start_date = Column(DateTime(timezone=False))
service_effective_period_end_date = Column(DateTime(timezone=False))
service_recorded_date = Column(DateTime(timezone=False))
target_population_a = Column(String(50))
target_population_b = Column(String(50))
useexisting = True
class Site(DB.Base, MapBase):
__tablename__ = 'site'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
#agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
site_delete = Column(Integer)
site_delete_occurred_date = Column(DateTime(timezone=False))
site_delete_effective_date = Column(DateTime(timezone=False))
airs_key = Column(String(50))
airs_name = Column(String(50))
site_description = Column(String(50))
physical_address_pre_address_line = Column(String(100))
physical_address_line_1 = Column(String(100))
physical_address_line_2 = Column(String(100))
physical_address_city = Column(String(50))
physical_address_country = Column(String(50))
physical_address_state = Column(String(50))
physical_address_zip_code = Column(String(50))
physical_address_county = Column(String(50))
physical_address_reason_withheld = Column(String(50))
physical_address_confidential = Column(String(50))
physical_address_description = Column(String(50))
mailing_address_pre_address_line = Column(String(100))
mailing_address_line_1 = Column(String(100))
mailing_address_line_2 = Column(String(100))
mailing_address_city = Column(String(50))
mailing_address_county = Column(String(50))
mailing_address_state = Column(String(50))
mailing_address_zip_code = Column(String(50))
mailing_address_country = Column(String(50))
mailing_address_reason_withheld = Column(String(50))
mailing_address_confidential = Column(String(50))
mailing_address_description = Column(String(50))
no_physical_address_description = Column(String(50))
no_physical_address_explanation = Column(String(50))
disabilities_access = Column(String(50))
physical_location_description = Column(String(50))
bus_service_access = Column(String(50))
public_access_to_transportation = Column(String(50))
year_inc = Column(String(50))
annual_budget_total = Column(String(50))
legal_status = Column(String(50))
exclude_from_website = Column(String(50))
exclude_from_directory = Column(String(50))
agency_key = Column(String(50))
useexisting = True
class SiteService(DB.Base, MapBase):
__tablename__ = 'site_service'
id = Column(Integer, primary_key=True)
site_service_id = Column(String(50))
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
service_index_id = Column(Integer, ForeignKey(Service.id))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
site_service_delete = Column(Integer)
site_service_delete_occurred_date = Column(DateTime(timezone=False))
site_service_delete_effective_date = Column(DateTime(timezone=False))
name = Column(String(50))
key = Column(String(50))
description = Column(String(50))
fee_structure = Column(String(50))
gender_requirements = Column(String(50))
area_flexibility = Column(String(50))
service_not_always_available = Column(String(50))
service_group_key = Column(String(50))
site_id = Column(String(50))
geographic_code = Column(String(50))
geographic_code_date_collected = Column(DateTime(timezone=False))
geographic_code_date_effective = Column(DateTime(timezone=False))
geographic_code_data_collection_stage = Column(String(50))
housing_type = Column(String(50))
housing_type_date_collected = Column(DateTime(timezone=False))
housing_type_date_effective = Column(DateTime(timezone=False))
housing_type_data_collection_stage = Column(String(50))
principal = Column(String(50))
site_service_effective_period_start_date = Column(DateTime(timezone=False))
site_service_effective_period_end_date = Column(DateTime(timezone=False))
site_service_recorded_date = Column(DateTime(timezone=False))
site_service_type = Column(String(50))
useexisting = True
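# Illustrative sketch, not part of the original schema: SiteService ties a Site
# row to a Service row through site_index_id and service_index_id, but no
# relationship() is declared here, so queries have to join explicitly. The
# hypothetical helper below assumes an open SQLAlchemy Session and returns
# (Site, Service) pairs for one agency's site services.
def _example_site_service_pairs(session, agency_id):
    """Hypothetical helper: join Site and Service through SiteService for one agency."""
    return (
        session.query(Site, Service)
        .select_from(SiteService)
        .join(Site, SiteService.site_index_id == Site.id)
        .join(Service, SiteService.service_index_id == Service.id)
        .filter(Site.agency_index_id == agency_id)
        .all()
    )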
class FundingSource(DB.Base, MapBase):
__tablename__ = 'funding_source'
id = Column(Integer, primary_key=True)
service_index_id = Column(Integer, ForeignKey('service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
service_event_index_id = Column(Integer, ForeignKey('service_event.id'))
funding_source_id_id_num = Column(String(50))
funding_source_id_id_str = Column(String(32))
funding_source_id_delete = Column(String(50))
funding_source_id_delete_occurred_date = Column(DateTime(timezone=False))
funding_source_id_delete_effective_date = Column(DateTime(timezone=False))
federal_cfda_number = Column(String(50))
receives_mckinney_funding = Column(String(50))
advance_or_arrears = Column(String(50))
financial_assistance_amount = Column(String(50))
useexisting = True
class ResourceInfo(DB.Base, MapBase):
__tablename__ = 'resource_info'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
resource_specialist = Column(String(50))
available_for_directory = Column(String(50))
available_for_referral = Column(String(50))
available_for_research = Column(String(50))
date_added = Column(DateTime(timezone=False))
date_last_verified = Column(DateTime(timezone=False))
date_of_last_action = Column(DateTime(timezone=False))
last_action_type = Column(String(50))
useexisting = True
class Inventory(DB.Base, MapBase):
__tablename__ = 'inventory'
id = Column(Integer, primary_key=True)
service_index_id = Column(Integer, ForeignKey(Service.id))
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
inventory_delete = Column(Integer)
inventory_delete_occurred_date = Column(DateTime(timezone=False))
inventory_delete_effective_date = Column(DateTime(timezone=False))
hmis_participation_period_start_date = Column(DateTime(timezone=False))
hmis_participation_period_end_date = Column(DateTime(timezone=False))
inventory_id_id_num = Column(String(50))
inventory_id_id_str = Column(String(32))
bed_inventory = Column(String(50))
bed_availability = Column(String(50))
bed_type = Column(String(50))
bed_individual_family_type = Column(String(50))
chronic_homeless_bed = Column(String(50))
domestic_violence_shelter_bed = Column(String(50))
household_type = Column(String(50))
hmis_participating_beds = Column(String(50))
inventory_effective_period_start_date = Column(DateTime(timezone=False))
inventory_effective_period_end_date = Column(DateTime(timezone=False))
inventory_recorded_date = Column(DateTime(timezone=False))
unit_inventory = Column(String(50))
useexisting = True
class AgeRequirements(DB.Base, MapBase):
__tablename__ = 'age_requirements'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
gender = Column(String(50))
minimum_age = Column(String(50))
maximum_age = Column(String(50))
useexisting = True
class AidRequirements(DB.Base, MapBase):
__tablename__ = 'aid_requirements'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
aid_requirements = Column(String(50))
useexisting = True
class Aka(DB.Base, MapBase):
__tablename__ = 'aka'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
# SBB20100914 Added Agency Location foreign key
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
name = Column(String(50))
confidential = Column(String(50))
description = Column(String(50))
useexisting = True
class ApplicationProcess(DB.Base, MapBase):
__tablename__ = 'application_process'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
step = Column(String(50))
description = Column(String(50))
useexisting = True
class Assignment(DB.Base, MapBase):
__tablename__ = 'assignment'
id = Column(Integer, primary_key=True)
hmis_asset_index_id = Column(Integer, ForeignKey('hmis_asset.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
assignment_id_id_num = Column(String(50))
assignment_id_id_str = Column(String(32))
assignment_id_delete = Column(Integer)
assignment_id_delete_occurred_date = Column(DateTime(timezone=False))
assignment_id_delete_effective_date = Column(DateTime(timezone=False))
person_id_id_num = Column(String(50))
person_id_id_str = Column(String(32))
household_id_id_num = Column(String(50))
household_id_id_str = Column(String(32))
useexisting = True
class AssignmentPeriod(DB.Base, MapBase):
__tablename__ = 'assignment_period'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
assignment_index_id = Column(Integer, ForeignKey(Assignment.id))
assignment_period_start_date = Column(DateTime(timezone=False))
assignment_period_end_date = Column(DateTime(timezone=False))
useexisting = True
class Call(DB.Base, MapBase):
__tablename__ = 'call'
id = Column(Integer, primary_key=True)
site_service_id = Column(String(50))
call_id_id_num = Column(String(50))
call_id_id_str = Column(String(32))
call_time = Column(DateTime(timezone=False))
call_duration = Column(Interval())
caseworker_id_id_num = Column(String(50))
caseworker_id_id_str = Column(String(32))
# FBY : TBC requested|required fields
caller_zipcode = Column(String(10))
caller_city = Column(String(128))
caller_state = Column(String(2))
caller_home_phone = Column(String(10))
useexisting = True
class ChildEnrollmentStatus(DB.Base, MapBase):
__tablename__ = 'child_enrollment_status'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
child_enrollment_status_id_id_num = Column(String(50))
child_enrollment_status_id_id_str = Column(String(32))
child_enrollment_status_id_delete = Column(Integer)
child_enrollment_status_id_delete_occurred_date = Column(DateTime(timezone=False))
child_enrollment_status_id_delete_effective_date = Column(DateTime(timezone=False))
child_currently_enrolled_in_school = Column(String(50))
child_currently_enrolled_in_school_date_effective = Column(DateTime(timezone=False))
child_currently_enrolled_in_school_date_collected = Column(DateTime(timezone=False))
child_currently_enrolled_in_school_data_collection_stage = Column(String(50))
child_school_name = Column(String(50))
child_school_name_date_effective = Column(DateTime(timezone=False))
child_school_name_date_collected = Column(DateTime(timezone=False))
child_school_name_data_collection_stage = Column(String(50))
child_mckinney_vento_liaison = Column(String(50))
child_mckinney_vento_liaison_date_effective = Column(DateTime(timezone=False))
child_mckinney_vento_liaison_date_collected = Column(DateTime(timezone=False))
child_mckinney_vento_liaison_data_collection_stage = Column(String(50))
child_school_type = Column(String(50))
child_school_type_date_effective = Column(DateTime(timezone=False))
child_school_type_date_collected = Column(DateTime(timezone=False))
child_school_type_data_collection_stage = Column(String(50))
child_school_last_enrolled_date = Column(DateTime(timezone=False))
child_school_last_enrolled_date_date_collected = Column(DateTime(timezone=False))
child_school_last_enrolled_date_data_collection_stage = Column(String(50))
useexisting = True
class ChildEnrollmentStatusBarrier(DB.Base, MapBase):
__tablename__ = 'child_enrollment_status_barrier'
id = Column(Integer, primary_key=True)
child_enrollment_status_index_id = Column(Integer, ForeignKey(ChildEnrollmentStatus.id))
export_index_id = Column(Integer, ForeignKey('export.id'))
barrier_id_id_num = Column(String(50))
barrier_id_id_str = Column(String(32))
barrier_id_delete = Column(Integer)
barrier_id_delete_occurred_date = Column(DateTime(timezone=False))
barrier_id_delete_effective_date = Column(DateTime(timezone=False))
barrier_code = Column(String(50))
barrier_code_date_collected = Column(DateTime(timezone=False))
barrier_code_date_effective = Column(DateTime(timezone=False))
barrier_code_data_collection_stage = Column(String(50))
barrier_other = Column(String(50))
barrier_other_date_collected = Column(DateTime(timezone=False))
barrier_other_date_effective = Column(DateTime(timezone=False))
barrier_other_data_collection_stage = Column(String(50))
useexisting = True
class ChronicHealthCondition(DB.Base, MapBase):
__tablename__ = 'chronic_health_condition'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
has_chronic_health_condition = Column(String(50))
has_chronic_health_condition_date_collected = Column(DateTime(timezone=False))
has_chronic_health_condition_date_effective = Column(DateTime(timezone=False))
has_chronic_health_condition_data_collection_stage = Column(String(50))
receive_chronic_health_services = Column(String(50))
receive_chronic_health_services_date_collected = Column(DateTime(timezone=False))
receive_chronic_health_services_date_effective = Column(DateTime(timezone=False))
receive_chronic_health_services_data_collection_stage = Column(String(50))
useexisting = True
class Contact(DB.Base, MapBase):
__tablename__ = 'contact'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
resource_info_index_id = Column(Integer, ForeignKey('resource_info.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
title = Column(String(50))
name = Column(String(50))
type = Column(String(50))
useexisting = True
class ContactMade(DB.Base, MapBase):
__tablename__ = 'contact_made'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
contact_id_id_num = Column(String(50))
contact_id_id_str = Column(String(32))
contact_id_delete = Column(Integer)
contact_id_delete_occurred_date = Column(DateTime(timezone=False))
contact_id_delete_effective_date = Column(DateTime(timezone=False))
contact_date = Column(DateTime(timezone=False))
contact_date_data_collection_stage = Column(String(50))
contact_location = Column(String(50))
contact_location_data_collection_stage = Column(String(50))
useexisting = True
class CrossStreet(DB.Base, MapBase):
__tablename__ = 'cross_street'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
cross_street = Column(String(50))
useexisting = True
class CurrentlyInSchool(DB.Base, MapBase):
__tablename__ = 'currently_in_school'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
currently_in_school = Column(String(50))
currently_in_school_date_collected = Column(DateTime(timezone=False))
currently_in_school_date_effective = Column(DateTime(timezone=False))
currently_in_school_data_collection_stage = Column(String(50))
useexisting = True
class LicenseAccreditation(DB.Base, MapBase):
__tablename__ = 'license_accreditation'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
license = Column(String(50))
licensed_by = Column(String(50))
useexisting = True
class MentalHealthProblem(DB.Base, MapBase):
__tablename__ = 'mental_health_problem'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_mental_health_problem = Column(String(50))
has_mental_health_problem_date_collected = Column(DateTime(timezone=False))
has_mental_health_problem_date_effective = Column(DateTime(timezone=False))
has_mental_health_problem_data_collection_stage = Column(String(50))
mental_health_indefinite = Column(String(50))
mental_health_indefinite_date_collected = Column(DateTime(timezone=False))
mental_health_indefinite_date_effective = Column(DateTime(timezone=False))
mental_health_indefinite_data_collection_stage = Column(String(50))
receive_mental_health_services = Column(String(50))
receive_mental_health_services_date_collected = Column(DateTime(timezone=False))
receive_mental_health_services_date_effective = Column(DateTime(timezone=False))
receive_mental_health_services_data_collection_stage = Column(String(50))
useexisting = True
class NonCashBenefits(DB.Base, MapBase):
__tablename__ = 'non_cash_benefits'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
non_cash_benefit_id_id_num = Column(String(50))
non_cash_benefit_id_id_str = Column(String(32))
non_cash_benefit_id_id_delete = Column(Integer)
non_cash_benefit_id_id_delete_occurred_date = Column(DateTime(timezone=False))
non_cash_benefit_id_id_delete_effective_date = Column(DateTime(timezone=False))
non_cash_source_code = Column(String(50))
non_cash_source_code_date_collected = Column(DateTime(timezone=False))
non_cash_source_code_date_effective = Column(DateTime(timezone=False))
non_cash_source_code_data_collection_stage = Column(String(50))
non_cash_source_other = Column(String(50))
non_cash_source_other_date_collected = Column(DateTime(timezone=False))
non_cash_source_other_date_effective = Column(DateTime(timezone=False))
non_cash_source_other_data_collection_stage = Column(String(50))
receiving_non_cash_source = Column(String(50))
receiving_non_cash_source_date_collected = Column(DateTime(timezone=False))
receiving_non_cash_source_date_effective = Column(DateTime(timezone=False))
receiving_non_cash_source_data_collection_stage = Column(String(50))
useexisting = True
class AgencyLocation(DB.Base, MapBase):
__tablename__ = 'agency_location'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
key = Column(String(50))
name = Column(String(50))
site_description = Column(String(50))
physical_address_pre_address_line = Column(String(100))
physical_address_line_1 = Column(String(100))
physical_address_line_2 = Column(String(100))
physical_address_city = Column(String(50))
physical_address_country = Column(String(50))
physical_address_state = Column(String(50))
physical_address_zip_code = Column(String(50))
physical_address_county = Column(String(50))
physical_address_reason_withheld = Column(String(50))
physical_address_confidential = Column(String(50))
physical_address_description = Column(String(50))
mailing_address_pre_address_line = Column(String(100))
mailing_address_line_1 = Column(String(100))
mailing_address_line_2 = Column(String(100))
mailing_address_city = Column(String(50))
mailing_address_county = Column(String(50))
mailing_address_state = Column(String(50))
mailing_address_zip_code = Column(String(50))
mailing_address_country = Column(String(50))
mailing_address_reason_withheld = Column(String(50))
mailing_address_confidential = Column(String(50))
mailing_address_description = Column(String(50))
no_physical_address_description = Column(String(50))
no_physical_address_explanation = Column(String(50))
disabilities_access = Column(String(50))
physical_location_description = Column(String(50))
bus_service_access = Column(String(50))
public_access_to_transportation = Column(String(50))
year_inc = Column(String(50))
annual_budget_total = Column(String(50))
legal_status = Column(String(50))
exclude_from_website = Column(String(50))
exclude_from_directory = Column(String(50))
useexisting = True
class AgencyService(DB.Base, MapBase):
__tablename__ = 'agency_service'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
key = Column(String(50))
agency_key = Column(String(50))
name = Column(String(50))
useexisting = True
class NonCashBenefitsLast30Days(DB.Base, MapBase):
__tablename__ = 'non_cash_benefits_last_30_days'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
income_last_30_days = Column(String(50))
income_last_30_days_date_collected = Column(DateTime(timezone=False))
income_last_30_days_date_effective = Column(DateTime(timezone=False))
income_last_30_days_data_collection_stage = Column(String(50))
useexisting = True
class OtherAddress(DB.Base, MapBase):
__tablename__ = 'other_address'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
pre_address_line = Column(String(100))
line_1 = Column(String(100))
line_2 = Column(String(100))
city = Column(String(50))
county = Column(String(50))
state = Column(String(50))
zip_code = Column(String(50))
country = Column(String(50))
reason_withheld = Column(String(50))
confidential = Column(String(50))
description = Column(String(50))
useexisting = True
class OtherRequirements(DB.Base, MapBase):
__tablename__ = 'other_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
other_requirements = Column(String(50))
useexisting = True
class Phone(DB.Base, MapBase):
__tablename__ = 'phone'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
contact_index_id = Column(Integer, ForeignKey(Contact.id))
resource_info_index_id = Column(Integer, ForeignKey('resource_info.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
phone_number = Column(String(50))
reason_withheld = Column(String(50))
extension = Column(String(50))
description = Column(String(50))
type = Column(String(50))
function = Column(String(50))
toll_free = Column(String(50))
confidential = Column(String(50))
person_phone_number = Column(String(50))
person_phone_number_date_collected = Column(DateTime(timezone=False))
person_phone_number_date_effective = Column(DateTime(timezone=False))
person_phone_number_data_collection_stage = Column(String(50))
useexisting = True
class PhysicalDisability(DB.Base, MapBase):
__tablename__ = 'physical_disability'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
has_physical_disability = Column(String(50))
has_physical_disability_date_collected = Column(DateTime(timezone=False))
has_physical_disability_date_effective = Column(DateTime(timezone=False))
has_physical_disability_data_collection_stage = Column(String(50))
receive_physical_disability_services = Column(String(50))
receive_physical_disability_services_date_collected = Column(DateTime(timezone=False))
receive_physical_disability_services_date_effective = Column(DateTime(timezone=False))
receive_physical_disability_services_data_collection_stage = Column(String(50))
useexisting = True
class PitCountSet(DB.Base, MapBase):
__tablename__ = 'pit_count_set'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
pit_count_set_id_id_num = Column(String(50))
pit_count_set_id_id_str = Column(String(32))
pit_count_set_id_delete = Column(Integer)
pit_count_set_id_delete_occurred_date = Column(DateTime(timezone=False))
pit_count_set_id_delete_effective_date = Column(DateTime(timezone=False))
hud_waiver_received = Column(String(50))
hud_waiver_date = Column(DateTime(timezone=False))
hud_waiver_effective_period_start_date = Column(DateTime(timezone=False))
hud_waiver_effective_period_end_date = Column(DateTime(timezone=False))
last_pit_sheltered_count_date = Column(DateTime(timezone=False))
last_pit_unsheltered_count_date = Column(DateTime(timezone=False))
useexisting = True
class PitCounts(DB.Base, MapBase):
__tablename__ = 'pit_counts'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
pit_count_set_index_id = Column(Integer, ForeignKey(PitCountSet.id))
pit_count_value = Column(String(50))
pit_count_effective_period_start_date = Column(DateTime(timezone=False))
pit_count_effective_period_end_date = Column(DateTime(timezone=False))
pit_count_recorded_date = Column(DateTime(timezone=False))
pit_count_household_type = Column(String(50))
useexisting = True
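# Illustrative sketch, not part of the original schema: PitCounts references its
# parent with ForeignKey(PitCountSet.id) (a class attribute rather than the
# 'table.column' strings used elsewhere), so the rows for one count set can be
# fetched with a plain filter. The helper below is hypothetical and assumes an
# open SQLAlchemy Session.
def _example_counts_for_set(session, pit_count_set_id):
    """Hypothetical helper: list PitCounts rows belonging to one PitCountSet."""
    return (
        session.query(PitCounts)
        .filter(PitCounts.pit_count_set_index_id == pit_count_set_id)
        .order_by(PitCounts.pit_count_recorded_date)
        .all()
    )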
class Pregnancy(DB.Base, MapBase):
__tablename__ = 'pregnancy'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
pregnancy_id_id_num = Column(String(50))
pregnancy_id_id_str = Column(String(32))
pregnancy_id_id_delete = Column(Integer)
pregnancy_id_id_delete_occurred_date = Column(DateTime(timezone=False))
pregnancy_id_id_delete_effective_date = Column(DateTime(timezone=False))
pregnancy_status = Column(String(50))
pregnancy_status_date_collected = Column(DateTime(timezone=False))
pregnancy_status_date_effective = Column(DateTime(timezone=False))
pregnancy_status_data_collection_stage = Column(String(50))
due_date = Column(DateTime(timezone=False))
due_date_date_collected = Column(DateTime(timezone=False))
due_date_data_collection_stage = Column(String(50))
useexisting = True
class Degree(DB.Base, MapBase):
__tablename__ = 'degree'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
degree_id_id_num = Column(String(50))
degree_id_id_str = Column(String(32))
degree_id_delete = Column(Integer)
degree_id_delete_occurred_date = Column(DateTime(timezone=False))
degree_id_delete_effective_date = Column(DateTime(timezone=False))
degree_other = Column(String(50))
degree_other_date_collected = Column(DateTime(timezone=False))
degree_other_date_effective = Column(DateTime(timezone=False))
degree_other_data_collection_stage = Column(String(50))
useexisting = True
class PriorResidence(DB.Base, MapBase):
__tablename__ = 'prior_residence'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
prior_residence_id_id_num = Column(String(50))
prior_residence_id_id_str = Column(String(32))
prior_residence_id_delete = Column(Integer)
prior_residence_id_delete_occurred_date = Column(DateTime(timezone=False))
prior_residence_id_delete_effective_date = Column(DateTime(timezone=False))
prior_residence_code = Column(String(50))
prior_residence_code_date_collected = Column(DateTime(timezone=False))
prior_residence_code_date_effective = Column(DateTime(timezone=False))
prior_residence_code_data_collection_stage = Column(String(50))
prior_residence_other = Column(String(50))
prior_residence_other_date_collected = Column(DateTime(timezone=False))
prior_residence_other_date_effective = Column(DateTime(timezone=False))
prior_residence_other_data_collection_stage = Column(String(50))
useexisting = True
class DegreeCode(DB.Base, MapBase):
__tablename__ = 'degree_code'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
degree_index_id = Column(Integer, ForeignKey(Degree.id))
degree_code = Column(String(50))
degree_date_collected = Column(DateTime(timezone=False))
degree_date_effective = Column(DateTime(timezone=False))
degree_data_collection_stage = Column(String(50))
useexisting = True
class Destinations(DB.Base, MapBase):
__tablename__ = 'destinations'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
destination_id_id_num = Column(String(50))
destination_id_id_str = Column(String(32))
destination_id_delete = Column(Integer)
destination_id_delete_occurred_date = Column(DateTime(timezone=False))
destination_id_delete_effective_date = Column(DateTime(timezone=False))
destination_code = Column(String(50))
destination_code_date_collected = Column(DateTime(timezone=False))
destination_code_date_effective = Column(DateTime(timezone=False))
destination_code_data_collection_stage = Column(String(50))
destination_other = Column(String(50))
destination_other_date_collected = Column(DateTime(timezone=False))
destination_other_date_effective = Column(DateTime(timezone=False))
destination_other_data_collection_stage = Column(String(50))
useexisting = True
class ReasonsForLeaving(DB.Base, MapBase):
__tablename__ = 'reasons_for_leaving'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id'))
reason_for_leaving_id_id_num = Column(String(50))
reason_for_leaving_id_id_str = Column(String(32))
reason_for_leaving_id_delete = Column(Integer)
reason_for_leaving_id_delete_occurred_date = Column(DateTime(timezone=False))
reason_for_leaving_id_delete_effective_date = Column(DateTime(timezone=False))
reason_for_leaving = Column(String(50))
reason_for_leaving_date_collected = Column(DateTime(timezone=False))
reason_for_leaving_date_effective = Column(DateTime(timezone=False))
reason_for_leaving_data_collection_stage = Column(String(50))
reason_for_leaving_other = Column(String(50))
reason_for_leaving_other_date_collected = Column(DateTime(timezone=False))
reason_for_leaving_other_date_effective = Column(DateTime(timezone=False))
reason_for_leaving_other_data_collection_stage = Column(String(50))
useexisting = True
class DevelopmentalDisability(DB.Base, MapBase):
__tablename__ = 'developmental_disability'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_developmental_disability = Column(String(50))
has_developmental_disability_date_collected = Column(DateTime(timezone=False))
has_developmental_disability_date_effective = Column(DateTime(timezone=False))
has_developmental_disability_data_collection_stage = Column(String(50))
receive_developmental_disability = Column(String(50))
receive_developmental_disability_date_collected = Column(DateTime(timezone=False))
receive_developmental_disability_date_effective = Column(DateTime(timezone=False))
receive_developmental_disability_data_collection_stage = Column(String(50))
useexisting = True
class DisablingCondition(DB.Base, MapBase):
__tablename__ = 'disabling_condition'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
disabling_condition = Column(String(50))
disabling_condition_date_collected = Column(DateTime(timezone=False))
disabling_condition_date_effective = Column(DateTime(timezone=False))
disabling_condition_data_collection_stage = Column(String(50))
useexisting = True
class DocumentsRequired(DB.Base, MapBase):
__tablename__ = 'documents_required'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
documents_required = Column(String(50))
description = Column(String(50))
useexisting = True
class ResidencyRequirements(DB.Base, MapBase):
__tablename__ = 'residency_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
residency_requirements = Column(String(50))
useexisting = True
class DomesticViolence(DB.Base, MapBase):
__tablename__ = 'domestic_violence'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
domestic_violence_survivor = Column(String(50))
domestic_violence_survivor_date_collected = Column(DateTime(timezone=False))
domestic_violence_survivor_date_effective = Column(DateTime(timezone=False))
domestic_violence_survivor_data_collection_stage = Column(String(50))
dv_occurred = Column(String(50))
dv_occurred_date_collected = Column(DateTime(timezone=False))
dv_occurred_date_effective = Column(DateTime(timezone=False))
dv_occurred_data_collection_stage = Column(String(50))
useexisting = True
class Email(DB.Base, MapBase):
__tablename__ = 'email'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
contact_index_id = Column(Integer, ForeignKey(Contact.id))
resource_info_index_id = Column(Integer, ForeignKey('resource_info.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
address = Column(String(100))
note = Column(String(50))
person_email = Column(String(50))
person_email_date_collected = Column(DateTime(timezone=False))
person_email_date_effective = Column(DateTime(timezone=False))
person_email_data_collection_stage = Column(String(50))
useexisting = True
class Seasonal(DB.Base, MapBase):
__tablename__ = 'seasonal'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
description = Column(String(50))
start_date = Column(String(50))
end_date = Column(String(50))
useexisting = True
class Employment(DB.Base, MapBase):
__tablename__ = 'employment'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
employment_id_id_num = Column(String(50))
employment_id_id_str = Column(String(32))
employment_id_id_delete = Column(Integer)
employment_id_id_delete_occurred_date = Column(DateTime(timezone=False))
employment_id_id_delete_effective_date = Column(DateTime(timezone=False))
currently_employed = Column(String(50))
currently_employed_date_collected = Column(DateTime(timezone=False))
currently_employed_date_effective = Column(DateTime(timezone=False))
currently_employed_data_collection_stage = Column(String(50))
hours_worked_last_week = Column(String(50))
hours_worked_last_week_date_collected = Column(DateTime(timezone=False))
hours_worked_last_week_date_effective = Column(DateTime(timezone=False))
hours_worked_last_week_data_collection_stage = Column(String(50))
employment_tenure = Column(String(50))
employment_tenure_date_collected = Column(DateTime(timezone=False))
employment_tenure_date_effective = Column(DateTime(timezone=False))
employment_tenure_data_collection_stage = Column(String(50))
looking_for_work = Column(String(50))
looking_for_work_date_collected = Column(DateTime(timezone=False))
looking_for_work_date_effective = Column(DateTime(timezone=False))
looking_for_work_data_collection_stage = Column(String(50))
useexisting = True
class EngagedDate(DB.Base, MapBase):
__tablename__ = 'engaged_date'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
engaged_date = Column(DateTime(timezone=False))
engaged_date_date_collected = Column(DateTime(timezone=False))
engaged_date_data_collection_stage = Column(String(50))
useexisting = True
class ServiceEventNotes(DB.Base, MapBase):
__tablename__ = 'service_event_notes'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
service_event_index_id = Column(Integer, ForeignKey('service_event.id'))
note_id_id_num = Column(String(50))
note_id_id_str = Column(String(32))
note_delete = Column(Integer)
note_delete_occurred_date = Column(DateTime(timezone=False))
note_delete_effective_date = Column(DateTime(timezone=False))
note_text = Column(String(255))
note_text_date_collected = Column(DateTime(timezone=False))
note_text_date_effective = Column(DateTime(timezone=False))
note_text_data_collection_stage = Column(String(50))
useexisting = True
class FamilyRequirements(DB.Base, MapBase):
__tablename__ = 'family_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
family_requirements = Column(String(50))
useexisting = True
class ServiceGroup(DB.Base, MapBase):
__tablename__ = 'service_group'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
key = Column(String(50))
name = Column(String(50))
program_name = Column(String(50))
useexisting = True
class GeographicAreaServed(DB.Base, MapBase):
__tablename__ = 'geographic_area_served'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
zipcode = Column(String(50))
census_track = Column(String(50))
city = Column(String(50))
county = Column(String(50))
state = Column(String(50))
country = Column(String(50))
description = Column(String(50))
useexisting = True
class HealthStatus(DB.Base, MapBase):
__tablename__ = 'health_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
health_status = Column(String(50))
health_status_date_collected = Column(DateTime(timezone=False))
health_status_date_effective = Column(DateTime(timezone=False))
health_status_data_collection_stage = Column(String(50))
useexisting = True
class HighestSchoolLevel(DB.Base, MapBase):
__tablename__ = 'highest_school_level'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
highest_school_level = Column(String(50))
highest_school_level_date_collected = Column(DateTime(timezone=False))
highest_school_level_date_effective = Column(DateTime(timezone=False))
highest_school_level_data_collection_stage = Column(String(50))
useexisting = True
class HivAidsStatus(DB.Base, MapBase):
__tablename__ = 'hiv_aids_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_hiv_aids = Column(String(50))
has_hiv_aids_date_collected = Column(DateTime(timezone=False))
has_hiv_aids_date_effective = Column(DateTime(timezone=False))
has_hiv_aids_data_collection_stage = Column(String(50))
receive_hiv_aids_services = Column(String(50))
receive_hiv_aids_services_date_collected = Column(DateTime(timezone=False))
receive_hiv_aids_services_date_effective = Column(DateTime(timezone=False))
receive_hiv_aids_services_data_collection_stage = Column(String(50))
useexisting = True
class SpatialLocation(DB.Base, MapBase):
__tablename__ = 'spatial_location'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
description = Column(String(50))
datum = Column(String(50))
latitude = Column(String(50))
longitude = Column(String(50))
useexisting = True
class HmisAsset(DB.Base, MapBase):
__tablename__ = 'hmis_asset'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
asset_id_id_num = Column(String(50))
asset_id_id_str = Column(String(32))
asset_id_delete = Column(Integer)
asset_id_delete_occurred_date = Column(DateTime(timezone=False))
asset_id_delete_effective_date = Column(DateTime(timezone=False))
asset_count = Column(String(50))
asset_count_bed_availability = Column(String(50))
asset_count_bed_type = Column(String(50))
asset_count_bed_individual_family_type = Column(String(50))
asset_count_chronic_homeless_bed = Column(String(50))
asset_count_domestic_violence_shelter_bed = Column(String(50))
asset_count_household_type = Column(String(50))
asset_type = Column(String(50))
asset_effective_period_start_date = Column(DateTime(timezone=False))
asset_effective_period_end_date = Column(DateTime(timezone=False))
asset_recorded_date = Column(DateTime(timezone=False))
useexisting = True
class SubstanceAbuseProblem(DB.Base, MapBase):
__tablename__ = 'substance_abuse_problem'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_substance_abuse_problem = Column(String(50))
has_substance_abuse_problem_date_collected = Column(DateTime(timezone=False))
has_substance_abuse_problem_date_effective = Column(DateTime(timezone=False))
has_substance_abuse_problem_data_collection_stage = Column(String(50))
substance_abuse_indefinite = Column(String(50))
substance_abuse_indefinite_date_collected = Column(DateTime(timezone=False))
substance_abuse_indefinite_date_effective = Column(DateTime(timezone=False))
substance_abuse_indefinite_data_collection_stage = Column(String(50))
receive_substance_abuse_services = Column(String(50))
receive_substance_abuse_services_date_collected = Column(DateTime(timezone=False))
receive_substance_abuse_services_date_effective = Column(DateTime(timezone=False))
receive_substance_abuse_services_data_collection_stage = Column(String(50))
useexisting = True
class HousingStatus(DB.Base, MapBase):
__tablename__ = 'housing_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
housing_status = Column(String(50))
housing_status_date_collected = Column(DateTime(timezone=False))
housing_status_date_effective = Column(DateTime(timezone=False))
housing_status_data_collection_stage = Column(String(50))
useexisting = True
class Taxonomy(DB.Base, MapBase):
__tablename__ = 'taxonomy'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
need_index_id = Column(Integer, ForeignKey('need.id'))
code = Column(String(300))
useexisting = True
class HudChronicHomeless(DB.Base, MapBase):
__tablename__ = 'hud_chronic_homeless'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
hud_chronic_homeless = Column(String(50))
hud_chronic_homeless_date_collected = Column(DateTime(timezone=False))
hud_chronic_homeless_date_effective = Column(DateTime(timezone=False))
hud_chronic_homeless_data_collection_stage = Column(String(50))
useexisting = True
class TimeOpen(DB.Base, MapBase):
__tablename__ = 'time_open'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
languages_index_id = Column(Integer, ForeignKey('languages.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
notes = Column(String(50))
useexisting = True
class TimeOpenDays(DB.Base, MapBase):
__tablename__ = 'time_open_days'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
time_open_index_id = Column(Integer, ForeignKey(TimeOpen.id))
day_of_week = Column(String(50))
from_time = Column(String(50))
to_time = Column(String(50))
useexisting = True
class Url(DB.Base, MapBase):
__tablename__ = 'url'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
address = Column(String(50))
note = Column(String(50))
useexisting = True
class VeteranMilitaryBranches(DB.Base, MapBase):
__tablename__ = 'veteran_military_branches'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
military_branch_id_id_num = Column(String(50))
military_branch_id_id_str = Column(String(32))
military_branch_id_id_delete = Column(Integer)
military_branch_id_id_delete_occurred_date = Column(DateTime(timezone=False))
military_branch_id_id_delete_effective_date = Column(DateTime(timezone=False))
discharge_status = Column(String(50))
discharge_status_date_collected = Column(DateTime(timezone=False))
discharge_status_date_effective = Column(DateTime(timezone=False))
discharge_status_data_collection_stage = Column(String(50))
discharge_status_other = Column(String(50))
discharge_status_other_date_collected = Column(DateTime(timezone=False))
discharge_status_other_date_effective = Column(DateTime(timezone=False))
discharge_status_other_data_collection_stage = Column(String(50))
military_branch = Column(String(50))
military_branch_date_collected = Column(DateTime(timezone=False))
military_branch_date_effective = Column(DateTime(timezone=False))
military_branch_data_collection_stage = Column(String(50))
military_branch_other = Column(String(50))
military_branch_other_date_collected = Column(DateTime(timezone=False))
military_branch_other_date_effective = Column(DateTime(timezone=False))
military_branch_other_data_collection_stage = Column(String(50))
useexisting = True
class IncomeLast30Days(DB.Base, MapBase):
__tablename__ = 'income_last_30_days'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
income_last_30_days = Column(String(50))
income_last_30_days_date_collected = Column(DateTime(timezone=False))
income_last_30_days_date_effective = Column(DateTime(timezone=False))
income_last_30_days_data_collection_stage = Column(String(50))
useexisting = True
class VeteranMilitaryServiceDuration(DB.Base, MapBase):
__tablename__ = 'veteran_military_service_duration'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
military_service_duration = Column(String(50))
military_service_duration_date_collected = Column(DateTime(timezone=False))
military_service_duration_date_effective = Column(DateTime(timezone=False))
military_service_duration_data_collection_stage = Column(String(50))
useexisting = True
class IncomeRequirements(DB.Base, MapBase):
__tablename__ = 'income_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
income_requirements = Column(String(50))
useexisting = True
class VeteranServedInWarZone(DB.Base, MapBase):
__tablename__ = 'veteran_served_in_war_zone'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
served_in_war_zone = Column(String(50))
served_in_war_zone_date_collected = Column(DateTime(timezone=False))
served_in_war_zone_date_effective = Column(DateTime(timezone=False))
served_in_war_zone_data_collection_stage = Column(String(50))
useexisting = True
class IncomeTotalMonthly(DB.Base, MapBase):
__tablename__ = 'income_total_monthly'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
income_total_monthly = Column(String(50))
income_total_monthly_date_collected = Column(DateTime(timezone=False))
income_total_monthly_date_effective = Column(DateTime(timezone=False))
income_total_monthly_data_collection_stage = Column(String(50))
useexisting = True
class VeteranServiceEra(DB.Base, MapBase):
__tablename__ = 'veteran_service_era'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
service_era = Column(String(50))
service_era_date_collected = Column(DateTime(timezone=False))
service_era_date_effective = Column(DateTime(timezone=False))
service_era_data_collection_stage = Column(String(50))
useexisting = True
class VeteranVeteranStatus(DB.Base, MapBase):
__tablename__ = 'veteran_veteran_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
veteran_status = Column(String(50))
veteran_status_date_collected = Column(DateTime(timezone=False))
veteran_status_date_effective = Column(DateTime(timezone=False))
veteran_status_data_collection_stage = Column(String(50))
useexisting = True
class Languages(DB.Base, MapBase):
__tablename__ = 'languages'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
name = Column(String(50))
notes = Column(String(50))
useexisting = True
class VeteranWarzonesServed(DB.Base, MapBase):
__tablename__ = 'veteran_warzones_served'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
war_zone_id_id_num = Column(String(50))
war_zone_id_id_str = Column(String(32))
war_zone_id_id_delete = Column(Integer)
war_zone_id_id_delete_occurred_date = Column(DateTime(timezone=False))
war_zone_id_id_delete_effective_date = Column(DateTime(timezone=False))
months_in_war_zone = Column(String(50))
months_in_war_zone_date_collected = Column(DateTime(timezone=False))
months_in_war_zone_date_effective = Column(DateTime(timezone=False))
months_in_war_zone_data_collection_stage = Column(String(50))
received_fire = Column(String(50))
received_fire_date_collected = Column(DateTime(timezone=False))
received_fire_date_effective = Column(DateTime(timezone=False))
received_fire_data_collection_stage = Column(String(50))
war_zone = Column(String(50))
war_zone_date_collected = Column(DateTime(timezone=False))
war_zone_date_effective = Column(DateTime(timezone=False))
war_zone_data_collection_stage = Column(String(50))
war_zone_other = Column(String(50))
war_zone_other_date_collected = Column(DateTime(timezone=False))
war_zone_other_date_effective = Column(DateTime(timezone=False))
war_zone_other_data_collection_stage = Column(String(50))
useexisting = True
class LengthOfStayAtPriorResidence(DB.Base, MapBase):
__tablename__ = 'length_of_stay_at_prior_residence'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
length_of_stay_at_prior_residence = Column(String(50))
length_of_stay_at_prior_residence_date_collected = Column(DateTime(timezone=False))
length_of_stay_at_prior_residence_date_effective = Column(DateTime(timezone=False))
length_of_stay_at_prior_residence_data_collection_stage = Column(String(50))
useexisting = True
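# Generic __repr__: renders every non-private instance attribute as "name = value" pairs.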
def __repr__(self):
field_dict = vars(self)
out = ''
if len(field_dict) > 0:
for x, y in field_dict.items():
if x[0] != "_":
out = out + "%s = %s, " % (x,y)
return "<%s(%s)>" % (self.__class__.__name__, out)
else:
return ''
class VocationalTraining(DB.Base, MapBase):
__tablename__ = 'vocational_training'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
vocational_training = Column(String(50))
vocational_training_date_collected = Column(DateTime(timezone=False))
vocational_training_date_effective = Column(DateTime(timezone=False))
vocational_training_data_collection_stage = Column(String(50))
useexisting = True
class Export(DB.Base, MapBase):
__tablename__ = 'export'
id = Column(Integer, primary_key=True)
export_id = Column(String(50), primary_key=False, unique=False)
export_id_date_collected = Column(DateTime(timezone=False))
export_date = Column(DateTime(timezone=False))
export_date_date_collected = Column(DateTime(timezone=False))
export_period_start_date = Column(DateTime(timezone=False))
export_period_start_date_date_collected = Column(DateTime(timezone=False))
export_period_end_date = Column(DateTime(timezone=False))
export_period_end_date_date_collected = Column(DateTime(timezone=False))
export_software_vendor = Column(String(50))
export_software_vendor_date_collected = Column(DateTime(timezone=False))
export_software_version = Column(String(10))
export_software_version_date_collected = Column(DateTime(timezone=False))
#HUD 3.0
export_id_id_num = Column(String(50))
export_id_id_str = Column(String(50))
export_id_delete_occurred_date = Column(DateTime(timezone=False))
export_id_delete_effective_date = Column(DateTime(timezone=False))
export_id_delete = Column(String(32))
fk_export_to_person = relationship('Person', backref='fk_person_to_export')
#fk_export_to_household = relationship('Household', backref='fk_household_to_export')
# 'fk_export_to_database': relation(Source, backref='fk_database_to_export')
useexisting = True
class Report(DB.Base, MapBase):
__tablename__ = 'report'
report_id = Column(String(50), primary_key=True, unique=True)
report_id_date_collected = Column(DateTime(timezone=False))
report_date = Column(DateTime(timezone=False))
report_date_date_collected = Column(DateTime(timezone=False))
report_period_start_date = Column(DateTime(timezone=False))
report_period_start_date_date_collected = Column(DateTime(timezone=False))
report_period_end_date = Column(DateTime(timezone=False))
report_period_end_date_date_collected = Column(DateTime(timezone=False))
report_software_vendor = Column(String(50))
report_software_vendor_date_collected = Column(DateTime(timezone=False))
report_software_version = Column(String(10))
report_software_version_date_collected = Column(DateTime(timezone=False))
#HUD 3.0
report_id_id_num = Column(String(50))
report_id_id_str = Column(String(50))
report_id_id_delete_occurred_date = Column(DateTime(timezone=False))
report_id_id_delete_effective_date = Column(DateTime(timezone=False))
report_id_id_delete = Column(String(32))
export_index_id = Column(Integer, ForeignKey('export.id'))
#fk_report_to_person = relationship('Person', backref='fk_person_to_report')
#fk_report_to_household = relationship('Household', backref='fk_household_to_report')
#fk_report_to_database = relationship('Source', backref='fk_database_to_report')
useexisting = True
class FosterChildEver(DB.Base, MapBase):
__tablename__ = 'foster_child_ever'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
foster_child_ever = Column(Integer)
foster_child_ever_date_collected = Column(DateTime(timezone=False))
foster_child_ever_date_effective = Column(DateTime(timezone=False))
useexisting = True
class Household(DB.Base, MapBase):
__tablename__ = 'household'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_id = Column(String(50), ForeignKey('report.report_id'))
household_id_num = Column(String(32))
household_id_num_date_collected = Column(DateTime(timezone=False))
household_id_str = Column(String(32))
household_id_str_date_collected = Column(DateTime(timezone=False))
head_of_household_id_unhashed = Column(String(32))
head_of_household_id_unhashed_date_collected = Column(DateTime(timezone=False))
head_of_household_id_hashed = Column(String(32))
head_of_household_id_hashed_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
fk_household_to_members = relationship('Members', backref='fk_members_to_household')
class Person(DB.Base, MapBase):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_id = Column(String(50), ForeignKey('report.report_id'))
person_id_hashed = Column(String(32))
person_id_unhashed = Column(String(50))
person_id_date_collected = Column(DateTime(timezone=False))
person_date_of_birth_hashed = Column(String(32))
person_date_of_birth_hashed_date_collected = Column(DateTime(timezone=False))
person_date_of_birth_unhashed = Column(DateTime(timezone=False))
person_date_of_birth_unhashed_date_collected = Column(DateTime(timezone=False))
person_ethnicity_hashed = Column(String(32))
person_ethnicity_unhashed = Column(Integer)
person_ethnicity_hashed_date_collected = Column(DateTime(timezone=False))
person_ethnicity_unhashed_date_collected = Column(DateTime(timezone=False))
person_gender_hashed = Column(String(32))
person_gender_unhashed = Column(Integer)
person_gender_hashed_date_collected = Column(DateTime(timezone=False))
person_gender_unhashed_date_collected = Column(DateTime(timezone=False))
person_gender_unhashed_date_effective = Column(DateTime(timezone=False))
person_gender_hashed_date_effective = Column(DateTime(timezone=False))
person_legal_first_name_hashed = Column(String(32))
person_legal_first_name_unhashed = Column(String(50))
person_legal_first_name_hashed_date_collected = Column(DateTime(timezone=False))
person_legal_first_name_hashed_date_effective = Column(DateTime(timezone=False))
person_legal_first_name_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_first_name_unhashed_date_effective = Column(DateTime(timezone=False)) # JCS Added
person_legal_last_name_hashed = Column(String(32))
person_legal_last_name_unhashed = Column(String(50))
person_legal_last_name_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_last_name_unhashed_date_effective = Column(DateTime(timezone=False))
person_legal_last_name_hashed_date_collected = Column(DateTime(timezone=False))
person_legal_middle_name_hashed = Column(String(32))
person_legal_middle_name_unhashed = Column(String(50))
person_legal_middle_name_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_middle_name_hashed_date_collected = Column(DateTime(timezone=False))
person_legal_suffix_hashed = Column(String(32))
person_legal_suffix_unhashed = Column(String(50))
person_legal_suffix_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_suffix_hashed_date_collected = Column(DateTime(timezone=False))
#OtherNames is in its own table as there can be multiple OtherNames
#Race is in its own table as there can be multiple races
person_social_security_number_hashed = Column(String(32))
person_social_security_number_unhashed = Column(String(9))
person_social_security_number_unhashed_date_collected = Column(DateTime(timezone=False))
person_social_security_number_hashed_date_effective = Column(DateTime(timezone=False))
person_social_security_number_unhashed_date_effective = Column(DateTime(timezone=False))
person_social_security_number_hashed_date_collected = Column(DateTime(timezone=False))
person_social_security_number_quality_code = Column(String(2))
person_social_security_number_quality_code_date_collected = Column(DateTime(timezone=False))
person_social_security_number_quality_code_date_effective = Column(DateTime(timezone=False))
#PersonHistorical has its own table
#SiteServiceParticipation has its own table
#ReleaseOfInformation has its own table
reported = Column(Boolean)
# HUD 3.0
person_id_id_num = Column(String(50))
person_id_id_str = Column(String(50))
person_id_delete = Column(String(32))
person_id_delete_occurred_date = Column(DateTime(timezone=False))
person_id_delete_effective_date = Column(DateTime(timezone=False))
person_date_of_birth_type = Column(Integer)
person_date_of_birth_type_date_collected = Column(DateTime(timezone=False))
fk_person_to_other_names = relationship('OtherNames', backref='fk_other_names_to_person')
site_service_participations = relationship("SiteServiceParticipation", backref="person")
fk_person_to_person_historical = relationship('PersonHistorical', backref='fk_person_historical_to_person')
fk_person_to_release_of_information = relationship('ReleaseOfInformation', backref='fk_release_of_information_to_person')
fk_person_to_races = relationship('Races', backref='fk_races_to_person')
useexisting = True
#class DeduplicationLink(DB.Base, MapBase):
class ServiceEvent(DB.Base, MapBase):
__tablename__ = 'service_event'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
household_index_id = Column(Integer, ForeignKey('household.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
need_index_id = Column(Integer, ForeignKey('need.id'))
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id'))
service_event_idid_num = Column(String(32))
service_event_idid_num_date_collected = Column(DateTime(timezone=False))
service_event_idid_str = Column(String(32))
service_event_idid_str_date_collected = Column(DateTime(timezone=False))
household_idid_num = Column(String(32))
is_referral = Column(String(32))
is_referral_date_collected = Column(DateTime(timezone=False))
quantity_of_service = Column(String(32))
quantity_of_service_date_collected = Column(DateTime(timezone=False))
quantity_of_service_measure = Column(String(32))
quantity_of_service_measure_date_collected = Column(DateTime(timezone=False))
service_airs_code = Column(String(300))
service_airs_code_date_collected = Column(DateTime(timezone=False))
service_period_start_date = Column(DateTime(timezone=False))
service_period_start_date_date_collected = Column(DateTime(timezone=False))
service_period_end_date = Column(DateTime(timezone=False))
service_period_end_date_date_collected = Column(DateTime(timezone=False))
service_unit = Column(String(32))
service_unit_date_collected = Column(DateTime(timezone=False))
type_of_service = Column(String(32))
type_of_service_date_collected = Column(DateTime(timezone=False))
type_of_service_other = Column(String(32))
type_of_service_other_date_collected = Column(DateTime(timezone=False))
type_of_service_par = Column(Integer)
#adding a reported column. Hopefully this will append the column to the table def.
reported = Column(Boolean)
service_event_id_delete = Column(String(32))
service_event_ind_fam = Column(Integer)
site_service_id = Column(String(50))
hmis_service_event_code_type_of_service = Column(String(50))
hmis_service_event_code_type_of_service_other = Column(String(50))
hprp_financial_assistance_service_event_code = Column(String(50))
hprp_relocation_stabilization_service_event_code = Column(String(50))
service_event_id_delete_occurred_date = Column(DateTime(timezone=False))
service_event_id_delete_effective_date = Column(DateTime(timezone=False))
service_event_provision_date = Column(DateTime(timezone=False))
service_event_recorded_date = Column(DateTime(timezone=False))
useexisting = True
class Referral(DB.Base, MapBase):
__tablename__ = 'referral'
id = Column(Integer, primary_key=True)
service_event_index_id = Column(Integer, ForeignKey('service_event.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
need_index_id = Column(Integer, ForeignKey('need.id')) # ??
#referral_id_date_effective = Column(DateTime(timezone=False))
referral_idid_num = Column(String(50))
referral_idid_str = Column(String(32))
referral_delete = Column(Integer)
referral_delete_occurred_date = Column(DateTime(timezone=False))
referral_delete_effective_date = Column(DateTime(timezone=False))
referral_agency_referred_to_idid_num = Column(String(50))
referral_agency_referred_to_idid_str = Column(String(50))
referral_agency_referred_to_name = Column(String(50))
referral_agency_referred_to_name_data_collection_stage = Column(String(50))
referral_agency_referred_to_name_date_collected = Column(DateTime(timezone=False))
referral_agency_referred_to_name_date_effective = Column(DateTime(timezone=False))
referral_call_idid_num = Column(String(50))
referral_call_idid_str = Column(String(50))
referral_need_idid_num = Column(String(50)) # In TBC, these refer to an already defined Need
referral_need_idid_str = Column(String(50))
useexisting = True
# FBY : TBC requested|required field
referral_need_notes = Column(String)
class Source(DB.Base, MapBase):
__tablename__ = 'source'
id = Column(Integer, primary_key=True)
report_id = Column(String(50), ForeignKey('report.report_id'))
source_id = Column(String(50))
source_id_date_collected = Column(DateTime(timezone=False))
source_email = Column(String(255))
source_email_date_collected = Column(DateTime(timezone=False))
source_contact_extension = Column(String(10))
source_contact_extension_date_collected = Column(DateTime(timezone=False))
source_contact_first = Column(String(20))
source_contact_first_date_collected = Column(DateTime(timezone=False))
source_contact_last = Column(String(20))
source_contact_last_date_collected = Column(DateTime(timezone=False))
source_contact_phone = Column(String(20))
source_contact_phone_date_collected = Column(DateTime(timezone=False))
source_name = Column(String(50))
source_name_date_collected = Column(DateTime(timezone=False))
#HUD 3.0
schema_version = Column(String(50))
source_id_id_num = Column(String(50))
source_id_id_str = Column(String(50))
source_id_delete = Column(Integer)
source_id_delete_occurred_date = Column(DateTime(timezone=False))
source_id_delete_effective_date = Column(DateTime(timezone=False))
software_vendor = Column(String(50))
software_version = Column(String(50))
source_contact_email = Column(String(255))
useexisting = True
#properties={'fk_source_to_export': relation(Export, backref='fk_export_to_source')})
class SystemConfiguration(DB.Base, MapBase):
__tablename__ = 'system_configuration_table'
id = Column(Integer, primary_key=True)
vendor_name = Column(String(50))
processing_mode = Column(String(4)) # TEST or PROD
source_id = Column(String(50))
odbid = Column(Integer)
providerid = Column(Integer)
userid = Column(Integer)
useexisting = True
class LastDateTime(DB.Base, MapBase):
# FBY: This table is used to record the document lifecycle: received, shredded, transmitted via SOAP
__tablename__ = 'last_date_time'
id = Column(Integer, primary_key=True)
event = Column(String(50))
event_date_time = Column(DateTime(timezone=False))
useexisting = True
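# Smoke test: blank the database, create all mapped tables, and insert a single Source row.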
def test():
from . import postgresutils
utils = postgresutils.Utils()
utils.blank_database()
print("instantiating db")
db = DB()
session = db.Session()
db.Base.metadata.create_all(db.pg_db_engine)
new = Source(source_id_id_num = 1, source_name='Orange County Corrections')
session.add(new)
session.commit()
print("done")
if __name__ == "__main__":
import sys
sys.exit(test())
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE
```
#### File: synthesis/src/exceptions.py
```python
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class DuplicateXMLDocumentError(Exception):
def __init__(self, *args):
message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2])
print(message)
self.message = message
class UndefinedXMLWriter(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2]))
class DatabaseAuthenticationError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2]))
class SoftwareCompatibilityError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class XSDError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class DBLayerNotFoundError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class VPNFailure(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class FTPUploadFailureError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class KeyboardInterrupt(Error):
def __init__(self, *args):
print("Intercepted Keyboard Interupt")
class FileNotFoundError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class DataFormatError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class InvalidSSNError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class EthnicityPickNotFound(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
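# Hypothetical usage sketch (the expression and message shown are illustrative only):
#     raise InputError("source_id=''", "source_id must be a non-empty string")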
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
```
#### File: synthesis/src/hmiscsv27writer.py
```python
from zope.interface import implementer
from . import dbobjects
from .writer import Writer
@implementer(Writer)
class HMISCSV27Writer(dbobjects.DB):
# Writer Interface
# implements (Writer)
def __init__(self):
pass
```
#### File: synthesis/src/hmiscsv30writer.py
```python
import os
from sqlalchemy import or_, and_
from zope.interface import implementer
import csv
from .conf import settings
#import exceptions
from . import dbobjects
from .writer import Writer
@implementer(Writer)
class HmisCSV30Writer():
# Writer Interface
#implements (Writer)
########################
# Constant Definitions #
########################
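# Output CSV file name for each HMIS CSV 3.0 section, keyed by logical section name.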
files = \
{
"export" : "Export.csv",
"agency" : "AgencyProgram.csv",
"siteInfo" : "SiteInformation.csv",
"regions" : "Regions.csv",
"inventory" : "BedInventory.csv",
"client" : "Client.csv",
"historical" : "ClientHistorical.csv",
"participation" : "ProgramParticipation.csv",
"serviceEvent" : "ServiceEvent.csv",
"incBens" : "IncomeBenefits.csv"
}
exportHeader = \
[
"ExportIDStr", "SourceID", "SourceName", "SourceContactFirst",
"SourceContactLast", "SourceContactPhone", "SourceContactExtension",
"SourceContactEmail", "ExportDate", "ExportPeriodBegin",
"ExportPeriodEnd", "ExportHashing", "SoftwareVendor",
"SoftwareVersion", "AgencyFile", "BedInventoryFile",
"ClientFile", "ClientHistoricalFile", "IncomeBenefitsFile",
"OutcomeMeasuresFile", "RegionsFile", "Program_Participation",
"ServiceEventFile", "SiteInformationFile", "Delta or Refresh"
]
agencyHeader = \
[
"OrganizationID", "OrganizationName", "ProgramID", "ProgramName",
"DirectServiceCode", "SiteID", "ProgramTypeCode", "TargetPopulationA",
"TargetPopulationB", "TrackingMethod", "GranteeIdentifier",
"ReceivesMcKinneyFunding", "DateCreated", "DateUpdated", "ExportIDStr"
]
siteInfoHeader = \
[
"OrganizationID", "Setup Site ID", "Address", "City", "State",
"Zip Code", "GeographicCode", "SiteServiceType", "HousingType",
"DateUpdated", "ExportIDStr"
]
regionsHeader = \
[
"OrganizationID", "SiteID", "RegionType", "RegionID",
"RegionDescription", "DateUpdated", "ExportIDStr"
]
inventoryHeader = \
[
"OrganizationID", "ProgramID", "SiteID", "AssetListID", "AssetListName",
"HouseholdType", "BedType", "Availability", "BedInventory",
"CHBedInventory", "UnitInventory", "InventoryStartDate",
"InventoryEndDate", "HMISParticipatingBeds", "HMISParticipationStartDate",
"HMISParticipationEndDate", "DateUpdated", "ExportIDStr"
]
clientHeader = \
[
"OrganizationID", "PersonalIdentificationNumber", "LegalFirstName",
"LegalMiddleName", "LegalLastName", "LegalSuffix", "SocialSecurityNumber",
"SocialSecNumberQualityCode", "DateOfBirth", "DateOfBirthQualityCode",
"PrimaryRace", "SecondaryRace", "Ethnicity", "Gender", "DateAdded",
"DateUpdated", "UpdateOrDelete", "IdentityVerification",
"ReleaseOfInformation", "ExportIDStr"
]
historicalHeader = \
[
"PersonalIdentificationNumber", "OrganizationID", "ProgramID",
"SiteID", "AssessmentDate", "DateUpdated", "IncomeTotalMonthly",
"IncomeLast30Days", "NonCashBenefitsLast30Days", "PhysicalDisability",
"ReceivePhysicalDisabilityServices", "HasDevelopmentalDisability",
"ReceiveDevelopmentalDisabilityServices", "HasChronicHealthCondition",
"ReceiveChronicHealthServices", "HasHIVAIDS", "ReceiveHIVAIDSServices",
"HasMentalHealthProblem", "MentalHealthIndefinite",
"ReceiveMentalHealthServices", "HasSubstanceAbuseProblem",
"SubstanceAbuseIndefinite", "ReceiveSubstanceAbuseServices",
"DomesticViolenceSurvivor", "DVOccurred", "CurrentlyEmployed",
"HoursWorkedLastWeek", "EmploymentTenure", "LookingForWork",
"CurrentlyInSchool", "VocationalTraining", "HighestSchoolLevel",
"Degree", "HealthStatus", "PregnancyStatus", "DueDate", "ServiceEra",
"MilitaryServiceDuration", "ServedInWarZone", "WarZone",
"MonthsInWarZone", "ReceivedFire", "MilitaryBranch", "DischargeStatus",
"ChildCurrentlyEnrolledInSchool", "ChildSchoolName",
"ChildMcKinneyVentoLiaison", "ChildSchoolType",
"ChildSchoolLastEnrolledDate", "ChildEnrollmentBarrier", "ExportIDStr"
]
participationHeader = \
[
"PersonalIdentificationNumber", "OrganizationID", "ProgramID", "SiteID",
"EntryDate", "ExitDate", "DateUpdated", "VeteranStatus",
"DisablingCondition", "PriorResidence", "LengthOfStayAtPriorResidence",
"ZIPCode", "ZIPQualityCode", "HousingStatusAtEntry", "HousingStatusAtExit",
"HouseholdIdentificationNumber", "Destination", "ReasonForLeaving",
"RelationshipToHeadOfHousehold", "HUDChronicHomeless", "ExportIDStr"
]
serviceEventHeader = \
[
"PersonalIdentificationNumber", "OrganizationID", "ProgramID", "SiteID",
"ServiceEventType", "ServiceEventStartDate", "ServiceEventEndDate",
"ServiceCode", "ServiceAIRSCode", "IsReferral?", "Quantity/Frequency",
"FinancialAssistanceAmount", "FundingCategory", "GrantIDNumber",
"IsRecurring", "Period/Interval", "Advance/Arrears", "ContactTime",
"ContactSite", "ClientEngaged", "AssetListID", "AssetID", "DomainIDCode",
"DateUpdated", "ExportIDStr"
]
incBensHeader = \
[
"PersonalIdentificationNumber", "OrganizationID", "ProgramID", "SiteID",
"AssessmentDate", "DateUpdated", "IncomeBenefitType", "SourceCode",
"SourceOther", "MonthlyAmount", "ExportIDStr"
]
def __init__(self, outDirectory, processingOptions, debug=False, debugMessages=None):
if settings.DEBUG:
print("CSV Files to be created in: %s" % outDirectory)
self.outDirectory = outDirectory
#self.pickList = Interpretpicklist()
self.errorMsgs = []
self.debug = debug
print("Setting up dbobjects...")
import time
startReal = time.time()
self.mappedObjects = dbobjects.DB()
endReal = time.time()
print("dbobjects setup finished after %0.2f real seconds." % (endReal - startReal))
if debug == True:
print("Debug switch is: %s" % debug)
self.debugMessages = debugMessages
self.options = processingOptions
self.openFiles = []
###################################
# Miscellaneous Utility Functions #
###################################
def startTransaction(self):
self.session = self.mappedObjects.session(echo_uow=True)
print("Starting transaction...")
def commitTransaction(self):
self.session.commit()
print("Transaction committed.")
def openFile(self, fileName):
try:
filePath = os.path.join(self.outDirectory, fileName)
print("Opening CSV output file %s for writing... " % filePath, end=' ')
file1 = open(filePath, "wt+")
print("opened.")
return file1
except:
print("Unable to open CSV output file %s for writing!" % filePath)
raise
def closeCsvFiles(self):
print("Closing CSV output files... ", end=' ')
for file1 in self.openFiles:
try:
file1.close()
except:
print("Unable to close CSV output file")
raise
print("all closed.")
def outputStr(self, maxlen, str1):
try:
truncStr = str1[0:maxlen]
except:
truncStr = None
return truncStr
def outputInt(self, val):
try:
num = int(val)
except:
num = None
return num
def outputMoney(self, val):
try:
num = round(val, 2)
except:
num = None
return num
def outputDate(self, tsStr):
try:
dateStr = tsStr.strftime("%m/%d/%Y")
except:
dateStr = None
return dateStr
def outputTime(self, tsStr):
try:
timeStr = tsStr.strftime("%H:%M:%S")
except:
timeStr = None
return timeStr
def chooseId(self, val1, val2):
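# Return val1 unless it is None, in which case fall back to val2.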
if val1 is None:
return val2
else:
return val1
##########################################
# Database Column-level Access Functions #
##########################################
def getHistoryRelatedColumnData(self, phIndex, table, *columns):
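# The query is assembled as a string so the table name can be parameterized, then
# eval'd; the requested columns are read from the first matching row, with None
# returned for any column that is absent (or when no row exists).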
query = "self.session.query(dbobjects.%s)" % table\
+ ".filter(dbobjects.%s.person_historical_index_id == phIndex)" % table\
+".first()"
row = eval(query)
# TBD: Do we care which row record gets returned?
if self.debug:
print("\n* %s = %s" % (table, row))
retVal = []
for column in columns:
if not row:
retVal.append(None)
continue
try:
retVal.append(eval("row.%s" % column))
except:
retVal.append(None)
if len(retVal) == 1:
return retVal[0]
else:
return tuple(retVal)
def getSchoolBarrier(self, cesIndex):
barrier = self.session.query(dbobjects.ChildEnrollmentStatusBarrier)\
.filter(dbobjects.ChildEnrollmentStatusBarrier.child_enrollment_status_index_id == cesIndex).first()
# TBD: Do we care which barrier record gets returned?
if not barrier:
return None
if self.debug:
print("\n* barrier = ", barrier)
try:
barrierCd = barrier.barrier_code
except:
barrierCd = None
return barrierCd
def getRelationshipToHeadData(self, hhId):
members = self.session.query(dbobjects.Household, dbobjects.Members)\
.filter(and_(or_(dbobjects.Household.household_id_num == hhId,
dbobjects.Household.household_id_str == hhId),
dbobjects.Household.id == dbobjects.Members.household_index_id))\
.first()
if not members:
return None
if self.debug:
print("\n* members = ", members)
try:
rel = members.relationship_to_head_of_household
except:
rel = None
return rel
def getPriorZipCodeData(self, phIndex):
address = self.session.query(dbobjects.PersonAddress)\
.filter(and_(dbobjects.PersonAddress.person_historical_index_id == phIndex,
dbobjects.PersonAddress.is_last_permanent_zip == 1)).first()
# TBD: Do we care which zipCode_status record gets returned?
if not address:
return (None, None)
if self.debug:
print("\n* person_address = ", address)
zipCode = None
zipQual = None
try:
zipCode = address.zipcode
zipQual = address.zip_quality_code
except:
pass
return (zipCode, zipQual)
def getReasonForLeavingData(self, sspIndex):
reason = self.session.query(dbobjects.ReasonsForLeaving)\
.filter(dbobjects.ReasonsForLeaving.site_service_participation_index_id
== sspIndex)\
.first()
# TBD: Do we care which reason_status record gets returned?
if not reason:
return None
if self.debug:
print("\n* reasons_for_leaving=", reason)
try:
reasonCd = reason.reason_for_leaving
except AttributeError:
reasonCd = None
return reasonCd
def getPersonHistoricalIndexData(self, sspIndex):
historical = self.session.query(dbobjects.PersonHistorical)\
.filter(dbobjects.PersonHistorical.site_service_index_id == sspIndex).first()
# TBD: Do we care which person historical record's index gets returned?
if not historical:
return None
if self.debug:
print("\n* person_historical=", historical)
try:
phIndex = historical.id
except:
phIndex = None
return phIndex
def getRacesData(self, personIndex):
races = self.session.query(dbobjects.Races)\
.filter(dbobjects.Races.person_index_id == personIndex)
# TBD: Do we care about which two races get output?
primaryRace = None
secondaryRace = None
try:
primaryRace = races[0].race_unhashed
secondaryRace = races[1].race_unhashed
except:
pass
return (primaryRace, secondaryRace)
def getReleaseGrantedData(self, personIndex):
roi = self.session.query(dbobjects.ReleaseOfInformation)\
.filter(dbobjects.ReleaseOfInformation.person_index_id == personIndex)\
.first()
if not roi:
return None
try:
releaseGranted = roi.release_granted
except:
releaseGranted = None
return releaseGranted
def getReceivesMcKinneyFundingData(self, serviceIndex):
funding = self.session.query(dbobjects.FundingSource)\
.filter(dbobjects.FundingSource.service_index_id == serviceIndex).first()
if not funding:
return None
try:
receivesMcKinneyFunding = funding.receives_mcKinney_funding
except:
receivesMcKinneyFunding = None
return receivesMcKinneyFunding
def getFundingSourceData(self, seIndex):
funding = self.session.query(dbobjects.FundingSource)\
.filter(dbobjects.FundingSource.service_event_index_id == seIndex).first()
if not funding:
return None
faAmt = None
grantId = None
advArrears = None
try:
faAmt = funding.financial_assistance_amount
grantId = funding.federal_cfda_number
advArrears = funding.advance_or_arrears
except:
pass
return (faAmt, grantId, advArrears)
#######################################
# Database Row-level Access Functions #
#######################################
def getNonCashBenefitsData(self, phIndex):
print("in gncbd")
nonCashBens = self.session.query(dbobjects.NonCashBenefits)\
.filter(dbobjects.NonCashBenefits.person_historical_index_id == phIndex)
if not nonCashBens.count():
return
for nonCashBen in nonCashBens:
try:
if self.debug:
print("\n* non_cash_benefits=", nonCashBen)
yield nonCashBen
except:
print("Unable to obtain data from non_cash_benefits table!")
raise
def getIncomeAndSourcesData(self, phIndex):
print("in gisd")
incomes = self.session.query(dbobjects.IncomeAndSources)\
.filter(dbobjects.IncomeAndSources.person_historical_index_id == phIndex)
if not incomes.count():
return
for income in incomes:
try:
if self.debug:
print("\n* income_and_sources=", income)
yield income
except:
print("Unable to obtain data from income_and_sources table!")
raise
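# Most of the get*Data methods that follow are generators: each opens its CSV
# output file and writes the header only when at least one matching row exists,
# then yields rows one at a time.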
def getPersonHistoricalData(self, personIndex, personId):
historicals = self.session.query(dbobjects.PersonHistorical)\
.filter(dbobjects.PersonHistorical.person_index_id == personIndex)
if not historicals.count():
print("Warning: no data in person_historical table for person %s." \
% personId)
return
else:
self.historicalFile = self.openFile(HmisCSV30Writer.files["historical"])
self.openFiles.append(self.historicalFile)
self.historicalWriter = csv.writer(self.historicalFile,
quoting=csv.QUOTE_NONNUMERIC)
self.historicalWriter.writerow(HmisCSV30Writer.historicalHeader)
for historical in historicals:
try:
if self.debug:
print("\n* person_historical=", historical)
yield historical
except:
print("Unable to obtain data from person_historical table!")
raise
def getServiceEventData(self, personIndex, personId):
serviceEvents = self.session.query(dbobjects.ServiceEvent)\
.filter(dbobjects.ServiceEvent.person_index_id == personIndex)
if not serviceEvents.count():
print("Warning: no data in service_event table for person %s." % personId)
return
else:
self.serviceEventFile = self.openFile(HmisCSV30Writer.files["serviceEvent"])
self.openFiles.append(self.serviceEventFile)
self.serviceEventWriter = csv.writer(self.serviceEventFile,
quoting=csv.QUOTE_NONNUMERIC)
self.serviceEventWriter.writerow(HmisCSV30Writer.serviceEventHeader)
for serviceEvent in serviceEvents:
try:
if self.debug:
print("\n* service_event=", serviceEvent)
yield serviceEvent
except:
print("Unable to obtain data from service_event table!")
raise
def getParticipationData(self, personIndex, personId):
participations = self.session.query(dbobjects.SiteServiceParticipation)\
.filter(dbobjects.SiteServiceParticipation.fk_participation_to_person == personIndex)
if not participations.count():
print("Warning: no data in site_service_participation table for person %s." \
% personId)
return
else:
self.participationFile = self.openFile(HmisCSV30Writer.files["participation"])
self.openFiles.append(self.participationFile)
self.participationWriter = csv.writer(self.participationFile,
quoting=csv.QUOTE_NONNUMERIC)
self.participationWriter.writerow(HmisCSV30Writer.participationHeader)
for participation in participations:
try:
if self.debug:
print("\n* site_service_participation=", participation)
yield participation
except:
print("Unable to obtain data from site_service_participation table!")
raise
def getPersonData(self, exportId):
persons = self.session.query(dbobjects.Person)\
.filter(dbobjects.Person.export_index_id == exportId)
if exportId is None:
print("Error: cannot query person records for a null export id.")
# TBD: Figure out if/how to correctly handle reported:
"""
if self.options.reported:
persons = persons.filter(dbobjects.Person.reported == True)
elif self.options.unreported:
persons = persons.filter(dbobjects.Person.reported != True)
"""
if not persons.count():
print("Warning: there's no data in person table for export %s." \
% exportId)
return
else:
self.clientFile = self.openFile(HmisCSV30Writer.files["client"])
self.openFiles.append(self.clientFile)
self.clientWriter = csv.writer(self.clientFile, quoting=csv.QUOTE_NONNUMERIC)
self.clientWriter.writerow(HmisCSV30Writer.clientHeader)
for person in persons:
try:
if self.debug:
print("\n* person=", person)
yield person
except:
print("Unable to obtain data from person table!")
raise
def getInventoryData(self, siteServiceIndex):
inventories = self.session.query(dbobjects.Inventory)\
.filter(dbobjects.Inventory.site_service_index_id == siteServiceIndex)
if not inventories.count():
print("Warning: no data in inventory for site_service_id %s." \
% siteServiceIndex)
return
else:
self.inventoryFile = self.openFile(HmisCSV30Writer.files["inventory"])
self.openFiles.append(self.inventoryFile)
self.inventoryWriter = csv.writer(self.inventoryFile, quoting=csv.QUOTE_NONNUMERIC)
self.inventoryWriter.writerow(HmisCSV30Writer.inventoryHeader)
for inventory in inventories:
try:
if self.debug:
print("\n* inventory=", inventory)
yield inventory
except:
print("Unable to obtain data from inventory table!")
raise
def getRegionData(self, siteServiceId):
regions = self.session.query(dbobjects.Region)\
.filter(dbobjects.Region.site_service_id == siteServiceId)
if not regions.count():
print("Warning: no data in region for site_service_id %s." % siteServiceId)
return
else:
self.regionsFile = self.openFile(HmisCSV30Writer.files["regions"])
self.openFiles.append(self.regionsFile)
self.regionsWriter = csv.writer(self.regionsFile, quoting=csv.QUOTE_NONNUMERIC)
self.regionsWriter.writerow(HmisCSV30Writer.regionsHeader)
for region in regions:
try:
if self.debug:
print("\n* region=", region)
yield region
except:
print("Unable to obtain data from region table!")
raise
def getSiteServiceData(self, siteIndex):
siteServices \
= self.session.query(dbobjects.SiteService)\
.filter(dbobjects.SiteService.site_index_id == siteIndex)
if not siteServices.count():
print("Warning: no data in site_service for site index %s." \
% siteIndex)
return
else:
self.siteInfoFile = self.openFile(HmisCSV30Writer.files["siteInfo"])
self.openFiles.append(self.siteInfoFile)
self.siteInfoWriter = csv.writer(self.siteInfoFile, quoting=csv.QUOTE_NONNUMERIC)
self.siteInfoWriter.writerow(HmisCSV30Writer.siteInfoHeader)
for siteService in siteServices:
try:
if self.debug:
print("\n* site_service=", siteService)
yield siteService
except:
print("Unable to obtain data from siteService table!")
raise
def getAgencyProgramData(self, exportIndex):
agencyPrograms \
= self.session.query(dbobjects.Agency, dbobjects.Service, dbobjects.Site)\
.filter(and_(dbobjects.Agency.export_index_id == exportIndex,
dbobjects.Service.export_index_id == exportIndex,
dbobjects.Site.export_index_id == exportIndex,
dbobjects.Agency.airs_key == dbobjects.Service.airs_key,
dbobjects.Agency.id == dbobjects.Site.agency_index_id))
if not agencyPrograms.count():
print("Warning: no data in (agency x service x site) for export %s." \
% self.exportId)
return
else:
self.agencyFile = self.openFile(HmisCSV30Writer.files["agency"])
self.openFiles.append(self.agencyFile)
self.agencyWriter = csv.writer(self.agencyFile, quoting=csv.QUOTE_NONNUMERIC)
self.agencyWriter.writerow(HmisCSV30Writer.agencyHeader)
for agency, service, site in agencyPrograms:
try:
if self.debug:
print("\n* agency=", agency)
print("\n* service=", service)
print("\n* site=", site)
yield (agency, service, site)
except:
print("Unable to obtain data from agency, service, site tables!")
raise
def getSourceData(self, exportId):
sources = self.session.query(dbobjects.Source)\
.filter(dbobjects.Source.id == exportId).first()
if not sources:
print("Warning: there's no data in source table for export %s." \
% exportId)
return None
try:
if self.debug:
print("\n* source=", sources)
return sources
except:
print("Unable to obtain data from source table!")
raise
def getExportData(self):
exports = self.session.query(dbobjects.Export)
if not exports.count():
print("Warning: there's no data in export table.")
return
else:
self.exportFile = self.openFile(HmisCSV30Writer.files["export"])
self.openFiles.append(self.exportFile)
self.exportWriter = csv.writer(self.exportFile, quoting=csv.QUOTE_NONNUMERIC)
self.exportWriter.writerow(HmisCSV30Writer.exportHeader)
for export in exports:
try:
if self.debug:
print("\n* export=", export)
yield export
except:
print("Unable to obtain data from export table!")
raise
####################################################
# Database Concatenated Row-level Access Functions #
####################################################
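# Merges two generators into a single stream: income-and-sources rows first, then
# non-cash-benefit rows, both normalized to
# (benefit_type, source_code, source_other, monthly_amount, date_collected)
# so createIncomeBenefitsRecs can treat them uniformly.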
def getIncomeAndNonCashBenefitsData(self, phIndex):
IB_TYPE_INCOME = '1'
IB_TYPE_NON_CASH = '2'
for ias in self.getIncomeAndSourcesData(phIndex):
# Return (IncomeBenefitType, SourceCode, SourceOther, MonthlyAmount):
yield (IB_TYPE_INCOME, ias.income_source_code, ias.income_source_other,
ias.amount, ias.income_source_code_date_collected)
for ncb in self.getNonCashBenefitsData(phIndex):
# Return (non_cashBenefitType, SourceCode, SourceOther, MonthlyAmount):
yield (IB_TYPE_NON_CASH, ncb.non_cash_source_code, ncb.non_cash_source_other,
None, ncb.non_cash_source_code_date_collected)
################################
# CSV Record Creator Functions #
################################
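# Each create*Recs function below pulls rows from its accessor, flattens them into
# the fixed column order of the HMIS CSV 3.0 layout (padding not-yet-implemented
# columns with None), and writes one CSV row per database row. Parent creators call
# child creators so related files are emitted together (e.g. participation records
# trigger service-event records).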
def createServiceEventRecs(self, personIndex, personId, phIndex):
for se in self.getServiceEventData(personIndex, personId):
try:
# Get the fields in service_event table:
seIndex = se.site_service_index_id
seType = se.type_of_service
seStartDt = se.service_period_start_date
seEndDt = se.service_period_end_date
serviceCd = se.hmis_service_event_code_type_of_service
serviceAirsCd = se.service_airs_code
isReferral = se.is_referral
quantFreq = se.quantity_of_service
fundCat = se.hprp_financial_assistance_service_event_code
(faAmt, grantId, advArrears) = self.getFundingSourceData(seIndex)
clientEngaged = (self.getHistoryRelatedColumnData(phIndex,
"EngagedDate", "id") != None)
(contTime, contSite) = self.getHistoryRelatedColumnData(phIndex,
"ContactMade", "contact_date", "contact_site")
except:
print("Unable to interpret data from service_event table!")
raise
# TBD: Other fields to implement:
orgId = None
programId = None
siteId = None
isRecurring = None
periodInt = None
assetListId = None
assetId = None
domainIdCd = None
dateUpdated = None
# Build data row list:
dataRow = \
[
self.outputStr(32, personId),
self.outputInt(orgId),
self.outputInt(programId),
self.outputInt(siteId),
self.outputStr(1, seType),
self.outputDate(seStartDt),
self.outputDate(seEndDt),
self.outputStr(2, serviceCd),
self.outputStr(15, serviceAirsCd),
self.outputStr(1, isReferral),
self.outputInt(quantFreq),
self.outputMoney(faAmt),
self.outputStr(2, fundCat),
self.outputStr(10, grantId),
self.outputStr(1, isRecurring),
self.outputStr(1, periodInt),
self.outputStr(1, advArrears),
self.outputTime(contTime),
self.outputStr(1, contSite),
self.outputStr(1, clientEngaged),
self.outputStr(10, assetListId),
self.outputStr(10, assetId),
self.outputInt(domainIdCd),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
print("\n* DataRow (ServiceEvent)= ", dataRow)
self.serviceEventWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["serviceEvent"])
raise
def createIncomeBenefitsRecs(self, phIndex, personId):
for (ibType, srcCode, srcOther, monthlyAmt, sourceDate)\
in self.getIncomeAndNonCashBenefitsData(phIndex):
assessDate = sourceDate
# TBD: Other fields to implement:
orgId = None
programId = None
siteId = None
dateUpdated = None
# Build data row list:
dataRow = \
[
self.outputStr(32, personId),
self.outputInt(orgId),
self.outputInt(programId),
self.outputInt(siteId),
self.outputDate(assessDate),
self.outputDate(dateUpdated),
self.outputStr(1, ibType),
self.outputStr(2, srcCode),
self.outputInt(srcOther),
self.outputMoney(monthlyAmt),
self.outputStr(32, self.exportId)
]
try:
if not getattr(self, "incBensFile", None):
self.incBensFile = self.openFile(HmisCSV30Writer.files["incBens"])
self.openFiles.append(self.incBensFile)
self.incBensWriter = csv.writer(self.incBensFile,
quoting=csv.QUOTE_NONNUMERIC)
print("\n* DataRow (IncomeBenefits)= ", dataRow)
self.incBensWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["incBens"])
raise
def createParticipationRecs(self, personIndex, personId):
for participation in self.getParticipationData(personIndex, personId):
try:
# Get the fields in site_service_participation table:
sspIndex = participation.id
entryDate = participation.participation_dates_start_date
exitDate = participation.participation_dates_end_date
hhId = self.chooseId(participation.household_idid_num,
participation.household_idid_str)
# Get fields related to site_service_participation table:
phIndex = self.getPersonHistoricalIndexData(sspIndex)
reasonForLeaving = self.getReasonForLeavingData(sspIndex)
# Get fields from subtables simply related to person_historical table:
vetStatus = self.getHistoryRelatedColumnData(phIndex,
"VeteranVeteranStatus", "veteran_status")
disCond = self.getHistoryRelatedColumnData(phIndex,
"DisablingCondition", "disabling_condition")
priorRes = self.getHistoryRelatedColumnData(phIndex,
"PriorResidence", "prior_residence_code")
lengthPriorStay = self.getHistoryRelatedColumnData(phIndex,
"LengthOfStayAtPriorResidence", "length_of_stay_at_prior_residence")
dest = self.getHistoryRelatedColumnData(phIndex,
"Destinations", "destination_code")
chronicHomeless = self.getHistoryRelatedColumnData(phIndex,
"HudChronicHomeless", "hud_chronic_homeless")
housingEntry = self.getHistoryRelatedColumnData(phIndex,
"HousingStatus", "housing_status")
housingExit = self.getHistoryRelatedColumnData(phIndex,
"HousingStatus", "housing_status")
# Get fields from subtables not simply related to person_historical table:
zipCode, zipQual = self.getPriorZipCodeData(phIndex)
relationship = self.getRelationshipToHeadData(hhId)
except:
print("Unable to interpret data from site_service_participation table!")
raise
# TBD: Other fields to implement:
orgId = None
programId = None
siteId = None
dateUpdated = None
# Build data row list:
dataRow = \
[
self.outputStr(32, personId),
self.outputInt(orgId),
self.outputInt(programId),
self.outputInt(siteId),
self.outputDate(entryDate),
self.outputDate(exitDate),
self.outputDate(dateUpdated),
self.outputStr(1, vetStatus),
self.outputStr(1, disCond),
self.outputStr(2, priorRes),
self.outputStr(1, lengthPriorStay),
self.outputStr(5, zipCode),
self.outputStr(1, zipQual),
self.outputStr(1, housingEntry),
self.outputStr(1, housingExit),
self.outputStr(20, hhId),
self.outputStr(2, dest),
self.outputStr(2, reasonForLeaving),
self.outputStr(1, relationship),
self.outputStr(1, chronicHomeless),
self.outputStr(32, self.exportId)
]
try:
print("\n* DataRow (ProgramParticipation)= ", dataRow)
self.participationWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["participation"])
raise
self.createServiceEventRecs(personIndex, personId, phIndex)
def createClientHistoricalRecs(self, personIndex, personId):
for historical in self.getPersonHistoricalData(personIndex, personId):
try:
# Get the fields in site_service_participation table:
phIndex = historical.id
# Get fields from subtables simply related to person_historical table:
monthlyIncome = self.getHistoryRelatedColumnData(phIndex,
"IncomeTotalMonthly", "income_total_monthly")
income30 = self.getHistoryRelatedColumnData(phIndex,
"IncomeLast30Days", "income_last_30_days")
noncash30 = self.getHistoryRelatedColumnData(phIndex,
"NonCashBenefitsLast30Days", "income_last_30_days")
physDis, recvPhysDis = self.getHistoryRelatedColumnData(phIndex,
"PhysicalDisability",
"has_physical_disability", "receive_physical_disability_services")
devDis, recvDevDis = self.getHistoryRelatedColumnData(phIndex,
"DevelopmentalDisability",
"has_developmental_disability", "receive_developmental_disability")
chronicCond, recvChronic = self.getHistoryRelatedColumnData(phIndex,
"ChronicHealthCondition",
"has_chronic_health_condition", "receive_chronic_health_services")
hivAids, recvHivAids = self.getHistoryRelatedColumnData(phIndex,
"HivAidsStatus", "has_hiv_aids", "receive_hiv_aids_services")
mental, mentalIndef, recvMental = self.getHistoryRelatedColumnData(phIndex,
"MentalHealthProblem", "has_mental_health_problem",
"mental_health_indefinite", "receive_mental_health_services")
substance, substanceIndef, recvSubstance \
= self.getHistoryRelatedColumnData(phIndex,
"SubstanceAbuseProblem", "has_substance_abuse_problem",
"substance_abuse_indefinite", "receive_substance_abuse_services")
violence, violenceOccurred = self.getHistoryRelatedColumnData(phIndex,
"DomesticViolence",
"domestic_violence_survivor", "dv_occurred")
employed, hoursLastWk, tenure, looking \
= self.getHistoryRelatedColumnData(phIndex, "Employment",
"currently_employed", "hours_worked_last_week",
"employment_tenure", "looking_for_work")
inSchool = self.getHistoryRelatedColumnData(phIndex,
"CurrentlyInSchool", "currently_in_school")
vocational = self.getHistoryRelatedColumnData(phIndex,
"VocationalTraining", "vocational_training")
highestSchool = self.getHistoryRelatedColumnData(phIndex,
"HighestSchoolLevel", "highest_school_level")
(degreeNum, degreeStr) = self.getHistoryRelatedColumnData(phIndex,
"Degree", "degree_id_id_num", "degree_id_id_str")
degree = self.chooseId(degreeNum, degreeStr)
healthStatus = self.getHistoryRelatedColumnData(phIndex,
"HealthStatus", "health_status")
pregnant, dueDate = self.getHistoryRelatedColumnData(phIndex,
"Pregnancy", "pregnancy_status", "due_date")
serviceEra = self.getHistoryRelatedColumnData(phIndex,
"VeteranServiceEra", "service_era")
serviceDur = self.getHistoryRelatedColumnData(phIndex,
"VeteranMilitaryServiceDuration", "military_service_duration")
servedInWz = self.getHistoryRelatedColumnData(phIndex,
"VeteranServedInWarZone", "served_in_war_zone")
wzNum, wzMonths, wzFire = self.getHistoryRelatedColumnData(phIndex,
"VeteranWarzonesServed", "war_zone_id_id_id_num",
"months_in_war_zone", "received_fire")
warZone = wzNum
branch, discharge = self.getHistoryRelatedColumnData(phIndex,
"VeteranMilitaryBranches", "military_branch", "discharge_status")
cesIndex, childInSchool, school, mvLiaison, schoolType, lastSchoolDt \
= self.getHistoryRelatedColumnData(phIndex, "ChildEnrollmentStatus",
"id", "child_currently_enrolled_in_school",
"child_school_name", "child_mckinney_vento_liason",
"child_school_type", "child_last_enrolled_date")
# Get fields from subtables non-simply related to person_historical table:
schoolBarrier = self.getSchoolBarrier(cesIndex)
except:
print("Unable to interpret data from client_historical table!")
raise
# TBD: Other fields to implement:
orgId = None
programId = None
siteId = None
assessDate = None
dateUpdated = None
# Build data row list:
dataRow = \
[
self.outputStr(32, personId),
self.outputInt(orgId),
self.outputInt(programId),
self.outputInt(siteId),
self.outputDate(assessDate),
self.outputDate(dateUpdated),
self.outputMoney(monthlyIncome),
self.outputStr(2, income30),
self.outputStr(2, noncash30),
self.outputStr(1, physDis),
self.outputStr(1, recvPhysDis),
self.outputStr(1, devDis),
self.outputStr(1, recvDevDis),
self.outputStr(1, chronicCond),
self.outputStr(1, recvChronic),
self.outputStr(1, hivAids),
self.outputStr(1, recvHivAids),
self.outputStr(1, mental),
self.outputStr(1, mentalIndef),
self.outputStr(1, recvMental),
self.outputStr(1, substance),
self.outputStr(1, substanceIndef),
self.outputStr(1, recvSubstance),
self.outputStr(1, violence),
self.outputStr(1, violenceOccurred),
self.outputStr(1, employed),
self.outputInt(hoursLastWk),
self.outputStr(1, tenure),
self.outputStr(1, looking),
self.outputStr(1, inSchool),
self.outputStr(1, vocational),
self.outputStr(1, highestSchool),
self.outputStr(1, degree),
self.outputStr(1, healthStatus),
self.outputStr(1, pregnant),
self.outputDate(dueDate),
self.outputStr(1, serviceEra),
self.outputInt(serviceDur),
self.outputStr(1, servedInWz),
self.outputStr(1, warZone),
self.outputInt(wzMonths),
self.outputStr(1, wzFire),
self.outputStr(1, branch),
self.outputStr(1, discharge),
self.outputStr(1, childInSchool),
self.outputStr(100, school),
self.outputStr(1, mvLiaison),
self.outputStr(1, schoolType),
self.outputDate(lastSchoolDt),
self.outputInt(schoolBarrier),
self.outputStr(32, self.exportId)
]
try:
print("\n* DataRow (ClientHistorical)= ", dataRow)
self.historicalWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["historical"])
raise
self.createIncomeBenefitsRecs(phIndex, personId)
def createClientRecs(self, exportId):
for person in self.getPersonData(exportId):
try:
# Get the person index id to be used to get related data:
personIndex = person.id
# Get the fields in person table:
personId = self.chooseId(person.person_id_id_num,
person.person_id_id_str)
personId = self.chooseId(personId, person.person_id_hashed)
firstName = person.person_legal_first_name_unhashed
middleName = person.person_legal_middle_name_unhashed
lastName = person.person_legal_last_name_unhashed
nameSuffix = person.person_legal_suffix_unhashed
ssn = person.person_social_security_number_unhashed
ssnQual = person.person_social_sec_number_quality_code
dob = person.person_date_of_birth_unhashed
ethnicity = person.person_ethnicity_unhashed
gender = person.person_gender_unhashed
releaseOfInfo = self.getReleaseGrantedData(personIndex)
except:
print("Unable to interpret data from person table!")
raise
(primaryRace, secondaryRace) = self.getRacesData(personIndex)
# TBD: Other fields to implement:
orgId = None
dobQual = None
dateAdded = None
dateUpdated = None
updateOrDelete = None
idVerification = None
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputStr(32, personId),
self.outputStr(30, firstName),
self.outputStr(30, middleName),
self.outputStr(30, lastName),
self.outputStr(30, nameSuffix),
self.outputStr(11, ssn),
self.outputStr(1, ssnQual),
self.outputDate(dob),
self.outputStr(1, dobQual),
self.outputStr(1, primaryRace),
self.outputStr(1, secondaryRace),
self.outputStr(1, ethnicity),
self.outputStr(1, gender),
self.outputDate(dateAdded),
self.outputDate(dateUpdated),
self.outputStr(1, updateOrDelete),
self.outputStr(1, idVerification),
self.outputStr(1, releaseOfInfo),
self.outputStr(32, exportId)
]
try:
if self.debug:
print("\n* DataRow (Client)= ", dataRow)
self.clientWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["client"])
raise
self.createClientHistoricalRecs(personIndex, personId)
self.createParticipationRecs(personIndex, personId)
def createBedInventoryRecs(self, siteService, orgId):
for inventory in self.getInventoryData(siteService.id):
try:
# Get the fields in site_service table:
programId = siteService.service_id
siteId = siteService.site_id
# Get the fields in inventory table:
assetListId = inventory.inventory_id_id_num
assetListName = inventory.inventory_id_id_str
householdType = inventory.household_type
bedType = inventory.bed_type
bedAvail = inventory.bed_availability
bedInv = inventory.bed_inventory
chInv = inventory.chronic_homeless_bed
unitInv = inventory.unit_inventory
invStart = inventory.inventory_effective_period_start_date
invEnd = inventory.inventory_effective_period_end_date
hmisPartBeds = inventory.hmis_participating_beds
hmisStart = inventory.hmis_participation_period_start_date
hmisEnd = inventory.hmis_participation_period_end_date
# TBD: Other fields to implement:
dateUpdated = None
except:
print("Unable to interpret data from inventory tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputInt(programId),
self.outputInt(siteId),
self.outputStr(10, assetListId),
self.outputStr(30, assetListName),
self.outputStr(1, householdType),
self.outputStr(1, bedType),
self.outputStr(1, bedAvail),
self.outputInt(bedInv),
self.outputInt(chInv),
self.outputInt(unitInv),
self.outputDate(invStart),
self.outputDate(invEnd),
self.outputInt(hmisPartBeds),
self.outputDate(hmisStart),
self.outputDate(hmisEnd),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (Inventory)= ", dataRow)
self.inventoryWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["inventory"])
raise
def createRegionsRecs(self, siteService, orgId):
for region in self.getRegionData(siteService.key):
try:
# Get the fields in site_service table:
siteId = siteService.site_id
# Get the fields in region table:
#TBD: Which field is ID?
regionId = region.id
regionType = region.region_type
descript = region.region_description
# TBD: Other fields to implement:
dateUpdated = None
except:
print("Unable to interpret data from region tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputInt(siteId),
self.outputStr(2, regionId),
self.outputStr(8, regionType),
self.outputStr(30, descript),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (Regions)= ", dataRow)
self.regionsWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["regions"])
raise
def createSiteInformationRecs(self, site, orgId):
for siteService in self.getSiteServiceData(site.id):
try:
# Get the fields in site table:
siteId = site.airs_key
address = site.physical_address_line_1
city = site.physical_address_city
state = site.physical_address_state
zipCode = site.physical_address_zip_code
# Get the fields in site_service table:
geoCode = siteService.geographic_code
siteServiceType = siteService.site_service_type
housingType = siteService.housing_type
# TBD: Other fields to implement:
dateUpdated = None
except:
print("Unable to interpret data from site, site_service tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputInt(siteId),
self.outputStr(30, address),
self.outputStr(30, city),
self.outputStr(2, state),
self.outputStr(5, zipCode),
self.outputInt(geoCode),
self.outputStr(1, siteServiceType),
self.outputStr(1, housingType),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (SiteInfo)= ", dataRow)
self.siteInfoWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["siteInfo"])
raise
self.createRegionsRecs(siteService, orgId)
self.createBedInventoryRecs(siteService, orgId)
def createAgencyProgramRecs(self, exportIndex):
orgId = None
for agency, service, site in self.getAgencyProgramData(exportIndex):
try:
# Get the fields in agency table:
#agencyIndex = agency.id
orgId = agency.airs_key
orgName = agency.airs_name
# Get the fields in service table:
serviceIndex = service.id
programId = service.airs_key
programName = service.airs_name
directServiceCode = service.direct_service_code
programTypeCode = service.service_type
targetPopulationA = service.target_population_a
targetPopulationB = service.target_population_b
trackingMethod = service.residential_tracking_method
granteeIdentifier = service.grantee_identifier
# Get the fields in site table:
siteId = site.airs_key
# Get the fields in related funding_source table:
receivesMcKFunding = self.getReceivesMcKinneyFundingData(serviceIndex)
# TBD: Other fields to implement:
dateCreated = None
dateUpdated = None
except:
print("Unable to interpret data from agency, service, and/or site tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputStr(30, orgName),
self.outputInt(programId),
self.outputStr(30, programName),
self.outputStr(1, directServiceCode),
self.outputInt(siteId),
self.outputStr(1, programTypeCode),
self.outputStr(1, targetPopulationA),
self.outputStr(2, targetPopulationB),
self.outputStr(2, trackingMethod),
self.outputStr(10, granteeIdentifier),
self.outputStr(1, receivesMcKFunding),
self.outputDate(dateCreated),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (AgencyProgram)= ", dataRow)
self.agencyWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["agency"])
raise
self.createSiteInformationRecs(site, orgId)
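# Top-level record creator: one export row drives the whole run. For each export it
# writes the export record, then fans out to agency/program records (which in turn
# emit site, region and bed-inventory records) and to client records (which emit
# client-historical, income/benefit, participation and service-event records).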
def createExportRecs(self):
self.exportId = None
for export in self.getExportData():
try:
exportIndex = export.export_id
self.exportId = export.export_id
expDate = export.export_date
perStart = export.export_period_start_date
perEnd = export.export_period_end_date
# TBD: These moved to source for 3.0:
#swVendor = export.export_software_vendor
#swVersion = export.export_software_version
except:
print("Unable to interpret data from export table!")
raise
source = self.getSourceData(self.exportId)
try:
sourceId = getattr(source, "source_id", None)
sourceName = getattr(source, "source_name", None)
contactFirst = getattr(source, "source_contact_first", None)
contactLast = getattr(source, "source_contact_last", None)
contactPhone = getattr(source, "source_contact_phone", None)
contactExt = getattr(source, "source_contact_extension", None)
contactEmail = getattr(source, "source_email", None)
# TBD: These are moved from export for 3.0:
swVendor = getattr(source, "software_vendor", None)
swVersion = getattr(source, "software_version", None)
except:
print("Unable to interpret data from source table!")
raise
# TBD: Other fields to implement:
self.exportHashing = None
deltaRefresh = None
# Build data row list:
dataRow = \
[
self.outputStr(32, self.exportId),
self.outputStr(32, sourceId),
self.outputStr(50, sourceName),
self.outputStr(50, contactFirst),
self.outputStr(50, contactLast),
self.outputStr(30, contactPhone),
self.outputStr(10, contactExt),
self.outputStr(50, contactEmail),
self.outputDate(expDate),
self.outputDate(perStart),
self.outputDate(perEnd),
self.outputStr(1, self.exportHashing),
self.outputStr(50, swVendor),
self.outputStr(50, swVersion),
self.outputStr(50, HmisCSV30Writer.files["agency"]),
self.outputStr(50, HmisCSV30Writer.files["inventory"]),
self.outputStr(50, HmisCSV30Writer.files["client"]),
self.outputStr(50, HmisCSV30Writer.files["historical"]),
self.outputStr(50, HmisCSV30Writer.files["incBens"]),
None, # Outcome_measures file was removed from 3.0
self.outputStr(50, HmisCSV30Writer.files["regions"]),
self.outputStr(50, HmisCSV30Writer.files["participation"]),
self.outputStr(50, HmisCSV30Writer.files["serviceEvent"]),
self.outputStr(50, HmisCSV30Writer.files["siteInfo"]),
self.outputStr(1, deltaRefresh)
]
try:
if self.debug:
print("\n* DataRow (Export)= ", dataRow)
self.exportWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["export"])
raise
self.createAgencyProgramRecs(exportIndex)
self.createClientRecs(self.exportId)
def createCsvFiles(self):
self.createExportRecs()
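# Public entry point: start a database transaction, generate every CSV file, close
# the open file handles, then commit.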
def write(self):
self.startTransaction()
self.createCsvFiles()
self.closeCsvFiles()
self.commitTransaction()
print("Export finished.")
return True
```
#### File: synthesis/src/hmisxml30reader.py
```python
import os  # , sys
from .reader import Reader
from zope.interface import implementer
from lxml import etree
import dateutil.parser
from .dbobjects import * # @UnusedWildImport
@implementer(Reader)
class HMISXML30Reader:
# print "Base.metadata before create in hmisxmlreader3: ", Base.metadata
# Base.metadata.create_all(pg_db_engine)
# print "Base.metadata after create in hmisxmlreader3: ", Base.metadata
''' Implements reader interface '''
#implements (Reader)
''' Define XML namespaces '''
hmis_namespace = "http://www.hudhdx.info/Resources/Vendors/3_0/HUD_HMIS.xsd"
airs_namespace = "http://www.hudhdx.info/Resources/Vendors/3_0/AIRS_3_0_mod.xsd"
nsmap = {"hmis" : hmis_namespace, "airs" : airs_namespace}
def __init__(self, xml_file, db):
''' Put XML file into local object '''
self.xml_file = xml_file
#if settings.DEBUG:
# print "does self.xml_file exist?", os.path.exists(self.xml_file)
''' Instantiate database object '''
#dbo = DB()
self.session = db.Session()
def read(self):
''' Takes an XML instance file and reads it into memory as a node tree '''
#print '** Raw XML:', self.xml_file
#if settings.DEBUG:
# print "does self.xml_file still exist?", os.path.exists(self.xml_file)
tree = etree.parse(self.xml_file)
#print '** Node tree:', tree
#self.xml_file.close()
return tree
def process_data(self, tree):
''' Shreds the XML document into the database and return the source ids '''
root_element = tree.getroot()
source_ids = parse_source(self, root_element)
return source_ids
''' Parse each table (other readers use these, so they're stand-alone methods)'''
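# Parsing convention (the helpers are defined elsewhere in this module and are not
# shown here): each parse_* method builds self.parse_dict via existence_test_and_add(),
# which appears to record a column/value pair only when the XPath lookup returns
# something, and then persists the dict with shred(), which maps it onto the given
# ORM class and keeps the new row's index id on self for use as a foreign key by
# sub-parsers.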
def parse_source(self, root_element):
''' Loop through all sources and then traverse the tree for each export '''
''' There can be multiple sources with multiple exports inside each source '''
xpSources = '/hmis:Sources/hmis:Source'
source_list = root_element.xpath(xpSources, namespaces = self.nsmap)
if source_list is not None:
source_ids = []
for item in source_list:
self.parse_dict = {}
''' Element paths '''
xpSourceVersion = '../../@hmis:version'
xpSourceIDIDNum = 'hmis:SourceID/hmis:IDNum'
xpSourceIDIDStr = 'hmis:SourceID/hmis:IDStr'
xpSourceDelete = 'hmis:SourceID/@hmis:Delete'
xpSourceDeleteOccurredDate = 'hmis:SourceID/@hmis:DeleteOccurredDate'
xpSourceDeleteEffective = 'hmis:SourceID/@hmis:DeleteEffective'
xpSourceSoftwareVendor = 'hmis:SoftwareVendor'
xpSourceSoftwareVersion = 'hmis:SoftwareVersion'
xpSourceContactEmail = 'hmis:SourceContactEmail'
xpSourceContactExtension = 'hmis:SourceContactExtension'
xpSourceContactFirst = 'hmis:SourceContactFirst'
xpSourceContactLast = 'hmis:SourceContactLast'
xpSourceContactPhone = 'hmis:SourceContactPhone'
xpSourceName = 'hmis:SourceName'
#xp_source_exports = 'hmis:Export'
''' Map elements to database columns '''
existence_test_and_add(self, 'schema_version', item.xpath(xpSourceVersion, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'source_id_id_num', item.xpath(xpSourceIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_id_id_str', item.xpath(xpSourceIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_id_id_delete', item.xpath(xpSourceDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'source_id_id_delete_occurred_date', item.xpath(xpSourceDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'source_id_id_delete_effective_date', item.xpath(xpSourceDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'software_vendor', item.xpath(xpSourceSoftwareVendor, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'software_version', item.xpath(xpSourceSoftwareVersion, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_contact_email', item.xpath(xpSourceContactEmail, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_contact_extension', item.xpath(xpSourceContactExtension, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_contact_first', item.xpath(xpSourceContactFirst, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_contact_last', item.xpath(xpSourceContactLast, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_contact_phone', item.xpath(xpSourceContactPhone, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_name', item.xpath(xpSourceName, namespaces = self.nsmap), 'text')
source_id_str = item.xpath(xpSourceIDIDStr, namespaces = self.nsmap)
source_id_num = item.xpath(xpSourceIDIDNum, namespaces = self.nsmap)
if source_id_str:
#source_id = source_id_str[0].text
existence_test_and_add(self, 'source_id', source_id_str, 'text')
elif source_id_num:
#source_id = source_id_num[0].text
existence_test_and_add(self, 'source_id', source_id_num, 'text')
''' Shred to database '''
# keep a list of source ids as they are discovered
source_id = shred(self, self.parse_dict, Source)
if source_id != None:
source_ids.append(source_id)
#print "self.source_index_id is: ", self.source_index_id
# ''' Parse all exports for this specific source '''
# parse_export(self, item)
return source_ids
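# parse_export prefers the numeric ExportID (IDNum) and falls back to IDStr, stores
# it on self.export_id, shreds the export row, links it back to its source, and then
# dispatches the sub-parsers for households, regions, agencies, persons, services,
# sites and site services.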
def parse_export(self, element):
''' loop through all exports and traverse the tree '''
''' Element paths '''
xpExport = 'hmis:Export'
xpExportIDIDNum = 'hmis:ExportID/hmis:IDNum'
xpExportIDIDStr = 'hmis:ExportID/hmis:IDStr'
xpExportDelete = 'hmis:ExportID/@hmis:delete'
xpExportDeleteOccurredDate = 'hmis:ExportID/@hmis:deleteOccurredDate'
xpExportDeleteEffective = 'hmis:ExportID/@hmis:deleteEffective'
xpExportExportDate = 'hmis:ExportDate'
xpExportPeriodStartDate = 'hmis:ExportPeriod/hmis:StartDate'
xpExportPeriodEndDate = 'hmis:ExportPeriod/hmis:EndDate'
itemElements = element.xpath(xpExport, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
test = item.xpath(xpExportIDIDNum, namespaces = self.nsmap)
if len(test) == 0:
test = item.xpath(xpExportIDIDStr, namespaces = self.nsmap)
self.export_id = test
existence_test_and_add(self, 'export_id', test, 'text')
else:
self.export_id = test
existence_test_and_add(self, 'export_id', test, 'text')
existence_test_and_add(self, 'export_id_id_num', item.xpath(xpExportIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'export_id_id_str', item.xpath(xpExportIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'export_id_delete', item.xpath(xpExportDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'export_id_delete_occurred_date', item.xpath(xpExportDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'export_id_delete_effective_date', item.xpath(xpExportDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'export_date', item.xpath(xpExportExportDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'export_period_start_date', item.xpath(xpExportPeriodStartDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'export_period_end_date', item.xpath(xpExportPeriodEndDate, namespaces = self.nsmap), 'element_date')
''' Shred to database '''
shred(self, self.parse_dict, Export)
''' Create source to export link '''
record_source_export_link(self)
''' Parse sub-tables '''
parse_household(self, item)
parse_region(self, item)
parse_agency(self, item)
parse_person(self, item)
parse_service(self, item)
parse_site(self, item)
parse_site_service(self, item)
return
def parse_household(self, element):
''' Element paths '''
xpHousehold = 'hmis:Household'
xpHouseholdIDIDNum = 'hmis:HouseholdID/hmis:IDNum'
xpHouseholdIDIDStr = 'hmis:HouseholdID/hmis:IDStr'
xpHeadOfHouseholdIDUnhashed = 'hmis:HeadOfHouseholdID/hmis:Unhashed'
xpHeadOfHouseholdIDHashed = 'hmis:HeadOfHouseholdID/hmis:Hashed'
itemElements = element.xpath(xpHousehold, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'household_id_num', item.xpath(xpHouseholdIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_id_str', item.xpath(xpHouseholdIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'head_of_household_id_unhashed', item.xpath(xpHeadOfHouseholdIDUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'head_of_household_id_hashed', item.xpath(xpHeadOfHouseholdIDHashed, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Household)
''' Parse sub-tables '''
parse_members(self, item)
def parse_members(self, element):
''' Element paths '''
xpMembers = 'hmis:Members'
xpMember = 'hmis:Member'
xpPersonIDUnhashed = 'hmis:PersonID/hmis:Unhashed'
xpPersonIDHashed = 'hmis:PersonID/hmis:Hashed'
xpRelationshipToHeadOfHousehold = 'hmis:RelationshipToHeadOfHousehold'
test = element.xpath(xpMembers, namespaces = self.nsmap)
if len(test) > 0:
itemElements = test[0].xpath(xpMember, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'person_id_unhashed', item.xpath(xpPersonIDUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_id_hashed', item.xpath(xpPersonIDHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'relationship_to_head_of_household', item.xpath(xpRelationshipToHeadOfHousehold, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'household_index_id', self.household_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Members)
def parse_region(self, element):
''' Element paths '''
xpRegion = 'hmis:Region'
xpRegionIDIDNum = 'hmis:RegionID/hmis:IDNum'
xpRegionIDIDStr = 'hmis:RegionID/hmis:IDStr'
xpSiteServiceID = 'hmis:SiteServiceID'
xpRegionType = 'hmis:RegionType'
xpRegionTypeDateCollected = 'hmis:RegionType/@hmis:dateCollected'
xpRegionTypeDateEffective = 'hmis:RegionType/@hmis:dateEffective'
xpRegionTypeDataCollectionStage = 'hmis:RegionType/@hmis:dataCollectionStage'
xpRegionDescription = 'hmis:RegionDescription'
xpRegionDescriptionDateCollected = 'hmis:RegionDescription/@hmis:dateCollected'
xpRegionDescriptionDateEffective = 'hmis:RegionDescription/@hmis:dateEffective'
xpRegionDescriptionDataCollectionStage = 'hmis:RegionDescription/@hmis:dataCollectionStage'
itemElements = element.xpath(xpRegion, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'region_id_id_num', item.xpath(xpRegionIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'region_id_id_str', item.xpath(xpRegionIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_service_id', item.xpath(xpSiteServiceID, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'region_type', item.xpath(xpRegionType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'region_type_date_collected', item.xpath(xpRegionTypeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'region_type_date_effective', item.xpath(xpRegionTypeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'region_type_data_collection_stage', item.xpath(xpRegionTypeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'region_description', item.xpath(xpRegionDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'region_description_date_collected', item.xpath(xpRegionDescriptionDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'region_description_date_effective', item.xpath(xpRegionDescriptionDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'region_description_data_collection_stage', item.xpath(xpRegionDescriptionDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Region)
def parse_agency(self, element):
''' Element paths '''
xpAgency = 'hmis:Agency'
xpAgencyDelete = './@delete'
xpAgencyDeleteOccurredDate = './@deleteOccurredDate'
xpAgencyDeleteEffective = './@deleteEffective'
xpAirsKey = 'airs:Key'
xpAirsName = 'airs:Name'
xpAgencyDescription = 'airs:AgencyDescription'
xpIRSStatus = 'airs:IRSStatus'
xpSourceOfFunds = 'airs:SourceOfFunds'
#xpRecordOwner = '@hmis:RecordOwner'
xpRecordOwner = './@RecordOwner'
#xpFEIN = '@hmis:FEIN'
xpFEIN = './@FEIN'
xpYearInc = './@YearInc'
xpAnnualBudgetTotal = './@AnnualBudgetTotal'
xpLegalStatus = './@LegalStatus'
xpExcludeFromWebsite = './@ExcludeFromWebsite'
xpExcludeFromDirectory = './@ExcludeFromDirectory'
itemElements = element.xpath(xpAgency, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'agency_delete', item.xpath(xpAgencyDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'agency_delete_occurred_date', item.xpath(xpAgencyDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'agency_delete_effective_date', item.xpath(xpAgencyDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'airs_key', item.xpath(xpAirsKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'airs_name', item.xpath(xpAirsName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'agency_description', item.xpath(xpAgencyDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'irs_status', item.xpath(xpIRSStatus, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_of_funds', item.xpath(xpSourceOfFunds, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'fein', item.xpath(xpFEIN, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'record_owner', item.xpath(xpRecordOwner, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'year_inc', item.xpath(xpYearInc, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'annual_budget_total', item.xpath(xpAnnualBudgetTotal, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'legal_status', item.xpath(xpLegalStatus, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_website', item.xpath(xpExcludeFromWebsite, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_directory', item.xpath(xpExcludeFromDirectory, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Agency)
''' Parse sub-tables '''
parse_agency_service(self, item)
parse_aka(self, item)
# SBB20100907 Missing, adding back in.
parse_agency_location(self, item)
# remove this once done with routine, shouldn't pollute keys for other values being parsed
self.agency_location_index_id = None
parse_phone(self, item)
parse_url(self, item)
parse_email(self, item)
parse_contact(self, item)
parse_license_accreditation(self, item)
parse_service_group(self, item)
# need to reset the contact index once we start processing a site element (because site can have their own contacts)
self.contact_index_id = None
parse_site(self, item)
# SBB20100907 clear out the agency primary key - fouling up other parsers
self.site_index_id = None
parse_resource_info(self, item)
# SBB20100907 clear out the agency primary key - fouling up other parsers
# done with Agency, clear out the agency primary key, don't want it floating down to other elements.
self.agency_index_id = None
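# Note the index-id bookkeeping above: foreign-key ids (agency_index_id,
# site_index_id, contact_index_id, ...) live on self while a parent element is being
# parsed and are reset to None once its sub-parsers finish, so they do not leak into
# sibling elements parsed later.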
def parse_person(self, element):
''' Element paths '''
xpPerson = 'hmis:Person'
xpPersonIDNum = 'hmis:PersonID/hmis:IDNum'
xpPersonIDStr = 'hmis:PersonID/hmis:IDStr'
xpPersonIDDeleteOccurredDate = 'hmis:PersonID/@hmis:DeleteOccurredDate'
xpPersonIDDeleteEffective = 'hmis:PersonID/@hmis:DeleteEffective'
xpPersonDelete = 'hmis:PersonID/@hmis:Delete'
xpPersonDateOfBirthHashed = 'hmis:DateOfBirth/hmis:Hashed'
xpPersonDateOfBirthHashedDateCollected = 'hmis:DateOfBirth/hmis:Hashed/@hmis:dateCollected'
xpPersonDateOfBirthUnhashed = 'hmis:DateOfBirth/hmis:Unhashed'
xpPersonDateOfBirthUnhashedDateCollected = 'hmis:DateOfBirth/hmis:Unhashed/@hmis:dateCollected'
xpPersonDateOfBirthType = 'hmis:DateOfBirth/hmis:DateOfBirthType'
xpPersonDateOfBirthTypeDateCollected = 'hmis:DateOfBirth/hmis:DateOfBirthType/@hmis:dateCollected'
xpPersonEthnicityHashedDateCollected = 'hmis:Ethnicity/hmis:Hashed/@hmis:dateCollected'
xpPersonEthnicityUnhashedDateCollected = 'hmis:Ethnicity/hmis:Unhashed/@hmis:dateCollected'
xpPersonEthnicityHashed = 'hmis:Ethnicity/hmis:Hashed'
xpPersonEthnicityUnhashed = 'hmis:Ethnicity/hmis:Unhashed'
xpPersonGenderHashed = 'hmis:Gender/hmis:Hashed'
xpPersonGenderUnhashed = 'hmis:Gender/hmis:Unhashed'
xpPersonGenderHashedDateCollected = 'hmis:Gender/hmis:Hashed/@hmis:dateCollected'
xpPersonGenderUnhashedDateCollected = 'hmis:Gender/hmis:Unhashed/@hmis:dateCollected'
xpPersonGenderHashedDateEffective = 'hmis:Gender/hmis:Hashed/@hmis:dateEffective'
xpPersonGenderUnhashedDateEffective = 'hmis:Gender/hmis:Unhashed/@hmis:dateEffective'
xpPersonLegalFirstNameHashed = 'hmis:LegalFirstName/hmis:Hashed'
xpPersonLegalFirstNameUnhashed = 'hmis:LegalFirstName/hmis:Unhashed'
xpPersonLegalFirstNameHashedDateEffective = 'hmis:LegalFirstName/hmis:Hashed/@hmis:dateEffective'
xpPersonLegalFirstNameUnhashedDateEffective = 'hmis:LegalFirstName/hmis:Unhashed/@hmis:dateEffective'
xpPersonLegalFirstNameHashedDateCollected = 'hmis:LegalFirstName/hmis:Hashed/@hmis:dateCollected'
xpPersonLegalFirstNameUnhashedDateCollected = 'hmis:LegalFirstName/hmis:Unhashed/@hmis:dateCollected'
xpPersonLegalLastNameHashed = 'hmis:LegalLastName/hmis:Hashed'
xpPersonLegalLastNameUnhashed = 'hmis:LegalLastName/hmis:Unhashed'
xpPersonLegalLastNameHashedDateEffective = 'hmis:LegalLastName/hmis:Hashed/@hmis:dateEffective'
xpPersonLegalLastNameUnhashedDateEffective = 'hmis:LegalLastName/hmis:Unhashed/@hmis:dateEffective'
xpPersonLegalLastNameHashedDateCollected = 'hmis:LegalLastName/hmis:Hashed/@hmis:dateCollected'
xpPersonLegalLastNameUnhashedDateCollected = 'hmis:LegalLastName/hmis:Unhashed/@hmis:dateCollected'
xpPersonLegalMiddleNameHashed = 'hmis:LegalMiddleName/hmis:Hashed'
xpPersonLegalMiddleNameUnhashed = 'hmis:LegalMiddleName/hmis:Unhashed'
xpPersonLegalMiddleNameHashedDateEffective = 'hmis:LegalMiddleName/hmis:Hashed/@hmis:dateEffective'
xpPersonLegalMiddleNameUnhashedDateEffective = 'hmis:LegalMiddleName/hmis:Unhashed/@hmis:dateEffective'
xpPersonLegalMiddleNameHashedDateCollected = 'hmis:LegalMiddleName/hmis:Hashed/@hmis:dateCollected'
xpPersonLegalMiddleNameUnhashedDateCollected = 'hmis:LegalMiddleName/hmis:Unhashed/@hmis:dateCollected'
xpPersonLegalSuffixHashed = 'hmis:LegalSuffix/hmis:Hashed'
xpPersonLegalSuffixUnhashed = 'hmis:LegalSuffix/hmis:Unhashed'
xpPersonLegalSuffixHashedDateEffective = 'hmis:LegalSuffix/hmis:Hashed/@hmis:dateEffective'
xpPersonLegalSuffixUnhashedDateEffective = 'hmis:LegalSuffix/hmis:Unhashed/@hmis:dateEffective'
xpPersonLegalSuffixHashedDateCollected = 'hmis:LegalSuffix/hmis:Hashed/@hmis:dateCollected'
xpPersonLegalSuffixUnhashedDateCollected = 'hmis:LegalSuffix/hmis:Unhashed/@hmis:dateCollected'
xpPersonSocialSecurityNumberHashed = 'hmis:SocialSecurityNumber/hmis:Hashed'
xpPersonSocialSecurityNumberUnhashed = 'hmis:SocialSecurityNumber/hmis:Unhashed'
xpPersonSocialSecurityNumberHashedDateCollected = 'hmis:SocialSecurityNumber/hmis:Hashed/@hmis:dateCollected'
xpPersonSocialSecurityNumberUnhashedDateCollected = 'hmis:SocialSecurityNumber/hmis:Unhashed/@hmis:dateCollected'
xpPersonSocialSecurityNumberHashedDateEffective = 'hmis:SocialSecurityNumber/hmis:Hashed/@hmis:dateEffective'
xpPersonSocialSecurityNumberUnhashedDateEffective = 'hmis:SocialSecurityNumber/hmis:Unhashed/@hmis:dateEffective'
xpPersonSocialSecurityNumberQualityCode = 'hmis:SocialSecurityNumber/hmis:SocialSecNumberQualityCode'
xpPersonSocialSecurityNumberQualityCodeDateEffective = 'hmis:SocialSecurityNumber/hmis:SocialSecNumberQualityCode/@hmis:dateEffective'
xpPersonSocialSecurityNumberQualityCodeDateCollected = 'hmis:SocialSecurityNumber/hmis:SocialSecNumberQualityCode/@hmis:dateCollected'
itemElements = element.xpath(xpPerson, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'person_id_id_num', item.xpath(xpPersonIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_id_id_str', item.xpath(xpPersonIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_id_delete_occurred_date', item.xpath(xpPersonIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_id_delete_effective_date', item.xpath(xpPersonIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_id_delete', item.xpath(xpPersonDelete, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_date_of_birth_hashed', item.xpath(xpPersonDateOfBirthHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_date_of_birth_hashed_date_collected', item.xpath(xpPersonDateOfBirthHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_date_of_birth_unhashed', item.xpath(xpPersonDateOfBirthUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_date_of_birth_unhashed_date_collected', item.xpath(xpPersonDateOfBirthUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_date_of_birth_type', item.xpath(xpPersonDateOfBirthType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_date_of_birth_type_date_collected', item.xpath(xpPersonDateOfBirthTypeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_ethnicity_hashed', item.xpath(xpPersonEthnicityHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_ethnicity_unhashed', item.xpath(xpPersonEthnicityUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_ethnicity_unhashed_date_collected', item.xpath(xpPersonEthnicityUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_ethnicity_hashed_date_collected', item.xpath(xpPersonEthnicityHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_gender_hashed', item.xpath(xpPersonGenderHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_gender_unhashed', item.xpath(xpPersonGenderUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_gender_unhashed_date_collected', item.xpath(xpPersonGenderUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_gender_hashed_date_collected', item.xpath(xpPersonGenderHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_gender_unhashed_date_effective', item.xpath(xpPersonGenderUnhashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_gender_hashed_date_effective', item.xpath(xpPersonGenderHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_first_name_hashed', item.xpath(xpPersonLegalFirstNameHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_first_name_unhashed', item.xpath(xpPersonLegalFirstNameUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_first_name_hashed_date_collected', item.xpath(xpPersonLegalFirstNameHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_first_name_unhashed_date_collected', item.xpath(xpPersonLegalFirstNameUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_first_name_hashed_date_effective', item.xpath(xpPersonLegalFirstNameHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_first_name_unhashed_date_effective', item.xpath(xpPersonLegalFirstNameUnhashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_last_name_hashed', item.xpath(xpPersonLegalLastNameHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_last_name_unhashed', item.xpath(xpPersonLegalLastNameUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_last_name_hashed_date_collected', item.xpath(xpPersonLegalLastNameHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_last_name_unhashed_date_collected', item.xpath(xpPersonLegalLastNameUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_last_name_hashed_date_effective', item.xpath(xpPersonLegalLastNameHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_last_name_unhashed_date_effective', item.xpath(xpPersonLegalLastNameUnhashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_middle_name_hashed', item.xpath(xpPersonLegalMiddleNameHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_middle_name_unhashed', item.xpath(xpPersonLegalMiddleNameUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_middle_name_hashed_date_collected', item.xpath(xpPersonLegalMiddleNameHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_middle_name_unhashed_date_collected', item.xpath(xpPersonLegalMiddleNameUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_middle_name_hashed_date_effective', item.xpath(xpPersonLegalMiddleNameHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_middle_name_unhashed_date_effective', item.xpath(xpPersonLegalMiddleNameUnhashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_suffix_hashed', item.xpath(xpPersonLegalSuffixHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_suffix_unhashed', item.xpath(xpPersonLegalSuffixUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_legal_suffix_hashed_date_collected', item.xpath(xpPersonLegalSuffixHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_suffix_unhashed_date_collected', item.xpath(xpPersonLegalSuffixUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_suffix_hashed_date_effective', item.xpath(xpPersonLegalSuffixHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_legal_suffix_unhashed_date_effective', item.xpath(xpPersonLegalSuffixUnhashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_social_security_number_unhashed', item.xpath(xpPersonSocialSecurityNumberUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_social_security_number_hashed', item.xpath(xpPersonSocialSecurityNumberHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_social_security_number_hashed_date_collected', item.xpath(xpPersonSocialSecurityNumberHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_social_security_number_unhashed_date_collected', item.xpath(xpPersonSocialSecurityNumberUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_social_security_number_hashed_date_effective', item.xpath(xpPersonSocialSecurityNumberHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_social_security_number_unhashed_date_effective', item.xpath(xpPersonSocialSecurityNumberUnhashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_social_security_number_quality_code', item.xpath(xpPersonSocialSecurityNumberQualityCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_social_security_number_quality_code_date_effective', item.xpath(xpPersonSocialSecurityNumberQualityCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_social_security_number_quality_code_date_collected', item.xpath(xpPersonSocialSecurityNumberQualityCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Person)
''' Parse sub-tables '''
parse_site_service_participation(self, item)
parse_need(self, item)
parse_service_event(self, item, namespace='hmis:')
parse_person_historical(self, item)
parse_release_of_information(self, item)
parse_other_names(self, item)
parse_races(self, item)
def parse_service(self, element):
''' Element paths '''
xpService = 'hmis:Service'
xpServiceDeleteOccurredDate = '@hmis:DeleteOccurredDate'
xpServiceDeleteEffective = '@hmis:DeleteEffective'
xpServiceDelete = '@hmis:Delete'
xpAirsKey = 'airs:Key'
xpAirsAgencyKey = 'airs:AgencyKey'
xpAirsName = 'airs:Name'
xpCOCCode = 'hmis:COCCode'
xpConfiguration = 'hmis:Configuration'
xpDirectServiceCode = 'hmis:DirectServiceCode'
xpGranteeIdentifier = 'hmis:GranteeIdentifier'
xpIndividualFamilyCode = 'hmis:IndividualFamilyCode'
xpResidentialTrackingMethod = 'hmis:ResidentialTrackingMethod'
xpServiceType = 'hmis:ServiceType'
xpServiceEffectivePeriodStartDate = 'hmis:ServiceEffectivePeriod/hmis:StartDate'
xpServiceEffectivePeriodEndDate = 'hmis:ServiceEffectivePeriod/hmis:EndDate'
xpServiceRecordedDate = 'hmis:ServiceRecordedDate'
xpTargetPopulationA = 'hmis:TargetPopulationA'
xpTargetPopulationB = 'hmis:TargetPopulationB'
itemElements = element.xpath(xpService, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'service_delete_occurred_date', item.xpath(xpServiceDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'service_delete_effective_date', item.xpath(xpServiceDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'service_delete', item.xpath(xpServiceDelete, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'airs_key', item.xpath(xpAirsKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'residential_tracking_method', item.xpath(xpAirsAgencyKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'airs_name', item.xpath(xpAirsName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'coc_code', item.xpath(xpCOCCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'configuration', item.xpath(xpConfiguration, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'direct_service_code', item.xpath(xpDirectServiceCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'grantee_identifier', item.xpath(xpGranteeIdentifier, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'individual_family_code', item.xpath(xpIndividualFamilyCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'residential_tracking_method', item.xpath(xpResidentialTrackingMethod, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_type', item.xpath(xpServiceType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_effective_period_start_date', item.xpath(xpServiceEffectivePeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_effective_period_end_date', item.xpath(xpServiceEffectivePeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_recorded_date', item.xpath(xpServiceRecordedDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'target_population_a', item.xpath(xpTargetPopulationA, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'target_population_b', item.xpath(xpTargetPopulationB, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Service)
''' Parse sub-tables '''
parse_funding_source(self, item)
parse_inventory(self, item)
def parse_site(self, element):
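''' Parses airs:Site elements: maps the site key/name, description, physical and mailing address
blocks and the Site-level attributes into parse_dict, shreds each record to the Site table, then
parses the site sub-tables (URL, SpatialLocation, OtherAddress, CrossStreet, AKA, SiteService,
Languages, TimeOpen, Inventory, Email, Phone, Contact). '''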
''' Element paths '''
xpSite = 'airs:Site'
xpSiteDeleteOccurredDate = '@airs:DeleteOccurredDate'
xpSiteDeleteEffective = '@airs:DeleteEffective'
xpSiteDelete = '@airs:Delete'
xpKey = 'airs:Key'
xpName = 'airs:Name'
xpSiteDescription = 'airs:SiteDescription'
xpPhysicalAddressPreAddressLine = 'airs:PhysicalAddress/airs:PreAddressLine'
xpPhysicalAddressLine1 = 'airs:PhysicalAddress/airs:Line1'
xpPhysicalAddressLine2 = 'airs:PhysicalAddress/airs:Line2'
xpPhysicalAddressCity = 'airs:PhysicalAddress/airs:City'
xpPhysicalAddressCounty = 'airs:PhysicalAddress/airs:County'#IGNORE:@UnusedVariable
xpPhysicalAddressState = 'airs:PhysicalAddress/airs:State'
xpPhysicalAddressZipCode = 'airs:PhysicalAddress/airs:ZipCode'
xpPhysicalAddressCountry = 'airs:PhysicalAddress/airs:Country'
xpPhysicalAddressReasonWithheld = 'airs:PhysicalAddress/airs:ReasonWithheld'
xpPhysicalAddressConfidential = 'airs:PhysicalAddress/@airs:Confidential'
xpPhysicalAddressDescription = 'airs:PhysicalAddress/@airs:Description'
xpMailingAddressPreAddressLine = 'airs:MailingAddress/airs:PreAddressLine'
xpMailingAddressLine1 = 'airs:MailingAddress/airs:Line1'
xpMailingAddressLine2 = 'airs:MailingAddress/airs:Line2'
xpMailingAddressCity = 'airs:MailingAddress/airs:City'
xpMailingAddressCounty = 'airs:MailingAddress/airs:County'#IGNORE:@UnusedVariable
xpMailingAddressState = 'airs:MailingAddress/airs:State'
xpMailingAddressZipCode = 'airs:MailingAddress/airs:ZipCode'
xpMailingAddressCountry = 'airs:MailingAddress/airs:Country'
xpMailingAddressReasonWithheld = 'airs:MailingAddress/airs:ReasonWithheld'
xpMailingAddressConfidential = 'airs:MailingAddress/@airs:Confidential'
xpMailingAddressDescription = 'airs:MailingAddress/@airs:Description'
xpNoPhysicalAddressDescription = 'airs:NoPhysicalAddress/airs:Description'
xpNoPhysicalAddressExplanation = 'airs:NoPhysicalAddress/airs:Explanation'
xpDisabilitiesAccess = 'airs:DisabilitiesAccess'
xpPhysicalLocationDescription = 'airs:PhysicalLocationDescription'
xpBusServiceAccess = 'airs:BusServiceAccess'
xpPublicAccessToTransportation = "../%s/%s" % (xpSite, '@PublicAccessToTransportation')
xpYearInc = "../%s/%s" % (xpSite, '@YearInc')
xpAnnualBudgetTotal = "../%s/%s" % (xpSite, '@AnnualBudgetTotal')
xpLegalStatus = "../%s/%s" % (xpSite, '@LegalStatus')
xpExcludeFromWebsite = "../%s/%s" % (xpSite, '@ExcludeFromWebsite')
xpExcludeFromDirectory = "../%s/%s" % (xpSite, '@ExcludeFromDirectory')
xpAgencyKey = 'airs:AgencyKey'
itemElements = element.xpath(xpSite, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'site_delete_occurred_date', item.xpath(xpSiteDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'site_delete_effective_date', item.xpath(xpSiteDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'site_delete', item.xpath(xpSiteDelete, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'airs_key', item.xpath(xpKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'airs_name', item.xpath(xpName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_description', item.xpath(xpSiteDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_pre_address_line', item.xpath(xpPhysicalAddressPreAddressLine, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_line_1', item.xpath(xpPhysicalAddressLine1, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_line_2', item.xpath(xpPhysicalAddressLine2, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_city', item.xpath(xpPhysicalAddressCity, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_state', item.xpath(xpPhysicalAddressState, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_zip_code', item.xpath(xpPhysicalAddressZipCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_country', item.xpath(xpPhysicalAddressCountry, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_reason_withheld', item.xpath(xpPhysicalAddressReasonWithheld, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_confidential', item.xpath(xpPhysicalAddressConfidential, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_description', item.xpath(xpPhysicalAddressDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_pre_address_line', item.xpath(xpMailingAddressPreAddressLine, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_line_1', item.xpath(xpMailingAddressLine1, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_line_2', item.xpath(xpMailingAddressLine2, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_city', item.xpath(xpMailingAddressCity, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_state', item.xpath(xpMailingAddressState, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_zip_code', item.xpath(xpMailingAddressZipCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_country', item.xpath(xpMailingAddressCountry, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_reason_withheld', item.xpath(xpMailingAddressReasonWithheld, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_confidential', item.xpath(xpMailingAddressConfidential, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_description', item.xpath(xpMailingAddressDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'no_physical_address_description', item.xpath(xpNoPhysicalAddressDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'no_physical_address_explanation', item.xpath(xpNoPhysicalAddressExplanation, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'disabilities_access', item.xpath(xpDisabilitiesAccess, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_location_description', item.xpath(xpPhysicalLocationDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'bus_service_access', item.xpath(xpBusServiceAccess, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'public_access_to_transportation', item.xpath(xpPublicAccessToTransportation, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'year_inc', item.xpath(xpYearInc, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'annual_budget_total', item.xpath(xpAnnualBudgetTotal, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'legal_status', item.xpath(xpLegalStatus, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_website', item.xpath(xpExcludeFromWebsite, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_directory', item.xpath(xpExcludeFromDirectory, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'agency_key', item.xpath(xpAgencyKey, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Site)
''' Parse sub-tables '''
parse_url(self, item)
parse_spatial_location(self, item)
parse_other_address(self, item)
parse_cross_street(self, item)
parse_aka(self, item)
parse_site_service(self, item)
parse_languages(self, item)
parse_time_open(self, item)
parse_inventory(self, item)
#parse_contact(self, item)
parse_email(self, item)
parse_phone(self, item)
# SBB20100907 moved until after email and phone (which are part of the site record; contact drives its own searches for email and phone (of the contact))
parse_contact(self, item)
# SBB20100916 Adding namespace: SiteService can appear in both the hmis and airs namespaces, so the namespace is passed in by the calling context (defaults to hmis; callers override it with airs)
def parse_site_service(self, element, namespace='hmis'):
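''' Parses SiteService elements in the namespace supplied by the caller (hmis by default, airs when
called from the AIRS side): maps the service/site IDs, geographic code, housing type and effective
period into parse_dict, shreds each record to the SiteService table, then parses the site-service
sub-tables (seasonal, requirements, PIT count sets, inventory, taxonomy, resource info, etc.). '''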
''' Element paths '''
xpSiteService = '%s:SiteService' % namespace
xpSiteServiceDeleteOccurredDate = '@airs:DeleteOccurredDate'
xpSiteServiceDeleteEffective = '@airs:DeleteEffective'
xpSiteServiceDelete = '@airs:Delete'
xpName = 'airs:Name'
xpKey = 'airs:Key'
xpDescription = 'airs:Description'
xpFeeStructure = 'airs:FeeStructure'
xpGenderRequirements = 'airs:GenderRequirements'
xpAreaFlexibility = "../%s/@%s" % (xpSiteService, 'AreaFlexibility')
xpServiceNotAlwaysAvailable = "../%s/@%s" % (xpSiteService, 'ServiceNotAlwaysAvailable')
xpServiceGroupKey = "../%s/@%s" % (xpSiteService, 'ServiceGroupKey')
xpServiceID = 'airs:ServiceID'
xpSiteID = 'airs:SiteID'
xpGeographicCode = 'airs:GeographicCode'
xpGeographicCodeDateCollected = 'hmis:GeographicCode/@hmis:dateCollected'
xpGeographicCodeDateEffective = 'hmis:GeographicCode/@hmis:dateEffective'
xpGeographicCodeDataCollectionStage = 'hmis:GeographicCode/@hmis:dataCollectionStage'
xpHousingType = 'airs:HousingType'
xpHousingTypeDateCollected = 'hmis:HousingType/@hmis:dateCollected'
xpHousingTypeDateEffective = 'hmis:HousingType/@hmis:dateEffective'
xpHousingTypeDataCollectionStage = 'hmis:HousingType/@hmis:dataCollectionStage'
xpPrincipal = 'airs:Principal'
xpSiteServiceEffectivePeriodStartDate = 'airs:SiteServiceEffectivePeriod/hmis:StartDate'
xpSiteServiceEffectivePeriodEndDate = 'airs:SiteServiceEffectivePeriod/hmis:EndDate'
xpSiteServiceRecordedDate = 'airs:SiteServiceRecordedDate'
xpSiteServiceType = 'airs:SiteServiceType'
itemElements = element.xpath(xpSiteService, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'site_service_delete_occurred_date', item.xpath(xpSiteServiceDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'site_service_delete_effective_date', item.xpath(xpSiteServiceDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'site_service_delete', item.xpath(xpSiteServiceDelete, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'name', item.xpath(xpName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'key', item.xpath(xpKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'fee_structure', item.xpath(xpFeeStructure, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'gender_requirements', item.xpath(xpGenderRequirements, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'area_flexibility', item.xpath(xpAreaFlexibility, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'service_not_always_available', item.xpath(xpServiceNotAlwaysAvailable, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'service_group_key', item.xpath(xpServiceGroupKey, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'service_id', item.xpath(xpServiceID, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_id', item.xpath(xpSiteID, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'geographic_code', item.xpath(xpGeographicCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'geographic_code_date_collected', item.xpath(xpGeographicCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'geographic_code_date_effective', item.xpath(xpGeographicCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'geographic_code_data_collection_stage', item.xpath(xpGeographicCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'housing_type', item.xpath(xpHousingType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'housing_type_date_collected', item.xpath(xpHousingTypeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'housing_type_date_effective', item.xpath(xpHousingTypeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'housing_type_data_collection_stage', item.xpath(xpHousingTypeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'principal', item.xpath(xpPrincipal, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_service_effective_period_start_date', item.xpath(xpSiteServiceEffectivePeriodStartDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'site_service_effective_period_end_date', item.xpath(xpSiteServiceEffectivePeriodEndDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'site_service_recorded_date', item.xpath(xpSiteServiceRecordedDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_service_type', item.xpath(xpSiteServiceType, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
# SBB20100916 missing
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, SiteService)
''' Parse sub-tables '''
parse_seasonal(self, item)
parse_residency_requirements(self, item)
parse_pit_count_set(self, item)
parse_other_requirements(self, item)
parse_languages(self, item)
parse_time_open(self, item)
parse_inventory(self, item)
parse_income_requirements(self, item)
parse_hmis_asset(self, item)
parse_geographic_area_served(self, item)
parse_documents_required(self, item)
parse_aid_requirements(self, item)
parse_age_requirements(self, item)
parse_application_process(self, item)
parse_taxonomy(self, item)
parse_family_requirements(self, item)
parse_resource_info(self, item)
def parse_service_group(self, element):
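''' Parses airs:ServiceGroup elements: stores the key, name and program name against the current
agency and export, then shreds each record to the ServiceGroup table. '''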
''' Element paths '''
xpServiceGroup = 'airs:ServiceGroup'
xpAirsKey = 'airs:Key'
xpAirsName = 'airs:Name'
xpAirsAgencyKey = 'airs:ProgramName'
itemElements = element.xpath(xpServiceGroup, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'key', item.xpath(xpAirsKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'name', item.xpath(xpAirsName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'program_name', item.xpath(xpAirsAgencyKey, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ServiceGroup)
''' Parse sub-tables '''
def parse_license_accreditation(self, element):
''' Element paths '''
xpLicenseAccreditation = 'airs:LicenseAccreditation'
xpLicense = 'airs:License'
xpLicensedBy = 'airs:LicensedBy'
itemElements = element.xpath(xpLicenseAccreditation, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'license', item.xpath(xpLicense, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'licensed_by', item.xpath(xpLicensedBy, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, LicenseAccreditation)
''' Parse sub-tables '''
def parse_agency_service(self, element):
''' Element paths '''
xpAgencyService = 'airs:AgencyService'
xpAirsKey = 'airs:Key'
xpAgencyKey = 'airs:AgencyKey'
xpAgencyName = 'airs:Name'
itemElements = element.xpath(xpAgencyService, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'key', item.xpath(xpAirsKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'agency_key', item.xpath(xpAgencyKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'name', item.xpath(xpAgencyName, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, AgencyService)
''' Parse sub-tables '''
def parse_url(self, element):
''' Element paths '''
xpUrl = 'airs:URL'
xpAddress = 'airs:Address'
xpNote = 'airs:Note'
itemElements = element.xpath(xpUrl, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'address', item.xpath(xpAddress, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'note', item.xpath(xpNote, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Url)
''' Parse sub-tables '''
def parse_spatial_location(self, element):
''' Element paths '''
xpSpatialLocation = 'airs:SpatialLocation'
xpDescription = 'airs:Description'
xpDatum = 'airs:Datum'
xpLatitude = 'airs:Latitude'
xpLongitude = 'airs:Longitude'
itemElements = element.xpath(xpSpatialLocation, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'datum', item.xpath(xpDatum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'latitude', item.xpath(xpLatitude, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'longitude', item.xpath(xpLongitude, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try:existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, SpatialLocation)
''' Parse sub-tables '''
def parse_other_address(self, element):
''' Element paths '''
xpOtherAddress = 'airs:OtherAddress'
xpPreAddressLine = 'airs:PreAddressLine'
xpLine1 = 'airs:Line1'
xpLine2 = 'airs:Line2'
xpCity = 'airs:City'
xpCounty = 'airs:County'
xpState = 'airs:State'
xpZipCode = 'airs:ZipCode'
xpCountry = 'airs:Country'
xpReasonWithheld = 'airs:ReasonWithheld'
xpConfidential = "../%s/%s" % (xpOtherAddress, '@Confidential')
xpDescription = "../%s/%s" % (xpOtherAddress, '@Description')
itemElements = element.xpath(xpOtherAddress, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'pre_address_line', item.xpath(xpPreAddressLine, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'line_1', item.xpath(xpLine1, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'line_2', item.xpath(xpLine2, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'city', item.xpath(xpCity, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'county', item.xpath(xpCounty, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'state', item.xpath(xpState, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'zip_code', item.xpath(xpZipCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'country', item.xpath(xpCountry, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'reason_withheld', item.xpath(xpReasonWithheld, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'confidential', item.xpath(xpConfidential, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
# SBB20100916 missing
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, OtherAddress)
''' Parse sub-tables '''
def parse_cross_street(self, element):
''' Element paths '''
xpCrossStreet = 'airs:CrossStreet'
itemElements = element.xpath(xpCrossStreet, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'cross_street', item, 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
# SBB20100916 missing..
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, CrossStreet)
''' Parse sub-tables '''
# SBB20100914 adding in..missing
def parse_agency_location(self, element):
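''' Parses airs:AgencyLocation elements: maps the same name/address/description fields as a Site
plus the AgencyLocation attributes, shreds each record to the AgencyLocation table, then parses the
location sub-tables (AKA, OtherAddress, CrossStreet, Phone, URL, Email, Contact, TimeOpen,
Languages, SpatialLocation). '''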
''' Element paths '''
# base tag
xpAgencyLocation = 'airs:AgencyLocation'
xpKey = 'airs:Key'
xpName = 'airs:Name'#IGNORE:@UnusedVariable
xpSiteDescription = 'airs:SiteDescription'
xpPhysicalAddressPreAddressLine = 'airs:PhysicalAddress/airs:PreAddressLine'
xpPhysicalAddressLine1 = 'airs:PhysicalAddress/airs:Line1'
xpPhysicalAddressLine2 = 'airs:PhysicalAddress/airs:Line2'
xpPhysicalAddressCity = 'airs:PhysicalAddress/airs:City'
xpPhysicalAddressCounty = 'airs:PhysicalAddress/airs:County'#IGNORE:@UnusedVariable
xpPhysicalAddressState = 'airs:PhysicalAddress/airs:State'
xpPhysicalAddressZipCode = 'airs:PhysicalAddress/airs:ZipCode'
xpPhysicalAddressCountry = 'airs:PhysicalAddress/airs:Country'
xpPhysicalAddressReasonWithheld = 'airs:PhysicalAddress/airs:ReasonWithheld'
xpPhysicalAddressConfidential = "../%s/@%s" % ('airs:PhysicalAddress', 'Confidential')
xpPhysicalAddressDescription = "../%s/@%s" % ('airs:PhysicalAddress', 'Description')
xpMailingAddressPreAddressLine = 'airs:MailingAddress/airs:PreAddressLine'
xpMailingAddressLine1 = 'airs:MailingAddress/airs:Line1'
xpMailingAddressLine2 = 'airs:MailingAddress/airs:Line2'
xpMailingAddressCity = 'airs:MailingAddress/airs:City'
xpMailingAddressCounty = 'airs:MailingAddress/airs:County'#IGNORE:@UnusedVariable
xpMailingAddressState = 'airs:MailingAddress/airs:State'
xpMailingAddressZipCode = 'airs:MailingAddress/airs:ZipCode'
xpMailingAddressCountry = 'airs:MailingAddress/airs:Country'
xpMailingAddressReasonWithheld = 'airs:MailingAddress/airs:ReasonWithheld'
xpMailingAddressConfidential = "%s/@%s" % ('airs:MailingAddress', 'Confidential')
xpMailingAddressDescription = "%s/@%s" % ('airs:MailingAddress', 'Description')
xpNoPhysicalAddressDescription = 'airs:NoPhysicalAddress/airs:Description'
xpNoPhysicalAddressExplanation = 'airs:NoPhysicalAddress/airs:Explanation'
xpDisabilitiesAccess = 'airs:DisabilitiesAccess'
xpPhysicalLocationDescription = 'airs:PhysicalLocationDescription'
xpBusServiceAccess = 'airs:BusServiceAccess'
# attributes
xpPublicAccessToTransportation = "../%s/@%s" % (xpAgencyLocation, 'PublicAccessToTransportation')
xpYearInc = "../%s/@%s" % (xpAgencyLocation, 'YearInc')
xpAnnualBudgetTotal = "../%s/@%s" % (xpAgencyLocation, 'AnnualBudgetTotal')
xpLegalStatus = "../%s/@%s" % (xpAgencyLocation, 'LegalStatus')
xpExcludeFromWebsite = "../%s/@%s" % (xpAgencyLocation, 'ExcludeFromWebsite')
xpExcludeFromDirectory = "../%s/@%s" % (xpAgencyLocation, 'ExcludeFromDirectory')
xpName = 'airs:Name'
xpConfidential = 'airs:Confidential'#IGNORE:@UnusedVariable
xpDescription = 'airs:Description'#IGNORE:@UnusedVariable
itemElements = element.xpath(xpAgencyLocation, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'key', item.xpath(xpKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'name', item.xpath(xpName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_description', item.xpath(xpSiteDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_pre_address_line', item.xpath(xpPhysicalAddressPreAddressLine, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_line_1', item.xpath(xpPhysicalAddressLine1, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_line_2', item.xpath(xpPhysicalAddressLine2, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_city', item.xpath(xpPhysicalAddressCity, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_state', item.xpath(xpPhysicalAddressState, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_zip_code', item.xpath(xpPhysicalAddressZipCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_country', item.xpath(xpPhysicalAddressCountry, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_reason_withheld', item.xpath(xpPhysicalAddressReasonWithheld, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_confidential', item.xpath(xpPhysicalAddressConfidential, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_address_description', item.xpath(xpPhysicalAddressDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_pre_address_line', item.xpath(xpMailingAddressPreAddressLine, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_line_1', item.xpath(xpMailingAddressLine1, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_line_2', item.xpath(xpMailingAddressLine2, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_city', item.xpath(xpMailingAddressCity, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_state', item.xpath(xpMailingAddressState, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_zip_code', item.xpath(xpMailingAddressZipCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_country', item.xpath(xpMailingAddressCountry, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_reason_withheld', item.xpath(xpMailingAddressReasonWithheld, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mailing_address_confidential', item.xpath(xpMailingAddressConfidential, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'mailing_address_description', item.xpath(xpMailingAddressDescription, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'no_physical_address_description', item.xpath(xpNoPhysicalAddressDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'no_physical_address_explanation', item.xpath(xpNoPhysicalAddressExplanation, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'disabilities_access', item.xpath(xpDisabilitiesAccess, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'physical_location_description', item.xpath(xpPhysicalLocationDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'bus_service_access', item.xpath(xpBusServiceAccess, namespaces = self.nsmap), 'text')
# attributes
existence_test_and_add(self, 'public_access_to_transportation', item.xpath(xpPublicAccessToTransportation, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'year_inc', item.xpath(xpYearInc, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'annual_budget_total', item.xpath(xpAnnualBudgetTotal, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'legal_status', item.xpath(xpLegalStatus, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_website', item.xpath(xpExcludeFromWebsite, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_directory', item.xpath(xpExcludeFromDirectory, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, AgencyLocation)
''' Parse sub-tables '''
parse_aka(self, item)
# site_index_id doesn't exist yet when coming off agency_location, but parse_other_address expects it, so clear it here
self.site_index_id = None
parse_other_address(self, item)
parse_cross_street(self, item)
parse_phone(self, item)
parse_url(self, item)
parse_email(self, item)
parse_contact(self, item)
parse_time_open(self, item)
parse_languages(self, item)
# not working yet
#parse_site_service(self, item, 'airs')
parse_spatial_location(self, item)
# reset the contacts index (used inside agency location but should not flow back up to Agency)
self.contact_index_id = None
def parse_aka(self, element):
''' Element paths '''
xpAka = 'airs:AKA'
xpName = 'airs:Name'
xpConfidential = 'airs:Confidential'
xpDescription = 'airs:Description'
itemElements = element.xpath(xpAka, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'name', item.xpath(xpName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'confidential', item.xpath(xpConfidential, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
# SBB20100914 new...
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Aka)
''' Parse sub-tables '''
def parse_seasonal(self, element):
''' Element paths '''
xpSeasonal = 'airs:Seasonal'
xpDescription = 'airs:Description'
xpStartDate = 'airs:StartDate'
xpEndDate = 'airs:EndDate'
itemElements = element.xpath(xpSeasonal, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'start_date', item.xpath(xpStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'end_date', item.xpath(xpEndDate, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Seasonal)
''' Parse sub-tables '''
def parse_residency_requirements(self, element):
''' Element paths '''
xpResidencyRequirements = 'airs:ResidencyRequirements'
itemElements = element.xpath(xpResidencyRequirements, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'residency_requirements', item.xpath(xpResidencyRequirements, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, ResidencyRequirements)
''' Parse sub-tables '''
def parse_pit_count_set(self, element):
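''' Parses hmis:PITCountSet elements: maps the PITCountSetID, HUD waiver fields and the last
sheltered/unsheltered PIT count dates into parse_dict, shreds each record to the PitCountSet table,
then parses the individual PIT count values. '''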
''' Element paths '''
xpPitCountSet = 'hmis:PITCountSet'
xpPitCountSetIDNum = 'hmis:PitCountSetID/hmis:IDNum'
xpPitCountSetIDStr = 'hmis:PitCountSetID/hmis:IDStr'
xpPitCountSetIDDeleteOccurredDate = 'hmis:PitCountSetID/@hmis:deleteOccurredDate'
xpPitCountSetIDDeleteEffective = 'hmis:PitCountSetID/@hmis:deleteEffective'
xpPitCountSetIDDelete = 'hmis:PitCountSetID/@hmis:delete'
xpHUDWaiverReceived = 'hmis:HUDWaiverReceived'
xpHUDWaiverDate = 'hmis:HUDWaiverDate'
xpHUDWaiverEffectivePeriodStartDate = 'hmis:HUDWaiverEffectivePeriod/hmis:StartDate'
xpHUDWaiverEffectivePeriodEndDate = 'hmis:HUDWaiverEffectivePeriod/hmis:EndDate'
xpLastPITShelteredCountDate = 'hmis:LastPITShelteredCountDate'
xpLastPITUnshelteredCountDate = 'hmis:LastPITUnshelteredCountDate'
itemElements = element.xpath(xpPitCountSet, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'pit_count_set_id_id_num', item.xpath(xpPitCountSetIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pit_count_set_id_id_str', item.xpath(xpPitCountSetIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pit_count_set_id_delete_occurred_date', item.xpath(xpPitCountSetIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'pit_count_set_id_delete_effective_date', item.xpath(xpPitCountSetIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'pit_count_set_id_delete', item.xpath(xpPitCountSetIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'hud_waiver_received', item.xpath(xpHUDWaiverReceived, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hud_waiver_date', item.xpath(xpHUDWaiverDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hud_waiver_effective_period_start_date', item.xpath(xpHUDWaiverEffectivePeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hud_waiver_effective_period_end_date', item.xpath(xpHUDWaiverEffectivePeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'last_pit_sheltered_count_date', item.xpath(xpLastPITShelteredCountDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'last_pit_unsheltered_count_date', item.xpath(xpLastPITUnshelteredCountDate, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, PitCountSet)
''' Parse sub-tables '''
parse_pit_counts(self, item)
def parse_pit_counts(self, element):
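''' Parses hmis:PITCountValue elements for the current PITCountSet: stores the count value,
effective period, recorded date and household type, keyed to pit_count_set_index_id. '''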
''' Element paths '''
xpPITCountValue = 'hmis:PITCountValue'
XpPITCountEffectivePeriodStartDate = 'hmis:PITCountEffectivePeriod/hmis:StartDate'
XpPITCountEffectivePeriodEndDate = 'hmis:PITCountEffectivePeriod/hmis:EndDate'
xpPITCountRecordedDate = 'hmis:PITCountRecordedDate'
xpPITHouseholdType = 'hmis:pITHouseholdType'
itemElements = element.xpath(xpPITCountValue, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'pit_count_value', item.xpath(xpPITCountValue, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pit_count_effective_period_start_date', item.xpath(XpPITCountEffectivePeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pit_count_effective_period_end_date', item.xpath(XpPITCountEffectivePeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pit_count_recorded_date', item.xpath(xpPITCountRecordedDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pit_count_household_type', item.xpath(xpPITHouseholdType, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'pit_count_set_index_id', self.pit_count_set_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, PitCounts)
''' Parse sub-tables '''
def parse_other_requirements(self, element):
''' Element paths '''
xpOtherRequirements = 'airs:OtherRequirements'
itemElements = element.xpath(xpOtherRequirements, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'other_requirements', item.xpath(xpOtherRequirements, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, OtherRequirements)
''' Parse sub-tables '''
def parse_languages(self, element):
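''' Parses airs:Languages elements: pairs each airs:Name with its matching airs:Notes (a single
Languages element can list several languages), shreds each pair to the Languages table against
whichever parent (site, site service or agency location) is current, then parses TimeOpen under the
languages record. '''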
''' Element paths '''
xpLanguages = 'airs:Languages'
xpName = 'airs:Name'
xpNotes = 'airs:Notes'
itemElements = element.xpath(xpLanguages, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
# SBB20100915 Don't retrieve a single value here: there can be multiple Name/Notes pairs under the Languages element
# These are lists of values; iterate over them together to stuff each pair into the DB
valsName = item.xpath(xpName, namespaces = self.nsmap)
valsNotes = item.xpath(xpNotes, namespaces = self.nsmap)
# pair names with notes; there may be fewer notes than names
for idx, name in enumerate(valsName):
note = valsNotes[idx] if idx < len(valsNotes) else None
existence_test_and_add(self, 'name', name, 'text')
# test for missing
if note is not None:
existence_test_and_add(self, 'notes', note, 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Languages)
''' Parse sub-tables '''
parse_time_open(self, item)
def parse_time_open(self, element):
''' Unique method that has a second loop for each day of the week: stores the TimeOpen notes and parent foreign keys, then calls parse_time_open_day once per weekday '''
''' Element paths '''
xpTimeOpen = 'airs:TimeOpen'
xpNotes = 'airs:Notes'
itemElements = element.xpath(xpTimeOpen, namespaces={'airs': self.airs_namespace})
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
existence_test_and_add(self, 'notes', item.xpath(xpNotes, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'languages_index_id', self.languages_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, TimeOpen)
''' parse each specific day of week '''
weekDays = ('Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday')
for day in weekDays:
parse_time_open_day(self, item, day)
def parse_time_open_day(self, element, day):
''' Unique method -- loops over a single day of the week, storing the From/To times and the day name for the current TimeOpen record '''
''' Element Paths '''
xpFrom = 'airs:From'
xpTo = 'airs:To'
xpDay = 'airs:%s' % (day)
itemElements = element.xpath(xpDay, namespaces={'airs': self.airs_namespace})
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'from', item.xpath(xpFrom, namespaces={'airs': self.airs_namespace}), 'text')
existence_test_and_add(self, 'to', item.xpath(xpTo, namespaces={'airs': self.airs_namespace}), 'text')
existence_test_and_add(self, 'day_of_week', day, 'no_handling')
''' Foreign Keys '''
existence_test_and_add(self, 'time_open_index_id', self.time_open_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, TimeOpenDays)
def parse_inventory(self, element):
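''' Parses hmis:Inventory elements: maps the inventory ID, bed/unit counts and bed attributes, HMIS
participation and effective periods into parse_dict, then shreds each record to the Inventory table
keyed to the current export, service and/or site service. '''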
''' Element paths '''
xpInventory = 'hmis:Inventory'
xpInventoryDeleteOccurredDate = '@hmis:deleteOccurredDate'
xpInventoryDeleteEffective = '@hmis:deleteEffective'
xpInventoryDelete = '@hmis:delete'
xpHMISParticipationPeriodStartDate = 'hmis:HMISParticipationPeriod/hmis:StartDate'
xpHMISParticipationPeriodEndDate = 'hmis:HMISParticipationPeriod/hmis:EndDate'
xpInventoryIDIDNum = 'hmis:InventoryID/hmis:IDNum'
xpInventoryIDIDStr = 'hmis:InventoryID/hmis:IDStr'
xpBedInventory = 'hmis:BedInventory'
xpBedAvailability = '@hmis:BedAvailability'
xpBedType = '@hmis:BedType'
xpBedIndividualFamilyType = '@hmis:BedIndividualFamilyType'
xpChronicHomelessBed = '@hmis:ChronicHomelessBed'
xpDomesticViolenceShelterBed = '@hmis:DomesticViolenceShelterBed'
xpHouseholdType = '@hmis:HouseholdType'
xpHMISParticipatingBeds = 'hmis:HMISParticipatingBeds'
xpInventoryEffectivePeriodStartDate = 'hmis:InventoryEffectivePeriod/hmis:StartDate'
xpInventoryEffectivePeriodEndDate = 'hmis:InventoryEffectivePeriod/hmis:EndDate'
xpInventoryRecordedDate = 'hmis:InventoryRecordedDate'
xpUnitInventory = 'hmis:UnitInventory'
itemElements = element.xpath(xpInventory, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'inventory_delete_occurred_date', item.xpath(xpInventoryDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'inventory_delete_effective_date', item.xpath(xpInventoryDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'inventory_delete', item.xpath(xpInventoryDelete, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hmis_participation_period_start_date', item.xpath(xpHMISParticipationPeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hmis_participation_period_end_date', item.xpath(xpHMISParticipationPeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'inventory_id_id_num', item.xpath(xpInventoryIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'inventory_id_id_str', item.xpath(xpInventoryIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'bed_inventory', item.xpath(xpBedInventory, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'bed_availability', item.xpath(xpBedAvailability, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'bed_type', item.xpath(xpBedType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'bed_individual_family_type', item.xpath(xpBedIndividualFamilyType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'chronic_homeless_bed', item.xpath(xpChronicHomelessBed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'domestic_violence_shelter_bed', item.xpath(xpDomesticViolenceShelterBed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_type', item.xpath(xpHouseholdType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hmis_participating_beds', item.xpath(xpHMISParticipatingBeds, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'inventory_effective_period_start_date', item.xpath(xpInventoryEffectivePeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'inventory_effective_period_end_date', item.xpath(xpInventoryEffectivePeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'inventory_recorded_date', item.xpath(xpInventoryRecordedDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'unit_inventory', item.xpath(xpUnitInventory, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'service_index_id', self.service_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Inventory)
''' Parse sub-tables '''
def parse_income_requirements(self, element):
''' Element paths '''
xpIncomeRequirements = 'airs:IncomeRequirements'
itemElements = element.xpath(xpIncomeRequirements, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'income_requirements', item.xpath(xpIncomeRequirements, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, IncomeRequirements)
''' Parse sub-tables '''
def parse_hmis_asset(self, element):
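''' Parses hmis:HMISAsset elements: maps the asset ID, asset count and its bed attributes, asset
type and effective period into parse_dict, shreds each record to the HmisAsset table, then parses
the Assignment sub-table. '''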
''' Element paths '''
xpHMISAsset = 'hmis:HMISAsset'
xpAssetIDIDNum = 'hmis:AssetID/hmis:IDNum'
xpAssetIDIDStr = 'hmis:AssetID/hmis:IDStr'
xpAssetIDDelete = 'hmis:AssetID/@hmis:delete'
xpAssetIDDeleteOccurredDate = 'hmis:AssetID/@hmis:deleteOccurredDate'
xpAssetIDDeleteEffective = 'hmis:AssetID/@hmis:deleteEffective'
xpAssetCount = 'hmis:AssetCount'
xpAssetCountBedAvailability = 'hmis:AssetCount/@hmis:bedAvailability'
xpAssetCountBedType = 'hmis:AssetCount/@hmis:bedType'
xpAssetCountBedIndividualFamilyType = 'hmis:AssetCount/@hmis:bedIndividualFamilyType'
xpAssetCountChronicHomelessBed = 'hmis:AssetCount/@hmis:chronicHomelessBed'
xpAssetCountDomesticViolenceShelterBed = 'hmis:AssetCount/@hmis:domesticViolenceShelterBed'
xpAssetCountHouseholdType = 'hmis:AssetCount/@hmis:householdType'
xpAssetType = 'hmis:AssetType'
xpAssetEffectivePeriodStartDate = 'hmis:AssetEffectivePeriod/hmis:StartDate'
xpAssetEffectivePeriodEndDate = 'hmis:AssetEffectivePeriod/hmis:EndDate'
xpAssetRecordedDate = 'hmis:RecordedDate'
itemElements = element.xpath(xpHMISAsset, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'asset_id_id_num', item.xpath(xpAssetIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_id_id_str', item.xpath(xpAssetIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_id_delete', item.xpath(xpAssetIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'asset_id_delete_occurred_date', item.xpath(xpAssetIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'asset_id_delete_effective_date', item.xpath(xpAssetIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'asset_count', item.xpath(xpAssetCount, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_count_bed_availability', item.xpath(xpAssetCountBedAvailability, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_count_bed_type', item.xpath(xpAssetCountBedType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_count_bed_individual_family_type', item.xpath(xpAssetCountBedIndividualFamilyType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_count_chronic_homeless_bed', item.xpath(xpAssetCountChronicHomelessBed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_count_domestic_violence_shelter_bed', item.xpath(xpAssetCountDomesticViolenceShelterBed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_count_household_type', item.xpath(xpAssetCountHouseholdType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_type', item.xpath(xpAssetType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_effective_period_start_date', item.xpath(xpAssetEffectivePeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_effective_period_end_date', item.xpath(xpAssetEffectivePeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'asset_recorded_date', item.xpath(xpAssetRecordedDate, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, HmisAsset)
''' Parse sub-tables '''
parse_assignment(self, item)
def parse_assignment(self, element):
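''' Parses hmis:Assignment elements under an HMISAsset: stores the assignment, person and household
IDs, shreds each record to the Assignment table, then parses the AssignmentPeriod sub-table. '''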
''' Element paths '''
xpAssignment = 'hmis:Assignment'
xpAssignmentIDIDNum = 'hmis:AssignmentID/hmis:IDNum'
xpAssignmentIDIDStr = 'hmis:AssignmentID/hmis:IDStr'
xpAssignmentIDDelete = 'hmis:AssignmentID/@hmis:delete'
xpAssignmentIDDeleteOccurredDate = 'hmis:AssignmentID/@hmis:deleteOccurredDate'
xpAssignmentIDDeleteEffective = 'hmis:AssignmentID/@hmis:deleteEffective'
xpPersonIDIDNum = 'hmis:PersonID/hmis:IDNum'
xpPersonIDIDStr = 'hmis:PersonID/hmis:IDStr'
xpHouseholdIDIDNum = 'hmis:HouseholdID/hmis:IDNum'
xpHouseholdIDIDStr = 'hmis:HouseholdID/hmis:IDStr'
itemElements = element.xpath(xpAssignment, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'assignment_id_id_num', item.xpath(xpAssignmentIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'assignment_id_id_str', item.xpath(xpAssignmentIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'assignment_id_delete', item.xpath(xpAssignmentIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'assignment_id_delete_occurred_date', item.xpath(xpAssignmentIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'assignment_id_delete_effective_date', item.xpath(xpAssignmentIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_id_id_num', item.xpath(xpPersonIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_id_id_str', item.xpath(xpPersonIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_id_id_num', item.xpath(xpHouseholdIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_id_id_str', item.xpath(xpHouseholdIDIDStr, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
existence_test_and_add(self, 'hmis_asset_index_id', self.hmis_asset_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Assignment)
''' Parse sub-tables '''
parse_assignment_period(self, item)
def parse_assignment_period(self, element):
''' Element paths '''
xpAssignmentPeriod = 'hmis:AssignmentPeriod'
xpAssignmentPeriodStartDate = 'hmis:StartDate'
xpAssignmentPeriodEndDate = 'hmis:EndDate'
itemElements = element.xpath(xpAssignmentPeriod, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'assignment_period_start_date', item.xpath(xpAssignmentPeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'assignment_period_end_date', item.xpath(xpAssignmentPeriodEndDate, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'assignment_index_id', self.assignment_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, AssignmentPeriod)
''' Parse sub-tables '''
def parse_geographic_area_served(self, element):
''' Element paths '''
xpGeographicAreaServed = 'airs:GeographicAreaServed'
xpZipCode = 'airs:ZipCode'
xpCensusTrack = 'airs:CensusTrack'
xpCity = 'airs:City'
xpCounty = 'airs:County'
xpState = 'airs:State'
xpCountry = 'airs:Country'
xpDescription = 'airs:Description'
itemElements = element.xpath(xpGeographicAreaServed, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'zipcode', item.xpath(xpZipCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'census_track', item.xpath(xpCensusTrack, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'city', item.xpath(xpCity, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'county', item.xpath(xpCounty, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'state', item.xpath(xpState, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'country', item.xpath(xpCountry, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, GeographicAreaServed)
''' Parse sub-tables '''
def parse_documents_required(self, element):
''' Element paths '''
xpDocumentsRequired = 'airs:DocumentsRequired'
xpDescription = 'airs:Description'
itemElements = element.xpath(xpDocumentsRequired, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'documents_required', item.xpath(xpDocumentsRequired, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DocumentsRequired)
''' Parse sub-tables '''
def parse_aid_requirements(self, element):
''' Element paths '''
xpAidRequirements = 'airs:AidRequirements'
itemElements = element.xpath(xpAidRequirements, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'aid_requirements', item, 'text') # the element's own text -- the previous self-referential xpath lookup appears to never match
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, AidRequirements)
''' Parse sub-tables '''
def parse_age_requirements(self, element):
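''' Reads the airs:Gender, airs:MinimumAge and airs:MaximumAge attributes of each airs:AgeRequirements element into the AgeRequirements table. '''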
''' Element paths '''
xpAgeRequirements = 'airs:AgeRequirements'
xpGender = '@airs:Gender'
xpMinimumAge = '@airs:MinimumAge'
xpMaximumAge = '@airs:MaximumAge'
itemElements = element.xpath(xpAgeRequirements, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'gender', item.xpath(xpGender, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'minimum_age', item.xpath(xpMinimumAge, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'maximum_age', item.xpath(xpMaximumAge, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, AgeRequirements)
''' Parse sub-tables '''
def parse_site_service_participation(self, element, pf='hmis'):
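''' Shreds SiteServiceParticipation records (IDs, household IDs, participation dates) and then their reasons for leaving, needs, service events and -- for hmis sources -- person historical sub-tables. pf is the namespace prefix of the SiteServiceParticipation element itself: 'hmis' for most readers, 'ext' for SVP5. '''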
''' Element paths '''
xpSiteServiceParticipation = pf+':SiteServiceParticipation' # SVP5 has pf='ext'
xpSiteServiceParticipationIDIDNum = 'hmis:SiteServiceParticipationID/hmis:IDNum'
xpSiteServiceParticipationIDIDStr = 'hmis:SiteServiceParticipationID/hmis:IDStr'
xpSiteServiceParticipationIDDeleteOccurredDate = 'hmis:SiteServiceParticipationID/@hmis:deleteOccurredDate'
xpSiteServiceParticipationIDDeleteEffective = 'hmis:SiteServiceParticipationID/@hmis:deleteEffective'
xpSiteServiceParticipationIDDelete = 'hmis:SiteServiceParticipationID/@hmis:delete'
xpSiteServiceID = 'hmis:SiteServiceID'
xpHouseholdIDIDNum = 'hmis:HouseholdID/hmis:IDNum'
xpHouseholdIDIDStr = 'hmis:HouseholdID/hmis:IDStr'
xpStartDate = 'hmis:ParticipationDates/hmis:StartDate'
xpEndDate = 'hmis:ParticipationDates/hmis:EndDate'
itemElements = element.xpath(xpSiteServiceParticipation, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'site_service_participation_idid_num', item.xpath(xpSiteServiceParticipationIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_service_participation_idid_str', item.xpath(xpSiteServiceParticipationIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'site_service_participation_id_delete_occurred_date', item.xpath(xpSiteServiceParticipationIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'site_service_participation_id_delete_effective_date', item.xpath(xpSiteServiceParticipationIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'site_service_participation_id_delete', item.xpath(xpSiteServiceParticipationIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'site_service_idid_num', item.xpath(xpSiteServiceID, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_idid_num', item.xpath(xpHouseholdIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_idid_str', item.xpath(xpHouseholdIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'participation_dates_start_date', item.xpath(xpStartDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'participation_dates_end_date', item.xpath(xpEndDate, namespaces = self.nsmap), 'element_date')
''' Foreign Keys '''
if pf=='hmis':
existence_test_and_add(self, 'person_index_id', self.person_index_id, 'no_handling')
else:
existence_test_and_add(self, 'fk_participation_to_person', self.person_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, SiteServiceParticipation)
''' Parse sub-tables '''
parse_reasons_for_leaving(self, item)
parse_need(self, item)
parse_service_event(self, item, namespace = pf + ':')
if pf=='hmis':
parse_person_historical(self, item) # OCC reader has a different copy, parsed earlier
def parse_reasons_for_leaving(self, element):
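''' Shreds each hmis:ReasonsForLeaving (IDs, delete metadata, reason and other-reason values with their collection attributes) into ReasonsForLeaving, keyed to the current site service participation. '''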
''' Element paths '''
xpReasonsForLeaving = 'hmis:ReasonsForLeaving'
xpReasonsForLeavingIDIDNum = 'hmis:ReasonsForLeavingID/hmis:IDNum'
xpReasonsForLeavingIDIDStr = 'hmis:ReasonsForLeavingID/hmis:IDStr'
xpReasonsForLeavingIDDelete = 'hmis:ReasonsForLeavingID/@hmis:delete'
xpReasonsForLeavingIDDeleteOccurredDate = 'hmis:ReasonsForLeavingID/@hmis:deleteOccurredDate'
xpReasonsForLeavingIDDeleteEffective = 'hmis:ReasonsForLeavingID/@hmis:deleteEffective'
xpReasonsForLeavingDateCollected = 'hmis:ReasonsForLeaving/@hmis:dateCollected'
xpReasonsForLeavingDateEffective = 'hmis:ReasonsForLeaving/@hmis:dateEffective'
xpReasonsForLeavingDataCollectionStage = 'hmis:ReasonsForLeaving/@hmis:dataCollectionStage'
xpReasonsForLeavingOther = 'hmis:ReasonsForLeavingOther'
xpReasonsForLeavingOtherDateCollected = 'hmis:ReasonsForLeavingOther/@hmis:dateCollected'
xpReasonsForLeavingOtherDateEffective = 'hmis:ReasonsForLeavingOther/@hmis:dateEffective'
xpReasonsForLeavingOtherDataCollectionStage = 'hmis:ReasonsForLeavingOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpReasonsForLeaving, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'reason_for_leaving_id_id_num', item.xpath(xpReasonsForLeavingIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'reason_for_leaving_id_id_str', item.xpath(xpReasonsForLeavingIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'reason_for_leaving_id_delete', item.xpath(xpReasonsForLeavingIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'reason_for_leaving_id_delete_occurred_date', item.xpath(xpReasonsForLeavingIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'reason_for_leaving_id_delete_effective_date', item.xpath(xpReasonsForLeavingIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'reason_for_leaving', item.xpath(xpReasonsForLeaving, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'reason_for_leaving_date_collected', item.xpath(xpReasonsForLeavingDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'reason_for_leaving_date_effective', item.xpath(xpReasonsForLeavingDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'reason_for_leaving_data_collection_stage', item.xpath(xpReasonsForLeavingDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'reason_for_leaving_other', item.xpath(xpReasonsForLeavingOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'reason_for_leaving_other_date_collected', item.xpath(xpReasonsForLeavingOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'reason_for_leaving_other_date_effective', item.xpath(xpReasonsForLeavingOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'reason_for_leaving_other_data_collection_stage', item.xpath(xpReasonsForLeavingOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_participation_index_id', self.site_service_participation_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ReasonsForLeaving)
''' Parse sub-tables '''
def parse_application_process(self, element):
''' Element paths '''
xpApplicationProcess = 'airs:ApplicationProcess'
xpStep = 'airs:Step'
xpDescription = 'airs:Description'
itemElements = element.xpath(xpApplicationProcess, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'step', item.xpath(xpStep, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ApplicationProcess)
''' Parse sub-tables '''
def parse_need(self, element, pf = 'hmis:'):
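''' Shreds each Need (IDs, effective period, recorded date, status) into the Need table and then parses its Taxonomy and ServiceEvent sub-tables. pf carries the namespace prefix including the colon: 'hmis:' by default, 'ext:' for the TBC reader. '''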
''' Element paths '''
xpNeed = pf + 'Need'
xpNeedIDIDNum = 'hmis:NeedID/hmis:IDNum'
xpNeedIDIDStr = 'hmis:NeedID/hmis:IDStr'
xpNeedIDDeleteOccurredDate = 'hmis:NeedID/@hmis:deleteOccurredDate'
xpNeedIDDeleteEffective = 'hmis:NeedID/@hmis:deleteEffective'
xpNeedIDDelete = 'hmis:NeedID/@hmis:delete'
xpSiteServiceID = 'hmis:SiteServiceID'
xpNeedEffectivePeriodStartDate = 'hmis:NeedEffectivePeriod/hmis:StartDate'
xpNeedEffectivePeriodEndDate = 'hmis:NeedEffectivePeriod/hmis:EndDate'
xpNeedRecordedDate = 'hmis:NeedRecordedDate'
xpNeedStatus = 'hmis:NeedStatus'
itemElements = element.xpath(xpNeed, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'need_idid_num', item.xpath(xpNeedIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'need_idid_str', item.xpath(xpNeedIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'need_id_delete_occurred_date', item.xpath(xpNeedIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'need_id_delete_delete_effective_date', item.xpath(xpNeedIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'need_id_delete', item.xpath(xpNeedIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'site_service_idid_num', item.xpath(xpSiteServiceID, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'need_effective_period_start_date', item.xpath(xpNeedEffectivePeriodStartDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'need_effective_period_end_date', item.xpath(xpNeedEffectivePeriodEndDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'need_recorded_date', item.xpath(xpNeedRecordedDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'need_status', item.xpath(xpNeedStatus, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
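# Needs appear under several parents, so only the index ids the caller actually set are attached; missing attributes are skipped.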
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'person_index_id', self.person_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_service_participation_index_id', self.site_service_participation_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Need)
''' Parse sub-tables '''
if pf == 'ext:': # This was done for TBC, which uses ext:Person/ext:Need/hmis:Taxonomy/airs:Code
parse_taxonomy(self, item, 'hmis:')
else: # Others are hmis:Person/hmis:Need/airs:Taxonomy/airs:Code
parse_taxonomy(self, item, 'airs:')
parse_service_event(self, item, namespace='hmis:')
def parse_taxonomy(self, element, pf = 'airs:'):
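''' Pulls the airs:Code values out of each Taxonomy element and shreds them into the Taxonomy table, keyed to whichever of the site service, need and export indexes the caller set. pf selects the Taxonomy namespace: 'airs:' normally, 'hmis:' for TBC-style exports. '''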
''' Element paths '''
xpTaxonomy = pf + 'Taxonomy'
xpCode = 'airs:Code'
itemElements = element.xpath(xpTaxonomy, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
# SBB20100916 The xpath below returns a list of airs:Code elements rather than a single value,
# so each code has to be handled individually instead of being passed straight through:
#existence_test_and_add(self, 'code', item.xpath(xpCode, namespaces = self.nsmap), 'text')
valsName = item.xpath(xpCode, namespaces = self.nsmap)
# add each code to the parse dict in turn
for code in valsName:
existence_test_and_add(self, 'code', code, 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'need_index_id', self.need_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Taxonomy)
''' Parse sub-tables '''
def parse_service_event(self, element, namespace='hmis:'):
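''' Shreds each ServiceEvent (IDs, quantities, AIRS code, effective period, provision and recorded dates, HMIS and HPRP codes) into ServiceEvent, then parses its notes and funding sources. namespace prefixes the ServiceEvent element and its direct children ('hmis:' or 'ext:'). '''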
''' Element paths '''
xpServiceEvent = namespace + 'ServiceEvent'
xpServiceEventIDIDNum = namespace + 'ServiceEventID/hmis:IDNum'
xpServiceEventIDIDStr = namespace + 'ServiceEventID/hmis:IDStr'
xpServiceEventIDDeleteOccurredDate = namespace + 'ServiceEventID/@hmis:deleteOccurredDate'
xpServiceEventIDDeleteEffective = namespace + 'ServiceEventID/@hmis:deleteEffective'
xpServiceEventIDDelete = namespace + 'ServiceEventID/@hmis:delete'
xpSiteServiceID = namespace + 'SiteServiceID'
xpHouseholdIDIDNum = namespace + 'HouseholdID/hmis:IDNum'
xpHouseholdIDIDStr = namespace + 'HouseholdID/hmis:IDStr'
xpIsReferral = namespace + 'IsReferral'
xpQuantityOfServiceEvent = namespace + 'QuantityOfServiceEvent'
xpQuantityOfServiceEventUnit = namespace + 'QuantityOfServiceEventUnit'
xpServiceEventAIRSCode = namespace + 'ServiceEventAIRSCode'
xpServiceEventEffectivePeriodStartDate = namespace + 'ServiceEventEffectivePeriod/hmis:StartDate'
xpServiceEventEffectivePeriodEndDate = namespace + 'ServiceEventEffectivePeriod/hmis:EndDate'
xpServiceEventProvisionDate = namespace + 'ServiceEventProvisionDate'
xpServiceEventRecordedDate = namespace + 'ServiceEventRecordedDate'
xpServiceEventIndFam = namespace + 'ServiceEventIndFam'
xpHMISServiceEventCodeTypeOfService = namespace + 'HMISServiceEventCode/hmis:TypeOfService'
xpHMISServiceEventCodeTypeOfServiceOther = namespace + 'HMISServiceEventCode/hmis:TypeOfServiceOther'
xpHPRPFinancialAssistanceServiceEventCode = namespace + 'HPRPFinancialAssistanceService'
xpHPRPRelocationStabilizationServiceEventCode = namespace + 'HPRPRelocationStabilizationServiceEventCode'
itemElements = element.xpath(xpServiceEvent, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'service_event_idid_num', item.xpath(xpServiceEventIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_event_idid_str', item.xpath(xpServiceEventIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_event_id_delete_occurred_date', item.xpath(xpServiceEventIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'service_event_id_delete_effective_date', item.xpath(xpServiceEventIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'service_event_id_delete', item.xpath(xpServiceEventIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'site_service_id', item.xpath(xpSiteServiceID, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_idid_num', item.xpath(xpHouseholdIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_idid_str', item.xpath(xpHouseholdIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'is_referral', item.xpath(xpIsReferral, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'quantity_of_service', item.xpath(xpQuantityOfServiceEvent, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'quantity_of_service_measure', item.xpath(xpQuantityOfServiceEventUnit, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_airs_code', item.xpath(xpServiceEventAIRSCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_period_start_date', item.xpath(xpServiceEventEffectivePeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_period_end_date', item.xpath(xpServiceEventEffectivePeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_event_provision_date', item.xpath(xpServiceEventProvisionDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_event_recorded_date', item.xpath(xpServiceEventRecordedDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_event_ind_fam', item.xpath(xpServiceEventIndFam, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hmis_service_event_code_type_of_service', item.xpath(xpHMISServiceEventCodeTypeOfService, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hmis_service_event_code_type_of_service_other', item.xpath(xpHMISServiceEventCodeTypeOfServiceOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hprp_financial_assistance_service_event_code', item.xpath(xpHPRPFinancialAssistanceServiceEventCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hprp_relocation_stabilization_service_event_code', item.xpath(xpHPRPRelocationStabilizationServiceEventCode, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'person_index_id', self.person_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'need_index_id', self.need_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_service_participation_index_id', self.site_service_participation_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, ServiceEvent)
''' Parse sub-tables '''
parse_service_event_notes(self, item, namespace)
parse_funding_source(self, item, namespace)
def parse_service_event_notes(self, element, pf='hmis:'): # Default so old code won't break
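''' Shreds each note under ServiceEventNotes (note IDs, text and collection attributes) into ServiceEventNotes; pf defaults to 'hmis:' so existing callers keep working. '''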
''' Element paths '''
xpServiceEventNotes = pf + 'ServiceEventNotes/hmis:note'
xpNoteIDIDNum = 'hmis:NoteID/hmis:IDNum'
xpNoteIDIDStr = 'hmis:NoteID/hmis:IDStr'
xpNoteIDDeleteOccurredDate = 'hmis:NoteID/@hmis:deleteOccurredDate'
xpNoteIDDeleteEffective = 'hmis:NoteID/@hmis:deleteEffective'
xpNoteIDDelete = 'hmis:NoteID/@hmis:delete'
xpNoteText = 'hmis:NoteText'
xpNoteTextDateCollected = 'hmis:NoteText/@hmis:dateCollected'
xpNoteTextDateEffective = 'hmis:NoteText/@hmis:dateEffective'
xpNoteTextDataCollectionStage = 'hmis:NoteText/@hmis:dataCollectionStage'
itemElements = element.xpath(xpServiceEventNotes, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'note_id_id_num', item.xpath(xpNoteIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'note_id_id_str', item.xpath(xpNoteIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'note_delete_occurred_date', item.xpath(xpNoteIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'note_delete_effective_date', item.xpath(xpNoteIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'note_delete', item.xpath(xpNoteIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'note_text', item.xpath(xpNoteText, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'note_text_date_collected', item.xpath(xpNoteTextDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'note_text_date_effective', item.xpath(xpNoteTextDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'note_text_data_collection_stage', item.xpath(xpNoteTextDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'service_event_index_id', self.service_event_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ServiceEventNotes)
''' Parse sub-tables '''
def parse_family_requirements(self, element):
''' Element paths '''
xpFamilyRequirements = 'airs:FamilyRequirements'
itemElements = element.xpath(xpFamilyRequirements, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'family_requirements', item, 'text') # the element's own text -- the previous self-referential xpath lookup appears to never match
''' Foreign Keys '''
existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, FamilyRequirements)
''' Parse sub-tables '''
def parse_person_historical(self, element, pf = 'hmis:'):
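''' Shreds each PersonHistorical (IDs, site service id, phone number) into PersonHistorical and then fans out to the per-assessment sub-parsers listed below. pf is the namespace prefix of the PersonHistorical element. '''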
''' Element paths '''
xpPersonHistorical = pf + 'PersonHistorical'
xpPersonHistoricalIDIDNum = 'hmis:PersonHistoricalID/hmis:IDNum'
xpPersonHistoricalIDIDStr = 'hmis:PersonHistoricalID/hmis:IDStr'
xpPersonHistoricalIDDelete = 'hmis:PersonHistoricalID/@hmis:delete'
xpPersonHistoricalIDDeleteEffective = 'hmis:PersonHistoricalID/@hmis:deleteEffective'
xpPersonHistoricalIDDeleteOccurredDate = 'hmis:PersonHistoricalID/@hmis:deleteOccurredDate'
xpSiteServiceID = 'hmis:SiteServiceID'
# xpPersonHistoricalPersonPhoneNumber = pf + 'PersonHistorical/hmis:PersonPhoneNumber'  # this absolute path did not match, so the relative path below is used instead
xpPersonHistoricalPersonPhoneNumber = 'hmis:PersonPhoneNumber'
xpPersonHistoricalPersonPhoneNumberDateCollected = 'hmis:PersonPhoneNumber/@hmis:dateCollected'
itemElements = element.xpath(xpPersonHistorical, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'person_historical_id_id_num', item.xpath(xpPersonHistoricalIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_historical_id_id_str', item.xpath(xpPersonHistoricalIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_historical_id_delete', item.xpath(xpPersonHistoricalIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'person_historical_id_delete_effective_date', item.xpath(xpPersonHistoricalIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_historical_id_delete_occurred_date', item.xpath(xpPersonHistoricalIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'site_service_id', item.xpath(xpSiteServiceID, namespaces = self.nsmap), 'text')
# JCS New 2012-01-11
existence_test_and_add(self, 'person_phone_number', item.xpath(xpPersonHistoricalPersonPhoneNumber, namespaces = self.nsmap), 'text') # 'no_handling' may also be appropriate here
existence_test_and_add(self, 'person_phone_number_date_collected', item.xpath(xpPersonHistoricalPersonPhoneNumberDateCollected, namespaces = self.nsmap), 'attribute_date')
''' Foreign Keys '''
try: existence_test_and_add(self, 'person_index_id', self.person_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_service_participation_index_id', self.site_service_participation_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
# FBY New 2016-08-16: Add foreign key for call table
try: existence_test_and_add(self, 'call_index_id', self.call_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, PersonHistorical)
''' Parse sub-tables '''
parse_housing_status(self, item)
parse_veteran(self, item)
parse_vocational_training(self, item)
parse_substance_abuse_problem(self, item)
parse_pregnancy(self, item)
parse_prior_residence(self, item)
parse_physical_disability(self, item)
parse_non_cash_benefits(self, item)
parse_non_cash_benefits_last_30_days(self, item)
parse_mental_health_problem(self, item)
parse_length_of_stay_at_prior_residence(self, item)
parse_income_total_monthly(self, item)
parse_hud_chronic_homeless(self, item)
parse_income_last_30_days(self, item)
parse_highest_school_level(self, item)
parse_hiv_aids_status(self, item)
parse_health_status(self, item)
parse_engaged_date(self, item)
parse_employment(self, item)
parse_domestic_violence(self, item)
parse_disabling_condition(self, item)
parse_developmental_disability(self, item)
parse_destinations(self, item)
parse_degree(self, item)
parse_currently_in_school(self, item)
parse_contact_made(self, item)
parse_child_enrollment_status(self, item)
parse_chronic_health_condition(self, item)
parse_income_and_sources(self, item)
parse_hud_homeless_episodes(self, item)
parse_person_address(self, item)
parse_email(self, item)
parse_phone(self, item)
def parse_housing_status(self, element):
''' Element paths '''
xpHousingStatus = 'hmis:HousingStatus'
xpHousingStatusDateCollected = '@hmis:dateCollected'
xpHousingStatusDateEffective = '@hmis:dateEffective'
xpHousingStatusDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpHousingStatus, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'housing_status', item, 'text')
existence_test_and_add(self, 'housing_status_date_collected', item.xpath(xpHousingStatusDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'housing_status_date_effective', item.xpath(xpHousingStatusDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'housing_status_data_collection_stage', item.xpath(xpHousingStatusDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, HousingStatus)
''' Parse sub-tables '''
def parse_veteran(self, element):
''' Unique method -- loops over every hmis:Veteran element and launches the veteran sub-parsers below '''
''' Element paths '''
xpVeteran = 'hmis:Veteran'
itemElements = element.xpath(xpVeteran, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
parse_veteran_military_branches(self, item)
parse_veteran_served_in_war_zone(self, item)
parse_veteran_service_era(self, item)
parse_veteran_veteran_status(self, item)
parse_veteran_warzones_served(self, item)
def parse_veteran_military_branches(self, element):
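''' Shreds each hmis:MilitaryBranches element (branch IDs, discharge status and branch values with their collection attributes) into VeteranMilitaryBranches. '''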
''' Element paths '''
xpMilitaryBranches = 'hmis:MilitaryBranches'
xpMilitaryBranchIDIDNum = 'hmis:MilitaryBranchID/hmis:IDNum'
xpMilitaryBranchIDIDStr = 'hmis:MilitaryBranchID/hmis:IDStr'
xpMilitaryBranchIDDeleteOccurredDate = 'hmis:MilitaryBranchID/@hmis:deleteOccurredDate'
xpMilitaryBranchIDDeleteEffective = 'hmis:MilitaryBranchID/@hmis:deleteEffective'
xpMilitaryBranchIDDelete = 'hmis:MilitaryBranchID/@hmis:delete'
xpDischargeStatus = 'hmis:DischargeStatus'
xpDischargeStatusDateCollected = 'hmis:DischargeStatus/@hmis:dateCollected'
xpDischargeStatusDateEffective = 'hmis:DischargeStatus/@hmis:dateEffective'
xpDischargeStatusDataCollectionStage = 'hmis:DischargeStatus/@hmis:dataCollectionStage'
xpDischargeStatusOther = 'hmis:DischargeStatusOther'
xpDischargeStatusOtherDateCollected = 'hmis:DischargeStatusOther/@hmis:dateCollected'
xpDischargeStatusOtherDateEffective = 'hmis:DischargeStatusOther/@hmis:dateEffective'
xpDischargeStatusOtherDataCollectionStage = 'hmis:DischargeStatusOther/@hmis:dataCollectionStage'
xpMilitaryBranch = 'hmis:MilitaryBranch'
xpMilitaryBranchDateCollected = 'hmis:MilitaryBranch/@hmis:dateCollected'
xpMilitaryBranchDateEffective = 'hmis:MilitaryBranch/@hmis:dateEffective'
xpMilitaryBranchDataCollectionStage = 'hmis:MilitaryBranch/@hmis:dataCollectionStage'
xpMilitaryBranchOther = 'hmis:MilitaryBranchOther'
xpMilitaryBranchOtherDateCollected = 'hmis:MilitaryBranchOther/@hmis:dateCollected'
xpMilitaryBranchOtherDateEffective = 'hmis:MilitaryBranchOther/@hmis:dateEffective'
xpMilitaryBranchOtherDataCollectionStage = 'hmis:MilitaryBranchOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpMilitaryBranches, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'military_branch_id_id_id_num', item.xpath(xpMilitaryBranchIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'military_branch_id_id_id_str', item.xpath(xpMilitaryBranchIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'military_branch_id_id_delete_occurred_date', item.xpath(xpMilitaryBranchIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_branch_id_id_delete_effective_date', item.xpath(xpMilitaryBranchIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_branch_id_id_delete', item.xpath(xpMilitaryBranchIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'discharge_status', item.xpath(xpDischargeStatus, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'discharge_status_date_collected', item.xpath(xpDischargeStatusDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'discharge_status_date_effective', item.xpath(xpDischargeStatusDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'discharge_status_data_collection_stage', item.xpath(xpDischargeStatusDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'discharge_status_other', item.xpath(xpDischargeStatusOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'discharge_status_other_date_collected', item.xpath(xpDischargeStatusOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'discharge_status_other_date_effective', item.xpath(xpDischargeStatusOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'discharge_status_other_data_collection_stage', item.xpath(xpDischargeStatusOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'military_branch', item.xpath(xpMilitaryBranch, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'military_branch_date_collected', item.xpath(xpMilitaryBranchDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_branch_date_effective', item.xpath(xpMilitaryBranchDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_branch_data_collection_stage', item.xpath(xpMilitaryBranchDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'military_branch_other', item.xpath(xpMilitaryBranchOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'military_branch_other_date_collected', item.xpath(xpMilitaryBranchOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_branch_other_date_effective', item.xpath(xpMilitaryBranchOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_branch_other_data_collection_stage', item.xpath(xpMilitaryBranchOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, VeteranMilitaryBranches)
''' Parse sub-tables '''
def parse_veteran_military_service_duration(self, element):
''' Element paths '''
xpMilitaryServiceDuration = 'hmis:MilitaryServiceDuration'
xpMilitaryServiceDurationDateCollected = 'hmis:MilitaryServiceDuration/@hmis:dateCollected'
xpMilitaryServiceDurationDateEffective = 'hmis:MilitaryServiceDuration/@hmis:dateEffective'
xpMilitaryServiceDurationDataCollectionStage = 'hmis:MilitaryServiceDuration/@hmis:dataCollectionStage'
itemElements = element.xpath(xpMilitaryServiceDuration, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'military_service_duration', item.xpath(xpMilitaryServiceDuration, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'military_service_duration_date_collected', item.xpath(xpMilitaryServiceDurationDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_service_duration_date_effective', item.xpath(xpMilitaryServiceDurationDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'military_service_duration_data_collection_stage', item.xpath(xpMilitaryServiceDurationDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, VeteranMilitaryServiceDuration)
''' Parse sub-tables '''
def parse_veteran_served_in_war_zone(self, element):
''' Element paths '''
xpVeteranServedInWarZone = 'hmis:VeteranServedInWarZone'
xpVeteranServedInWarZoneDurationDateCollected = 'hmis:VeteranServedInWarZone/@hmis:dateCollected'
xpVeteranServedInWarZoneDurationDateEffective = 'hmis:VeteranServedInWarZone/@hmis:dateEffective'
xpVeteranServedInWarZoneDurationDataCollectionStage = 'hmis:VeteranServedInWarZone/@hmis:dataCollectionStage'
itemElements = element.xpath(xpVeteranServedInWarZone, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'served_in_war_zone', item.xpath(xpVeteranServedInWarZone, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'served_in_war_zone_date_collected', item.xpath(xpVeteranServedInWarZoneDurationDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'served_in_war_zone_date_effective', item.xpath(xpVeteranServedInWarZoneDurationDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'served_in_war_zone_data_collection_stage', item.xpath(xpVeteranServedInWarZoneDurationDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, VeteranServedInWarZone)
''' Parse sub-tables '''
def parse_veteran_service_era(self, element):
''' Element paths '''
xpServiceEra = 'hmis:ServiceEra'
xpServiceEraDurationDateCollected = 'hmis:ServiceEra/@hmis:dateCollected'
xpServiceEraDurationDateEffective = 'hmis:ServiceEra/@hmis:dateEffective'
xpServiceEraDurationDataCollectionStage = 'hmis:ServiceEra/@hmis:dataCollectionStage'
itemElements = element.xpath(xpServiceEra, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'service_era', item.xpath(xpServiceEra, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'service_era_date_collected', item.xpath(xpServiceEraDurationDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'service_era_date_effective', item.xpath(xpServiceEraDurationDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'service_era_data_collection_stage', item.xpath(xpServiceEraDurationDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, VeteranServiceEra)
''' Parse sub-tables '''
def parse_veteran_veteran_status(self, element):
''' Element paths '''
xpVeteranStatus = 'hmis:VeteranStatus'
xpVeteranStatusDurationDateCollected = './@hmis:dateCollected'
xpVeteranStatusDurationDateEffective = './@hmis:dateEffective'
xpVeteranStatusDurationDataCollectionStage = './@hmis:dataCollectionStage'
itemElements = element.xpath(xpVeteranStatus, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'veteran_status', item, 'text')
existence_test_and_add(self, 'veteran_status_date_collected', item.xpath(xpVeteranStatusDurationDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'veteran_status_date_effective', item.xpath(xpVeteranStatusDurationDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'veteran_status_data_collection_stage', item.xpath(xpVeteranStatusDurationDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, VeteranVeteranStatus)
''' Parse sub-tables '''
def parse_veteran_warzones_served(self, element):
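''' Shreds each hmis:WarZonesServed element (war zone IDs, months in war zone, received fire and war zone values) into VeteranWarzonesServed. '''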
''' Element paths '''
xpVeteranWarZonesServed = 'hmis:WarZonesServed'
xpWarZoneIDIDNum = 'hmis:WarZoneID/hmis:IDNum'
xpWarZoneIDIDStr = 'hmis:WarZoneID/hmis:IDStr'
xpWarZoneIDDeleteOccurredDate = 'hmis:WarZoneID/@hmis:deleteOccurredDate'
xpWarZoneIDDeleteEffective = 'hmis:WarZoneID/@hmis:deleteEffective'
xpWarZoneIDDelete = 'hmis:WarZoneID/@hmis:delete'
xpMonthsInWarZone = 'hmis:MonthsInWarZone'
xpMonthsInWarZoneDateCollected = 'hmis:MonthsInWarZone/@hmis:dateCollected'
xpMonthsInWarZoneDateEffective = 'hmis:MonthsInWarZone/@hmis:dateEffective'
xpMonthsInWarZoneDataCollectionStage = 'hmis:MonthsInWarZone/@hmis:dataCollectionStage'
xpReceivedFire = 'hmis:ReceivedFire'
xpReceivedFireDateCollected = 'hmis:ReceivedFire/@hmis:dateCollected'
xpReceivedFireDateEffective = 'hmis:ReceivedFire/@hmis:dateEffective'
xpReceivedFireDataCollectionStage = 'hmis:ReceivedFire/@hmis:dataCollectionStage'
xpWarZone = 'hmis:WarZone'
xpWarZoneDateCollected = 'hmis:WarZone/@hmis:dateCollected'
xpWarZoneDateEffective = 'hmis:WarZone/@hmis:dateEffective'
xpWarZoneDataCollectionStage = 'hmis:WarZone/@hmis:dataCollectionStage'
xpWarZoneOther = 'hmis:WarZoneOther'
xpWarZoneOtherDateCollected = 'hmis:WarZoneOther/@hmis:dateCollected'
xpWarZoneOtherDateEffective = 'hmis:WarZoneOther/@hmis:dateEffective'
xpWarZoneOtherDataCollectionStage = 'hmis:WarZoneOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpVeteranWarZonesServed, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'war_zone_id_id_id_num', item.xpath(xpWarZoneIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'war_zone_id_id_id_str', item.xpath(xpWarZoneIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'war_zone_id_id_delete_occurred_date', item.xpath(xpWarZoneIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'war_zone_id_id_delete_effective_date', item.xpath(xpWarZoneIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'war_zone_id_id_delete', item.xpath(xpWarZoneIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'months_in_war_zone', item.xpath(xpMonthsInWarZone, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'months_in_war_zone_date_collected', item.xpath(xpMonthsInWarZoneDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'months_in_war_zone_date_effective', item.xpath(xpMonthsInWarZoneDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'months_in_war_zone_data_collection_stage', item.xpath(xpMonthsInWarZoneDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'received_fire', item.xpath(xpReceivedFire, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'received_fire_date_collected', item.xpath(xpReceivedFireDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'received_fire_date_effective', item.xpath(xpReceivedFireDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'received_fire_data_collection_stage', item.xpath(xpReceivedFireDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'war_zone', item.xpath(xpWarZone, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'war_zone_date_collected', item.xpath(xpWarZoneDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'war_zone_date_effective', item.xpath(xpWarZoneDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'war_zone_data_collection_stage', item.xpath(xpWarZoneDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'war_zone_other', item.xpath(xpWarZoneOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'war_zone_other_date_collected', item.xpath(xpWarZoneOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'war_zone_other_date_effective', item.xpath(xpWarZoneOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'war_zone_other_data_collection_stage', item.xpath(xpWarZoneOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, VeteranWarzonesServed)
''' Parse sub-tables '''
def parse_vocational_training(self, element):
''' Element paths '''
xpVocationalTraining = 'hmis:VocationalTraining'
xpVocationalTrainingDateCollected = 'hmis:VocationalTraining/@hmis:dateCollected'
xpVocationalTrainingDateEffective = 'hmis:VocationalTraining/@hmis:dateEffective'
xpVocationalTrainingDataCollectionStage = 'hmis:VocationalTraining/@hmis:dataCollectionStage'
itemElements = element.xpath(xpVocationalTraining, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'vocational_training', item.xpath(xpVocationalTraining, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'vocational_training_date_collected', item.xpath(xpVocationalTrainingDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'vocational_training_date_effective', item.xpath(xpVocationalTrainingDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'vocational_training_data_collection_stage', item.xpath(xpVocationalTrainingDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, VocationalTraining)
''' Parse sub-tables '''
def parse_substance_abuse_problem(self, element):
''' Element paths '''
xpSubstanceAbuseProblem = 'hmis:SubstanceAbuseProblem'
xpHasSubstanceAbuseProblem = 'hmis:HasSubstanceAbuseProblem'
xpHasSubstanceAbuseProblemDateCollected = 'hmis:HasSubstanceAbuseProblem/@hmis:dateCollected'
xpHasSubstanceAbuseProblemDateEffective = 'hmis:HasSubstanceAbuseProblem/@hmis:dateEffective'
xpHasSubstanceAbuseProblemDataCollectionStage = 'hmis:HasSubstanceAbuseProblem/@hmis:dataCollectionStage'
xpSubstanceAbuseIndefinite = 'hmis:SubstanceAbuseIndefinite'
xpSubstanceAbuseIndefiniteDateCollected = 'hmis:SubstanceAbuseIndefinite/@hmis:dateCollected'
xpSubstanceAbuseIndefiniteDateEffective = 'hmis:SubstanceAbuseIndefinite/@hmis:dateEffective'
xpSubstanceAbuseIndefiniteDataCollectionStage = 'hmis:SubstanceAbuseIndefinite/@hmis:dataCollectionStage'
xpReceiveSubstanceAbuseServices = 'hmis:ReceiveSubstanceAbuseServices'
xpReceiveSubstanceAbuseServicesDateCollected = 'hmis:ReceiveSubstanceAbuseServices/@hmis:dateCollected'
xpReceiveSubstanceAbuseServicesDateEffective = 'hmis:ReceiveSubstanceAbuseServices/@hmis:dateEffective'
xpReceiveSubstanceAbuseServicesDataCollectionStage = 'hmis:ReceiveSubstanceAbuseServices/@hmis:dataCollectionStage'
itemElements = element.xpath(xpSubstanceAbuseProblem, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'has_substance_abuse_problem', item.xpath(xpHasSubstanceAbuseProblem, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'has_substance_abuse_problem_date_collected', item.xpath(xpHasSubstanceAbuseProblemDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_substance_abuse_problem_date_effective', item.xpath(xpHasSubstanceAbuseProblemDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_substance_abuse_problem_data_collection_stage', item.xpath(xpHasSubstanceAbuseProblemDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'substance_abuse_indefinite', item.xpath(xpSubstanceAbuseIndefinite, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'substance_abuse_indefinite_date_collected', item.xpath(xpSubstanceAbuseIndefiniteDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'substance_abuse_indefinite_date_effective', item.xpath(xpSubstanceAbuseIndefiniteDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'substance_abuse_indefinite_data_collection_stage', item.xpath(xpSubstanceAbuseIndefiniteDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receive_substance_abuse_services', item.xpath(xpReceiveSubstanceAbuseServices, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receive_substance_abuse_services_date_collected', item.xpath(xpReceiveSubstanceAbuseServicesDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_substance_abuse_services_date_effective', item.xpath(xpReceiveSubstanceAbuseServicesDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_substance_abuse_services_data_collection_stage', item.xpath(xpReceiveSubstanceAbuseServicesDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, SubstanceAbuseProblem)
''' Parse sub-tables '''
def parse_pregnancy(self, element):
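''' Shreds each hmis:Pregnancy element (pregnancy IDs, status and due date with their collection attributes) into the Pregnancy table. '''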
''' Element paths '''
xpPregnancy = 'hmis:Pregnancy'
xpPregnancyIDIDNum = 'hmis:PregnancyID/hmis:IDNum'
xpPregnancyIDIDStr = 'hmis:PregnancyID/hmis:IDStr'
xpPregnancyIDDeleteOccurredDate = 'hmis:PregnancyID/@hmis:deleteOccurredDate'
xpPregnancyIDDeleteEffective = 'hmis:PregnancyID/@hmis:deleteEffective'
xpPregnancyIDDelete = 'hmis:PregnancyID/@hmis:delete'
xpPregnancyStatus = 'hmis:PregnancyStatus'
xpPregnancyStatusDateCollected = 'hmis:PregnancyStatus/@hmis:dateCollected'
xpPregnancyStatusDateEffective = 'hmis:PregnancyStatus/@hmis:dateEffective'
xpPregnancyStatusDataCollectionStage = 'hmis:PregnancyStatus/@hmis:dataCollectionStage'
xpDueDate = 'hmis:DueDate'
xpDueDateDateCollected = 'hmis:DueDate/@hmis:dateCollected'
xpDueDateDateEffective = 'hmis:DueDate/@hmis:dateEffective'#IGNORE:@UnusedVariable
xpDueDateDataCollectionStage = 'hmis:DueDate/@hmis:dataCollectionStage'
itemElements = element.xpath(xpPregnancy, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'pregnancy_id_id_id_num', item.xpath(xpPregnancyIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pregnancy_id_id_id_str', item.xpath(xpPregnancyIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pregnancy_id_id_delete_occurred_date', item.xpath(xpPregnancyIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'pregnancy_id_id_delete_effective_date', item.xpath(xpPregnancyIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'pregnancy_id_id_delete', item.xpath(xpPregnancyIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'pregnancy_status', item.xpath(xpPregnancyStatus, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pregnancy_status_date_collected', item.xpath(xpPregnancyStatusDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'pregnancy_status_date_effective', item.xpath(xpPregnancyStatusDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'pregnancy_status_data_collection_stage', item.xpath(xpPregnancyStatusDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'due_date', item.xpath(xpDueDate, namespaces = self.nsmap), 'date')
existence_test_and_add(self, 'due_date_date_collected', item.xpath(xpDueDateDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'due_date_data_collection_stage', item.xpath(xpDueDateDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Pregnancy)
''' Parse sub-tables '''
def parse_prior_residence(self, element):
''' Element paths '''
xpPriorResidence = 'hmis:PriorResidence'
xpPriorResidenceIDIDNum = 'hmis:PriorResidenceID/hmis:IDNum'
xpPriorResidenceIDIDStr = 'hmis:PriorResidenceID/hmis:IDStr'
xpPriorResidenceIDDeleteOccurredDate = 'hmis:PriorResidenceID/@hmis:deleteOccurredDate'
xpPriorResidenceIDDeleteEffective = 'hmis:PriorResidenceID/@hmis:deleteEffective'
xpPriorResidenceIDDelete = 'hmis:PriorResidenceID/@hmis:delete'
xpPriorResidenceCode = 'hmis:PriorResidenceCode'
xpPriorResidenceCodeDateCollected = 'hmis:PriorResidenceCode/@hmis:dateCollected'
xpPriorResidenceCodeDateEffective = 'hmis:PriorResidenceCode/@hmis:dateEffective'
xpPriorResidenceCodeDataCollectionStage = 'hmis:PriorResidenceCode/@hmis:dataCollectionStage'
xpPriorResidenceOther = 'hmis:PriorResidenceOther'
xpPriorResidenceOtherDateCollected = 'hmis:PriorResidenceOther/@hmis:dateCollected'
xpPriorResidenceOtherDateEffective = 'hmis:PriorResidenceOther/@hmis:dateEffective'
xpPriorResidenceOtherDataCollectionStage = 'hmis:PriorResidenceOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpPriorResidence, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'prior_residence_id_id_num', item.xpath(xpPriorResidenceIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'prior_residence_id_id_str', item.xpath(xpPriorResidenceIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'prior_residence_id_delete_occurred_date', item.xpath(xpPriorResidenceIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'prior_residence_id_delete_effective_date', item.xpath(xpPriorResidenceIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'prior_residence_id_delete', item.xpath(xpPriorResidenceIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'prior_residence_code', item.xpath(xpPriorResidenceCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'prior_residence_code_date_collected', item.xpath(xpPriorResidenceCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'prior_residence_code_date_effective', item.xpath(xpPriorResidenceCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'prior_residence_code_data_collection_stage', item.xpath(xpPriorResidenceCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'prior_residence_other', item.xpath(xpPriorResidenceOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'prior_residence_other_date_collected', item.xpath(xpPriorResidenceOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'prior_residence_other_date_effective', item.xpath(xpPriorResidenceOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'prior_residence_other_data_collection_stage', item.xpath(xpPriorResidenceOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, PriorResidence)
''' Parse sub-tables '''
def parse_physical_disability(self, element):
''' Element paths '''
xpPhysicalDisability = 'hmis:PhysicalDisability'
xpHasPhysicalDisability = 'hmis:HasPhysicalDisability'
xpHasPhysicalDisabilityDateCollected = 'hmis:HasPhysicalDisability/@hmis:dateCollected'
xpHasPhysicalDisabilityDateEffective = 'hmis:HasPhysicalDisability/@hmis:dateEffective'
xpHasPhysicalDisabilityDataCollectionStage = 'hmis:HasPhysicalDisability/@hmis:dataCollectionStage'
xpReceivePhysicalDisabilityServices = 'hmis:ReceivePhysicalDisabilityServices'
xpReceivePhysicalDisabilityServicesDateCollected = 'hmis:ReceivePhysicalDisabilityServices/@hmis:dateCollected'
xpReceivePhysicalDisabilityServicesDateEffective = 'hmis:ReceivePhysicalDisabilityServices/@hmis:dateEffective'
xpReceivePhysicalDisabilityServicesDataCollectionStage = 'hmis:ReceivePhysicalDisabilityServices/@hmis:dataCollectionStage'
itemElements = element.xpath(xpPhysicalDisability, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'has_physical_disability', item.xpath(xpHasPhysicalDisability, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'has_physical_disability_date_collected', item.xpath(xpHasPhysicalDisabilityDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_physical_disability_date_effective', item.xpath(xpHasPhysicalDisabilityDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_physical_disability_data_collection_stage', item.xpath(xpHasPhysicalDisabilityDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receive_physical_disability_services', item.xpath(xpReceivePhysicalDisabilityServices, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receive_physical_disability_services_date_collected', item.xpath(xpReceivePhysicalDisabilityServicesDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_physical_disability_services_date_effective', item.xpath(xpReceivePhysicalDisabilityServicesDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_physical_disability_services_data_collection_stage', item.xpath(xpReceivePhysicalDisabilityServicesDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, PhysicalDisability)
''' Parse sub-tables '''
def parse_non_cash_benefits(self, element):
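''' Parse NonCashBenefits/NonCashBenefit elements and shred each one into the NonCashBenefits table. '''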
''' Element paths '''
xpNonCashBenefit = 'hmis:NonCashBenefits/hmis:NonCashBenefit'
xpNonCashBenefitIDIDNum = 'hmis:NonCashBenefitID/hmis:IDNum'
xpNonCashBenefitIDIDStr = 'hmis:NonCashBenefitID/hmis:IDStr'
xpNonCashBenefitIDDeleteOccurredDate = 'hmis:NonCashBenefitID/@hmis:deleteOccurredDate'
xpNonCashBenefitIDDeleteEffective = 'hmis:NonCashBenefitID/@hmis:deleteEffective'
xpNonCashBenefitIDDelete = 'hmis:NonCashBenefitID/@hmis:delete'
xpNonCashSourceCode = 'hmis:NonCashSourceCode'
xpNonCashSourceCodeDateCollected = 'hmis:NonCashSourceCode/@hmis:dateCollected'
xpNonCashSourceCodeDateEffective = 'hmis:NonCashSourceCode/@hmis:dateEffective'
xpNonCashSourceCodeDataCollectionStage = 'hmis:NonCashSourceCode/@hmis:dataCollectionStage'
xpNonCashSourceOther = 'hmis:NonCashSourceOther'
xpNonCashSourceOtherDateCollected = 'hmis:NonCashSourceOther/@hmis:dateCollected'
xpNonCashSourceOtherDateEffective = 'hmis:NonCashSourceOther/@hmis:dateEffective'
xpNonCashSourceOtherDataCollectionStage = 'hmis:NonCashSourceOther/@hmis:dataCollectionStage'
xpReceivingNonCashSource = 'hmis:ReceivingNonCashSource'
xpReceivingNonCashSourceDateCollected = 'hmis:ReceivingNonCashSource/@hmis:dateCollected'
xpReceivingNonCashSourceDateEffective = 'hmis:ReceivingNonCashSource/@hmis:dateEffective'
xpReceivingNonCashSourceDataCollectionStage = 'hmis:ReceivingNonCashSource/@hmis:dataCollectionStage'
itemElements = element.xpath(xpNonCashBenefit, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'non_cash_benefit_id_id_id_num', item.xpath(xpNonCashBenefitIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'non_cash_benefit_id_id_id_str', item.xpath(xpNonCashBenefitIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'non_cash_benefit_id_id_delete_occurred_date', item.xpath(xpNonCashBenefitIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'non_cash_benefit_id_id_delete_effective_date', item.xpath(xpNonCashBenefitIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'non_cash_benefit_id_id_delete', item.xpath(xpNonCashBenefitIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'non_cash_source_code', item.xpath(xpNonCashSourceCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'non_cash_source_code_date_collected', item.xpath(xpNonCashSourceCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'non_cash_source_code_date_effective', item.xpath(xpNonCashSourceCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'non_cash_source_code_data_collection_stage', item.xpath(xpNonCashSourceCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'non_cash_source_other', item.xpath(xpNonCashSourceOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'non_cash_source_other_date_collected', item.xpath(xpNonCashSourceOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'non_cash_source_other_date_effective', item.xpath(xpNonCashSourceOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'non_cash_source_other_data_collection_stage', item.xpath(xpNonCashSourceOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receiving_non_cash_source', item.xpath(xpReceivingNonCashSource, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receiving_non_cash_source_date_collected', item.xpath(xpReceivingNonCashSourceDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receiving_non_cash_source_date_effective', item.xpath(xpReceivingNonCashSourceDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receiving_non_cash_source_data_collection_stage', item.xpath(xpReceivingNonCashSourceDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, NonCashBenefits)
''' Parse sub-tables '''
def parse_non_cash_benefits_last_30_days(self, element):
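''' Parse NonCashBenefitsLast30Days elements and shred them into the NonCashBenefitsLast30Days table. '''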
''' Element paths '''
xpNonCashBenefitsLast30Days = 'hmis:NonCashBenefitsLast30Days'
xpNonCashBenefitsLast30DaysDateCollected = '@hmis:dateCollected'
xpNonCashBenefitsLast30DaysDateEffective = '@hmis:dateEffective'
xpNonCashBenefitsLast30DaysDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpNonCashBenefitsLast30Days, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'income_last_30_days', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_last_30_days_date_collected', item.xpath(xpNonCashBenefitsLast30DaysDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_last_30_days_date_effective', item.xpath(xpNonCashBenefitsLast30DaysDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_last_30_days_data_collection_stage', item.xpath(xpNonCashBenefitsLast30DaysDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, NonCashBenefitsLast30Days)
''' Parse sub-tables '''
def parse_mental_health_problem(self, element):
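''' Parse MentalHealthProblem elements and shred them into the MentalHealthProblem table. '''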
''' Element paths '''
xpMentalHealthProblem = 'hmis:MentalHealthProblem'
xpHasMentalHealthProblem = 'hmis:HasMentalHealthProblem'
xpHasMentalHealthProblemDateCollected = 'hmis:HasMentalHealthProblem/@hmis:dateCollected'
xpHasMentalHealthProblemDateEffective = 'hmis:HasMentalHealthProblem/@hmis:dateEffective'
xpHasMentalHealthProblemDataCollectionStage = 'hmis:HasMentalHealthProblem/@hmis:dataCollectionStage'
xpMentalHealthIndefinite = 'hmis:MentalHealthIndefinite'
xpMentalHealthIndefiniteDateCollected = 'hmis:MentalHealthIndefinite/@hmis:dateCollected'
xpMentalHealthIndefiniteDateEffective = 'hmis:MentalHealthIndefinite/@hmis:dateEffective'
xpMentalHealthIndefiniteDataCollectionStage = 'hmis:MentalHealthIndefinite/@hmis:dataCollectionStage'
xpReceiveMentalHealthServices = 'hmis:ReceiveMentalHealthServices'
xpReceiveMentalHealthServicesDateCollected = 'hmis:ReceiveMentalHealthServices/@hmis:dateCollected'
xpReceiveMentalHealthServicesDateEffective = 'hmis:ReceiveMentalHealthServices/@hmis:dateEffective'
xpReceiveMentalHealthServicesDataCollectionStage = 'hmis:ReceiveMentalHealthServices/@hmis:dataCollectionStage'
itemElements = element.xpath(xpMentalHealthProblem, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'has_mental_health_problem', item.xpath(xpHasMentalHealthProblem, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'has_mental_health_problem_date_collected', item.xpath(xpHasMentalHealthProblemDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_mental_health_problem_date_effective', item.xpath(xpHasMentalHealthProblemDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_mental_health_problem_data_collection_stage', item.xpath(xpHasMentalHealthProblemDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'mental_health_indefinite', item.xpath(xpMentalHealthIndefinite, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'mental_health_indefinite_date_collected', item.xpath(xpMentalHealthIndefiniteDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'mental_health_indefinite_date_effective', item.xpath(xpMentalHealthIndefiniteDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'mental_health_indefinite_data_collection_stage', item.xpath(xpMentalHealthIndefiniteDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receive_mental_health_services', item.xpath(xpReceiveMentalHealthServices, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receive_mental_health_services_date_collected', item.xpath(xpReceiveMentalHealthServicesDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_mental_health_services_date_effective', item.xpath(xpReceiveMentalHealthServicesDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_mental_health_services_data_collection_stage', item.xpath(xpReceiveMentalHealthServicesDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, MentalHealthProblem)
''' Parse sub-tables '''
def parse_length_of_stay_at_prior_residence(self, element):
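''' Parse LengthOfStayAtPriorResidence elements and shred them into the LengthOfStayAtPriorResidence table. '''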
''' Element paths '''
xpLengthOfStayAtPriorResidence = 'hmis:LengthOfStayAtPriorResidence'
xpLengthOfStayAtPriorResidenceDateCollected = '@hmis:dateCollected'
xpLengthOfStayAtPriorResidenceDateEffective = '@hmis:dateEffective'
xpLengthOfStayAtPriorResidenceDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpLengthOfStayAtPriorResidence, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'length_of_stay_at_prior_residence', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'length_of_stay_at_prior_residence_date_collected', item.xpath(xpLengthOfStayAtPriorResidenceDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'length_of_stay_at_prior_residence_date_effective', item.xpath(xpLengthOfStayAtPriorResidenceDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'length_of_stay_at_prior_residence_data_collection_stage', item.xpath(xpLengthOfStayAtPriorResidenceDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, LengthOfStayAtPriorResidence)
''' Parse sub-tables '''
def parse_income_total_monthly(self, element):
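''' Parse IncomeTotalMonthly elements and shred them into the IncomeTotalMonthly table. '''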
''' Element paths '''
xpIncomeTotalMonthly = 'hmis:IncomeTotalMonthly'
xpIncomeTotalMonthlyDateCollected = '@hmis:dateCollected'
xpIncomeTotalMonthlyDateEffective = '@hmis:dateEffective'
xpIncomeTotalMonthlyDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpIncomeTotalMonthly, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'income_total_monthly', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_total_monthly_date_collected', item.xpath(xpIncomeTotalMonthlyDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_total_monthly_date_effective', item.xpath(xpIncomeTotalMonthlyDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_total_monthly_data_collection_stage', item.xpath(xpIncomeTotalMonthlyDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, IncomeTotalMonthly)
''' Parse sub-tables '''
def parse_hud_chronic_homeless(self, element):
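''' Parse HUDChronicHomeless elements and shred them into the HudChronicHomeless table. '''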
''' Element paths '''
xpHUDChronicHomeless = 'hmis:HUDChronicHomeless'
xpHUDChronicHomelessDateCollected = '@hmis:dateCollected'
xpHUDChronicHomelessDateEffective = '@hmis:dateEffective'
xpHUDChronicHomelessDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpHUDChronicHomeless, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'hud_chronic_homeless', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hud_chronic_homeless_date_collected', item.xpath(xpHUDChronicHomelessDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'hud_chronic_homeless_date_effective', item.xpath(xpHUDChronicHomelessDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'hud_chronic_homeless_data_collection_stage', item.xpath(xpHUDChronicHomelessDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, HudChronicHomeless)
''' Parse sub-tables '''
def parse_income_last_30_days(self, element):
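''' Parse IncomeLast30Days elements and shred them into the IncomeLast30Days table. '''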
''' Element paths '''
xpIncomeLast30Days = 'hmis:IncomeLast30Days'
xpIncomeLast30DaysDateCollected = '@hmis:dateCollected'
xpIncomeLast30DaysDateEffective = '@hmis:dateEffective'
xpIncomeLast30DaysDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpIncomeLast30Days, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'income_last_30_days', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_last_30_days_date_collected', item.xpath(xpIncomeLast30DaysDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_last_30_days_date_effective', item.xpath(xpIncomeLast30DaysDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_last_30_days_data_collection_stage', item.xpath(xpIncomeLast30DaysDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, IncomeLast30Days)
''' Parse sub-tables '''
def parse_highest_school_level(self, element):
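''' Parse HighestSchoolLevel elements and shred them into the HighestSchoolLevel table. '''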
''' Element paths '''
xpHighestSchoolLevel = 'hmis:HighestSchoolLevel'
xpHighestSchoolLevelDateCollected = '@hmis:dateCollected'
xpHighestSchoolLevelDateEffective = '@hmis:dateEffective'
xpHighestSchoolLevelDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpHighestSchoolLevel, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'highest_school_level', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'highest_school_level_date_collected', item.xpath(xpHighestSchoolLevelDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'highest_school_level_date_effective', item.xpath(xpHighestSchoolLevelDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'highest_school_level_data_collection_stage', item.xpath(xpHighestSchoolLevelDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, HighestSchoolLevel)
''' Parse sub-tables '''
def parse_hiv_aids_status(self, element):
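''' Parse HIVAIDSStatus elements and shred them into the HivAidsStatus table. '''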
''' Element paths '''
xpHIVAIDSStatus = 'hmis:HIVAIDSStatus'
xpHasHIVAIDS = 'hmis:HasHIVAIDS'
xpHasHIVAIDSDateCollected = 'hmis:HasHIVAIDS/@hmis:dateCollected'
xpHasHIVAIDSDateEffective = 'hmis:HasHIVAIDS/@hmis:dateEffective'
xpHasHIVAIDSDataCollectionStage = 'hmis:HasHIVAIDS/@hmis:dataCollectionStage'
xpReceiveHIVAIDSServices = 'hmis:ReceiveHIVAIDSServices'
xpReceiveHIVAIDSServicesDateCollected = 'hmis:ReceiveHIVAIDSServices/@hmis:dateCollected'
xpReceiveHIVAIDSServicesDateEffective = 'hmis:ReceiveHIVAIDSServices/@hmis:dateEffective'
xpReceiveHIVAIDSServicesDataCollectionStage = 'hmis:ReceiveHIVAIDSServices/@hmis:dataCollectionStage'
itemElements = element.xpath(xpHIVAIDSStatus, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'has_hiv_aids', item.xpath(xpHasHIVAIDS, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'has_hiv_aids_date_collected', item.xpath(xpHasHIVAIDSDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_hiv_aids_date_effective', item.xpath(xpHasHIVAIDSDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_hiv_aids_data_collection_stage', item.xpath(xpHasHIVAIDSDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receive_hiv_aids_services', item.xpath(xpReceiveHIVAIDSServices, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receive_hiv_aids_services_date_collected', item.xpath(xpReceiveHIVAIDSServicesDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_hiv_aids_services_date_effective', item.xpath(xpReceiveHIVAIDSServicesDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_hiv_aids_services_data_collection_stage', item.xpath(xpReceiveHIVAIDSServicesDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, HivAidsStatus)
''' Parse sub-tables '''
def parse_health_status(self, element):
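''' Parse HealthStatus elements and shred them into the HealthStatus table. '''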
''' Element paths '''
xpHealthStatus = 'hmis:HealthStatus'
xpHealthStatusDateCollected = '@hmis:dateCollected'
xpHealthStatusDateEffective = '@hmis:dateEffective'
xpHealthStatusDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpHealthStatus, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'health_status', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'health_status_date_collected', item.xpath(xpHealthStatusDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'health_status_date_effective', item.xpath(xpHealthStatusDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'health_status_data_collection_stage', item.xpath(xpHealthStatusDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, HealthStatus)
''' Parse sub-tables '''
def parse_engaged_date(self, element):
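''' Parse EngagedDate elements and shred them into the EngagedDate table. '''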
''' Element paths '''
xpEngagedDate = 'hmis:EngagedDate'
xpEngagedDateDateCollected = '@hmis:dateCollected'
xpEngagedDateDateEffective = '@hmis:dateEffective' #IGNORE:@UnusedVariable
xpEngagedDateDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpEngagedDate, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'engaged_date', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'engaged_date_date_collected', item.xpath(xpEngagedDateDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'engaged_date_data_collection_stage', item.xpath(xpEngagedDateDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, EngagedDate)
''' Parse sub-tables '''
def parse_employment(self, element):
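''' Parse Employment elements and shred them into the Employment table. '''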
''' Element paths '''
xpEmployment = 'hmis:Employment'
xpEmploymentIDIDNum = 'hmis:EmploymentID/hmis:IDNum'
xpEmploymentIDIDStr = 'hmis:EmploymentID/hmis:IDStr'
xpEmploymentIDDeleteOccurredDate = 'hmis:EmploymentID/@hmis:deleteOccurredDate'
xpEmploymentIDDeleteEffective = 'hmis:EmploymentID/@hmis:deleteEffective'
xpEmploymentIDDelete = 'hmis:EmploymentID/@hmis:delete'
xpCurrentlyEmployed = 'hmis:CurrentlyEmployed'
xpCurrentlyEmployedDateCollected = 'hmis:CurrentlyEmployed/@hmis:dateCollected'
xpCurrentlyEmployedDateEffective = 'hmis:CurrentlyEmployed/@hmis:dateEffective'
xpCurrentlyEmployedDataCollectionStage = 'hmis:CurrentlyEmployed/@hmis:dataCollectionStage'
xpHoursWorkedLastWeek = 'hmis:HoursWorkedLastWeek'
xpHoursWorkedLastWeekDateCollected = 'hmis:HoursWorkedLastWeek/@hmis:dateCollected'
xpHoursWorkedLastWeekDateEffective = 'hmis:HoursWorkedLastWeek/@hmis:dateEffective'
xpHoursWorkedLastWeekDataCollectionStage = 'hmis:HoursWorkedLastWeek/@hmis:dataCollectionStage'
xpEmploymentTenure = 'hmis:EmploymentTenure'
xpEmploymentTenureDateCollected = 'hmis:EmploymentTenure/@hmis:dateCollected'
xpEmploymentTenureDateEffective = 'hmis:EmploymentTenure/@hmis:dateEffective'
xpEmploymentTenureDataCollectionStage = 'hmis:EmploymentTenure/@hmis:dataCollectionStage'
xpLookingForWork = 'hmis:LookingForWork'
xpLookingForWorkDateCollected = 'hmis:LookingForWork/@hmis:dateCollected'
xpLookingForWorkDateEffective = 'hmis:LookingForWork/@hmis:dateEffective'
xpLookingForWorkDataCollectionStage = 'hmis:LookingForWork/@hmis:dataCollectionStage'
itemElements = element.xpath(xpEmployment, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'employment_id_id_id_num', item.xpath(xpEmploymentIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'employment_id_id_id_str', item.xpath(xpEmploymentIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'employment_id_id_delete_occurred_date', item.xpath(xpEmploymentIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_id_id_delete_effective_date', item.xpath(xpEmploymentIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_id_id_delete', item.xpath(xpEmploymentIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'currently_employed', item.xpath(xpCurrentlyEmployed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'currently_employed_date_collected', item.xpath(xpCurrentlyEmployedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_employed_date_effective', item.xpath(xpCurrentlyEmployedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_employed_data_collection_stage', item.xpath(xpCurrentlyEmployedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'hours_worked_last_week', item.xpath(xpHoursWorkedLastWeek, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hours_worked_last_week_date_collected', item.xpath(xpHoursWorkedLastWeekDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'hours_worked_last_week_date_effective', item.xpath(xpHoursWorkedLastWeekDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'hours_worked_last_week_data_collection_stage', item.xpath(xpHoursWorkedLastWeekDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'employment_tenure', item.xpath(xpEmploymentTenure, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'employment_tenure_date_collected', item.xpath(xpEmploymentTenureDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_tenure_date_effective', item.xpath(xpEmploymentTenureDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_tenure_data_collection_stage', item.xpath(xpEmploymentTenureDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'looking_for_work', item.xpath(xpLookingForWork, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'looking_for_work_date_collected', item.xpath(xpLookingForWorkDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'looking_for_work_date_effective', item.xpath(xpLookingForWorkDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'looking_for_work_data_collection_stage', item.xpath(xpLookingForWorkDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Employment)
''' Parse sub-tables '''
def parse_domestic_violence(self, element):
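''' Parse DomesticViolence elements and shred them into the DomesticViolence table. '''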
''' Element paths '''
xpDomesticViolence = 'hmis:DomesticViolence'
xpDomesticViolenceSurvivor = 'hmis:DomesticViolenceSurvivor'
xpDomesticViolenceSurvivorDateCollected = 'hmis:DomesticViolenceSurvivor/@hmis:dateCollected'
xpDomesticViolenceSurvivorDateEffective = 'hmis:DomesticViolenceSurvivor/@hmis:dateEffective'
xpDomesticViolenceSurvivorDataCollectionStage = 'hmis:DomesticViolenceSurvivor/@hmis:dataCollectionStage'
xpDVOccurred = 'hmis:DVOccurred'
xpDVOccurredDateCollected = 'hmis:DVOccurred/@hmis:dateCollected'
xpDVOccurredDateEffective = 'hmis:DVOccurred/@hmis:dateEffective'
xpDVOccurredDataCollectionStage = 'hmis:DVOccurred/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDomesticViolence, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'domestic_violence_survivor', item.xpath(xpDomesticViolenceSurvivor, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'domestic_violence_survivor_date_collected', item.xpath(xpDomesticViolenceSurvivorDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'domestic_violence_survivor_date_effective', item.xpath(xpDomesticViolenceSurvivorDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'domestic_violence_survivor_data_collection_stage', item.xpath(xpDomesticViolenceSurvivorDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'dv_occurred', item.xpath(xpDVOccurred, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'dv_occurred_date_collected', item.xpath(xpDVOccurredDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'dv_occurred_date_effective', item.xpath(xpDVOccurredDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'dv_occurred_data_collection_stage', item.xpath(xpDVOccurredDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DomesticViolence)
''' Parse sub-tables '''
def parse_disabling_condition(self, element):
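''' Parse DisablingCondition elements and shred them into the DisablingCondition table. '''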
''' Element paths '''
xpDisablingCondition = 'hmis:DisablingCondition'
xpDisablingConditionDateCollected = '@hmis:dateCollected'
xpDisablingConditionDateEffective = '@hmis:dateEffective'
xpDisablingConditionDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpDisablingCondition, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'disabling_condition', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'disabling_condition_date_collected', item.xpath(xpDisablingConditionDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'disabling_condition_date_effective', item.xpath(xpDisablingConditionDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'disabling_condition_data_collection_stage', item.xpath(xpDisablingConditionDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DisablingCondition)
''' Parse sub-tables '''
def parse_developmental_disability(self, element):
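''' Parse DevelopmentalDisability elements and shred them into the DevelopmentalDisability table. '''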
''' Element paths '''
xpDevelopmentalDisability = 'hmis:DevelopmentalDisability'
xpHasDevelopmentalDisability = 'hmis:HasDevelopmentalDisability'
xpHasDevelopmentalDisabilityDateCollected = 'hmis:HasDevelopmentalDisability/@hmis:dateCollected'
xpHasDevelopmentalDisabilityDateEffective = 'hmis:HasDevelopmentalDisability/@hmis:dateEffective'
xpHasDevelopmentalDisabilityDataCollectionStage = 'hmis:HasDevelopmentalDisability/@hmis:dataCollectionStage'
xpReceiveDevelopmentalDisability = 'hmis:ReceiveDevelopmentalDisability'
xpReceiveDevelopmentalDisabilityDateCollected = 'hmis:ReceiveDevelopmentalDisability/@hmis:dateCollected'
xpReceiveDevelopmentalDisabilityDateEffective = 'hmis:ReceiveDevelopmentalDisability/@hmis:dateEffective'
xpReceiveDevelopmentalDisabilityDataCollectionStage = 'hmis:ReceiveDevelopmentalDisability/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDevelopmentalDisability, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'has_developmental_disability', item.xpath(xpHasDevelopmentalDisability, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'has_developmental_disability_date_collected', item.xpath(xpHasDevelopmentalDisabilityDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_developmental_disability_date_effective', item.xpath(xpHasDevelopmentalDisabilityDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_developmental_disability_data_collection_stage', item.xpath(xpHasDevelopmentalDisabilityDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receive_developmental_disability', item.xpath(xpReceiveDevelopmentalDisability, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receive_developmental_disability_date_collected', item.xpath(xpReceiveDevelopmentalDisabilityDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_developmental_disability_date_effective', item.xpath(xpReceiveDevelopmentalDisabilityDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_developmental_disability_data_collection_stage', item.xpath(xpReceiveDevelopmentalDisabilityDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DevelopmentalDisability)
''' Parse sub-tables '''
def parse_destinations(self, element):
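''' Parse Destinations/Destination elements and shred each one into the Destinations table. '''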
''' Element paths '''
xpDestinations = 'hmis:Destinations/hmis:Destination'
xpDestinationIDIDNum = 'hmis:DestinationID/hmis:IDNum'
xpDestinationIDIDStr = 'hmis:DestinationID/hmis:IDStr'
xpDestinationIDDeleteOccurredDate = 'hmis:DestinationID/@hmis:deleteOccurredDate'
xpDestinationIDDeleteEffective = 'hmis:DestinationID/@hmis:deleteEffective'
xpDestinationIDDelete = 'hmis:DestinationID/@hmis:delete'
xpDestinationCode = 'hmis:DestinationCode'
xpDestinationCodeDateCollected = 'hmis:DestinationCode/@hmis:dateCollected'
xpDestinationCodeDateEffective = 'hmis:DestinationCode/@hmis:dateEffective'
xpDestinationCodeDataCollectionStage = 'hmis:DestinationCode/@hmis:dataCollectionStage'
xpDestinationOther = 'hmis:DestinationOther'
xpDestinationOtherDateCollected = 'hmis:DestinationOther/@hmis:dateCollected'
xpDestinationOtherDateEffective = 'hmis:DestinationOther/@hmis:dateEffective'
xpDestinationOtherDataCollectionStage = 'hmis:DestinationOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDestinations, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'destination_id_id_num', item.xpath(xpDestinationIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_id_id_str', item.xpath(xpDestinationIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_id_delete_occurred_date', item.xpath(xpDestinationIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_id_delete_effective_date', item.xpath(xpDestinationIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_id_delete', item.xpath(xpDestinationIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'destination_code', item.xpath(xpDestinationCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_code_date_collected', item.xpath(xpDestinationCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_code_date_effective', item.xpath(xpDestinationCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_code_data_collection_stage', item.xpath(xpDestinationCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'destination_other', item.xpath(xpDestinationOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_other_date_collected', item.xpath(xpDestinationOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_other_date_effective', item.xpath(xpDestinationOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_other_data_collection_stage', item.xpath(xpDestinationOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Destinations)
''' Parse sub-tables '''
def parse_degree(self, element):
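''' Parse Degree elements, shred them into the Degree table, then parse any nested DegreeCode elements. '''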
''' Element paths '''
xpDegree = 'hmis:Degree'
xpDegreeIDIDNum = './hmis:IDNum'
xpDegreeIDIDStr = './hmis:IDStr'
xpDegreeIDDeleteOccurredDate = './@hmis:deleteOccurredDate'
xpDegreeIDDeleteEffective = './@hmis:deleteEffective'
xpDegreeIDDelete = './@hmis:delete'
xpDegreeOther = 'hmis:DegreeOther'
xpDegreeOtherDateCollected = 'hmis:DegreeOther/@hmis:dateCollected'
xpDegreeOtherDateEffective = 'hmis:DegreeOther/@hmis:dateEffective'
xpDegreeOtherDataCollectionStage = 'hmis:DegreeOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDegree, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'degree_id_id_num', item.xpath(xpDegreeIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_id_id_str', item.xpath(xpDegreeIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_id_delete_occurred_date', item.xpath(xpDegreeIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_id_delete_effective_date', item.xpath(xpDegreeIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_id_delete', item.xpath(xpDegreeIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'degree_other', item.xpath(xpDegreeOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_other_date_collected', item.xpath(xpDegreeOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_other_date_effective', item.xpath(xpDegreeOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_other_data_collection_stage', item.xpath(xpDegreeOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Degree)
''' Parse sub-tables '''
parse_degree_code(self, item)
def parse_degree_code(self, element):
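''' Parse DegreeCode children of a Degree element and shred them into the DegreeCode table, keyed to the parent Degree record. '''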
''' Element paths '''
xpDegreeCode = 'hmis:DegreeCode'
xpDegreeCodeDateCollected = '@hmis:dateCollected'
xpDegreeCodeDateEffective = '@hmis:dateEffective'
xpDegreeCodeDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpDegreeCode, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'degree_code', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_date_collected', item.xpath(xpDegreeCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_date_effective', item.xpath(xpDegreeCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_data_collection_stage', item.xpath(xpDegreeCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'degree_index_id', self.degree_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DegreeCode)
''' Parse sub-tables '''
def parse_currently_in_school(self, element):
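''' Parse CurrentlyInSchool elements and shred them into the CurrentlyInSchool table. '''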
''' Element paths '''
xpCurrentlyInSchool = 'hmis:CurrentlyInSchool'
xpCurrentlyInSchoolDateCollected = '@hmis:dateCollected'
xpCurrentlyInSchoolDateEffective = '@hmis:dateEffective'
xpCurrentlyInSchoolDataCollectionStage = '@hmis:dataCollectionStage'
itemElements = element.xpath(xpCurrentlyInSchool, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'currently_in_school', item.xpath('.', namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'currently_in_school_date_collected', item.xpath(xpCurrentlyInSchoolDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_in_school_date_effective', item.xpath(xpCurrentlyInSchoolDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_in_school_data_collection_stage', item.xpath(xpCurrentlyInSchoolDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, CurrentlyInSchool)
''' Parse sub-tables '''
def parse_contact_made(self, element):
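''' Parse ContactsMade/ContactMade elements and shred each one into the ContactMade table. '''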
''' Element paths '''
xpContactsMade = 'hmis:ContactsMade/hmis:ContactMade'
xpContactIDIDNum = 'hmis:ContactID/hmis:IDNum'
xpContactIDIDStr = 'hmis:ContactID/hmis:IDStr'
xpContactIDDeleteOccurredDate = 'hmis:ContactID/@hmis:deleteOccurredDate'
xpContactIDDeleteEffective = 'hmis:ContactID/@hmis:deleteEffective'
xpContactIDDelete = 'hmis:ContactID/@hmis:delete'
xpContactDate = 'hmis:ContactDate'
xpContactDateDataCollectionStage = 'hmis:ContactDate/@hmis:dataCollectionStage'
xpContactLocation = 'hmis:ContactLocation'
xpContactLocationDataCollectionStage = 'hmis:ContactLocation/@hmis:dataCollectionStage'
itemElements = element.xpath(xpContactsMade, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'contact_id_id_num', item.xpath(xpContactIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'contact_id_id_str', item.xpath(xpContactIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'contact_id_delete_occurred_date', item.xpath(xpContactIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'contact_id_delete_effective_date', item.xpath(xpContactIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'contact_id_delete', item.xpath(xpContactIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'contact_date', item.xpath(xpContactDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'contact_date_data_collection_stage', item.xpath(xpContactDateDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'contact_location', item.xpath(xpContactLocation, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'contact_location_data_collection_stage', item.xpath(xpContactLocationDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ContactMade)
''' Parse sub-tables '''
def parse_child_enrollment_status(self, element):
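''' Parse ChildEnrollmentStatus elements, shred them into the ChildEnrollmentStatus table, then parse any nested ChildEnrollmentBarrier elements. '''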
''' Element paths '''
xpChildEnrollmentStatus = 'hmis:ChildEnrollmentStatus'
xpChildEnrollmentStatusIDIDNum = 'hmis:ChildEnrollmentStatusID/hmis:IDNum'
xpChildEnrollmentStatusIDIDStr = 'hmis:ChildEnrollmentStatusID/hmis:IDStr'
xpChildEnrollmentStatusIDDeleteOccurredDate = 'hmis:ChildEnrollmentStatusID/@hmis:deleteOccurredDate'
xpChildEnrollmentStatusIDDeleteEffective = 'hmis:ChildEnrollmentStatusID/@hmis:deleteEffective'
xpChildEnrollmentStatusIDDelete = 'hmis:ChildEnrollmentStatusID/@hmis:delete'
xpChildCurrentlyEnrolledInSchool = 'hmis:ChildCurrentlyEnrolledInSchool'
xpChildCurrentlyEnrolledInSchoolDateCollected = 'hmis:ChildCurrentlyEnrolledInSchool/@hmis:dateCollected'
xpChildCurrentlyEnrolledInSchoolDateEffective = 'hmis:ChildCurrentlyEnrolledInSchool/@hmis:dateEffective'
xpChildCurrentlyEnrolledInSchoolDataCollectionStage = 'hmis:ChildCurrentlyEnrolledInSchool/@hmis:dataCollectionStage'
xpChildSchoolName = 'hmis:ChildSchoolName'
xpChildSchoolNameDateCollected = 'hmis:ChildSchoolName/@hmis:dateCollected'
xpChildSchoolNameDateEffective = 'hmis:ChildSchoolName/@hmis:dateEffective'
xpChildSchoolNameDataCollectionStage = 'hmis:ChildSchoolName/@hmis:dataCollectionStage'
xpChildMcKinneyVentoLiaison = 'hmis:ChildMcKinneyVentoLiaison'
xpChildMcKinneyVentoLiaisonDateCollected = 'hmis:ChildMcKinneyVentoLiaison/@hmis:dateCollected'
xpChildMcKinneyVentoLiaisonDateEffective = 'hmis:ChildMcKinneyVentoLiaison/@hmis:dateEffective'
xpChildMcKinneyVentoLiaisonDataCollectionStage = 'hmis:ChildMcKinneyVentoLiaison/@hmis:dataCollectionStage'
xpChildSchoolType = 'hmis:ChildSchoolType'
xpChildSchoolTypeDateCollected = 'hmis:ChildSchoolType/@hmis:dateCollected'
xpChildSchoolTypeDateEffective = 'hmis:ChildSchoolType/@hmis:dateEffective'
xpChildSchoolTypeDataCollectionStage = 'hmis:ChildSchoolType/@hmis:dataCollectionStage'
xpChildSchoolLastEnrolledDate = 'hmis:ChildSchoolLastEnrolledDate'
xpChildSchoolLastEnrolledDateDateCollected = 'hmis:ChildSchoolLastEnrolledDate/@hmis:dateCollected'
xpChildSchoolLastEnrolledDateDateEffective = 'hmis:ChildSchoolLastEnrolledDate/@hmis:dateEffective'#IGNORE:@UnusedVariable
xpChildSchoolLastEnrolledDateDataCollectionStage = 'hmis:ChildSchoolLastEnrolledDate/@hmis:dataCollectionStage'
itemElements = element.xpath(xpChildEnrollmentStatus, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'child_enrollment_status_id_id_num', item.xpath(xpChildEnrollmentStatusIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'child_enrollment_status_id_id_str', item.xpath(xpChildEnrollmentStatusIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'child_enrollment_status_id_delete_occurred_date', item.xpath(xpChildEnrollmentStatusIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_enrollment_status_id_delete_effective_date', item.xpath(xpChildEnrollmentStatusIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_enrollment_status_id_delete', item.xpath(xpChildEnrollmentStatusIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'child_currently_enrolled_in_school', item.xpath(xpChildCurrentlyEnrolledInSchool, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'child_currently_enrolled_in_school_date_collected', item.xpath(xpChildCurrentlyEnrolledInSchoolDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_currently_enrolled_in_school_date_effective', item.xpath(xpChildCurrentlyEnrolledInSchoolDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_currently_enrolled_in_school_data_collection_stage', item.xpath(xpChildCurrentlyEnrolledInSchoolDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'child_school_name', item.xpath(xpChildSchoolName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'child_school_name_date_collected', item.xpath(xpChildSchoolNameDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_school_name_date_effective', item.xpath(xpChildSchoolNameDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_school_name_data_collection_stage', item.xpath(xpChildSchoolNameDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'child_mckinney_vento_liaison', item.xpath(xpChildMcKinneyVentoLiaison, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'child_mckinney_vento_liaison_date_collected', item.xpath(xpChildMcKinneyVentoLiaisonDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_mckinney_vento_liaison_date_effective', item.xpath(xpChildMcKinneyVentoLiaisonDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_mckinney_vento_liaison_data_collection_stage', item.xpath(xpChildMcKinneyVentoLiaisonDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'child_school_type', item.xpath(xpChildSchoolType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'child_school_type_date_collected', item.xpath(xpChildSchoolTypeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_school_type_date_effective', item.xpath(xpChildSchoolTypeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_school_type_data_collection_stage', item.xpath(xpChildSchoolTypeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'child_school_last_enrolled_date', item.xpath(xpChildSchoolLastEnrolledDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'child_school_last_enrolled_date_date_collected', item.xpath(xpChildSchoolLastEnrolledDateDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'child_school_last_enrolled_date_data_collection_stage', item.xpath(xpChildSchoolLastEnrolledDateDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ChildEnrollmentStatus)
''' Parse sub-tables '''
parse_child_enrollment_status_barrier(self, item)
def parse_child_enrollment_status_barrier(self, element):
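''' Parse ChildEnrollmentBarrier elements and shred them into the ChildEnrollmentStatusBarrier table, keyed to the parent ChildEnrollmentStatus record. '''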
''' Element paths '''
xpChildEnrollmentBarrier = 'hmis:ChildEnrollmentBarrier'
xpBarrierIDIDNum = 'hmis:BarrierID/hmis:IDNum'
xpBarrierIDIDStr = 'hmis:BarrierID/hmis:IDStr'
xpBarrierIDDeleteOccurredDate = 'hmis:BarrierID/@hmis:deleteOccurredDate'
xpBarrierIDDeleteEffective = 'hmis:BarrierID/@hmis:deleteEffective'
xpBarrierIDDelete = 'hmis:BarrierID/@hmis:delete'
xpBarrierCode = 'hmis:BarrierCode'
xpBarrierCodeDateCollected = 'hmis:BarrierCode/@hmis:dateCollected'
xpBarrierCodeDateEffective = 'hmis:BarrierCode/@hmis:dateEffective'
xpBarrierCodeDataCollectionStage = 'hmis:BarrierCode/@hmis:dataCollectionStage'
xpBarrierOther = 'hmis:BarrierOther'
xpBarrierOtherDateCollected = 'hmis:BarrierOther/@hmis:dateCollected'
xpBarrierOtherDateEffective = 'hmis:BarrierOther/@hmis:dateEffective'
xpBarrierOtherDataCollectionStage = 'hmis:BarrierOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpChildEnrollmentBarrier, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'barrier_id_id_num', item.xpath(xpBarrierIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'barrier_id_id_str', item.xpath(xpBarrierIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'barrier_id_delete_occurred_date', item.xpath(xpBarrierIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'barrier_id_delete_effective_date', item.xpath(xpBarrierIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'barrier_id_delete', item.xpath(xpBarrierIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'barrier_code', item.xpath(xpBarrierCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'barrier_code_date_collected', item.xpath(xpBarrierCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'barrier_code_date_effective', item.xpath(xpBarrierCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'barrier_code_data_collection_stage', item.xpath(xpBarrierCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'barrier_other', item.xpath(xpBarrierOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'barrier_other_date_collected', item.xpath(xpBarrierOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'barrier_other_date_effective', item.xpath(xpBarrierOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'barrier_other_data_collection_stage', item.xpath(xpBarrierOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'child_enrollment_status_index_id', self.child_enrollment_status_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ChildEnrollmentStatusBarrier)
''' Parse sub-tables '''
def parse_chronic_health_condition(self, element):
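''' Parse ChronicHealthCondition elements and shred them into the ChronicHealthCondition table. '''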
''' Element paths '''
xpChronicHealthCondition = 'hmis:ChronicHealthCondition'
xpHasChronicHealthCondition = 'hmis:HasChronicHealthCondition'
xpHasChronicHealthConditionDateCollected = 'hmis:HasChronicHealthCondition/@hmis:dateCollected'
xpHasChronicHealthConditionDateEffective = 'hmis:HasChronicHealthCondition/@hmis:dateEffective'
xpHasChronicHealthConditionDataCollectionStage = 'hmis:HasChronicHealthCondition/@hmis:dataCollectionStage'
xpReceiveChronicHealthServices = 'hmis:ReceiveChronicHealthServices'
xpReceiveChronicHealthServicesDateCollected = 'hmis:ReceiveChronicHealthServices/@hmis:dateCollected'
xpReceiveChronicHealthServicesDateEffective = 'hmis:ReceiveChronicHealthServices/@hmis:dateEffective'
xpReceiveChronicHealthServicesDataCollectionStage = 'hmis:ReceiveChronicHealthServices/@hmis:dataCollectionStage'
itemElements = element.xpath(xpChronicHealthCondition, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'has_chronic_health_condition', item.xpath(xpHasChronicHealthCondition, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'has_chronic_health_condition_date_collected', item.xpath(xpHasChronicHealthConditionDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_chronic_health_condition_date_effective', item.xpath(xpHasChronicHealthConditionDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_chronic_health_condition_data_collection_stage', item.xpath(xpHasChronicHealthConditionDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receive_chronic_health_services', item.xpath(xpReceiveChronicHealthServices, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receive_chronic_health_services_date_collected', item.xpath(xpReceiveChronicHealthServicesDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_chronic_health_services_date_effective', item.xpath(xpReceiveChronicHealthServicesDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_chronic_health_services_data_collection_stage', item.xpath(xpReceiveChronicHealthServicesDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ChronicHealthCondition)
''' Parse sub-tables '''
def parse_release_of_information(self, element):
''' Element paths '''
xpReleaseOfInformation = 'hmis:ReleaseOfInformation'
xpReleaseOfInformationIDIDNum = 'hmis:ReleaseOfInformationID/hmis:IDNum'
xpReleaseOfInformationIDIDStr = 'hmis:ReleaseOfInformationID/hmis:IDStr'
xpReleaseOfInformationIDDateCollected = 'hmis:ReleaseOfInformationID/@hmis:dateCollected'
xpReleaseOfInformationIDDateEffective = 'hmis:ReleaseOfInformationID/@hmis:dateEffective'
xpReleaseOfInformationIDDataCollectionStage = 'hmis:ReleaseOfInformationID/@hmis:dataCollectionStage'
xpSiteServiceID = 'hmis:SiteServiceID'
xpDocumentation = 'hmis:Documentation'
xpDocumentationDateCollected = 'hmis:Documentation/@hmis:dateCollected'
xpDocumentationDateEffective = 'hmis:Documentation/@hmis:dateEffective'
xpDocumentationDataCollectionStage = 'hmis:Documentation/@hmis:dataCollectionStage'
xpStartDate = 'hmis:EffectivePeriod/hmis:StartDate'
xpEndDate = 'hmis:EffectivePeriod/hmis:EndDate'
xpReleaseGranted = 'hmis:ReleaseGranted'
xpReleaseGrantedDateCollected = 'hmis:ReleaseGranted/@hmis:dateCollected'
xpReleaseGrantedDateEffective = 'hmis:ReleaseGranted/@hmis:dateEffective'
xpReleaseGrantedDataCollectionStage = 'hmis:ReleaseGranted/@hmis:dataCollectionStage'
itemElements = element.xpath(xpReleaseOfInformation, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'release_of_information_idid_num', item.xpath(xpReleaseOfInformationIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'release_of_information_idid_str', item.xpath(xpReleaseOfInformationIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'release_of_information_idid_str_date_collected', item.xpath(xpReleaseOfInformationIDDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'release_of_information_id_date_effective', item.xpath(xpReleaseOfInformationIDDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'release_of_information_id_data_collection_stage', item.xpath(xpReleaseOfInformationIDDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'site_service_idid_str', item.xpath(xpSiteServiceID, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'documentation', item.xpath(xpDocumentation, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'documentation_date_collected', item.xpath(xpDocumentationDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'documentation_date_effective', item.xpath(xpDocumentationDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'documentation_data_collection_stage', item.xpath(xpDocumentationDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'start_date', item.xpath(xpStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'end_date', item.xpath(xpEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'release_granted', item.xpath(xpReleaseGranted, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'release_granted_date_collected', item.xpath(xpReleaseGrantedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'release_granted_date_effective', item.xpath(xpReleaseGrantedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'release_granted_data_collection_stage', item.xpath(xpReleaseGrantedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_index_id', self.person_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, ReleaseOfInformation)
''' Parse sub-tables '''
def parse_income_and_sources(self, element):
''' Element paths '''
    xpIncomeAndSources = 'hmis:IncomeAndSources/hmis:IncomeAndSource'
xpIncomeAndSourceIDIDNum = 'hmis:IncomeAndSourceID/hmis:IDNum'
xpIncomeAndSourceIDIDStr = 'hmis:IncomeAndSourceID/hmis:IDStr'
xpIncomeAndSourceIDDeleteOccurredDate = 'hmis:IncomeAndSourceID/@hmis:deleteOccurredDate'
xpIncomeAndSourceIDDeleteEffective = 'hmis:IncomeAndSourceID/@hmis:deleteEffective'
xpIncomeAndSourceIDDelete = 'hmis:IncomeAndSourceID/@hmis:delete'
xpIncomeSourceCode = 'hmis:IncomeSourceCode'
xpIncomeSourceCodeDateCollected = 'hmis:IncomeSourceCode/@hmis:dateCollected'
xpIncomeSourceCodeDateEffective = 'hmis:IncomeSourceCode/@hmis:dateEffective'
xpIncomeSourceCodeDataCollectionStage = 'hmis:IncomeSourceCode/@hmis:dataCollectionStage'
xpIncomeSourceOther = 'hmis:IncomeSourceOther'
xpIncomeSourceOtherDateCollected = 'hmis:IncomeSourceOther/@hmis:dateCollected'
xpIncomeSourceOtherDateEffective = 'hmis:IncomeSourceOther/@hmis:dateEffective'
xpIncomeSourceOtherDataCollectionStage = 'hmis:IncomeSourceOther/@hmis:dataCollectionStage'
xpReceivingIncomingSource = 'hmis:ReceivingIncomingSource'
xpReceivingIncomingSourceDateCollected = 'hmis:ReceivingIncomingSource/@hmis:dateCollected'
xpReceivingIncomingSourceDateEffective = 'hmis:ReceivingIncomingSource/@hmis:dateEffective'
xpReceivingIncomingSourceDataCollectionStage = 'hmis:ReceivingIncomingSource/@hmis:dataCollectionStage'
xpIncomeSourceAmount = 'hmis:IncomeSourceAmount'
xpIncomeSourceAmountDateCollected = 'hmis:IncomeSourceAmount/@hmis:dateCollected'
xpIncomeSourceAmountDateEffective = 'hmis:IncomeSourceAmount/@hmis:dateEffective'
xpIncomeSourceAmountDataCollectionStage = 'hmis:IncomeSourceAmount/@hmis:dataCollectionStage'
itemElements = element.xpath(xpIncomeAndSources, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'income_and_source_id_id_num', item.xpath(xpIncomeAndSourceIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_and_source_id_id_str', item.xpath(xpIncomeAndSourceIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_and_source_id_id_delete_occurred_date', item.xpath(xpIncomeAndSourceIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_and_source_id_id_delete_effective_date', item.xpath(xpIncomeAndSourceIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_and_source_id_id_delete', item.xpath(xpIncomeAndSourceIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'income_source_code', item.xpath(xpIncomeSourceCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_source_code_date_collected', item.xpath(xpIncomeSourceCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_source_code_date_effective', item.xpath(xpIncomeSourceCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_source_code_data_collection_stage', item.xpath(xpIncomeSourceCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'income_source_other', item.xpath(xpIncomeSourceOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_source_other_date_collected', item.xpath(xpIncomeSourceOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_source_other_date_effective', item.xpath(xpIncomeSourceOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_source_other_data_collection_stage', item.xpath(xpIncomeSourceOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receiving_income_source', item.xpath(xpReceivingIncomingSource, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receiving_income_source_date_collected', item.xpath(xpReceivingIncomingSourceDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receiving_income_source_date_effective', item.xpath(xpReceivingIncomingSourceDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receiving_income_source_data_collection_stage', item.xpath(xpReceivingIncomingSourceDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'income_source_amount', item.xpath(xpIncomeSourceAmount, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'income_source_amount_date_collected', item.xpath(xpIncomeSourceAmountDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_source_amount_date_effective', item.xpath(xpIncomeSourceAmountDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'income_source_amount_data_collection_stage', item.xpath(xpIncomeSourceAmountDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, IncomeAndSources)
''' Parse sub-tables '''
def parse_hud_homeless_episodes(self, element):
''' Element paths '''
xpHudHomelessEpisodes = 'hmis:HUDHomelessEpisodes'
xpStartDate = 'hmis:StartDate'
xpEndDate = 'hmis:EndDate'
itemElements = element.xpath(xpHudHomelessEpisodes, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'start_date', item.xpath(xpStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'end_date', item.xpath(xpEndDate, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, HUDHomelessEpisodes)
''' Parse sub-tables '''
def parse_person_address(self, element):
''' Element paths '''
xpPersonAddress = 'hmis:PersonAddress'
xpPersonAddressDateCollected = 'hmis:PersonAddress/@hmis:dateCollected'#IGNORE:@UnusedVariable
xpPersonAddressDateEffective = 'hmis:PersonAddress/@hmis:dateEffective'#IGNORE:@UnusedVariable
xpPersonAddressDataCollectionStage = 'hmis:PersonAddress/@hmis:dataCollectionStage'#IGNORE:@UnusedVariable
xpAddressPeriodStartDate = 'hmis:AddressPeriod/hmis:StartDate'
xpAddressPeriodEndDate = 'hmis:AddressPeriod/hmis:EndDate'
xpPreAddressLine = 'hmis:PreAddressLine'
xpPreAddressLineDateCollected = 'hmis:PreAddressLine/@hmis:dateCollected'
xpPreAddressLineDateEffective = 'hmis:PreAddressLine/@hmis:dateEffective'
xpPreAddressLineDataCollectionStage = 'hmis:PreAddressLine/@hmis:dataCollectionStage'
xpLine1 = 'hmis:Line1'
xpLine1DateCollected = 'hmis:Line1/@hmis:dateCollected'
xpLine1DateEffective = 'hmis:Line1/@hmis:dateEffective'
xpLine1DataCollectionStage = 'hmis:Line1/@hmis:dataCollectionStage'
xpLine2 = 'hmis:Line2'
xpLine2DateCollected = 'hmis:Line2/@hmis:dateCollected'
xpLine2DateEffective = 'hmis:Line2/@hmis:dateEffective'
xpLine2DataCollectionStage = 'hmis:Line2/@hmis:dataCollectionStage'
xpCity = 'hmis:City'
xpCityDateCollected = 'hmis:City/@hmis:dateCollected'
xpCityDateEffective = 'hmis:City/@hmis:dateEffective'
xpCityDataCollectionStage = 'hmis:City/@hmis:dataCollectionStage'
xpCounty = 'hmis:County'
xpCountyDateCollected = 'hmis:County/@hmis:dateCollected'
xpCountyDateEffective = 'hmis:County/@hmis:dateEffective'
xpCountyDataCollectionStage = 'hmis:County/@hmis:dataCollectionStage'
xpState = 'hmis:State'
xpStateDateCollected = 'hmis:State/@hmis:dateCollected'
xpStateDateEffective = 'hmis:State/@hmis:dateEffective'
xpStateDataCollectionStage = 'hmis:State/@hmis:dataCollectionStage'
xpZIPCode = 'hmis:ZIPCode'
xpZIPCodeDateCollected = 'hmis:ZIPCode/@hmis:dateCollected'
xpZIPCodeDateEffective = 'hmis:ZIPCode/@hmis:dateEffective'
xpZIPCodeDataCollectionStage = 'hmis:ZIPCode/@hmis:dataCollectionStage'
xpCountry = 'hmis:Country'
xpCountryDateCollected = 'hmis:Country/@hmis:dateCollected'
xpCountryDateEffective = 'hmis:Country/@hmis:dateEffective'
xpCountryDataCollectionStage = 'hmis:Country/@hmis:dataCollectionStage'
xpIsLastPermanentZip = 'hmis:IsLastPermanentZIP'
xpIsLastPermanentZIPDateCollected = 'hmis:IsLastPermanentZIP/@hmis:dateCollected'
xpIsLastPermanentZIPDateEffective = 'hmis:IsLastPermanentZIP/@hmis:dateEffective'
xpIsLastPermanentZIPDataCollectionStage = 'hmis:IsLastPermanentZIP/@hmis:dataCollectionStage'
xpZipQualityCode = 'hmis:ZIPQualityCode'
xpZIPQualityCodeDateCollected = 'hmis:ZIPQualityCode/@hmis:dateCollected'
xpZIPQualityCodeDateEffective = 'hmis:ZIPQualityCode/@hmis:dateEffective'
xpZIPQualityCodeDataCollectionStage = 'hmis:ZIPQualityCode/@hmis:dataCollectionStage'
itemElements = element.xpath(xpPersonAddress, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'address_period_start_date', item.xpath(xpAddressPeriodStartDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'address_period_end_date', item.xpath(xpAddressPeriodEndDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pre_address_line', item.xpath(xpPreAddressLine, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'pre_address_line_date_collected', item.xpath(xpPreAddressLineDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'pre_address_line_date_effective', item.xpath(xpPreAddressLineDateEffective, namespaces = self.nsmap), 'attribute_date')
            existence_test_and_add(self, 'pre_address_line_data_collection_stage', item.xpath(xpPreAddressLineDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'line1', item.xpath(xpLine1, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'line1_date_collected', item.xpath(xpLine1DateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'line1_date_effective', item.xpath(xpLine1DateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'line1_data_collection_stage', item.xpath(xpLine1DataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'line2', item.xpath(xpLine2, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'line2_date_collected', item.xpath(xpLine2DateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'line2_date_effective', item.xpath(xpLine2DateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'line2_data_collection_stage', item.xpath(xpLine2DataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'city', item.xpath(xpCity, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'city_date_collected', item.xpath(xpCityDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'city_date_effective', item.xpath(xpCityDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'city_data_collection_stage', item.xpath(xpCityDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'county', item.xpath(xpCounty, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'county_date_collected', item.xpath(xpCountyDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'county_date_effective', item.xpath(xpCountyDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'county_data_collection_stage', item.xpath(xpCountyDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'state', item.xpath(xpState, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'state_date_collected', item.xpath(xpStateDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'state_date_effective', item.xpath(xpStateDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'state_data_collection_stage', item.xpath(xpStateDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'zipcode', item.xpath(xpZIPCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'zipcode_date_collected', item.xpath(xpZIPCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
            existence_test_and_add(self, 'zipcode_date_effective', item.xpath(xpZIPCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'zipcode_data_collection_stage', item.xpath(xpZIPCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'country', item.xpath(xpCountry, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'country_date_collected', item.xpath(xpCountryDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'country_date_effective', item.xpath(xpCountryDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'country_data_collection_stage', item.xpath(xpCountryDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'is_last_permanent_zip', item.xpath(xpIsLastPermanentZip, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'is_last_permanent_zip_date_collected', item.xpath(xpIsLastPermanentZIPDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'is_last_permanent_zip_date_effective', item.xpath(xpIsLastPermanentZIPDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'is_last_permanent_zip_data_collection_stage', item.xpath(xpIsLastPermanentZIPDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'zip_quality_code', item.xpath(xpZipQualityCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'zip_quality_code_date_collected', item.xpath(xpZIPQualityCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'zip_quality_code_date_effective', item.xpath(xpZIPQualityCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'zip_quality_code_data_collection_stage', item.xpath(xpZIPQualityCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, PersonAddress)
''' Parse sub-tables '''
def parse_other_names(self, element):
''' Element paths '''
xpOtherNames = 'hmis:OtherNames'
xpOtherFirstNameUnhashed = 'hmis:OtherFirstName/hmis:Unhashed'
xpOtherFirstNameUnhashedDateCollected = 'hmis:OtherFirstName/hmis:Unhashed/@hmis:dateCollected'#IGNORE:@UnusedVariable
xpOtherFirstNameUnhashedDateEffective = 'hmis:OtherFirstName/hmis:Unhashed/@hmis:dateEffective'#IGNORE:@UnusedVariable
xpOtherFirstNameUnhashedDataCollectionStage = 'hmis:OtherFirstName/hmis:Unhashed/@hmis:dataCollectionStage'#IGNORE:@UnusedVariable
xpOtherFirstNameHashed = 'hmis:OtherFirstName/hmis:Hashed'
xpOtherFirstNameHashedDateCollected = 'hmis:OtherFirstName/hmis:Hashed/@hmis:dateCollected'
xpOtherFirstNameHashedDateEffective = 'hmis:OtherFirstName/hmis:Hashed/@hmis:dateEffective'
xpOtherFirstNameHashedDataCollectionStage = 'hmis:OtherFirstName/hmis:Hashed/@hmis:dataCollectionStage'
xpOtherLastNameUnhashed = 'hmis:OtherLastName/hmis:Unhashed'
xpOtherLastNameUnhashedDateCollected = 'hmis:OtherLastName/hmis:Unhashed/@hmis:dateCollected'#IGNORE:@UnusedVariable
xpOtherLastNameUnhashedDateEffective = 'hmis:OtherLastName/hmis:Unhashed/@hmis:dateEffective'#IGNORE:@UnusedVariable
xpOtherLastNameUnhashedDataCollectionStage = 'hmis:OtherLastName/hmis:Unhashed/@hmis:dataCollectionStage'#IGNORE:@UnusedVariable
xpOtherLastNameHashed = 'hmis:OtherLastName/hmis:Hashed'
xpOtherLastNameHashedDateCollected = 'hmis:OtherLastName/hmis:Hashed/@hmis:dateCollected'
xpOtherLastNameHashedDateEffective = 'hmis:OtherLastName/hmis:Hashed/@hmis:dateEffective'
xpOtherLastNameHashedDataCollectionStage = 'hmis:OtherLastName/hmis:Hashed/@hmis:dataCollectionStage'
xpOtherMiddleNameUnhashed = 'hmis:OtherMiddleName/hmis:Unhashed'
xpOtherMiddleNameUnhashedDateCollected = 'hmis:OtherMiddleName/hmis:Unhashed/@hmis:dateCollected'#IGNORE:@UnusedVariable
xpOtherMiddleNameUnhashedDateEffective = 'hmis:OtherMiddleName/hmis:Unhashed/@hmis:dateEffective'#IGNORE:@UnusedVariable
xpOtherMiddleNameUnhashedDataCollectionStage = 'hmis:OtherMiddleName/hmis:Unhashed/@hmis:dataCollectionStage'#IGNORE:@UnusedVariable
xpOtherMiddleNameHashed = 'hmis:OtherMiddleName/hmis:Hashed'
xpOtherMiddleNameHashedDateCollected = 'hmis:OtherMiddleName/hmis:Hashed/@hmis:dateCollected'
xpOtherMiddleNameHashedDateEffective = 'hmis:OtherMiddleName/hmis:Hashed/@hmis:dateEffective'
xpOtherMiddleNameHashedDataCollectionStage = 'hmis:OtherMiddleName/hmis:Hashed/@hmis:dataCollectionStage'
xpOtherSuffixUnhashed = 'hmis:OtherSuffix/hmis:Unhashed'
xpOtherSuffixUnhashedDateCollected = 'hmis:OtherSuffix/hmis:Unhashed/@hmis:dateCollected'#IGNORE:@UnusedVariable
xpOtherSuffixUnhashedDateEffective = 'hmis:OtherSuffix/hmis:Unhashed/@hmis:dateEffective'#IGNORE:@UnusedVariable
xpOtherSuffixUnhashedDataCollectionStage = 'hmis:OtherSuffix/hmis:Unhashed/@hmis:dataCollectionStage'#IGNORE:@UnusedVariable
xpOtherSuffixHashed = 'hmis:OtherSuffix/hmis:Hashed'
xpOtherSuffixHashedDateCollected = 'hmis:OtherSuffix/hmis:Hashed/@hmis:dateCollected'
xpOtherSuffixHashedDateEffective = 'hmis:OtherSuffix/hmis:Hashed/@hmis:dateEffective'
xpOtherSuffixHashedDataCollectionStage = 'hmis:OtherSuffix/hmis:Hashed/@hmis:dataCollectionStage'
itemElements = element.xpath(xpOtherNames, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'other_first_name_unhashed', item.xpath(xpOtherFirstNameUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_first_name_hashed', item.xpath(xpOtherFirstNameHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_first_name_date_collected', item.xpath(xpOtherFirstNameHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_first_name_date_effective', item.xpath(xpOtherFirstNameHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_first_name_data_collection_stage', item.xpath(xpOtherFirstNameHashedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'other_last_name_unhashed', item.xpath(xpOtherLastNameUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_last_name_hashed', item.xpath(xpOtherLastNameHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_last_name_date_collected', item.xpath(xpOtherLastNameHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_last_name_date_effective', item.xpath(xpOtherLastNameHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_last_name_data_collection_stage', item.xpath(xpOtherLastNameHashedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'other_middle_name_unhashed', item.xpath(xpOtherMiddleNameUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_middle_name_hashed', item.xpath(xpOtherMiddleNameHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_middle_name_date_collected', item.xpath(xpOtherMiddleNameHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_middle_name_date_effective', item.xpath(xpOtherMiddleNameHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_middle_name_data_collection_stage', item.xpath(xpOtherMiddleNameHashedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'other_suffix_unhashed', item.xpath(xpOtherSuffixUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_suffix_hashed', item.xpath(xpOtherSuffixHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'other_suffix_date_collected', item.xpath(xpOtherSuffixHashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_suffix_date_effective', item.xpath(xpOtherSuffixHashedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'other_suffix_data_collection_stage', item.xpath(xpOtherSuffixHashedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_index_id', self.person_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, OtherNames)
''' Parse sub-tables '''
def parse_races(self, element, pf = 'hmis:'):
''' Element paths '''
xpRaces = pf + 'Race'
xpRaceUnhashed = 'hmis:Unhashed'
xpRaceUnhashedDateCollected = 'hmis:Unhashed/@hmis:dateCollected'
xpRaceUnhashedDataCollectionStage = 'hmis:Unhashed/@hmis:dataCollectionStage'
xpRaceHashed = 'hmis:Hashed'
itemElements = element.xpath(xpRaces, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'race_unhashed', item.xpath(xpRaceUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'race_date_collected', item.xpath(xpRaceUnhashedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'race_data_collection_stage', item.xpath(xpRaceUnhashedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'race_hashed', item.xpath(xpRaceHashed, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_index_id', self.person_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Races)
''' Parse sub-tables '''
def parse_funding_source(self, element):
''' Element paths '''
xpFundingSource = 'hmis:FundingSources/hmis:FundingSource'
xpFundingSourceIDIDNum = 'hmis:FundingSourceID/hmis:IDNum'
xpFundingSourceIDIDStr = 'hmis:FundingSourceID/hmis:IDStr'
xpFundingSourceIDDeleteOccurredDate = 'hmis:FundingSourceID/@hmis:deleteOccurredDate'
xpFundingSourceIDDeleteEffective = 'hmis:FundingSourceID/@hmis:deleteEffective'
xpFundingSourceIDDelete = 'hmis:FundingSourceID/@hmis:delete'
xpFederalCFDA = 'hmis:FederalCFDA'
xpReceivesMcKinneyFunding = 'hmis:ReceivesMcKinneyFunding'
xpAdvanceOrArrears = 'hmis:AdvanceOrArrears'
xpFinancialAssistanceAmount = 'hmis:FinancialAssistanceAmount'
itemElements = element.xpath(xpFundingSource, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'funding_source_id_id_num', item.xpath(xpFundingSourceIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'funding_source_id_id_str', item.xpath(xpFundingSourceIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'funding_source_id_delete_occurred_date', item.xpath(xpFundingSourceIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'funding_source_id_delete_effective_date', item.xpath(xpFundingSourceIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'funding_source_id_delete', item.xpath(xpFundingSourceIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'federal_cfda_number', item.xpath(xpFederalCFDA, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receives_mckinney_funding', item.xpath(xpReceivesMcKinneyFunding, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'advance_or_arrears', item.xpath(xpAdvanceOrArrears, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'financial_assistance_amount', item.xpath(xpFinancialAssistanceAmount, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
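            # FundingSource can appear under either a Service or a ServiceEvent,
            # so only the parent index ids that exist on this reader instance are
            # recorded; the bare except simply skips keys that are not set.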
try: existence_test_and_add(self, 'service_index_id', self.service_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'service_event_index_id', self.service_event_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, FundingSource)
''' Parse sub-tables '''
def parse_resource_info(self, element):
''' Element paths '''
xpResourceInfo = 'airs:ResourceInfo'
xpResourceSpecialist = 'airs:ResourceSpecialist'
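    # The attribute XPaths below are built with a '../' prefix so that, when
    # evaluated against a ResourceInfo element, they step up to its parent and
    # back down to read ResourceInfo's own attributes.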
xpAvailableForDirectory = "../%s/%s" % (xpResourceInfo, '@AvailableForDirectory')
xpAvailableForReferral = "../%s/%s" % (xpResourceInfo, '@AvailableForReferral')
xpAvailableForResearch = "../%s/%s" % (xpResourceInfo, '@AvailableForResearch')
xpDateAdded = "../%s/%s" % (xpResourceInfo, '@DateAdded')
xpDateLastVerified = "../%s/%s" % (xpResourceInfo, '@DateLastVerified')
xpDateOfLastAction = "../%s/%s" % (xpResourceInfo, '@DateOfLastAction')
xpLastActionType = "../%s/%s" % (xpResourceInfo, '@LastActionType')
itemElements = element.xpath(xpResourceInfo, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'resource_specialist', item.xpath(xpResourceSpecialist, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'available_for_directory', item.xpath(xpAvailableForDirectory, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'available_for_referral', item.xpath(xpAvailableForReferral, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'available_for_research', item.xpath(xpAvailableForResearch, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'date_added', item.xpath(xpDateAdded, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'date_last_verified', item.xpath(xpDateLastVerified, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'date_of_last_action', item.xpath(xpDateOfLastAction, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'last_action_type', item.xpath(xpLastActionType, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_service_index_id', self.site_service_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, ResourceInfo)
''' Parse sub-tables '''
parse_contact(self, item)
parse_email(self, item)
parse_phone(self, item)
def parse_contact(self, element):
''' Element paths '''
xpContact = 'airs:Contact'
xpTitle = 'airs:Title'
xpName = 'airs:Name'
xpType = "../%s/%s" % (xpContact, '@Type')
itemElements = element.xpath(xpContact, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'title', item.xpath(xpTitle, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'name', item.xpath(xpName, namespaces = self.nsmap), 'text')
# SBB20100909 wrong type element (attribute)
existence_test_and_add(self, 'type', item.xpath(xpType, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
# SBB20100908 Need to test this. A site doesn't have resource Info but contacts do under other elements
try: existence_test_and_add(self, 'resource_info_index_id', self.resource_info_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Contact)
''' Parse sub-tables '''
parse_email(self, item)
parse_phone(self, item)
def parse_email(self, element):
''' Element paths '''
xpEmail = 'airs:Email'
xpAddress = 'airs:Address'
xpNote = 'airs:Note'
xpPersonEmail = 'airs:PersonEmail'
xpPersonEmailDateCollected = 'hmis:PersonEmail/@hmis:dateCollected'
xpPersonEmailDateEffective = 'hmis:PersonEmail/@hmis:dateEffective'
xpPersonEmailDataCollectionStage = 'hmis:PersonEmail/@hmis:dataCollectionStage'
itemElements = element.xpath(xpEmail, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'address', item.xpath(xpAddress, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'note', item.xpath(xpNote, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_email', item.xpath(xpPersonEmail, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_email_date_collected', item.xpath(xpPersonEmailDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_email_date_effective', item.xpath(xpPersonEmailDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'person_email_data_collection_stage', item.xpath(xpPersonEmailDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'contact_index_id', self.contact_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'resource_info_index_id', self.resource_info_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Email)
''' Parse sub-tables '''
def parse_phone(self, element):
''' Element paths '''
xpPhone = 'airs:Phone'
xpPhoneNumber = 'airs:PhoneNumber'
xpReasonWithheld = 'airs:ReasonWithheld'
xpExtension = 'airs:Extension'
xpDescription = 'airs:Description'
xpType = 'airs:Type'
xpFunction = 'airs:Function'
xpTollFree = "../%s/%s" % (xpPhone, '@TollFree')
xpConfidential = "../%s/%s" % (xpPhone, '@Confidential')
itemElements = element.xpath(xpPhone, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'phone_number', item.xpath(xpPhoneNumber, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'reason_withheld', item.xpath(xpReasonWithheld, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'extension', item.xpath(xpExtension, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'description', item.xpath(xpDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'type', item.xpath(xpType, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'function', item.xpath(xpFunction, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'toll_free', item.xpath(xpTollFree, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'confidential', item.xpath(xpConfidential, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
try: existence_test_and_add(self, 'agency_location_index_id', self.agency_location_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'contact_index_id', self.contact_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'resource_info_index_id', self.resource_info_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'site_index_id', self.site_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'service_index_id', self.service_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
except: pass
try: existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
except: pass
''' Shred to database '''
shred(self, self.parse_dict, Phone)
''' Parse sub-tables '''
''' Build link/bridge tables '''
def record_source_export_link(self):
''' record the link between source and export '''
existence_test_and_add(self, 'source_index_id', self.source_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
shred(self, self.parse_dict, SourceExportLink)
return
def record_agency_child_link(self):
''' record the link between agency and any children '''
existence_test_and_add(self, 'agency_index_id', self.agency_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
shred(self, self.parse_dict, AgencyChild)
return
''' Utility methods '''
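# existence_test_and_add() accepts a handling hint that describes how the XPath
# result should be persisted: 'no_handling' stores the value as-is, 'text' reads
# an element's text, 'attribute_text' stringifies an attribute, and
# 'attribute_date'/'element_date' run the value through dateutil.parser before
# it is placed in self.parse_dict.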
def existence_test_and_add(self, db_column, query_string, handling):
''' Checks that the query actually has a result and adds to dictionary '''
if handling == 'no_handling':
persist(self, db_column, query_string = query_string)
#print 'Query string:', query_string
return True
    elif len(query_string) != 0:
if handling == 'attribute_text':
persist(self, db_column, query_string = str(query_string[0]))
#print 'Query string:', query_string
return True
if handling == 'text':
persist(self, db_column, query_string = query_string[0].text)
#print 'Query string:', query_string
return True
elif handling == 'attribute_date':
#print "Column = ", db_column
#print "dateutil.parser.parse(query_string[0]) is: ", dateutil.parser.parse(query_string[0])
#print "==== query_string[0] = ", query_string[0]
persist(self, db_column, query_string = dateutil.parser.parse(query_string[0]))
#print 'Query string:', query_string
return True
elif handling == 'element_date':
#print "==== Column = ", db_column
#print "==== query_string[0].text = ", query_string[0].text
#print "dateutil.parser.parse(query_string[0].text) is:", dateutil.parser.parse(query_string[0].text), ":"
persist(self, db_column, query_string = dateutil.parser.parse(query_string[0].text))
#print '==== Query string:', query_string
return True
else:
print("Need to specify the handling")
return False
else:
# SBB20100915 added to handle non-list element values
        if isinstance(query_string, list):
return False
# not empty list now evaluate it
else:
if handling == 'attribute_text':
persist(self, db_column, query_string = str(query_string))
#print query_string
return True
if handling == 'text':
#print "query_string.text", query_string.text
persist(self, db_column, query_string = query_string.text)
#print query_string
return True
elif handling == 'attribute_date':
persist(self, db_column, query_string = dateutil.parser.parse(query_string))
#print query_string
return True
elif handling == 'element_date':
persist(self, db_column, query_string = dateutil.parser.parse(query_string.text))
#print query_string
return True
def persist(self, db_column, query_string):
''' Adds dictionary item with database column and associated data element '''
self.parse_dict.__setitem__(db_column, query_string)
return
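# shred() commits one record and, for tables that act as parents elsewhere in
# the document, remembers the new primary key on the reader instance so later
# parse_* calls can use it as a foreign key.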
def shred(self, records_dict, map_class):
''' commits the record set to the database '''
print('map_class: ', map_class)
print('data: ', records_dict)
#print 'self is:', self
#{'source_name': 'Orange County Corrections', 'software_version': '0.1', 'software_vendor': 'Orange County Corrections', 'source_contact_email': '<EMAIL>', 'source_id_id_id_str': '003', 'source_id': '003'}
mapped = map_class(**records_dict)
self.session.add(mapped)
print("done adding")
#self.session.add(mapped)
self.session.commit()
''' store foreign keys '''
if map_class.__name__ == "Source":
self.source_index_id = mapped.id
print("Source: ", map_class.source_id)
if map_class.__name__ == "Export":
self.export_index_id = mapped.id
print("Export: ", self.export_index_id)
if map_class.__name__ == "Household":
self.household_index_id = mapped.id
print("Household: ", self.household_index_id)
if map_class.__name__ == "Agency":
self.agency_index_id = mapped.id
print("Agency: ", self.agency_index_id)
if map_class.__name__ == "AgencyLocation":
self.agency_location_index_id = mapped.id
print("Agency Location: ", self.agency_location_index_id)
if map_class.__name__ == "Site":
self.site_index_id = mapped.id
print("Site: ", self.site_index_id)
if map_class.__name__ == "SiteService":
self.site_service_index_id = mapped.id
print("SiteService: ", self.site_service_index_id)
if map_class.__name__ == "PitCountSet":
self.pit_count_set_index_id = mapped.id
print("PitCountSet: ", self.pit_count_set_index_id)
if map_class.__name__ == "Languages":
self.languages_index_id = mapped.id
print("Languages:",self.languages_index_id)
if map_class.__name__ == "Service":
self.service_index_id = mapped.id
print("Service:",self.service_index_id)
if map_class.__name__ == "HmisAsset":
self.hmis_asset_index_id = mapped.id
print("HmisAsset:",self.hmis_asset_index_id)
if map_class.__name__ == "Assignment":
self.assignment_index_id = mapped.id
print("Assignment:",self.assignment_index_id)
if map_class.__name__ == "Person":
self.person_index_id = mapped.id
print("Person:",self.person_index_id)
if map_class.__name__ == "SiteServiceParticipation":
self.site_service_participation_index_id = mapped.id
print("SiteServiceParticipation:",self.site_service_participation_index_id)
if map_class.__name__ == "Need":
self.need_index_id = mapped.id
print("Need:",self.need_index_id)
if map_class.__name__ == "Referral":
self.referral_index_id = mapped.id
print("Referral:",self.referral_index_id)
if map_class.__name__ == "ServiceEvent":
self.service_event_index_id = mapped.id
print("ServiceEvent:",self.service_event_index_id)
if map_class.__name__ == "PersonHistorical":
self.person_historical_index_id = mapped.id
print("PersonHistorical:",self.person_historical_index_id)
if map_class.__name__ == "Degree":
self.degree_index_id = mapped.id
print("Degree:",self.degree_index_id)
if map_class.__name__ == "ChildEnrollmentStatus":
self.child_enrollment_status_index_id = mapped.id
print("ChildEnrollmentStatus:",self.child_enrollment_status_index_id)
if map_class.__name__ == "ResourceInfo":
self.resource_info_index_id = mapped.id
print("ResourceInfo:",self.resource_info_index_id)
if map_class.__name__ == "Contact":
self.contact_index_id = mapped.id
print("Contact:",self.contact_index_id)
if map_class.__name__ == "TimeOpen":
self.time_open_index_id = mapped.id
print("TimeOpen:",self.time_open_index_id)
# FBY New 2016-08-16: Make the call id accessible when setting it as a foreign key to
# in the person_historical table
if map_class.__name__ == "Call":
self.call_index_id = mapped.id
print("Call:",self.call_index_id)
    self.parse_dict = {}
    if "source_id" in records_dict:
return records_dict["source_id"]
else:
return None
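# Manual test harness: note that it blanks the configured database via
# postgresutils before re-importing a hard-coded sample instance document.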
def main(argv=None):
''' Manually test this Reader class '''
if argv is None:
argv = sys.argv
## clear db tables (may have to run twice to get objects linked properly)
from . import postgresutils
UTILS = postgresutils.Utils()
UTILS.blank_database()
#inputFile = os.path.join("%s" % settings.BASE_PATH, "%s" % settings.INPUTFILES_PATH, "HUD_HMIS_3_0_Instance.xml")
inputFile = "/mnt/mariah/HUD_HMIS_XML/HUD_HMIS_Instance-modified-3.0.xml"
if settings.DB_PASSWD == "":
        settings.DB_PASSWD = input("Please enter your password: ")
    if os.path.isfile(inputFile):
try:
xml_file = open(inputFile,'r')
except:
print("Error opening import file")
reader = HMISXML30Reader(xml_file)
tree = reader.read()
reader.process_data(tree)
xml_file.close()
if __name__ == "__main__":
sys.exit(main())
```
#### File: synthesis/src/interpretpicklist.py
```python
class Interpretpicklist:
def __init__(self):
print("started Interpretpicklist")
self.pickList = {
# SBB20091021 HMIS only defines 1-4, I enumerated the remaining types
"RELATIONSHIPSPickOption":
{
"5":"daughter",
"6":"father",
"7":"granddaughter",
"8":"grandfather",
"9":"grandmother",
"10":"grandson",
"11":"husband",
"12":"mother",
"13":"other non-relative",
"3":"other relative",
"14":"self",
"4":"significant other",
"1":"son",
"15":"step-daughter",
"16":"step-son",
"17":"unknown",
"2":"wife"
}
,"LENGTHOFTHESTAYPickOption":
{
"1":"don't know (hud)",
"2":"more than one week, but less than one month (hud)",
"3":"more than three months, but less than one year (hud)",
"4":"one to three months (hud)",
"5":"one week or less (hud)",
"6":"one year or longer (hud)",
"7":"other",
"8":"refused (hud)"
}
# New picklist, straight out of the schema - Are numbers correct?
,"LIVINGSITTYPESPickOption":
{
"1":"don't know (hud)",
"2":"emergency shelter, including hotel or motel paid for with emergency shelter voucher(hud)",
"3":"foster care home or foster care group home (hud)",
"4":"hospital (non-psychiatric) (hud)",
"5":"hotel or motel paid for without emergency shelter voucher (hud)",
"6":"jail, prison or juvenile detention facility (hud)",
"7":"other (hud)",
"8":"owned by client, no housing subsidy (hud)",
"9":"owned by client, with housing subsidy (hud)",
"10":"permanent housing for formerly homeless persons(such as shp, s+c, or sro mod rehab)(hud)",
"11":"place not meant for habitation inclusive of 'non-housing service site(outreach programs only)'(hud)",
"12":"psychiatric hospital or other psychiatric facility (hud)",
"13":"refused (hud)",
"14":"rental by client, no housing subsidy (hud)",
"15":"rental by client, with other (non-vash) housing subsidy (hud)",
"16":"rental by client, with vash housing subsidy (hud)",
"17":"safe haven (hud)",
"18":"staying or living in a family member's room, apartment or house (hud)",
"19":"staying or living in a friend's room, apartment or house (hud)",
"20":"subsidized housing",
"21":"substance abuse treatment facility or detox center (hud)",
"22":"transitional housing for homeless persons (including homeless youth) (hud)"
}
# This is the old picklist - xml doesn't validate if use this
# ,"LIVINGSITTYPESPickOption":{
# "Domestic Violence":"domestic violence situation",
# "?":"don't know (hud)",
# "Emergency Shelter/TH":"emergency shelter (hud)",
# "Foster Care":"foster care/group home (hud)",
# "Hospital":"hospital (hud)",
# "Hotel/Motel":"hotel/motel without emergency shelter(hud)",
# "Jail/Prison":"jail, prison or juvenile facility (hud)",
# "Living With Family":"living with family (hud)",
# "Living with Friend":"living with friends (hud)",
# "Living With Friend":"living with friends (hud)",
# "Own Home":"own house/apartment (hud)",
# "Halfway House":"permanent housing for formerly homeless (hud)",
# "Street":"place not meant for habitation (hud)",
# "Psychiatric Facility":"psychiatric hospital or facility (hud)",
# "Refused":"refused (hud)",
# "Rented":"rental house/apartment (hud)",
# "Housing Subsized":"subsidized housing",
# "Treatment Center":"substance abuse treatment center (hud)",
# "Transitional Housing":"transitional housing for homeless(hud)",
# "1":"emergency shelter (hud)",
# "2":"transitional housing for homeless(hud)",
# "3":"permanent housing for formerly homeless (hud)",
# "4":"psychiatric hospital or facility (hud)",
# "5":"substance abuse treatment center (hud)",
# "6":"hospital (hud)",
# "7":"jail, prison or juvenile facility (hud)",
# "8":"don't know (hud)",
# "9":"refused (hud)",
# "10":"rental house/apartment (hud)",
# "11":"own house/apartment (hud)",
# "12":"living with family (hud)",
# "13":"living with friends (hud)",
# "14":"hotel/motel without emergency shelter(hud)",
# "15":"foster care/group home (hud)",
# "16":"place not meant for habitation (hud)",
# "17":"other (hud)"
# }
,"HOUSINGSTATUSPickOption":
{
"1":"don't know (hud)",
"2":"imminently losing their housing (hud)",
"3":"literally homeless (hud)",
"4":"refused (hud)",
"5":"stably housed (hud)",
"6":"unstably housed and at-risk of losing their housing (hud)",
"7":None,
"8":None
}
,"ROIDocumentationPickOption":
{
"4":"none",
"3":"other",
"1":"signed statement from client",
"2":"verbal consent",
"5":"verification from other institiution"
}
,"MILITARYBRANCHPickOption":
{
'2':"air force (hud)",
'1':"army (hud)",
'4':"marines (hud)",
'3':"navy (hud)",
'5':"other (hud)" # Coast Guard, National Reserve...
}
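            # Note: several picklists below use the empty string as a placeholder
            # key for values that have no source mapping yet; because dict keys
            # must be unique, only the last empty-string entry in each literal
            # survives at runtime.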
,"INCOMETYPESPickOption":{
"":"a veteran's disability payment (hud)",
"":"alimony",
"":"alimony or other spousal support (hud)",
"":"annuities",
"":"child support (hud)",
"Stipend":"contributions from other people",
"":"dividends (investments)",
"Employment Income":"earned income (hud)",
"":"employment/job",
"Food Stamps":"food stamps (hud)",
"":"general assistance (hud)",
"":"interest (bank)",
"":"medicaid (hud)",
"":"medicare (hud)",
"No Financial Resources":"no financial resources (hud)",
"":"other (hud)",
"":"other tanf-funded services (hud)",
"":"pension from a former job (hud)",
"":"pension/retirement",
"":"private disability insurance (hud)",
"":"railroad retirement",
"":"rental income",
"":"retirement disability",
"Social Security":"retirement income from social security (hud)",
"":"schip (hud)",
"":"section 8, public housing or rental assistance (hud)",
"Self-Employed":"self employment wages",
"":"special supplemental nutrition program for wic (hud)",
"Social Security Disability Income (SSD)":"ssdi (hud)",
"Supplemental Security Income (SSI)":"ssi (hud)",
"":"state disability",
"":"tanf (hud)",
"":"tanf child care services (hud)",
"":"tanf transportation services (hud)",
"Unemployment Benefits":"unemployment insurance (hud)",
"Veteran's Health Care":"veteran's administration (va) medical services (hud)",
"Veteran's Benefits":"veteran's pension (hud)",
"":"worker's compensation (hud)"
}
# 2.8 Picklists <xsd:simpleType name="residenceBase">
# <xsd:annotation>
# <xsd:documentation xml:lang="en">
# Residence Type
# 1 = Emergency shelter
# 2 = Transitional housing
# 3 = Permanent housing
# 4 = Psychiatric hospital or other psychiatric facility
# 5 = Substance abuse treatment facility or detox center
# 6 = Hospital (non-psychiatric)
# 7 = Jail, prison or juvenile detention facility
# 8 = Don't know
# 9 = Refused
# 10 = Room apartment or house that you rent
# 11 = Apartment or house that you own
# 12 = Staying or living in a family member's room, apartment or house
# 13 = Staying or living in a friend's room, apartment or house
# 14 = Hotel or motel paid for without emergency shelter voucher
# 15 = Foster care home or foster care group home
# 16 = Place not meant for habitation
# 17 = Other
# </xsd:documentation>
#
,"ENHANCEDYESNOPickOption":{
"dontknow":"don't know (hud)",
"8":"don't know (hud)",
"FALSE":"no (hud)",
"?":"refused (hud)",
"9":"refused (hud)",
"TRUE":"yes (hud)",
"1":"yes (hud)",
"0":"no (hud)",
"":"no (hud)",
"other":"ENHANCEDYESNOPickOption"
}
,"HOMELESSREASONPickOption":{
"Addiction":"addiction",
"Divorce":"divorce",
"Domestic Violence":"domestic violence",
"Evicted within past week":"evicted within past week",
"Family-Personal Illness":"family/personal illness",
"Jail/Prison":"jail/prison",
"Moved to seek work":"moved to seek work",
"Other":"other",
"Physical-Mental Disability":"physical/mental disabilities",
"Unable to pay rent-mortgage":"unable to pay rent/mortgage",
"other":"HOMELESSREASONPickOption"
}
,"EntryExitTypePick":{
"test":"hud-40118",
"":"basic entry/exit",
"":"standard entry",
"other":"quick call"}
,"FundSourcesPick":{
"":"cap",
"":"fema",
"":"hud shelter+care",
"":"hud supportive housing program",
"":"internal budget",
"other":"title iii"}
,"ReasonUnmetNeedPick":{
"":"all services full",
"":"client not eligible",
"":"client refused service",
"":"service does not exist",
"other":"service not accessible"}
,"ServiceOutcomePick":{
"":"fully met",
"":"not met",
"":"partially met",
"other":"service pending"}
,"EeDestinationPick":{
"LOC-ES":"emergency shelter",
"TX-IADF":"institution: inpatient alcohol/drug facility",
"J-IJP":"institution: jail/prison",
"LOC-IPH":"institution: psychiatric hospital",
"OTH-OTH":"other",
"TH-OSPH":"other: other supportive housing",
"UNK-OS":"other: places not meant for habitation (street)",
"ISH-PHS":"permanent: home subsidized house/apartment",
"INH-PHO":"permanent: homeownership",
"INH-PFF":"permanent: moved in with family/friends",
"ISH-POSH":"permanent: other subsidized house/apartment",
"ISH-PPH":"permanent: public housing",
"INH-PR":"permanent: rental house/apartment (no subsidy)",
"ISH-PS8":"permanent: section 8",
"ISH-PSPC":"permanent: shelter plus care",
"TH-TFF":"transitional: moved in with family/friends",
"TH-TFH":"transitional: transitional housing for homeless",
"UNK-UNK":"unknown",
"LOC-HM":"Hospital - Medical",
"LOC-NH":"Nursing Home",
"other":"Test Value"
}
,"EereasonLeavingPick":{
"":"completed program",
"":"criminal activity / violence",
"":"death",
"":"disagreement with rules/persons",
"":"left for housing opp. before completing program",
"":"needs could not be met",
"":"non-compliance with program",
"":"non-payment of rent",
"":"other",
"":"reached maximum time allowed",
"other":"unknown/disappeared"
}
,"RacePick":{
'alaskan':"american indian or alaska native (hud)"
,'1':"american indian or alaska native (hud)"
,'american indian black':"american indian/alaskan native & black/african american (hud 40118)"
,'american indian white':"american indian/alaskan native & white (hud 40118)"
,'asian white':"asian & white (hud 40118)"
,'asian':"asian (hud)"
,'2':"asian (hud)"
,'black':"black or african american (hud)"
,'3':"black or african american (hud)"
,'black white':"black/african american & white (hud 40118)"
,'native hawaiian':"native hawaiian/other pacific islander (hud 40118)"
,'4':"native hawaiian/other pacific islander (hud 40118)"
,'other':"other"
,'other multi-racial':"other multi-racial (hud 40118)"
,'pacific islander':"native hawaiian/other pacific islander (hud 40118)"
,'white':"white (hud)"
,'5':"white (hud)"
,'hispanic':"other"
,'':'other'
}
,"SexPick":{
"female":"female",
"0":"female",
"male":"male",
"1":"male",
"confused":"transgender", #ECJ20100808 omg!!!
"other":"unknown"
}
,"EmploymentPick":{
"Full Time":"full time"
,'Part Time':"part time"
,'Retired':"retired"
,'Temporary':"seasonal work"
,'Volunteer':"volunteer work only"
,'Disability Check':'retired'
,'School':'seasonal work'
,'Unemployed':'seasonal work'
}
,"EthnicityPick":{
"0":"non-hispanic/non-latino (hud)",
"1":"hispanic/latino (hud)",
"8":"don't know (hud)",
"9":"refused (hud)"
}
}
def getValue(self, pList, lookupValue):
if pList in self.pickList and lookupValue in self.pickList[pList]:
return self.pickList[pList][lookupValue]
return ""
if __name__ == "__main__":
vld = Interpretpicklist()
files = vld.getValue("EntryExitTypePick", "test")
lookupValues = vld.pickList.keys()
for dicts in lookupValues:
print(vld.getValue(dicts, "other"))
print("Valid Files are: ", files)
```
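The `getValue` helper above is the whole public surface of the pick-list mapper: it normalizes a vendor's raw export values to the HUD-standard wording and falls back to an empty string for anything it does not recognize. A minimal usage sketch, assuming the `Interpretpicklist` class above lives in an importable module (the module name below is illustrative only):

```python
# Hypothetical import path -- adjust to wherever Interpretpicklist is defined.
from interpretpicklist import Interpretpicklist

vld = Interpretpicklist()

# Raw export values are normalized to the HUD wording:
print(vld.getValue("SexPick", "0"))                      # -> "female"
print(vld.getValue("RacePick", "3"))                     # -> "black or african american (hud)"
print(vld.getValue("ENHANCEDYESNOPickOption", "TRUE"))   # -> "yes (hud)"

# Unknown pick lists or values fall back to an empty string:
print(vld.getValue("SexPick", "not-a-code"))             # -> ""
```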
#### File: synthesis/src/selector.py
```python
import os
from . import fileutils
import sys
import time
from .fileinputwatcher import FileInputWatcher
from .hmisxml28reader import HMISXML28Reader
from .hmisxml30reader import HMISXML30Reader
from .tbchmisxml30reader import TBCHUDHMISXML30Reader # JCS New 2012-01-05
from .jfcsxmlreader import JFCSXMLReader
from .occhmisxml30reader import OCCHUDHMISXML30Reader
#from synthesis.parxmlreader import PARXMLReader
from lxml import etree
import queue
from .conf import settings
from .conf import inputConfiguration, outputConfiguration
from synthesis.emailprocessor import XMLProcessorNotifier
from .filerouter import Router
from os import path
import traceback
import copy
from synthesis.socketcomm import ServiceController
from . import dbobjects
import threading
from multiprocessing import Array, Value
from .smtplibrary import smtpInterface
from .nodebuilder import NodeBuilder
from synthesis.queryobject import QueryObject
from .selector_tests import HUDHMIS30XMLTest, HUDHMIS28XMLTest, OCCHUDHMIS30XMLTest, JFCSXMLTest, TBCExtendHUDHMISXMLTest
import socket
timeout = 30
socket.setdefaulttimeout(timeout)
class FileHandler:
'''Sets up the watch on the directory, and handles the file once one comes in'''
def __init__(self):
# define thread list in shared memory so each thread can alter it
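# thread_list slot values: 0 = idle, 1 = worker running, -1 = shutdown requested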
self.thread_list = Array('i', [0 for i in range(0, settings.NUMBER_OF_THREADS)])
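# shared flag set to 1 when any worker thread raises, so the other threads can shut down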
self.exception = Value('i', 0)
#change this so that it gets the watch_dir from the .ini file
dir_to_watch = inputConfiguration.INPUTFILES_PATH
self.Queue = queue.Queue(0)
self.file_input_watcher = FileInputWatcher(dir_to_watch, self.Queue)
#Check the file to see if it validates against one of the tests.
self.selector = Selector()
#ECJ20100815 Commented out for now until debugged
#self.crypto = Security()
if settings.GUI:
if settings.DEBUG:
print("Now running the GUI ServiceController code")
#ECJ 20100815 todo: make it so this also works for UNIX, not just win32
# SBB20100612 adding listener for data comm (win32 shutdown from GUI)
sc = ServiceController(True) # True is Server
sc.listen()
#If GUI to control the app being used, just run code with pyinotify or windows listener
#ECJ 20100815 Make sure windows listener is working without the GUI
else:
if settings.DEBUG:
print("Now running FileHandler.nonGUIRun()")
self.nonGUIRun()
print("returned to FileHandler.__init__ from nonGUIRun")
print("calling sys.exit")
sys.exit()
def setProcessingOptions(self, docName):
''' ProcessingOptions is a dictionary on a per file/sender basis.
Dictionary contains settings like does the sender use encryption etc.
self.ProcessingOptions =
{
'SMTPTOADDRESS': ['<EMAIL>,],
'SMTPTOADDRESSCC': [],
'SMTPTOADDRESSBCC': [],
'FINGERPRINT':'',
'USES_ENCRYPTION':True
}
'''
folderName = path.split(docName)[0]
if settings.DEBUG:
print("folder name to email is", folderName)
if os.path.isdir(folderName):
try:
self.ProcessingOptions = inputConfiguration.SMTPRECIPIENTS[folderName]
except:
raise
else:
print("folder", folderName, "is not a directory")
def processFiles(self, new_file_loc):
self.setProcessingOptions(new_file_loc)
self.email = XMLProcessorNotifier(new_file_loc)
self.Router = Router()
valid = False
# test if the sender encrypts data, if so, decrypt, if not, just process
print("The settings indicate that, for this folder, encryption is:",self.ProcessingOptions['USES_ENCRYPTION'])
if self.ProcessingOptions['USES_ENCRYPTION']:
# decrypt the file
fileStream = self.crypto.decryptFile2Stream(new_file_loc)
print("stream",fileStream)
else:
if settings.DEBUG:
print("No encryption, so just opening the file", new_file_loc)
# Workaround note: a plain file object, unlike a StringIO buffer, cannot be copied to another stream; converting to StringIO (see the commented-out line below) would allow true copies.
#fileStream = StringIO(fileStream.read())
if settings.DEBUG:
print("attempting validation tests on", new_file_loc)
#print "os.path.isfile(new_file_loc) is ", os.path.isfile(new_file_loc)
if os.path.isfile(new_file_loc):
results = self.selector.validate(new_file_loc)
for item in results:
if item == True:
valid = True
try:
self.email.notifyValidationSuccess()
except:
print(traceback.print_exc(file=sys.stdout))
pass
if settings.DEBUG:
print("moving to used_files", end=' ')
new_file_name = self.Router.moveUsed(new_file_loc)
# break
return (True, new_file_name)
if valid == False:
if settings.DEBUG:
print("We did not have any successful validations")
self.email.notifyValidationFailure()
if settings.DEBUG:
print("moving to Failed")
if os.path.isfile(new_file_loc):
new_file_name = self.Router.moveFailed(new_file_loc)
else:
if settings.DEBUG:
print("Can't move because file doesn't exist. Shouldn't be trying to move anything to Failed if isn't there.")
return (False, None)
# except etree.XMLSyntaxError, error:
# self.email.notifyValidationFailure(error)
# self.Router.moveFailed(new_file_loc)
#
# except DuplicateXMLDocumentError, inst:
# print type(inst) # the exception instance
# print inst.args # arguments stored in .args
# print inst # __str__ allows args to printed directly
#
# self.email.notifyDuplicateDocumentError(inst.message)
# self.Router.moveFailed(new_file_loc)
# return False
def processExisting(self):
''' Churn through the input path(s) and process files that are already there.
inotify only fires events for changes made after the program starts, so pre-existing files would otherwise never be processed.
'''
from . import loadconfiguration
if settings.DEBUG:
print("loading data from defined in the loadconfigation module")
loadconfiguration.loadData()
# get a list of files in the input path
listOfFiles = list()
# Loop over list of file locations [list]
for folder in inputConfiguration.INPUTFILES_PATH:
listOfFiles.extend(fileutils.grabFiles(path.join(folder,'*')))
if settings.DEBUG:
print("list of files grabbed in processExisting is", listOfFiles)
for inputFile in listOfFiles:
successful, processed_file = self.processFiles(inputFile)
if successful:
# Send processed files to the processed files folder:
if settings.DEBUG:
print('Made it this far so send used files to the processed files folder.')
print('Copying ' + inputConfiguration.USEDFILES_PATH + '/' + processed_file + ' to ' + outputConfiguration.PROCESSEDFILES_PATH + '/' + processed_file)
fileutils.copyFile(inputConfiguration.USEDFILES_PATH + '/' + processed_file, outputConfiguration.PROCESSEDFILES_PATH + '/' + processed_file)
# *******************************
# transfer control to nodebuilder
# *******************************
# first, setup options for nodebuilder
optParse = QueryObject(suppress_usage_message=True)
for paired_id in self.selector.paired_ids:
source_id = paired_id[0]
export_id = paired_id[1]
if settings.DEBUG:
print("nodebuilder generating output for source id:", source_id)
# next call nodebuilder for each source id
# options are: ['startDate', 'endDate', 'alldates', 'reported', 'unreported', 'configID']
(options, args) = optParse.parser.parse_args(['-a', '-u', '-i%s' % source_id])
try:
NODEBUILDER = NodeBuilder(options, export_id=export_id)
except:
print("*****************************************************************")
print("*****************************************************************")
print("*****************************************************************")
synthesis_error = traceback.format_exc()
print(synthesis_error)
smtp = smtpInterface(settings)
smtp.setMessageSubject("ERROR -- Synthesis:FileHandler:processExisting")
smtp.setRecipients(inputConfiguration.SMTPRECIPIENTS['testSource'])
smtp.setMessage("%s\r\n" % synthesis_error )
try:
print("trying to send message")
smtp.sendMessage()
except:
print('send failed')
print("*****************************************************************")
print("*****************************************************************")
print("*****************************************************************")
RESULTS = NODEBUILDER.run()
# empty list of paired ids
self.selector.paired_ids = list()
def nonGUIPOSIXRun(self):
#First, see if there are any existing files and process them
if settings.DEBUG:
print("First, looking for preexisting files in input location.")
self.processExisting()
# This will wait until files arrive, once processed, it will loop and start over (unless we get ctrl-C or break)
if settings.DEBUG:
print('monitoring starting ...')
new_files = self.monitor()
if settings.DEBUG:
print('monitoring done ...')
print('new_files is', new_files)
if not new_files:
print("No new files, returning")
return
for new_file in new_files:
if settings.DEBUG:
print('Processing: %s' % new_file)
self.processFiles(new_file)
def nonGUIWindowsRun(self):
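# Windows has no pyinotify support here, so poll the InputFiles directory every 10 seconds and diff the directory listings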
BASE_PATH = os.getcwd()
path_to_watch = os.path.join(BASE_PATH, "InputFiles")
before = dict ([(f, None) for f in os.listdir (path_to_watch)])
try:
while 1:
time.sleep(10)
after = dict ([(f, None) for f in os.listdir (path_to_watch)])
added = [f for f in after if not f in before]
removed = [f for f in before if not f in after]
if added:
print("Added: ", ", ".join (added))
self.processExisting()
if removed:
print("Removed: ", ", ".join (removed))
before = after
except KeyboardInterrupt:
return
def nonGUIRun(self):
'''Look for and handle files when no GUI is controlling the daemon, as specified by the GUI option in settings.py.'''
#Figure out if we are running POSIX or UNIX non-gui
if os.name == 'nt':
if settings.DEBUG:
print("We have a Windows system, as determined by nonGUIRun. So handing off to nonGUIWindowsRun()")
self.nonGUIWindowsRun()
else:
if settings.DEBUG:
print("We have a POSIX system, as determined by nonGUIRun(). So handing off to nonGUIPOSIXRun()")
self.nonGUIPOSIXRun()
print("back to nonGUIRun, so returning")
def monitor(self):
'function to start and stop the monitor'
try:
self.file_input_watcher.monitor()
if settings.DEBUG:
print("waiting for new input...")
#print "result of fileinputwatcher.monitor passed back up to selector.monitor is", result
#While the pyinotify thread is running we need to keep pulling from the queue (get() is set to time out after 5 seconds on subsequent passes)
#In other words, the Queue fills up while pyinotify is working. This empties the Queue, without stopping its function
files = list()
processed_files = list()
_QTO = 5
# if settings.DEBUG:
# wait_counter = 0
while 1:
# if an exception was encountered within a spawned thread, wait for all spawned
# threads to stop then exit
if self.exception.value == 1:
while self.thread_list[:].count(1) > 0:
pass
sys.exit(1)
#Queue emptying while loop, this always runs until Ctrl+C is called. If it ever stops, the found files get collected, but go nowhere
# if settings.DEBUG:
# print "waiting for new files...", wait_counter
# wait_counter+=1
# time.sleep(3)
# Empty out list created in Selector class
self.selector.issues = list()
self.selector.current_tests = list()
try:
if settings.USE_SPAWNED_THREADS:
file_found_path = self.Queue.get(False)
else:
file_found_path = self.Queue.get(block=True, timeout=_QTO)
_QTO = 5
if file_found_path != None:
# append all files into the file stack to be processed.
if settings.DEBUG:
print("appending files")
files.append(file_found_path)
if settings.USE_SPAWNED_THREADS:
raise queue.Empty
if settings.DEBUG:
print("files found so far in while loop are ", files)
continue
except queue.Empty:
#Stop emptying the Queue and process the result and let it fill up again, since pyinotify is still watching
if not files:
#if settings.DEBUG:
#print "Queue may be empty, but list of files is also empty, so let's keep monitoring"
continue
if settings.USE_SPAWNED_THREADS:
# since this flag is True, spawn threads to process the files
for i, thread_state in enumerate(self.thread_list):
if thread_state == 0:
# signify that this thread is running by setting its value in the thread list to 1
self.thread_list[i] = 1
if settings.DEBUG:
print("Thread list status: ", self.thread_list[:])
self._spawn_worker_thread(i, [files.pop(0), ])
print("number of files waiting to be processed: %d" % len(files))
break
else:
while files:
if settings.DEBUG:
print("queue.Empty exception, but files list is not empty, so files to process are", files)
filepathitem = files.pop()
print("processing ", filepathitem)
status, new_file_name = self.processFiles(filepathitem)
if settings.DEBUG:
print("Readying files to copy into the processed folder")
if status == True:
processed_files.append(new_file_name)
# *******************************
# transfer control to nodebuilder
# *******************************
# first, setup options for nodebuilder
optParse = QueryObject(suppress_usage_message=True)
paired_ids = list(set(self.selector.paired_ids))
for paired_id in paired_ids:
source_id = paired_id[0]
export_id = paired_id[1]
if settings.DEBUG:
print("nodebuilder generating output for source id:", source_id)
# next call nodebuilder for each source id
# options are: ['startDate', 'endDate', 'alldates', 'reported', 'unreported', 'configID']
(options, args) = optParse.parser.parse_args(['-a', '-u', '-i%s' % source_id])
try:
NODEBUILDER = NodeBuilder(options)
except:
print("*****************************************************************")
print("*****************************************************************")
print("*****************************************************************")
synthesis_error = traceback.format_exc()
print(synthesis_error)
smtp = smtpInterface(settings)
smtp.setMessageSubject("ERROR -- Synthesis:FileHandler:monitor")
smtp.setRecipients(inputConfiguration.SMTPRECIPIENTS['testSource'])
smtp.setMessage("%s\r\n" % synthesis_error )
try:
print("trying to send message")
smtp.sendMessage()
except:
print('send failed')
print("*****************************************************************")
print("*****************************************************************")
print("*****************************************************************")
continue
RESULTS = NODEBUILDER.run()
# empty list of paired ids
self.selector.paired_ids = list()
# Send processed files to the processed files folder:
if settings.DEBUG:
print('Made it this far so send used files to the processed files folder.')
#for processed_file in processed_files:
# print 'Copying ' + inputConfiguration.USEDFILES_PATH + '/' + processed_file + ' to ' + outputConfiguration.PROCESSEDFILES_PATH + '/' + processed_file
# fileutils.copyFile(inputConfiguration.USEDFILES_PATH + '/' + processed_file, outputConfiguration.PROCESSEDFILES_PATH + '/' + processed_file)
# # Remove processed file from list
# processed_files.pop(processed_files.index(processed_file))
while len(processed_files) > 0:
# This for loop is contained within the while to verify that all files are copied
# to processed_files
for processed_file in processed_files:
print('Copying ' + inputConfiguration.USEDFILES_PATH + '/' + processed_file + ' to ' + outputConfiguration.PROCESSEDFILES_PATH + '/' + processed_file)
fileutils.copyFile(inputConfiguration.USEDFILES_PATH + '/' + processed_file, outputConfiguration.PROCESSEDFILES_PATH + '/' + processed_file)
# Remove processed file from list
processed_files.pop(processed_files.index(processed_file))
#now go back to checking the Queue
continue
except KeyboardInterrupt:
print("KeyboardInterrupt caught in selector.monitor() while loop")
print("shutting down...")
i = 0
while i < len(self.thread_list):
self.thread_list[i] = -1
i += 1
# comment the following for loop if you don't want spawned threads to finish
# processing files before exiting.
for thread_state in self.thread_list:
if thread_state == 1:
continue
self.file_input_watcher.stop_monitoring()
break
except KeyboardInterrupt:
print("KeyboardInterrupt caught in selector.monitor() main section")
self.file_input_watcher.stop_monitoring()
except:
print("General Exception")
self.file_input_watcher.stop_monitoring()
raise
def _spawn_worker_thread(self, id, files): # @ReservedAssignment
# spawn thread to process files. don't wait for spawned thread to finish
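# Known workaround for Python 2's "Exception in thread ... (most likely raised during interpreter shutdown)" noise from dummy threads; on Python 3 the assignment is harmless.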
threading._DummyThread._Thread__stop = lambda x: 42
t = threading.Thread(target=self._worker_thread, args=(id, files, ))
t.daemon = True
t.start()
def _worker_thread(self, id, files): # @ReservedAssignment
if settings.DEBUG:
print("entering worker thread named: Thread-%d" % (id + 1))
while files:
# if the thread_list contains a -1, this means that someone stopped the
# pyramid server. exit this spawned thread.
if settings.DEBUG:
print("self.thread_list", self.thread_list[:])
if min(self.thread_list) == -1:
print("The pyramid server was stopped.....exiting Thread-%d" % (id + 1))
return
# if an exception was encountered within any spawned thread, mark this slot idle and exit this thread
if self.exception.value == 1:
self.thread_list[id] = 0
sys.exit(1)
#if settings.DEBUG:
# print "queue.Empty exception, but files list is not empty, so files to process are", files
filepathitem = files.pop()
if settings.DEBUG:
print("%s" % ("*" * 32))
print("Within Thread-%d" % (id + 1))
print("%s" % ("*" * 32))
print("processing ", filepathitem)
try:
self.processFiles(filepathitem)
print(filepathitem)
except KeyboardInterrupt:
self.thread_list[id] = 0
print("KeyboardInterrupt caught in selector._worker_thread()")
#self.file_input_watcher.stop_monitoring()
self.exception.value = 1
except:
self.thread_list[id] = 0
print("General Exception")
#self.file_input_watcher.stop_monitoring()
self.exception.value = 1
raise
# *******************************
# transfer control to nodebuilder
# *******************************
try:
# first, setup options for nodebuilder
optParse = QueryObject(suppress_usage_message=True)
if len(self.selector.paired_ids) > 0:
paired_id = self.selector.paired_ids.pop(0)
source_id = paired_id[0]
export_id = paired_id[1]
if settings.DEBUG:
print("nodebuilder generating output for source id:", source_id)
# next call nodebuilder for each source id
# options are: ['startDate', 'endDate', 'alldates', 'reported', 'unreported', 'configID']
(options, args) = optParse.parser.parse_args(['-a', '-u', '-i%s' % source_id])
try:
NODEBUILDER = NodeBuilder(options, export_id=export_id)
except:
print("*****************************************************************")
print("*****************************************************************")
print("*****************************************************************")
synthesis_error = traceback.format_exc()
print(synthesis_error)
smtp = smtpInterface(settings)
smtp.setMessageSubject("ERROR -- Synthesis:FileHandler:_worker_thread")
smtp.setRecipients(inputConfiguration.SMTPRECIPIENTS['testSource'])
smtp.setMessage("%s\r\n" % synthesis_error )
try:
print("trying to send message")
smtp.sendMessage()
except:
print('send failed')
print("*****************************************************************")
print("*****************************************************************")
print("*****************************************************************")
RESULTS = NODEBUILDER.run()
except KeyboardInterrupt:
self.thread_list[id] = 0
print("KeyboardInterrupt caught in selector._worker_thread()")
#self.file_input_watcher.stop_monitoring()
self.exception.value = 1
except:
self.thread_list[id] = 0
print("General Exception")
#self.file_input_watcher.stop_monitoring()
self.exception.value = 1
raise
# signify that the thread is no longer running
self.thread_list[id] = 0
if settings.DEBUG:
print("Thread list status: ", self.thread_list[:])
if settings.DEBUG:
print("exiting worker thread named: Thread-%d" % (id + 1))
class Selector:
'''Figures out which data format is being received.'''
def __init__(self):
self.db = dbobjects.DB()
self.db.Base.metadata.create_all()
self.paired_ids = []
if settings.DEBUG:
print("selector instantiated and figuring out what schema are available")
for item in settings.SCHEMA_DOCS:
print('schema to potentially load: ' + settings.SCHEMA_DOCS[item])
self.current_tests = [] # Added by FBY on 2012-01-19
self.issues = [] # Added by FBY on 2012-01-19
def validate(self, instance_file_loc, shred=True):
'''Validate the instance file against the available schema and CSV tests.
Unless restricted in the configuration, each available test is tried in
turn and the first one that validates successfully wins. Pass it the path
to the XML instance data.'''
#tests = [HUDHMIS28XMLTest, HUDHMIS30XMLTest, JFCSXMLTest, PARXMLTest]
#tests = [HUDHMIS30XMLTest,HUDHMIS28XMLTest]
#tests = [HUDHMIS30XMLTest, HUDHMIS28XMLTest, OCCHUDHMIS30XMLTest, JFCSXMLTest, TBCExtendHUDHMISXMLTest]
#tests = [HUDHMIS30XMLTest, HUDHMIS28XMLTest, JFCSXMLTest, TBCExtendHUDHMISXMLTest]
#tests = [HUDHMIS30XMLTest]
#tests = [HUDHMIS28XMLTest]
tests = [TBCExtendHUDHMISXMLTest]
if settings.DEBUG:
print("tests are", tests)
#readers = [HUDHMIS28XMLReader, HUDHMIS30XMLReader, JFCSXMLInputReader, PARXMLInputReader]
#readers = {HUDHMIS30XMLTest:HUDHMIS30XMLInputReader, HUDHMIS28XMLTest:HUDHMIS28XMLInputReader, OCCHUDHMIS30XMLTest:OCCHUDHMIS30XMLInputReader, JFCSXMLTest:JFCSXMLInputReader, TBCExtendHUDHMISXMLTest:TBCHUDHMISXML30InputReader}
#readers = {HUDHMIS30XMLTest:GenericXMLReader,HUDHMIS28XMLTest:GenericXMLReader,OCCHUDHMIS30XMLTest:GenericXMLReader}
readers = {TBCExtendHUDHMISXMLTest:TBCHUDHMISXML30InputReader}
self.current_tests = tests # Added by FBY on 2012-01-19
if settings.SKIP_VALIDATION_TEST is True:
print('skipping tests battery for debugging')
print("just shredding with JFCSXMLReader service_event schema")
JFCSXMLInputReader.data_type = 'service_event'
readers[JFCSXMLTest](instance_file_loc, self.db).shred()
return
if settings.DEBUG:
print("readers are", readers)
results = []
for test in tests:
test_instance = test()
result = test_instance.validate(instance_file_loc)
self.issues.append(test_instance.issues)
results.append(result)
if settings.DEBUG:
print("validation return result is", result)
print("results are cumulatively", results)
if True in results:
#finds the first 'True' in the list; even if there are many, it gets the first one
loc_true = results.index(True)
#if settings.DEBUG:
#print "loc_true is", loc_true
length_list = len(results)
#if settings.DEBUG:
#print "len(results) is: ", length_list
#if the first 'True' validation was the last validation result, go shred/move
if loc_true == (length_list-1):
#if settings.DEBUG:
#print "loc_true is", (loc_true), "and that matches (length_list - 1) of ", (length_list - 1)
if settings.DEBUG:
print("we haven't had a positive validation until now, so go ahead and shred/move it")
if result:
if settings.DEBUG:
print("shredding...")
if shred:
if settings.DEBUG:
print("readers[test] is: ", readers[test])
print("instance_file_loc: ", instance_file_loc)
source_id, export_id = readers[test](instance_file_loc, self.db).shred()
if source_id != None:
self.paired_ids.append((source_id, export_id))
if not results:
print("results empty")
self.results = results # Added by FBY on 2012-01-19
return results
class VendorXMLTest:
'''Stub for any specific vendor's non-standardized XML format.'''
def __init__(self):
self.name = 'Vendor XML'
print('running the', self.name, 'test')
def validate(self, instance_filename):
'''implementation of interface's validate method'''
print('\nThe', self.name, 'test not implemented.')
print('...but intended to validate', instance_filename)
return False
class TBCExtendHUDHMISXMLTest: # JCS New 2012-01-05
'''Load in the HUD HMIS Schema, version 3.0.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'TBCExtendHUDHMISXML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['tbc_extend_hud_hmis_xml']
print("settings.SCHEMA_DOCS['tbc_extend_hud_hmis_xml'] is: ", settings.SCHEMA_DOCS['tbc_extend_hud_hmis_xml'])
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class HUDHMIS28XMLTest:
'''Load in the HUD HMIS Schema, version 2.8.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'HUDHMIS28XML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['hud_hmis_xml_2_8']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class HUDHMIS30XMLTest:
'''Load in the HUD HMIS Schema, version 3.0.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'HUDHMIS30XML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['hud_hmis_xml_3_0']
print("settings.SCHEMA_DOCS['hud_hmis_xml_3_0'] is: ", settings.SCHEMA_DOCS['hud_hmis_xml_3_0'])
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class OCCHUDHMIS30XMLTest:
'''Load in the HUD HMIS Schema, version 3.0.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'OCCHUDHMIS30XML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['occ_hud_hmis_xml_3_0']
print("settings.SCHEMA_DOCS['occ_hud_hmis_xml_3_0'] is: ", settings.SCHEMA_DOCS['occ_hud_hmis_xml_3_0'])
def validate(self, instance_stream):
'''This specific data format's validation process.'''
try:
schema = open(self.schema_filename,'r')
except:
print("couldn't open schema file", self.schema_filename)
raise
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class SvcPoint20XMLTest:
'''Load in the SVCPoint Schema, version 2.0.'''
def __init__(self):
self.name = 'Svcpt 2.0 XML'
print('running the Svcpt 2.0 XML test')
self.schema_filename = settings.SCHEMA_DOCS['svcpoint_2_0_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. '\
, error)
raise
class SvcPoint406XMLTest:
'''Load in the SVCPoint Schema, version 4.06'''
def __init__(self):
self.name = 'Svc406 XML'
print('running the Svcpt 4.06 XML test')
self.schema_filename = settings.SCHEMA_DOCS['svcpoint_4_0_6_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. '\
, error)
raise
class SvcPoint5XMLTest:
'''Load in the SVCPoint Schema, version 5.00'''
def __init__(self):
self.name = 'Svc5 XML'
print('running the Svcpt 5.00 XML test')
self.schema_filename = settings.SCHEMA_DOCS['svcpoint_5_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
raise
class hl7CCDXMLTest:
'''Load in the HL7 CCD Schema'''
def __init__(self):
self.name = 'hl7 CCD XML'
print('running the hl7 CCD XML test')
self.schema_filename = settings.SCHEMA_DOCS['hl7_ccd_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
raise
class JFCSXMLTest:
''' Tests for JFCS data
* There are 2 possible data source types ('service_event' or 'client')
Steps: (will stop and return True on first success)
1 - Attempt to validate against 'service_event' schema: 'JFCS_SERVICE.xsd'
2 - Attempt to validate against 'client' schema: 'JFCS_CLIENT.xsd'
3 - Check for known 'service_event' elements anywhere in the tree
4 - Check for known 'client' elements anywhere in the tree
'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'JFCS'
print('running the', self.name, 'test')
''' Define schemas and elements for testing '''
self.service_event_schema_filename = settings.SCHEMA_DOCS['jfcs_service_event_xml']
self.client_schema_filename = settings.SCHEMA_DOCS['jfcs_client_xml']
self.service_event_elements = ['c4clientid','qprogram','serv_code','trdate','end_date','cunits']
#self.client_elements = ['aprgcode','a_date','t_date','family_id','c4clientid','c4dob','hispanic','c4sex','c4firstname','c4lastname','c4mi','ethnicity','c4ssno','c4last_s01']
self.client_elements = ['aprgcode','a_date','t_date','family_id','c4clientid','c4dob','hispanic','c4sex','c4firstname','c4lastname','c4mi','ethnicity','c4ssno']
def validate(self, instance_filename, ):
'''JFCS data format validation process'''
copy_instance_stream = copy.copy(instance_filename)
try:
print("Determining by service event schema")
results = self.schemaTest(copy_instance_stream, self.service_event_schema_filename)
if results == True:
fileutils.makeBlock('JFCS service event XML data found. Determined by service event schema.')
JFCSXMLInputReader.data_type = 'service_event'
return results
print("Determining by client schema")
results = self.schemaTest(copy_instance_stream, self.client_schema_filename)
if results == True:
fileutils.makeBlock('JFCS client XML data found. Determined by client schema.')
JFCSXMLInputReader.data_type = 'client'
return results
print("Determining by service event elements.")
if self.service_event_elements is not None:
print(self.service_event_elements)
results = self.elementTest(copy_instance_stream, self.service_event_elements)
if results == True:
fileutils.makeBlock('JFCS service event XML data found. Determined by service event elements.')
JFCSXMLInputReader.data_type = 'service_event'
return results
print("Determining by client elements.")
if self.client_elements is not None:
print(self.client_elements)
results = self.elementTest(copy_instance_stream, self.client_elements)
if results == True:
fileutils.makeBlock('JFCS client XML data found. Determined by client elements.')
JFCSXMLInputReader.data_type = 'client'
return results
print("returning False")
return False
else:
print("All the JFCS Tests Failed, returning False")
self.issues = "All the JFCS Tests Failed, returning False"
return False
except Exception as exception:
print('XML Syntax Error in validate. There appears to be malformed XML. ', exception)
self.issues = 'XML Syntax Error in validate. There appears to be malformed XML. %s' % str(exception)
return False
def schemaTest(self, copy_instance_stream, schema_filename):
'''Attempt to validate input file against specific schema'''
schema = open(schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
try:
instance_parsed = etree.parse(copy_instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error in schemaTest. There appears to be malformed XML. ', error)
return False
def elementTest(self, copy_instance_stream, elements):
'''Attempt to find elements in the input file by searching the tree'''
print("inside element test")
print("elements are: ", elements)
xml_doc = etree.parse(copy_instance_stream)
for e in elements:
search_term = ".//" + e
if xml_doc.find(search_term) is None:
print("returning False from inside elementTest")
return False
print("returning True from inside elementTest")
return True
class PARXMLTest:
'''Load in the HUD HMIS Extended Schema for Operation PAR'''
def __init__(self):
self.name = 'PARXML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['operation_par_xml']
'''Find elements with or without specific xsd type'''
def find_elements_by_type(self, schema_doc, type_content):
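# return the @name of every xsd:element whose declared type is not equal to type_content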
element_names = schema_doc.xpath("//xsd:element[@type != $n]/@name", namespaces={"xsd":"http://www.w3.org/2001/XMLSchema", 'ext':'http://xsd.alexandriaconsulting.com/cgi-bin/trac.cgi/export/344/trunk/synthesis/xsd/Operation_PAR_Extend_HUD_HMIS_2_8.xsd', 'hmis':'http://www.hmis.info/schema/2_8/HUD_HMIS_2_8.xsd'},n=type_content)
return element_names
def validate(self, instance_stream):
#return True ## use this to skip the validation test
#return False ## use this to fail validation test
'''This specific data format's validation process.'''
'''Import schema for Operation PARS'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
## if schema fails to compile, catch exception here (except Exception, e: print e.error_log)
# make a copy of the file stream, validate against the copy not the real stream
copy_instance_stream = copy.copy(instance_stream)
xml_doc = etree.parse(copy_instance_stream)
'''
Explicit check for 'ext' namespace since HUD_HMIS_2.8 xml
validates against the extended Operation PAR schema
'''
ext_namespace_check = xml_doc.xpath('/ext:SourceDatabase', namespaces={'ext': 'http://xsd.alexandriaconsulting.com/cgi-bin/trac.cgi/export/344/trunk/synthesis/xsd/Operation_PAR_Extend_HUD_HMIS_2_8.xsd'})
if len(ext_namespace_check) != 1: return False
try:
instance_parsed = etree.parse(copy_instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
'''
Elements that do not have the maxLength attribute
in schema must be checked to ensure string length
conforms to database field. Lengths exceeding 32
characters will cause the xml to be deemed invalid.
This adds extra weight to this process and should
be removed if maxLength is implemented for all
elements in the schema.
'''
'''import original HUD HMIS 2.8 xsd that Operation PARS extended'''
schema_hudhmis_filename = settings.SCHEMA_DOCS['hud_hmis_2_8_xml']
schema_hudhmis_raw = open(schema_hudhmis_filename,'r')
schema_hudhmis_parsed = etree.parse(schema_hudhmis_raw)
'''get lists of elements with maxLength attribute greater than 32'''
elements_string50 = self.find_elements_by_type(schema_parsed, 'hmis:string50')
elements_string50_ns = []
for e in elements_string50:
elem_with_ns = '{http://xsd.alexandriaconsulting.com/cgi-bin/trac.cgi/export/344/trunk/synthesis/xsd/Operation_PAR_Extend_HUD_HMIS_2_8.xsd}' + e
elements_string50_ns.append(elem_with_ns)
elements_string50 = self.find_elements_by_type(schema_hudhmis_parsed, 'hmis:string50')
for e in elements_string50:
elem_with_ns = '{http://www.hmis.info/schema/2_8/HUD_HMIS_2_8.xsd}' + e
elements_string50_ns.append(elem_with_ns)
'''combine lists if your looking for multiple types'''
elements_maxlength = elements_string50_ns
'''find elements without specific attribute and check length'''
xml_root = xml_doc.getroot()
for e in xml_root.iter():
if str(e.tag) in elements_maxlength:
if e.text is not None and len(e.text) > 32:
print('XML Error. Value %s exceeds database field length.' % str(e.tag))
return False ## remove this when testing and perform manual truncate in PARXMLReader()
#return False ## return invalid, use this to only test validation of string lengths and exit
fileutils.makeBlock('The Operation PAR XML successfully validated.')
return results
if results == False:
print('The xml did not successfully validate against \
Operation PAR XML.')
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. '\
, error)
raise
#class GenericXMLInputReader(readertype):
# def __init__(self, instance_filename):
# self.reader = OCCHUDHMISXML30Reader(instance_filename)
# if settings.DEBUG:
# print "self.reader to be read is: ", self.reader
# print "does self.reader exist?", os.path.exists(self.reader)
#
# def shred(self):
# tree = self.reader.read()
# try:
# self.reader.process_data(tree)
# except:
# raise
class TBCHUDHMISXML30InputReader(TBCHUDHMISXML30Reader):
def __init__(self, instance_filename, db):
self.reader = TBCHUDHMISXML30Reader(instance_filename, db)
def shred(self):
tree = self.reader.read()
try:
source_id, export_id = self.reader.process_data(tree)
if source_id != None:
return source_id, export_id
except:
raise
class HUDHMIS28XMLInputReader(HMISXML28Reader):
def __init__(self, instance_filename, db):
self.reader = HMISXML28Reader(instance_filename, db)
def shred(self):
tree = self.reader.read()
try:
source_ids = self.reader.process_data(tree)
if source_ids != None:
return source_ids
except:
raise
class HUDHMIS30XMLInputReader(HMISXML30Reader):
def __init__(self, instance_filename, db):
self.reader = HMISXML30Reader(instance_filename, db)
def shred(self):
tree = self.reader.read()
try:
source_ids = self.reader.process_data(tree)
if source_ids != None:
return source_ids
except:
raise
class OCCHUDHMIS30XMLInputReader(OCCHUDHMISXML30Reader):
def __init__(self, instance_filename, db):
#if settings.DEBUG:
#print "does ", instance_filename, "exist?", os.path.exists(instance_filename)
self.reader = OCCHUDHMISXML30Reader(instance_filename, db)
if settings.DEBUG:
print("self.reader to be read is: ", self.reader)
def shred(self):
tree = self.reader.read()
try:
source_ids = self.reader.process_data(tree)
if source_ids != None:
return source_ids
except:
raise
class JFCSXMLInputReader(JFCSXMLReader):
def __init__(self, instance_filename):
self.reader = JFCSXMLReader(instance_filename)
def shred(self):
tree = self.reader.read()
try:
source_ids = self.reader.process_data(tree, self.data_type)
if source_ids != None:
return source_ids
except:
raise
#class PARXMLInputReader(PARXMLReader):
# def __init__(self, instance_filename):
# self.reader = PARXMLReader(instance_filename)
#
# def shred(self):
# tree = self.reader.read()
# try:
# self.reader.process_data(tree)
# except:
# raise
class VendorXMLInputReader():
def __init__(self, xml_instance_file):
self.name = 'Vendor XML'
self.instance_filename = xml_instance_file
def shred(self):
'''implementation of interface's shred method'''
print('\nThe', self.name, 'test not implemented.')
print('...but intended to shred the XML Document: %s' % self.instance_filename)
return False
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
```
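The `Selector` class above can also be driven directly, without the `FileHandler` watch loop, which is handy for spot-checking a single export file. A minimal sketch, assuming the package is importable as `synthesis.src.selector`, that `settings.SKIP_VALIDATION_TEST` is off, that the database configured in `dbobjects` is reachable, and that the sample file path is hypothetical:

```python
from synthesis.src.selector import Selector   # import path is an assumption

selector = Selector()                          # opens the DB and creates tables
results = selector.validate("InputFiles/sample_hmis_export.xml", shred=False)

# validate() returns one boolean per test attempted, in the same order as the
# tests list, so the index of the first True tells you which schema matched.
if results and True in results:
    matched = selector.current_tests[results.index(True)]
    print("matched test:", matched.__name__)
else:
    print("no schema matched; issues:", selector.issues)
```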
#### File: synthesis/src/selector_tests.py
```python
from lxml import etree
from .conf import settings
from . import fileutils
class VendorXMLTest:
'''Stub for any specific vendor's non-standardized XML format.'''
def __init__(self):
self.name = 'Vendor XML'
print('running the', self.name, 'test')
def validate(self, instance_filename):
'''implementation of interface's validate method'''
print('\nThe', self.name, 'test not implemented.')
print('...but intended to validate', instance_filename)
return False
class TBCExtendHUDHMISXMLTest: # JCS New 2012-01-05
'''Load in the HUD HMIS Schema, version 3.0.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'TBCExtendHUDHMISXML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['tbc_extend_hud_hmis_xml']
print("settings.SCHEMA_DOCS['tbc_extend_hud_hmis_xml'] is: ", settings.SCHEMA_DOCS['tbc_extend_hud_hmis_xml'])
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class HUDHMIS28XMLTest:
'''Load in the HUD HMIS Schema, version 2.8.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'HUDHMIS28XML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['hud_hmis_xml_2_8']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class HUDHMIS30XMLTest:
'''Load in the HUD HMIS Schema, version 3.0.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'HUDHMIS30XML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['hud_hmis_xml_3_0']
print("settings.SCHEMA_DOCS['hud_hmis_xml_3_0'] is: ", settings.SCHEMA_DOCS['hud_hmis_xml_3_0'])
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class OCCHUDHMIS30XMLTest:
'''Load in the HUD HMIS Schema, version 3.0.'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'OCCHUDHMIS30XML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['occ_hud_hmis_xml_3_0']
print("settings.SCHEMA_DOCS['occ_hud_hmis_xml_3_0'] is: ", settings.SCHEMA_DOCS['occ_hud_hmis_xml_3_0'])
def validate(self, instance_stream):
'''This specific data format's validation process.'''
try:
schema = open(self.schema_filename,'r')
except:
print("couldn't open schema file", self.schema_filename)
raise
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
self.issues = detailed_results # Added by FBY on 2012-01-19
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
self.issues = error # Added by FBY on 2012-01-19
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
pass
class SvcPoint20XMLTest:
'''Load in the SVCPoint Schema, version 2.0.'''
def __init__(self):
self.name = 'Svcpt 2.0 XML'
print('running the Svcpt 2.0 XML test')
self.schema_filename = settings.SCHEMA_DOCS['svcpoint_2_0_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. '\
, error)
raise
class SvcPoint406XMLTest:
'''Load in the SVCPoint Schema, version 4.06'''
def __init__(self):
self.name = 'Svc406 XML'
print('running the Svcpt 4.06 XML test')
self.schema_filename = settings.SCHEMA_DOCS['svcpoint_4_0_6_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. '\
, error)
raise
class SvcPoint5XMLTest:
'''Load in the SVCPoint Schema, version 5.00'''
def __init__(self):
self.name = 'Svc5 XML'
print('running the Svcpt 5.00 XML test')
self.schema_filename = settings.SCHEMA_DOCS['svcpoint_5_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
# make a copy of the stream, validate against the copy not the real stream
#copy_instance_stream = copy.copy(instance_stream)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
raise
class hl7CCDXMLTest:
'''Load in the HL7 CCD Schema'''
def __init__(self):
self.name = 'hl7 CCD XML'
print('running the hl7 CCD XML test')
self.schema_filename = settings.SCHEMA_DOCS['hl7_ccd_xml']
def validate(self, instance_stream):
'''This specific data format's validation process.'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
try:
instance_parsed = etree.parse(instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
fileutils.makeBlock('The %s successfully validated.' % self.name)
return results
if results == False:
print('The xml did not successfully validate against %s' % self.name)
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. ', error)
raise
class JFCSXMLTest:
''' Tests for JFCS data
* There are 2 possible data source types ('service_event' or 'client')
Steps: (will stop and return True on first success)
1 - Attempt to validate against 'service_event' schema: 'JFCS_SERVICE.xsd'
2 - Attempt to validate against 'client' schema: 'JFCS_CLIENT.xsd'
3 - Check for known 'service_event' elements anywhere in the tree
4 - Check for known 'client' elements anywhere in the tree
'''
def __init__(self):
self.issues = "" # Added by FBY on 2012-01-19
self.name = 'JFCS'
print('running the', self.name, 'test')
''' Define schemas and elements for testing '''
self.service_event_schema_filename = settings.SCHEMA_DOCS['jfcs_service_event_xml']
self.client_schema_filename = settings.SCHEMA_DOCS['jfcs_client_xml']
self.service_event_elements = ['c4clientid','qprogram','serv_code','trdate','end_date','cunits']
#self.client_elements = ['aprgcode','a_date','t_date','family_id','c4clientid','c4dob','hispanic','c4sex','c4firstname','c4lastname','c4mi','ethnicity','c4ssno','c4last_s01']
self.client_elements = ['aprgcode','a_date','t_date','family_id','c4clientid','c4dob','hispanic','c4sex','c4firstname','c4lastname','c4mi','ethnicity','c4ssno']
    def validate(self, instance_filename):
'''JFCS data format validation process'''
copy_instance_stream = copy.copy(instance_filename)
try:
print("Determining by service event schema")
results = self.schemaTest(copy_instance_stream, self.service_event_schema_filename)
if results == True:
fileutils.makeBlock('JFCS service event XML data found. Determined by service event schema.')
JFCSXMLInputReader.data_type = 'service_event'
return results
print("Determining by client schema")
results = self.schemaTest(copy_instance_stream, self.client_schema_filename)
if results == True:
fileutils.makeBlock('JFCS client XML data found. Determined by client schema.')
JFCSXMLInputReader.data_type = 'client'
return results
print("Determining by service event elements.")
if self.service_event_elements is not None:
print(self.service_event_elements)
results = self.elementTest(copy_instance_stream, self.service_event_elements)
if results == True:
fileutils.makeBlock('JFCS service event XML data found. Determined by service event elements.')
JFCSXMLInputReader.data_type = 'service_event'
return results
print("Determining by client elements.")
if self.client_elements is not None:
print(self.client_elements)
results = self.elementTest(copy_instance_stream, self.client_elements)
if results == True:
fileutils.makeBlock('JFCS client XML data found. Determined by client elements.')
JFCSXMLInputReader.data_type = 'client'
return results
print("returning False")
return False
else:
print("All the JFCS Tests Failed, returning False")
self.issues = "All the JFCS Tests Failed, returning False"
return False
except Exception as exception:
print('XML Syntax Error in validate. There appears to be malformed XML. ', exception)
self.issues = 'XML Syntax Error in validate. There appears to be malformed XML. %s' % str(exception)
return False
def schemaTest(self, copy_instance_stream, schema_filename):
'''Attempt to validate input file against specific schema'''
schema = open(schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
try:
instance_parsed = etree.parse(copy_instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error in schemaTest. There appears to be malformed XML. ', error)
return False
def elementTest(self, copy_instance_stream, elements):
'''Attempt to find elements in the input file by searching the tree'''
print("inside element test")
print("elements are: ", elements)
xml_doc = etree.parse(copy_instance_stream)
for e in elements:
search_term = ".//" + e
if xml_doc.find(search_term) is None:
print("returning False from inside elementTest")
return False
print("returning True from inside elementTest")
return True
class PARXMLTest:
'''Load in the HUD HMIS Extended Schema for Operation PAR'''
def __init__(self):
self.name = 'PARXML'
print('running the', self.name, 'test')
self.schema_filename = settings.SCHEMA_DOCS['operation_par_xml']
'''Find elements with or without specific xsd type'''
def find_elements_by_type(self, schema_doc, type_content):
        element_names = schema_doc.xpath(
            "//xsd:element[@type != $n]/@name",
            namespaces={
                "xsd": "http://www.w3.org/2001/XMLSchema",
                'ext': 'http://xsd.alexandriaconsulting.com/cgi-bin/trac.cgi/export/344/trunk/synthesis/xsd/Operation_PAR_Extend_HUD_HMIS_2_8.xsd',
                'hmis': 'http://www.hmis.info/schema/2_8/HUD_HMIS_2_8.xsd',
            },
            n=type_content,
        )
return element_names
def validate(self, instance_stream):
#return True ## use this to skip the validation test
#return False ## use this to fail validation test
'''This specific data format's validation process.'''
'''Import schema for Operation PARS'''
schema = open(self.schema_filename,'r')
schema_parsed = etree.parse(schema)
schema_parsed_xsd = etree.XMLSchema(schema_parsed)
## if schema fails to compile, catch exception here (except Exception, e: print e.error_log)
# make a copy of the file stream, validate against the copy not the real stream
copy_instance_stream = copy.copy(instance_stream)
xml_doc = etree.parse(copy_instance_stream)
'''
Explicit check for 'ext' namespace since HUD_HMIS_2.8 xml
validates against the extended Operation PAR schema
'''
ext_namespace_check = xml_doc.xpath('/ext:SourceDatabase', namespaces={'ext': 'http://xsd.alexandriaconsulting.com/cgi-bin/trac.cgi/export/344/trunk/synthesis/xsd/Operation_PAR_Extend_HUD_HMIS_2_8.xsd'})
if len(ext_namespace_check) != 1: return False
try:
instance_parsed = etree.parse(copy_instance_stream)
results = schema_parsed_xsd.validate(instance_parsed)
if results == True:
'''
Elements that do not have the maxLength attribute
in schema must be checked to ensure string length
conforms to database field. Lengths exceeding 32
characters will cause the xml to be deemed invalid.
This adds extra weight to this process and should
be removed if maxLength is implemented for all
elements in the schema.
'''
'''import original HUD HMIS 2.8 xsd that Operation PARS extended'''
schema_hudhmis_filename = settings.SCHEMA_DOCS['hud_hmis_2_8_xml']
schema_hudhmis_raw = open(schema_hudhmis_filename,'r')
schema_hudhmis_parsed = etree.parse(schema_hudhmis_raw)
'''get lists of elements with maxLength attribute greater than 32'''
elements_string50 = self.find_elements_by_type(schema_parsed, 'hmis:string50')
elements_string50_ns = []
for e in elements_string50:
elem_with_ns = '{http://xsd.alexandriaconsulting.com/cgi-bin/trac.cgi/export/344/trunk/synthesis/xsd/Operation_PAR_Extend_HUD_HMIS_2_8.xsd}' + e
elements_string50_ns.append(elem_with_ns)
elements_string50 = self.find_elements_by_type(schema_hudhmis_parsed, 'hmis:string50')
for e in elements_string50:
elem_with_ns = '{http://www.hmis.info/schema/2_8/HUD_HMIS_2_8.xsd}' + e
elements_string50_ns.append(elem_with_ns)
'''combine lists if your looking for multiple types'''
elements_maxlength = elements_string50_ns
'''find elements without specific attribute and check length'''
xml_root = xml_doc.getroot()
for e in xml_root.iter():
if str(e.tag) in elements_maxlength:
                        if e.text is not None and len(e.text) > 32:
print('XML Error. Value %s exceeds database field length.' % str(e.tag))
return False ## remove this when testing and perform manual truncate in PARXMLReader()
#return False ## return invalid, use this to only test validation of string lengths and exit
fileutils.makeBlock('The Operation PAR XML successfully validated.')
return results
if results == False:
print('The xml did not successfully validate against \
Operation PAR XML.')
try:
detailed_results = schema_parsed_xsd.assertValid\
(instance_parsed)
print(detailed_results)
return results
except etree.DocumentInvalid as error:
print('Document Invalid Exception. Here is the detail:')
print(error)
return results
if results == None:
print("The validator erred and couldn't determine if the xml \
was either valid or invalid.")
return results
except etree.XMLSyntaxError as error:
print('XML Syntax Error. There appears to be malformed XML. '\
, error)
raise
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
```
#### File: synthesis/src/svcpointxml5writer.py
```python
from .interpretpicklist import Interpretpicklist
from . import dateutils
from datetime import datetime
from . import xmlutilities
from synthesis.exceptions import DataFormatError#, SoftwareCompatibilityError
from . import logger
#from sys import version
from . import dbobjects
from .writer import Writer
from zope.interface import implementer
from sqlalchemy import or_, and_, between
from .conf import settings
from lxml import etree as ET
def buildWorkhistoryAttributes(element):
element.attrib['date_added'] = datetime.now().isoformat()
element.attrib['date_effective'] = datetime.now().isoformat()
@implementer(Writer)
class SvcPointXML5Writer():
# Writer Interface
hmis_namespace = "http://www.hmis.info/schema/2_8/HUD_HMIS_2_8.xsd"
airs_namespace = "http://www.hmis.info/schema/2_8/AIRS_3_0_draft5_mod.xsd"
nsmap = {"hmis" : hmis_namespace, "airs" : airs_namespace}
svcpt_version = '5.00'
def __init__(self, poutDirectory, processingOptions, debugMessages=None):
#print "%s Class Initialized" % self.__name__
if settings.DEBUG:
print("XML File to be dumped to: %s" % poutDirectory)
self.log = logger.Logger(configFile=settings.LOGGING_INI, loglevel=40) # JCS 10/3/11
self.outDirectory = poutDirectory
self.pickList = Interpretpicklist()
# SBB20070626 Adding the declaration for outcomes list
self.options = processingOptions
# SBB20070628 adding a buffer for errors to be displayed at the end of the process.
self.errorMsgs = []
self.db = dbobjects.DB() # JCS 10/05/11
self.db.Base.metadata.create_all()
def write(self):
self.startTransaction()
self.processXML()
self.prettify()
print('==== Self:', self)
xmlutilities.writeOutXML(self, xml_declaration=True, encoding="UTF-8") # JCS, 1 Sep 2012
#self.commitTransaction()
return True
def updateReported(self, currentObject):
# update the reported field of the currentObject being passed in. These should all exist.
try:
if settings.DEBUG:
print('Updating reporting for object: %s' % currentObject.__class__)
currentObject.reported = True
#currentObject.update()
self.commitTransaction()
except:
print("Exception occurred during update the 'reported' flag")
pass
def prettify(self):
xmlutilities.indent(self.root_element)
def dumpErrors(self):
print("Error Reporting")
print("-" * 80)
for row in range(len(self.errorMsgs)):
print("%s %s" % (row, self.errorMsgs[row]))
def setSysID(self, pSysID):
self.sysID = pSysID
def commitTransaction(self):
self.session.commit()
def startTransaction(self):
self.session = self.db.Session()
def pullConfiguration(self, pExportID):
# need to use both ExportID and Processing Mode (Test or Prod)
export = self.session.query(dbobjects.Export).filter(dbobjects.Export.export_id == pExportID).one()
if settings.DEBUG:
print("trying to do pullConfiguration")
#print "export is:", export, "pExportID is", pExportID
#print "export.export_id is: ", export.export_id
#print "dbobjects.SystemConfiguration.source_id is ", dbobjects.SystemConfiguration.source_id
selink = self.session.query(dbobjects.SourceExportLink).filter(dbobjects.SourceExportLink.export_index_id == export.id).one()
#print '==== Selink.id:', selink.id
source = self.session.query(dbobjects.Source).filter(dbobjects.Source.id == selink.source_index_id).one()
#print '==== Source.id:', source.id
self.configurationRec = self.session.query(dbobjects.SystemConfiguration).filter(and_(dbobjects.SystemConfiguration.source_id == source.source_id, dbobjects.SystemConfiguration.processing_mode == settings.MODE)).one()
#print '==== sys config.id', self.configurationRec.id
def processXML(self): # records represents whatever element you're tacking more onto, like entry_exits or clients
if settings.DEBUG:
print("processXML: Appending XML to Base Record")
self.root_element = self.createDoc() #makes root element with XML header attributes
#print '==== root created'
clients = self.createClients(self.root_element) # JCS - tag is <clientRecords> Only node under clients is <Client>
print('==== clientRecords created')
if self.options.reported == True:
Persons = self.session.query(dbobjects.Person).filter(dbobjects.Person.reported == True)
elif self.options.unreported == True:
Persons = self.session.query(dbobjects.Person).filter(or_(dbobjects.Person.reported == False, dbobjects.Person.reported == None))
elif self.options.reported == None:
Persons = self.session.query(dbobjects.Person)
# Now apply the dates to the result set.
if self.options.alldates == None:
Persons = Persons.filter(between(dbobjects.Person.person_id_date_collected, self.options.startDate, self.options.endDate))
pulledConfigID = 0 # JCS Only pull it if it has changed
for self.person in Persons:
#print "person is: ", self.person
export = self.person.fk_person_to_export # this is a single record because:
# person has: export_index_id = Column(Integer, ForeignKey('export.id'))
# export has: fk_export_to_person = relationship('Person', backref='fk_person_to_export')
# Therefore there are multiple persons to one export - but only one export to a person
#print "==== export before pullconfig:", export.id, export # JCS
if pulledConfigID != export.id:
self.pullConfiguration(export.export_id)
pulledConfigID = export.id
self.ph = self.person.fk_person_to_person_historical # JCS This is a list of records
self.race = self.person.fk_person_to_races
self.site_service_part = self.person.site_service_participations # JCS
#information_releases = self.person.fk_person_to_release_of_information # JCS a set
#self.service_event = self.person.fk_person_to_service_event
# Instead of generating a number (above), use the client number that is already provided in the legacy system
# or
# self.iDG.initializeSystemID(self.person.id)
self.sysID = self.person.id # JCS beware set self.sysID
#if settings.DEBUG:
#print "self.person is:", self.person
if self.person: # and not self.person.person_legal_first_name_unhashed+self.person.person_legal_last_name_unhashed == None:
self.client = self.createClient(clients) # JCS - no clients in svc5? yes as clientRecords
# Sub can be: active, anonymous, firstName, suffix, unnamedClient, alias, middleName, childEntryExit,
# childReleaseOfInfo, childGoal
self.customizeClient(self.client)
self.customizeClientPersonalIdentifiers(self.client, self.person)
self.assessment_data = self.createAssessmentData(self.client) # JCS New - self?
self.customizeAssessmentData(self.assessment_data)
if self.site_service_part: # JCS 21 Dec 2012
self.child_entry_exit = self.createChildEntryExit(self.client)
for ssp in self.site_service_part:
self.createEntryExit(self.child_entry_exit, ssp)
# update the reported flag for person (This needs to be applied to all objects that we are getting data from)
self.updateReported(self.person)
# Query Mechanism for Site Service Participation (Entry Exits) same as for Person?
# This is only if we want to create an EE summary at the end for all Clients
# if self.options.reported == True:
# site_service_part = self.session.query(dbobjects.SiteServiceParticipation).filter(dbobjects.SiteServiceParticipation.reported == True)
# elif self.options.unreported == True:
# site_service_part = self.session.query(dbobjects.SiteServiceParticipation).filter(or_(dbobjects.SiteServiceParticipation.reported == False, dbobjects.SiteServiceParticipation.reported == None))
# elif self.options.reported == None:
# site_service_part = self.session.query(dbobjects.SiteServiceParticipation)
# else:
# pass
#
# # setup the date filter also
# site_service_part = site_service_part.filter(between(dbobjects.SiteServiceParticipation.site_service_participation_idid_num_date_collected, self.options.startDate, self.options.endDate))
#
# entry_exits = self.createEntryExits(self.root_element)
# for EE in site_service_part:
# # SBB20100405 do this to pull the configuration record
# person = EE.fk_participation_to_person
# export = person.fk_person_to_export
# self.pullConfiguration(export.export_id)
# self.updateReported(EE) # Reporting Update
# self.sysID = EE.id # JCS beware set self.sysID
# self.createEntryExit(entry_exits, EE)
# End of ProcessXML()
def createDoc(self):
# From hl7
#self.mymap = { None : "urn:hl7-org:v3",
# "voc" : "urn:hl7-org:v3/voc",
# "xsi" : "http://www.w3.org/2001/XMLSchema-instance"}
#root_element = ET.Element("ClinicalDocument", nsmap=self.mymap)
#root_element.attrib["{"+self.mymap["xsi"]+"}schemaLocation"] = "urn:hl7-org:v3 infrastructure/cda/CDA.xsd"
# From hl7 end
#sp5_instance looks like this
# <records xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# xsi:noNamespaceSchemaLocation="file:/home/eric/workspace/servicepoint_schema/sp5/sp5.xsd"
# odb_identifier="qwo7Wsoi"
# import_identifier="v7:1bl.e">
self.mymap = {"xsi" : "http://www.w3.org/2001/XMLSchema-instance"} # Yes lxml
#root_element = ET.Element("records") # Non-lxml
root_element = ET.Element("records", nsmap=self.mymap) # Yes lxml
#root_element.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance" # Non-lxml
#root_element.attrib["xsi:noNamespaceSchemaLocation"] = "sp5.xsd" # Non-lxml
root_element.attrib["{"+self.mymap["xsi"]+"}noNamespaceSchemaLocation"] = "sp5.xsd" # Yes lxml
# Added by JCS 1 Sep 2012
root_element.attrib["odb_identifier"] = "qwo7Wsoi" # Yes lxml
root_element.attrib["import_identifier"] = "v7:1bl.e" # Yes lxml
#root_element.attrib["schema_revision"] = "300_108" # JCS Not in Schema
#root_element.text = "\n"
return root_element
def createClients(self, root_element):
clients = ET.SubElement(root_element, "clientRecords")
return clients
def createClient(self, clients):
client = ET.SubElement(clients, "Client") # Cap 'C' in svc5
return client
def createChildEntryExit(self,client):
child_entry_exit = ET.SubElement(client, "childEntryExit") # JCS new - sub-client
return child_entry_exit
def createEntryExits(self,root_element):
entry_exits = ET.SubElement(root_element, "entryExitRecords") # JCS - not in SVP5?
return entry_exits
def customizeClient(self, client):
#print "==== Customize Client:", self.configurationRec.odbid, self.person.person_id_id_num
client.attrib["record_id"] = "CL-" + str(self.person.id)
#client.attrib["external_id"] = self.person.person_id_id_num # JCS -this item is optional
client.attrib["system_id"] = self.person.person_id_id_num # JCS just a guess ????
client.attrib["date_added"] = dateutils.fixDate(datetime.now())
client.attrib["date_updated"] = dateutils.fixDate(datetime.now())
# SBB20070702 check if self.intakes has none, this is a daily census that is alone
def customizeClientPersonalIdentifiers(self,client,recordset): # params are: self.client, self.person
if recordset.person_legal_first_name_unhashed != "" and recordset.person_legal_first_name_unhashed != None:
first_name = ET.SubElement(client, "firstName")
first_name.text = recordset.person_legal_first_name_unhashed
if recordset.person_legal_last_name_unhashed != "" and recordset.person_legal_last_name_unhashed != None:
last_name = ET.SubElement(client, "lastName")
last_name.text = recordset.person_legal_last_name_unhashed
#we don't have the following elements for daily_census only clients, but SvcPt requires them:
# I simulated this w/my datasets. Column names are as in the program
if recordset.person_legal_middle_name_unhashed != "" and recordset.person_legal_middle_name_unhashed != None:
mi_initial = ET.SubElement(client, "mi_initial")
mi_initial.text = self.fixMiddleInitial(recordset.person_legal_middle_name_unhashed)
# SBB20070831 incoming SSN's are 123456789 and need to be 123-45-6789
fixedSSN = self.fixSSN(recordset.person_social_security_number_unhashed) # JCS .person_SSN_unhashed)
if fixedSSN != "" and fixedSSN != None: #ECJ20071111 Omit SSN if it's blank
soc_sec_no = ET.SubElement(client, "socSecNoDashed")
soc_sec_no.text = fixedSSN
ssn_data_quality = ET.SubElement(client, "ssnDataQualityValue")
ssn_data_quality.text = "full ssn reported (hud)"
def createEntryExit(self, entry_exits, EE): # Outer Node, one EntryExit(ssp)
entry_exit = ET.SubElement(entry_exits, "EntryExit")
entry_exit.attrib["record_id"] = "EE-"+str(EE.id)
# ssp-idid-num looks like it ought to be unique, but isn't in sample input data, so append client id????
entry_exit.attrib["system_id"] = EE.site_service_participation_idid_num+"-"+EE.person.person_id_id_num
# person.site_service_participations = relationship("SiteServiceParticipation", backref="person")
entry_exit.attrib["date_added"] = dateutils.fixDate(datetime.now())
entry_exit.attrib["date_updated"] = dateutils.fixDate(datetime.now())
self.customizeEntryExit(entry_exit, EE)
return entry_exit
def customizeEntryExit(self, entry_exit, EE):
# Schema expects one of ( active, typeEntryExit, client, exitDate, reasonLeavingValue, reasonLeavingOther,
# destinationValue, destinationOther, notes, group )
# There is no type in our input XML, nor a field in ssp. Schema needs {'basic', 'basic center program entry/exit',
# 'hprp', 'hud', 'path', 'quick call', 'standard', 'transitional living program entry/exit'}
type1 = ET.SubElement(entry_exit, "typeEntryExit") # JCS this is a fudge to pass validation
type1.text = "basic" # "hud-40118"
provider_id = ET.SubElement(entry_exit, "provider")
provider_id.text = '%s' % self.configurationRec.providerid
if EE.participation_dates_start_date != "" and EE.participation_dates_start_date != None:
entry_date = ET.SubElement(entry_exit, "entryDate")
entry_date.text = dateutils.fixDate(EE.participation_dates_start_date)
if EE.participation_dates_end_date != "" and EE.participation_dates_end_date != None:
exit_date = ET.SubElement(entry_exit, "exitDate")
exit_date.text = dateutils.fixDate(EE.participation_dates_end_date)
return
def createAssessmentData(self, client): # dynamic content type
assessment_data = ET.SubElement(client, "assessmentData")
return assessment_data
def customizeAssessmentData(self, assessment_data):
if self.person.person_gender_unhashed != "" and self.person.person_gender_unhashed != None:
persGender = ET.SubElement(assessment_data, "svpprofgender" ) #"gender")
persGender.attrib["date_added"] = dateutils.fixDate(self.person.person_gender_unhashed_date_collected)
persGender.attrib["date_effective"] = dateutils.fixDate(self.person.person_gender_unhashed_date_effective)
persGender.text = str(self.person.person_gender_unhashed)
# dob (Date of Birth) lots of:SVPPROFDOB a few:DATEOFBIRTH
if self.person.person_date_of_birth_unhashed != "" and self.person.person_date_of_birth_unhashed != None:
dob = ET.SubElement(assessment_data, "svpprofdob")
dob.attrib["date_added"] = dateutils.fixDate(self.person.person_date_of_birth_unhashed_date_collected)
dob.attrib["date_effective"] = dateutils.fixDate(datetime.now()) # No date effect. in Person
dob.text = dateutils.fixDate(self.person.person_date_of_birth_unhashed)
# Ethnicity lots of:SVPPROFETH a few:Ethnicity uses:ETHNICITYPickOption
if self.person.person_ethnicity_unhashed != "" and self.person.person_ethnicity_unhashed != None:
# Our Interpretpicklist basically has 2 options. The schema has 23
ethText = self.pickList.getValue("EthnicityPick",str(self.person.person_ethnicity_unhashed))
eth = ET.SubElement(assessment_data, "svpprofeth")
eth.attrib["date_added"] = dateutils.fixDate(self.person.person_ethnicity_unhashed_date_collected)
eth.attrib["date_effective"] = dateutils.fixDate(datetime.now()) # No date effect. in Person
eth.text = ethText # str(self.person.person_ethnicity_unhashed)
# Race more than one?? JCS
for race in self.race:
# JCS schema has 'RACEPickOption' - using existing RacePick for now
raceText = self.pickList.getValue("RacePick",str(race.race_unhashed))
# print '==== race:', race.race_unhashed, raceText
if raceText != None:
raceNode = ET.SubElement(assessment_data, "svpprofrace") # JCS "primaryrace" or "svpprofrace"?
raceNode.attrib["date_added"] = dateutils.fixDate(race.race_date_collected)
raceNode.attrib["date_effective"] = dateutils.fixDate(race.race_date_effective)
raceNode.text = raceText
for ph in self.ph:
#print '==== ph person id:', ph.person_index_id #, ph.__dict__
# JCS - Fails if none - seen in going from tbc to here - but don't know if that ever happens
hs = self.session.query(dbobjects.HousingStatus).filter(dbobjects.HousingStatus.person_historical_index_id == ph.id).one()
hsText = self.pickList.getValue("HOUSINGSTATUSPickOption",hs.housing_status)
#print '==== hs:', hsText
if hsText != None:
housingStatus = ET.SubElement(assessment_data, "svp_hud_housingstatus") # JCS
housingStatus.attrib["date_added"] = dateutils.fixDate(hs.housing_status_date_collected)
housingStatus.attrib["date_effective"] = dateutils.fixDate(hs.housing_status_date_effective)
housingStatus.text = hsText
foster = self.session.query(dbobjects.FosterChildEver).filter(dbobjects.FosterChildEver.person_historical_index_id == ph.id).one()
fosterText = self.pickList.getValue("ENHANCEDYESNOPickOption",str(foster.foster_child_ever))
if fosterText != None:
fosterEver = ET.SubElement(assessment_data, "x20wereyoueverafoster") # JCS
fosterEver.attrib["date_added"] = dateutils.fixDate(foster.foster_child_ever_date_collected)
fosterEver.attrib["date_effective"] = dateutils.fixDate(foster.foster_child_ever_date_effective)
fosterEver.text = fosterText
# length of stay at prior residence
losapr = self.session.query(dbobjects.LengthOfStayAtPriorResidence).filter(dbobjects.LengthOfStayAtPriorResidence.person_historical_index_id == ph.id).one()
losaprText = self.pickList.getValue("LENGTHOFTHESTAYPickOption",losapr.length_of_stay_at_prior_residence)
#print '==== losapr:', losaprText
if losaprText != None:
lengthOfStay = ET.SubElement(assessment_data, "hud_lengthofstay") # JCS
lengthOfStay.attrib["date_added"] = dateutils.fixDate(losapr.length_of_stay_at_prior_residence_date_collected)
lengthOfStay.attrib["date_effective"] = dateutils.fixDate(losapr.length_of_stay_at_prior_residence_date_effective)
lengthOfStay.text = losaprText
# "Prior Residence" becomes "typeoflivingsituation"
tols = self.session.query(dbobjects.PriorResidence).filter(dbobjects.PriorResidence.person_historical_index_id == ph.id).one()
tolsText = self.pickList.getValue("LIVINGSITTYPESPickOption",tols.prior_residence_code)
#print '==== (prior) tols:', tolsText
if tolsText != None:
priorLiving = ET.SubElement(assessment_data, "typeoflivingsituation") # JCS
priorLiving.attrib["date_added"] = dateutils.fixDate(tols.prior_residence_code_date_collected)
priorLiving.attrib["date_effective"] = dateutils.fixDate(tols.prior_residence_code_date_effective)
priorLiving.text = tolsText
# There's also a prior_residence_id_id_num populated with a 13 digit number as string JCS
# Physical Disability - Boolean
pdyn = self.session.query(dbobjects.PhysicalDisability).filter(dbobjects.PhysicalDisability.person_historical_index_id == ph.id).one()
pdynText = pdyn.has_physical_disability
#print '==== pdyn:', pdynText
if pdynText != None:
physDisabYN = ET.SubElement(assessment_data, "svpphysicaldisabilit") # JCS
physDisabYN.attrib["date_added"] = dateutils.fixDate(pdyn.has_physical_disability_date_collected)
# This is required, but input is usually blank - something plugs in now()
physDisabYN.attrib["date_effective"] = dateutils.fixDate(pdyn.has_physical_disability_date_effective)
physDisabYN.text = pdynText
# There is also a complex type "disabilities_1"
# Veteran Status - Uses "ENHANCEDYESNOPickOption" which is a union, and allows anything
vvs = self.session.query(dbobjects.VeteranVeteranStatus).filter(dbobjects.VeteranVeteranStatus.person_historical_index_id == ph.id).one()
vvsText = vvs.veteran_status
#print '==== vvs:', vvsText
if vvsText != None:
vetStat = ET.SubElement(assessment_data, "veteran") # JCS
vetStat.attrib["date_added"] = dateutils.fixDate(vvs.veteran_status_date_collected)
vetStat.attrib["date_effective"] = dateutils.fixDate(vvs.veteran_status_date_effective)
vetStat.text = vvsText
# def customizeDisabilities_1(self, disabilities_1, ph):
# #if self.intakes['DisabilityDiscription'] != "":
# noteondisability = ET.SubElement(disabilities_1,'noteondisability')
# noteondisability.attrib["date_added"] = dateutils.fixDate(datetime.now())
# noteondisability.attrib["date_effective"] = dateutils.fixDate(ph.physical_disability_date_collected)
# noteondisability.text = ph.physical_disability
def current_picture(self, node):
''' Internal function. Debugging aid for the export module.'''
if settings.DEBUG:
print("Current XML Picture is")
print("======================\n" * 2)
ET.dump(node)
print("======================\n" * 2)
    def calcHourlyWage(self, monthlyWage):
        # Default to 0.00 so an empty monthlyWage doesn't leave hourlyWage undefined.
        hourlyWage = 0.00
        if monthlyWage != "":
            if monthlyWage.strip().isdigit():
                if float(monthlyWage) > 5000.00:
                    hourlyWage = float(monthlyWage) / 160.00
                else:
                    hourlyWage = float(monthlyWage)
            else:
                hourlyWage = 0.00
        return str(round(hourlyWage, 2))
    def fixMiddleInitial(self, middle_initial):
        # Strip whitespace first so a leading space doesn't become the "initial".
        fixed_middle_initial = middle_initial.strip()[0].upper()
        return fixed_middle_initial
def fixSSN(self, incomingSSN):
originalSSN = incomingSSN
if incomingSSN == "" or incomingSSN == None:
return incomingSSN
dashCount = incomingSSN.count('-')
if dashCount > 0:
if dashCount == 2:
# already has the dashes, return the string
if settings.DEBUG:
self.debugMessages.log("incoming SSN is correctly formatted: %s\n" % (incomingSSN))
return incomingSSN
else: # incoming SSN has 1 dash but not 2. This is an error
# fix this data
incomingSSN = incomingSSN.replace( '-', '')
if len(incomingSSN) < 9:
# reformat the string and return
theError = (1020, 'Data format error discovered in trying to cleanup incoming SSN: %s, original SSN: %s' % (incomingSSN, originalSSN))
if settings.DEBUG:
self.debugMessages.log(">>>> Incoming SSN is INcorrectly formatted. Original SSN from input file is: %s and Attempted cleaned up SSN is: %s\n" % (originalSSN, incomingSSN))
raise DataFormatError(theError)
# If we are here, we can simply reformat the string into dashes
if settings.DEBUG:
pass # JCS
# self.debugMessages.log("incoming SSN is INcorrectly formatted: %s. Reformatting to: %s\n" % (incomingSSN, '%s-%s-%s' % (incomingSSN[0:3], incomingSSN[3:5], incomingSSN[5:10])))
return '%s-%s-%s' % (incomingSSN[0:3], incomingSSN[3:5], incomingSSN[5:10])
#if __name__ == "__main__":
# vld = SVCPOINTXMLWriter(".")
# vld.write()
``` |
{
"source": "212726320/arxiv-converter",
"score": 2
} |
#### File: tests/test_convert/test_convert_files.py
```python
import os
import sys
import pytest
_file_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(_file_dir, "..", ".."))
import convert
_paper_dir = os.path.join(_file_dir, "mypaper")
def test_main_file():
res = convert.main_file(_paper_dir)
assert isinstance(res, str)
assert res == "main.tex"
def test_read_file():
lines = convert.read_file(os.path.join(_paper_dir, "section.tex"))
assert isinstance(lines, list)
assert all([isinstance(l, str) for l in lines])
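# Note (added): these tests can be run from the repository root with `pytest`; they
# assume the bundled example paper directory "mypaper" sits next to this file, as
# set up in _paper_dir above.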
``` |
{
"source": "212726320/BEBO-1",
"score": 2
} |
#### File: experiments/optimization/main.py
```python
import os
import sys
import argparse
from time import time
from functools import partial
import numpy as np
import scipy
import matplotlib.pyplot as plt
from gptorch.util import TensorType
import torch
base_path = os.path.join(os.path.dirname(__file__), "..", "..")
sys.path.append(base_path)
from src.embedders import GaussianEmbedder, DeterministicEmbedder
from src.bayesian_optimization import WithFunction, StaticDataset
from src import systems
util_path = os.path.join(os.path.dirname(__file__), "..")
if util_path not in sys.path:
sys.path.append(util_path)
from experiment_utils import doe, get_x_bounds, get_system, get_legacy_data
from experiment_utils import initialize_model, pre_train
from experiment_utils import train_function_egp, train_function_gptorch
from experiment_utils import predict_function_begp, predict_function_egp
torch.set_num_threads(1)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--system",
type=str,
default="forrester",
choices=["forrester", "synthetic"],
help="Which problem to run.",
)
parser.add_argument(
"--num-legacy",
type=int,
default=5,
help="How many legacy systems are available",
)
parser.add_argument(
"--data-per-legacy",
type=int,
default=5,
help="How many data from each legacy system are available",
)
parser.add_argument(
"--model",
type=str,
default="BEGP",
choices=["BEGP", "EGP", "BGP"],
help="Which model to run",
)
parser.add_argument(
"--seed", type=int, default=0, help="Random seed for experiment"
)
parser.add_argument("--show", action="store_true", help="show running best")
parser.add_argument("--save", action="store_true", help="Save results")
return parser.parse_args()
def get_bo_functions(model_name, model, system):
"""
functions for the metamodel being used.
* Train
* predict
* append
"""
if model_name == "BEGP":
return (
partial(train_function_egp, model),
partial(predict_function_begp, model, system),
append_function_egp,
)
elif model_name == "EGP":
return (
partial(train_function_egp, model),
partial(predict_function_egp, model, system),
append_function_egp,
)
elif model_name == "BGP":
return (model.fit, model.predict_y, model.append_data)
else:
raise ValueError("Unhandled model_name %s" % model_name)
# Train functions used during BO
def train_function_gpr(max_iter=100):
if model.num_data == 0:
return
if model.num_data == 1:
max_iter = min(max_iter, 5)
train_function_gptorch(model, method="L-BFGS-B", max_iter=max_iter)
# Append functions used during BO
def append_function_egp(x_new, y_new):
x_new, y_new = np.atleast_2d(x_new), np.atleast_2d(y_new)
n_new = x_new.shape[0]
xg_new = np.array([["0"] * system.general_dimensions] * n_new)
model.xr = torch.cat((model.xr, TensorType(x_new)))
model.xg = np.concatenate((model.xg, xg_new))
model.Y = torch.cat((model.Y, TensorType(y_new)))
def append_function_gpr(x_new, y_new):
model.X = torch.cat((model.X, TensorType(np.atleast_2d(x_new))))
model.Y = torch.cat((model.Y, TensorType(np.atleast_2d(y_new))))
def append_function_bgp(x_new, y_new):
    model.X = torch.cat((model.X, TensorType(np.atleast_2d(x_new))))
model.Y = torch.cat((model.Y, TensorType(np.atleast_2d(y_new))))
def train_callback():
if system.real_dimensions == 1 and system.has_function:
# Plot the posterior over the whole 1D input space
x_test = np.linspace(0, 1, 100)
m, v = bo.predict_function(x_test[:, np.newaxis])
m, u = m.flatten(), 2.0 * np.sqrt(v.flatten())
plt.figure()
plt.fill_between(x_test, m - u, m + u, color=[0.8] * 3)
plt.plot(x_test, m, label="Prediction")
plt.plot(x_test, eval_function(x_test), label="Ground truth")
plt.scatter(np.array(bo.x).flatten(), np.array(bo.y).flatten())
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.show()
if not system.has_function:
# Validation plot
m, v = bo.predict_function(bo.x_all)
m, u = m.flatten(), 2.0 * np.sqrt(v.flatten())
plt.figure()
plt.errorbar(bo.y_all.flatten(), m, u, color="C0", linestyle="none", marker="o")
plt.plot(plt.xlim(), plt.xlim(), linestyle="--", color="C1")
plt.xlabel("Targets")
plt.ylabel("Predictions")
plt.show()
def show_results(system, bo):
    plt.plot(np.minimum.accumulate(bo.y))
plt.xlabel("Number of evaluations")
plt.ylabel("Running best")
plt.show()
if system.real_dimensions == 1:
plt.figure()
plt.scatter(
np.array(bo.x).flatten(), np.array(bo.y).flatten(), c=np.arange(len(bo.y))
)
plt.xlabel("x")
plt.ylabel("f(x)")
plt.show()
if isinstance(bo, StaticDataset):
plt.figure()
# Colors: red (high) to blue (low)
color = lambda a: a * np.array([1, 0, 0]) + (1 - a) * np.array([0, 0, 1])
alpha = (bo.y_all - min(bo.y_all)) / (max(bo.y_all) - min(bo.y_all))
for i, p in enumerate(np.array(bo.p_best).T):
plt.plot(p, label="Datum %i" % i, color=color(alpha[i]))
plt.xlabel("Iteration")
plt.ylabel("p(best)")
# plt.legend()
plt.show()
if __name__ == "__main__":
t0 = time()
args = parse_args()
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
system = get_system(args.system, args.seed)
xr, xg, y = get_legacy_data(system, args)
model = initialize_model(xr, xg, y, args.model)
pre_train(model, args.model)
# A few things to get ready for the BO class:
assert system.general_dimensions == 1
if system.has_function:
eval_function = lambda xr: system(xr, [0])
xr_bounds = np.tile(np.array([get_x_bounds()]), (system.real_dimensions, 1))
bo = WithFunction(
xr_bounds, *get_bo_functions(args.model, model, system), eval_function
)
n_points = 10
else:
assert system.general_dimensions == 1
x_all, y_all = system.get_data([0])
bo = StaticDataset(x_all, y_all, *get_bo_functions(args.model, model, system))
n_points = x_all.shape[0]
# bo.register_pre_selection_callback(train_callback)
bo.add_points(n_points, verbose=True)
if args.show:
show_results(system, bo)
if args.save:
path = os.path.join(
os.path.dirname(__file__),
"output",
"%s_legacy_%i_data_%i"
% (args.system, args.num_legacy, args.data_per_legacy),
"results",
args.model,
)
filename = os.path.join(path, "%i.npy" % args.seed)
print("Saving results to %s" % filename)
if not os.path.isdir(path):
os.makedirs(path)
np.save(filename, np.array(bo.y).flatten())
print("Done. Run time = %i secs" % int(time() - t0))
```
#### File: experiments/regression/main.py
```python
import os
import sys
import argparse
from time import time
from functools import partial
import json
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
from gptorch.models.gpr import GPR
from gptorch.kernels import Rbf
from gptorch.util import TensorType
import gptorch
import torch
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if base_path not in sys.path:
sys.path.append(base_path)
from src.models import EGP, SafeGPR as GPR
from src.embedders import GaussianEmbedder, DeterministicEmbedder
from src.bayesian_optimization import WithFunction, StaticDataset
from src import systems
from src.util import train_test_split
util_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if util_path not in sys.path:
sys.path.append(util_path)
from experiment_utils import doe, get_x_bounds, get_system, get_legacy_data
from experiment_utils import pre_train, train_function_gptorch
from experiment_utils import predict_function_begp, predict_function_egp
import experiment_utils
torch.set_num_threads(1)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--system",
type=str,
default="forrester",
choices=["forrester", "synthetic"],
help="Which problem to run."
)
parser.add_argument(
"--current-task",
type=int,
default=0,
help="For static, specify which task is the non-legacy task"
)
parser.add_argument(
"--num-legacy",
type=int,
default=5,
help="How many legacy systems are available"
)
parser.add_argument(
"--data-per-legacy",
type=int,
default=5,
help="How many data from each legacy system are available"
)
parser.add_argument(
"--model",
type=str,
default="BEGP",
choices=["BEGP", "EGP", "BGP"],
help="Which model to run"
)
parser.add_argument("--seed", type=int, default=0, help="Random seed for experiment")
parser.add_argument(
"--train-current",
type=int,
default=1,
help="Number of training examples from the current task"
)
parser.add_argument("--show", action="store_true", help="Show results")
parser.add_argument("--save", action="store_true", help="Save results")
return parser.parse_args()
def get_data(system, args):
xr_leg, xg_leg, y_leg = get_legacy_data(system, args)
xr_current, xg_current, y_current = _get_current_task_data(system, args)
xrc_train, xr_test, xgc_train, xg_test, yc_train, y_test = train_test_split(
xr_current,
xg_current,
y_current,
train_size=args.train_current,
random_state=args.seed
)
xr_train = np.concatenate((xr_leg, xrc_train))
xg_train = np.concatenate((xg_leg, xgc_train))
y_train = np.concatenate((y_leg, yc_train))
return xr_train, xr_test, xg_train, xg_test, y_train, y_test
def _get_current_task_data(system, args):
assert system.general_dimensions == 1, "For now."
if system.has_function:
n = args.train_current + 1000 # 1000 for testing.
xr = doe(n, system.real_dimensions)
y = system(xr, [0]) # 0 for current task by convention.
else:
xr, y = system.get_data([0])
xg = np.array([["0"] * system.general_dimensions] * xr.shape[0])
return xr, xg, y
def initialize_model(xr, xg, y, model_type):
if model_type == "GP":
assert xg.shape[1] == 1
i = np.where(xg.flatten() == "0")[0]
xr, xg, y = xr[i], xg[i], y[i]
return GPR(
xr,
y,
Rbf(xr.shape[1], ARD=True),
likelihood=gptorch.likelihoods.Gaussian(variance=0.001)
)
else:
return experiment_utils.initialize_model(xr, xg, y, model_type)
def train(model, model_type):
if model_type == "BGP":
model.fit()
else:
pre_train(model, model_type)
def predict(model, model_type, system, xr):
"""
Predictions, assuming we're predicting on the current task, task "0".
"""
return {
"GP": model.predict_y,
"BGP": partial(_bgp_predict_wrapper, model),
"EGP": partial(predict_function_egp, model, system),
"BEGP": partial(predict_function_begp, model, system)
}[model_type](xr)
def _bgp_predict_wrapper(model, *args, **kwargs):
"""
Just to ensure that the outgoing shapes are right (i.e. 2D).
"""
mean, cov = model.predict_y(*args, **kwargs)
if len(mean.shape) == 1:
mean = mean[:, None]
if len(cov.shape) == 1:
cov = cov[:, None]
return mean, cov
def get_performance(means, stds, targets):
"""
Compute prediction metrics MNLP, MAE, and RMSE
"""
mnlp = -np.median(scipy.stats.norm.logpdf(targets, loc=means, scale=stds))
mae = np.abs(targets - means).mean()
rmse = np.sqrt(((targets - means) ** 2).mean())
return {"MNLP": mnlp, "MAE": mae, "RMSE": rmse}
def show_results(inputs, means, stds, targets):
assert targets.shape[1] == 1
means, stds, targets = means.flatten(), stds.flatten(), targets.flatten()
unc = 2.0 * stds
plt.figure()
plt.errorbar(targets, means, unc, linestyle="none", marker="o")
plt.plot(plt.xlim(), plt.xlim(), linestyle="--", color="C1")
plt.xlabel("Targets")
plt.ylabel("Predictions")
plt.show()
if inputs.shape[1] == 1:
inputs = inputs.flatten()
i = np.argsort(inputs)
plt.fill_between(inputs[i], (means - unc)[i], (means + unc)[i], color=[0.8] * 3)
plt.plot(inputs[i], targets[i], marker=".", color="C1", linestyle="none")
plt.show()
if __name__ == "__main__":
t0 = time()
args = parse_args()
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
system = get_system(args.system, args.current_task)
xr_train, xr_test, xg_train, xg_test, y_train, y_test = get_data(system, args)
model = initialize_model(xr_train, xg_train, y_train, args.model)
train(model, args.model)
# Assert xg_test is current task?
pred_mean, pred_std = predict(model, args.model, system, xr_test)
performance = get_performance(pred_mean, pred_std, y_test)
if args.show:
print(
"Performace:\n MNLP : %.6e\n MAE : %.6e\n RMSE : %.6e" %
(performance["MNLP"], performance["MAE"], performance["RMSE"])
)
print("show_results()...")
show_results(xr_test, pred_mean, pred_std, y_test)
if args.save:
path = os.path.join(
os.path.dirname(__file__),
"output",
"%s_currenttask_%i_legacytasks_%i_legacydata_%i_currentdata_%i" % (
args.system,
args.current_task,
args.num_legacy,
args.data_per_legacy,
args.train_current
),
"results",
args.model
)
filename = os.path.join(path, "%i.json" % args.seed)
print("Saving results to %s" % filename)
if not os.path.isdir(path):
os.makedirs(path)
with open(filename, "w") as f:
json.dump(performance, f)
print("Done. Run time = %i secs" % int(time() - t0))
```
#### File: BEBO-1/src/embedders.py
```python
import abc
import numpy as np
import torch
from torch.distributions.transforms import ExpTransform
from gptorch.model import Param
from gptorch.util import torch_dtype
class Embedder(torch.nn.Module):
"""
Embedding layer.
Maps one-dimensional categorical variables to latents with real value dimensions.
To embed a multi-dimensional input space, just make a different embedder for each
input dimension.
"""
def __init__(self, d_out):
super().__init__()
self._d_out = d_out
def forward(self, x):
# Clean inputs?
x = Embedder.clean_inputs(x)
return self._embed(x)
@property
def d_out(self):
return self._d_out
@staticmethod
def clean_inputs(x):
return [str(xi).replace(".", "dot") for xi in x]
@abc.abstractmethod
def _embed(self, x):
raise NotImplementedError()
class DeterministicEmbedder(Embedder):
"""
Deterministic embedding
"""
def __init__(self, d_out):
super().__init__(d_out)
self._loc = torch.nn.ParameterDict()
self._unseen_policy = "random"
@property
def loc(self):
"""
Posterior mean dictionary
"""
return self._loc
@property
def unseen_policy(self):
return self._unseen_policy
@unseen_policy.setter
def unseen_policy(self, val):
if not val in ["prior", "random"]:
raise ValueError("Unhandled unseen policy %s" % val)
self._unseen_policy = val
def _embed(self, x):
return torch.stack([self._embed_one(xi) for xi in x])
def _embed_one(self, x):
if x not in self._loc:
if self.unseen_policy == "random":
loc = torch.randn(self.d_out, dtype=torch_dtype)
elif self.unseen_policy == "prior":
loc = torch.zeros(self.d_out, dtype=torch_dtype)
self._loc[x] = torch.nn.Parameter(loc)
return self.loc[x]
class GaussianEmbedder(Embedder):
"""
Embedding layer.
Maps one-dimensional categorical variables to multivariate distributions in
Euclidean space.
Stochastic embedding to Gaussian distributions
"""
def __init__(self, d_out):
super().__init__(d_out)
# Key is an input;
# Value is a location (mean) / scale (std) for a Gaussian.
self._loc = torch.nn.ParameterDict()
self._scale = torch.nn.ParameterDict()
self._num_terms = 2
# How we are currently doing embeddings: according to the prior or posterior
self.mode = "prior"
# When True, embed calls go randomly to samples from the output
# distribution.
# When False, we embed to the mode of the distribution
self.random = True
# What to do with previously-unseen inputs ("random" or "prior")
self.unseen_policy = "random"
@property
def loc(self):
"""
Posterior mean dictionary
"""
return self._loc
@property
def scale(self):
"""
Posterior std dictionary
"""
return self._scale
def _embed(self, x):
"""
Embed all inputs
"""
return {"prior": self._embed_prior, "posterior": self._embed_posterior}[
self.mode
](x, self._get_epsilon(x))
def _embed_prior(self, x, epsilon):
"""
Map an individual input
"""
return torch.stack([epsilon[xi] for xi in x])
def _embed_posterior(self, x, epsilon):
for xi in x:
if xi not in self.loc:
# Randomly initialize to break symmetry and prevent posteriors from
# starting in the same spot
self._loc[xi] = torch.nn.Parameter({
"random": torch.randn,
"prior": torch.zeros
}[self.unseen_policy](self.d_out))
self._scale[xi] = Param({
"random": lambda s: 0.1 * torch.ones(s),
"prior": torch.ones
}[self.unseen_policy](self.d_out), transform=ExpTransform())
# NB "epsilon" takes care of whether self.random or not.
return torch.stack([
self.loc[xi] + self.scale[xi].transform() * epsilon[xi]
for xi in x
])
def _get_epsilon(self, x):
"""
We need a call to embed all data consistently.
This function gets a single epsilon for each distinct entry in x so that
we can make sure that e.g. "foo" always maps to the same thing.
"""
return {xi: torch.randn(self.d_out) if self.random else torch.zeros(self.d_out)
for xi in np.unique(x)}
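# Minimal usage sketch (added for illustration; not part of the original module).
# It only exercises the classes defined above.
if __name__ == "__main__":
    emb = DeterministicEmbedder(d_out=2)
    z = emb(["taskA", "taskA", "taskB"])  # (3, 2); repeated inputs share one point
    gemb = GaussianEmbedder(d_out=2)
    gemb.mode = "posterior"  # embed via the (randomly initialized) variational posterior
    zg = gemb(["taskA", "taskB", "taskB"])
    print(z.shape, zg.shape)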
```
#### File: src/systems/forrester.py
```python
import numpy as np
from .base import WithFunction
class Forrester(WithFunction):
def __init__(self):
super().__init__()
self.real_dimensions = 1
self.general_dimensions = 1
self.num_types = [1000]
self._fidelity_matrix = None
self._cache_fidelity_params()
def _call(self, x, i):
a, b, c = self._fidelity_matrix[i[0]]
f_hi = (6.0 * x - 2.0) ** 2 * np.sin(12.0 * x - 4.0)
return a * f_hi + b * (x - 0.5) + c
def _cache_fidelity_params(self):
# Initialize and cache:
rng_state = np.random.get_state()
np.random.seed(42)
self._fidelity_matrix = np.concatenate(
(
np.array([[1.0, 0.0, 0.0], [0.5, 10.0, -5.0]]),
np.array([[0.0, 0.0, -5.0]]) + np.array([[1.0, 10.0, 5.0]]) * \
np.random.rand(998, 3)
)
)
# ...And resume previous state
np.random.set_state(rng_state)
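# Illustrative notes (added): task 0 is the standard high-fidelity Forrester function
# (a=1, b=0, c=0), task 1 is the classic low-fidelity variant, and the remaining 998
# tasks draw (a, b, c) uniformly from [0, 1] x [0, 10] x [-5, 0]. A hedged usage
# sketch, assuming WithFunction.__call__(x, i) forwards to _call as in the experiment
# scripts:
#   system = Forrester()
#   x = np.linspace(0.0, 1.0, 5)[:, None]
#   y_hi = system(x, [0])   # (6x - 2)^2 sin(12x - 4)
#   y_lo = system(x, [1])   # 0.5 * y_hi + 10 * (x - 0.5) - 5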
```
#### File: BEBO-1/src/util.py
```python
import numpy as np
from sklearn.model_selection import train_test_split as _tts
def leave_one_in(*args, seed=None):
"""
Use for one-shot learning with small datasets because train-test split may
pick the same split frequently.
:param args: Things to be split.
:param seed: Which datum to leave in, actually. None=pick randomly.
"""
n = None
for arg in args:
if n is None:
n = len(arg)
else:
if len(arg) != n:
raise ValueError("Not all args have the same number of entries")
if seed is None:
seed = np.random.randint(0, high=n)
if seed >= n:
raise ValueError(
"Tried to leave out entry %i, but only %i entries exist." %
(seed, n)
)
i_all = set(np.arange(n).tolist())
i_train = [seed]
i_test = list(i_all - set(i_train))
out = []
for arg in args:
out.append(arg[i_train])
out.append(arg[i_test])
return tuple(out)
def train_test_split(*args, **kwargs):
"""
Train-test split that allows for train_size=0 ("zero-shot" learning)
"""
# Zero-shot
if "train_size" in kwargs and kwargs["train_size"] == 0:
outputs = []
for a in args:
outputs += [a[:0], a]
return outputs
# One-shot--use leave-one-in
elif "train_size" in kwargs and kwargs["train_size"] == 1:
seed = kwargs["random_state"] if "random_state" in kwargs else None
return leave_one_in(*args, seed=seed)
elif "test_size" in kwargs and kwargs["test_size"] == 0:
raise ValueError("Test size must be positive")
else: # Nonzero-shot
return _tts(*args, **kwargs)
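# Minimal usage sketch (added for illustration; not part of the original module).
if __name__ == "__main__":
    x = np.arange(10)[:, None]
    y = 2.0 * x
    # One-shot split: exactly one training example, chosen by random_state.
    x_tr, x_te, y_tr, y_te = train_test_split(x, y, train_size=1, random_state=3)
    print(x_tr.shape, x_te.shape)  # (1, 1) (9, 1)
    # Zero-shot split: empty training set, everything goes to test.
    splits = train_test_split(x, y, train_size=0)
    print(splits[0].shape, splits[1].shape)  # (0, 1) (10, 1)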
``` |
{
"source": "212726320/BHPM-Ultrasound",
"score": 3
} |
#### File: jax_code/models/kernels.py
```python
from functools import partial
import jax
import jax.numpy as np
from .transforms import transform_params
def _squared_distance(x1, x2, scales=None):
z1, z2 = (x1, x2) if scales is None else (x1 / scales, x2 / scales)
return ( # clip_up( FIXME
np.sum(z1 * z1, axis=1, keepdims=True)
- 2.0 * z1 @ z2.T
+ np.sum(z2 * z2, axis=1, keepdims=True).T
)
_remat_squared_distance = jax.remat(_squared_distance)
def vmap(k, diag=False):
"""
Vectorize a "single" kernel of the form k(params, x1, x2)
diag: k(params, x): (N,DX) -> (N,)
full: k(params, x1, x2): (N,DX), (M,DX) -> (N,M)
"""
if diag:
# k(params, x)
return jax.vmap(lambda params, x: k(params, x, x), (None, 0))
else:
# k(params, x1, x2)
inside = jax.vmap(lambda params, x1, x2: k(params, x1, x2), (None, None, 0))
return jax.vmap(lambda params, x1, x2: inside(params, x1, x2), (None, 0, None))
# Is this faster?
# return jax.vmap(
# lambda params, x1, x2: jax.vmap(
# lambda x2: k(params, x1, x2)
# )(x2),
# (None, 0, None)
# )
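# Usage sketch (added for illustration): `vmap` lifts a kernel defined on single input
# vectors, k(params, x1, x2) -> scalar, to a batched kernel. For example, with the
# rbf() factory below:
#   k = rbf()
#   k_full = vmap(k["apply_single"])             # (params, (N,DX), (M,DX)) -> (N, M)
#   k_diag = vmap(k["apply_single"], diag=True)  # (params, (N,DX)) -> (N,)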
def periodic():
"""
From Duvenaud, "Automatic model construction with Gaussian processes" (2014)
Fig. 2.1
"""
t_wrapper = partial(transform_params, transform=np.exp)
def init_fun(rng, input_shape):
params = {
"raw_variance": np.array(0.0),
"raw_scale": np.array(0.0),
"raw_periods": np.zeros((input_shape[1],)),
}
return (input_shape[0], input_shape[0]), params
@t_wrapper
def apply_fun(params, x1, x2):
r = np.sqrt(_squared_distance(x1, x2, scales=params["periods"]))
return params["variance"] * np.exp(
-params["scale"] * np.power(np.sin(np.pi * r), 2)
)
@t_wrapper
def apply_diag_fun(params, x):
return params["variance"] * np.ones(x.shape[0])
@t_wrapper
def apply_single_fun(params, x1, x2):
"""
Maps a pair of 1D vectors to a scalar (use this for grads)
"""
dr = (x1 - x2) / params["periods"]
r = np.sqrt(np.dot(dr, dr))
return params["variance"] * np.exp(
-params["scale"] * np.power(np.sin(np.pi * r), 2)
)
return {
"init": init_fun,
"apply": apply_fun,
"apply_diag": apply_diag_fun,
"apply_single": apply_single_fun,
}
def rbf():
t_wrapper = partial(transform_params, transform=np.exp)
def init_fun(rng, input_shape, scales=None, variance=None):
params = {
"raw_variance": np.log(variance) if variance is not None else np.array(0.0),
"raw_scales": np.log(scales)
if scales is not None
else np.zeros((input_shape[1],)),
}
return (input_shape[0], input_shape[0]), params
@t_wrapper
def apply_fun(params, x1, x2, remat=False):
"""
        :param remat: if True, wrap the squared-distance calculation in a remat to
        prevent an XLA fusion bug with x64.
"""
sd = _squared_distance if not remat else _remat_squared_distance
return params["variance"] * np.exp(-sd(x1, x2, scales=params["scales"]))
@t_wrapper
def safe_apply_func(params, x1, x2):
# "Safe" version that doesn't cause https://github.com/google/jax/issues/3122
return vmap(apply_single_fun)(params, x1, x2)
@t_wrapper
def apply_diag_fun(params, x):
return params["variance"] * np.ones(x.shape[0])
@t_wrapper
def apply_single_fun(params, x1, x2):
"""
Maps a pair of 1D vectors to a scalar (use this for grads)
"""
dr = (x1 - x2) / params["scales"]
r2 = np.dot(dr, dr)
return params["variance"] * np.exp(-r2)
return {
"init": init_fun,
"apply": apply_fun,
"apply_diag": apply_diag_fun,
"apply_single": apply_single_fun,
"safe_apply": safe_apply_func,
}
```
#### File: jax_code/models/likelihoods.py
```python
from functools import partial
import jax.numpy as np
from .transforms import transform_params
def gaussian():
t_wrapper = partial(transform_params, transform=np.exp)
def init_fun(rng, input_shape, noise=None):
return (
input_shape,
{"raw_noise": np.log(noise) if noise is not None else np.array(-2.0)},
)
@t_wrapper
def apply_fun(params, mean, cov):
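        # Add the (exp-transformed) observation noise to the covariance:
        # elementwise if `cov` is a vector of marginal variances, or as noise * I
        # if `cov` is a full covariance matrix. The mean is passed through unchanged.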
if cov.ndim == 1:
cov = cov + params["noise"]
else:
cov = cov + params["noise"] * np.eye(*cov.shape)
return mean, cov
return {"init": init_fun, "apply": apply_fun}
```
#### File: examples/finite_difference/main.py
```python
import abc
import json
import os
from argparse import ArgumentParser
from collections import namedtuple
from time import time
from celluloid import Camera
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.io import loadmat
from torch import nn
from tqdm import tqdm
from bhpm.util import plot_triple, timestamp
DIM = 2 # spatial dimension of the problem
torch.manual_seed(42)
# torch.set_default_dtype(torch.double)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Utilities ============================================================================
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--outdir",
type=str,
default=None,
help="If provided, load model from specified outdir instead of training a new one",
)
parser.add_argument(
"--crack",
action="store_true",
help="Use the crack case (overridden if outdir is provided)",
)
return parser.parse_args()
def ensure_config(args):
if args.outdir is None:
outdir = os.path.join(os.path.dirname(__file__), "output", timestamp())
crack = args.crack
new_model = True
if not os.path.isdir(outdir):
os.makedirs(outdir)
with open(os.path.join(outdir, "args.json"), "w") as f:
json.dump(args.__dict__, f, indent=4)
else:
outdir = args.outdir
if not os.path.isdir(outdir):
raise RuntimeError("Failed to find specified run at %s" % outdir)
with open(os.path.join(outdir, "args.json"), "r") as f:
crack = json.load(f)["crack"]
new_model = not os.path.isfile(os.path.join(outdir, "solver.pt"))
return outdir, crack, new_model
def squared_distance(x1, x2):
return (
torch.pow(x1, 2).sum(dim=1, keepdim=True)
- 2.0 * x1 @ x2.T
+ torch.pow(x2, 2).sum(dim=1, keepdim=True).T
)
def _reverse(x):
"""
Reverse a torch array since [::-1] isn't allowed by the current API
"""
return x[torch.arange(len(x) - 1, -1, -1)]
class Elementwise(nn.Module):
def __init__(self, f):
super().__init__()
self._f = f
def forward(self, x):
return self._f(x)
# Data =================================================================================
_us_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "ultrasound_data")
)
_data_info = (
# 0: Grains
{
"filename": os.path.abspath(
os.path.join(_us_dir, "Hsample SAW 5MHz n2", "wvf.mat")
),
"crop": ((180, 230), (100, 150), (370, None)), # T,Y,X
},
# 1: Crack, 0 degrees
{
"filename": os.path.abspath(
os.path.join(
_us_dir,
"30Jan15 Nist crack1 240x240 12x12mm avg20 5MHz 0deg grips",
"wvf.mat",
)
),
"crop": ((None, None), (None, None), (None, None)),
},
)
def _load_data(case, verbose=False):
"""
Load data cube from a file
:return: namedtuple "Data" with np.ndarrays w/ following shapes:
* x (NX,)
* y (NY,)
* t (NT,)
* wavefield (NT, NY, NX)
"""
filename = _data_info[case]["filename"]
if not os.path.isfile(filename):
raise FileNotFoundError(
"Failed to find ultrasound data at %s.\nHave you downloaded it from Box?"
% filename
)
wvf_mat = np.array(loadmat(filename)["wvf"])[:200] # Stored as (NY,NX,NT)
wvf = np.transpose(wvf_mat, (2, 0, 1)) # (NT,NY,NX)
# crop:
crop = _data_info[case]["crop"]
wvf = wvf[crop[0][0] : crop[0][1], crop[1][0] : crop[1][1], crop[2][0] : crop[2][1]]
nt, ny, nx = wvf.shape
if verbose:
print("Loaded %i measurements after cropping" % wvf.size)
# Get coordinates
xy_scale = 0.05 # Units: mm
t_scale = 1.0 / 5.0 # Units: us (5 MHz sampling in time)
x = xy_scale * np.arange(nx)
y = xy_scale * np.arange(ny)
t = t_scale * np.arange(nt)
return namedtuple("_Data", ("x", "y", "t", "wvf"))(x, y, t, wvf)
def get_data(crack=False):
"""
:return: (NT,NY,NX)
"""
if crack:
data = _load_data(1)
wvf = data.wvf[570:630, 90:150, 70:160] # 60 x 60 x 90
rot = False
else:
data = _load_data(0)
wvf = data.wvf # 50 x 50 x 30
# Rotate so source comes from below, not the right
wvf = np.transpose(wvf[:, ::-1], (0, 2, 1)).copy() # 50 x 30 x 50
rot = True
return namedtuple("Data", ("wvf", "rot"))(torch.Tensor(wvf).to(device), rot)
# Solver ===============================================================================
class CField(nn.Module):
"""
Parent class for speed of sound fields
"""
def forward(self, x, y):
return self._forward(self._tile_cfield_inputs(x, y)).reshape(
(y.numel(), x.numel())
)
@staticmethod
def _tile_cfield_inputs(x, y):
rev_y = _reverse(y)
nx, ny = x.numel(), y.numel()
x_ = torch.stack([x for _ in range(ny)])
y_ = torch.stack([rev_y for _ in range(nx)]).T
xy = torch.stack((x_.flatten(), y_.flatten())).T
return xy
@abc.abstractmethod
def _forward(self, xy):
"""
:param xy: (NY*NX, 2)
:return: (NY*NX, 1)
"""
raise NotImplementedError()
class CFieldConstant(CField):
def __init__(self):
super().__init__()
self._raw_c = nn.Parameter(torch.tensor(0.0))
self._c_transform = torch.distributions.transforms.ExpTransform()
@property
def c(self):
return self._c_transform(self._raw_c)
def _forward(self, xy):
return self.c + 0.0 * xy[:, [0]]
class CFieldNet(CField):
def __init__(self, units=64, layers=5):
super().__init__()
self._net = nn.Sequential(
nn.Linear(2, units),
Elementwise(torch.sin),
*([nn.Linear(units, units), Elementwise(torch.sin)] * (layers - 1)),
nn.Linear(units, 1),
Elementwise(torch.exp),
)
# Init tweaks
self._net._modules["0"].weight.data = self._net._modules["0"].weight.data * 10.0
for i in range(1, len(self._net._modules)):
istr = str(i)
if hasattr(self._net._modules[istr], "bias"):
self._net._modules[istr].bias.data = (
self._net._modules[istr].bias.data * 0.0
)
def _forward(self, xy):
return self._net(xy)
def get_solver_params(data, t_edge, x_edge, y_edge, t_oversample, s_oversample):
"""
Reparameterize in terms of quantities that are relative to the data being analyzed
"""
nt_data, ny_data, nx_data = data.wvf.shape
dt = 0.02 / t_oversample
h = 0.05 / s_oversample
window_stride = (t_oversample, s_oversample, s_oversample)
window_corner = (
t_edge * t_oversample,
y_edge * s_oversample,
x_edge * s_oversample,
)
nt = nt_data * t_oversample + window_corner[0] + 2
ny = s_oversample * (ny_data + y_edge + 2)
nx = s_oversample * (nx_data + 2 * x_edge)
return {
"nx": nx,
"ny": ny,
"nt": nt,
"dt": dt,
"h": h,
"window_corner": window_corner,
"window_stride": window_stride,
"data": data,
}
class Solver(nn.Module):
"""
Spatial units: mm
temporal units: usec
"""
def __init__(
self,
nx=240,
ny=140,
nt=5000,
dt=0.005, # Data is 0.2 per
h=0.05, # Data is 0.05 per
window_corner=None,
window_stride=None,
data=None,
):
super().__init__()
self._h = h
self._dt = dt
self._x = nn.Parameter(h * torch.arange(nx), requires_grad=False)
self._y = nn.Parameter(h * torch.arange(ny), requires_grad=False)
self._t = nn.Parameter(dt * torch.arange(-2, nt), requires_grad=False)
# T,Y,X
self._window_corner = (30, 20, 30) if window_corner is None else window_corner
self._window_stride = (40, 2, 2) if window_stride is None else window_stride
# self.c_field = CFieldConstant()
self.c_field = CFieldNet()
# Source f(x,t) across the whole bottom of the simulation domain
units, layers = 32, 5
self.source = nn.Sequential(
nn.Linear(2, units),
Elementwise(torch.sin),
*([nn.Linear(units, units), Elementwise(torch.sin)] * (layers - 1)),
nn.Linear(units, 1),
)
# Apply physics via convolution
self.step_kernel = nn.Conv2d(
1, 1, 3, bias=False, padding=1, padding_mode="replicate"
)
self.step_kernel.requires_grad_(False)
# Laplacian kernel:
# 0 1 0
# 1 -4 1
# 0 1 0
self.step_kernel.weight.data = torch.Tensor(
[[0.0, 1.0, 0.0], [1.0, -4.0, 1.0], [0.0, 1.0, 0.0]]
)[None][None]
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def t(self):
return self._t
@property
def nx(self):
return len(self.x)
@property
def ny(self):
return len(self.y)
@property
def nt(self):
# Account for the two "dummy" inputs used for the two IC slices.
return len(self.t) - 2
@property
def window_corner(self):
return self._window_corner
@property
def window_stride(self):
return self._window_stride
def forward(self):
return self.simulate()
def simulate(self, verbose=False, saveas=None):
"""
:return: (NT, NY, NX)
"""
c = self.c_field(self.x, self.y)
coef = (self._dt * c / self._h) ** 2
source_f = self.source(self._source_points()).reshape((self.nt + 2, self.nx))
def step(u, u_prev, f):
"""
perform a time step
"""
u_step = coef * self.step_kernel(u[None][None])[0, 0]
u_next = self._apply_source(-u_prev + 2.0 * u + u_step, f)
return u_next
u_list = self._initial_condition(source_f[:2])
if verbose:
print("Solve...")
f_list = tqdm(source_f[2:])
else:
f_list = source_f[2:]
for f in f_list:
u_list.append(step(u_list[-1], u_list[-2], f))
u = torch.stack(u_list[2:]) # NT, NY, NX
# Visualize
if saveas is not None:
print("Save .npy...")
np.save(saveas + ".npy", u.detach().cpu().numpy())
print("Animate...")
fig = plt.figure()
ax = fig.gca()
camera = Camera(fig)
for ui in tqdm(u.detach().cpu().numpy()[::5]):
ax.imshow(ui, vmin=-0.3, vmax=0.3) # , cmap="bone")
camera.snap()
animation = camera.animate(interval=1)
animation.save(saveas + ".gif")
plt.close()
print("Done!")
return u
def apply_window(self, u):
c, s = self._window_corner, self._window_stride
return u[c[0] :: s[0], c[1] :: s[1], c[2] :: s[2]]
def to_data(self, data):
nt, ny, nx = data.wvf.shape
return self.apply_window(self.simulate())[:nt, :ny, :nx]
def loss(self, data):
"""
Assume dense measurement data for the moment
"""
u = self.to_data(data)
if not all([s_sim == s_data for s_sim, s_data in zip(u.shape, data.wvf.shape)]):
msg = (
"Simulation window can't match data (probably too small).\n"
+ "Simulation shape : "
+ str(u.shape)
+ "\n"
+ "Data shape : "
+ str(data.wvf.shape)
)
raise ValueError(msg)
return nn.MSELoss()(u, data.wvf)
def _source_points(self):
"""
:return: (x, t), shape ((NT+2)*NX, 2)
"""
x_tiled = torch.stack([self.x for _ in range(self.nt + 2)])
t_tiled = torch.stack([self.t for _ in range(self.nx)]).T
return torch.stack((x_tiled.flatten(), t_tiled.flatten())).T
def _initial_condition(self, source_f):
"""
TODO complete IC field u0(x,y) instead of initializing at zero?
:param source_f: (2, NX)
"""
return [
self._apply_source(torch.zeros(self.ny, self.nx).to(fi.device), fi)
for fi in source_f
]
def _apply_source(self, u, f):
"""
:param u: (NY,NX)
:param f: (NX,)
"""
u[-1] = f
u[-2] = f
return u
# Inference ============================================================================
def train_from_scratch(data, outdir):
iters = 10000
lr_start = 3.0e-3
lr_end = 3.0e-4
solver = Solver(**get_solver_params(data, 10, 10, 10, 40, 2)).to(device)
optimizer = torch.optim.Adam(solver.parameters(), lr=lr_start)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, iters, eta_min=lr_end
)
animate(solver, data, outdir, i=0)
plot_crack(solver, outdir, i=0, wvf=data.wvf, rot=data.rot)
losses = []
t0 = time()
for i in range(1, iters + 1):
losses.append(train(solver, optimizer, data))
print(
"t = %6i | Iteration %6i / %6i | Loss = %.2e"
% (time() - t0, i, iters, losses[-1])
)
if i % 100 == 0:
torch.save(
solver.state_dict(), os.path.join(outdir, "solver_ckpt_%i.pt" % i)
)
animate(solver, data, outdir, i=i)
plot_crack(solver, outdir, i=i, wvf=data.wvf, rot=data.rot)
scheduler.step()
torch.save(solver.state_dict(), os.path.join(outdir, "solver.pt"))
plt.figure()
plt.semilogy(losses)
plt.xlabel("Iteration")
plt.ylabel("Loss")
[plt.savefig(os.path.join(outdir, "loss." + ft)) for ft in ("png", "pdf")]
plt.close()
return solver
def load_solver(data, filename):
solver = Solver(**get_solver_params(data, 10, 10, 10, 40, 2)).to(device)
solver.load_state_dict(torch.load(filename))
return solver
def train(solver, optimizer, data):
solver.train()
optimizer.zero_grad()
loss = solver.loss(data)
loss.backward()
optimizer.step()
return loss.item()
# Visualization ========================================================================
def animate(solver, data, outdir, i=None):
"""
Make an animation to show how we did.
"""
filename = "animation.gif" if i is None else ("animation_iter_%i.gif" % i)
fig = plt.figure(figsize=(8, 4))
ax_solver = fig.add_subplot(1, 2, 1)
ax_data = fig.add_subplot(1, 2, 2)
camera = Camera(fig)
def _plot(ax, u, title=None, rot=False):
if rot:
# Unrotate
u = u.T[::-1]
ax.imshow(u, vmin=-10.0, vmax=10.0)
ax.set_xticks(())
ax.set_yticks(())
if title is not None:
ax.set_title(title)
with torch.no_grad():
for u_sol, u_data in zip(
solver.to_data(data).detach().cpu().numpy(), data.wvf.detach().cpu().numpy()
):
_plot(ax_solver, u_sol, "Solver", rot=data.rot)
_plot(ax_data, u_data, "Data", rot=data.rot)
camera.snap()
animation = camera.animate()
print("Saving animation...")
animation.save(os.path.join(outdir, filename))
print("Done!")
plt.close()
def plot_crack(solver, outdir, i=None, wvf=None, rot=False):
"""
:param wvf: If provided, clip to it
"""
filebase = "crack" if i is None else ("crack_%i" % i)
with torch.no_grad():
c = solver.c_field(solver.x, solver.y).detach().cpu().numpy()
if wvf is not None:
# Don't subsample
c = c[
solver._window_corner[1] : solver._window_stride[1] * wvf.shape[1],
solver._window_corner[2] : solver._window_stride[2] * wvf.shape[2],
]
if rot:
c = c.T[::-1]
plt.figure()
plt.imshow(c, vmin=0.0)
plt.axis("off")
plt.colorbar()
[
plt.savefig(os.path.join(outdir, filebase + ft), bbox_inches="tight")
for ft in (".png", ".pdf")
]
plt.close()
def plot_observed(solver, data, idx, outdir):
"""
Compare simulation to data at a specific time index `idx` and save the figs.
"""
with torch.no_grad():
pred = solver.to_data(data)[idx].detach().cpu().numpy()
targets = data.wvf[idx].detach().cpu().numpy()
if data.rot:
pred, targets = [z.T[::-1] for z in (pred, targets)]
vmin, vmax = -10.0, 10.0
plot_triple(targets, pred, vmin, vmax, saveas=os.path.join(outdir, "observation"))
# ======================================================================================
def main(args):
outdir, crack, new_model = ensure_config(args)
data = get_data(crack=crack)
if new_model:
solver = train_from_scratch(data, outdir)
else:
solver = load_solver(data, os.path.join(outdir, "solver.pt"))
animate(solver, data, outdir)
print("Plotting figures...")
plot_observed(solver, data, int(0.75 * data.wvf.shape[0]), outdir)
plot_crack(solver, outdir, wvf=data.wvf, rot=data.rot)
print("...Done!")
if __name__ == "__main__":
main(parse_args())
```
#### File: BHPM-Ultrasound/tests/test_demos.py
```python
import importlib
import inspect
import os
import sys
import pytest
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
jax_dirs = ["examples", "jax"]
torch_dirs = ["examples", "torch"]
def generate(base, dirs):
@pytest.mark.demo
def wrapped():
path = os.path.join(base_path, *dirs)
need_path = path not in sys.path
if need_path:
sys.path.append(path)
m = importlib.import_module(".".join(dirs + [base]))
assert hasattr(m, "main"), "Need a main()"
main_kwargs = (
{"testing": True}
if "testing" in inspect.signature(m.main).parameters
else {}
)
m.main(**main_kwargs)
if need_path:
sys.path.pop(-1)
return wrapped
locals().update({"test_" + name: generate(name, jax_dirs) for name in ["mo_kernel"]})
# locals().update(
# {
# "test_" + name: generate(name, torch_dirs)
# for name in ["multi_output", "1st_order_pde"]
# }
# )
``` |
{
"source": "212726320/PIRATE-1",
"score": 3
} |
#### File: examples/adaptive/ode.py
```python
import operator
import os
import random
import sys
from time import time
from typing import Callable
import deap.gp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from pirate.function import Function
import pirate.operator as pirate_operator
from pirate.data.experiment import Experiment
from pirate.experiment_util import (
parse_args_adaptive,
check_answer,
ground_truth_fitness,
)
from pirate.models.gaussian_process import GP
from pirate.symbolic_regression.base import get_primitive_set
from pirate.symbolic_regression.base import SymbolicRegression
from pirate.symbolic_regression.fitness import DifferentialResidual
from pirate.symbolic_regression.fitness import add_memoization
from pirate.systems.ode import SecondOrder
from pirate.util import rms
torch.set_default_dtype(torch.double)
FitnessFunction = add_memoization(DifferentialResidual)
def make_experiments(system, dataset):
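    # Fit a GP surrogate to the sampled trajectory (t, x) and wrap its posterior
    # mean as a differentiable Function; this smoothed x(t) is what candidate
    # expressions (and their derivatives) are evaluated on when computing the
    # differential residual. The returned fitness_threshold is the residual of
    # the known ground-truth model, used as a stopping target in iteration().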
t = dataset["t"].values
lhs = {}
x = dataset["x"].values
gp = GP(t[:, np.newaxis], x[:, np.newaxis])
gp.train(num_epochs=1000, learning_rate=0.1) # , show_loss=True)
gp.predict_mode()
# Extract function for modeled dataset
fx = Function(gp.model.get_mean_function())
experiments = [Experiment({"x": fx}, pd.DataFrame({"t": t}))]
# Extras
fitness_threshold = ground_truth_fitness(experiments, ground_truth_model)
return experiments, fitness_threshold
def ground_truth_model(experiment) -> Function:
x = experiment.left_hand_side["x"]
return x.gradient(0).gradient(0) + x.gradient(0) + x
def get_best_individual_residual(symbolic_regression) -> Callable:
"""
Take the SR object and build a model (function) out of its fittest
individual.
"""
f_best = deap.gp.compile(
expr=symbolic_regression.hall_of_fame[0], pset=symbolic_regression.primitive_set
)
def func(experiment, x):
# Takes care of the inhomogeneous term automatically!
residual = symbolic_regression.fitness_function.residual(
f_best, experiment, inputs=x
)
assert residual.shape[1] == 1 # Need 1D output!
return residual.flatten()
return func
def find_next_x(symbolic_regression, t_test):
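    # Acquisition step for the adaptive loop: evaluate the absolute residual of
    # the current fittest expression at every candidate time, then pick the time
    # with the largest residual that is not already in the training set.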
residuals = [] # Acquisition function samples
residual_function = get_best_individual_residual(symbolic_regression)
for experiment in symbolic_regression.fitness_function.experiments:
residuals.append(residual_function(experiment, t_test[:, np.newaxis]))
abs_residual = np.abs(np.array(residuals).mean(axis=0))
# Find the next point that isn't already being used:
i_list = np.argsort(abs_residual)[::-1]
for i in i_list:
t_new = t_test[i]
if (
t_new
in symbolic_regression.fitness_function.experiments[0].data["t"].values
):
print("Already added %f, use the next" % t_new)
continue
max_residual = abs_residual[i]
return t_new, max_residual
raise RuntimeError("Ran out of data!")
def iteration(system, dataset, testing):
experiments, fitness_threshold = make_experiments(system, dataset)
# Do symbolic regression
t_start = time()
operators = (
operator.add,
operator.neg,
operator.mul,
pirate_operator.ScalarGradient(0),
)
primitive_set = get_primitive_set(operators, variable_names=("x",))
fitness_function = FitnessFunction(
experiments, primitive_set, differential_operators=["ddx0"]
)
symbolic_regression = SymbolicRegression(
primitive_set,
fitness_function,
population=512,
mating_probability=0.2,
mutation_probability=0.8,
)
symbolic_regression.run(
iterations=2 if testing else 200,
verbose=True,
fitness_threshold=(0.99 * fitness_threshold,),
)
t_elapsed = time() - t_start
print("Elapsed time = {}".format(t_elapsed))
return symbolic_regression
def append_datum(system, dataset, t):
i = np.where(system._t == t)[0][0]
x = system._x[i]
dataset_new = dataset.append(pd.DataFrame({"t": [t], "x": [x]})).reset_index(
drop=True
)
print("New dataset:\n", dataset_new)
return dataset_new
def main(testing=False):
args = parse_args_adaptive(testing)
if not args.no_write:
result_file = os.path.join(
os.path.dirname(__file__),
"..",
"output",
"ode_adaptive",
"results",
"s_%i.txt" % args.seed,
)
result_dir = os.path.dirname(result_file)
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
if os.path.isfile(result_file) and not args.overwrite:
print("Already found %s; exit." % result_file)
exit()
# RNG seeds
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# Dataset & experiment
system = SecondOrder()
system.system_parameters = np.array([10.0, 5.0])
dataset = system.sample(4) # initial design
n, residuals = [], []
for step in range(2 if testing else 256):
symbolic_regression = iteration(system, dataset, testing)
t_new, residual = find_next_x(symbolic_regression, system._t)
n.append(dataset.shape[0])
residuals.append(residual)
print("This iteration:\n", n[-1], residuals[-1])
if residual < args.threshold:
break
else:
dataset = append_datum(system, dataset, t_new)
# Check if we got it right
t_test = system._t.copy()
res = check_answer(t_test, symbolic_regression, ground_truth_model)
print("Result = %i" % res)
if not args.no_write:
np.savetxt(result_file, [res], fmt="%d")
print("N:\n", n)
print("Residuals:\n", residuals)
if __name__ == "__main__":
main()
```
#### File: pirate/symbolic_regression/algorithms.py
```python
import abc
from typing import Tuple
from warnings import warn
from deap.algorithms import varAnd, varOr
from deap import tools
class AlgorithmState(object):
"""
Class handling the state of the algorithm
"""
def __init__(self, ngen: int):
self.ngen = ngen
self.complete_iterations = 0
# How many new fitness evaluations we do in the current iteration
self.nevals_this_iteration = None
# Whether something has happened that means we need to terminate the
# algorithm:
self.flag_break = False
# Ensure we're fresh:
self.reset()
def proceed(self) -> bool:
"""
Whether we are clear to proceed with another generation of the algorithm
"""
if self.flag_break:
return False
# Other criteria...
if self.ngen is None:
raise RuntimeError("Must set ngen")
return self.complete_iterations < self.ngen
def reset(self, ngen: int = None):
"""
Reset before starting iteration through generations
The values we reset to are:
ngen -> what was provided (if applicable)
complete_iterations -> 0
flag_break -> False
"""
if ngen is not None:
self.ngen = ngen
if self.ngen is None:
raise ValueError("Must provide number of generations!")
self.complete_iterations = 0
self.flag_break = False
class Base(abc.ABC):
"""
Base class for the algorithms
"""
_gen_end_hooks = []
def __init__(
self,
population,
toolbox,
cxpb,
mutpb,
ngen,
stats=None,
halloffame=None,
verbose=__debug__,
fitness_threshold: Tuple[float] = None,
):
"""
:param fitness_threshold: If the fittest individual beats this
threshold, then signal to terminate the loop.
"""
self.population = population
self.toolbox = toolbox
self.cxpb = cxpb
self.mutpb = mutpb
self._algorithm_state = AlgorithmState(ngen)
self.stats = stats
self.halloffame = halloffame
self.verbose = verbose
self.fitness_threshold = fitness_threshold
# Hooks for the end of each iteration:
self._gen_end_hooks = [
self._increment_completed_generations,
self._update_logbook,
self._check_threshold,
]
# Initialized in self._pre_run():
self.logbook = None
def run(self, ngen: int = None):
self._pre_run()
self._algorithm_state.reset(ngen=ngen)
while self._algorithm_state.proceed():
self._algorithm_state.nevals_this_iteration = self._generation()
for hook in self._gen_end_hooks:
hook()
self._post_run()
# Post-iteration hooks:
def _update_logbook(self):
"""
Append the current generation statistics to the logbook
"""
record = self.stats.compile(self.population) if self.stats else {}
self.logbook.record(
gen=self._algorithm_state.complete_iterations,
nevals=self._algorithm_state.nevals_this_iteration,
**record
)
if self.verbose:
print(self.logbook.stream)
def _increment_completed_generations(self):
"""
Hook for end of an iteration
Increment the number of completed iterations
"""
self._algorithm_state.complete_iterations += 1
def _check_threshold(self):
"""
Check to see if the fittest individual beats the provided threshold.
If so, then set the algorithm state to flag for termination.
"""
if self.fitness_threshold is None:
return
for ind in self.population:
if all(
[
fit_i < thresh_i
for fit_i, thresh_i in zip(
ind.fitness.values, self.fitness_threshold
)
]
):
self._algorithm_state.flag_break = True
break
# Methods implementing before, during, and after the loop:
def _generation(self) -> int:
"""
This function shall implement a single iteration (generation) of the
evolutionary algorithm
:return: number of function evaluations this iteration
"""
warn("Base class does not evolve the population.")
def _pre_run(self):
"""
Tasks to be run before the main loop of the algorithm
"""
self.logbook = tools.Logbook()
self.logbook.header = ["gen", "nevals"] + (
self.stats.fields if self.stats else []
)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in self.population if not ind.fitness.valid]
fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if self.halloffame is not None:
self.halloffame.update(self.population)
record = self.stats.compile(self.population) if self.stats else {}
self.logbook.record(gen=0, nevals=len(invalid_ind), **record)
if self.verbose:
print(self.logbook.stream)
def _post_run(self):
"""
Any post-hooks we might want after the loop terminates
"""
pass
class EvolutionaryAlgorithmSimple(Base):
"""
cf. deap.algorithms.eaSimple
"""
def _generation(self) -> int:
# Select the next generation individuals
offspring = self.toolbox.select(self.population, len(self.population))
# Vary the pool of individuals
offspring = varAnd(offspring, self.toolbox, self.cxpb, self.mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if self.halloffame is not None:
self.halloffame.update(offspring)
# Replace the current population by the offspring
self.population[:] = offspring
return len(invalid_ind)
def eaSimple(
population,
toolbox,
cxpb,
mutpb,
ngen,
stats=None,
halloffame=None,
verbose=__debug__,
fitness_threshold: Tuple[float] = None,
):
"""
Wrapper to conform to the API for DEAP
Additional kwargs:
:param fitness_threshold: See Base
"""
alg = EvolutionaryAlgorithmSimple(
population,
toolbox,
cxpb,
mutpb,
ngen,
stats=stats,
halloffame=halloffame,
verbose=verbose,
fitness_threshold=fitness_threshold,
)
alg.run()
return alg.population, alg.logbook
```
#### File: symbolic_regression/calibration/functional.py
```python
import deap.gp
import matplotlib.pyplot as plt
import pyro
from pyro.distributions import Distribution, MultivariateNormal, Normal
from pyro.distributions.transforms import LowerCholeskyTransform
import torch
from torch.distributions import Distribution, Normal
from tqdm import tqdm
from ...data.experiment import Experiment
from ...util import rms
from ..deap_extensions import Parameter, PrimitiveTree, compile
from ..util import get_residual_function, tensor_to_parameter_dict
from .results import CalibrationResults
class Flat(Distribution):
"""
(Improper) flat distribution
"""
def __init__(self, shape):
super().__init__()
self._shape = shape
def sample(self, sample_shape=()):
return torch.zeros(*sample_shape, *self._shape)
def log_prob(self, x):
return 0.0 * x.sum(dim=[-(i + 1) for i in range(len(self._shape))])
class Calibrator(torch.nn.Module):
"""
Calibrates parameters in a provided individual:
* Ephemeral constants (if any)
* Observation likelihood (i.e. variance of a Gaussian likelihood)
All updating is done in-place.
"""
def __init__(
self,
individual: PrimitiveTree,
pset: deap.gp.PrimitiveSet,
experiment: Experiment,
):
super().__init__()
pyro.clear_param_store()
self.individual = individual
self.pset = pset
self.experiment = experiment
self.f_residual = get_residual_function(
compile(individual, pset), experiment, pset
)
# Variational posterior variational parameters (multivariate Gaussian)
# self.q_mu = torch.zeros(self.num_parameters)
# self.raw_q_sqrt = torch.zeros(self.num_parameters, self.num_parameters)
# Input points at which we evaluate the residual function
self._inputs = torch.Tensor(self.experiment.data.values)
self._inputs.requires_grad_(True)
self._scheduler = None
self._svi = None
@property
def num_parameters(self):
return self.individual.num_parameters
@property
def q_mu(self) -> torch.Tensor:
"""
Variational posterior mean
"""
return pyro.param("q_mu")
@property
def q_sqrt(self) -> torch.Tensor:
"""
Parameters' posterior covariance
"""
return pyro.param("q_sqrt")
def calibrate(self, iters=200, verbose=True, show=False):
"""
Perform calibration
"""
lr_initial = 0.1
lr_final = 0.001
final_factor = lr_final / lr_initial
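        # With gamma = (lr_final / lr_initial) ** (1 / iters), the learning rate
        # decays geometrically from lr_initial to exactly lr_final over `iters`
        # scheduler steps: lr_initial * gamma**iters = lr_final.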
self._scheduler = pyro.optim.ExponentialLR(
{
"optimizer": torch.optim.Adam, # NOT pyro!
"optim_args": {"lr": lr_initial},
"gamma": final_factor ** (1 / iters),
}
)
self._svi = pyro.infer.SVI(
self._model, self._guide, self._scheduler, loss=pyro.infer.Trace_ELBO()
)
losses = []
iterator = tqdm(range(iters)) if verbose else range(iters)
for i in iterator:
losses.append(self._svi.step())
self._scheduler.step()
if verbose:
print("Calibrated:")
print(" expr: %s" % str(self.individual))
print(" q_mu: %s" % str(self.q_mu))
print(" q_var: %s" % str((self.q_sqrt @ self.q_sqrt.t()).diag().sqrt()))
if show:
plt.figure()
plt.plot(losses)
plt.xlabel("SVI iteration")
plt.ylabel("Loss")
plt.title("expr = %s" % str(self.individual))
plt.show()
def get_loss(self, num_particles=128, verbose=True):
"""
Get an estimate of the negative evidence lower bound based on the
current variational posterior.
:param num_particles: Number of particles used to estimate the ELBO
"""
svi = pyro.infer.SVI(
self._model,
self._guide,
self._scheduler,
loss=pyro.infer.Trace_ELBO(num_particles=num_particles),
)
# .evaluate_loss() doesn't seem to work if there are grads inside the
# model?
# NB: loss = -ELBO
loss = svi.evaluate_loss()
if verbose:
print("Loss = %f" % loss)
return loss
def get_results(self, detach_posterior=True) -> CalibrationResults:
"""
Distill the results of the calibration
:param detach_posterior: If True, make sure that the posterior's parameters are
detached (can cause issues if you try deepcopying)
"""
loc, scale_tril = self.q_mu, self.q_sqrt
if detach_posterior:
loc, scale_tril = loc.detach(), scale_tril.detach()
return CalibrationResults(
self.experiment,
self.get_loss(),
MultivariateNormal(loc, scale_tril=scale_tril),
)
def _model(self):
"""
Prior + posterior for the calibration model
"""
with torch.enable_grad(): # Because pyro tries to disable it...
theta = pyro.sample("theta", Flat((self.num_parameters,)))
parameters = tensor_to_parameter_dict(theta)
# Could also calibrate random functions...
r = self.f_residual(self._inputs, **parameters)
sigma = torch.sqrt(torch.mean(r ** 2))
with pyro.plate("targets"):
pyro.sample("residual", Normal(torch.zeros(1), sigma), obs=r)
def _guide(self):
"""
Pyro variational posterior ("guide")
"""
q_mu = pyro.param("q_mu", torch.zeros(self.num_parameters))
q_sqrt = pyro.param(
"q_sqrt",
torch.eye(self.num_parameters),
constraint=torch.distributions.constraints.lower_cholesky,
)
pyro.sample("theta", MultivariateNormal(q_mu, scale_tril=q_sqrt))
def calibrate(
individual: PrimitiveTree,
pset: deap.gp.PrimitiveSet,
experiment: Experiment,
verbose=True,
show=False,
) -> None:
"""
Calibrate the parameters of the provided individual.
:return: (CalibrationResults) Information for calculating the fitness.
"""
c = Calibrator(individual, pset, experiment)
c.calibrate(verbose=verbose, show=show)
return c.get_results()
```
#### File: pirate/symbolic_regression/util.py
```python
from typing import Callable
import deap.gp
import torch
from ..data.experiment import Experiment
from ..function import Function
def get_residual_function(
op: Callable, experiment: Experiment, pset: deap.gp.PrimitiveSet
) -> Function:
"""
Create the parametric residual function r(x; Theta)
:param op: Operator over functions, aka a graph as a compiled function
:return: Callable with signature r(x, theta_0=val, ...theta_m-1=val)
"""
# TODO would like to make it easier to see how many parameters "op" expects
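    # Illustrative usage sketch (mirrors Calibrator in calibration/functional.py;
    # `inputs` and `theta` are placeholder names for caller-supplied torch tensors):
    #   f_res = get_residual_function(compile(individual, pset), experiment, pset)
    #   r = f_res(inputs, **tensor_to_parameter_dict(theta))  # residual at `inputs`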
def residual(x, **parameters):
# First, evaluate the operator over functions and parameters:
func = op(
*[experiment.left_hand_side[key] for key in pset.arguments], **parameters
)
# Then, subtract the inhomogeneous function
if experiment.inhomogeneous is not None:
func = func - experiment.inhomogeneous
return func(x)
return residual
def tensor_to_parameter_dict(x: torch.Tensor) -> dict:
"""
Take an array of parameter values and restructure it as a dict that's a
valid input for the **parameters kwarg for a residual function returned by
`get_residual_function()`
:param x: 1D array of parameters
:return: (dict) parameter specification
"""
return {"theta_%i" % i: val for i, val in enumerate(x)}
```
#### File: pirate/systems/base.py
```python
import abc
from typing import List
import numpy as np
from pandas import DataFrame
class System(object):
pass
class UnsupervisedSystem(System, abc.ABC):
"""
Base class for unsupervised data.
We adopt the "generative" convention, in which the unlabeled data are
regarded as "outputs"
"""
def __init__(self, dy: int):
"""
:param dy: output dimension
"""
self._output_dimensions = tuple(["y" + str(i + 1) for i in range(dy)])
@property
def dy(self) -> int:
"""
Output dimensionality of the system.
"""
return len(self.output_dimensions)
@property
def observables(self):
raise NotImplementedError("Define observable dimensions for your system")
@property
def output_dimensions(self):
"""
Get the list of names of the output dimensions
:return: Iterable whose elements are str instances
"""
return self._output_dimensions
@abc.abstractmethod
def sample(self, n: int = 1) -> DataFrame:
"""
Produce data randomly from the system.
To generate data, we (1) draw random samples from a distribution over
the inputs, then (2) compute the corresponding outputs.
:param n: How many data to produce
:return: (DataFrame, [n x dx+dy]), the data. Column names should
match the input & output dimension names.
"""
raise NotImplementedError("")
class SupervisedSystem(UnsupervisedSystem):
"""
Base class for supervised data
"""
def __init__(self, dx: int, dy: int):
"""
:param dx: input dimension
:param dy: output dimension
"""
super().__init__(dy)
self._input_dimensions = tuple(["x" + str(i + 1) for i in range(dx)])
@property
def dx(self) -> int:
"""
Input dimensionality of the system.
:return: int
"""
return len(self.input_dimensions)
@property
def input_dimensions(self):
"""
Get the list of names of the input dimensions
:return: Iterable whose elements are str instances
"""
return self._input_dimensions
@abc.abstractmethod
def sample(self, n: int = 1) -> DataFrame:
"""
Produce data randomly from the system.
To generate data, we (1) draw random samples from a distribution over
the inputs, then (2) compute the corresponding outputs.
:param n: How many data to produce
:return: (DataFrame, [n x dx+dy]), the data. Column names should
match the input & output dimension names.
"""
raise NotImplementedError("")
class Parameterizable(abc.ABC):
"""
A system with a parameterization
"""
def __init__(self, system_parameters=None):
# Just for declaration within __init__:
self._system_parameters = None
# Store callables in here that will be invoked at the end of setting
# self.system_parameters
self._system_parameters_setter_hooks = []
# The actual setter:
# Note: this won't have child classes' hooks yet, so you'll probably
# need to invoke it again after the child classes' super().__init__()...
self.system_parameters = (
self._default_system_parameters()
if system_parameters is None
else system_parameters
)
@property
def system_parameters(self):
return self._system_parameters
@system_parameters.setter
def system_parameters(self, val: np.ndarray):
if not np.all(val.shape == self._default_system_parameters().shape):
raise ValueError(
"System parameters shape {} ".format(val.shape)
+ "doesn't match expected {}".format(
self._default_system_parameters().shape
)
)
self._system_parameters = val
for hook in self._system_parameters_setter_hooks:
hook()
@abc.abstractmethod
def randomize_system_parameters(self) -> None:
"""
Pick a new random value for the system parameters and set it to
self._system_parameters.
"""
raise NotImplementedError()
@abc.abstractmethod
def _default_system_parameters(self):
raise NotImplementedError("")
class Ensemble(object):
"""
Class for an ensemble of systems, where each System is e.g. a particular
solution, and this Ensemble shares the same underlying governing equations.
One example of this is the elliptic PDE with different a(x) and f(x)'es,
where we could talk about an ensemble of particular solutions that are all
from the same governing PDE.
"""
def __init__(
self,
system_type: type(UnsupervisedSystem),
system_init_args=None,
system_init_kwargs=None,
common_inputs: np.ndarray = None,
):
"""
:param system_type: The System type that we'll build an ensemble out of.
:param system_init_args:
:param system init_kwargs:
:param common_inputs:
"""
self._system_type = system_type
self._system_init_args = (
system_init_args if system_init_args is not None else ()
)
self._system_init_kwargs = (
system_init_kwargs if system_init_kwargs is not None else {}
)
print("kwargs = {}".format(self._system_init_kwargs))
# Hold a list of systems that have been generated
self._systems = []
# So that we can sample each System instance at the same inputs.
self._common_inputs = common_inputs
# self._check_init()
def new_system(self, n: int = 1, system_parameters: List[np.ndarray] = None):
"""
Initialize new systems in this ensemble
:param n: How many new systems to instantiate.
:param system_parameters: Can use this to get a specific ensemble of
systems
"""
system_parameters = (
[None] * n if system_parameters is None else system_parameters
)
assert (
len(system_parameters) == n
), "Number of provided system_parameters must match n"
kwargs = self._system_init_kwargs
if self._common_inputs is not None:
kwargs.update({"input_points": self._common_inputs})
for _, sp in zip(range(n), system_parameters):
new_system = self._system_type(*self._system_init_args, **kwargs)
if sp is None:
new_system.randomize_system_parameters()
else:
new_system.system_parameters = sp
self._systems.append(new_system)
def sample(self, *args, **kwargs):
return [system.sample(*args, **kwargs) for system in self.systems()]
def systems(self):
"""
Generator to yield the systems in this ensemble
"""
for s in self._systems:
yield s
def _check_init(self):
pass
```
#### File: tests/test_examples/test_adaptive.py
```python
from functools import partial
import importlib
from os.path import abspath, join, dirname
import sys
src_path = abspath(join(dirname(__file__), "..", ".."))
examples_path = abspath(join(dirname(__file__), "..", "..", "examples", "adaptive"))
for p in [src_path, examples_path]:
if p not in sys.path:
sys.path.append(p)
def t(name):
m = importlib.import_module(name)
m.main(testing=True)
test_ode = partial(t, "ode")
test_e_het = partial(t, "e_het")
test_e_nonlinear = partial(t, "e_nonlinear")
``` |
{
"source": "2133649586/Coronavirus-Tracker",
"score": 3
} |
#### File: 2133649586/Coronavirus-Tracker/resulting.py
```python
import ssl
# Globally disable SSL certificate verification
ssl._create_default_https_context = ssl._create_unverified_context
import pandas as pd
from bokeh.core.properties import value
from bokeh.layouts import column, row
from bokeh.models import CustomJS, DatePicker, Div, ColumnDataSource, TableColumn, DataTable, HoverTool
from bokeh.plotting import figure, curdoc
total_path = "https://raw.githubusercontent.com/datadesk/california-coronavirus-data/master/cdph-state-totals.csv"
race_path = "https://raw.githubusercontent.com/datadesk/california-coronavirus-data/master/cdph-race-ethnicity.csv"
total_data = pd.read_csv(total_path)
race_data = pd.read_csv(race_path)
last_update_date = total_data["date"][0]
# Describe the source of the data
introduce_of_data = Div(text = """All of the data used in this visualization project is published by the California \
Department of Public Health; we fetch the dataset from the GitHub repository: https://github.com/datadesk/california-coronavirus-data.
<b>Last update date: {}</b>""".format(last_update_date), width=700, height=80)
# design date select options
date_picker = DatePicker(title='Select date', value="2020-08-11", min_date="2020-06-25", max_date=last_update_date)
selected_date = date_picker.value
# question a
increase_case = {"date":[selected_date],"confirm case":[]}
for i in range(len(total_data["date"])-1):
if total_data["date"][i] == selected_date:
increase_case["confirm case"].append(total_data["confirmed_cases"][i] - total_data["confirmed_cases"][i+1])
source_q1 = ColumnDataSource(increase_case)
columns1 = [TableColumn(field="date", title="Date"),TableColumn(field="confirm case", title="Increase confirm case")]
data_table1 = DataTable(source = source_q1, columns=columns1, width=400, height=120)
#question b,c
description = Div(text = """This section answers the second and third questions: 2. For a particular day, what is the percentage of cases by race compared to their representation in the
general population? 3. For a particular day, what is the percentage of deaths by race compared to their representation in the
general population? When you hover over an area of the chart, the detailed values are shown in a tooltip. <b>Attention: if the table or figure shows no data,
the selected date does not exist in our dataset.</b>""")
some = race_data[(race_data["age"]=="all") & (race_data["date"]==selected_date)]
particular_column = ["date","race","confirmed_cases_percent","deaths_percent","population_percent"]
particular_data = some[particular_column]
source_q23 = ColumnDataSource(data = particular_data)
columns23 = [TableColumn(field="date", title="Date"),TableColumn(field="race", title="Race"),
TableColumn(field="confirmed_cases_percent", title="Confirmed cases percent")
,TableColumn(field="deaths_percent", title="Deaths percent"),TableColumn(field="population_percent", title="Population percent")]
data_table23 = DataTable(source = source_q23, columns=columns23, width=800, height=280)
# Build the stacked bar chart
colors = ["#c9d9d3", "#718dbf", "#e84d60"]
para_column = ["confirmed_cases_percent","deaths_percent","population_percent"]
p = figure(x_range=particular_data["race"], plot_height=350, title="",tools="")
renderers = p.vbar_stack(para_column,  # stackers: the three percentage columns defined above
x='race',  # x coordinate: one bar group per race
source=source_q23,  # ColumnDataSource holding the selected day's data
width=0.9, color=colors,
legend=[value(x) for x in para_column], name=para_column)  # one legend entry per stacked column
# Draw the stacked bar chart
# Note: the first argument must be the list of stacker column names
h = HoverTool(
tooltips=[('confirmed cases percent %', '@confirmed_cases_percent'), ('deaths cases percent %', '@deaths_percent'), ('population percent %', '@population_percent')])
p.add_tools(h)
p.xgrid.grid_line_color = None
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "top_left"
p.legend.orientation = "horizontal"
# Other figure settings
def call_back(attr, old, new):
# question a
global selected_date
selected_date = date_picker.value
increase_case = {"date": [selected_date], "confirm case": []}
for i in range(len(total_data["date"]) - 1):
if total_data["date"][i] == selected_date:
increase_case["confirm case"].append(
total_data["confirmed_cases"][i] - total_data["confirmed_cases"][i + 1])
source_q1.data = increase_case
# question b,c
some = race_data[(race_data["age"] == "all") & (race_data["date"] == selected_date)]
particular_column = ["date", "race", "confirmed_cases_percent", "deaths_percent", "population_percent"]
particular_data = some[particular_column]
source_q23.data = particular_data
date_picker.on_change("value", call_back)
q1 = column(introduce_of_data,date_picker,data_table1)
q23 = column(description,p)
#show(column(q1,q23))
curdoc().add_root(column(q1,column(data_table23,q23)))
``` |
{
"source": "2141040025/Aplicacion-CRUD-Django",
"score": 2
} |
#### File: Aplicacion-CRUD-Django/new_app/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
#
from .models import *
from .forms import *
#-------------------------------------------------------------------------
def index(request): # Professor list.
profesores = Profesor.objects.all()
context = {
"profesores": profesores,
}
return render(request, "index.html", context)
#-------------------------------------------------------------------------
def profesor_detalle(request, id): # Professor detail.
profesor = get_object_or_404(Profesor, id=id)
context = {
"profesor": profesor,
}
return render(request, "profesor_detalle.html", context)
#-------------------------------------------------------------------------
def profesor_borrar(request, id): # Professor delete.
profesor = get_object_or_404(Profesor, id=id)
if request.method == 'POST':
profesor.delete()
return redirect('/')
context = {
"profesor": profesor,
}
return render(request, "confirm_delete.html", context)
#-------------------------------------------------------------------------
def profesor_crear(request): # Professor create.
form = ProfesorForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('/')
context = {
"form": form,
}
return render(request, "profesor_form.html", context)
#-------------------------------------------------------------------------
def profesor_editar(request, id): # Professor edit.
profesor = get_object_or_404(Profesor, id=id)
form = ProfesorForm(request.POST or None, instance=profesor)
# form = bookformset(request.POST, request.FILES, instance=a)
if form.is_valid():
form.save()
return redirect('/')
context = {
"form": form,
}
return render(request, "profesor_form.html", context)
``` |
{
"source": "214929177/pyNastran",
"score": 3
} |
#### File: bdf/bdf_interface/attributes.py
```python
from __future__ import annotations
from collections import defaultdict
from typing import List, Dict, Optional, Any, Union, TYPE_CHECKING
from numpy import array # type: ignore
from pyNastran.utils import object_attributes, object_methods, deprecated
#from pyNastran.bdf.case_control_deck import CaseControlDeck
from pyNastran.bdf.cards.coordinate_systems import CORD2R
#from pyNastran.bdf.cards.constraints import ConstraintObject
from pyNastran.bdf.cards.aero.zona import ZONA
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.cards.dmig import DMIG, DMI, DMIJ, DMIK, DMIJI
class BDFAttributes:
"""defines attributes of the BDF"""
def __init__(self):
"""creates the attributes for the BDF"""
self.__init_attributes()
self._is_cards_dict = False
self.is_nx = False
self.is_msc = False
self.is_nasa95 = False
self.is_zona = False
self.save_file_structure = False
self.is_superelements = False
self.set_as_msc()
self.units = [] # type: List[str]
def set_as_msc(self):
self._nastran_format = 'msc'
self.is_nx = False
self.is_msc = True
self.is_nasa95 = False
self.is_zona = False
def set_as_nx(self):
self._nastran_format = 'nx'
self.is_nx = True
self.is_msc = False
self.is_nasa95 = False
self.is_zona = False
def set_as_zona(self):
self._nastran_format = 'zona'
self.is_nx = False
self.is_msc = False
self.is_nasa95 = False
self.is_zona = True
def __properties__(self):
"""the list of @property attributes"""
return ['nastran_format', 'is_long_ids', 'sol', 'subcases',
'nnodes', 'node_ids', 'point_ids', 'npoints',
'nelements', 'element_ids', 'nproperties', 'property_ids',
'nmaterials', 'material_ids', 'ncoords', 'coord_ids',
'ncaeros', 'caero_ids', 'wtmass', 'is_bdf_vectorized', 'nid_map']
def object_attributes(self, mode: str='public',
keys_to_skip: Optional[List[str]]=None,
filter_properties: bool=False) -> List[str]:
"""
List the names of attributes of a class as strings. Returns public
attributes as default.
Parameters
----------
mode : str
defines what kind of attributes will be listed
* 'public' - names that do not begin with underscore
* 'private' - names that begin with single underscore
* 'both' - private and public
* 'all' - all attributes that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
filter_properties: bool: default=False
filters the @property objects
Returns
-------
attribute_names : List[str]
sorted list of the names of attributes of a given type or None
if the mode is wrong
"""
if keys_to_skip is None:
keys_to_skip = []
my_keys_to_skip = [
#'case_control_deck',
'log',
'node_ids', 'coord_ids', 'element_ids', 'property_ids',
'material_ids', 'caero_ids', 'is_long_ids',
'nnodes', 'ncoords', 'nelements', 'nproperties',
'nmaterials', 'ncaeros', 'npoints',
'point_ids', 'subcases',
'_card_parser', '_card_parser_b', '_card_parser_prepare',
'object_methods', 'object_attributes',
]
return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip,
filter_properties=filter_properties)
def object_methods(self, mode: str='public', keys_to_skip: Optional[List[str]]=None) -> List[str]:
"""
List the names of methods of a class as strings. Returns public methods
as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of methods will be listed
* "public" - names that do not begin with underscore
* "private" - names that begin with single underscore
* "both" - private and public
* "all" - all methods that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
method : List[str]
sorted list of the names of methods of a given type
or None if the mode is wrong
"""
if keys_to_skip is None:
keys_to_skip = []
my_keys_to_skip = [] # type: List[str]
my_keys_to_skip = [
#'case_control_deck',
'log', #'mpcObject', 'spcObject',
'node_ids', 'coord_ids', 'element_ids', 'property_ids',
'material_ids', 'caero_ids', 'is_long_ids',
'nnodes', 'ncoords', 'nelements', 'nproperties',
'nmaterials', 'ncaeros',
'point_ids', 'subcases',
'_card_parser', '_card_parser_b',
'object_methods', 'object_attributes',
]
return object_methods(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
def deprecated(self, old_name: str, new_name: str, deprecated_version: str) -> None:
"""deprecates methods"""
return deprecated(old_name, new_name, deprecated_version, levels=[0, 1, 2])
def clear_attributes(self) -> None:
"""removes the attributes from the model"""
self.__init_attributes()
self.nodes = {}
self.loads = {} # type: Dict[int, List[Any]]
self.load_combinations = {} # type: Dict[int, List[Any]]
def reset_errors(self) -> None:
"""removes the errors from the model"""
self._ixref_errors = 0
self._stored_xref_errors = []
def __init_attributes(self) -> None:
"""
Creates storage objects for the BDF object.
This would be in the init but doing it this way allows for better
inheritance
References:
1. http://www.mscsoftware.com/support/library/conf/wuc87/p02387.pdf
"""
self.reset_errors()
self.bdf_filename = None
self.punch = None
self._encoding = None
self._is_long_ids = False # ids > 8 characters
#: ignore any ECHOON flags
self.force_echo_off = True
#: list of Nastran SYSTEM commands
self.system_command_lines = [] # type: List[str]
#: list of execive control deck lines
self.executive_control_lines = [] # type: List[str]
#: list of case control deck lines
self.case_control_lines = [] # type: List[str]
# dictionary of BDFs
self.superelement_models = {}
self.initial_superelement_models = [] # the keys before superelement mirroring
self._auto_reject = False
self._solmap_to_value = {
'NONLIN': 101, # 66 -> 101 per Reference 1
'SESTATIC': 101,
'SESTATICS': 101,
'SEMODES': 103,
'BUCKLING': 105,
'SEBUCKL': 105,
'NLSTATIC': 106,
'SEDCEIG': 107,
'SEDFREQ': 108,
'SEDTRAN': 109,
'SEMCEIG': 110,
'SEMFREQ': 111,
'SEMTRAN': 112,
'CYCSTATX': 114,
'CYCMODE': 115,
'CYCBUCKL': 116,
'CYCFREQ': 118,
'NLTRAN': 129,
'AESTAT': 144,
'FLUTTR': 145,
'SEAERO': 146,
'NLSCSH': 153,
'NLTCSH': 159,
'DBTRANS': 190,
'DESOPT': 200,
# guessing
#'CTRAN' : 115,
'CFREQ' : 118,
# solution 200 names
'STATICS': 101,
'MODES': 103,
'BUCK': 105,
'DFREQ': 108,
'MFREQ': 111,
'MTRAN': 112,
'DCEIG': 107,
'MCEIG': 110,
#'HEAT' : None,
#'STRUCTURE': None,
#'DIVERGE' : None,
'FLUTTER': 145,
'SAERO': 146,
}
self.rsolmap_to_str = {
66: 'NONLIN',
101: 'SESTSTATIC', # linear static
103: 'SEMODES', # modal
105: 'BUCKLING', # buckling
106: 'NLSTATIC', # non-linear static
107: 'SEDCEIG', # direct complex frequency response
108: 'SEDFREQ', # direct frequency response
109: 'SEDTRAN', # direct transient response
110: 'SEMCEIG', # modal complex eigenvalue
111: 'SEMFREQ', # modal frequency response
112: 'SEMTRAN', # modal transient response
114: 'CYCSTATX',
115: 'CYCMODE',
116: 'CYCBUCKL',
118: 'CYCFREQ',
129: 'NLTRAN', # nonlinear transient
144: 'AESTAT', # static aeroelastic
145: 'FLUTTR', # flutter/aeroservoelastic
146: 'SEAERO', # dynamic aeroelastic
153: 'NLSCSH', # nonlinear static thermal
159: 'NLTCSH', # nonlinear transient thermal
#187 - Dynamic Design Analysis Method
190: 'DBTRANS',
200: 'DESOPT', # optimization
}
# ------------------------ bad duplicates ----------------------------
self._iparse_errors = 0
self._nparse_errors = 0
self._stop_on_parsing_error = True
self._stop_on_duplicate_error = True
self._stored_parse_errors = [] # type: List[str]
self._duplicate_nodes = [] # type: List[str]
self._duplicate_elements = [] # type: List[str]
self._duplicate_properties = [] # type: List[str]
self._duplicate_materials = [] # type: List[str]
self._duplicate_masses = [] # type: List[str]
self._duplicate_thermal_materials = [] # type: List[str]
self._duplicate_coords = [] # type: List[str]
self.values_to_skip = {} # type: Dict[str, List[int]]
# ------------------------ structural defaults -----------------------
#: the analysis type
self._sol = None
#: used in solution 600, method
self.sol_method = None
#: the line with SOL on it, marks ???
self.sol_iline = None # type : Optional[int]
self.case_control_deck = None # type: Optional[Any]
#: store the PARAM cards
self.params = {} # type: Dict[str, Any]
# ------------------------------- nodes -------------------------------
# main structural block
#: stores POINT cards
self.points = {} # type: Dict[int, Any]
#self.grids = {}
self.spoints = {} # type: Dict[int, Any]
self.epoints = {} # type: Dict[int, Any]
#: stores GRIDSET card
self.grdset = None # type: Optional[Any]
#: stores SEQGP cards
self.seqgp = None # type: Optional[Any]
## stores RINGAX
self.ringaxs = {} # type: Dict[int, Any]
## stores GRIDB
self.gridb = {} # type: Dict[int, Any]
#: stores elements (CQUAD4, CTRIA3, CHEXA8, CTETRA4, CROD, CONROD,
#: etc.)
self.elements = {} # type: Dict[int, Any]
#: stores CBARAO, CBEAMAO
self.ao_element_flags = {} # type: Dict[int, Any]
#: stores BAROR
self.baror = None # type: Optional[Any]
#: stores BEAMOR
self.beamor = None # type: Optional[Any]
#: stores SNORM
self.normals = {} # type: Dict[int, Any]
#: stores rigid elements (RBE2, RBE3, RJOINT, etc.)
self.rigid_elements = {} # type: Dict[int, Any]
#: stores PLOTELs
self.plotels = {} # type: Optional[Any]
#: stores CONM1, CONM2, CMASS1,CMASS2, CMASS3, CMASS4, CMASS5
self.masses = {} # type: Dict[int, Any]
#: stores PMASS
self.properties_mass = {} # type: Dict[int, Any]
#: stores NSM, NSM1, NSML, NSML1
self.nsms = {} # type: Dict[int, List[Any]]
#: stores NSMADD
self.nsmadds = {} # type: Dict[int, List[Any]]
#: stores LOTS of propeties (PBAR, PBEAM, PSHELL, PCOMP, etc.)
self.properties = {} # type: Dict[int, Any]
#: stores MAT1, MAT2, MAT3, MAT8, MAT10, MAT11
self.materials = {} # type: Dict[int, Any]
#: defines the MAT4, MAT5
self.thermal_materials = {} # type: Dict[int, Any]
#: defines the MATHE, MATHP
self.hyperelastic_materials = {} # type: Dict[int, Any]
#: stores MATSx
self.MATS1 = {} # type: Dict[int, Any]
self.MATS3 = {} # type: Dict[int, Any]
self.MATS8 = {} # type: Dict[int, Any]
#: stores MATTx
self.MATT1 = {} # type: Dict[int, Any]
self.MATT2 = {} # type: Dict[int, Any]
self.MATT3 = {} # type: Dict[int, Any]
self.MATT4 = {} # type: Dict[int, Any]
self.MATT5 = {} # type: Dict[int, Any]
self.MATT8 = {} # type: Dict[int, Any]
self.MATT9 = {} # type: Dict[int, Any]
self.nxstrats = {} # type: Dict[int, Any]
#: stores the CREEP card
self.creep_materials = {} # type: Dict[int, Any]
self.tics = {} # type: Optional[Any]
# stores DLOAD entries.
self.dloads = {} # type: Dict[int, Any]
# stores ACSRCE, RLOAD1, RLOAD2, TLOAD1, TLOAD2, and ACSRCE,
# and QVECT entries.
self.dload_entries = {} # type: Dict[int, Any]
#self.gusts = {} # Case Control GUST = 100
#self.random = {} # Case Control RANDOM = 100
#: stores coordinate systems
origin = array([0., 0., 0.])
zaxis = array([0., 0., 1.])
xzplane = array([1., 0., 0.])
coord = CORD2R(cid=0, rid=0, origin=origin, zaxis=zaxis, xzplane=xzplane)
self.coords = {0 : coord} # type: Dict[int, Any]
# --------------------------- constraints ----------------------------
#: stores SUPORT1s
#self.constraints = {} # suport1, anything else???
self.suport = [] # type: List[Any]
self.suport1 = {} # type: Dict[int, Any]
self.se_suport = [] # type: List[Any]
#: stores SPC, SPC1, SPCAX, GMSPC
self.spcs = {} # type: Dict[int, List[Any]]
#: stores SPCADD
self.spcadds = {} # type: Dict[int, List[Any]]
self.spcoffs = {} # type: Dict[int, List[Any]]
self.mpcs = {} # type: Dict[int, List[Any]]
self.mpcadds = {} # type: Dict[int, List[Any]]
# --------------------------- dynamic ----------------------------
#: stores DAREA
self.dareas = {} # type: Dict[int, Any]
self.dphases = {} # type: Dict[int, Any]
self.pbusht = {} # type: Dict[int, Any]
self.pdampt = {} # type: Dict[int, Any]
self.pelast = {} # type: Dict[int, Any]
#: frequencies
self.frequencies = {} # type: Dict[int, List[Any]]
# ----------------------------------------------------------------
#: direct matrix input - DMIG
self.dmi = {} # type: Dict[str, Any]
self.dmig = {} # type: Dict[str, Any]
self.dmij = {} # type: Dict[str, Any]
self.dmiji = {} # type: Dict[str, Any]
self.dmik = {} # type: Dict[str, Any]
self.dmiax = {} # type: Dict[str, Any]
self.dti = {} # type: Dict[str, Any]
self._dmig_temp = defaultdict(list) # type: Dict[str, List[str]]
# ----------------------------------------------------------------
#: SETy
self.sets = {} # type: Dict[int, Any]
self.asets = [] # type: List[Any]
self.omits = [] # type: List[Any]
self.bsets = [] # type: List[Any]
self.csets = [] # type: List[Any]
self.qsets = [] # type: List[Any]
self.usets = {} # type: Dict[str, Any]
#: SExSETy
self.se_bsets = [] # type: List[Any]
self.se_csets = [] # type: List[Any]
self.se_qsets = [] # type: List[Any]
self.se_usets = {} # type: Dict[str, Any]
self.se_sets = {} # type: Dict[str, Any]
# ----------------------------------------------------------------
#: parametric
self.pset = {}
self.pval = {}
self.gmcurv = {}
self.gmsurf = {}
self.feedge = {}
self.feface = {}
# ----------------------------------------------------------------
#: tables
# TABLES1, ...
self.tables = {} # type: Dict[int, TABLES1]
# TABLEDx
self.tables_d = {} # type: Dict[int, Union[TABLED1, TABLED2, TABLED3, TABLED4]]
# TABLEMx
self.tables_m = {} # type: Dict[int, Union[TABLEM1, TABLEM2, TABLEM3, TABLEM4]]
#: random_tables
self.random_tables = {} # type: Dict[int, Any]
#: TABDMP1
self.tables_sdamping = {} # type: Dict[int, TABDMP1]
# ----------------------------------------------------------------
#: EIGB, EIGR, EIGRL methods
self.methods = {} # type: Dict[int, Union[EIGR, EIGRL, EIGB]]
# EIGC, EIGP methods
self.cMethods = {} # type: Dict[int, Union[EIGC, EIGP]]
# ---------------------------- optimization --------------------------
# optimization
self.dconadds = {} # type: Dict[int, DCONADD]
self.dconstrs = {} # type: Dict[int, DCONSTR]
self.desvars = {} # type: Dict[int, DESVAR]
self.topvar = {} # type: Dict[int, TOPVAR]
self.ddvals = {} # type: Dict[int, DDVAL]
self.dlinks = {} # type: Dict[int, DLINK]
self.dresps = {} # type: Dict[int, Union[DRESP1, DRESP2, DRESP3]]
self.dtable = None # type: Optional[DTABLE]
self.dequations = {} # type: Dict[int, DEQATN]
#: stores DVPREL1, DVPREL2...might change to DVxRel
self.dvprels = {} # type: Dict[int, Union[DVPREL1, DVPREL2]]
self.dvmrels = {} # type: Dict[int, Union[DVMREL1, DVMREL2]]
self.dvcrels = {} # type: Dict[int, Union[DVCREL1, DVCREL2]]
self.dvgrids = {} # type: Dict[int, DVGRID]
self.doptprm = None # type: Optional[DOPTPRM]
self.dscreen = {} # type: Dict[int, DSCREEN]
# ------------------------- nonlinear defaults -----------------------
#: stores NLPCI
self.nlpcis = {} # type: Dict[int, NLPCI]
#: stores NLPARM
self.nlparms = {} # type: Dict[int, NLPARM]
#: stores TSTEPs, TSTEP1s
self.tsteps = {} # type: Dict[int, Union[TSTEP, TSTEP1]]
#: stores TSTEPNL
self.tstepnls = {} # type: Dict[int, TSTEPNL]
#: stores TF
self.transfer_functions = {} # type: Dict[int, TF]
#: stores DELAY
self.delays = {} # type: Dict[int, DELAY]
#: stores ROTORD, ROTORG
self.rotors = {} # type: Dict[int, Union[ROTORD, ROTORG]]
# --------------------------- aero defaults --------------------------
# aero cards
#: stores CAEROx
self.caeros = {} # type: Dict[int, Union[CAERO1, CAERO2, CAERO3, CAERO4, CAERO5]]
#: stores PAEROx
self.paeros = {} # type: Dict[int, Union[PAERO1, PAERO2, PAERO3, PAERO4, PAERO5]]
# stores MONPNT1
self.monitor_points = [] # type: List[Union[MONPNT1, MONPNT2, MONPNT3]]
#: stores AECOMP
self.aecomps = {} # type: Dict[int, AECOMP]
#: stores AEFACT
self.aefacts = {} # type: Dict[int, AEFACT]
#: stores AELINK
self.aelinks = {} # type: Dict[int, List[AELINK]]
#: stores AELIST
self.aelists = {} # type: Dict[int, AELIST]
#: stores AEPARAM
self.aeparams = {} # type: Dict[int, AEPARAM]
#: stores AESURF
self.aesurf = {} # type: Dict[int, AESURF]
#: stores AESURFS
self.aesurfs = {} # type: Dict[int, AESURFS]
#: stores AESTAT
self.aestats = {} # type: Dict[int, AESTAT]
#: stores CSSCHD
self.csschds = {} # type: Dict[int, CSSCHD]
#: store SPLINE1,SPLINE2,SPLINE4,SPLINE5
self.splines = {} # type: Dict[int, Union[SPLINE1, SPLINE2, SPLINE3, SPLINE4, SPLINE5]]
self.zona = ZONA(self)
# axisymmetric
self.axic = None # type: Optional[AXIC]
self.axif = None # type: Optional[AXIF]
self.ringfl = {} # type: Dict[int, RINGFL]
self._is_axis_symmetric = False
# cyclic
self.cyax = None # type: Optional[CYAX]
self.cyjoin = {} # type: Dict[int, CYJOIN]
self.modtrak = None # type: Optional[MODTRAK]
# acoustic
self.acmodl = None
# ------ SOL 144 ------
#: stores AEROS
self.aeros = None # type: Optional[AEROS]
#: stores TRIM, TRIM2
self.trims = {} # type: Dict[int, Union[TRIM, TRIM2]]
#: stores DIVERG
self.divergs = {} # type: Dict[int, DIVERG]
# ------ SOL 145 ------
#: stores AERO
self.aero = None # type: Optional[AERO]
#: stores FLFACT
self.flfacts = {} # type: Dict[int, FLFACT]
#: stores FLUTTER
self.flutters = {} # type: Dict[int, FLUTTER]
#: mkaeros
self.mkaeros = [] # type: List[Union[MKAERO1,MKAERO2]]
# ------ SOL 146 ------
#: stores GUST cards
self.gusts = {} # type: Dict[int, GUST]
# ------------------------- thermal defaults -------------------------
# BCs
#: stores thermal boundary conditions - CONV,RADBC
self.bcs = {} # type: Dict[int, Union[CONV, RADBC]]
#: stores PHBDY
self.phbdys = {} # type: Dict[int, PHBDY]
#: stores convection properties - PCONV, PCONVM ???
self.convection_properties = {} # type: Dict[int, Union[PCONV, PCONVM]]
#: stores TEMPD
self.tempds = {} # type: Dict[int, TEMPD]
#: stores VIEW
self.views = {} # type: Dict[int, VIEW]
#: stores VIEW3D
self.view3ds = {} # type: Dict[int, VIEW3D]
self.radset = None
self.radcavs = {} # type: Dict[int, RADCAV]
self.radmtx = {} # type: Dict[int, RADMTX]
# -------------------------contact cards-------------------------------
self.bcrparas = {} # type: Dict[int, BCRPARA]
self.bctadds = {} # type: Dict[int, BCTADD]
self.bctparas = {} # type: Dict[int, BCTPARA]
self.bctsets = {} # type: Dict[int, BCTSET]
self.bsurf = {} # type: Dict[int, BSURF]
self.bsurfs = {} # type: Dict[int, BSURFS]
self.bconp = {} # type: Dict[int, BCONP]
self.blseg = {} # type: Dict[int, BLSEG]
self.bfric = {} # type: Dict[int, BFRIC]
self.bgadds = {} # type: Dict[int, BGADD]
self.bgsets = {} # type: Dict[int, BGSET]
self.bctparms = {} # type: Dict[int, BCTPARAM]
#--------------------------superelements------------------------------
self.setree = {} # type: Dict[int, SETREE]
self.senqset = {} # type: Dict[int, Union[SENQSET, SENQSET1]]
self.sebulk = {} # type: Dict[int, SEBULK]
self.sebndry = {} # type: Dict[int, SEBNDRY]
self.release = {} # type: Dict[int, RELEASE]
self.seloc = {} # type: Dict[int, SELOC]
self.sempln = {} # type: Dict[int, SEMPLN]
self.seconct = {} # type: Dict[int, SECONCT]
self.selabel = {} # type: Dict[int, SELABEL]
self.seexcld = {} # type: Dict[int, SEEXCLD]
self.seelt = {} # type: Dict[int, SEELT]
self.seload = {} # type: Dict[int, SELOAD]
self.csuper = {} # type: Dict[int, CSUPER]
self.csupext = {} # type: Dict[int, CSUPEXT]
# ---------------------------------------------------------------------
self._type_to_id_map = defaultdict(list) # type: Dict[str, List[Any]]
self._slot_to_type_map = {
'params' : ['PARAM'],
'nodes' : ['GRID', 'SPOINT', 'EPOINT'], # 'RINGAX',
'points' : ['POINT'],
'ringaxs' : ['RINGAX', 'POINTAX'],
'ringfl' : ['RINGFL'],
'axic' : ['AXIC'],
'axif' : ['AXIF'],
'acmodl' : ['ACMODL'],
'grdset' : ['GRDSET'],
'gridb' : ['GRIDB'],
'seqgp' : ['SEQGP'],
'ao_element_flags' : ['CBARAO'],
#'POINTAX', 'RINGAX',
# CMASS4 lies in the QRG
'masses' : ['CONM1', 'CONM2', 'CMASS1', 'CMASS2', 'CMASS3', 'CMASS4'],
'elements' : [
'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
# 'CELAS5',
'CBUSH', 'CBUSH1D', 'CBUSH2D',
'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5',
'CFAST', 'GENEL',
'CBAR', 'CROD', 'CTUBE', 'CBEAM', 'CBEAM3', 'CONROD', 'CBEND',
'CTRIA3', 'CTRIA6', 'CTRIAR',
'CQUAD4', 'CQUAD8', 'CQUADR', 'CQUAD',
'CPLSTN3', 'CPLSTN6', 'CPLSTN4', 'CPLSTN8',
'CPLSTS3', 'CPLSTS6', 'CPLSTS4', 'CPLSTS8',
'CTRAX3', 'CTRAX6', 'CTRIAX', 'CTRIAX6',
'CQUADX', 'CQUADX4', 'CQUADX8',
'CCONEAX',
'CTETRA', 'CPYRAM', 'CPENTA', 'CHEXA', 'CIHEX1', 'CIHEX2',
'CSHEAR', 'CVISC', 'CRAC2D', 'CRAC3D',
'CGAP',
# thermal
'CHBDYE', 'CHBDYG', 'CHBDYP',
# acoustic
'CHACAB', 'CAABSF', 'CHACBR',
],
'normals' : ['SNORM'],
'nsms' : ['NSM', 'NSM1', 'NSML', 'NSML1'],
'nsmadds' : ['NSMADD'],
'rigid_elements' : ['RBAR', 'RBAR1', 'RBE1', 'RBE2', 'RBE3', 'RROD', 'RSPLINE', 'RSSCON'],
'plotels' : ['PLOTEL'],
'properties_mass' : ['PMASS'],
#'properties_acoustic' : ['PACABS'],
'properties' : [
# acoustic
'PACABS', 'PAABSF', 'PACBAR',
# 0d
'PELAS', 'PGAP', 'PFAST',
'PBUSH', 'PBUSH1D',
'PDAMP', 'PDAMP5',
# 1d
'PROD', 'PBAR', 'PBARL', 'PBEAM', 'PTUBE', 'PBEND', 'PBCOMP', 'PBRSECT', 'PBMSECT',
'PBEAML', # not fully supported
'PBEAM3',
# 2d
'PLPLANE', 'PPLANE',
'PSHELL', 'PCOMP', 'PCOMPG', 'PSHEAR',
'PSOLID', 'PLSOLID', 'PVISC', 'PRAC2D', 'PRAC3D',
'PIHEX', 'PCOMPS',
'PCONEAX',
],
'pdampt' : ['PDAMPT'],
'pelast' : ['PELAST'],
'pbusht' : ['PBUSHT'],
# materials
'materials' : ['MAT1', 'MAT2', 'MAT3', 'MAT8', 'MAT9', 'MAT10', 'MAT11',
'MAT3D', 'MATG'],
'hyperelastic_materials' : ['MATHE', 'MATHP'],
'creep_materials' : ['CREEP'],
'MATT1' : ['MATT1'],
'MATT2' : ['MATT2'],
'MATT3' : ['MATT3'],
'MATT4' : ['MATT4'], # thermal
'MATT5' : ['MATT5'], # thermal
'MATT8' : ['MATT8'],
'MATT9' : ['MATT9'],
'MATS1' : ['MATS1'],
'MATS3' : ['MATS3'],
'MATS8' : ['MATS8'],
'nxstrats' : ['NXSTRAT'],
# 'MATHE'
#'EQUIV', # testing only, should never be activated...
# thermal materials
'thermal_materials' : ['MAT4', 'MAT5'],
# spc/mpc constraints - TODO: is this correct?
'spcadds' : ['SPCADD'],
'spcs' : ['SPC', 'SPC1', 'SPCAX', 'GMSPC'],
'spcoffs' : ['SPCOFF', 'SPCOFF1'],
'mpcadds' : ['MPCADD'],
'mpcs' : ['MPC'],
'suport' : ['SUPORT'],
'suport1' : ['SUPORT1'],
'se_suport' : ['SESUP'],
'setree' : ['SETREE'],
'senqset' : ['SENQSET'],
'sebulk' : ['SEBULK'],
'sebndry' : ['SEBNDRY'],
'release' : ['RELEASE'],
'seloc' : ['SELOC'],
'sempln' : ['SEMPLN'],
'seconct' : ['SECONCT'],
'selabel' : ['SELABEL'],
'seexcld' : ['SEEXCLD'],
'seelt' : ['SEELT'],
'seload' : ['SELOAD'],
'csuper' : ['CSUPER'],
'csupext' : ['CSUPEXT'],
# loads
'load_combinations' : ['LOAD', 'LSEQ', 'CLOAD'],
'loads' : [
'FORCE', 'FORCE1', 'FORCE2',
'MOMENT', 'MOMENT1', 'MOMENT2',
'GRAV', 'ACCEL', 'ACCEL1',
'PLOAD', 'PLOAD1', 'PLOAD2', 'PLOAD4',
'RFORCE', 'RFORCE1', 'SLOAD',
'GMLOAD', 'SPCD', 'LOADCYN', 'LOADCYH', 'DEFORM',
# thermal
'TEMP', 'TEMPB3', 'TEMPRB',
'QBDY1', 'QBDY2', 'QBDY3', 'QHBDY', 'QVOL',
# axisymmetric
'PLOADX1', 'FORCEAX', 'PRESAX', 'TEMPAX',
],
'cyjoin' : ['CYJOIN'],
'cyax' : ['CYAX'],
'modtrak' : ['MODTRAK'],
'dloads' : ['DLOAD'],
# stores RLOAD1, RLOAD2, TLOAD1, TLOAD2, and ACSRCE entries.
'dload_entries' : ['ACSRCE', 'TLOAD1', 'TLOAD2', 'RLOAD1', 'RLOAD2',
'QVECT', 'RANDPS', 'RANDT1'],
# aero cards
'aero' : ['AERO'],
'aeros' : ['AEROS'],
'gusts' : ['GUST', 'GUST2'],
'flutters' : ['FLUTTER'],
'flfacts' : ['FLFACT'],
'mkaeros' : ['MKAERO1', 'MKAERO2'],
'aecomps' : ['AECOMP', 'AECOMPL'],
'aefacts' : ['AEFACT'],
'aelinks' : ['AELINK'],
'aelists' : ['AELIST'],
'aeparams' : ['AEPARM'],
'aesurf' : ['AESURF'],
'aesurfs' : ['AESURFS'],
'aestats' : ['AESTAT'],
'caeros' : ['CAERO1', 'CAERO2', 'CAERO3', 'CAERO4', 'CAERO5', 'CAERO7', 'BODY7'],
'paeros' : ['PAERO1', 'PAERO2', 'PAERO3', 'PAERO4', 'PAERO5', 'SEGMESH'],
'monitor_points' : ['MONPNT1', 'MONPNT2', 'MONPNT3', 'MONDSP1'],
'splines' : ['SPLINE1', 'SPLINE2', 'SPLINE3', 'SPLINE4', 'SPLINE5', 'SPLINE6', 'SPLINE7'],
'panlsts' : ['PANLST1', 'PANLST2', 'PANLST3'],
'csschds' : ['CSSCHD',],
#'SPLINE3', 'SPLINE6', 'SPLINE7',
'trims' : ['TRIM', 'TRIM2'],
'divergs' : ['DIVERG'],
# coords
'coords' : ['CORD1R', 'CORD1C', 'CORD1S',
'CORD2R', 'CORD2C', 'CORD2S',
'GMCORD', 'ACOORD', 'CORD3G'],
# temperature cards
'tempds' : ['TEMPD'],
'phbdys' : ['PHBDY'],
'convection_properties' : ['PCONV', 'PCONVM'],
# stores thermal boundary conditions
'bcs' : ['CONV', 'CONVM', 'RADBC', 'RADM', 'TEMPBC'],
# dynamic cards
'dareas' : ['DAREA'],
'tics' : ['TIC'],
'dphases' : ['DPHASE'],
'nlparms' : ['NLPARM'],
'nlpcis' : ['NLPCI'],
'tsteps' : ['TSTEP'],
'tstepnls' : ['TSTEPNL', 'TSTEP1'],
'transfer_functions' : ['TF'],
'delays' : ['DELAY'],
'rotors' : ['ROTORG', 'ROTORD'],
'frequencies' : ['FREQ', 'FREQ1', 'FREQ2', 'FREQ3', 'FREQ4', 'FREQ5'],
# direct matrix input cards
'dmig' : ['DMIG'],
'dmiax' : ['DMIAX'],
'dmij' : ['DMIJ'],
'dmiji' : ['DMIJI'],
'dmik' : ['DMIK'],
'dmi' : ['DMI'],
'dti' : ['DTI'],
# optimization
'dequations' : ['DEQATN'],
'dtable' : ['DTABLE'],
'dconstrs' : ['DCONSTR', 'DCONADD'],
'desvars' : ['DESVAR'],
'topvar' : ['TOPVAR'],
'ddvals' : ['DDVAL'],
'dlinks' : ['DLINK'],
'dresps' : ['DRESP1', 'DRESP2', 'DRESP3'],
'dvprels' : ['DVPREL1', 'DVPREL2'],
'dvmrels' : ['DVMREL1', 'DVMREL2'],
'dvcrels' : ['DVCREL1', 'DVCREL2'],
'dvgrids' : ['DVGRID'],
'doptprm' : ['DOPTPRM'],
'dscreen' : ['DSCREEN'],
# sets
'asets' : ['ASET', 'ASET1'],
'omits' : ['OMIT', 'OMIT1'],
'bsets' : ['BSET', 'BSET1'],
'qsets' : ['QSET', 'QSET1'],
'csets' : ['CSET', 'CSET1'],
'usets' : ['USET', 'USET1'],
'sets' : ['SET1', 'SET3'],
# super-element sets
'se_bsets' : ['SEBSET', 'SEBSET1'],
'se_csets' : ['SECSET', 'SECSET1'],
'se_qsets' : ['SEQSET', 'SEQSET1'],
'se_usets' : ['SEUSET', 'SEUSET1'],
'se_sets' : ['SESET'],
'radset' : ['RADSET'],
'radcavs' : ['RADCAV', 'RADLST'],
'radmtx' : ['RADMTX'],
# SEBSEP
# parametric
'pset' : ['PSET'],
'pval' : ['PVAL'],
'gmcurv' : ['GMCURV'],
'gmsurf' : ['GMSURF'],
'feedge' : ['FEEDGE'],
'feface' : ['FEFACE'],
# tables
'tables' : [
'TABLEH1', 'TABLEHT',
'TABLES1', 'TABLEST',
],
'tables_d' : ['TABLED1', 'TABLED2', 'TABLED3', 'TABLED4', 'TABLED5'],
'tables_m' : ['TABLEM1', 'TABLEM2', 'TABLEM3', 'TABLEM4'],
'tables_sdamping' : ['TABDMP1'],
'random_tables' : ['TABRND1', 'TABRNDG'],
# initial conditions - sid (set ID)
##'TIC', (in bdf_tables.py)
# methods
'methods' : ['EIGB', 'EIGR', 'EIGRL'],
# cMethods
'cMethods' : ['EIGC', 'EIGP'],
# contact
'bctparas' : ['BCTPARA'],
'bcrparas' : ['BCRPARA'],
'bctparms' : ['BCTPARM'],
'bctadds' : ['BCTADD'],
'bctsets' : ['BCTSET'],
'bgadds' : ['BGADD'],
'bgsets' : ['BGSET'],
'bsurf' : ['BSURF'],
'bsurfs' : ['BSURFS'],
'bconp' : ['BCONP'],
'blseg' : ['BLSEG'],
'bfric' : ['BFRIC'],
'views' : ['VIEW'],
'view3ds' : ['VIEW3D'],
## other
#'INCLUDE', # '='
#'ENDDATA',
} # type: Dict[str, List[str]]
self._type_to_slot_map = self.get_rslot_map()
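# Sketch (not part of the original source): get_rslot_map, defined elsewhere,
# is assumed to invert the slot map above so a card name resolves to its
# storage slot, e.g.:
#     self._type_to_slot_map['CQUAD4']  # -> 'elements'
#     self._type_to_slot_map['MAT1']    # -> 'materials'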
@property
def type_slot_str(self) -> str:
"""helper method for printing supported cards"""
nchars = len('Card Group')
#nchars_cards = 0
for card_group in self._slot_to_type_map:
nchars = max(nchars, len(card_group))
nline = 58
fmt = '| %%-%ss | %%-%ss |\n' % (nchars, nline)
fmt_plus = '+%%-%ss+%%-%ss+\n' % (nchars + 2, nline + 2)
dash1 = '-' * (nchars + 2)
dash2 = '-' * (nline + 2)
dash_plus = fmt_plus % (dash1, dash2)
html_msg = [
dash_plus,
fmt % ('Card Group', 'Cards'),
]
for card_group, card_types in sorted(self._slot_to_type_map.items()):
valid_cards = [card_type for card_type in card_types
if card_type in self.cards_to_read]
valid_cards.sort()
if len(valid_cards) == 0:
continue
#i = 0
sublines = []
subline = ''
while valid_cards:
card_type = valid_cards.pop(0)
# the +2 is for the comma and space
len_card_type = len(card_type) + 2
nline_new = len(subline) + len_card_type
if nline_new > nline:
sublines.append(subline.rstrip(' '))
subline = ''
subline += '%s, ' % card_type
if subline:
sublines.append(subline.rstrip(', '))
html_msg.append(dash_plus)
for isub, subline in enumerate(sublines):
if isub > 0: # adds intermediate dash lines
html_msg.append(dash_plus)
html_msg.append(fmt % (card_group, subline))
card_group = ''
html_msg.append(dash_plus)
#for card_group, card_types in sorted(self._slot_to_type_map.items()):
#html_msg.append('| %s | %s |' % (card_group, ', '.join(card_types)))
#html_msg.append(
#fmt_plus % ('-'*(nchars + 2), '-'*(nline + 2))
#)
msg = ''.join(html_msg)
return msg
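# Usage sketch (illustrative; 'model' is an assumed BDF instance, not defined here):
#     print(model.type_slot_str)   # prints the card-group vs. supported-cards table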
@property
def nastran_format(self) -> str:
return self._nastran_format
@nastran_format.setter
def nastran_format(self, nastran_format: str) -> None:
fmt_lower = nastran_format.lower().strip()
if fmt_lower not in ['nx', 'msc', 'zona']:
raise RuntimeError(nastran_format)
self._nastran_format = fmt_lower
@property
def is_long_ids(self) -> bool:
return self._is_long_ids
#if self._nastran_format == 'nx' or self._is_long_ids:
#return True
#return False
@property
def sol(self) -> int:
"""gets the solution (e.g. 101, 103)"""
return self._sol
@sol.setter
def sol(self, sol: int) -> int:
"""sets the solution (e.g. 101, 103)"""
self._sol = sol
if len(self.executive_control_lines) == 0:
self.executive_control_lines = ['SOL %s' % sol, 'CEND']
self.sol_iline = 0
return self._sol
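# Usage sketch (illustrative; 'model' is an assumed BDF instance):
#     model.sol = 103   # also seeds executive_control_lines with ['SOL 103', 'CEND'] if empty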
@property
def subcases(self) -> Dict[int, Optional[Any]]:
"""gets the subcases"""
if self.case_control_deck is None:
return {}
return self.case_control_deck.subcases
#@property
#def grids(self):
#"""might be renaming self.nodes to self.grids"""
#return self.nodes
#@property.setter
#def grids(self, grids):
#"""might be renaming self.nodes to self.grids"""
#self.nodes = grids
@property
def nnodes(self) -> int:
"""gets the number of GRIDs"""
return len(self.nodes)
@property
def node_ids(self):
"""gets the GRID ids"""
return self.nodes.keys()
@property
def point_ids(self):
"""gets the GRID, SPOINT, EPOINT ids"""
return set(self.node_ids) | set(list(self.spoints.keys())) | set(list(self.epoints.keys()))
@property
def npoints(self) -> int:
"""gets the number of GRID, SPOINT, EPOINT ids"""
return len(self.point_ids)
#--------------------
# Elements CARDS
@property
def nelements(self) -> int:
"""gets the number of element"""
return len(self.elements)
@property
def element_ids(self):
"""gets the element ids"""
return self.elements.keys()
#--------------------
# Property CARDS
@property
def nproperties(self) -> int:
"""gets the number of properties"""
return len(self.properties)
@property
def property_ids(self):
"""gets the property ids"""
return self.properties.keys()
#--------------------
# Material CARDS
@property
def nmaterials(self) -> int:
"""gets the number of materials"""
return len(self.materials)
@property
def material_ids(self):
"""gets the material ids"""
return self.materials.keys()
#--------------------
# Coords CARDS
@property
def ncoords(self) -> int:
"""gets the number of coordinate systems"""
return len(self.coords)
@property
def coord_ids(self):
"""gets the number of coordinate system ids"""
return self.coords.keys()
#--------------------
@property
def ncaeros(self) -> int:
"""gets the number of CAEROx panels"""
return len(self.caeros)
@property
def caero_ids(self):
"""gets the CAEROx ids"""
return self.caeros.keys()
@property
def wtmass(self):
"""
Gets the PARAM,WTMASS value, which defines the weight to mass
conversion factor
kg -> kg : 1.0
lb -> slug : 1/32.2
lb -> slinch : 1/(32.2*12)=1/386.4
"""
wtmass = 1.0
if 'WTMASS' in self.params:
param = self.params['WTMASS']
wtmass = param.values[0]
return wtmass
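# Worked example (sketch): an lbf/inch model typically carries
# "PARAM, WTMASS, 0.002588" (~1/386.4), so model.wtmass returns 0.002588,
# while a consistent kg/N/m model usually omits the card and gets 1.0.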
def set_param(self, key: str, values: Union[int, float, str, List[float]], comment: str='') -> None:
"""sets a param card; creates it if necessary"""
if isinstance(values, (int, float, str)):
values = [values]
key = key.upper()
if key in self.params:
param = self.params[key]
param.update_values(*values)
else:
self.add_param(key, values, comment=comment)
def get_param(self, key: str, default: Union[int, float, str, List[float]]
) -> Union[int, float, str, List[float]]:
"""gets a param card"""
key = key.upper()
if key in self.params:
param = self.params[key]
return param.value
return default
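# Usage sketch (illustrative; 'model' is an assumed BDF instance):
#     model.set_param('POST', -1)           # creates PARAM,POST,-1 if missing
#     model.set_param('WTMASS', 0.002588)   # updates the existing card otherwise
#     post = model.get_param('POST', default=1)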
#--------------------
# deprecations
@property
def dmis(self) -> Dict[str, DMI]:
return self.dmi
@property
def dmigs(self) -> Dict[str, DMIG]:
return self.dmig
@property
def dmiks(self) -> Dict[str, DMIK]:
return self.dmik
@property
def dmijs(self) -> Dict[str, DMIJ]:
return self.dmij
@property
def dmijis(self) -> Dict[str, DMIJI]:
return self.dmiji
@dmis.setter
def dmis(self, dmi):
self.dmi = dmi
@dmigs.setter
def dmigs(self, dmig):
self.dmig = dmig
@dmiks.setter
def dmiks(self, dmik):
self.dmik = dmik
@dmijs.setter
def dmijs(self, dmij):
self.dmij = dmij
@dmijis.setter
def dmijis(self, dmiji):
self.dmiji = dmiji
```
#### File: cards/loads/loads.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
#from pyNastran.bdf.errors import CrossReferenceError
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard, _node_ids
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank, components_or_blank,
string, string_or_blank)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.field_writer_double import print_card_double
from pyNastran.utils.numpy_utils import integer_types, float_types
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
class Load(BaseCard):
"""defines the DefaultLoad class"""
type = 'DefLoad'
def __init__(self):
self.cid = None
self.nodes = None
@property
def node_ids(self):
"""get the node ids"""
try:
return self._node_ids()
except:
#raise
raise RuntimeError('error processing nodes for \n%s' % str(self))
def _node_ids(self, nodes=None):
"""returns node ids for repr functions"""
if not nodes:
nodes = self.nodes
if isinstance(nodes[0], integer_types):
return [node for node in nodes]
else:
return [node.nid for node in nodes]
class LoadCombination(BaseCard):
"""Common method for LOAD, DLOAD"""
def __init__(self, sid, scale, scale_factors, load_ids, comment=''):
"""
Common method for LOAD, DLOAD
Parameters
----------
sid : int
load id
scale : float
overall scale factor
scale_factors : List[float]
individual scale factors (corresponds to load_ids)
load_ids : List[int]
individual load_ids (corresponds to scale_factors)
comment : str; default=''
a comment for the card
"""
BaseCard.__init__(self)
if comment:
self.comment = comment
#: load ID
self.sid = sid
#: overall scale factor
self.scale = scale
#: individual scale factors (corresponds to load_ids)
if isinstance(scale_factors, float):
scale_factors = [scale_factors]
self.scale_factors = scale_factors
#: individual load_ids (corresponds to scale_factors)
if isinstance(load_ids, int):
load_ids = [load_ids]
self.load_ids = load_ids
assert 0 not in load_ids, self
self.load_ids_ref = None
def validate(self):
msg = ''
if not isinstance(self.scale, float_types):
msg += 'scale=%s must be a float; type=%s\n' % (self.scale, type(self.scale))
assert isinstance(self.scale_factors, list), self.scale_factors
assert isinstance(self.load_ids, list), self.load_ids
if len(self.scale_factors) != len(self.load_ids):
msg += 'scale_factors=%s load_ids=%s\n' % (self.scale_factors, self.load_ids)
if msg:
raise IndexError(msg)
for scalei, load_id in zip(self.scale_factors, self.get_load_ids()):
assert isinstance(scalei, float_types), scalei
assert isinstance(load_id, integer_types), load_id
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
scale = double(card, 2, 'scale')
scale_factors = []
load_ids = []
# alternating of scale factor & load set ID
nloads = len(card) - 3
assert nloads % 2 == 0, 'card=%s' % card
for i in range(nloads // 2):
n = 2 * i + 3
scale_factors.append(double(card, n, 'scale_factor'))
load_ids.append(integer(card, n + 1, 'load_id'))
assert len(card) > 3, 'len(%s card) = %i\ncard=%s' % (cls.__name__, len(card), card)
return cls(sid, scale, scale_factors, load_ids, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
sid = data[0]
scale = data[1]
scale_factors = data[2]
load_ids = data[3]
assert len(data) == 4, '%s data=%s' % (cls.type, data)
return cls(sid, scale, scale_factors, load_ids, comment=comment)
def LoadID(self, lid):
if isinstance(lid, integer_types):
return lid
elif isinstance(lid, list):
return lid[0].sid
else:
raise NotImplementedError(lid)
def get_load_ids(self):
"""
xref/non-xref way to get the load ids
"""
if self.load_ids_ref is None:
return self.load_ids
load_ids = []
supported_loads = [
'FORCE', 'FORCE1', 'FORCE2', 'MOMENT', 'MOMENT1', 'MOMENT2',
'PLOAD', 'PLOAD1', 'PLOAD2', 'PLOAD4', 'GRAV', 'SPCD', 'GMLOAD',
'RLOAD1', 'RLOAD2', 'TLOAD1', 'TLOAD2', 'PLOADX1', 'LOAD',
'RFORCE', 'RFORCE1', #'RFORCE2'
'ACCEL', 'ACCEL1', 'SLOAD', 'ACSRCE',
]
for loads in self.load_ids_ref:
load_idsi = []
for load in loads:
if isinstance(load, integer_types):
load_ids.append(load)
#elif load.type == 'LOAD':
#load_ids.append(load.sid)
elif load.type in supported_loads:
load_idsi.append(load.sid)
else:
msg = ('The get_load_ids method does not support %s cards.\n'
'%s' % (load.__class__.__name__, str(load)))
raise NotImplementedError(msg)
load_idi = list(set(load_idsi))
assert len(load_idi) == 1, load_idsi
load_ids.append(load_idi[0])
return load_ids
def get_loads(self):
"""
.. note:: requires a cross referenced load
"""
loads = []
for all_loads in self.load_ids_ref:
assert not isinstance(all_loads, int), 'all_loads=%s\n%s' % (str(all_loads), str(self))
for load in all_loads:
try:
loads += load.get_loads()
except RuntimeError:
print('recursion error on load=\n%s' % str(load))
raise
#loads += self.ID  # TODO: what does this mean? it was uncommented
return loads
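# Worked example (sketch, illustrative field values): a combination card such as
#     LOAD, 10, 0.5, 1.0, 101, 2.0, 102
# maps to sid=10, scale=0.5, scale_factors=[1.0, 2.0], load_ids=[101, 102]
# in the LOAD/DLOAD subclasses built on this base class, giving a resultant
# of 0.5*(1.0*L101 + 2.0*L102).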
class LSEQ(BaseCard): # Requires LOADSET in case control deck
"""
Defines a sequence of static load sets
.. todo:: how does this work...
+------+-----+----------+-----+-----+
| 1 | 2 | 3 | 4 | 5 |
+======+=====+==========+=====+=====+
| LSEQ | SID | EXCITEID | LID | TID |
+------+-----+----------+-----+-----+
ACSRCE : If there is no LOADSET Case Control command, then EXCITEID
may reference DAREA and SLOAD entries. If there is a LOADSET
Case Control command, then EXCITEID may reference DAREA
entries as well as SLOAD entries specified by the LID field
in the selected LSEQ entry corresponding to EXCITEID.
DAREA : Refer to RLOAD1, RLOAD2, TLOAD1, TLOAD2, or ACSRCE entries
for the formulas that define the scale factor Ai in dynamic
analysis.
DPHASE :
SLOAD : In the static solution sequences, the load set ID (SID) is
selected by the Case Control command LOAD. In the dynamic
solution sequences, SID must be referenced in the LID field
of an LSEQ entry, which in turn must be selected by the Case
Control command LOADSET.
LSEQ LID : Load set identification number of a set of static load
entries such as those referenced by the LOAD Case Control
command.
LSEQ, SID, EXCITEID, LID, TID
#--------------------------------------------------------------
# F:\\Program Files\\Siemens\\NXNastran\\nxn10p1\\nxn10p1\\nast\\tpl\\cube_iter.dat
DLOAD 1001 1.0 1.0 55212
sid = 1001
load_id = [55212] -> RLOAD2.SID
RLOAD2, SID, EXCITEID, DELAYID, DPHASEID, TB, TP, TYPE
RLOAD2 55212 55120 55122 55123 55124
EXCITEID = 55120 -> DAREA.SID
DPHASEID = 55122 -> DPHASE.SID
DAREA SID NID COMP SCALE
DAREA 55120 913 3 9.9E+9
SID = 55120 -> RLOAD2.SID
DPHASE SID POINTID C1 TH1
DPHASE 55122 913 3 -90.0
SID = 55122
POINTID = 913 -> GRID.NID
GRID NID X Y Z
GRID 913 50. 0.19 -39.9
"""
type = 'LSEQ'
@classmethod
def _init_from_empty(cls):
sid = 1
excite_id = 2
lid = 3
return LSEQ(sid, excite_id, lid, tid=None, comment='')
def __init__(self, sid, excite_id, lid, tid=None, comment=''):
"""
Creates a LSEQ card
Parameters
----------
sid : int
loadset id; LOADSET points to this
excite_id : int
set id assigned to this static load vector
lid : int
load set id of a set of static load entries;
LOAD in the Case Control
tid : int; default=None
temperature set id of a set of thermal load entries;
TEMP(LOAD) in the Case Control
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
self.excite_id = excite_id
self.lid = lid
self.tid = tid
self.lid_ref = None
self.tid_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a LSEQ card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
excite_id = integer(card, 2, 'excite_id')
load_id = integer_or_blank(card, 3, 'lid')
temp_id = integer_or_blank(card, 4, 'tid')
if load_id is None and temp_id is None:
msg = 'LSEQ load_id/temp_id must not be None; load_id=%s temp_id=%s' % (load_id, temp_id)
raise RuntimeError(msg)
assert len(card) <= 5, 'len(LSEQ card) = %i\ncard=%s' % (len(card), card)
return LSEQ(sid, excite_id, load_id, tid=temp_id, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds an LSEQ card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
sid = data[0]
excite_id = data[1]
lid = data[2]
tid = data[3]
return LSEQ(sid, excite_id, lid, tid, comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by LSEQ=%s' % (self.sid)
if self.lid is not None:
self.lid_ref = model.Load(self.lid, consider_load_combinations=True, msg=msg)
#self.excite_id = model.Node(self.excite_id, msg=msg)
if self.tid:
# TODO: temperature set, not a table?
self.tid_ref = model.Load(self.tid, consider_load_combinations=True, msg=msg)
#self.tid_ref = model.Table(self.tid, msg=msg)
def safe_cross_reference(self, model, xref_errors):
return self.cross_reference(model)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.lid = self.Lid()
self.tid = self.Tid()
self.lid_ref = None
self.tid_ref = None
def LoadID(self, lid):
# type: (int) -> int
if isinstance(lid, list):
sid = self.LoadID(lid[0])
elif isinstance(lid, integer_types):
sid = lid
else:
sid = lid.sid
return sid
def get_loads(self):
# type: () -> Any
return self.lid_ref
def Lid(self):
# type: () -> int
if self.lid_ref is not None:
return self.LoadID(self.lid_ref)
return self.lid
#@property
#def node_id(self):
#print('self.excite_id =', self.excite_id)
#if isinstance(self.excite_id, integer_types):
#return self.excite_id
#return self.excite_id.nid
#def Tid(self):
#if self.tid_ref is not None:
#return self.tid_ref.tid
#return self.tid
def Tid(self):
if self.tid_ref is not None:
return self.LoadID(self.tid_ref)
return self.tid
def raw_fields(self):
list_fields = ['LSEQ', self.sid, self.excite_id, self.Lid(), self.Tid()]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
return self.comment + print_card_8(card)
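# Usage sketch (illustrative ids, not from the original source):
#     lseq = LSEQ(sid=100, excite_id=200, lid=300, tid=None,
#                 comment='static load vector selected by LOADSET=100')
#     print(lseq.write_card())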
class LOADCYN(Load):
type = 'LOADCYN'
@classmethod
def _init_from_empty(cls):
sid = 1
scale = 1.
segment_id = 2
scales = [1., 2.]
load_ids = [10, 20]
return LOADCYN(sid, scale, segment_id, scales, load_ids, segment_type=None, comment='')
def __init__(self, sid, scale, segment_id, scales, load_ids, segment_type=None, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.scale = scale
self.segment_id = segment_id
self.scales = scales
self.load_ids = load_ids
self.segment_type = segment_type
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a LOADCYN card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
scale = double(card, 2, 'scale')
segment_id = integer(card, 3, 'segment_id')
segment_type = string_or_blank(card, 4, 'segment_type')
scalei = double(card, 5, 'scale1')
loadi = integer(card, 6, 'load1')
scales = [scalei]
load_ids = [loadi]
scalei = double_or_blank(card, 7, 'scale2')
if scalei is not None:
loadi = integer_or_blank(card, 8, 'load2')
scales.append(scalei)
load_ids.append(loadi)
return LOADCYN(sid, scale, segment_id, scales, load_ids,
segment_type=segment_type, comment=comment)
def get_loads(self):
return [self]
def cross_reference(self, model: BDF) -> None:
pass
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
pass
def safe_cross_reference(self, model, xref_errors):
return self.cross_reference(model)
def raw_fields(self):
end = []
for scale, load in zip(self.scales, self.load_ids):
end += [scale, load]
list_fields = ['LOADCYN', self.sid, self.scale, self.segment_id, self.segment_type
] + end
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
return self.comment + print_card_8(card)
class LOADCYH(BaseCard):
"""
Harmonic Load Input for Cyclic Symmetry
Defines the harmonic coefficients of a static or dynamic load for
use in cyclic symmetry analysis.
+---------+-----+----------+-----+-------+-----+------+------+------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=========+=====+==========+=====+=======+=====+======+======+======+
| LOADCYH | SID | S | HID | HTYPE | S1 | L1 | S2 | L2 |
+---------+-----+----------+-----+-------+-----+------+------+------+
"""
type = 'LOADCYH'
@classmethod
def _init_from_empty(cls):
sid = 1
scale = 1.
hid = 0
htype = 'C'
scales = [1.]
load_ids = [2]
return LOADCYH(sid, scale, hid, htype, scales, load_ids, comment='')
def __init__(self, sid, scale, hid, htype, scales, load_ids, comment=''):
"""
Creates a LOADCYH card
Parameters
----------
sid : int
loadset id; LOADSET points to this
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
self.scale = scale
self.hid = hid
self.htype = htype
self.scales = scales
self.load_ids = load_ids
assert htype in {'C', 'S', 'CSTAR', 'SSTAR', 'GRAV', 'RFORCE', None}, htype
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a LOADCYH card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
scale = double(card, 2, 's')
hid = integer(card, 3, 'hid')
htype = string_or_blank(card, 4, 'htype')
scale1 = double(card, 5, 'scale1')
load1 = integer_or_blank(card, 6, 'load1')
scale2 = double_or_blank(card, 7, 'scale2')
load2 = integer_or_blank(card, 8, 'load2')
scales = []
load_ids = []
if load1 != 0:
load_ids.append(load1)
scales.append(scale1)
if load2 != 0:
load_ids.append(load2)
scales.append(scale2)
assert len(card) <= 9, 'len(LOADCYH card) = %i\ncard=%s' % (len(card), card)
return LOADCYH(sid, scale, hid, htype, scales, load_ids, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a LOADCYH card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
raise NotImplementedError()
#sid = data[0]
#excite_id = data[1]
#lid = data[2]
#tid = data[3]
#return LSEQ(sid, excite_id, lid, tid, comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by LOADCYH=%s' % (self.sid)
def safe_cross_reference(self, model, xref_errors):
pass
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
pass
def get_loads(self):
return [self]
def raw_fields(self):
list_fields = ['LOADCYH', self.sid, self.scale, self.hid, self.htype]
for scale, load_id in zip(self.scales, self.load_ids):
list_fields += [scale, load_id]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
return self.comment + print_card_8(card)
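# Usage sketch (illustrative ids, not from the original source):
#     loadcyh = LOADCYH(sid=10, scale=1.0, hid=0, htype='GRAV',
#                       scales=[1.0], load_ids=[2])
#     print(loadcyh.write_card())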
class DAREA(BaseCard):
"""
Defines scale (area) factors for static and dynamic loads. In dynamic
analysis, DAREA is used in conjunction with ACSRCE, RLOADi and TLOADi
entries.
RLOAD1 -> DAREA by SID
+-------+-----+----+----+-----+----+----+------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+=======+=====+====+====+=====+====+====+======+
| DAREA | SID | P1 | C1 | A1 | P2 | C2 | A2 |
+-------+-----+----+----+-----+----+----+------+
| DAREA | 3 | 6 | 2 | 8.2 | 15 | 1 | 10.1 |
+-------+-----+----+----+-----+----+----+------+
"""
type = 'DAREA'
_properties = ['node_ids']
@classmethod
def _init_from_empty(cls):
sid = 1
nodes = [1]
components = [1]
scales = [1.]
return DAREA(sid, nodes, components, scales, comment='')
def __init__(self, sid, nodes, components, scales, comment=''):
"""
Creates a DAREA card
Parameters
----------
sid : int
darea id
nodes : List[int]
GRID, EPOINT, SPOINT id
components : List[int]
Component number. (0-6; 0-EPOINT/SPOINT; 1-6 GRID)
scales : List[float]
Scale (area) factor
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
if isinstance(nodes, integer_types):
nodes = [nodes]
if isinstance(components, integer_types):
components = [components]
if isinstance(scales, float):
scales = [scales]
self.nodes = nodes
self.components = components
assert isinstance(components, list), 'components=%r' % components
for component in components:
assert 0 <= component <= 6, 'component=%r' % component
self.scales = scales
self.nodes_ref = None
@classmethod
def add_card(cls, card, icard=0, comment=''):
noffset = 3 * icard
sid = integer(card, 1, 'sid')
nid = integer(card, 2 + noffset, 'p')
component = int(components_or_blank(card, 3 + noffset, 'c', 0))
scale = double(card, 4 + noffset, 'scale')
return DAREA(sid, nid, component, scale, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a DAREA card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
sid = data[0]
p = data[1]
c = data[2]
scale = data[3]
assert len(data) == 4, 'data = %s' % data
return DAREA(sid, p, c, scale, comment=comment)
def add(self, darea):
assert self.sid == darea.sid, 'sid=%s darea.sid=%s' % (self.sid, darea.sid)
if darea.comment:
if hasattr(self, '_comment'):
self._comment += darea.comment
else:
self._comment = darea.comment
self.nodes += darea.nodes
self.components += darea.components
self.scales += darea.scales
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by DAREA=%s' % (self.sid)
self.nodes_ref = model.Nodes(self.node_ids, msg=msg)
def safe_cross_reference(self, model, xref_errors, debug=True):
nids2 = []
msg = ', which is required by DAREA=%s' % (self.sid)
for nid in self.node_ids:
try:
nid2 = model.Node(nid, msg=msg)
except KeyError:
if debug:
msg = 'Could not find nid=%i, which is required by DAREA=%s' % (
nid, self.sid)
print(msg)
continue
nids2.append(nid2)
self.nodes_ref = nids2
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nodes_ref = None
@property
def node_ids(self):
if self.nodes_ref is None:
return self.nodes
msg = ', which is required by DAREA=%s' % (self.sid)
return _node_ids(self, nodes=self.nodes_ref, allow_empty_nodes=False, msg=msg)
def raw_fields(self):
for nid, comp, scale in zip(self.node_ids, self.components, self.scales):
list_fields = ['DAREA', self.sid, nid, comp, scale]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
msg = self.comment
for nid, comp, scale in zip(self.node_ids, self.components, self.scales):
msg += print_card_8(['DAREA', self.sid, nid, comp, scale])
return msg
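# Usage sketch, mirroring the docstring example above:
#     darea = DAREA(sid=3, nodes=[6, 15], components=[2, 1], scales=[8.2, 10.1])
#     print(darea.write_card())   # writes one DAREA line per node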
class DynamicLoad(BaseCard):
def __init__(self):
pass
class SPCD(Load):
"""
Defines an enforced displacement value for static analysis and an
enforced motion value (displacement, velocity or acceleration) in
dynamic analysis.
+------+-----+-----+-----+------+----+----+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+======+=====+=====+=====+======+====+====+====+
| SPCD | SID | G1 | C1 | D1 | G2 | C2 | D2 |
+------+-----+-----+-----+------+----+----+----+
| SPCD | 100 | 32 | 436 | -2.6 | 5 | 2 | .9 |
+------+-----+-----+-----+------+----+----+----+
"""
type = 'SPCD'
_properties = ['node_ids']
@classmethod
def _init_from_empty(cls):
sid = 1
nodes = [1]
components = ['1']
enforced = 1.
return SPCD(sid, nodes, components, enforced, comment='')
def __init__(self, sid, nodes, components, enforced, comment=''):
"""
Creates an SPCD card, which defines the degree of freedoms to be
set during enforced motion
Parameters
----------
sid : int
constraint id
nodes : List[int]
GRID/SPOINT ids
components : List[str]
the degree of freedoms to constrain (e.g., '1', '123')
enforced : List[float]
the constrained value for the given node (typically 0.0)
comment : str; default=''
a comment for the card
.. note:: len(nodes) == len(components) == len(enforced)
.. warning:: Non-zero enforced deflection requires an SPC/SPC1 as well.
Yes, you really want to constrain the deflection to 0.0
with an SPC1 card and then reset the deflection using an
SPCD card.
"""
if comment:
self.comment = comment
self.sid = sid
if isinstance(nodes, int):
nodes = [nodes]
if isinstance(components, str):
components = [components]
elif isinstance(components, int):
components = [str(components)]
if isinstance(enforced, float):
enforced = [enforced]
self.nodes = nodes
self.components = components
self.enforced = enforced
self.nodes_ref = None
assert isinstance(self.nodes, list), self.nodes
assert isinstance(self.components, list), self.components
assert isinstance(self.enforced, list), self.enforced
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a SPCD card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
if card.field(5) in [None, '']:
nodes = [integer(card, 2, 'G1'),]
components = [components_or_blank(card, 3, 'C1', 0)]
enforced = [double_or_blank(card, 4, 'D1', 0.0)]
else:
nodes = [
integer(card, 2, 'G1'),
integer(card, 5, 'G2'),
]
# 0 if scalar point; 1-6 if grid
components = [components_or_blank(card, 3, 'C1', 0),
components_or_blank(card, 6, 'C2', 0)]
enforced = [double_or_blank(card, 4, 'D1', 0.0),
double_or_blank(card, 7, 'D2', 0.0)]
return SPCD(sid, nodes, components, enforced, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds an SPCD card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
sid = data[0]
nodes = [data[1]]
components = [data[2]]
enforced = [data[3]]
return SPCD(sid, nodes, components, enforced, comment=comment)
@property
def constraints(self):
self.deprecated('constraints', 'components', '1.2')
return self.components
@constraints.setter
def constraints(self, constraints):
self.deprecated('constraints', 'components', '1.2')
self.components = constraints
@property
def node_ids(self):
if self.nodes_ref is None:
return self.nodes
msg = ', which is required by SPCD=%s' % (self.sid)
return _node_ids(self, nodes=self.nodes_ref, allow_empty_nodes=True, msg=msg)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by SPCD=%s' % (self.sid)
self.nodes_ref = model.EmptyNodes(self.nodes, msg=msg)
def safe_cross_reference(self, model, xref_errors, debug=True):
msg = ', which is required by SPCD=%s' % (self.sid)
self.nodes_ref = model.EmptyNodes(self.nodes, msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nodes = self.node_ids
self.nodes_ref = None
def get_loads(self):
return [self]
def raw_fields(self):
fields = ['SPCD', self.sid]
for (nid, component, enforced) in zip(self.node_ids, self.components,
self.enforced):
fields += [nid, component, enforced]
return fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
if size == 8:
return self.comment + print_card_8(card)
elif is_double:
return self.comment + print_card_double(card)
return self.comment + print_card_16(card)
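# Usage sketch, mirroring the docstring example above (SPC/SPC1 entries are
# still required to actually constrain these DOFs):
#     spcd = SPCD(sid=100, nodes=[32, 5], components=['436', '2'],
#                 enforced=[-2.6, 0.9])
#     print(spcd.write_card())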
class DEFORM(Load):
"""
Defines an enforced displacement value for static analysis.
+--------+-----+-----+------+----+----+----+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+========+=====+=====+======+====+====+====+====+
| DEFORM | SID | E1 | D1 | E2 | D2 | E3 | D3 |
+--------+-----+-----+------+----+----+----+----+
| DEFORM | 100 | 32 | -2.6 | 5 | .9 | 6 | .9 |
+--------+-----+-----+------+----+----+----+----+
"""
type = 'DEFORM'
@classmethod
def _init_from_empty(cls):
sid = 1
eid = 1
deformation = 1.
return DEFORM(sid, eid, deformation, comment='')
def __init__(self, sid, eid, deformation, comment=''):
"""
Creates a DEFORM card, which defines an applied deformation on
a 1D element. Links to the DEFORM card in the case control
deck.
Parameters
----------
sid : int
load id
eid : int
CTUBE/CROD/CONROD/CBAR/CBEAM element id
deformation : float
the applied deformation
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
self.eid = eid
self.deformation = deformation
self.eid_ref = None
@classmethod
def add_card(cls, card, icard=0, comment=''):
"""
Adds a DEFORM card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
offset = 2 * icard
sid = integer(card, 1, 'sid')
eid = integer(card, 2 + offset, 'eid%i' % (icard + 1))
deformation = double(card, 3 + offset, 'D%i' % (icard + 1))
return DEFORM(sid, eid, deformation, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds an DEFORM card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
sid = data[0]
eid = data[1]
deformation = data[2]
return DEFORM(sid, eid, deformation, comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by DEFORM=%s' % (self.sid)
self.eid_ref = model.Element(self.eid, msg)
def safe_cross_reference(self, model, xref_errors, debug=True):
msg = ', which is required by DEFORM=%s' % (self.sid)
self.eid_ref = model.safe_element(self.eid, self.sid, xref_errors, msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.eid = self.Eid()
self.eid_ref = None
def get_loads(self):
return [self]
def Eid(self):
if self.eid_ref is None:
return self.eid
return self.eid_ref.eid
def raw_fields(self):
fields = ['DEFORM', self.sid, self.Eid(), self.deformation]
return fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
return self.comment + print_card_8(card)
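# Usage sketch, mirroring the first pair of the docstring example
# (one DEFORM object per element/deformation pair):
#     deform = DEFORM(sid=100, eid=32, deformation=-2.6)
#     print(deform.write_card())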
class SLOAD(Load):
"""
Static Scalar Load
Defines concentrated static loads on scalar or grid points.
+-------+-----+----+-----+----+------+----+-------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+=======+=====+====+=====+====+======+====+=======+
| SLOAD | SID | S1 | F1 | S2 | F2 | S3 | F3 |
+-------+-----+----+-----+----+------+----+-------+
| SLOAD | 16 | 2 | 5.9 | 17 | -6.3 | 14 | -2.93 |
+-------+-----+----+-----+----+------+----+-------+
.. note:: Can be used in statics OR dynamics.
If Si refers to a grid point, the load is applied to component T1 of the
displacement coordinate system (see the CD field on the GRID entry).
"""
type = 'SLOAD'
_properties = ['node_ids']
@classmethod
def _init_from_empty(cls):
sid = 1
nodes = [1]
mags = [1.]
return SLOAD(sid, nodes, mags, comment='')
def __init__(self, sid, nodes, mags, comment=''):
"""
Creates an SLOAD (GRID/SPOINT load)
Parameters
----------
sid : int
load id
nodes : int; List[int]
the GRID/SPOINT ids
mags : float; List[float]
the load magnitude
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
if isinstance(nodes, integer_types):
nodes = [nodes]
if isinstance(mags, float):
mags = [mags]
#: load ID
self.sid = sid
self.nodes = nodes
self.mags = mags
self.nodes_ref = None
def validate(self):
assert len(self.nodes) == len(self.mags), 'len(nodes)=%s len(mags)=%s' % (len(self.nodes), len(self.mags))
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a SLOAD card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nfields = len(card) - 2
ngroups = nfields // 2
if nfields % 2 == 1:
ngroups += 1
msg = 'Missing last magnitude on SLOAD card=%s' % card.fields()
raise RuntimeError(msg)
nodes = []
mags = []
for i in range(ngroups):
j = 2 * i + 2
nodes.append(integer(card, j, 'nid' + str(i)))
mags.append(double(card, j + 1, 'mag' + str(i)))
return SLOAD(sid, nodes, mags, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds an SLOAD card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(sid, nid, scale_factor) = data
return SLOAD(sid, [nid], [scale_factor], comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by SLOAD=%s' % (self.sid)
self.nodes_ref = []
for nid in self.nodes:
self.nodes_ref.append(model.Node(nid, msg=msg))
#self.nodes_ref = model.EmptyNodes(self.nodes, msg=msg)
def safe_cross_reference(self, model, xref_errors):
return self.cross_reference(model)
#msg = ', which is required by SLOAD=%s' % (self.sid)
#self.nodes_ref = model.safe_empty_nodes(self.nodes, msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nodes = self.node_ids
self.nodes_ref = None
def Nid(self, node):
if isinstance(node, integer_types):
return node
return node.nid
def get_loads(self):
"""
.. todo:: not done
"""
return []
@property
def node_ids(self):
if self.nodes_ref is None:
return self.nodes
return [self.Nid(nid) for nid in self.nodes_ref]
def raw_fields(self):
list_fields = ['SLOAD', self.sid]
for nid, mag in zip(self.node_ids, self.mags):
list_fields += [nid, mag]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
return self.comment + print_card_8(card)
def write_card_16(self, is_double: bool=False) -> str:
card = self.raw_fields()
if is_double:
return self.comment + print_card_double(card)
return self.comment + print_card_16(card)
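# Usage sketch, mirroring the docstring example above:
#     sload = SLOAD(sid=16, nodes=[2, 17, 14], mags=[5.9, -6.3, -2.93])
#     print(sload.write_card())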
class RFORCE(Load):
type = 'RFORCE'
_properties = ['node_id']
@classmethod
def _init_from_empty(cls):
sid = 1
nid = 1
scale = 1.
r123 = [1., 0., 1.]
return RFORCE(sid, nid, scale, r123,
cid=0, method=1, racc=0., mb=0, idrf=0, comment='')
def __init__(self, sid, nid, scale, r123, cid=0, method=1, racc=0.,
mb=0, idrf=0, comment=''):
"""
idrf doesn't exist in MSC 2005r2; exists in MSC 2016
Parameters
----------
sid : int
load set id
nid : int
grid point through which the rotation vector acts
scale : float
scale factor of the angular velocity in revolutions/time
r123 : List[float, float, float] / (3, ) float ndarray
rectangular components of the rotation vector R that passes
through point G (R1**2+R2**2+R3**2 > 0 unless A and RACC are
both zero).
cid : int; default=0
Coordinate system defining the components of the rotation vector.
method : int; default=1
Method used to compute centrifugal forces due to angular velocity.
racc : float; default=0.0
Scale factor of the angular acceleration in revolutions per
unit time squared.
mb : int; default=0
Indicates whether the CID coordinate system is defined in the main
Bulk Data Section (MB = -1) or the partitioned superelement Bulk
Data Section (MB = 0). Coordinate systems referenced in the main
Bulk Data Section are considered stationary with respect to the
assembly basic coordinate system.
idrf : int; default=0
ID indicating to which portion of the structure this particular
RFORCE entry applies. It is possible to have multiple RFORCE
entries in the same subcase for SOL 600 to represent different
portions of the structure with different rotational accelerations.
IDRF corresponds to a SET3 entry specifying the elements with this
acceleration. A BRKSQL entry may also be specified with a matching
IDRF entry.
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
self.nid = nid
self.cid = cid
self.scale = scale
self.r123 = r123
self.method = method
self.racc = racc
self.mb = mb
self.idrf = idrf
self.nid_ref = None
self.cid_ref = None
self.validate()
def validate(self):
assert self.method in [1, 2], self.method
assert isinstance(self.r123, list), self.r123
assert isinstance(self.scale, float), self.scale
assert isinstance(self.cid, int), self.cid
assert isinstance(self.mb, int), self.mb
assert isinstance(self.idrf, int), self.idrf
assert isinstance(self.racc, float), self.racc
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a RFORCE card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nid = integer_or_blank(card, 2, 'nid', 0)
cid = integer_or_blank(card, 3, 'cid', 0)
scale = double_or_blank(card, 4, 'scale', 1.)
r1 = double_or_blank(card, 5, 'r1', 0.)
r2 = double_or_blank(card, 6, 'r2', 0.)
r3 = double_or_blank(card, 7, 'r3', 0.)
method = integer_or_blank(card, 8, 'method', 1)
racc = double_or_blank(card, 9, 'racc', 0.)
mb = integer_or_blank(card, 10, 'mb', 0)
idrf = integer_or_blank(card, 11, 'idrf', 0)
assert len(card) <= 12, 'len(RFORCE card) = %i\ncard=%s' % (len(card), card)
return RFORCE(sid, nid, scale, [r1, r2, r3],
cid=cid, method=method, racc=racc, mb=mb, idrf=idrf, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a RFORCE card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
sid, nid, cid, a, r1, r2, r3, method, racc, mb = data
scale = 1.0
return RFORCE(sid, nid, scale, [r1, r2, r3], cid=cid, method=method, racc=racc, mb=mb,
idrf=0, comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by RFORCE sid=%s' % self.sid
if self.nid > 0:
self.nid_ref = model.Node(self.nid, msg=msg)
self.cid_ref = model.Coord(self.cid, msg=msg)
def safe_cross_reference(self, model, xref_errors):
msg = ', which is required by RFORCE sid=%s' % self.sid
if self.nid > 0:
self.nid_ref = model.Node(self.nid, msg=msg)
self.cid_ref = model.safe_coord(self.cid, self.sid, xref_errors, msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nid = self.Nid()
self.cid = self.Cid()
self.nid_ref = None
self.cid_ref = None
@property
def node_id(self):
if self.nid_ref is not None:
return self.nid_ref.nid
return self.nid
def Nid(self):
return self.node_id
def Cid(self):
if self.cid_ref is not None:
return self.cid_ref.cid
return self.cid
def get_loads(self):
return [self]
def raw_fields(self):
list_fields = (['RFORCE', self.sid, self.node_id, self.Cid(), self.scale] +
list(self.r123) + [self.method, self.racc, self.mb, self.idrf])
return list_fields
def repr_fields(self):
#method = set_blank_if_default(self.method,1)
racc = set_blank_if_default(self.racc, 0.)
mb = set_blank_if_default(self.mb, 0)
idrf = set_blank_if_default(self.idrf, 0)
list_fields = (['RFORCE', self.sid, self.node_id, self.Cid(), self.scale] +
list(self.r123) + [self.method, racc, mb, idrf])
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
elif is_double:
return self.comment + print_card_double(card)
return self.comment + print_card_16(card)
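# Usage sketch (illustrative values, not from the original source): an angular
# velocity of 6.5 rev/time about the basic z-axis through grid 1000:
#     rforce = RFORCE(sid=200, nid=1000, scale=6.5, r123=[0., 0., 1.])
#     print(rforce.write_card())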
class RFORCE1(Load):
"""
NX Nastran specific card
+---------+------+----+---------+---+----+----+----+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=========+======+====+=========+===+====+====+====+========+
| RFORCE1 | SID | G | CID | A | R1 | R2 | R3 | METHOD |
+---------+------+----+---------+---+----+----+----+--------+
| | RACC | MB | GROUPID | | | | | |
+---------+------+----+---------+---+----+----+----+--------+
"""
type = 'RFORCE1'
_properties = ['node_id']
@classmethod
def _init_from_empty(cls):
sid = 1
nid = 1
scale = 1.
group_id = 1
return RFORCE1(sid, nid, scale, group_id,
cid=0, r123=None, racc=0., mb=0, method=2, comment='')
def __init__(self, sid, nid, scale, group_id,
cid=0, r123=None, racc=0., mb=0, method=2, comment=''):
"""
Creates an RFORCE1 card
Parameters
----------
sid : int
load set id
nid : int
grid point through which the rotation vector acts
scale : float
scale factor of the angular velocity in revolutions/time
r123 : List[float, float, float] / (3, ) float ndarray
rectangular components of the rotation vector R that passes
through point G (R1**2+R2**2+R3**2 > 0 unless A and RACC are
both zero).
racc : float; default=0.0
Scale factor of the angular acceleration in revolutions per
unit time squared.
mb : int; default=0
Indicates whether the CID coordinate system is defined in the main
Bulk Data Section (MB = -1) or the partitioned superelement Bulk
Data Section (MB = 0). Coordinate systems referenced in the main
Bulk Data Section are considered stationary with respect to the
assembly basic coordinate system.
group_id : int
Group identification number. The GROUP entry referenced in the
GROUPID field selects the grid points to which the load is applied.
cid : int; default=0
Coordinate system defining the components of the rotation vector.
method : int; default=2
Method used to compute centrifugal forces due to angular velocity.
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
self.nid = nid
self.cid = cid
self.scale = scale
if r123 is None:
self.r123 = np.array([1., 0., 0.])
else:
self.r123 = np.asarray(r123)
self.method = method
self.racc = racc
self.mb = mb
self.group_id = group_id
self.nid_ref = None
self.cid_ref = None
def validate(self):
if not np.linalg.norm(self.r123) > 0.:
msg = 'r123=%s norm=%s' % (self.r123, np.linalg.norm(self.r123))
raise RuntimeError(msg)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a RFORCE1 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nid = integer_or_blank(card, 2, 'nid', 0)
cid = integer_or_blank(card, 3, 'cid', 0)
scale = double_or_blank(card, 4, 'scale', 1.)
r123 = [
double_or_blank(card, 5, 'r1', 1.),
double_or_blank(card, 6, 'r2', 0.),
double_or_blank(card, 7, 'r3', 0.),
]
method = integer_or_blank(card, 8, 'method', 1)
racc = double_or_blank(card, 9, 'racc', 0.)
mb = integer_or_blank(card, 10, 'mb', 0)
group_id = integer_or_blank(card, 11, 'group_id', 0)
assert len(card) <= 12, 'len(RFORCE1 card) = %i\ncard=%s' % (len(card), card)
return RFORCE1(sid, nid, scale, cid=cid, r123=r123, racc=racc,
mb=mb, group_id=group_id, method=method, comment=comment)
def get_loads(self):
return [self]
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by RFORCE1 sid=%s' % self.sid
#if self.nid > 0: # TODO: why was this ever here?
self.nid_ref = model.Node(self.nid, msg=msg)
self.cid_ref = model.Coord(self.cid, msg=msg)
def safe_cross_reference(self, model, xref_errors):
msg = ', which is required by RFORCE1 sid=%s' % self.sid
#if self.nid > 0: # TODO: why was this ever here?
self.nid_ref = model.Node(self.nid, msg=msg)
self.cid_ref = model.safe_coord(self.cid, self.sid, xref_errors, msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nid = self.node_id
self.cid = self.Cid()
self.nid_ref = None
self.cid_ref = None
@property
def node_id(self):
if self.nid_ref is not None:
return self.nid_ref.nid
return self.nid
def Nid(self):
return self.node_id
def Cid(self):
if self.cid_ref is not None:
return self.cid_ref.cid
return self.cid
def raw_fields(self):
list_fields = (['RFORCE1', self.sid, self.node_id, self.Cid(), self.scale]
+ list(self.r123) + [self.method, self.racc,
self.mb, self.group_id])
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
if is_double:
return self.comment + print_card_double(card)
return self.comment + print_card_16(card)
```
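Below is a brief, hedged usage sketch for the RFORCE/RFORCE1 classes above; the field values, the GROUP id, and the import path are illustrative assumptions and may differ between pyNastran versions.
```python
# Illustrative only: build the rotational-load cards defined above and print
# their small-field card images. The import path is an assumption.
from pyNastran.bdf.cards.loads.loads import RFORCE, RFORCE1

# 25 rev/unit-time about +z, acting through grid 1001, basic coordinate system
rforce = RFORCE(10, 1001, 25.0, [0., 0., 1.],
                cid=0, method=1, racc=0., mb=0, idrf=0)
rforce.validate()
print(rforce.write_card(size=8))

# NX-specific variant: same rotation, applied to the grids selected by GROUP 100
rforce1 = RFORCE1(11, 1001, 25.0, 100, cid=0, r123=[0., 0., 1.], method=2)
rforce1.validate()                 # raises if the rotation vector has zero norm
print(rforce1.write_card(size=8))
```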
#### File: bdf/mesh_utils/find_coplanar_elements.py
```python
from __future__ import annotations
from typing import List, Optional, Set, Tuple, Union, TYPE_CHECKING
import numpy as np
from pyNastran.bdf.mesh_utils.internal_utils import get_bdf_model
if TYPE_CHECKING:
from pyNastran.bdf.bdf import BDF
def find_coplanar_triangles(bdf_filename: Union[BDF, str],
eids: Optional[List[int]]=None) -> Tuple[BDF, Set[int]]:
"""
Finds coplanar triangles
Parameters
----------
bdf_filename : BDF/str
BDF: a model
str: the path to the bdf input file
eids : list
the element ids to consider
Returns
-------
model : BDF
the loaded model
coplanar_eids : Set[int]
the ids of the coplanar (duplicate) triangles
"""
model = get_bdf_model(bdf_filename, xref=False, log=None, debug=False)
log = model.log
if eids is None:
eids = model.elements.keys()
i = 0
eids_removed = []
neids = len(eids)
nids = np.zeros((neids, 3), dtype='int32')
for eid in eids:
elem = model.elements[eid]
try:
nids[i, :] = elem.nodes
except ValueError:
eids_removed.append(eid)
assert len(elem.nodes) != 3, str(elem)
continue
i += 1
if i != neids:
log.warning(f'removed {neids-i} non-triangles; eids_removed={eids_removed}')
nids = nids[:i, :]
#nids = np.array([
#[10, 20, 30],
#[20, 30, 10],
#[10, 30, 20],
#], dtype='int32')
# [1, 2, 3]
# [2, 3, 1]
# [1, 3, 2]
#imin = nids.argmin(axis=1)
#imax = nids.argmax(axis=1)
imin = nids.min(axis=1)
imax = nids.max(axis=1)
#print('imin = %s' % (imin)) # [0, 2, 0]
#print('imax = %s' % (imax)) # [2, 1, 1]
imid = []
for row, imini, imaxi in zip(nids, imin, imax):
#a = [imini, imaxi]
#print(row, imini, imaxi)
a = list(row)
#a.remove(row[imini])
#a.remove(row[imaxi])
#print(a)
a.remove(imini)
#print(a)
a.remove(imaxi)
#print(a)
#print('')
imid.append(a[0])
#print('imid = %s' % (imid)) # [1, 0, 2]
nids2 = np.vstack([imin, imid, imax]).T
aset = set()
eids_to_remove = set()
for eid, row in zip(eids, nids2):
new_row = tuple(list(row))
if new_row in aset:
log.debug(f'eid={eid} exists already...')
eids_to_remove.add(eid)
else:
aset.add(new_row)
return model, eids_to_remove
```
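A hedged usage sketch for `find_coplanar_triangles`; the filenames are placeholders and the import path follows the file header above.
```python
# Illustrative only: find duplicate (coplanar) triangles and drop them.
from pyNastran.bdf.mesh_utils.find_coplanar_elements import find_coplanar_triangles

model, coplanar_eids = find_coplanar_triangles('model.bdf', eids=None)
for eid in sorted(coplanar_eids):
    del model.elements[eid]              # remove the duplicated triangles
model.write_bdf('model_no_coplanar.bdf')  # write the de-duplicated deck back out
```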
#### File: utils/qt/pydialog.py
```python
from pyNastran.gui.qt_version import qt_version
from qtpy.QtCore import Qt
from qtpy.QtGui import QFont
from qtpy.QtWidgets import QDialog, QComboBox
from pyNastran.bdf.utils import (
parse_patran_syntax, parse_patran_syntax_dict)
from pyNastran.gui.utils.qt.checks.qlineedit import (
check_path, check_save_path,
check_int, check_positive_int_or_blank,
check_float, check_float_ranged,
check_name_str, check_name_length, check_format, check_format_str,
)
def make_font(font_size, is_bold=False):
"""creates a QFont"""
font = QFont()
font.setPointSize(font_size)
if is_bold:
font.setBold(is_bold)
return font
class PyDialog(QDialog):
"""
common class for QDialog so value checking & escape/close code
is not repeated
"""
def __init__(self, data, win_parent):
super(PyDialog, self).__init__(win_parent)
self.out_data = data
self.win_parent = win_parent
self.font_size = None
def set_font_size(self, font_size):
"""
Updates the font size of all objects in the PyDialog
Parameters
----------
font_size : int
the font size
"""
if self.font_size == font_size:
return
self.font_size = font_size
font = make_font(font_size, is_bold=False)
self.setFont(font)
def closeEvent(self, event):
self.out_data['close'] = True
event.accept()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.on_cancel()
def check_patran_syntax(cell, pound=None):
text = str(cell.text())
try:
values = parse_patran_syntax(text, pound=pound)
cell.setStyleSheet("QLineEdit{background: white;}")
return values, True
except ValueError as error:
cell.setStyleSheet("QLineEdit{background: red;}")
cell.setToolTip(str(error))
return None, False
def check_patran_syntax_dict(cell, pound=None):
text = str(cell.text())
try:
value = parse_patran_syntax_dict(text)
cell.setStyleSheet("QLineEdit{background: white;}")
cell.setToolTip('')
return value, True
except (ValueError, SyntaxError, KeyError) as error:
cell.setStyleSheet("QLineEdit{background: red;}")
cell.setToolTip(str(error))
return None, False
def make_combo_box(items, initial_value):
"""
Makes a QComboBox, sets the items, and sets an initial value.
Parameters
----------
items : List[str]
the values of the combo box
initial_value : str
the value to set the combo box to
Returns
-------
combo_box : QComboBox
the pulldown
"""
assert initial_value in items, 'initial_value=%r items=%s' % (initial_value, items)
combo_box = QComboBox()
combo_box.addItems(items)
set_combo_box_text(combo_box, initial_value)
if initial_value not in items:
msg = 'initial_value=%r is not supported in %s' % (initial_value, items)
raise RuntimeError(msg)
return combo_box
def set_combo_box_text(combo_box, value):
"""sets the combo_box text"""
if qt_version == 'pyside':
items = [combo_box.itemText(i) for i in range(combo_box.count())]
j = items.index(value)
combo_box.setCurrentIndex(j)
else:
combo_box.setCurrentText(value)
def check_color(color_float):
assert len(color_float) == 3, color_float
assert isinstance(color_float[0], float), color_float
color_int = [int(colori * 255) for colori in color_float]
return color_float, color_int
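# Hedged usage sketch for the helpers above (commented out so the module stays
# import-safe; building widgets requires a live QApplication):
#
# app = QApplication(sys.argv)
# combo = make_combo_box(['node', 'element', 'cid'], 'element')
# set_combo_box_text(combo, 'node') # change the current selection
# bold_font = make_font(10, is_bold=True) # 10 pt bold QFont
# rgb_float, rgb_int = check_color([1.0, 0.5, 0.0]) # -> ([1.0, 0.5, 0.0], [255, 127, 0])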
``` |
{
"source": "2167-Team1/TeamProject",
"score": 4
} |
#### File: 2167-Team1/TeamProject/Ejercicio 1 - EspacioPorGuion - CP.py
```python
texto = input("Ingrese su texto: ")
def SpaceToDash(texto):
return texto.replace(" ", "-")
print (SpaceToDash(texto))
``` |
{
"source": "216k155/opentrade",
"score": 3
} |
#### File: opentrade/scripts/roll_confirmation.py
```python
import datetime
import time
import os
from parse_confirmation import *
confirmations = []
orders = {}
now = time.time()
one_day = 24 * 3600
def check_confirmation(seq, raw, exec_type, id, *args):
if exec_type == kUnconfirmedNew:
tm, algo_id, qty = args[:3]
x = now - int(tm) / 1e6
if x > one_day:
log('too old orders skipped', x / one_day, 'days ago')
return
orders[id] = float(qty)
confirmations.append((id, raw))
elif exec_type == kNew:
confirmations.append((id, raw))
elif exec_type in (kRiskRejected, kCanceled, kRejected, kExpired, kCalculated,
kDoneForDay):
if id in orders: del orders[id]
elif exec_type in (kPartiallyFilled, kFilled):
tm, last_shares, last_px, exec_trans_type = args[:4]
n = float(last_shares)
if id in orders:
if exec_trans_type == kTransCancel: orders[id] += n
elif exec_trans_type == kTransNew:
orders[id] -= n
if orders[id] <= 1e-8:
del orders[id]
def main():
src = sys.argv[1]
dest = sys.argv[2]
if os.path.exists(dest):
log(dest, 'already exists, skip rolling', src)
return
if not os.path.exists(src):
log(src, 'not exists, skip rolling')
return
fh = open(dest, 'wb')
rolls = []
parse(src, check_confirmation)
rolls = [(id, raw) for id, raw in confirmations if id in orders]
log(len(orders), 'orders rolled')
seq = 0
for id, raw in rolls:
seq += 1
raw = struct.pack('I', seq) + raw[4:]
if raw[4] == kUnconfirmedNew:
# modify qty
qty = orders[id]
a = raw[:7]
n = 7
while raw[n] != '\0':
n += 1
b = raw[7:n]
b = b.split(' ')
b[3] = str(qty)
raw = a + ' '.join(b) + '\0\n'
fh.write(raw)
fh.close()
def log(*args):
args = (datetime.datetime.now(),) + args
print(' '.join([str(x) for x in args]))
if __name__ == '__main__':
main()
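# Hedged usage sketch (the file names are placeholders): roll an old
# confirmation file forward, keeping only still-open orders with their
# remaining quantities:
#
# python roll_confirmation.py confirmations.old confirmations.new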
``` |
{
"source": "217heidai/Todo-List",
"score": 3
} |
#### File: 217heidai/Todo-List/conf.py
```python
import os
import configparser
class Config(object):
def __init__(self, filename):
self.__conf = configparser.ConfigParser()
self.__conf.read(filename, encoding='GBK')
def getConfig(self, section, item):
try:
itemDict = dict(self.__conf.items(section))
if item in itemDict:
return itemDict[item]
return None
except Exception as e:
return None
if __name__ == '__main__':
conf = Config(os.getcwd() + '/conf/conf.ini')
print(conf.getConfig('APP', 'name'))
print(conf.getConfig('USER', 'name'))
print(conf.getConfig('TITLE', 'title1'))
print(conf.getConfig('TITLE', 'title2'))
print(conf.getConfig('TITLE', 'title3'))
```
#### File: 217heidai/Todo-List/tts.py
```python
import os
from aip import AipSpeech
class TTS_BAIDU(object):
def __init__(self):
self.__APP_ID = '25204844'
self.__APP_KEY = '<KEY>'
self.__SECRET_KEY = '<KEY>'
self.__client = AipSpeech(self.__APP_ID, self.__APP_KEY, self.__SECRET_KEY)
def TTS(self, text, filename):
if os.path.exists(filename):
return True
result = self.__client.synthesis(text*20, 'zh', 1, {'spd': 5, 'vol': 5, 'per': 1})
# on success the synthesis call returns the raw audio bytes; on error it returns a dict (see the Baidu error codes)
if not isinstance(result, dict):
#if os.path.exists(filename):
# os.remove(filename)
with open(filename, 'wb') as f:
f.write(result)
return True
return False
if __name__ == '__main__':
from pygame import mixer
def test(text, filename):
baidu = TTS_BAIDU()
baidu.TTS(text, filename)
mixer.init()
mixer.music.load(filename)
mixer.music.play()
while mixer.music.get_busy():
pass
mixer.music.stop()
text = '现在是7点50分,请提醒作战勤务值班员组织交班。'
filename = os.getcwd() + '/alarms/现在是7点50分,请提醒作战勤务值班员组织交班。.wav'
test(text, filename)
``` |
{
"source": "21945764/Absinthe",
"score": 2
} |
#### File: 21945764/Absinthe/wskey.py
```python
import socket # used for port checking
import base64 # used for base64 encoding/decoding
import json # used for JSON parsing
import os # used to read environment variables
import sys # provides sys.exit
import logging # used for log output
import time # time handling
import re # regular-expression filtering
if "WSKEY_DEBUG" in os.environ: # check whether the debug-mode variable is set
logging.basicConfig(level=logging.DEBUG, format='%(message)s') # set logging to DEBUG level
logger = logging.getLogger(__name__) # main module logger
logger.debug("\nDEBUG模式开启!\n") # debug message ("DEBUG mode enabled")
else: # otherwise
logging.basicConfig(level=logging.INFO, format='%(message)s') # INFO-level logging
logger = logging.getLogger(__name__) # main module logger
try: # exception handling
import requests # import the HTTP module
except Exception as e: # exception handling
logger.info(str(e) + "\n缺少requests模块, 请执行命令:pip3 install requests\n") # log output ("requests module missing, run: pip3 install requests")
sys.exit(1) # exit the script
os.environ['no_proxy'] = '*' # disable proxies
requests.packages.urllib3.disable_warnings() # suppress urllib3 warnings
try: # exception handling
from notify import send # import the Qinglong notification module
except Exception as err: # exception handling
logger.debug(str(err)) # debug log output
logger.info("无推送文件") # standard log output ("no notify file found")
ver = 20505 # version number
def ql_send(text):
try: # exception handling
send('WSKEY转换', text) # send the notification
except Exception as err: # exception handling
logger.debug(str(err)) # debug log output
logger.info("通知发送失败") # standard log output ("notification failed to send")
# Log in to Qinglong; returns a token
def get_qltoken(username, password): # 方法 用于获取青龙 Token
logger.info("Token失效, 新登陆\n") # 日志输出
url = "http://127.0.0.1:{0}/api/user/login".format(port) # 设置青龙地址 使用 format格式化自定义端口
payload = {
'username': username,
'password': password
} # HTTP请求载荷
payload = json.dumps(payload) # json格式化载荷
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
} # HTTP请求头 设置为 Json格式
try: # 异常捕捉
res = requests.post(url=url, headers=headers, data=payload) # 使用 requests模块进行 HTTP POST请求
if res.status_code == 200 and res.json()["code"] == 200:
token = res.json()["data"]['token'] # 从 res.text 返回值中 取出 Token值
return token
else:
logger.info("暂未兼容两步验证")
ql_send("青龙登录失败, 请检查是否开启两步验证 脚本暂未兼容两步验证")
sys.exit(1) # 脚本退出
except Exception as err:
logger.debug(str(err)) # Debug日志输出
logger.info("使用旧版青龙登录接口")
url = "http://127.0.0.1:{0}/api/login".format(port) # 设置青龙地址 使用 format格式化自定义端口
payload = {
'username': username,
'password': password
} # HTTP请求载荷
payload = json.dumps(payload) # json格式化载荷
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
} # HTTP请求头 设置为 Json格式
try: # 异常捕捉
res = requests.post(url=url, headers=headers, data=payload) # 使用 requests模块进行 HTTP POST请求
token = json.loads(res.text)["data"]['token'] # 从 res.text 返回值中 取出 Token值
except Exception as err: # 异常捕捉
logger.debug(str(err)) # Debug日志输出
logger.info("青龙登录失败, 请检查面板状态!") # 标准日志输出
ql_send('青龙登陆失败, 请检查面板状态.')
sys.exit(1) # 脚本退出
else: # 无异常执行分支
return token # 返回 token值
# else: # 无异常执行分支
# return token # 返回 token值
# 返回值 Token
def ql_login(): # 方法 青龙登录(获取Token 功能同上)
path = '/ql/config/auth.json' # 设置青龙 auth文件地址
if not os.path.isfile(path):
path = '/ql/data/config/auth.json' # 尝试设置青龙 auth 新版文件地址
if os.path.isfile(path): # 进行文件真值判断
with open(path, "r") as file: # 上下文管理
auth = file.read() # 读取文件
file.close() # 关闭文件
auth = json.loads(auth) # 使用 json模块读取
username = auth["username"] # 提取 username
password = auth["password"] # 提取 password
token = auth["token"] # 提取 authkey
if token == '': # 判断 Token是否为空
return get_qltoken(username, password) # 调用方法 get_qltoken 传递 username & password
else: # 判断分支
url = "http://127.0.0.1:{0}/api/user".format(port) # 设置URL请求地址 使用 Format格式化端口
headers = {
'Authorization': 'Bearer {0}'.format(token),
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36 Edg/94.0.992.38'
} # 设置用于 HTTP头
res = requests.get(url=url, headers=headers) # 调用 request模块发送 get请求
if res.status_code == 200: # 判断 HTTP返回状态码
return token # 有效 返回 token
else: # 判断分支
return get_qltoken(username, password) # 调用方法 get_qltoken 传递 username & password
else: # 判断分支
logger.info("没有发现auth文件, 你这是青龙吗???") # 输出标准日志
sys.exit(0) # 脚本退出
# 返回值 list[wskey]
def get_wskey(): # 方法 获取 wskey值 [系统变量传递]
if "JD_WSCK" in os.environ: # 判断 JD_WSCK是否存在于环境变量
wskey_list = os.environ['JD_WSCK'].split('&') # 读取系统变量 以 & 分割变量
if len(wskey_list) > 0: # 判断 WSKEY 数量 大于 0 个
return wskey_list # 返回 WSKEY [LIST]
else: # 判断分支
logger.info("JD_WSCK变量未启用") # 标准日志输出
sys.exit(1) # 脚本退出
else: # 判断分支
logger.info("未添加JD_WSCK变量") # 标准日志输出
sys.exit(0) # 脚本退出
# 返回值 list[jd_cookie]
def get_ck(): # 方法 获取 JD_COOKIE值 [系统变量传递] <! 此方法未使用 !>
if "JD_COOKIE" in os.environ: # 判断 JD_COOKIE是否存在于环境变量
ck_list = os.environ['JD_COOKIE'].split('&') # 读取系统变量 以 & 分割变量
if len(ck_list) > 0: # 判断 WSKEY 数量 大于 0 个
return ck_list # 返回 JD_COOKIE [LIST]
else: # 判断分支
logger.info("JD_COOKIE变量未启用") # 标准日志输出
sys.exit(1) # 脚本退出
else: # 判断分支
logger.info("未添加JD_COOKIE变量") # 标准日志输出
sys.exit(0) # 脚本退出
# 返回值 bool
def check_ck(ck): # 方法 检查 Cookie有效性 使用变量传递 单次调用
searchObj = re.search(r'pt_pin=([^;\s]+)', ck, re.M | re.I) # 正则检索 pt_pin
if searchObj: # 真值判断
pin = searchObj.group(1) # 取值
else: # 判断分支
pin = ck.split(";")[1] # 取值 使用 ; 分割
if "WSKEY_UPDATE_HOUR" in os.environ: # 判断 WSKEY_UPDATE_HOUR是否存在于环境变量
updateHour = 23 # 更新间隔23小时
if os.environ["WSKEY_UPDATE_HOUR"].isdigit(): # 检查是否为 DEC值
updateHour = int(os.environ["WSKEY_UPDATE_HOUR"]) # 使用 int化数字
nowTime = time.time() # 获取时间戳 赋值
updatedAt = 0.0 # 赋值
searchObj = re.search(r'__time=([^;\s]+)', ck, re.M | re.I) # 正则检索 [__time=]
if searchObj: # 真值判断
updatedAt = float(searchObj.group(1)) # 取值 [float]类型
if nowTime - updatedAt >= (updateHour * 60 * 60) - (10 * 60): # 判断时间操作
logger.info(str(pin) + ";即将到期或已过期\n") # 标准日志输出
return False # 返回 Bool类型 False
else: # 判断分支
remainingTime = (updateHour * 60 * 60) - (nowTime - updatedAt) # 时间运算操作
hour = int(remainingTime / 60 / 60) # 时间运算操作 [int]
minute = int((remainingTime % 3600) / 60) # 时间运算操作 [int]
logger.info(str(pin) + ";未到期,{0}时{1}分后更新\n".format(hour, minute)) # 标准日志输出
return True # 返回 Bool类型 True
elif "WSKEY_DISCHECK" in os.environ: # 判断分支 WSKEY_DISCHECK 是否存在于系统变量
logger.info("不检查账号有效性\n--------------------\n") # 标准日志输出
return False # 返回 Bool类型 False
else: # 判断分支
url = 'https://me-api.jd.com/user_new/info/GetJDUserInfoUnion' # 设置JD_API接口地址
headers = {
'Cookie': ck,
'Referer': 'https://home.m.jd.com/myJd/home.action',
'user-agent': ua
} # 设置 HTTP头
try: # 异常捕捉
res = requests.get(url=url, headers=headers, verify=False, timeout=10) # 进行 HTTP请求[GET] 超时 10秒
except Exception as err: # 异常捕捉
logger.debug(str(err)) # 调试日志输出
logger.info("JD接口错误 请重试或者更换IP") # 标准日志输出
return False # 返回 Bool类型 False
else: # 判断分支
if res.status_code == 200: # 判断 JD_API 接口是否为 200 [HTTP_OK]
code = int(json.loads(res.text)['retcode']) # 使用 Json模块对返回数据取值 int([retcode])
if code == 0: # 判断 code值
logger.info(str(pin) + ";状态正常\n") # 标准日志输出
return True # 返回 Bool类型 True
else: # 判断分支
logger.info(str(pin) + ";状态失效\n")
return False # 返回 Bool类型 False
else: # 判断分支
logger.info("JD接口错误码: " + str(res.status_code)) # 标注日志输出
return False # 返回 Bool类型 False
# 返回值 bool jd_ck
def getToken(wskey): # 方法 获取 Wskey转换使用的 Token 由 JD_API 返回 这里传递 wskey
try: # 异常捕捉
url = str(base64.b64decode(url_t).decode()) + 'genToken' # 设置云端服务器地址 路由为 genToken
header = {"User-Agent": ua} # 设置 HTTP头
params = requests.get(url=url, headers=header, verify=False, timeout=20).json() # 设置 HTTP请求参数 超时 20秒 Json解析
except Exception as err: # 异常捕捉
logger.info("Params参数获取失败") # 标准日志输出
logger.debug(str(err)) # 调试日志输出
return False, wskey # 返回 -> False[Bool], Wskey
headers = {
'cookie': wskey,
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'charset': 'UTF-8',
'accept-encoding': 'br,gzip,deflate',
'user-agent': ua
} # 设置 HTTP头
url = 'https://api.m.jd.com/client.action' # 设置 URL地址
data = 'body=%7B%22to%22%3A%22https%253a%252f%252fplogin.m.jd.com%252fjd-mlogin%252fstatic%252fhtml%252fappjmp_blank.html%22%7D&' # 设置 POST 载荷
try: # 异常捕捉
res = requests.post(url=url, params=params, headers=headers, data=data, verify=False,
timeout=10) # HTTP请求 [POST] 超时 10秒
res_json = json.loads(res.text) # Json模块 取值
tokenKey = res_json['tokenKey'] # 取出TokenKey
except Exception as err: # 异常捕捉
logger.info("JD_WSKEY接口抛出错误 尝试重试 更换IP") # 标准日志输出
logger.info(str(err)) # 标注日志输出
return False, wskey # 返回 -> False[Bool], Wskey
else: # 判断分支
return appjmp(wskey, tokenKey) # 传递 wskey, Tokenkey 执行方法 [appjmp]
# 返回值 bool jd_ck
def appjmp(wskey, tokenKey): # 方法 传递 wskey & tokenKey
wskey = "pt_" + str(wskey.split(";")[0]) # 变量组合 使用 ; 分割变量 拼接 pt_
if tokenKey == 'xxx': # 判断 tokenKey返回值
logger.info(str(wskey) + ";疑似IP风控等问题 默认为失效\n--------------------\n") # 标准日志输出
return False, wskey # 返回 -> False[Bool], Wskey
headers = {
'User-Agent': ua,
'accept': 'accept:text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'x-requested-with': 'com.jingdong.app.mall'
} # 设置 HTTP头
params = {
'tokenKey': tokenKey,
'to': 'https://plogin.m.jd.com/jd-mlogin/static/html/appjmp_blank.html'
} # 设置 HTTP_URL 参数
url = 'https://un.m.jd.com/cgi-bin/app/appjmp' # 设置 URL地址
try: # 异常捕捉
res = requests.get(url=url, headers=headers, params=params, verify=False, allow_redirects=False,
timeout=20) # HTTP请求 [GET] 阻止跳转 超时 20秒
except Exception as err: # 异常捕捉
logger.info("JD_appjmp 接口错误 请重试或者更换IP\n") # 标准日志输出
logger.info(str(err)) # 标准日志输出
return False, wskey # 返回 -> False[Bool], Wskey
else: # 判断分支
try: # 异常捕捉
res_set = res.cookies.get_dict() # 从res cookie取出
pt_key = 'pt_key=' + res_set['pt_key'] # 取值 [pt_key]
pt_pin = 'pt_pin=' + res_set['pt_pin'] # 取值 [pt_pin]
if "WSKEY_UPDATE_HOUR" in os.environ: # 判断是否在系统变量中启用 WSKEY_UPDATE_HOUR
jd_ck = str(pt_key) + ';' + str(pt_pin) + ';__time=' + str(time.time()) + ';' # 拼接变量
else: # 判断分支
jd_ck = str(pt_key) + ';' + str(pt_pin) + ';' # 拼接变量
except Exception as err: # 异常捕捉
logger.info("JD_appjmp提取Cookie错误 请重试或者更换IP\n") # 标准日志输出
logger.info(str(err)) # 标准日志输出
return False, wskey # 返回 -> False[Bool], Wskey
else: # 判断分支
if 'fake' in pt_key: # 判断 pt_key中 是否存在fake
logger.info(str(wskey) + ";WsKey状态失效\n") # 标准日志输出
return False, wskey # 返回 -> False[Bool], Wskey
else: # 判断分支
logger.info(str(wskey) + ";WsKey状态正常\n") # 标准日志输出
return True, jd_ck # 返回 -> True[Bool], jd_ck
def update(): # 方法 脚本更新模块
up_ver = int(cloud_arg['update']) # 云端参数取值 [int]
if ver >= up_ver: # 判断版本号大小
logger.info("当前脚本版本: " + str(ver)) # 标准日志输出
logger.info("--------------------\n") # 标准日志输出
else: # 判断分支
logger.info("当前脚本版本: " + str(ver) + "新版本: " + str(up_ver)) # 标准日志输出
logger.info("存在新版本, 请更新脚本后执行") # 标准日志输出
logger.info("--------------------\n") # 标准日志输出
text = '当前脚本版本: {0}新版本: {1}, 请更新脚本~!'.format(ver, up_ver) # 设置发送内容
ql_send(text)
# sys.exit(0) # 退出脚本 [未启用]
def ql_check(port): # 方法 检查青龙端口
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Socket模块初始化
sock.settimeout(2) # 设置端口超时
try: # 异常捕捉
sock.connect(('127.0.0.1', port)) # 请求端口
except Exception as err: # 捕捉异常
logger.debug(str(err)) # 调试日志输出
sock.close() # 端口关闭
return False # 返回 -> False[Bool]
else: # 分支判断
sock.close() # 关闭端口
return True # 返回 -> True[Bool]
def serch_ck(pin): # 方法 搜索 Pin
for i in range(len(envlist)): # For循环 变量[envlist]的数量
if "name" not in envlist[i] or envlist[i]["name"] != "JD_COOKIE": # 判断 envlist内容
continue # 继续循环
if pin in envlist[i]['value']: # 判断envlist取值['value']
value = envlist[i]['value'] # 取值['value']
id = envlist[i][ql_id] # 取值 [ql_id](变量)
logger.info(str(pin) + "检索成功\n") # 标准日志输出
return True, value, id # 返回 -> True[Bool], value, id
else: # 判断分支
continue # 继续循环
logger.info(str(pin) + "检索失败\n") # 标准日志输出
return False, 1 # 返回 -> False[Bool], 1
def get_env(): # 方法 读取变量
url = 'http://127.0.0.1:{0}/api/envs'.format(port) # 设置 URL地址
try: # 异常捕捉
res = s.get(url) # HTTP请求 [GET] 使用 session
except Exception as err: # 异常捕捉
logger.debug(str(err)) # 调试日志输出
logger.info("\n青龙环境接口错误") # 标准日志输出
sys.exit(1) # 脚本退出
else: # 判断分支
data = json.loads(res.text)['data'] # 使用Json模块提取值[data]
return data # 返回 -> data
def check_id(): # 方法 兼容青龙老版本与新版本 id & _id的问题
url = 'http://127.0.0.1:{0}/api/envs'.format(port) # 设置 URL地址
try: # 异常捕捉
res = s.get(url).json() # HTTP[GET] 请求 使用 session
except Exception as err: # 异常捕捉
logger.debug(str(err)) # 调试日志输出
logger.info("\n青龙环境接口错误") # 标准日志输出
sys.exit(1) # 脚本退出
else: # 判断分支
if '_id' in res['data'][0]: # 判断 [_id]
logger.info("使用 _id 键值") # 标准日志输出
return '_id' # 返回 -> '_id'
else: # 判断分支
logger.info("使用 id 键值") # 标准日志输出
return 'id' # 返回 -> 'id'
def ql_update(e_id, n_ck): # 方法 青龙更新变量 传递 id cookie
url = 'http://127.0.0.1:{0}/api/envs'.format(port) # 设置 URL地址
data = {
"name": "JD_COOKIE",
"value": n_ck,
ql_id: e_id
} # 设置 HTTP POST 载荷
data = json.dumps(data) # json模块格式化
s.put(url=url, data=data) # HTTP [PUT] 请求 使用 session
ql_enable(eid) # 调用方法 ql_enable 传递 eid
def ql_enable(e_id): # 方法 青龙变量启用 传递值 eid
url = 'http://127.0.0.1:{0}/api/envs/enable'.format(port) # 设置 URL地址
data = '["{0}"]'.format(e_id) # 格式化 POST 载荷
res = json.loads(s.put(url=url, data=data).text) # json模块读取 HTTP[PUT] 的返回值
if res['code'] == 200: # 判断返回值为 200
logger.info("\n账号启用\n--------------------\n") # 标准日志输出
return True # 返回 ->True
else: # 判断分支
logger.info("\n账号启用失败\n--------------------\n") # 标准日志输出
return False # 返回 -> Fasle
def ql_disable(e_id): # 方法 青龙变量禁用 传递 eid
url = 'http://127.0.0.1:{0}/api/envs/disable'.format(port) # 设置 URL地址
data = '["{0}"]'.format(e_id) # 格式化 POST 载荷
res = json.loads(s.put(url=url, data=data).text) # json模块读取 HTTP[PUT] 的返回值
if res['code'] == 200: # 判断返回值为 200
logger.info("\n账号禁用成功\n--------------------\n") # 标准日志输出
return True # 返回 ->True
else: # 判断分支
logger.info("\n账号禁用失败\n--------------------\n") # 标准日志输出
return False # 返回 -> Fasle
def ql_insert(i_ck): # 方法 插入新变量
data = [{"value": i_ck, "name": "JD_COOKIE"}] # POST数据载荷组合
data = json.dumps(data) # Json格式化数据
url = 'http://127.0.0.1:{0}/api/envs'.format(port) # 设置 URL地址
s.post(url=url, data=data) # HTTP[POST]请求 使用session
logger.info("\n账号添加完成\n--------------------\n") # 标准日志输出
def cloud_info(): # 方法 云端信息
url = str(base64.b64decode(url_t).decode()) + 'check_api' # 设置 URL地址 路由 [check_api]
for i in range(3): # For循环 3次
try: # 异常捕捉
headers = {"authorization": "Bearer Shizuku"} # 设置 HTTP头
res = requests.get(url=url, verify=False, headers=headers, timeout=20).text # HTTP[GET] 请求 超时 20秒
except requests.exceptions.ConnectTimeout: # 异常捕捉
logger.info("\n获取云端参数超时, 正在重试!" + str(i)) # 标准日志输出
time.sleep(1) # 休眠 1秒
continue # 循环继续
except requests.exceptions.ReadTimeout: # 异常捕捉
logger.info("\n获取云端参数超时, 正在重试!" + str(i)) # 标准日志输出
time.sleep(1) # 休眠 1秒
continue # 循环继续
except Exception as err: # 异常捕捉
logger.info("\n未知错误云端, 退出脚本!") # 标准日志输出
logger.debug(str(err)) # 调试日志输出
sys.exit(1) # 脚本退出
else: # 分支判断
try: # 异常捕捉
c_info = json.loads(res) # json读取参数
except Exception as err: # 异常捕捉
logger.info("云端参数解析失败") # 标准日志输出
logger.debug(str(err)) # 调试日志输出
sys.exit(1) # 脚本退出
else: # 分支判断
return c_info # 返回 -> c_info
def check_cloud(): # 方法 云端地址检查
url_list = ['aHR0cDovLzQzLjEzNS45MC4yMy8=', 'aHR0cHM6Ly9zaGl6dWt1Lm1sLw==',
'aHR0cHM6Ly9jZi5zaGl6dWt1Lm1sLw=='] # URL list Encode
for i in url_list: # for循环 url_list
url = str(base64.b64decode(i).decode()) # 设置 url地址 [str]
try: # 异常捕捉
requests.get(url=url, verify=False, timeout=10) # HTTP[GET]请求 超时 10秒
except Exception as err: # 异常捕捉
logger.debug(str(err)) # 调试日志输出
continue # 循环继续
else: # 分支判断
info = ['Default', 'HTTPS', 'CloudFlare'] # 输出信息[List]
logger.info(str(info[url_list.index(i)]) + " Server Check OK\n--------------------\n") # 标准日志输出
return i # 返回 ->i
logger.info("\n云端地址全部失效, 请检查网络!") # 标准日志输出
ql_send('云端地址失效. 请联系作者或者检查网络.') # 推送消息
sys.exit(1) # 脚本退出
def check_port(): # 方法 检查变量传递端口
logger.info("\n--------------------\n") # 标准日志输出
if "QL_PORT" in os.environ: # 判断 系统变量是否存在[QL_PORT]
try: # 异常捕捉
port = int(os.environ['QL_PORT']) # 取值 [int]
except Exception as err: # 异常捕捉
logger.debug(str(err)) # 调试日志输出
logger.info("变量格式有问题...\n格式: export QL_PORT=\"端口号\"") # 标准日志输出
logger.info("使用默认端口5700") # 标准日志输出
return 5700 # 返回端口 5700
else: # 判断分支
port = 5700 # 默认5700端口
if not ql_check(port): # 调用方法 [ql_check] 传递 [port]
logger.info(str(port) + "端口检查失败, 如果改过端口, 请在变量中声明端口 \n在config.sh中加入 export QL_PORT=\"端口号\"") # 标准日志输出
logger.info("\n如果你很确定端口没错, 还是无法执行, 在GitHub给我发issus\n--------------------\n") # 标准日志输出
sys.exit(1) # 脚本退出
else: # 判断分支
logger.info(str(port) + "端口检查通过") # 标准日志输出
return port # 返回->port
if __name__ == '__main__': # Python主函数执行入口
port = check_port() # 调用方法 [check_port] 并赋值 [port]
token = ql_login() # 调用方法 [ql_login] 并赋值 [token]
s = requests.session() # 设置 request session方法
s.headers.update({"authorization": "Bearer " + str(token)}) # 增加 HTTP头认证
s.headers.update({"Content-Type": "application/json;charset=UTF-8"}) # 增加 HTTP头 json 类型
ql_id = check_id() # 调用方法 [check_id] 并赋值 [ql_id]
url_t = check_cloud() # 调用方法 [check_cloud] 并赋值 [url_t]
cloud_arg = cloud_info() # 调用方法 [cloud_info] 并赋值 [cloud_arg]
update() # 调用方法 [update]
ua = cloud_arg['User-Agent'] # 设置全局变量 UA
wslist = get_wskey() # 调用方法 [get_wskey] 并赋值 [wslist]
envlist = get_env() # 调用方法 [get_env] 并赋值 [envlist]
if "WSKEY_SLEEP" in os.environ and str(os.environ["WSKEY_SLEEP"]).isdigit(): # 判断变量[WSKEY_SLEEP]是否为数字类型
sleepTime = int(os.environ["WSKEY_SLEEP"]) # 获取变量 [int]
else: # 判断分支
sleepTime = 10 # 默认休眠时间 10秒
for ws in wslist: # wslist变量 for循环 [wslist -> ws]
wspin = ws.split(";")[0] # 变量分割 ;
if "pin" in wspin: # 判断 pin 是否存在于 [wspin]
wspin = "pt_" + wspin + ";" # 封闭变量
return_serch = serch_ck(wspin) # 变量 pt_pin 搜索获取 key eid
if return_serch[0]: # bool: True 搜索到账号
jck = str(return_serch[1]) # 拿到 JD_COOKIE
if not check_ck(jck): # bool: False 判定 JD_COOKIE 有效性
tryCount = 1 # 重试次数 1次
if "WSKEY_TRY_COUNT" in os.environ: # 判断 [WSKEY_TRY_COUNT] 是否存在于系统变量
if os.environ["WSKEY_TRY_COUNT"].isdigit(): # 判断 [WSKEY_TRY_COUNT] 是否为数字
tryCount = int(os.environ["WSKEY_TRY_COUNT"]) # 设置 [tryCount] int
for count in range(tryCount): # for循环 [tryCount]
count += 1 # 自增
return_ws = getToken(ws) # 使用 WSKEY 请求获取 JD_COOKIE bool jd_ck
if return_ws[0]: # 判断 [return_ws]返回值 Bool类型
break # 中断循环
if count < tryCount: # 判断循环次
logger.info("{0} 秒后重试,剩余次数:{1}\n".format(sleepTime, tryCount - count)) # 标准日志输出
time.sleep(sleepTime) # 脚本休眠 使用变量 [sleepTime]
if return_ws[0]: # 判断 [return_ws]返回值 Bool类型
nt_key = str(return_ws[1]) # 从 return_ws[1] 取出 -> nt_key
# logger.info("wskey转pt_key成功", nt_key) # 标准日志输出 [未启用]
logger.info("wskey转换成功") # 标准日志输出
eid = return_serch[2] # 从 return_serch 拿到 eid
ql_update(eid, nt_key) # 函数 ql_update 参数 eid JD_COOKIE
else: # 判断分支
if "WSKEY_AUTO_DISABLE" in os.environ: # 从系统变量中获取 WSKEY_AUTO_DISABLE
logger.info(str(wspin) + "账号失效") # 标准日志输出
text = "账号: {0} WsKey疑似失效".format(wspin) # 设置推送内容
else: # 判断分支
eid = return_serch[2] # 读取 return_serch[2] -> eid
logger.info(str(wspin) + "账号禁用") # 标准日志输出
ql_disable(eid) # 执行方法[ql_disable] 传递 eid
text = "账号: {0} WsKey疑似失效, 已禁用Cookie".format(wspin) # 设置推送内容
ql_send(text)
else: # 判断分支
logger.info(str(wspin) + "账号有效") # 标准日志输出
eid = return_serch[2] # 读取 return_serch[2] -> eid
ql_enable(eid) # 执行方法[ql_enable] 传递 eid
logger.info("--------------------\n") # 标准日志输出
else: # 判断分支
logger.info("\n新wskey\n") # 标准日志分支
return_ws = getToken(ws) # 使用 WSKEY 请求获取 JD_COOKIE bool jd_ck
if return_ws[0]: # 判断 (return_ws[0]) 类型: [Bool]
nt_key = str(return_ws[1]) # return_ws[1] -> nt_key
logger.info("wskey转换成功\n") # 标准日志输出
ql_insert(nt_key) # 调用方法 [ql_insert]
logger.info("暂停{0}秒\n".format(sleepTime)) # 标准日志输出
time.sleep(sleepTime) # 脚本休眠
else: # 判断分支
logger.info("WSKEY格式错误\n--------------------\n") # 标准日志输出
logger.info("执行完成\n--------------------") # 标准日志输出
sys.exit(0) # 脚本退出
# Enjoy
``` |
{
"source": "219-design/sysroot_qt5.15.0_binaries_armv6zk_rpizero_bullseye_gcc10.3",
"score": 3
} |
#### File: Inputs/shtest-shell/check_path.py
```python
from __future__ import print_function
import os
import sys
def check_path(argv):
if len(argv) < 3:
print("Wrong number of args")
return 1
type = argv[1]
paths = argv[2:]
exit_code = 0
if type == 'dir':
for idx, dir in enumerate(paths):
print(os.path.isdir(dir))
elif type == 'file':
for idx, file in enumerate(paths):
print(os.path.isfile(file))
else:
print("Unrecognised type {}".format(type))
exit_code = 1
return exit_code
if __name__ == '__main__':
sys.exit (check_path (sys.argv))
```
#### File: python3.9/test/regrtest.py
```python
import os
import sys
from test.libregrtest import main
# Alias for backward compatibility (just in case)
main_in_temp_cwd = main
def _main():
global __file__
# Remove regrtest.py's own directory from the module search path. Despite
# the elimination of implicit relative imports, this is still needed to
# ensure that submodules of the test package do not inappropriately appear
# as top-level modules even when people (or buildbots!) invoke regrtest.py
# directly instead of using the -m switch
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = len(sys.path) - 1
while i >= 0:
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
else:
i -= 1
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
main()
if __name__ == '__main__':
_main()
``` |
{
"source": "219-design/sysroot_qt5.15.0_binaries_armv6zk_rpizero",
"score": 2
} |
#### File: dist-packages/apt/cache.py
```python
from __future__ import print_function
import fnmatch
import os
import warnings
import weakref
try:
from typing import (Any, Callable, Dict, Iterator, List, Optional,
Set, Tuple, Union, cast, KeysView)
Any # pyflakes
Callable # pyflakes
Dict # pyflakes
Iterator # pyflakes
KeysView # pyflakes
List # pyflakes
Optional # pyflakes
Set # pyflakes
Tuple # pyflakes
Union # pyflakes
except ImportError:
def cast(typ, obj): # type: ignore
return obj
pass
import apt_pkg
from apt.package import Package, Version
import apt.progress.text
from apt.progress.base import AcquireProgress, InstallProgress, OpProgress
OpProgress # pyflakes
InstallProgress # pyflakes
AcquireProgress # pyflakes
Version # pyflakes
class FetchCancelledException(IOError):
"""Exception that is thrown when the user cancels a fetch operation."""
class FetchFailedException(IOError):
"""Exception that is thrown when fetching fails."""
class UntrustedException(FetchFailedException):
"""Exception that is thrown when fetching fails for trust reasons"""
class LockFailedException(IOError):
"""Exception that is thrown when locking fails."""
class CacheClosedException(Exception):
"""Exception that is thrown when the cache is used after close()."""
class _WrappedLock(object):
"""Wraps an apt_pkg.FileLock to raise LockFailedException.
Initialized using a directory path."""
def __init__(self, path):
# type: (str) -> None
self._path = path
self._lock = apt_pkg.FileLock(os.path.join(path, "lock"))
def __enter__(self):
# type: () -> None
try:
return self._lock.__enter__()
except apt_pkg.Error as e:
raise LockFailedException(("Failed to lock directory %s: %s") %
(self._path, e))
def __exit__(self, typ, value, traceback):
# type: (object, object, object) -> None
return self._lock.__exit__(typ, value, traceback)
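# Hedged sketch of how _WrappedLock is used further below: it wraps
# apt_pkg.FileLock as a context manager and raises LockFailedException
# instead of apt_pkg.Error (the directory below is illustrative):
#
# with _WrappedLock(apt_pkg.config.find_dir("Dir::Cache::Archives")):
# ... # fetch .deb files while holding the archive lock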
class Cache(object):
"""Dictionary-like package cache.
The APT cache file contains a hash table mapping names of binary
packages to their metadata. A Cache object is the in-core
representation of the same. It provides access to APTs idea of the
list of available packages.
The cache can be used like a mapping from package names to Package
objects (although only getting items is supported).
Keyword arguments:
progress -- a OpProgress object,
rootdir -- an alternative root directory. if that is given the system
sources.list and system lists/files are not read, only file relative
to the given rootdir,
memonly -- build the cache in memory only.
.. versionchanged:: 1.0
The cache now supports package names with special architecture
qualifiers such as :all and :native. It does not export them
in :meth:`keys()`, though, to keep :meth:`keys()` a unique set.
"""
def __init__(self, progress=None, rootdir=None, memonly=False):
# type: (OpProgress, str, bool) -> None
self._cache = cast(apt_pkg.Cache, None) # type: apt_pkg.Cache
self._depcache = cast(apt_pkg.DepCache, None) # type: apt_pkg.DepCache
self._records = cast(apt_pkg.PackageRecords, None) # type: apt_pkg.PackageRecords # nopep8
self._list = cast(apt_pkg.SourceList, None) # type: apt_pkg.SourceList
self._callbacks = {} # type: Dict[str, List[Union[Callable[..., None],str]]] # nopep8
self._callbacks2 = {} # type: Dict[str, List[Tuple[Callable[..., Any], Tuple[Any, ...], Dict[Any,Any]]]] # nopep8
self._weakref = weakref.WeakValueDictionary() # type: weakref.WeakValueDictionary[str, apt.Package] # nopep8
self._weakversions = weakref.WeakSet() # type: weakref.WeakSet[Version] # nopep8
self._changes_count = -1
self._sorted_set = None # type: Optional[List[str]]
self.connect("cache_post_open", "_inc_changes_count")
self.connect("cache_post_change", "_inc_changes_count")
if memonly:
# force apt to build its caches in memory
apt_pkg.config.set("Dir::Cache::pkgcache", "")
if rootdir:
rootdir = os.path.abspath(rootdir)
if os.path.exists(rootdir + "/etc/apt/apt.conf"):
apt_pkg.read_config_file(apt_pkg.config,
rootdir + "/etc/apt/apt.conf")
if os.path.isdir(rootdir + "/etc/apt/apt.conf.d"):
apt_pkg.read_config_dir(apt_pkg.config,
rootdir + "/etc/apt/apt.conf.d")
apt_pkg.config.set("Dir", rootdir)
apt_pkg.config.set("Dir::State::status",
rootdir + "/var/lib/dpkg/status")
# also set dpkg to the rootdir path so that its called for the
# --print-foreign-architectures call
apt_pkg.config.set("Dir::bin::dpkg",
os.path.join(rootdir, "usr", "bin", "dpkg"))
# create required dirs/files when run with special rootdir
# automatically
self._check_and_create_required_dirs(rootdir)
# Call InitSystem so the change to Dir::State::Status is actually
# recognized (LP: #320665)
apt_pkg.init_system()
# Prepare a lock object (context manager for archive lock)
archive_dir = apt_pkg.config.find_dir("Dir::Cache::Archives")
self._archive_lock = _WrappedLock(archive_dir)
self.open(progress)
def fix_broken(self):
# type: () -> None
"""Fix broken packages."""
self._depcache.fix_broken()
def _inc_changes_count(self):
# type: () -> None
"""Increase the number of changes"""
self._changes_count += 1
def _check_and_create_required_dirs(self, rootdir):
# type: (str) -> None
"""
check if the required apt directories/files are there and if
not create them
"""
files = ["/var/lib/dpkg/status",
"/etc/apt/sources.list",
]
dirs = ["/var/lib/dpkg",
"/etc/apt/",
"/var/cache/apt/archives/partial",
"/var/lib/apt/lists/partial",
]
for d in dirs:
if not os.path.exists(rootdir + d):
#print "creating: ", rootdir + d
os.makedirs(rootdir + d)
for f in files:
if not os.path.exists(rootdir + f):
open(rootdir + f, "w").close()
def _run_callbacks(self, name):
# type: (str) -> None
""" internal helper to run a callback """
if name in self._callbacks:
for callback in self._callbacks[name]:
if callback == '_inc_changes_count':
self._inc_changes_count()
else:
callback() # type: ignore
if name in self._callbacks2:
for callback, args, kwds in self._callbacks2[name]:
callback(self, *args, **kwds)
def open(self, progress=None):
# type: (OpProgress) -> None
""" Open the package cache, after that it can be used like
a dictionary
"""
if progress is None:
progress = apt.progress.base.OpProgress()
# close old cache on (re)open
self.close()
self.op_progress = progress
self._run_callbacks("cache_pre_open")
self._cache = apt_pkg.Cache(progress)
self._depcache = apt_pkg.DepCache(self._cache)
self._records = apt_pkg.PackageRecords(self._cache)
self._list = apt_pkg.SourceList()
self._list.read_main_list()
self._sorted_set = None
self.__remap()
self._have_multi_arch = len(apt_pkg.get_architectures()) > 1
progress.done()
self._run_callbacks("cache_post_open")
def __remap(self):
# type: () -> None
"""Called after cache reopen() to relocate to new cache.
Relocate objects like packages and versions from the old
underlying cache to the new one.
"""
for key in list(self._weakref.keys()):
try:
pkg = self._weakref[key]
except KeyError:
continue
try:
pkg._pkg = self._cache[pkg._pkg.name, pkg._pkg.architecture]
except LookupError:
del self._weakref[key]
for ver in list(self._weakversions):
# Package has been reseated above, reseat version
for v in ver.package._pkg.version_list:
# Requirements as in debListParser::SameVersion
if (v.hash == ver._cand.hash and
(v.size == 0 or ver._cand.size == 0 or
v.size == ver._cand.size) and
v.multi_arch == ver._cand.multi_arch and
v.ver_str == ver._cand.ver_str):
ver._cand = v
break
else:
self._weakversions.remove(ver)
def close(self):
# type: () -> None
""" Close the package cache """
# explicitely free the FDs that _records has open
del self._records
self._records = cast(apt_pkg.PackageRecords, None)
def __enter__(self):
# type: () -> Cache
""" Enter the with statement """
return self
def __exit__(self, exc_type, exc_value, traceback):
# type: (object, object, object) -> None
""" Exit the with statement """
self.close()
def __getitem__(self, key):
# type: (object) -> Package
""" look like a dictionary (get key) """
try:
key = str(key)
rawpkg = self._cache[key]
except KeyError:
raise KeyError('The cache has no package named %r' % key)
# It might be excluded due to not having a version or something
if not self.__is_real_pkg(rawpkg):
raise KeyError('The cache has no package named %r' % key)
pkg = self._rawpkg_to_pkg(rawpkg)
return pkg
def get(self, key, default=None):
# type: (object, object) -> Any
"""Return *self*[*key*] or *default* if *key* not in *self*.
.. versionadded:: 1.1
"""
try:
return self[key]
except KeyError:
return default
def _rawpkg_to_pkg(self, rawpkg):
# type: (apt_pkg.Package) -> Package
"""Returns the apt.Package object for an apt_pkg.Package object.
.. versionadded:: 1.0.0
"""
fullname = rawpkg.get_fullname(pretty=True)
return self._weakref.setdefault(fullname, Package(self, rawpkg))
def __iter__(self):
# type: () -> Iterator[Package]
# We iterate sorted over package names here. With this we read the
# package lists linearly if we need to access the package records,
# instead of having to do thousands of random seeks; the latter
# is disastrous if we use compressed package indexes, and slower than
# necessary for uncompressed indexes.
for pkgname in self.keys():
pkg = Package(self, self._cache[pkgname])
yield self._weakref.setdefault(pkgname, pkg)
def __is_real_pkg(self, rawpkg):
# type: (apt_pkg.Package) -> bool
"""Check if the apt_pkg.Package provided is a real package."""
return rawpkg.has_versions
def has_key(self, key):
# type: (object) -> bool
return key in self
def __contains__(self, key):
# type: (object) -> bool
try:
return self.__is_real_pkg(self._cache[str(key)])
except KeyError:
return False
def __len__(self):
# type: () -> int
return len(self.keys())
def keys(self):
# type: () -> List[str]
if self._sorted_set is None:
self._sorted_set = sorted(p.get_fullname(pretty=True)
for p in self._cache.packages
if self.__is_real_pkg(p))
return list(self._sorted_set) # We need a copy here, caller may modify
def get_changes(self):
# type: () -> List[Package]
""" Get the marked changes """
changes = []
marked_keep = self._depcache.marked_keep
for rawpkg in self._cache.packages:
if not marked_keep(rawpkg):
changes.append(self._rawpkg_to_pkg(rawpkg))
return changes
def upgrade(self, dist_upgrade=False):
# type: (bool) -> None
"""Upgrade all packages.
If the parameter *dist_upgrade* is True, new dependencies will be
installed as well (and conflicting packages may be removed). The
default value is False.
"""
self.cache_pre_change()
self._depcache.upgrade(dist_upgrade)
self.cache_post_change()
@property
def required_download(self):
# type: () -> int
"""Get the size of the packages that are required to download."""
if self._records is None:
raise CacheClosedException(
"Cache object used after close() called")
pm = apt_pkg.PackageManager(self._depcache)
fetcher = apt_pkg.Acquire()
pm.get_archives(fetcher, self._list, self._records)
return fetcher.fetch_needed
@property
def required_space(self):
# type: () -> int
"""Get the size of the additional required space on the fs."""
return self._depcache.usr_size
@property
def req_reinstall_pkgs(self):
# type: () -> Set[str]
"""Return the packages not downloadable packages in reqreinst state."""
reqreinst = set()
get_candidate_ver = self._depcache.get_candidate_ver
states = frozenset((apt_pkg.INSTSTATE_REINSTREQ,
apt_pkg.INSTSTATE_HOLD_REINSTREQ))
for pkg in self._cache.packages:
cand = get_candidate_ver(pkg)
if cand and not cand.downloadable and pkg.inst_state in states:
reqreinst.add(pkg.get_fullname(pretty=True))
return reqreinst
def _run_fetcher(self, fetcher, allow_unauthenticated):
# type: (apt_pkg.Acquire, Optional[bool]) -> int
if allow_unauthenticated is None:
allow_unauthenticated = apt_pkg.config.find_b("APT::Get::"
"AllowUnauthenticated", False)
untrusted = [item for item in fetcher.items if not item.is_trusted]
if untrusted and not allow_unauthenticated:
raise UntrustedException("Untrusted packages:\n%s" %
"\n".join(i.desc_uri for i in untrusted))
# do the actual fetching
res = fetcher.run()
# now check the result (this is the code from apt-get.cc)
failed = False
err_msg = ""
for item in fetcher.items:
if item.status == item.STAT_DONE:
continue
if item.status == item.STAT_IDLE:
continue
err_msg += "Failed to fetch %s %s\n" % (item.desc_uri,
item.error_text)
failed = True
# we raise an exception if the download failed or was cancelled
if res == fetcher.RESULT_CANCELLED:
raise FetchCancelledException(err_msg)
elif failed:
raise FetchFailedException(err_msg)
return res
def _fetch_archives(self,
fetcher, # type: apt_pkg.Acquire
pm, # type: apt_pkg.PackageManager
allow_unauthenticated=None, # type: Optional[bool]
):
# type: (...) -> int
""" fetch the needed archives """
if self._records is None:
raise CacheClosedException(
"Cache object used after close() called")
# this may as well throw a SystemError exception
if not pm.get_archives(fetcher, self._list, self._records):
return False
# now run the fetcher, throw exception if something fails to be
# fetched
return self._run_fetcher(fetcher, allow_unauthenticated)
def fetch_archives(self,
progress=None, # type: Optional[AcquireProgress]
fetcher=None, # type: Optional[apt_pkg.Acquire]
allow_unauthenticated=None, # type: Optional[bool]
):
# type: (...) -> int
"""Fetch the archives for all packages marked for install/upgrade.
You can specify either an :class:`apt.progress.base.AcquireProgress()`
object for the parameter *progress*, or specify an already
existing :class:`apt_pkg.Acquire` object for the parameter *fetcher*.
The return value of the function is undefined. If an error occurred,
an exception of type :class:`FetchFailedException` or
:class:`FetchCancelledException` is raised.
The keyword-only parameter *allow_unauthenticated* specifies whether
to allow unauthenticated downloads. If not specified, it defaults to
the configuration option `APT::Get::AllowUnauthenticated`.
.. versionadded:: 0.8.0
"""
if progress is not None and fetcher is not None:
raise ValueError("Takes a progress or a an Acquire object")
if progress is None:
progress = apt.progress.text.AcquireProgress()
if fetcher is None:
fetcher = apt_pkg.Acquire(progress)
with self._archive_lock:
return self._fetch_archives(fetcher,
apt_pkg.PackageManager(self._depcache),
allow_unauthenticated)
def is_virtual_package(self, pkgname):
# type: (str) -> bool
"""Return whether the package is a virtual package."""
try:
pkg = self._cache[pkgname]
except KeyError:
return False
else:
return bool(pkg.has_provides and not pkg.has_versions)
def get_providing_packages(self, pkgname, candidate_only=True,
include_nonvirtual=False):
# type: (str, bool, bool) -> List[Package]
"""Return a list of all packages providing a package.
Return a list of packages which provide the virtual package of the
specified name.
If 'candidate_only' is False, return all packages with at
least one version providing the virtual package. Otherwise,
return only those packages where the candidate version
provides the virtual package.
If 'include_nonvirtual' is True then it will search for all
packages providing pkgname, even if pkgname is not itself
a virtual pkg.
"""
providers = set() # type: Set[Package]
get_candidate_ver = self._depcache.get_candidate_ver
try:
vp = self._cache[pkgname]
if vp.has_versions and not include_nonvirtual:
return list(providers)
except KeyError:
return list(providers)
for provides, providesver, version in vp.provides_list:
rawpkg = version.parent_pkg
if not candidate_only or (version == get_candidate_ver(rawpkg)):
providers.add(self._rawpkg_to_pkg(rawpkg))
return list(providers)
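# Hedged sketch (commented out; 'mail-transport-agent' is a common virtual
# package name, used here only as an example):
#
# for pkg in cache.get_providing_packages('mail-transport-agent'):
# print(pkg.name)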
def update(self, fetch_progress=None, pulse_interval=0,
raise_on_error=True, sources_list=None):
# type: (AcquireProgress, int, bool, str) -> int
"""Run the equivalent of apt-get update.
You probably want to call open() afterwards, in order to utilise the
new cache. Otherwise, the old cache will be used which can lead to
strange bugs.
The first parameter *fetch_progress* may be set to an instance of
apt.progress.FetchProgress; the default is apt.progress.FetchProgress().
sources_list -- Update an alternative sources.list instead of the default.
Note that the sources.list.d directory is ignored in this case.
"""
with _WrappedLock(apt_pkg.config.find_dir("Dir::State::Lists")):
if sources_list:
old_sources_list = apt_pkg.config.find("Dir::Etc::sourcelist")
old_sources_list_d = (
apt_pkg.config.find("Dir::Etc::sourceparts"))
old_cleanup = apt_pkg.config.find("APT::List-Cleanup")
apt_pkg.config.set("Dir::Etc::sourcelist",
os.path.abspath(sources_list))
apt_pkg.config.set("Dir::Etc::sourceparts", "xxx")
apt_pkg.config.set("APT::List-Cleanup", "0")
slist = apt_pkg.SourceList()
slist.read_main_list()
else:
slist = self._list
try:
if fetch_progress is None:
fetch_progress = apt.progress.base.AcquireProgress()
try:
res = self._cache.update(fetch_progress, slist,
pulse_interval)
except SystemError as e:
raise FetchFailedException(e)
if not res and raise_on_error:
raise FetchFailedException()
else:
return res
finally:
if sources_list:
apt_pkg.config.set("Dir::Etc::sourcelist",
old_sources_list)
apt_pkg.config.set("Dir::Etc::sourceparts",
old_sources_list_d)
apt_pkg.config.set("APT::List-Cleanup",
old_cleanup)
def install_archives(self, pm, install_progress):
# type: (apt_pkg.PackageManager, InstallProgress) -> int
"""
The first parameter *pm* refers to an object returned by
apt_pkg.PackageManager().
The second parameter *install_progress* refers to an InstallProgress()
object of the module apt.progress.
This releases a system lock in newer versions, if there is any,
and reestablishes it afterwards.
"""
# compat with older API
try:
install_progress.startUpdate() # type: ignore
except AttributeError:
install_progress.start_update()
did_unlock = apt_pkg.pkgsystem_is_locked()
if did_unlock:
apt_pkg.pkgsystem_unlock_inner()
try:
res = install_progress.run(pm)
finally:
if did_unlock:
apt_pkg.pkgsystem_lock_inner()
try:
install_progress.finishUpdate() # type: ignore
except AttributeError:
install_progress.finish_update()
return res
def commit(self,
fetch_progress=None, # type: Optional[AcquireProgress]
install_progress=None, # type: Optional[InstallProgress]
allow_unauthenticated=None, # type: Optional[bool]
):
# type: (...) -> bool
"""Apply the marked changes to the cache.
The first parameter, *fetch_progress*, refers to a FetchProgress()
object as found in apt.progress, the default being
apt.progress.FetchProgress().
The second parameter, *install_progress*, is an
apt.progress.InstallProgress() object.
The keyword-only parameter *allow_unauthenticated* specifies whether
to allow unauthenticated downloads. If not specified, it defaults to
the configuration option `APT::Get::AllowUnauthenticated`.
"""
# FIXME:
# use the new acquire/pkgmanager interface here,
# raise exceptions when a download or install fails
# and send proper error strings to the application.
# Current a failed download will just display "error"
# which is less than optimal!
if fetch_progress is None:
fetch_progress = apt.progress.base.AcquireProgress()
if install_progress is None:
install_progress = apt.progress.base.InstallProgress()
assert install_progress is not None
with apt_pkg.SystemLock():
pm = apt_pkg.PackageManager(self._depcache)
fetcher = apt_pkg.Acquire(fetch_progress)
with self._archive_lock:
while True:
# fetch archives first
res = self._fetch_archives(fetcher, pm,
allow_unauthenticated)
# then install
res = self.install_archives(pm, install_progress)
if res == pm.RESULT_COMPLETED:
break
elif res == pm.RESULT_FAILED:
raise SystemError("installArchives() failed")
elif res == pm.RESULT_INCOMPLETE:
pass
else:
raise SystemError("internal-error: unknown result "
"code from InstallArchives: %s" %
res)
# reload the fetcher for media swapping
fetcher.shutdown()
return (res == pm.RESULT_COMPLETED)
def clear(self):
# type: () -> None
""" Unmark all changes """
self._depcache.init()
# cache changes
def cache_post_change(self):
# type: () -> None
" called internally if the cache has changed, emit a signal then "
self._run_callbacks("cache_post_change")
def cache_pre_change(self):
# type: () -> None
""" called internally if the cache is about to change, emit
a signal then """
self._run_callbacks("cache_pre_change")
def connect(self, name, callback):
# type: (str, Union[Callable[..., None],str]) -> None
"""Connect to a signal.
.. deprecated:: 1.0
Please use connect2() instead, as this function is very
likely to cause a memory leak.
"""
if callback != '_inc_changes_count':
warnings.warn("connect() likely causes a reference"
" cycle, use connect2() instead", RuntimeWarning, 2)
if name not in self._callbacks:
self._callbacks[name] = []
self._callbacks[name].append(callback)
def connect2(self, name, callback, *args, **kwds):
# type: (str, Callable[..., Any], object, object) -> None
"""Connect to a signal.
The callback will be passed the cache as an argument, and
any arguments passed to this function. Make sure that, if you
pass a method of a class as your callback, your class does not
contain a reference to the cache.
Cyclic references to the cache can cause issues if the Cache object
is replaced by a new one, because the cache keeps a lot of objects and
tens of open file descriptors.
currently only used for cache_{post,pre}_{changed,open}.
.. versionadded:: 1.0
"""
if name not in self._callbacks2:
self._callbacks2[name] = []
self._callbacks2[name].append((callback, args, kwds))
def actiongroup(self):
# type: () -> apt_pkg.ActionGroup
"""Return an `ActionGroup` object for the current cache.
Action groups can be used to speedup actions. The action group is
active as soon as it is created, and disabled when the object is
deleted or when release() is called.
You can use the action group as a context manager, this is the
recommended way::
with cache.actiongroup():
for package in my_selected_packages:
package.mark_install()
This way, the action group is automatically released as soon as the
with statement block is left. It also has the benefit of making it
clear which parts of the code run with a action group and which
don't.
"""
return apt_pkg.ActionGroup(self._depcache)
@property
def dpkg_journal_dirty(self):
# type: () -> bool
"""Return True if the dpkg was interrupted
All dpkg operations will fail until this is fixed, the action to
fix the system if dpkg got interrupted is to run
'dpkg --configure -a' as root.
"""
dpkg_status_dir = os.path.dirname(
apt_pkg.config.find_file("Dir::State::status"))
for f in os.listdir(os.path.join(dpkg_status_dir, "updates")):
if fnmatch.fnmatch(f, "[0-9]*"):
return True
return False
@property
def broken_count(self):
# type: () -> int
"""Return the number of packages with broken dependencies."""
return self._depcache.broken_count
@property
def delete_count(self):
# type: () -> int
"""Return the number of packages marked for deletion."""
return self._depcache.del_count
@property
def install_count(self):
# type: () -> int
"""Return the number of packages marked for installation."""
return self._depcache.inst_count
@property
def keep_count(self):
# type: () -> int
"""Return the number of packages marked as keep."""
return self._depcache.keep_count
class ProblemResolver(object):
"""Resolve problems due to dependencies and conflicts.
The first argument 'cache' is an instance of apt.Cache.
"""
def __init__(self, cache):
# type: (Cache) -> None
self._resolver = apt_pkg.ProblemResolver(cache._depcache)
self._cache = cache
def clear(self, package):
# type: (Package) -> None
"""Reset the package to the default state."""
self._resolver.clear(package._pkg)
def install_protect(self):
# type: () -> None
"""mark protected packages for install or removal."""
self._resolver.install_protect()
def protect(self, package):
# type: (Package) -> None
"""Protect a package so it won't be removed."""
self._resolver.protect(package._pkg)
def remove(self, package):
# type: (Package) -> None
"""Mark a package for removal."""
self._resolver.remove(package._pkg)
def resolve(self):
# type: () -> None
"""Resolve dependencies, try to remove packages where needed."""
self._cache.cache_pre_change()
self._resolver.resolve()
self._cache.cache_post_change()
def resolve_by_keep(self):
# type: () -> None
"""Resolve dependencies, do not try to remove packages."""
self._cache.cache_pre_change()
self._resolver.resolve_by_keep()
self._cache.cache_post_change()
# ----------------------------- experimental interface
class Filter(object):
""" Filter base class """
def apply(self, pkg):
# type: (Package) -> bool
""" Filter function, return True if the package matchs a
filter criteria and False otherwise
"""
return True
class MarkedChangesFilter(Filter):
""" Filter that returns all marked changes """
def apply(self, pkg):
# type: (Package) -> bool
if pkg.marked_install or pkg.marked_delete or pkg.marked_upgrade:
return True
else:
return False
class InstalledFilter(Filter):
"""Filter that returns all installed packages.
.. versionadded:: 1.0.0
"""
def apply(self, pkg):
# type: (Package) -> bool
return pkg.is_installed
class _FilteredCacheHelper(object):
"""Helper class for FilteredCache to break a reference cycle."""
def __init__(self, cache):
# type: (Cache) -> None
# Do not keep a reference to the cache, or you have a cycle!
self._filtered = {} # type: Dict[str,bool]
self._filters = [] # type: List[Filter]
cache.connect2("cache_post_change", self.filter_cache_post_change)
cache.connect2("cache_post_open", self.filter_cache_post_change)
def _reapply_filter(self, cache):
# type: (Cache) -> None
" internal helper to refilter "
# Do not keep a reference to the cache, or you have a cycle!
self._filtered = {}
for pkg in cache:
for f in self._filters:
if f.apply(pkg):
self._filtered[pkg.name] = True
break
def set_filter(self, filter):
# type: (Filter) -> None
"""Set the current active filter."""
self._filters = []
self._filters.append(filter)
def filter_cache_post_change(self, cache):
# type: (Cache) -> None
"""Called internally if the cache changes, emit a signal then."""
# Do not keep a reference to the cache, or you have a cycle!
self._reapply_filter(cache)
class FilteredCache(object):
""" A package cache that is filtered.
Can work on an existing cache or create a new one
"""
def __init__(self, cache=None, progress=None):
# type: (Cache, OpProgress) -> None
if cache is None:
self.cache = Cache(progress)
else:
self.cache = cache
self._helper = _FilteredCacheHelper(self.cache)
def __len__(self):
# type: () -> int
return len(self._helper._filtered)
def __getitem__(self, key):
# type: (str) -> Package
return self.cache[key]
def __iter__(self):
# type: () -> Iterator[Package]
for pkgname in self._helper._filtered:
yield self.cache[pkgname]
def keys(self):
# type: () -> KeysView[str]
return self._helper._filtered.keys()
def has_key(self, key):
# type: (object) -> bool
return key in self
def __contains__(self, key):
# type: (object) -> bool
try:
# Normalize package name for multi arch
return self.cache[key].name in self._helper._filtered
except KeyError:
return False
def set_filter(self, filter):
# type: (Filter) -> None
"""Set the current active filter."""
self._helper.set_filter(filter)
self.cache.cache_post_change()
def filter_cache_post_change(self):
# type: () -> None
"""Called internally if the cache changes, emit a signal then."""
self._helper.filter_cache_post_change(self.cache)
def __getattr__(self, key):
# type: (str) -> Any
"""we try to look exactly like a real cache."""
return getattr(self.cache, key)
def cache_pre_changed(cache):
# type: (Cache) -> None
print("cache pre changed")
def cache_post_changed(cache):
# type: (Cache) -> None
print("cache post changed")
def _test():
# type: () -> None
"""Internal test code."""
print("Cache self test")
apt_pkg.init()
cache = Cache(apt.progress.text.OpProgress())
cache.connect2("cache_pre_change", cache_pre_changed)
cache.connect2("cache_post_change", cache_post_changed)
print(("aptitude" in cache))
pkg = cache["aptitude"]
print(pkg.name)
print(len(cache))
for pkgname in cache.keys():
assert cache[pkgname].name == pkgname
cache.upgrade()
changes = cache.get_changes()
print(len(changes))
for pkg in changes:
assert pkg.name
# see if fetching works
for dirname in ["/tmp/pytest", "/tmp/pytest/partial"]:
if not os.path.exists(dirname):
os.mkdir(dirname)
apt_pkg.config.set("Dir::Cache::Archives", "/tmp/pytest")
pm = apt_pkg.PackageManager(cache._depcache)
fetcher = apt_pkg.Acquire(apt.progress.text.AcquireProgress())
cache._fetch_archives(fetcher, pm, None)
#sys.exit(1)
print("Testing filtered cache (argument is old cache)")
filtered = FilteredCache(cache)
filtered.cache.connect2("cache_pre_change", cache_pre_changed)
filtered.cache.connect2("cache_post_change", cache_post_changed)
filtered.cache.upgrade()
filtered.set_filter(MarkedChangesFilter())
print(len(filtered))
for pkgname in filtered.keys():
assert pkgname == filtered[pkgname].name
print(len(filtered))
print("Testing filtered cache (no argument)")
filtered = FilteredCache(progress=apt.progress.base.OpProgress())
filtered.cache.connect2("cache_pre_change", cache_pre_changed)
filtered.cache.connect2("cache_post_change", cache_post_changed)
filtered.cache.upgrade()
filtered.set_filter(MarkedChangesFilter())
print(len(filtered))
for pkgname in filtered.keys():
assert pkgname == filtered[pkgname].name
print(len(filtered))
if __name__ == '__main__':
_test()
```
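The `Filter`/`FilteredCache` machinery above is easiest to see in a short usage sketch. The snippet below is illustrative only: it assumes python-apt is installed and that the classes above are importable as python-apt's `apt.cache` module; the custom filter name is made up for the example.

```python
import apt_pkg
import apt.progress.text
from apt.cache import Cache, Filter, FilteredCache  # assumed import path

class UpgradableFilter(Filter):
    """Keep only packages that are installed and have an upgrade available."""
    def apply(self, pkg):
        return pkg.is_installed and pkg.is_upgradable

apt_pkg.init()
cache = Cache(apt.progress.text.OpProgress())

filtered = FilteredCache(cache)
filtered.set_filter(UpgradableFilter())
for pkg in filtered:
    print(pkg.name)
```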
#### File: dist-packages/aptsources/distro.py
```python
import gettext
import logging
import re
import shlex
import os
from xml.etree.ElementTree import ElementTree
from apt_pkg import gettext as _
class NoDistroTemplateException(Exception):
pass
class Distribution(object):
def __init__(self, id, codename, description, release, is_like=[]):
""" Container for distribution specific informations """
# LSB information
self.id = id
self.codename = codename
self.description = description
self.release = release
self.is_like = is_like
self.binary_type = "deb"
self.source_type = "deb-src"
def get_sources(self, sourceslist):
"""
Find the corresponding template, main and child sources
for the distribution
"""
self.sourceslist = sourceslist
# corresponding sources
self.source_template = None
self.child_sources = []
self.main_sources = []
self.disabled_sources = []
self.cdrom_sources = []
self.download_comps = []
self.enabled_comps = []
self.cdrom_comps = []
self.used_media = []
self.get_source_code = False
self.source_code_sources = []
# location of the sources
self.default_server = ""
self.main_server = ""
self.nearest_server = ""
self.used_servers = []
# find the distro template
for template in self.sourceslist.matcher.templates:
if (self.is_codename(template.name) and
template.distribution == self.id):
#print "yeah! found a template for %s" % self.description
#print template.description, template.base_uri, \
# template.components
self.source_template = template
break
if self.source_template is None:
raise NoDistroTemplateException(
"Error: could not find a distribution template for %s/%s" %
(self.id, self.codename))
# find main and child sources
media = []
comps = []
cdrom_comps = []
enabled_comps = []
#source_code = []
for source in self.sourceslist.list:
if (not source.invalid and
self.is_codename(source.dist) and
source.template and
source.template.official and
self.is_codename(source.template.name)):
#print "yeah! found a distro repo: %s" % source.line
# cdroms need do be handled differently
if (source.uri.startswith("cdrom:") and
not source.disabled):
self.cdrom_sources.append(source)
cdrom_comps.extend(source.comps)
elif (source.uri.startswith("cdrom:") and
source.disabled):
self.cdrom_sources.append(source)
elif (source.type == self.binary_type and
not source.disabled):
self.main_sources.append(source)
comps.extend(source.comps)
media.append(source.uri)
elif (source.type == self.binary_type and
source.disabled):
self.disabled_sources.append(source)
elif (source.type == self.source_type and
not source.disabled):
self.source_code_sources.append(source)
elif (source.type == self.source_type and
source.disabled):
self.disabled_sources.append(source)
if (not source.invalid and
source.template in self.source_template.children):
if (not source.disabled and
source.type == self.binary_type):
self.child_sources.append(source)
elif (not source.disabled and
source.type == self.source_type):
self.source_code_sources.append(source)
else:
self.disabled_sources.append(source)
self.download_comps = set(comps)
self.cdrom_comps = set(cdrom_comps)
enabled_comps.extend(comps)
enabled_comps.extend(cdrom_comps)
self.enabled_comps = set(enabled_comps)
self.used_media = set(media)
self.get_mirrors()
def get_mirrors(self, mirror_template=None):
"""
Provide a set of mirrors where you can get the distribution from
"""
# the main server is stored in the template
self.main_server = self.source_template.base_uri
# other used servers
for medium in self.used_media:
if not medium.startswith("cdrom:"):
# seems to be a network source
self.used_servers.append(medium)
if len(self.main_sources) == 0:
self.default_server = self.main_server
else:
self.default_server = self.main_sources[0].uri
# get a list of country codes and real names
self.countries = {}
fname = "/usr/share/xml/iso-codes/iso_3166.xml"
if os.path.exists(fname):
et = ElementTree(file=fname)
# python2.6 compat, the next two lines can get removed
# once we do not use py2.6 anymore
if getattr(et, "iter", None) is None:
et.iter = et.getiterator
it = et.iter('iso_3166_entry')
for elm in it:
try:
descr = elm.attrib["common_name"]
except KeyError:
descr = elm.attrib["name"]
try:
code = elm.attrib["alpha_2_code"]
except KeyError:
code = elm.attrib["alpha_3_code"]
self.countries[code.lower()] = gettext.dgettext('iso_3166',
descr)
# try to guess the nearest mirror from the locale
self.country = None
self.country_code = None
locale = os.getenv("LANG", default="en_UK")
a = locale.find("_")
z = locale.find(".")
if z == -1:
z = len(locale)
country_code = locale[a + 1:z].lower()
if mirror_template:
self.nearest_server = mirror_template % country_code
if country_code in self.countries:
self.country = self.countries[country_code]
self.country_code = country_code
def _get_mirror_name(self, server):
''' Try to get a human readable name for the main mirror of a country
Customize for different distributions '''
country = None
i = server.find("://")
li = server.find(".archive.ubuntu.com")
if i != -1 and li != -1:
country = server[i + len("://"):li]
if country in self.countries:
# TRANSLATORS: %s is a country
return _("Server for %s") % self.countries[country]
else:
return("%s" % server.rstrip("/ "))
def get_server_list(self):
''' Return a list of used and suggested servers '''
def compare_mirrors(mir1, mir2):
''' Helper function that handles comparison of mirror URLs
that may contain trailing slashes'''
return re.match(mir1.strip("/ "), mir2.rstrip("/ "))
# Store all available servers:
# Name, URI, active
mirrors = []
if (len(self.used_servers) < 1 or
(len(self.used_servers) == 1 and
compare_mirrors(self.used_servers[0], self.main_server))):
mirrors.append([_("Main server"), self.main_server, True])
if self.nearest_server:
mirrors.append([self._get_mirror_name(self.nearest_server),
self.nearest_server, False])
elif (len(self.used_servers) == 1 and not
compare_mirrors(self.used_servers[0], self.main_server)):
mirrors.append([_("Main server"), self.main_server, False])
# Only one server is used
server = self.used_servers[0]
# Append the nearest server if it's not already used
if self.nearest_server:
if not compare_mirrors(server, self.nearest_server):
mirrors.append([self._get_mirror_name(self.nearest_server),
self.nearest_server, False])
if server:
mirrors.append([self._get_mirror_name(server), server, True])
elif len(self.used_servers) > 1:
# More than one server is used. Since we don't handle this case
# in the user interface we set "custom servers" to true and
# append a list of all used servers
mirrors.append([_("Main server"), self.main_server, False])
if self.nearest_server:
mirrors.append([self._get_mirror_name(self.nearest_server),
self.nearest_server, False])
mirrors.append([_("Custom servers"), None, True])
for server in self.used_servers:
mirror_entry = [self._get_mirror_name(server), server, False]
if (compare_mirrors(server, self.nearest_server) or
compare_mirrors(server, self.main_server)):
continue
elif mirror_entry not in mirrors:
mirrors.append(mirror_entry)
return mirrors
def add_source(self, type=None,
uri=None, dist=None, comps=None, comment=""):
"""
Add distribution specific sources
"""
if uri is None:
# FIXME: Add support for the server selector
uri = self.default_server
if dist is None:
dist = self.codename
if comps is None:
comps = list(self.enabled_comps)
if type is None:
type = self.binary_type
new_source = self.sourceslist.add(type, uri, dist, comps, comment)
# if source code is enabled add a deb-src line after the new
# source
if self.get_source_code and type == self.binary_type:
self.sourceslist.add(
self.source_type, uri, dist, comps, comment,
file=new_source.file,
pos=self.sourceslist.list.index(new_source) + 1)
def enable_component(self, comp):
"""
Enable a component in all main, child and source code sources
(excluding cdrom based sources)
comp: the component that should be enabled
"""
comps = set([comp])
# look for parent components that we may have to add
for source in self.main_sources:
for c in source.template.components:
if c.name == comp and c.parent_component:
comps.add(c.parent_component)
for c in comps:
self._enable_component(c)
def _enable_component(self, comp):
def add_component_only_once(source, comps_per_dist):
"""
Check if we already added the component to the repository, since
a repository could be split into different apt lines. If not
add the component
"""
# if we don't have that distro, just return (can happen e.g. for
# dapper-updates, which may only appear in deb-src lines)
if source.dist not in comps_per_dist:
return
# if we have seen this component already for this distro,
# return (nothing to do)
if comp in comps_per_dist[source.dist]:
return
# add it
source.comps.append(comp)
comps_per_dist[source.dist].add(comp)
sources = []
sources.extend(self.main_sources)
sources.extend(self.child_sources)
# store what comps are enabled already per distro (where distro is
# e.g. "dapper", "dapper-updates")
comps_per_dist = {}
comps_per_sdist = {}
for s in sources:
if s.type == self.binary_type:
if s.dist not in comps_per_dist:
comps_per_dist[s.dist] = set()
for c in s.comps:
comps_per_dist[s.dist].add(c)
for s in self.source_code_sources:
if s.type == self.source_type:
if s.dist not in comps_per_sdist:
comps_per_sdist[s.dist] = set()
for c in s.comps:
comps_per_sdist[s.dist].add(c)
# check if there is a main source at all
if len(self.main_sources) < 1:
# create a new main source
self.add_source(comps=["%s" % comp])
else:
# add the comp to all main, child and source code sources
for source in sources:
add_component_only_once(source, comps_per_dist)
for source in self.source_code_sources:
add_component_only_once(source, comps_per_sdist)
# check if there is a main source code source at all
if self.get_source_code:
if len(self.source_code_sources) < 1:
# create a new main source
self.add_source(type=self.source_type, comps=["%s" % comp])
else:
# add the comp to all main, child and source code sources
for source in self.source_code_sources:
add_component_only_once(source, comps_per_sdist)
def disable_component(self, comp):
"""
Disable a component in all main, child and source code sources
(excluding cdrom based sources)
"""
sources = []
sources.extend(self.main_sources)
sources.extend(self.child_sources)
sources.extend(self.source_code_sources)
if comp in self.cdrom_comps:
sources = []
sources.extend(self.main_sources)
for source in sources:
if comp in source.comps:
source.comps.remove(comp)
if len(source.comps) < 1:
self.sourceslist.remove(source)
def change_server(self, uri):
''' Change the server of all distro specific sources to
a given host '''
def change_server_of_source(source, uri, seen):
# Avoid creating duplicate entries
source.uri = uri
for comp in source.comps:
if [source.uri, source.dist, comp] in seen:
source.comps.remove(comp)
else:
seen.append([source.uri, source.dist, comp])
if len(source.comps) < 1:
self.sourceslist.remove(source)
seen_binary = []
seen_source = []
self.default_server = uri
for source in self.main_sources:
change_server_of_source(source, uri, seen_binary)
for source in self.child_sources:
# Do not change the forced server of a child source
if (source.template.base_uri is None or
source.template.base_uri != source.uri):
change_server_of_source(source, uri, seen_binary)
for source in self.source_code_sources:
change_server_of_source(source, uri, seen_source)
def is_codename(self, name):
''' Compare a given name with the release codename. '''
if name == self.codename:
return True
else:
return False
class DebianDistribution(Distribution):
''' Class to support specific Debian features '''
def is_codename(self, name):
''' Compare a given name with the release codename and check
if it can be used as a synonym for a development release '''
if name == self.codename or self.release in ("testing", "unstable"):
return True
else:
return False
def _get_mirror_name(self, server):
''' Try to get a human readable name for the main mirror of a country
Debian specific '''
country = None
i = server.find("://ftp.")
li = server.find(".debian.org")
if i != -1 and li != -1:
country = server[i + len("://ftp."):li]
if country in self.countries:
# TRANSLATORS: %s is a country
return _("Server for %s") % gettext.dgettext(
"iso_3166", self.countries[country].rstrip()).rstrip()
else:
return("%s" % server.rstrip("/ "))
def get_mirrors(self):
Distribution.get_mirrors(
self, mirror_template="http://ftp.%s.debian.org/debian/")
class UbuntuDistribution(Distribution):
''' Class to support specific Ubuntu features '''
def get_mirrors(self):
Distribution.get_mirrors(
self, mirror_template="http://%s.archive.ubuntu.com/ubuntu/")
class UbuntuRTMDistribution(UbuntuDistribution):
''' Class to support specific Ubuntu RTM features '''
def get_mirrors(self):
self.main_server = self.source_template.base_uri
def _lsb_release():
"""Call lsb_release --idrc and return a mapping."""
from subprocess import Popen, PIPE
import errno
result = {'Codename': 'sid', 'Distributor ID': 'Debian',
'Description': 'Debian GNU/Linux unstable (sid)',
'Release': 'unstable'}
try:
out = Popen(['lsb_release', '-idrc'], stdout=PIPE).communicate()[0]
# Convert to unicode string, needed for Python 3.1
out = out.decode("utf-8")
result.update(l.split(":\t") for l in out.split("\n") if ':\t' in l)
except OSError as exc:
if exc.errno != errno.ENOENT:
logging.warning('lsb_release failed, using defaults: %s' % exc)
return result
def _system_image_channel():
"""Get the current channel from system-image-cli -i if possible."""
from subprocess import Popen, PIPE
import errno
try:
from subprocess import DEVNULL
except ImportError:
# no DEVNULL in 2.7
DEVNULL = os.open(os.devnull, os.O_RDWR)
try:
out = Popen(
['system-image-cli', '-i'], stdout=PIPE, stderr=DEVNULL,
universal_newlines=True).communicate()[0]
for l in out.splitlines():
if l.startswith('channel: '):
return l.split(': ', 1)[1]
except OSError as exc:
if exc.errno != errno.ENOENT:
logging.warning(
'system-image-cli failed, using defaults: %s' % exc)
return None
class _OSRelease:
DEFAULT_OS_RELEASE_FILE = '/etc/os-release'
OS_RELEASE_FILE = '/etc/os-release'
def __init__(self, lsb_compat=True):
self.result = {}
self.valid = False
self.file = _OSRelease.OS_RELEASE_FILE
if not os.path.isfile(self.file):
return
self.parse()
self.valid = True
if lsb_compat:
self.inject_lsb_compat()
def inject_lsb_compat(self):
self.result['Distributor ID'] = self.result['ID']
self.result['Description'] = self.result['PRETTY_NAME']
# Optionals as per os-release spec.
self.result['Codename'] = self.result.get('VERSION_CODENAME')
if not self.result['Codename']:
# Transient Ubuntu 16.04 field (LP: #1598212)
self.result['Codename'] = self.result.get('UBUNTU_CODENAME')
self.result['Release'] = self.result.get('VERSION_ID')
def parse(self):
f = open(self.file, 'r')
for line in f:
line = line.strip()
if not line:
continue
self.parse_entry(*line.split('=', 1))
f.close()
def parse_entry(self, key, value):
value = self.parse_value(value) # Values can be shell strings...
if key == "ID_LIKE" and isinstance(value, str):
# ID_LIKE is specified as a quoted space-separated list. This will
# be parsed as a string that we need to split manually.
value = value.split(' ')
self.result[key] = value
def parse_value(self, value):
values = shlex.split(value)
if len(values) == 1:
return values[0]
return values
def get_distro(id=None, codename=None, description=None, release=None,
is_like=[]):
"""
Check the currently used distribution and return the corresponding
distribution class that supports distro-specific features.
If no parameters are given the distro will be auto-detected via
a call to lsb_release
"""
# make testing easier
if not (id and codename and description and release):
os_release = _OSRelease()
os_result = []
lsb_result = _lsb_release()
if os_release.valid:
os_result = os_release.result
# TODO: We cannot presently use os-release to fully replace lsb_release
# because os-release's ID, VERSION_ID and VERSION_CODENAME fields
# are specified as lowercase. In lsb_release they can be uppercase
# or capitalized. So, switching to os-release would constitute
# a behavior break, at which point lsb_release support should be
# fully removed.
# This in particular is a problem for template matching, as this
# matches against Distribution objects and depends on string
# case.
lsb_result = _lsb_release()
id = lsb_result['Distributor ID']
codename = lsb_result['Codename']
description = lsb_result['Description']
release = lsb_result['Release']
# Not available with LSB, use get directly.
is_like = os_result.get('ID_LIKE', [])
if id == "Ubuntu":
channel = _system_image_channel()
if channel is not None and "ubuntu-rtm/" in channel:
id = "Ubuntu-RTM"
codename = channel.rsplit("/", 1)[1].split("-", 1)[0]
description = codename
release = codename
if id == "Ubuntu":
return UbuntuDistribution(id, codename, description, release, is_like)
if id == "Ubuntu-RTM":
return UbuntuRTMDistribution(
id, codename, description, release, is_like)
elif id == "Debian":
return DebianDistribution(id, codename, description, release, is_like)
else:
return Distribution(id, codename, description, release, is_like)
``` |
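A short usage sketch for the distribution helpers above, assuming python-apt's `aptsources` package is installed. `get_distro()` auto-detects the running distribution, `get_sources()` matches it against the sources.list templates, and `enable_component()` adds a component to the main and child sources; the component name below is a placeholder.

```python
import aptsources.distro
import aptsources.sourceslist

sources = aptsources.sourceslist.SourcesList()
distro = aptsources.distro.get_distro()  # e.g. an UbuntuDistribution on Ubuntu
distro.get_sources(sources)              # find template, main and child sources
print(distro.id, distro.codename, distro.main_server)

distro.enable_component("universe")      # placeholder component name
sources.save()                           # persist the modified sources.list
```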
{
"source": "21aslade/CumulusCI",
"score": 2
} |
#### File: cumulusci/tasks/bulkdata.py
```python
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from contextlib import contextmanager
import datetime
import io
import os
import time
import tempfile
import xml.etree.ElementTree as ET
from salesforce_bulk.util import IteratorBytesIO
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy import text
from sqlalchemy import types
from sqlalchemy import event
import requests
import unicodecsv
from cumulusci.core.utils import process_bool_arg, ordered_yaml_load
from cumulusci.core.exceptions import BulkDataException
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.utils import convert_to_snake_case, log_progress, os_friendly_path
# TODO: UserID Catcher
# TODO: Dater
# Create a custom sqlalchemy field type for sqlite datetime fields which are stored as integer of epoch time
class EpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
def process_bind_param(self, value, dialect):
return int((value - self.epoch).total_seconds()) * 1000
def process_result_value(self, value, dialect):
return self.epoch + datetime.timedelta(seconds=value / 1000)
# Listen for sqlalchemy column_reflect event and map datetime fields to EpochType
@event.listens_for(Table, "column_reflect")
def setup_epoch(inspector, table, column_info):
if isinstance(column_info["type"], types.DateTime):
column_info["type"] = EpochType()
class BulkJobTaskMixin(object):
def _job_state_from_batches(self, job_id):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job_id)
response = requests.get(uri, headers=self.bulk.headers())
return self._parse_job_state(response.content)
def _parse_job_state(self, xml):
tree = ET.fromstring(xml)
completed = 0
pending = 0
failed = 0
for el in tree.iterfind(".//{%s}state" % self.bulk.jobNS):
state = el.text
if state == "Not Processed":
return "Aborted"
elif state == "Failed":
failed += 1
elif state == "Completed":
completed += 1
else: # Queued, InProgress
pending += 1
if pending:
return "InProgress"
elif failed:
return "Failed"
else:
return "Completed"
def _wait_for_job(self, job_id):
while True:
job_status = self.bulk.job_status(job_id)
self.logger.info(
" Waiting for job {} ({}/{})".format(
job_id,
job_status["numberBatchesCompleted"],
job_status["numberBatchesTotal"],
)
)
result = self._job_state_from_batches(job_id)
if result != "InProgress":
break
time.sleep(10)
self.logger.info("Job {} finished with result: {}".format(job_id, result))
return result
def _sql_bulk_insert_from_csv(self, conn, table, columns, data_file):
if conn.dialect.name in ("postgresql", "psycopg2"):
# psycopg2 (the postgres driver) supports COPY FROM
# to efficiently bulk insert rows in CSV format
with conn.connection.cursor() as cursor:
cursor.copy_expert(
"COPY {} ({}) FROM STDIN WITH (FORMAT CSV)".format(
table, ",".join(columns)
),
data_file,
)
else:
# For other db drivers we need to use standard SQL
# -- this is optimized for ease of implementation
# rather than performance and may need more work.
reader = unicodecsv.DictReader(data_file, columns)
table = self.metadata.tables[table]
rows = list(reader)
if rows:
conn.execute(table.insert().values(rows))
self.session.flush()
class DeleteData(BaseSalesforceApiTask, BulkJobTaskMixin):
task_options = {
"objects": {
"description": "A list of objects to delete records from in order of deletion. If passed via command line, use a comma separated string",
"required": True,
},
"hardDelete": {
"description": "If True, perform a hard delete, bypassing the recycle bin. Default: False"
},
}
def _init_options(self, kwargs):
super(DeleteData, self)._init_options(kwargs)
# Split and trim objects string into a list if not already a list
if not isinstance(self.options["objects"], list):
self.options["objects"] = [
obj.strip() for obj in self.options["objects"].split(",")
]
self.options["hardDelete"] = process_bool_arg(self.options.get("hardDelete"))
def _run_task(self):
for obj in self.options["objects"]:
self.logger.info("Deleting all {} records".format(obj))
delete_job = self._create_job(obj)
if delete_job is not None:
self._wait_for_job(delete_job)
def _create_job(self, obj):
# Query for rows to delete
delete_rows = self._query_salesforce_for_records_to_delete(obj)
if not delete_rows:
self.logger.info(" No {} objects found, skipping delete".format(obj))
return
# Upload all the batches
operation = "hardDelete" if self.options["hardDelete"] else "delete"
delete_job = self.bulk.create_job(obj, operation)
self.logger.info(" Deleting {} {} records".format(len(delete_rows), obj))
batch_num = 1
for batch in self._upload_batches(delete_job, delete_rows):
self.logger.info(" Uploaded batch {}".format(batch))
batch_num += 1
self.bulk.close_job(delete_job)
return delete_job
def _query_salesforce_for_records_to_delete(self, obj):
# Query for all record ids
self.logger.info(" Querying for all {} objects".format(obj))
query_job = self.bulk.create_query_job(obj, contentType="CSV")
batch = self.bulk.query(query_job, "select Id from {}".format(obj))
while not self.bulk.is_batch_done(batch, query_job):
time.sleep(10)
self.bulk.close_job(query_job)
delete_rows = []
for result in self.bulk.get_all_results_for_query_batch(batch, query_job):
reader = unicodecsv.DictReader(result, encoding="utf-8")
for row in reader:
delete_rows.append(row)
return delete_rows
def _split_batches(self, data, batch_size):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(data), batch_size):
yield data[i : i + batch_size]
def _upload_batches(self, job, data):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job)
headers = self.bulk.headers({"Content-Type": "text/csv"})
for batch in self._split_batches(data, 10000):
rows = ['"Id"']
rows += ['"{}"'.format(record["Id"]) for record in batch]
resp = requests.post(uri, data="\n".join(rows), headers=headers)
content = resp.content
if resp.status_code >= 400:
self.bulk.raise_error(content, resp.status_code)
tree = ET.fromstring(content)
batch_id = tree.findtext("{%s}id" % self.bulk.jobNS)
yield batch_id
class LoadData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "The database url to a database containing the test data to load",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"start_step": {
"description": "If specified, skip steps before this one in the mapping",
"required": False,
},
"sql_path": {
"description": "If specified, a database will be created from an SQL script at the provided path"
},
}
def _init_options(self, kwargs):
super(LoadData, self)._init_options(kwargs)
if self.options.get("sql_path"):
if self.options.get("database_url"):
raise TaskOptionsError(
"The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
)
self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
if not os.path.isfile(self.options["sql_path"]):
raise TaskOptionsError(
"File {} does not exist".format(self.options["sql_path"])
)
self.logger.info("Using in-memory sqlite database")
self.options["database_url"] = "sqlite://"
def _run_task(self):
self._init_mapping()
self._init_db()
start_step = self.options.get("start_step")
started = False
for name, mapping in self.mapping.items():
# Skip steps until start_step
if not started and start_step and name != start_step:
self.logger.info("Skipping step: {}".format(name))
continue
started = True
self.logger.info("Running Job: {}".format(name))
result = self._load_mapping(mapping)
if result != "Completed":
break
def _load_mapping(self, mapping):
"""Load data for a single step."""
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
job_id, local_ids_for_batch = self._create_job(mapping)
result = self._wait_for_job(job_id)
# We store inserted ids even if some batches failed
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result
def _create_job(self, mapping):
"""Initiate a bulk insert and upload batches to run in parallel."""
job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV")
self.logger.info(" Created bulk job {}".format(job_id))
# Upload batches
local_ids_for_batch = {}
for batch_file, local_ids in self._get_batches(mapping):
batch_id = self.bulk.post_batch(job_id, batch_file)
local_ids_for_batch[batch_id] = local_ids
self.logger.info(" Uploaded batch {}".format(batch_id))
self.bulk.close_job(job_id)
return job_id, local_ids_for_batch
def _get_batches(self, mapping, batch_size=10000):
"""Get data from the local db"""
action = mapping.get("action", "insert")
fields = mapping.get("fields", {}).copy()
static = mapping.get("static", {})
lookups = mapping.get("lookups", {})
record_type = mapping.get("record_type")
# Skip Id field on insert
if action == "insert" and "Id" in fields:
del fields["Id"]
# Build the list of fields to import
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append("RecordTypeId")
# default to the profile assigned recordtype if we can't find any
# query for the RT by developer name
query = (
"SELECT Id FROM RecordType WHERE SObjectType='{0}'"
"AND DeveloperName = '{1}' LIMIT 1"
)
record_type_id = self.sf.query(
query.format(mapping.get("sf_object"), record_type)
)["records"][0]["Id"]
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return batch_file, writer, batch_ids
batch_file, writer, batch_ids = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
# Add static values to row
pkey = row[0]
row = list(row[1:]) + list(static.values())
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
# Yield and start a new file every [batch_size] rows
if not total_rows % batch_size:
batch_file.seek(0)
self.logger.info(" Processing batch {}".format(batch_num))
yield batch_file, batch_ids
batch_file, writer, batch_ids = start_batch()
batch_num += 1
# Yield result file for final batch
if batch_ids:
batch_file.seek(0)
yield batch_file, batch_ids
self.logger.info(
" Prepared {} rows for import to {}".format(
total_rows, mapping["sf_object"]
)
)
def _query_db(self, mapping):
"""Build a query to retrieve data from the local db.
Includes columns from the mapping
as well as joining to the id tables to get real SF ids
for lookups.
"""
model = self.models[mapping.get("table")]
# Use primary key instead of the field mapped to SF Id
fields = mapping.get("fields", {}).copy()
if mapping["oid_as_pk"]:
del fields["Id"]
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for f in fields.values():
columns.append(model.__table__.columns[f])
lookups = mapping.get("lookups", {}).copy()
for lookup in lookups.values():
lookup["aliased_table"] = aliased(
self.metadata.tables["{}_sf_ids".format(lookup["table"])]
)
columns.append(lookup["aliased_table"].columns.sf_id)
query = self.session.query(*columns)
if "record_type" in mapping and hasattr(model, "record_type"):
query = query.filter(model.record_type == mapping["record_type"])
if "filters" in mapping:
filter_args = []
for f in mapping["filters"]:
filter_args.append(text(f))
query = query.filter(*filter_args)
for sf_field, lookup in lookups.items():
# Outer join with lookup ids table:
# returns main obj even if lookup is null
key_field = get_lookup_key_field(lookup, sf_field)
value_column = getattr(model, key_field)
query = query.outerjoin(
lookup["aliased_table"],
lookup["aliased_table"].columns.id == value_column,
)
# Order by foreign key to minimize lock contention
# by trying to keep lookup targets in the same batch
lookup_column = getattr(model, key_field)
query = query.order_by(lookup_column)
self.logger.info(str(query))
return query
def _convert(self, value):
if value:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
"""Get the job results and store inserted SF Ids in a new table"""
id_table_name = self._reset_id_table(mapping)
conn = self.session.connection()
for batch_id, local_ids in local_ids_for_batch.items():
try:
results_url = "{}/job/{}/batch/{}/result".format(
self.bulk.endpoint, job_id, batch_id
)
# Download entire result file to a temporary file first
# to avoid the server dropping connections
with _download_file(results_url, self.bulk) as f:
self.logger.info(
" Downloaded results for batch {}".format(batch_id)
)
self._store_inserted_ids_for_batch(
f, local_ids, id_table_name, conn
)
self.logger.info(
" Updated {} for batch {}".format(id_table_name, batch_id)
)
except Exception: # pragma: nocover
# If we can't download one result file,
# don't let that stop us from downloading the others
self.logger.error(
"Could not download batch results: {}".format(batch_id)
)
continue
self.session.commit()
def _reset_id_table(self, mapping):
"""Create an empty table to hold the inserted SF Ids"""
if not hasattr(self, "_initialized_id_tables"):
self._initialized_id_tables = set()
id_table_name = "{}_sf_ids".format(mapping["table"])
if id_table_name not in self._initialized_id_tables:
if id_table_name in self.metadata.tables:
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(
id_table_name,
self.metadata,
Column("id", Unicode(255), primary_key=True),
Column("sf_id", Unicode(18)),
)
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
def _store_inserted_ids_for_batch(
self, result_file, local_ids, id_table_name, conn
):
# Set up a function to generate rows based on this result file
def produce_csv():
"""Iterate over job results and prepare rows for id table"""
reader = unicodecsv.reader(result_file)
next(reader) # skip header
i = 0
for row, local_id in zip(reader, local_ids):
if row[1] == "true": # Success
sf_id = row[0]
yield "{},{}\n".format(local_id, sf_id).encode("utf-8")
else:
self.logger.warning(" Error on row {}: {}".format(i, row[3]))
i += 1
# Bulk insert rows into id table
columns = ("id", "sf_id")
data_file = IteratorBytesIO(produce_csv())
self._sql_bulk_insert_from_csv(conn, id_table_name, columns, data_file)
def _sqlite_load(self):
conn = self.session.connection()
cursor = conn.connection.cursor()
with open(self.options["sql_path"], "r") as f:
try:
cursor.executescript(f.read())
finally:
cursor.close()
# self.session.flush()
def _init_db(self):
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize the DB session
self.session = Session(self.engine)
if self.options.get("sql_path"):
self._sqlite_load()
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# Loop through mappings and reflect each referenced table
self.models = {}
for name, mapping in self.mapping.items():
if "table" in mapping and mapping["table"] not in self.models:
self.models[mapping["table"]] = self.base.classes[mapping["table"]]
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mapping = ordered_yaml_load(f)
class QueryData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "A DATABASE_URL where the query output should be written",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"sql_path": {
"description": "If set, an SQL script will be generated at the path provided "
+ "This is useful for keeping data in the repository and allowing diffs."
},
}
def _init_options(self, kwargs):
super(QueryData, self)._init_options(kwargs)
if self.options.get("sql_path"):
if self.options.get("database_url"):
raise TaskOptionsError(
"The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
)
self.logger.info("Using in-memory sqlite database")
self.options["database_url"] = "sqlite://"
self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
def _run_task(self):
self._init_mapping()
self._init_db()
for mapping in self.mappings.values():
soql = self._soql_for_mapping(mapping)
self._run_query(soql, mapping)
self._drop_sf_id_columns()
if self.options.get("sql_path"):
self._sqlite_dump()
def _init_db(self):
self.models = {}
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# Create the tables
self._create_tables()
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# initialize session
self.session = create_session(bind=self.engine, autocommit=False)
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mappings = ordered_yaml_load(f)
def _soql_for_mapping(self, mapping):
sf_object = mapping["sf_object"]
fields = []
if not mapping["oid_as_pk"]:
fields.append("Id")
fields += [field["sf"] for field in self._fields_for_mapping(mapping)]
soql = "SELECT {fields} FROM {sf_object}".format(
**{"fields": ", ".join(fields), "sf_object": sf_object}
)
if "record_type" in mapping:
soql += " WHERE RecordType.DeveloperName = '{}'".format(
mapping["record_type"]
)
return soql
def _run_query(self, soql, mapping):
self.logger.info("Creating bulk job for: {sf_object}".format(**mapping))
job = self.bulk.create_query_job(mapping["sf_object"], contentType="CSV")
self.logger.info("Job id: {0}".format(job))
self.logger.info("Submitting query: {}".format(soql))
batch = self.bulk.query(job, soql)
self.logger.info("Batch id: {0}".format(batch))
self.bulk.wait_for_batch(job, batch)
self.logger.info("Batch {0} finished".format(batch))
self.bulk.close_job(job)
self.logger.info("Job {0} closed".format(job))
conn = self.session.connection()
for result_file in self._get_results(batch, job):
self._import_results(mapping, result_file, conn)
def _get_results(self, batch_id, job_id):
result_ids = self.bulk.get_query_batch_result_ids(batch_id, job_id=job_id)
for result_id in result_ids:
self.logger.info("Result id: {}".format(result_id))
uri = "{}/job/{}/batch/{}/result/{}".format(
self.bulk.endpoint, job_id, batch_id, result_id
)
with _download_file(uri, self.bulk) as f:
self.logger.info("Result {} downloaded".format(result_id))
yield f
def _import_results(self, mapping, result_file, conn):
# Map SF field names to local db column names
sf_header = [
name.strip('"')
for name in result_file.readline().strip().decode("utf-8").split(",")
]
columns = []
lookup_keys = []
for sf in sf_header:
if sf == "Records not found for this query":
return
if sf:
column = mapping.get("fields", {}).get(sf)
if not column:
lookup = mapping.get("lookups", {}).get(sf, {})
if lookup:
lookup_keys.append(sf)
column = get_lookup_key_field(lookup, sf)
if column:
columns.append(column)
if not columns:
return
record_type = mapping.get("record_type")
if record_type:
columns.append("record_type")
processor = log_progress(
process_incoming_rows(result_file, record_type), self.logger
)
data_file = IteratorBytesIO(processor)
if mapping["oid_as_pk"]:
self._sql_bulk_insert_from_csv(conn, mapping["table"], columns, data_file)
else:
# If using the autogenerated id field, split out the CSV file from the Bulk API
# into two separate files and load into the main table and the sf_id_table
with tempfile.TemporaryFile("w+b") as f_values:
with tempfile.TemporaryFile("w+b") as f_ids:
data_file_values, data_file_ids = self._split_batch_csv(
data_file, f_values, f_ids
)
self._sql_bulk_insert_from_csv(
conn, mapping["table"], columns, data_file_values
)
self._sql_bulk_insert_from_csv(
conn, mapping["sf_id_table"], ["sf_id"], data_file_ids
)
self.session.commit()
if lookup_keys and not mapping["oid_as_pk"]:
self._convert_lookups_to_id(mapping, lookup_keys)
def _get_mapping_for_table(self, table):
""" Returns the first mapping for a table name """
for mapping in self.mappings.values():
if mapping["table"] == table:
return mapping
def _split_batch_csv(self, data_file, f_values, f_ids):
writer_values = unicodecsv.writer(f_values)
writer_ids = unicodecsv.writer(f_ids)
for row in unicodecsv.reader(data_file):
writer_values.writerow(row[1:])
writer_ids.writerow(row[:1])
f_values.seek(0)
f_ids.seek(0)
return f_values, f_ids
def _convert_lookups_to_id(self, mapping, lookup_keys):
for lookup_key in lookup_keys:
lookup_dict = mapping["lookups"][lookup_key]
model = self.models[mapping["table"]]
lookup_mapping = self._get_mapping_for_table(lookup_dict["table"])
lookup_model = self.models[lookup_mapping["sf_id_table"]]
key_field = get_lookup_key_field(lookup_dict, lookup_key)
key_attr = getattr(model, key_field)
try:
self.session.query(model).filter(
key_attr.isnot(None), key_attr == lookup_model.sf_id
).update({key_attr: lookup_model.id}, synchronize_session=False)
except NotImplementedError:
# Some databases such as sqlite don't support multitable update
mappings = []
for row, lookup_id in self.session.query(model, lookup_model.id).join(
lookup_model, key_attr == lookup_model.sf_id
):
mappings.append({"id": row.id, key_field: lookup_id})
self.session.bulk_update_mappings(model, mappings)
self.session.commit()
def _create_tables(self):
for mapping in self.mappings.values():
self._create_table(mapping)
self.metadata.create_all()
def _create_table(self, mapping):
model_name = "{}Model".format(mapping["table"])
mapper_kwargs = {}
table_kwargs = {}
self.models[mapping["table"]] = type(model_name, (object,), {})
# Provide support for legacy mappings which used the OID as the pk but
# default to using an autoincrementing int pk and a separate sf_id column
fields = []
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
if mapping["oid_as_pk"]:
id_column = mapping["fields"]["Id"]
fields.append(Column(id_column, Unicode(255), primary_key=True))
else:
fields.append(Column("id", Integer(), primary_key=True, autoincrement=True))
for field in self._fields_for_mapping(mapping):
if mapping["oid_as_pk"] and field["sf"] == "Id":
continue
fields.append(Column(field["db"], Unicode(255)))
if "record_type" in mapping:
fields.append(Column("record_type", Unicode(255)))
t = Table(mapping["table"], self.metadata, *fields, **table_kwargs)
if t.exists():
raise BulkDataException("Table already exists: {}".format(mapping["table"]))
if not mapping["oid_as_pk"]:
mapping["sf_id_table"] = mapping["table"] + "_sf_id"
# If multiple mappings point to the same table, don't recreate the table
if mapping["sf_id_table"] not in self.models:
sf_id_model_name = "{}Model".format(mapping["sf_id_table"])
self.models[mapping["sf_id_table"]] = type(
sf_id_model_name, (object,), {}
)
sf_id_fields = [
Column("id", Integer(), primary_key=True, autoincrement=True),
Column("sf_id", Unicode(24)),
]
id_t = Table(mapping["sf_id_table"], self.metadata, *sf_id_fields)
mapper(self.models[mapping["sf_id_table"]], id_t)
mapper(self.models[mapping["table"]], t, **mapper_kwargs)
def _fields_for_mapping(self, mapping):
fields = []
for sf_field, db_field in mapping.get("fields", {}).items():
fields.append({"sf": sf_field, "db": db_field})
for sf_field, lookup in mapping.get("lookups", {}).items():
fields.append(
{"sf": sf_field, "db": get_lookup_key_field(lookup, sf_field)}
)
return fields
def _drop_sf_id_columns(self):
for mapping in self.mappings.values():
if mapping.get("oid_as_pk"):
continue
self.metadata.tables[mapping["sf_id_table"]].drop()
def _sqlite_dump(self):
path = self.options["sql_path"]
if os.path.exists(path):
os.remove(path)
with open(path, "w") as f:
for line in self.session.connection().connection.iterdump():
f.write(line + "\n")
@contextmanager
def _download_file(uri, bulk_api):
"""Download the bulk API result file for a single batch"""
resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
with tempfile.TemporaryFile("w+b") as f:
for chunk in resp.iter_content(chunk_size=None):
f.write(chunk)
f.seek(0)
yield f
def process_incoming_rows(f, record_type=None):
if record_type and not isinstance(record_type, bytes):
record_type = record_type.encode("utf-8")
for line in f:
if record_type:
yield line.rstrip() + b"," + record_type + b"\n"
else:
yield line
def get_lookup_key_field(lookup, sf_field):
return lookup.get("key_field", convert_to_snake_case(sf_field))
``` |
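The `EpochType` decorator near the top of this file stores datetimes as integer milliseconds since the Unix epoch, which is how the generated sqlite datasets encode datetime columns. A minimal round-trip sketch, assuming the class is importable from `cumulusci.tasks.bulkdata` as the file path above suggests:

```python
import datetime

# Assumed import path based on the file location above.
from cumulusci.tasks.bulkdata import EpochType

epoch_type = EpochType()
dt = datetime.datetime(2019, 6, 1, 12, 30, 0)

ms = epoch_type.process_bind_param(dt, dialect=None)
print(ms)          # integer milliseconds since 1970-01-01

restored = epoch_type.process_result_value(ms, dialect=None)
print(restored)    # 2019-06-01 12:30:00 round-trips
```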
{
"source": "21Buttons/aiorazemax",
"score": 2
} |
#### File: aiorazemax/aiorazemax/publisher.py
```python
import asyncio
import json
import logging
from datetime import datetime
from typing import Dict
import aiobotocore
class SNSMessagePublisher:
def __init__(self, sns_client, topic_arn):
self._client = sns_client
self._topic_arn = topic_arn
async def close(self):
await self._client.close()
async def publish(self, event_name: str, event_body: Dict, extra_meta: Dict = {}) -> Dict:
meta = {
"timestamp": datetime.utcnow().isoformat('T'),
"version": 1
}
meta.update(extra_meta)
message = {
"type": event_name,
"meta": meta,
"body": event_body
}
message_json = json.dumps(message)
# https://docs.python.org/3/howto/logging.html#optimization
logging.debug('event_name %s, topic_arn %s, message_json %s', event_name, self._topic_arn, message_json)
return await self._client.publish(TopicArn=self._topic_arn,
Message=message_json,
MessageAttributes={
'event_name': {
'DataType': 'String',
'StringValue': event_name
}
})
@classmethod
async def build(cls, topic_arn: str, aws_settings: Dict = {}) -> 'SNSMessagePublisher':
""" aws_settings is a dict with:
- region_name
- aws_access_key_id
- aws_secret_access_key
- endpoint_url (optional)
"""
loop = asyncio.get_running_loop()
session = aiobotocore.get_session(loop=loop)
sns_client = session.create_client('sns', **aws_settings)
return cls(sns_client, topic_arn)
``` |
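A minimal usage sketch for `SNSMessagePublisher`. The topic ARN, credentials, event name and body below are placeholders; `build()`, `publish()` and `close()` are used exactly as defined above.

```python
import asyncio

from aiorazemax.publisher import SNSMessagePublisher

async def main():
    publisher = await SNSMessagePublisher.build(
        topic_arn="arn:aws:sns:eu-west-1:123456789012:example-topic",  # placeholder
        aws_settings={
            "region_name": "eu-west-1",
            "aws_access_key_id": "example-key",         # placeholder
            "aws_secret_access_key": "example-secret",  # placeholder
        },
    )
    try:
        response = await publisher.publish("user_registered", {"user_id": 42})
        print(response)
    finally:
        await publisher.close()

asyncio.run(main())
```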
{
"source": "21dotco/docker-two1",
"score": 2
} |
#### File: service-ping/utils/publish.py
```python
import sys
import argparse
# 3rd party imports
import yaml
# two1 imports
from two1.commands.util.exceptions import ServerRequestError
from two1.sell.util.cli_helpers import get_rest_client
def publish_manifest(service, zt_ip, port):
""" Publish manifest to 21 Marketplace index.
"""
with open('/usr/src/app/manifest.yaml', "r") as f:
manifest_json = yaml.load(f)
manifest_json["basePath"] = "/%s" % service
manifest_json["host"] = "%s:%s" % (zt_ip, port)
try:
manifest_json["info"]["x-21-quick-buy"] = manifest_json["info"]["x-21-quick-buy"] % (zt_ip, port, service)
except Exception:
pass
try:
with open('/usr/src/app/manifest.yaml', "w") as f:
yaml.dump(manifest_json, f)
resp = get_rest_client().publish({"manifest": manifest_json,
"marketplace": "21market"})
except ServerRequestError as e:
if e.status_code == 403 and e.data.get("error") == "TO600":
sys.exit(101) # publish_stats.append((service.title(), False, ["Endpoint already published"]))
else:
sys.exit(102) # publish_stats.append((service.title(), False, ["Failed to publish"]))
except:
sys.exit(99) # publish_stats.append((service.title(), False, ["An unknown error occurred"]))
else:
if resp.status_code == 201:
sys.exit(100) # publish_stats.append((service.title(), True, ["Published"]))
else:
sys.exit(102) # publish_stats.append((service.title(), False, ["Failed to publish"]))
if __name__ == "__main__":
""" Run publish tool.
"""
# parse arguments
parser = argparse.ArgumentParser(description="Publish service manifest.")
parser.add_argument('service', action='store')
parser.add_argument('zt_ip', action='store')
parser.add_argument('port', action='store')
args = parser.parse_args()
# publish manifest
publish_manifest(args.service,
args.zt_ip,
args.port)
```
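A hypothetical way to drive the publish tool above from another script; the service name, IP, port and script path are placeholders, and the status mapping mirrors the `sys.exit()` codes used in `publish_manifest()`.

```python
import subprocess

# Placeholder arguments: service name, ZeroTier IP and port.
result = subprocess.run(
    ["python3", "utils/publish.py", "ping", "10.147.17.23", "6002"])

status = {
    100: "Published",
    101: "Endpoint already published",
    102: "Failed to publish",
    99: "An unknown error occurred",
}.get(result.returncode, "Unexpected exit code: {}".format(result.returncode))
print(status)
```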
#### File: service-ping/utils/update_manifest.py
```python
import os
import yaml
manifest_path = '/usr/src/app/manifest.yaml'
zt_ip, port = os.environ['PAYMENT_SERVER_IP'].replace('https://', '').replace('http://', '').rsplit(':', 1)
def update():
"""Update manifest with host IP."""
with open(manifest_path, "r") as f:
manifest_json = yaml.load(f)
service = os.environ['SERVICE']
manifest_json["basePath"] = "/%s" % service
manifest_json["host"] = "%s:%s" % (zt_ip, port)
try:
manifest_json["info"]["x-21-quick-buy"] = manifest_json["info"]["x-21-quick-buy"] % (zt_ip, port, service)
except:
pass
with open(manifest_path, "w") as f:
yaml.dump(manifest_json, f)
if __name__ == "__main__":
update()
``` |
{
"source": "21-guns/algo",
"score": 4
} |
#### File: Python3/class/class.py
```python
import copy
class Shape(object):
def __init__(self, line_count, name):
self.line_count = line_count
self.name = name
square = Shape(4, "Square")
square_copy = copy.deepcopy(square)
# nested class
class Shape(object):
def __init__(self, line_count, name):
self.line_count = line_count
self.name = name
class NestedClass(object):
pass
square = Shape(4, "Square")
nested1 = square.NestedClass()
# define class
class SomeClass:
pass
some_class = SomeClass()
# Read-only properties
## stored properties
class FilmList:
def __init__(self):
self.__count = 10
@property
def count(self):
return self.__count
film_list = FilmList()
print(film_list.count)
## computed properties
import math
class Circle:
def __init__(self, radius=0):
self.__radius = radius
@property
def area(self):
return math.pi * self.__radius ** 2
print(Circle(radius=10).area)
## stored properties
class Point:
def __init__(self):
self._x = 0
self._y = 0
@property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
@property
def y(self):
return self._y
@y.setter
def y(self, value):
self._y = value
point = Point()
point.x = 3
point.y = 7
print(point.x, point.y)
# lazy properties
from lazy import lazy
class FilmsList:
def __init__(self):
print("Some long operation")
class MediaPlayer:
@lazy
def films_list(self):
return FilmsList()
player = MediaPlayer()
print("media player created")
film_list1 = player.films_list
film_list2 = player.films_list
print(film_list1 == film_list2)
## type properties
class ClassProperty(property):
def __get__(self, instance, owner):
return self.fget.__get__(None, owner)()
class Lesson:
__lessonsCount = 0
def __init__(self):
Lesson.__lessonsCount += 1
@classmethod
def getCount(cls):
return cls.__lessonsCount
@classmethod
def setCount(cls, value):
cls.__lessonsCount = value
lessonsCount = ClassProperty(getCount, setCount)
lesson1 = Lesson()
print(lesson1.lessonsCount)
lesson2 = Lesson()
print(lesson1.lessonsCount)
## in/out parameters
def swapStrings(s1, s2):
s1[0], s2[0] = s2[0], s1[0]
s1 = ["A"]
s2 = ["B"]
swapStrings(s1, s2)
print(s1, s2)
## without any parameters
class Greeting:
@classmethod
def sayGoodbye(cls):
print("GoodBye")
@staticmethod
def sayHello():
print("Hello")
def test(arg):
print(arg)
Greeting.sayGoodbye()
Greeting.sayHello()
Greeting.test(1)
# array of parameters
def get_avg(*args):
if len(args) == 0:
return 0
sum = 0
for each in args:
sum += each
return sum / len(args)
print(get_avg(1, 2, 3, 4))
# variable parameters
def print5(data):
if len(data) > 5:
data = data[0: len(data) - 2]
return data
print(1234567)
## replacement of the parent constructor
class Man:
def __init__(self, name):
self.name = name
class Position:
def __init__(self, position):
self.position = position
class Employee(Man, Position):
def __del__(self):
print("Died")
def __enter__(self):
print("Enter")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
print("exit")
def __init__(self, name, position):
super(Employee, self).__init__(name)
Position.__init__(self, position)
self.__private = "asdasfasgsaf"
def __getitem__(self, item):
pass
employee = Employee("Max", "teacher")
print(employee.name, employee.position)
```
#### File: Python3/except/except.py
```python
def throw_if_true(param):
try:
if param:
raise Exception("test exception")
except Exception as e:
print("except")
else:
print("else")
throw_if_true(True)
throw_if_true(False)
# guaranteed code execution
def throw_if_true(param):
try:
if param:
raise Exception("test exception")
except Exception as e:
print("except")
finally:
print("finally")
throw_if_true(True)
throw_if_true(False)
# define an exception type
class SimpleException(Exception):
def __str__(self):
return "SimpleException"
class RecommendExc(Exception):
def __init__(self, arg):
        self.args = (arg,)
# catch the specific exception
try:
raise SimpleException()
except SimpleException as e:
print(e.__str__())
try:
raise RecommendExc("exception")
except RecommendExc as e:
print(e.__str__())
# catch all exceptions
try:
raise RecommendExc("exception")
except Exception as e:
print(e.__str__())
# a method that throws an exception
def method_with_exception():
raise Exception("Test")
# re-throw exceptions
try:
method_with_exception()
except Exception as e:
raise e
```
#### File: Python3/lambda/base.py
```python
from random import randint
x = 5
y = 6
# not recommended by PEP 8: do not assign a lambda expression to a name, use a def instead
addYToX = lambda x, y: x + y
print(addYToX(x, y))
# recommended by PEP 8
def add_x_to_y():
return lambda x, y: x + y
addYToXFunction = add_x_to_y()
print(addYToXFunction(x, y))
numbers = [randint(0, 10) for i in range(10)]
# lambda
numbers_1_1 = list(map(lambda x: x * 2 + 1, numbers))
numbers_1_2 = list(filter(lambda x: x % 3 == 0, numbers))
print(numbers_1_1)
print(numbers_1_2)
# define functions
def do_something(x):
return x * 2 + 1
def do_another(x):
return x % 3 == 0
numbers_2_1 = list(map(do_something, numbers))
numbers_2_2 = list(filter(do_another, numbers))
print(numbers_2_1)
print(numbers_2_2)
# capture of variables
def increment(n):
return lambda x: x + n
inc_3 = increment(3)
inc_5 = increment(5)
print(inc_3(10))
print(inc_5(10))
# with a parameter
pow_of_two = lambda power: pow(2.0, power)
pow8 = pow_of_two(8)
print(pow8)
def pow_of_three(power):
return pow(3.0, power)
pow8 = pow_of_three(8)
print(pow8)
# with multiple parameters
avg = lambda a, b: (a + b) / 2
avg_1 = avg(4, 8)
print(avg_1)
def get_avg(a, b):
return (a + b) / 2
avg_2 = get_avg(4, 8)
print(avg_2)
# with multiple operations
from math import *
class point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def get_distances(p1, p2):
d1 = pow(p1.x - p2.x, 2)
d2 = pow(p1.y - p2.y, 2)
return sqrt(d1 + d2)
point1 = point(0, 0)
point2 = point(5, 5)
distance = get_distances(point1, point2)
print(distance)
# currying
def curry(f):
return lambda a: lambda b: f(a, b)
avg = lambda a, b: (a + b) / 2
n1 = avg(1, 2)
print(n1)
avg_1 = curry(avg)(1)
n2 = avg_1(3)
print(n2)
curried_avg = lambda a: lambda b: (a + b) / 2
avg3 = curried_avg(3)
n3 = avg3(3)
print(n3)
# return None
add_2_and_print = lambda a: print(a + 2)
print(add_2_and_print(10))
def add_3_and_print(a):
print(a + 3)
print(add_3_and_print(10))
# Void function as parameter
def check_and_process(number, process):
if number < 10:
process(number)
check_and_process(5, lambda number: print(number * 10))
```
#### File: algo/sort/bubble.py
```python
__author__ = 'loopsun'
from sort.data import num_data_check, DataProvider
def bubble_sort(data):
if num_data_check(data):
print("排序前列表: ", data)
while True:
sortedTime = 0
for i in range(len(data) - 1):
if data[i] > data[i+1]:
data[i], data[i+1] = data[i+1], data[i]
sortedTime += 1
if not sortedTime:
break
print("冒泡排序后列表: ", data)
return data
else:
return data
if __name__ == '__main__':
test_demo = DataProvider().get_random_num_list()
bubble_sort(test_demo)
```
#### File: algo/sort/data.py
```python
__author__ = 'loopsun'
from random import randint
class DataProvider(object):
def __init__(self):
pass
def get_random_num_list(self, length=10, min=1, max=100):
num_list = [randint(min, max) for i in range(length)]
return num_list
def num_data_check(data):
if data and isinstance(data, list):
for i in data:
if not isinstance(i, int):
return False
return True
return False
if __name__ == '__main__':
demo = DataProvider()
print(demo.get_random_num_list())
```
#### File: algo/sort/insertion.py
```python
__author__ = 'loopsun'
from sort.data import num_data_check, DataProvider
def insertion_sort(data):
if num_data_check(data):
print("排序前列表: ", data)
sorted_index = 0
for i in range(sorted_index + 1, len(data)):
for k in range(sorted_index + 1):
if data[i] < data[k]:
data.insert(k, data[i])
del data[i + 1]
sorted_index += 1
print("选择排序后列表: ", data)
return data
else:
return data
if __name__ == '__main__':
test_demo = DataProvider().get_random_num_list()
insertion_sort(test_demo)
``` |
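The two sorting modules above only exercise themselves on random data. A quick sanity check against Python's built-in `sorted()` is sketched below; it assumes the repo's `sort` package is importable, matching the `from sort.data import ...` paths used in the files themselves.
```python
# Sketch: verify both sorts against sorted(); assumes the `sort` package
# from this repo (bubble.py, insertion.py, data.py) is on the import path.
from sort.bubble import bubble_sort
from sort.insertion import insertion_sort

sample = [5, 3, 8, 1, 9, 2]
assert bubble_sort(list(sample)) == sorted(sample)
assert insertion_sort(list(sample)) == sorted(sample)
print("both sorts agree with sorted()")
```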
{
"source": "21-Hack-Street/docker-elk",
"score": 3
} |
#### File: 21-Hack-Street/docker-elk/botweeter.py
```python
import tweepy
import json
from elasticsearch import Elasticsearch
# Tweeter Data
api_key = "q24LpCj7gzUROVrOYfXK0EUi1"
api_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"
auth = tweepy.OAuthHandler(api_key, api_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
hashtags = ["cyberattaque", "cybersécurité", "cyberdéfense", "FIC2022"]
keyword = "FIC_eu"
# ELK Data
es = Elasticsearch(["http://localhost:9200"])  # pass hosts as a list; HOST/PORT keyword args are not valid client parameters
def PrintDbg(tweet):
print()
print("metadata of tweet:")
print("author: " + tweet.user.screen_name)
print("date: " + str(tweet.created_at))
print("geolocalisation: " + str(tweet.user.location))
print("retweet count: " + str(tweet.retweet_count))
print("like count: " + str(tweet.favorite_count))
print("lang: " + tweet.lang)
print("source: " + tweet.source)
# Send data to elk
def SendData(json_obj, destination, type):
#print(json_obj)
es.index(index=destination, doc_type=type, body=json_obj)
# Tweet only
def SearchTw( Research, howMany):
SearchResults = tweepy.Cursor(api.search_tweets, q=Research + " -filter:retweets", tweet_mode="extended").items(howMany)
for tweet in SearchResults:
#PrintDbg(tweet)
data = {}
data["author"] = tweet.user.screen_name
data["date"] = str(tweet.created_at)
data["geolocalisation"] = tweet.user.location
data["retweet_count"] = tweet.retweet_count
data["favorite_count"] = tweet.favorite_count
data["lang"] = tweet.lang
data["source"] = tweet.source
SendData(json.dumps(data), "tweet_mining", "tweet")
# Retweet only
def SearchRTw( Research, howMany):
SearchResults = tweepy.Cursor(api.search_tweets, q=Research + " filter:retweets", tweet_mode="extended").items(howMany)
for tweet in SearchResults:
#PrintDbg(tweet)
data = {}
data["author"] = tweet.user.screen_name
data["date"] = str(tweet.created_at)
data["geolocalisation"] = tweet.user.location
data["retweet_count"] = tweet.retweet_count
data["favorite_count"] = tweet.favorite_count
data["lang"] = tweet.lang
data["source"] = tweet.source
SendData(json.dumps(data), "retweet_mining", "retweet")
for i in hashtags:
#looking for #
SearchTw("%23" + i, 10)
SearchRTw("%23" + i, 10)
#looking for @
SearchTw("%40" + keyword, 10)
SearchRTw("%40" + keyword, 10)
``` |
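The script above ships its Twitter credentials as hard-coded constants (partially redacted here). A common alternative, sketched below, is to read them from environment variables at start-up; the variable names are illustrative assumptions, not part of the original script.
```python
# Sketch: load the Twitter credentials from the environment instead of
# hard-coding them (the environment variable names are assumptions).
import os

api_key = os.environ["TWITTER_API_KEY"]
api_secret = os.environ["TWITTER_API_SECRET"]
access_token = os.environ["TWITTER_ACCESS_TOKEN"]
access_token_secret = os.environ["TWITTER_ACCESS_TOKEN_SECRET"]
```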
{
"source": "21haoshaonian/LynkCoHelper",
"score": 2
} |
#### File: LynkCoHelper/LynkCoHelper/lynco_regist_wrok.py
```python
import threading
import time
import base64
from lynkco_app_request import lynkco_app_request
from com.uestcit.api.gateway.sdk.auth.aes import aes as AES
from sms_request import sms_request
import json
import sys
import os
import re
class lynco_regist_wrok(threading.Thread):
"""新开线程处理任务"""
def __init__(self, config):
# 初始化线程
threading.Thread.__init__(self)
# 缓存配置信息
self.config = config
self.project_id = self.config['sms_platform']['project_id']
self.max_count = int(self.config['sms_platform']['count'])
self.sms_request = sms_request()
        # Cache APPKEY (stored base64-encoded, so decode it once)
        self.app_key = base64.b64decode(self.config['api_geteway']['app_key']).decode('utf-8')
        # Cache APPSECRET (stored base64-encoded, so decode it once)
        self.app_secret = base64.b64decode(self.config['api_geteway']['app_secret']).decode('utf-8')
        # Cache AESKEY (stored base64-encoded twice, so decode it twice)
        self.aes_key = base64.b64decode(base64.b64decode(self.config['aes_key']).decode('utf-8')).decode('utf-8')
self.AES = AES(self.aes_key)
self.lynkco_app_request = lynkco_app_request(self.app_key, self.app_secret)
def run(self):
"""线程开始的方法"""
print ("开始注册任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
self.token = self.get_token()
if('' == self.token):
return 0
phone_list = []
while len(phone_list) < self.max_count:
phone = self.regist()
if('' == phone):
continue
phone_list.append({ 'username': phone, 'password': '<PASSWORD>' })
with open(sys.path[0] + '/phone_list_' + time.strftime('%Y%m%d%H%M%S') + '.json', 'w') as json_file:
json_file.write(json.dumps(phone_list,ensure_ascii = False))
print ("注册执行完成任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
def get_token(self):
"""登录获取token"""
sms_username = self.config['sms_platform']['username']
sms_password = self.config['sms_platform']['password']
context = self.sms_request.login(sms_username, sms_password)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户登录失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
token = array[1]
print("短信账户登录成功,token:" + token + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return token
def regist(self):
"""App端操作流程"""
# 获取一个手机号
context = self.sms_request.get_phone(self.token, self.project_id)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户获取手机号失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
phone = array[1]
# 发送注册短信
response = self.lynkco_app_request.get_vcode_by_regist(phone)
if response['code'] != 'success':
print("发送注册短信失败" + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
# 循环10次获取短信内容,每次获取失败等待3秒钟
vcode = ''
fail_count = 0;
while fail_count < 10:
context = self.sms_request.get_phone_msg(self.token, self.project_id, phone)
array = context.split('|')
            if(int(array[0]) != 1):
                print("Failed to fetch the verification code from the SMS account: " + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
                fail_count += 1
                time.sleep(3)
            else:
                context = array[1]
                # Extract the six-digit verification code with a regex
                pattern = re.compile(r'\d{6}')
                result = pattern.findall(context)
                if(len(result) != 1):
                    print("Failed to parse the verification code: " + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
                else:
                    vcode = result[0]
                    print("Verification code received: " + vcode + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
                    break
if('' == vcode):
return ''
        # Submit the registration
        password = self.AES.encrypt('<PASSWORD>')
        response = self.lynkco_app_request.regist(phone, password, vcode)
        if response['code'] != 'success':
            print("Registration request failed: " + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
            return ''
        # Try logging in once to confirm the account works
        response = self.lynkco_app_request.login(phone, password)
        if response['code'] != 'success':
            print("Login attempt failed: " + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return phone
return phone
``` |
{
"source": "21isenough/lnurl",
"score": 2
} |
#### File: lnurl/lnurl/models.py
```python
import math
from pydantic import BaseModel, Field, constr, validator
from typing import List, Optional, Union
try:
from typing import Literal
except ImportError: # pragma: nocover
from typing_extensions import Literal
from .exceptions import LnurlResponseException
from .types import LightningInvoice, LightningNodeUri, LnurlPayMetadata, MilliSatoshi, TorUrl, WebUrl
class LnurlPayRouteHop(BaseModel):
node_id: str = Field(..., alias="nodeId")
channel_update: str = Field(..., alias="channelUpdate")
class LnurlPaySuccessAction(BaseModel):
pass
class AesAction(LnurlPaySuccessAction):
tag: Literal["aes"] = "aes"
description: constr(max_length=144)
ciphertext: str # TODO
iv: constr(min_length=24, max_length=24)
class MessageAction(LnurlPaySuccessAction):
tag: Literal["message"] = "message"
message: constr(max_length=144)
class UrlAction(LnurlPaySuccessAction):
tag: Literal["url"] = "url"
description: constr(max_length=144)
url: Union[TorUrl, WebUrl]
class LnurlResponseModel(BaseModel):
class Config:
allow_population_by_field_name = True
def dict(self, **kwargs):
kwargs.setdefault("by_alias", True)
return super().dict(**kwargs)
def json(self, **kwargs):
kwargs.setdefault("by_alias", True)
return super().json(**kwargs)
@property
def ok(self) -> bool:
return not ("status" in self.__fields__ and self.status == "ERROR")
class LnurlErrorResponse(LnurlResponseModel):
status: Literal["ERROR"] = "ERROR"
reason: str
@property
def error_msg(self) -> str:
return self.reason
class LnurlSuccessResponse(LnurlResponseModel):
status: Literal["OK"] = "OK"
class LnurlAuthResponse(LnurlResponseModel):
tag: Literal["login"] = "login"
callback: Union[TorUrl, WebUrl]
k1: str
class LnurlChannelResponse(LnurlResponseModel):
tag: Literal["channelRequest"] = "channelRequest"
uri: LightningNodeUri
callback: Union[TorUrl, WebUrl]
k1: str
class LnurlHostedChannelResponse(LnurlResponseModel):
tag: Literal["hostedChannelRequest"] = "hostedChannelRequest"
uri: LightningNodeUri
k1: str
alias: Optional[str]
class LnurlPayResponse(LnurlResponseModel):
tag: Literal["payRequest"] = "payRequest"
callback: Union[TorUrl, WebUrl]
min_sendable: MilliSatoshi = Field(..., alias="minSendable")
max_sendable: MilliSatoshi = Field(..., alias="maxSendable")
metadata: LnurlPayMetadata
@validator("max_sendable")
def max_less_than_min(cls, value, values, **kwargs): # noqa
if "min_sendable" in values and value < values["min_sendable"]:
raise ValueError("`max_sendable` cannot be less than `min_sendable`.")
return value
@property
def min_sats(self) -> int:
return int(math.ceil(self.min_sendable / 1000))
@property
def max_sats(self) -> int:
return int(math.floor(self.max_sendable / 1000))
class LnurlPayActionResponse(LnurlResponseModel):
pr: LightningInvoice
success_action: Optional[Union[MessageAction, UrlAction, AesAction]] = Field(None, alias="successAction")
routes: List[List[LnurlPayRouteHop]] = []
class LnurlWithdrawResponse(LnurlResponseModel):
tag: Literal["withdrawRequest"] = "withdrawRequest"
callback: Union[TorUrl, WebUrl]
k1: str
min_withdrawable: MilliSatoshi = Field(..., alias="minWithdrawable")
max_withdrawable: MilliSatoshi = Field(..., alias="maxWithdrawable")
default_description: str = Field("", alias="defaultDescription")
@validator("max_withdrawable")
def max_less_than_min(cls, value, values, **kwargs): # noqa
if "min_withdrawable" in values and value < values["min_withdrawable"]:
raise ValueError("`max_withdrawable` cannot be less than `min_withdrawable`.")
return value
@property
def min_sats(self) -> int:
return int(math.ceil(self.min_withdrawable / 1000))
@property
def max_sats(self) -> int:
return int(math.floor(self.max_withdrawable / 1000))
class LnurlResponse:
@staticmethod
def from_dict(d: dict) -> LnurlResponseModel:
try:
if "tag" in d:
# some services return `status` here, but it is not in the spec
d.pop("status", None)
return {
"channelRequest": LnurlChannelResponse,
"hostedChannelRequest": LnurlHostedChannelResponse,
"payRequest": LnurlPayResponse,
"withdrawRequest": LnurlWithdrawResponse,
}[d["tag"]](**d)
if "successAction" in d:
d.pop("status", None)
return LnurlPayActionResponse(**d)
            # some services return `status` in lowercase, but the spec says uppercase;
            # check for the key first so a missing `status` cannot raise a KeyError here
            if "status" in d and d["status"].upper() == "ERROR":
                d["status"] = "ERROR"
                return LnurlErrorResponse(**d)
            return LnurlSuccessResponse(**d)
except Exception:
raise LnurlResponseException
``` |
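`LnurlResponse.from_dict` dispatches on the `tag` field to pick a typed model. A minimal usage sketch follows; the payload values are illustrative and merely need to satisfy the package's validators (HTTPS callback, millisatoshi amounts, JSON-encoded metadata).
```python
# Sketch: parse a service reply into a typed response model.
# The dict below is an illustrative payRequest payload, not real data.
reply = {
    "tag": "payRequest",
    "callback": "https://example.com/lnurlpay",
    "minSendable": 1000,
    "maxSendable": 2000000,
    "metadata": '[["text/plain", "demo"]]',
}
res = LnurlResponse.from_dict(reply)
print(type(res).__name__, res.min_sats, res.max_sats)
```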
{
"source": "21jun/ctc-segmentation",
"score": 2
} |
#### File: ctc-segmentation/ctc_segmentation/ctc_segmentation.py
```python
import logging
import numpy as np
# import for table of character probabilities mapped to time
try:
from .ctc_segmentation_dyn import cython_fill_table
except ImportError:
import pyximport
pyximport.install(setup_args={"include_dirs": np.get_include()})
from .ctc_segmentation_dyn import cython_fill_table
class CtcSegmentationParameters:
"""Default values for CTC segmentation.
May need adjustment according to localization or ASR settings.
The character set is taken from the model dict, i.e., usually are generated
with SentencePiece. An ASR model trained in the corresponding language and
character set is needed. If the character set contains any punctuation
characters, "#", the Greek char "ε", or the space placeholder, adapt
these settings.
"""
max_prob = -10000000000.0
skip_prob = -10000000000.0
min_window_size = 8000
max_window_size = 100000
index_duration = 0.025
score_min_mean_over_L = 30
space = "·"
blank = 0
replace_spaces_with_blanks = False
blank_transition_cost_zero = False
preamble_transition_cost_zero = True
backtrack_from_max_t = False
self_transition = "ε"
start_of_ground_truth = "#"
excluded_characters = ".,»«•❍·"
tokenized_meta_symbol = "▁"
char_list = None
# legacy Parameters (will be ignored in future versions)
subsampling_factor = None
frame_duration_ms = None
@property
def index_duration_in_seconds(self):
"""Derive index duration from frame duration and subsampling.
This value can be fixed by setting ctc_index_duration, which causes
frame_duration_ms and subsampling_factor to be ignored.
Legacy function. This function will be removed in later versions
and replaced by index_duration.
"""
if self.subsampling_factor and self.frame_duration_ms:
t = self.frame_duration_ms * self.subsampling_factor / 1000
else:
t = self.index_duration
return t
@property
def flags(self):
"""Get configuration flags to pass to the table_fill operation."""
flags = int(self.blank_transition_cost_zero)
flags += 2 * int(self.preamble_transition_cost_zero)
return flags
def update_excluded_characters(self):
"""Remove known tokens from the list of excluded characters."""
self.excluded_characters = "".join(
[
char
for char in self.excluded_characters
if True not in [char == j for j in self.char_list]
]
)
logging.debug(f"Excluded characters: {self.excluded_characters}")
def __init__(self, **kwargs):
"""Set all parameters as attribute at init."""
self.set(**kwargs)
def set(self, **kwargs):
"""Update CtcSegmentationParameters.
Args:
**kwargs: Key-value dict that contains all properties
with their new values. Unknown properties are ignored.
"""
for key in kwargs:
if (
not key.startswith("_")
and hasattr(self, key)
and kwargs[key] is not None
):
setattr(self, key, kwargs[key])
def __repr__(self):
"""Print all attribute as dictionary."""
output = "CtcSegmentationParameters( "
for attribute in self.__dict__.keys():
value = self.__dict__[attribute]
output += f"{attribute}={value}, "
output += ")"
return output
def ctc_segmentation(config, lpz, ground_truth):
"""Extract character-level utterance alignments.
:param config: an instance of CtcSegmentationParameters
:param lpz: probabilities obtained from CTC output
:param ground_truth: ground truth text in the form of a label sequence
:return:
"""
blank = config.blank
offset = 0
audio_duration = lpz.shape[0] * config.index_duration_in_seconds
logging.info(
f"CTC segmentation of {len(ground_truth)} chars "
f"to {audio_duration:.2f}s audio "
f"({lpz.shape[0]} indices)."
)
if len(ground_truth) > lpz.shape[0] and config.skip_prob <= config.max_prob:
raise AssertionError("Audio is shorter than text!")
window_size = config.min_window_size
# Try multiple window lengths if it fails
while True:
# Create table of alignment probabilities
table = np.zeros(
[min(window_size, lpz.shape[0]), len(ground_truth)], dtype=np.float32
)
table.fill(config.max_prob)
# Use array to log window offsets per character
offsets = np.zeros([len(ground_truth)], dtype=np.int64)
# Run actual alignment of utterances
t, c = cython_fill_table(
table,
lpz.astype(np.float32),
np.array(ground_truth, dtype=np.int64),
offsets,
config.blank,
config.flags,
)
if config.backtrack_from_max_t:
t = table.shape[0] - 1
logging.debug(
f"Max. joint probability to align text to audio: "
f"{table[:, c].max()} at time index {t}"
)
# Backtracking
timings = np.zeros([len(ground_truth)])
char_probs = np.zeros([lpz.shape[0]])
state_list = [""] * lpz.shape[0]
try:
# Do until start is reached
while t != 0 or c != 0:
# Calculate the possible transition probs towards the current cell
min_s = None
min_switch_prob_delta = np.inf
max_lpz_prob = config.max_prob
for s in range(ground_truth.shape[1]):
if ground_truth[c, s] != -1:
offset = offsets[c] - (offsets[c - 1 - s] if c - s > 0 else 0)
switch_prob = (
lpz[t + offsets[c], ground_truth[c, s]]
if c > 0
else config.max_prob
)
est_switch_prob = table[t, c] - table[t - 1 + offset, c - 1 - s]
if abs(switch_prob - est_switch_prob) < min_switch_prob_delta:
min_switch_prob_delta = abs(switch_prob - est_switch_prob)
min_s = s
max_lpz_prob = max(max_lpz_prob, switch_prob)
stay_prob = (
max(lpz[t + offsets[c], blank], max_lpz_prob)
if t > 0
else config.max_prob
)
est_stay_prob = table[t, c] - table[t - 1, c]
# Check which transition has been taken
if abs(stay_prob - est_stay_prob) > min_switch_prob_delta:
# Apply reverse switch transition
if c > 0:
# Log timing and character - frame alignment
for s in range(0, min_s + 1):
timings[c - s] = (
offsets[c] + t
) * config.index_duration_in_seconds
char_probs[offsets[c] + t] = max_lpz_prob
char_index = ground_truth[c, min_s]
state_list[offsets[c] + t] = config.char_list[char_index]
c -= 1 + min_s
t -= 1 - offset
else:
# Apply reverse stay transition
char_probs[offsets[c] + t] = stay_prob
state_list[offsets[c] + t] = config.self_transition
t -= 1
except IndexError:
logging.warning(
"IndexError: Backtracking was not successful, "
"the window size might be too small."
)
window_size *= 2
if window_size < config.max_window_size:
logging.warning("Increasing the window size to: " + str(window_size))
continue
else:
logging.error("Maximum window size reached.")
logging.error("Check data and character list!")
raise
break
return timings, char_probs, state_list
def prepare_text(config, text, char_list=None):
"""Prepare the given text for CTC segmentation.
Creates a matrix of character symbols to represent the given text,
then creates list of char indices depending on the models char list.
:param config: an instance of CtcSegmentationParameters
:param text: iterable of utterance transcriptions
:param char_list: a set or list that includes all characters/symbols,
characters not included in this list are ignored
:return: label matrix, character index matrix
"""
# temporary compatibility fix for previous espnet versions
if type(config.blank) == str:
config.blank = 0
if char_list is not None:
config.char_list = char_list
blank = config.char_list[config.blank]
ground_truth = config.start_of_ground_truth
utt_begin_indices = []
for utt in text:
# One space in-between
if not ground_truth.endswith(config.space):
ground_truth += config.space
# Start new utterance remember index
utt_begin_indices.append(len(ground_truth) - 1)
# Add chars of utterance
for char in utt:
if char.isspace() and config.replace_spaces_with_blanks:
if not ground_truth.endswith(config.space):
ground_truth += config.space
elif char in config.char_list and char not in config.excluded_characters:
ground_truth += char
# Add space to the end
if not ground_truth.endswith(config.space):
ground_truth += config.space
logging.debug(f"ground_truth: {ground_truth}")
utt_begin_indices.append(len(ground_truth) - 1)
# Create matrix: time frame x number of letters the character symbol spans
max_char_len = max([len(c) for c in config.char_list])
ground_truth_mat = np.ones([len(ground_truth), max_char_len], np.int64) * -1
for i in range(len(ground_truth)):
for s in range(max_char_len):
if i - s < 0:
continue
span = ground_truth[i - s : i + 1]
span = span.replace(config.space, blank)
if span in config.char_list:
char_index = config.char_list.index(span)
ground_truth_mat[i, s] = char_index
return ground_truth_mat, utt_begin_indices
def prepare_tokenized_text(config, text):
"""Prepare the given tokenized text for CTC segmentation.
:param config: an instance of CtcSegmentationParameters
:param text: string with tokens separated by spaces
:return: label matrix, character index matrix
"""
ground_truth = [config.start_of_ground_truth]
utt_begin_indices = []
for utt in text:
# One space in-between
if not ground_truth[-1] == config.space:
ground_truth += [config.space]
# Start new utterance remember index
utt_begin_indices.append(len(ground_truth) - 1)
# Add tokens of utterance
for token in utt.split():
if token in config.char_list:
                if config.replace_spaces_with_blanks and not token.startswith(
config.tokenized_meta_symbol
):
ground_truth += [config.space]
ground_truth += [token]
# Add space to the end
if not ground_truth[-1] == config.space:
ground_truth += [config.space]
logging.debug(f"ground_truth: {ground_truth}")
utt_begin_indices.append(len(ground_truth) - 1)
# Create matrix: time frame x number of letters the character symbol spans
max_char_len = 1
ground_truth_mat = np.ones([len(ground_truth), max_char_len], np.int64) * -1
for i in range(1, len(ground_truth)):
if ground_truth[i] == config.space:
ground_truth_mat[i, 0] = config.blank
else:
char_index = config.char_list.index(ground_truth[i])
ground_truth_mat[i, 0] = char_index
return ground_truth_mat, utt_begin_indices
def prepare_token_list(config, text):
"""Prepare the given token list for CTC segmentation.
This function expects the text input in form of a list
of numpy arrays: [np.array([2, 5]), np.array([7, 9])]
:param config: an instance of CtcSegmentationParameters
:param text: list of numpy arrays with tokens
:return: label matrix, character index matrix
"""
ground_truth = [-1]
utt_begin_indices = []
for utt in text:
# It's not possible to detect spaces when sequence is
# already tokenized, so we skip replace_spaces_with_blanks
# Insert blanks between utterances
if not ground_truth[-1] == config.blank:
ground_truth += [config.blank]
# Start-of-new-utterance remember index
utt_begin_indices.append(len(ground_truth) - 1)
# Append tokens to list
ground_truth += utt.tolist()
# Add a blank to the end
if not ground_truth[-1] == config.blank:
ground_truth += [config.blank]
logging.debug(f"ground_truth: {ground_truth}")
utt_begin_indices.append(len(ground_truth) - 1)
# Create matrix: time frame x number of letters the character symbol spans
ground_truth_mat = np.array(ground_truth, dtype=np.int64).reshape(-1, 1)
return ground_truth_mat, utt_begin_indices
def determine_utterance_segments(config, utt_begin_indices, char_probs, timings, text):
"""Utterance-wise alignments from char-wise alignments.
:param config: an instance of CtcSegmentationParameters
:param utt_begin_indices: list of time indices of utterance start
:param char_probs: character positioned probabilities obtained from backtracking
:param timings: mapping of time indices to seconds
:param text: list of utterances
:return: segments, a list of: utterance start and end [s], and its confidence score
"""
def compute_time(index, align_type):
"""Compute start and end time of utterance.
:param index: frame index value
:param align_type: one of ["begin", "end"]
:return: start/end time of utterance in seconds
"""
middle = (timings[index] + timings[index - 1]) / 2
if align_type == "begin":
return max(timings[index + 1] - 0.5, middle)
elif align_type == "end":
return min(timings[index - 1] + 0.5, middle)
segments = []
min_prob = np.float64(-10000000000.0)
for i in range(len(text)):
start = compute_time(utt_begin_indices[i], "begin")
end = compute_time(utt_begin_indices[i + 1], "end")
start_t = int(round(start / config.index_duration_in_seconds))
end_t = int(round(end / config.index_duration_in_seconds))
# Compute confidence score by using the min mean probability
# after splitting into segments of L frames
n = config.score_min_mean_over_L
if end_t <= start_t:
min_avg = min_prob
elif end_t - start_t <= n:
min_avg = char_probs[start_t:end_t].mean()
else:
min_avg = np.float64(0.0)
for t in range(start_t, end_t - n):
min_avg = min(min_avg, char_probs[t : t + n].mean())
segments.append((start, end, min_avg))
return segments
``` |
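A rough end-to-end sketch of how the functions above fit together. The CTC posterior matrix `lpz`, the character list, and the frame duration are placeholders for values that would come from an actual CTC acoustic model; they are not defined in this module.
```python
# Sketch: align two utterances against a (fake) CTC posterior matrix.
# `char_list` and `lpz` stand in for outputs of a real ASR model.
import numpy as np

char_list = ["·", "a", "b", "c"]                 # index 0 acts as the blank
lpz = np.log(np.full((100, len(char_list)), 0.25, dtype=np.float32))
text = ["ab", "ca"]

config = CtcSegmentationParameters(char_list=char_list, index_duration=0.04)
ground_truth_mat, utt_begin_indices = prepare_text(config, text)
timings, char_probs, _ = ctc_segmentation(config, lpz, ground_truth_mat)
segments = determine_utterance_segments(config, utt_begin_indices, char_probs, timings, text)
for utt, (start, end, score) in zip(text, segments):
    print(f"{utt}: {start:.2f}s - {end:.2f}s (score {score:.2f})")
```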
{
"source": "21kmegerusca/MyProject",
"score": 4
} |
#### File: 21kmegerusca/MyProject/3loadconstructList.py
```python
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
import shelve
def buildStructure(x, y, z, structure):
xStart = x
zStart = z
for row in structure:
for column in row:
for block in column:
mc.setBlock(x, y, z, block.id, block.data)
z += 1
x += 1
z = zStart
y += 1
x = xStart
# Open the shelve file that holds the saved three-dimensional structure lists
structure = shelve.open("structuresFile.db")
structureName = input("Enter the name of the structure: ")
pos = mc.player.getTilePos()
x = pos.x
y = pos.y
z = pos.z
buildStructure(x, y, z, structure[structureName])
```
#### File: 21kmegerusca/MyProject/3saveconstructList.py
```python
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
import shelve
def sortPair(val1, val2):
if val1 > val2:
return val2, val1
else:
return val1, val2
def copyStructure(x1, y1, z1, x2, y2, z2):
x1, x2 = sortPair(x1, x2)
y1, y2 = sortPair(y1, y2)
z1, z2 = sortPair(z1, z2)
width = x2 - x1
height = y2 - y1
length = z2 - z1
structure = []
print("Пожалуйста, подождите…")
#Копируем конструкцию
for row in range(height):
structure.append([])
for column in range(width):
structure[row].append([])
for depth in range(length):
block = mc.getBlockWithData(x1 + column, y1 + row, z1 + depth)
structure[row][column].append(block)
return structure
s = ""
while s!="выход":
# Получаем координаты первого угла
input("Пройдите к первому углу и нажмите Enter в этом окне")
pos1 = mc.player.getTilePos()
x1 = pos1.x
y1 = pos1.y
z1 = pos1.z
    # Get the coordinates of the second (opposite) corner
    input("Go to the opposite corner and press Enter in this window")
pos2 = mc.player.getTilePos()
x2 = pos2.x
y2 = pos2.y
z2 = pos2.z
structure = copyStructure(x1, y1, z1, x2, y2, z2)
structureName = input("Как вы хотите назвать конструкцию? ")
#Сохраняем конструкцию в файл
shelveFile = shelve.open("structuresFile.db")
shelveFile[structureName] = structure
shelveFile.close()
s = input("если больше не хотите сохранять конструкции, напишите 'выход'")
``` |
{
"source": "21littlesun/spectrome",
"score": 3
} |
#### File: spectrome/atlases/load_atlas.py
```python
import numpy as np
import os
from ..utils.path import get_root_path
def get_DK_names(cortical_only=False):
"""Return the DK regions in alphabetical ordering.
Args:
cortical_only (Bool): If true, returns 68 cortical regions instead of 86.
Returns:
array: names of DK regions.
"""
if cortical_only:
names_file = "OrderingAlphabetical_68ROIs.txt"
else:
names_file = "OrderingAlphabetical_86ROIs.txt"
root_path = get_root_path()
file_path = os.path.join(root_path, "atlases/")
file_path = os.path.join(file_path, names_file)
names_array = np.genfromtxt(file_path, dtype="str")
return names_array
def get_HCP_names(cortical_only=False):
"""Return the DK regions following yet another naming convention.
Args:
cortical_only (Bool): If true, returns 68 cortical regions instead of 86.
Returns:
array: names of DK regions following convention 2.
"""
if cortical_only:
names_file = "OrderingAlphabetical_68ROIs.txt"
else:
names_file = "OrderingAlphabetical_86ROIs.txt"
root_path = get_root_path()
file_path = os.path.join(root_path, "atlases/")
file_path = os.path.join(file_path, names_file)
names_array = np.genfromtxt(file_path, dtype="str")
return names_array
```
#### File: spectrome/forward/network_transfer.py
```python
import numpy as np
def network_transfer_function(brain, parameters, w, use_smalleigs=True):
"""Network Transfer Function for spectral graph model.
Args:
brain (Brain): specific brain to calculate NTF
parameters (dict): parameters for ntf. We shall keep this separate from Brain
for now, as we want to change and update according to fitting.
frequency (float): frequency at which to calculate NTF
use_smalleigs (boolean): how many eigen modes to use, True = using only 2/3 (cortical), leaving out subcortical
Returns:
frequency_response (numpy asarray): frequency response of local oscillators
ev (numpy asarray): Eigen values
Vv (numpy asarray): Eigen vectors
model_out (numpy asarray): Each region's frequency response for
the given frequency (w)
FCmodel (numpy asarray): Functional connectivity - still in the works
"""
C = brain.reducedConnectome
D = brain.distance_matrix
# defining parameters
tau_e = parameters["tau_e"]
tau_i = parameters["tau_i"]
speed = parameters["speed"]
gei = parameters[
"gei"
] # excitatory-inhibitory synaptic conductance as ratio of E-E syn
gii = parameters[
"gii"
] # inhibitory-inhibitory synaptic conductance as ratio of E-E syn
tauC = parameters["tauC"] # tauC = 0.5*tau_e
global_alpha = parameters["alpha"]
local_alpha = 1
# Not being used: Pin = 1 and tau_syn = 0.002
# Defining some other parameters used:
zero_thr = 0.05
a = 0.5 # fraction of signal at a node that is recurrent excitatory
# define sum of degrees for rows and columns for laplacian normalization
rowdegree = np.transpose(np.sum(C, axis=1))
coldegree = np.sum(C, axis=0)
qind = rowdegree + coldegree < 0.2 * np.mean(rowdegree + coldegree)
rowdegree[qind] = np.inf
coldegree[qind] = np.inf
nroi = C.shape[0]
if use_smalleigs is True:
K = np.round(2 / 3 * C.shape[0]) # 2/3
K = K.astype(int)
else:
K = nroi
Tau = 0.001 * D / speed
Cc = C * np.exp(-1j * Tau * w)
# Eigen Decomposition of Complex Laplacian Here
L1 = np.identity(nroi)
L2 = np.divide(1, np.sqrt(np.multiply(rowdegree, coldegree)) + np.spacing(1))
L = L1 - global_alpha * np.matmul(np.diag(L2), Cc)
    d, v = np.linalg.eig(L)  # eigen decomposition with numpy.linalg.eig
eig_ind = np.argsort(np.abs(d)) # sorting in ascending order and absolute value
eig_vec = v[:, eig_ind] # re-indexing eigen vectors according to sorted index
eig_val = d[eig_ind] # re-indexing eigen values with same sorted index
eigenvalues = np.transpose(eig_val)
eigenvectors = eig_vec[:, 0:K]
# Cortical model
Fe = np.divide(1 / tau_e ** 2, (1j * w + 1 / tau_e) ** 2)
Fi = np.divide(gii * 1 / tau_i ** 2, (1j * w + 1 / tau_i) ** 2)
# Hed = 1/tau_e/(1j*w + 1/tau_e*He)
Hed = local_alpha / tau_e / (1j * w + local_alpha / tau_e * Fe)
# Hid = 1/tau_i/(1j*w + 1/tau_i*Hi)
Hid = local_alpha / tau_i / (1j * w + local_alpha / tau_i * Fi)
Heid = gei * Fe * Fi / (1 + gei * Fe * Fi)
Htotal = a * Hed + (1 - a) / 2 * Hid + (1 - a) / 2 * Heid
q1 = 1 / local_alpha * tauC * (1j * w + local_alpha / tauC * Fe * eigenvalues)
# q1 = tauC*(1j*w + 1/tauC*He*ev)
qthr = zero_thr * np.abs(q1[:]).max()
magq1 = np.maximum(np.abs(q1), qthr)
angq1 = np.angle(q1)
q1 = np.multiply(magq1, np.exp(1j * angq1))
frequency_response = np.divide(Htotal, q1)
model_out = 0
for k in range(1, K):
model_out += frequency_response[k] * eigenvectors[:, k]
FCmodel = np.matmul(
np.matmul(eigenvectors[:, 1:K], np.diag(frequency_response[1:K] ** 2)),
np.transpose(eigenvectors[:, 1:K]),
)
den = np.sqrt(np.abs(model_out))
FCmodel = np.matmul(np.matmul(np.diag(1 / den), FCmodel), np.diag(1 / den))
return frequency_response, eigenvalues, eigenvectors, model_out, FCmodel
def network_transfer_local_alpha(brain, parameters, w, use_smalleigs=True):
"""Network Transfer Function for spectral graph model.
Args:
brain (Brain): specific brain to calculate NTF
parameters (dict): parameters for ntf. We shall keep this separate from Brain
for now, as we want to change and update according to fitting.
frequency (float): frequency at which to calculate NTF
use_smalleigs (boolean): how many eigen modes to use, True = using only 2/3 (cortical), leaving out subcortical
Returns:
frequency_response (numpy asarray):
ev (numpy asarray): Eigen values
Vv (numpy asarray): Eigen vectors
model_out (numpy asarray): Each region's frequency response for
the given frequency (w)
FCmodel (numpy asarray): Functional connectivity - still in the works
"""
C = brain.reducedConnectome
D = brain.distance_matrix
tau_e = parameters["tau_e"]
tau_i = parameters["tau_i"]
speed = parameters["speed"]
gei = parameters[
"gei"
] # excitatory-inhibitory synaptic conductance as ratio of E-E syn
gii = parameters[
"gii"
] # inhibitory-inhibitory synaptic conductance as ratio of E-E syn
tauC = parameters["tauC"] # tauC = 0.5*tau_e
alpha = parameters["alpha"]
# local_alpha = 1
# Not being used: Pin = 1 and tau_syn = 0.002
# Defining some other parameters used:
zero_thr = 0.05
a = 0.5 # fraction of signal at a node that is recurrent excitatory
# define sum of degrees for rows and columns for laplacian normalization
rowdegree = np.transpose(np.sum(C, axis=1))
coldegree = np.sum(C, axis=0)
qind = rowdegree + coldegree < 0.2 * np.mean(rowdegree + coldegree)
rowdegree[qind] = np.inf
coldegree[qind] = np.inf
nroi = C.shape[0]
if use_smalleigs is True:
K = np.round(2 / 3 * C.shape[0]) # 2/3
K = K.astype(int)
else:
K = nroi
Tau = 0.001 * D / speed
Cc = C * np.exp(-1j * Tau * w)
# Eigen Decomposition of Complex Laplacian Here
#L1 = 0.8 * np.identity(nroi) # 0.8I in matlab
L1 = np.identity(nroi)
L2 = np.divide(1, np.sqrt(np.multiply(rowdegree, coldegree)) + np.spacing(1))
L = L1 - alpha * np.matmul(np.diag(L2), Cc)
    d, v = np.linalg.eig(L)  # eigen decomposition with numpy.linalg.eig
eig_ind = np.argsort(np.abs(d)) # sorting in ascending order and absolute value
eig_vec = v[:, eig_ind] # re-indexing eigen vectors according to sorted index
eig_val = d[eig_ind] # re-indexing eigen values with same sorted index
eigenvalues = np.transpose(eig_val)
eigenvectors = eig_vec[:, 0:K]
# Cortical model
Fe = np.divide(1 / tau_e ** 2, (1j * w + 1 / tau_e) ** 2)
Fi = np.divide(gii * 1 / tau_i ** 2, (1j * w + 1 / tau_i) ** 2)
# Hed = 1/tau_e/(1j*w + 1/tau_e*He)
Hed = alpha / tau_e / (1j * w + alpha / tau_e * Fe)
# Hid = 1/tau_i/(1j*w + 1/tau_i*Hi)
Hid = alpha / tau_i / (1j * w + alpha / tau_i * Fi)
Heid = gei * Fe * Fi / (1 + gei * Fe * Fi)
Htotal = a * Hed + (1 - a) / 2 * Hid + (1 - a) / 2 * Heid
q1 = 1 / alpha * tauC * (1j * w + alpha / tauC * Fe * eigenvalues)
# q1 = tauC*(1j*w + 1/tauC*He*ev)
qthr = zero_thr * np.abs(q1[:]).max()
magq1 = np.maximum(np.abs(q1), qthr)
angq1 = np.angle(q1)
q1 = np.multiply(magq1, np.exp(1j * angq1))
frequency_response = np.divide(Htotal, q1)
model_out = 0
for k in range(1, K):
model_out += frequency_response[k] * eigenvectors[:, k]
FCmodel = np.matmul(
np.matmul(eigenvectors[:, 1:K], np.diag(frequency_response[1:K] ** 2)),
np.transpose(eigenvectors[:, 1:K]),
)
den = np.sqrt(np.abs(model_out))
FCmodel = np.matmul(np.matmul(np.diag(1 / den), FCmodel), np.diag(1 / den))
return frequency_response, eigenvalues, eigenvectors, model_out, FCmodel
def network_transfer_HM(brain, parameters, w, use_smalleigs=True):
"""Network transfer function for spectral graph model, the local oscillator model is modified by HM.
Args:
brain (Brain): Brain class object with connectome and distance matrix
parameters (dict): model parameters
w (float): Frequency of interest
use_smalleigs (boolean): how many eigen modes to use, True = using only 2/3 (cortical), leaving out subcortical
Returns:
[type]: [description]
"""
# Housing keeping - defining connectomes, distance matrix, and model parameters.
C = brain.reducedConnectome
D = brain.distance_matrix
tau_e = parameters["tau_e"]
tau_i = parameters["tau_i"]
speed = parameters["speed"]
gei = parameters["gei"]
gii = parameters["gii"]
tauC = parameters["tauC"]
global_alpha = parameters["alpha"]
local_alpha = 1
# Not being used: Pin = 1 and tau_syn = 0.002
# Defining some other parameters used:
zero_thr = 0.05
# use_smalleigs = True # otherwise uses full eig()
numsmalleigs = np.round(2 / 3 * C.shape[0]) # 2/3
a = 0.5 # fraction of signal at a node that is recurrent excitatory
# gei = 4 # excitatory-inhibitory synaptic conductance as ratio of E-E syn
# gii = 1 # inhibitory-inhibitory synaptic conductance as ratio of E-E syn
# tauC = 0.5*tau_e
# define sum of degrees in rows and columns for laplacian normalization
rowdegree = np.transpose(np.sum(C, axis=1))
coldegree = np.sum(C, axis=0)
qind = rowdegree + coldegree < 0.2 * np.mean(rowdegree + coldegree)
rowdegree[qind] = np.inf
coldegree[qind] = np.inf
# Use all eigenmodes or 2/3 eigenmodes excluding the subcortical ones
nroi = C.shape[0]
if use_smalleigs is True:
K = np.round(2 / 3 * C.shape[0]) # 2/3
K = K.astype(int)
else:
K = nroi
# Complex connectivity:
Tau = (
0.001 * D / speed
) # divide distance by speed, which is in meters per second, 0.001 converts D to meters
Cc = C * np.exp(-1j * Tau * w)
# Complex Laplacian:
L1 = np.identity(nroi)
L2 = np.divide(1, np.sqrt(np.multiply(rowdegree, coldegree)) + np.spacing(1))
L = L1 - global_alpha * np.matmul(np.diag(L2), Cc)
# eigen decomposition:
    d, v = np.linalg.eig(L)  # eigen decomposition with numpy.linalg.eig
eig_ind = np.argsort(np.abs(d)) # sorting in ascending order and absolute value
eig_vec = v[:, eig_ind] # re-indexing eigen vectors according to sorted index
eig_val = d[eig_ind] # re-indexing eigen values with same sorted index
eigenvalues = np.transpose(eig_val)
eigenvectors = eig_vec[:, 0:K] # K is either 2/3 or all eigenmodes
# Cortical model:
Fe = np.divide(1 / tau_e ** 2, (1j * w + 1 / tau_e) ** 2)
Fi = np.divide(gii * 1 / tau_i ** 2, (1j * w + 1 / tau_i) ** 2)
He = local_alpha / tau_e / (1j * w + local_alpha / tau_e * Fe)
Hi = local_alpha / tau_i / (1j * w + local_alpha / tau_e * Fi)
# denominator term for alternative model proposed by HM
denom = 1 + (gei ** 2 / tau_e * tau_i) * Fe * Fi * He * Hi
He_alt = np.divide(He, denom)
Hi_alt = np.divide(Hi, denom)
Hoffdiag_alt = np.divide(
gei * ((-1 / tau_e) * Fe + (1 / tau_i) * Fi) * He * Hi, denom
)
Htotal = He_alt + Hi_alt + Hoffdiag_alt
# This scaling may not be necessary, take a look at Htotal
q1 = 1 / local_alpha * tauC * (1j * w + local_alpha / tauC * Fe * eigenvalues)
# q1 = tauC*(1j*w + 1/tauC*He*ev)
qthr = zero_thr * np.abs(q1[:]).max()
magq1 = np.maximum(np.abs(q1), qthr)
angq1 = np.angle(q1)
q1 = np.multiply(magq1, np.exp(1j * angq1))
frequency_response = np.divide(Htotal, q1)
model_out = 0
for k in range(1, K):
model_out += frequency_response[k] * eigenvectors[:, k]
# FCmodel = np.matmul(
# np.matmul(eigenvectors[:, 1:K], np.diag(frequency_response[1:K] ** 2)), np.transpose(eigenvectors[:, 1:K])
# )
# den = np.sqrt(np.abs(model_out))
# FCmodel = np.matmul(np.matmul(np.diag(1 / den), FCmodel), np.diag(1 / den))
return frequency_response, eigenvalues, eigenvectors, model_out, Htotal
# Look at Htotal only, see if it's similar to HOrig.
```
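A sketch of how the transfer function above might be evaluated over a frequency range to obtain a model spectrum per region. The `brain` object and the parameter values are placeholders; a real run would use a spectrome `Brain` with its connectome and distance matrix loaded, and fitted parameters.
```python
# Sketch: evaluate the network transfer function over 2-45 Hz.
# `brain` must be a spectrome Brain with reducedConnectome and
# distance_matrix already set (constructed elsewhere); the parameter
# values below are illustrative, not fitted values.
import numpy as np

parameters = {
    "tau_e": 0.012, "tau_i": 0.003, "speed": 5.0,
    "gei": 4.0, "gii": 1.0, "tauC": 0.006, "alpha": 1.0,
}
freqs = np.arange(2, 45, 1)  # Hz
spectra = []
for f in freqs:
    w = 2 * np.pi * f
    _, _, _, model_out, _ = network_transfer_function(brain, parameters, w)
    spectra.append(np.abs(model_out))
spectra = np.asarray(spectra)  # shape: (n_freqs, n_regions)
```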
#### File: spectrome/preprocess/permute.py
```python
from ..utils import path as pth
import csv
import numpy as np
def order_dict(data, orderfile):
"""order_dict. Reorders a dictionary according to the order of keys passed in the
list in orderfile. Returns a reordered dictionary.
Args:
data (type): Input data dictionary.
orderfile (type): Name of file containing list of keys in the desired standard order.
Returns:
type: Dictionary of data, reordered to match the input standard of the orderfile.
"""
dataorder = data.keys()
standardlist = pth.read_hdf5(orderfile)
newdata = {}
loc_in_standard = []
for key in standardlist:
if key in dataorder:
newdata[key] = data[key]
loc_in_standard.append(standardlist.index(key))
else:
break
# print('Skipping region of brain -- not in data')
return newdata
def get_HCP_order(filepath, save=False, fileout=None, cortexstart=18):
"""Import the HCP connectome, and create a list with the same order so
that input data can be rearranged to compare. The dictionary keys are standardised to single
words, lower case only. The HCP is also rearranged so that the cortex comes first,
non-cortex parts of the brain are placed after.
Args:
filepath (string): Path to HCP connectome file.
save (Boolean): Save output list to file?
fileout (string): Location of output list.
cortexstart (int): Index of start of cortex in original HCP ordering.
Returns:
List: List of brain regions; HCP rearranged so that the cortex comes first,
non-cortex parts of the brain are placed after.
"""
with open(filepath) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
line_count = 0
region_order = []
for row in csv_reader:
if line_count == 0:
for cell in row:
index = cell.find("ctx")
if index != -1:
cell = cell[4:6].upper() + cell[index + 7 :].lower()
region_order.append(cell)
else:
cell = cell
region_order.append(cell)
line_count += 1
else:
break
# put the non-cortex to the end of the order list
order = region_order[cortexstart:]
for item in region_order[0:cortexstart]:
order.append(item)
if save:
pth.save_hdf5(fileout, order)
return order
def reorder_connectome(conmat, distmat, save=False, cortexstart=18):
"""A function to rearrange matrices by a cyclical permutation (no rearranging of order).
This is the equivalent of perm_HCP in the first code version:
np.concatenate([np.arange(18, 52),
np.arange(52, 86),
np.arange(0, 9),
np.arange(9, 18)])
Args:
conmat (numpy array): Direct input connectome
distmat (numpy array): Direct input distance matrix
save (bool): Whether to save out to files.
cortexstart (int): Index of the first point in the cortex, eg. LHbankssts.
Returns:
numpy arrays: Connectome, distance matrix, and the permutation used on them.
"""
Connectome = conmat
Distance_matrix = distmat
permutation = np.concatenate(
[np.arange(cortexstart, 86), np.arange(0, cortexstart)]
)
Connectome = Connectome[permutation,][:, permutation]
Distance_matrix = Distance_matrix[permutation,][:, permutation]
return Connectome, Distance_matrix, permutation
```
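A short sketch of the intended call order for the helpers above. The `.npy` file names are placeholders for whatever connectome and distance files a given dataset provides; they are not files shipped with spectrome.
```python
# Sketch: load a raw HCP-ordered connectome and move the cortex to the front.
# The .npy file names below are placeholders, not files from this repo.
import numpy as np

conmat = np.load("connectome_raw.npy")    # 86 x 86 structural connectivity
distmat = np.load("distance_raw.npy")     # 86 x 86 fiber distances
C, D, perm = reorder_connectome(conmat, distmat, cortexstart=18)
print(C.shape, D.shape, perm[:5])
```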
#### File: spectrome/utils/path.py
```python
import os
import deepdish as dd
from pathlib import Path
def get_file_path(filename):
"""Find filename in the relative directory `../data/` .
Args:
filename (str): file we're looking for in the ./data/ directory.
Returns:
str: absolute path to file "filename" in ./data/ dir.
"""
root_dir = Path(__file__).parent.parent
file_dir = os.path.join(str(root_dir), "data", filename)
return file_dir
def get_data_path():
"""Return absolute path to `/data/`."""
root_path = Path(__file__).parent.parent
data_path = os.path.join(str(root_path), "data")
return data_path
def get_absolute_path(relative_path="."):
"""Return absolute path given `relative_path`.
Args:
relative_path (str): path relative to 'here'.
Returns:
str: absolute path
"""
here_dir = os.path.dirname(os.path.realpath("__file__"))
abs_path = os.path.join(str(here_dir), relative_path)
return abs_path
def get_sibling_path(folder):
"""returns the path of 'folder' on the same level"""
root_dir = Path(__file__).parent.parent
sibling_dir = os.path.join(str(root_dir), folder)
return sibling_dir
def get_root_path():
root_path = Path(__file__).parent.parent
return root_path
def save_hdf5(path, dict):
"""Save out a dictionary/numpy array to HDF5 format using deepdish package.
Args:
path (type): full path including filename of intended output.
dict (type): dictionary/numpy array to be saved.
Returns:
type: Description of returned object.
"""
dd.io.save(path, dict)
def read_hdf5(path):
"""Read in dictionary/numpy array from HDF5 format using deepdish package.
Args:
path (type): full path including filename of input.
Returns:
type: dictionary of data.
"""
dict = dd.io.load(path)
return dict
def walk_tree(datapath):
"""Return list of directories in the passed folder.
Args:
datapath (type): folder of interest.
Returns:
type: list of directories in the passed folder.
"""
directories = []
for (path, dirs, files) in os.walk(datapath):
directories.append(dirs)
return directories[0]
``` |
{
"source": "21PIRLO21/LeetCode2020",
"score": 4
} |
#### File: 21PIRLO21/LeetCode2020/0095_Unique_Binary_Search_Trees_II.py
```python
from typing import List
''' Medium '''
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution_1:
def __init__(self):
self.maps = {}
def generateTrees(self, n):
if n < 1: return []
return self.helper(1, n)
def helper(self, s, e):
if (s,e) in self.maps:
return self.maps[(s,e)]
res = []
if s > e:
res.append(None)
return res
for i in range(s, e + 1):
left = self.helper(s, i - 1)
right = self.helper(i + 1, e)
for L in left:
for R in right:
root = TreeNode(i)
root.left = L
root.right = R
res.append(root)
self.maps[(s,e)] = res
return res
class Solution_2:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
dct = {}
def left_right(left: int, right: int) -> List[TreeNode]:
if left > right:
return [None]
if (left, right) in dct:
return dct[(left, right)]
ret = []
for i in range(left, right+1):
left_lst = left_right(left, i-1)
right_lst = left_right(i+1, right)
for L in left_lst:
for R in right_lst:
app_Tree = TreeNode(i)
app_Tree.left = L
app_Tree.right = R
ret.append(app_Tree)
dct[(left, right)] = ret
return ret
# left_right(1, n)
return left_right(1, n)
# the fastest
class Solution_3:
def generateTrees(self, n):
"""
:type n: int
:rtype: List[TreeNode]
"""
def generate_trees(start, end):
if start > end:
return [None,]
all_trees = []
for i in range(start, end + 1): # pick up a root
# all possible left subtrees if i is choosen to be a root
left_trees = generate_trees(start, i - 1)
# all possible right subtrees if i is choosen to be a root
right_trees = generate_trees(i + 1, end)
# connect left and right subtrees to the root i
for l in left_trees:
for r in right_trees:
current_tree = TreeNode(i)
current_tree.left = l
current_tree.right = r
all_trees.append(current_tree)
return all_trees
return generate_trees(1, n) if n else []
```
#### File: 21PIRLO21/LeetCode2020/0167_Two_Sum_II_Input_array_is_sorted.py
```python
from typing import List
''' Easy '''
class Solution_1:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
low, high = 0, len(numbers) - 1
while low < high:
total = numbers[low] + numbers[high]
if total == target:
return [low + 1, high + 1]
elif total < target:
low += 1
else:
high -= 1
return [-1, -1]
# the fastest
class Solution_2:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
i = 0
j = len(numbers)-1
while j>i:
if numbers[i]+numbers[j]==target:
return [i+1, j+1]
elif numbers[i]+numbers[j]<target:
i+=1
else:
j-=1
return []
# Binary search
# Author: LeetCode-Solution
# Link: https://leetcode-cn.com/problems/two-sum-ii-input-array-is-sorted/solution/liang-shu-zhi-he-ii-shu-ru-you-xu-shu-zu-by-leet-2/
class Solution_3:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
n = len(numbers)
for i in range(n):
low, high = i + 1, n - 1
while low <= high:
mid = (low + high) // 2
if numbers[mid] == target - numbers[i]:
return [i + 1, mid + 1]
elif numbers[mid] > target - numbers[i]:
high = mid - 1
else:
low = mid + 1
return [-1, -1]
```
#### File: 21PIRLO21/LeetCode2020/0174_Dungeon_Game.py
```python
class Solution_1:
def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:
if not dungeon or not dungeon[0]:
return 0
rows, cols = len(dungeon), len(dungeon[0])
# box = 1
BIG = 10**9
dp = [[BIG] * (cols + 1) for _ in range(rows + 1)]
dp[rows][cols - 1] = dp[rows - 1][cols] = 1
for x in range(rows - 1, -1, -1): # (-1, rows - 1] == [0, rows - 1]
for y in range(cols - 1, -1, -1):
                # box -- the minimum health needed before taking the next step
                box = min(dp[x + 1][y], dp[x][y + 1])
                # dp[x][y] -- minimum health required when entering (x, y) in order to survive
dp[x][y] = max(1, box - dungeon[x][y])
return dp[0][0]
# the fastest
class Solution_2:
def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:
x = len(dungeon)
y = len(dungeon[0])
dp = [[None for __ in range(y)] for __ in range(x)]
        # Fill the last cell first
        dp[-1][-1] = 1 if dungeon[-1][-1] >= 0 else -dungeon[-1][-1]+1
        # Fill the last column
        for i in range(x-2, -1, -1):
            tmp = dp[i+1][-1]-dungeon[i][-1]
            dp[i][-1] = 1 if tmp <= 0 else tmp
        # Fill the last row
        for i in range(y-2, -1, -1):
            tmp = dp[-1][i+1]-dungeon[-1][i]
            dp[-1][i] = 1 if tmp <= 0 else tmp
        # Fill the remaining cells
for i in range(x-2, -1, -1):
for j in range(y-2, -1, -1):
tmp = min(dp[i][j+1], dp[i+1][j])-dungeon[i][j]
dp[i][j] = 1 if tmp <= 0 else tmp
return dp[0][0]
```
#### File: 21PIRLO21/LeetCode2020/0309_Best_Time_to_Buy_and_Sell_Stock_with_Cooldown.py
```python
class Solution_1:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
n = len(prices)
        # f[i][0]: maximum profit while holding a share
        # f[i][1]: maximum profit while not holding a share and in the cooldown period
        # f[i][2]: maximum profit while not holding a share and not in the cooldown period
f = [[-prices[0], 0, 0]] + [[0] * 3 for _ in range(n - 1)]
for i in range(1, n):
f[i][0] = max(f[i - 1][0], f[i - 1][2] - prices[i])
f[i][1] = f[i - 1][0] + prices[i]
f[i][2] = max(f[i - 1][1], f[i - 1][2])
return max(f[n - 1][1], f[n - 1][2])
class Solution_2:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
n = len(prices)
f0, f1, f2 = -prices[0], 0, 0
for i in range(1, n):
newf0 = max(f0, f2 - prices[i])
newf1 = f0 + prices[i]
newf2 = max(f1, f2)
f0, f1, f2 = newf0, newf1, newf2
return max(f1, f2)
```
#### File: 21PIRLO21/LeetCode2020/0329_Longest_Increasing_Path_in_a_Matrix.py
```python
from typing import List
import collections
''' Hard '''
# Memoization + depth-first search
# Author: ting-ting-28
# Link: https://leetcode-cn.com/problems/longest-increasing-path-in-a-matrix/solution/python3-shen-du-you-xian-sou-suo-ji-yi-hua-di-gui-/
class Solution_1:
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
if not matrix:
return 0
nxt = ((1, 0), (-1, 0), (0, -1), (0, 1))
old = {}
matrix = [[float("inf")]+i+[float("inf")] for i in matrix]
col = len(matrix[0])
matrix = [[float("inf")]*col]+matrix+[[float("inf")]*col]
row = len(matrix)
def maxIncreasingRoute(x: int, y: int) -> int:
if (x, y) in old:
return old[(x, y)]
if x in {0, row-1} or y in {0, col-1}:
return 0
old[(x, y)] = 1+max([0]+[maxIncreasingRoute(x+xx, y+yy) for xx, yy in nxt if matrix[x][y] < matrix[x+xx][y+yy]])
return old[(x, y)]
return max([maxIncreasingRoute(i, j) for j in range(1, col-1) for i in range(1, row-1)])
# Topological sort
# Author: LeetCode-Solution
# Link: https://leetcode-cn.com/problems/longest-increasing-path-in-a-matrix/solution/ju-zhen-zhong-de-zui-chang-di-zeng-lu-jing-by-le-2/
class Solution:
DIRS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
if not matrix:
return 0
rows, columns = len(matrix), len(matrix[0])
outdegrees = [[0] * columns for _ in range(rows)]
queue = collections.deque()
for i in range(rows):
for j in range(columns):
for dx, dy in Solution.DIRS:
newRow, newColumn = i + dx, j + dy
if 0 <= newRow < rows and 0 <= newColumn < columns and matrix[newRow][newColumn] > matrix[i][j]:
outdegrees[i][j] += 1
if outdegrees[i][j] == 0:
queue.append((i, j))
ans = 0
while queue:
ans += 1
size = len(queue)
for _ in range(size):
row, column = queue.popleft()
for dx, dy in Solution.DIRS:
newRow, newColumn = row + dx, column + dy
if 0 <= newRow < rows and 0 <= newColumn < columns and matrix[newRow][newColumn] < matrix[row][column]:
outdegrees[newRow][newColumn] -= 1
if outdegrees[newRow][newColumn] == 0:
queue.append((newRow, newColumn))
return ans
# sum(dp, []) -- surprising that sum can flatten a 2D list into a 1D list; good to know.
# (The same effect can be achieved with list.extend() in a for loop, as sketched after this file.)
# >>> matrix=[[1,2,3],[4,5,6],[7,8,9]]
# >>> matrix
# [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# >>> x, y = len(matrix), len(matrix[0])
# >>> x, y
# (3, 3)
# >>> b = [[(matrix[i][j], i, j) for j in range(y)] for i in range(x)]
# [[(1, 0, 0), (2, 0, 1), (3, 0, 2)], [(4, 1, 0), (5, 1, 1), (6, 1, 2)], [(7, 2, 0), (8, 2, 1), (9, 2, 2)]]
# >>> sum(b, [])
# [(1, 0, 0), (2, 0, 1), (3, 0, 2), (4, 1, 0), (5, 1, 1), (6, 1, 2), (7, 2, 0), (8, 2, 1), (9, 2, 2)]
# >>>
```
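A small illustration of the two flattening approaches mentioned in the closing comment above; the matrix values are arbitrary.
```python
# Flattening a 2D list: sum(..., []) versus list.extend() in a loop.
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

flat_sum = sum(matrix, [])      # concatenates the rows via repeated +

flat_extend = []
for row in matrix:              # same result, and linear rather than quadratic
    flat_extend.extend(row)

assert flat_sum == flat_extend == [1, 2, 3, 4, 5, 6, 7, 8, 9]
```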
#### File: 21PIRLO21/LeetCode2020/0983_Minimum_Cost_For_Tickets.py
```python
class Solution:
def mincostTickets(self, days: List[int], costs: List[int]) -> int:
dp = [0 for _ in range(days[-1] + 1)]
days_idx = 0
for day in range(1, days[-1] + 1):
if day != days[days_idx]:
dp[day] = dp[day - 1]
else:
dp[day] = min(dp[max(0, day - 1)] + costs[0],
dp[max(0, day - 7)] + costs[1],
dp[max(0, day - 30)] + costs[2])
days_idx +=1
return dp[-1]
```
#### File: 21PIRLO21/LeetCode2020/JZoffer_11_Find_Minimum_in_Rotated_Sorted_Array.py
```python
from typing import List
''' Easy '''
class Solution_1:
def minArray(self, numbers: List[int]) -> int:
for i in range(1, len(numbers)):
if numbers[i - 1] > numbers[i]:
return numbers[i]
return numbers[0]
# Binary search
# Author: LeetCode-Solution
# Link: https://leetcode-cn.com/problems/xuan-zhuan-shu-zu-de-zui-xiao-shu-zi-lcof/solution/xuan-zhuan-shu-zu-de-zui-xiao-shu-zi-by-leetcode-s/
# Consider the last element x of the array:
# every element to the right of the minimum is <= x,
# while every element to the left of the minimum is >= x.
class Solution_2:
def minArray(self, numbers: List[int]) -> int:
low, high = 0, len(numbers) - 1
while low < high:
pivot = low + (high - low) // 2
if numbers[pivot] < numbers[high]:
high = pivot
elif numbers[pivot] > numbers[high]:
low = pivot + 1
else:
high -= 1
return numbers[low]
```
#### File: 21PIRLO21/LeetCode2020/LCP_13.py
```python
from queue import Queue
from typing import List
# Author: li-bo-8
# Link: https://leetcode-cn.com/problems/xun-bao/solution/python3-bfsdpzhuang-tai-ya-suo-by-li-bo-8/
class Solution:
def minimalSteps(self, maze: List[str]) -> int:
        # The four movement directions
        dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]
        # BFS: compute the distance from (x, y) to every other cell of the maze; results go into ret
def bfs(x, y, maze, m, n):
ret = [[-1]*n for _ in range(m)]
ret[x][y] = 0
q = Queue()
q.put((x,y))
while q.qsize():
curx, cury = q.get()
for dx, dy in dd:
nx = curx + dx
ny = cury + dy
if 0 <= nx < m and 0 <= ny < n and maze[nx][ny] != '#' and ret[nx][ny] == -1:
ret[nx][ny] = ret[curx][cury] + 1
q.put((nx, ny))
return ret
m = len(maze)
n = len(maze[0])
startX = -1
startY = -1
endX = -1
endY = -1
        # Mechanisms (M) and stones (O)
        buttons = []
        stones = []
        # Record the positions of all special cells
for i in range(m):
for j in range(n):
if maze[i][j] == 'S':
startX = i
startY = j
elif maze[i][j] == 'T':
endX = i
endY = j
elif maze[i][j] == 'O':
stones.append((i,j))
elif maze[i][j] == 'M':
buttons.append((i,j))
else:
pass
nb = len(buttons)
ns = len(stones)
startToAnyPos = bfs(startX, startY, maze, m, n)
# 若没有机关,最短距离就是(startX, startY)到(endX, endY)的距离
if nb == 0:
return startToAnyPos[endX][endY]
        # dist[i][j] records the shortest distance from mechanism i to mechanism j;
        # dist[i][nb] is the distance to the start, dist[i][nb+1] the distance to the end
dist = [[-1]*(nb+2) for _ in range(nb)]
        # For every mechanism, compute its distance to every other cell
buttonsToAnyPos = []
for i in range(nb):
bx, by = buttons[i]
            # Distances from mechanism i to every other cell
iToAnyPos = bfs(bx, by, maze, m, n)
buttonsToAnyPos.append(iToAnyPos)
            # The distance from mechanism i to the end is the distance from (bx, by) to (endX, endY)
dist[i][nb + 1] = iToAnyPos[endX][endY]
for i in range(nb):
            # Distance from mechanism i to (startX, startY):
            # the shortest path that leaves mechanism i, passes through some stone (sx, sy), and reaches (startX, startY)
tmp = -1
for j in range(ns):
sx, sy = stones[j]
if buttonsToAnyPos[i][sx][sy] != -1 and startToAnyPos[sx][sy] != -1:
if tmp == -1 or tmp > buttonsToAnyPos[i][sx][sy] + startToAnyPos[sx][sy]:
tmp = buttonsToAnyPos[i][sx][sy] + startToAnyPos[sx][sy]
dist[i][nb] = tmp
            # Distance from mechanism i to mechanism j:
            # the shortest path that leaves mechanism i, passes through some stone (sx, sy), and reaches mechanism j
for j in range(i+1, nb):
mn = -1
for k in range(ns):
sx, sy = stones[k]
if buttonsToAnyPos[i][sx][sy] != -1 and buttonsToAnyPos[j][sx][sy] != -1:
if mn == -1 or mn > buttonsToAnyPos[i][sx][sy] + buttonsToAnyPos[j][sx][sy]:
mn = buttonsToAnyPos[i][sx][sy] + buttonsToAnyPos[j][sx][sy]
                # Distances form an undirected graph, so the matrix is symmetric
dist[i][j] = mn
dist[j][i] = mn
        # If any mechanism has no path to the start or to the end (distance -1), the goal is unreachable; return -1
for i in range(nb):
if dist[i][nb] == -1 or dist[i][nb+1] == -1:
return -1
        # dp array: -1 means the state has not been reached yet. dp[mask][j] is the shortest path needed to stand at mechanism j with the set of triggered mechanisms encoded by mask; there are 2**nb trigger states, so the 1 << nb rows are unavoidable
dp = [[-1]*nb for _ in range(1 << nb)]
        # Initial states: going from start directly to mechanism i; bit i of mask is 1, all other bits are 0
for i in range(nb):
dp[1 << i][i] = dist[i][nb]
        # Every transition goes from mask to a strictly larger mask (mask | (1 << j)), so iterating masks in increasing order processes the states in a valid order
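        # Example with nb = 3: mask 0b011 means mechanisms 0 and 1 are already
        # triggered; choosing j = 2 moves to nextMask 0b111, which is always
        # strictly larger than mask.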
for mask in range(1, (1 << nb)):
for i in range(nb):
                # Only valid if the current position is reachable, i.e. bit i of mask is 1
if mask & (1 << i) != 0:
for j in range(nb):
                        # Pick the next mechanism j, which must not have been triggered yet, i.e. bit j of mask is 0
if mask & (1 << j) == 0:
nextMask = mask | (1 << j)
if dp[nextMask][j] == -1 or dp[nextMask][j] > dp[mask][i] + dist[i][j]:
dp[nextMask][j] = dp[mask][i] + dist[i][j]
        # Finally, go from the last mechanism to the end point
ans = -1
finalMask = (1 << nb) - 1
for i in range(nb):
if ans == -1 or ans > dp[finalMask][i] + dist[i][nb + 1]:
ans = dp[finalMask][i] + dist[i][nb + 1]
return ans
``` |
{
"source": "21-prashant/movielibiary",
"score": 3
} |
#### File: project/controllers/library.py
```python
from project import app, security
from flask import render_template, request, session, redirect, url_for,jsonify
from flask.ext.wtf import Form, TextField, validators
from project.model.Library import Library
from project.model.User import User
import json
@app.route('/libraries')
@security('user')
def libraries(user = None):
libraries = Library.objects(user=user,unit='Movie')
return render_template('library/master.html', libraries=libraries,user=user)
@app.route('/libraries/add', methods=['POST'])
@security('user')
def addLibrary(user = None):
name = request.form['name']
library = Library.objects(user=user,unit='Movie',name=name).first()
if library:
return jsonify(response='error',message='Library with name %s already exists' % library.name),404
library = Library(user=user,unit='Movie',name=name).save()
return jsonify(response='success',type='redirect',path=url_for(endpoint='libraries',_external=True))
@app.route('/libraries/remove', methods=['POST'])
@security('user')
def removeLibrary(user = None):
name = request.form['name']
library = Library.objects(user=user,unit='Movie',name=name).first()
if not library:
return jsonify(response='error',message='Library requested does not exists'),404
if library.name == 'Master' or library.name == 'Loaned':
return jsonify(response='error',message='Library %s cannot be deleted' % library.name),404
library.delete()
return jsonify(response='success',type='redirect',path=url_for(endpoint='libraries',_external=True))
@app.route('/libraries/<name>')
@security('user')
def library(name,user=None):
from project.model.Movie import Movie
library = Library.objects(user=user,name=name,unit='Movie').first()
if not library:
return render_template('404.html',message='Unable to find given Library',user=user),404
return render_template('library/library.html',library=library,user=user)
@app.route('/libraries/<name>/<int:index>')
@security('user')
def libraryItem(name, index,user=None):
from project.model.Movie import Movie
library = Library.objects(user=user,name=name,unit='Movie').first()
if not library:
return render_template('404.html',message='Unable to find given Library',user=user),404
movie = library.hydrateUnit(index-1)
if not movie:
return render_template('404.html',message='Unable to find given Movie',user=user),404
return render_template('library/libraryItem.html',item=movie,user=user,library=library,index=index)
@app.route('/libraries/<name>/remove', methods=['POST'])
@security('user')
def removelibraryItem(name,user=None):
from project.model.Movie import Movie
library = Library.objects(user=user,name=name,unit='Movie').first()
if not library:
return jsonify(response='error',message='Unable to find the given Library'),404
index = int(request.form['id'])
if not index:
return jsonify(response='error',message='Invalid parameters'),404
movie = library.hydrateUnit(index-1)
if not movie:
return jsonify(response='error',message='Unable to find the given Movie in Library %s' % library.name),404
if library.name == 'Master':
libraries = Library.objects(user=user,unit='Movie')
for library in libraries:
library.removeUnit(movie)
else:
library.removeUnit(movie)
return jsonify(response='success',type='redirect',path=url_for(endpoint='library',name=name,_external=True))
@app.route('/libraries/<name>/add', methods=['POST'])
@security('user')
def addlibraryItem(name,user=None):
from project.model.Movie import Movie
library = Library.objects(user=user,name=name,unit='Movie').first()
if not library:
return jsonify(response='error',message='Unable to find the given Library'),404
movie_id = request.form['id']
if not movie_id:
return jsonify(response='error',message='Invalid Movie given'),404
from project.model.Movie import Movie
movie = Movie.objects(tmdb_id=movie_id).first()
if movie:
if library.name != 'Master':
master = Library.objects(user=user,name="Master",unit='Movie').first()
master.addUnit(movie)
library.addUnit(movie)
return jsonify(response='success',type='redirect',path=url_for(endpoint='library',name=name,_external=True))
from tmdb3 import Movie as tmdbMovie
movie = tmdbMovie(movie_id)
if not movie:
return jsonify(response='error',message='Invalid Movie given'),404
from project.model.Movie import Movie
movie = Movie.convertMovie(movie)
library.addUnit(movie)
if library.name != 'Master':
master = Library.objects(user=user,name="Master",unit='Movie').first()
master.addUnit(movie)
return jsonify(response='success',type='redirect',path=url_for(endpoint='library',name=name,_external=True))
``` |
{
"source": "21praveen/360projection",
"score": 3
} |
#### File: 21praveen/360projection/equirectangular.py
```python
import cv2
import numpy as np
def deg2rad(d):
return float(d) * np.pi / 180
def rotate_image(old_image):
(old_height, old_width, _) = old_image.shape
M = cv2.getRotationMatrix2D(((old_width - 1) / 2., (old_height - 1) / 2.), 270, 1)
rotated = cv2.warpAffine(old_image, M, (old_width, old_height))
return rotated
def xrotation(th):
c = np.cos(th)
s = np.sin(th)
return np.array([[1, 0, 0], [0, c, s], [0, -s, c]])
def yrotation(th):
c = np.cos(th)
s = np.sin(th)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def render_image_np(theta0, phi0, fov_h, fov_v, width, img):
"""
theta0 is pitch
phi0 is yaw
render view at (pitch, yaw) with fov_h by fov_v
width is the number of horizontal pixels in the view
"""
m = np.dot(yrotation(phi0), xrotation(theta0))
(base_height, base_width, _) = img.shape
height = int(width * np.tan(fov_v / 2) / np.tan(fov_h / 2))
new_img = np.zeros((height, width, 3), np.uint8)
DI = np.ones((height * width, 3), np.int)
trans = np.array([[2.*np.tan(fov_h / 2) / float(width), 0., -np.tan(fov_h / 2)],
[0., -2.*np.tan(fov_v / 2) / float(height), np.tan(fov_v / 2)]])
xx, yy = np.meshgrid(np.arange(width), np.arange(height))
DI[:, 0] = xx.reshape(height * width)
DI[:, 1] = yy.reshape(height * width)
v = np.ones((height * width, 3), np.float)
v[:, :2] = np.dot(DI, trans.T)
v = np.dot(v, m.T)
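    # v now holds the viewing-ray direction for every output pixel; convert it
    # to spherical angles (theta: angle from the vertical axis, phi: azimuth)
    # and look up the matching pixel of the equirectangular source image.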
diag = np.sqrt(v[:, 2] ** 2 + v[:, 0] ** 2)
theta = np.pi / 2 - np.arctan2(v[:, 1], diag)
phi = np.arctan2(v[:, 0], v[:, 2]) + np.pi
ey = np.rint(theta * base_height / np.pi).astype(np.int)
ex = np.rint(phi * base_width / (2 * np.pi)).astype(np.int)
ex[ex >= base_width] = base_width - 1
ey[ey >= base_height] = base_height - 1
new_img[DI[:, 1], DI[:, 0]] = img[ey, ex]
return new_img
def equi_to_cube(face_size, img):
"""
given an equirectangular spherical image, project it onto standard cube
"""
cube_img_h = face_size * 3
cube_img_w = face_size * 2
cube_img = np.zeros((cube_img_h, cube_img_w, 3), np.uint8)
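    # The six faces are packed into a 3x2 layout: left, front and right (each
    # rotated 90 degrees) fill the left column top to bottom, while top, back
    # and bottom fill the right column.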
ii = render_image_np(np.pi / 2, np.pi, \
np.pi / 2, np.pi / 2, \
face_size, img)
# cv2.imwrite('g_top.jpg', ii)
cube_img[:cube_img_h / 3, cube_img_w / 2:] = ii.copy()
ii = render_image_np(0, 0, \
np.pi / 2, np.pi / 2, \
face_size, img)
# cv2.imwrite('g_front.jpg', ii)
cube_img[cube_img_h / 3:cube_img_h * 2 / 3, :cube_img_w / 2] = rotate_image(ii).copy()
ii = render_image_np(0, np.pi / 2, \
np.pi / 2, np.pi / 2, \
face_size, img)
# cv2.imwrite('g_right.jpg', ii)
cube_img[cube_img_h * 2 / 3:, :cube_img_w / 2] = rotate_image(ii).copy()
ii = render_image_np(0, np.pi, \
np.pi / 2, np.pi / 2, \
face_size, img)
# cv2.imwrite('g_back.jpg', ii)
cube_img[cube_img_h / 3:cube_img_h * 2 / 3, cube_img_w / 2:] = ii.copy()
ii = render_image_np(0, np.pi * 3 / 2, \
np.pi / 2, np.pi / 2, \
face_size, img)
# cv2.imwrite('g_left.jpg', ii)
cube_img[:cube_img_h / 3, :cube_img_w / 2] = rotate_image(ii).copy()
ii = render_image_np(-np.pi / 2, np.pi, \
np.pi / 2, np.pi / 2, \
face_size, img)
# cv2.imwrite('g_bottom.jpg', ii)
cube_img[cube_img_h * 2 / 3:, cube_img_w / 2:] = ii.copy()
# cv2.imwrite('g_cube.jpg', cube_img)
return cube_img
if __name__ == '__main__':
img = cv2.imread('equi_image.bmp')
face_size = 1000
yaw = 0
pitch = 0
fov_h = 90
fov_v = 90
rimg = render_image_np(deg2rad(pitch), deg2rad(yaw), \
deg2rad(fov_v), deg2rad(fov_h), \
face_size, img)
cv2.imwrite('rendered_image_%d_%d.bmp' % (pitch, yaw), rimg)
```
#### File: 21praveen/360projection/equi_to_offset_cube.py
```python
import cv2
import numpy as np
from cube import Cube
from cube import Face
def xrotation(th):
c = np.cos(th)
s = np.sin(th)
return np.array([[1, 0, 0], [0, c, s], [0, -s, c]])
def yrotation(th):
c = np.cos(th)
s = np.sin(th)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def deg2rad(d):
return d*np.pi/180
def rotate_image(old_image):
(old_height, old_width, _) = old_image.shape
M = cv2.getRotationMatrix2D(((old_width - 1) / 2., (old_height - 1) / 2.), 270, 1)
rotated = cv2.warpAffine(old_image, M, (old_width, old_height))
return rotated
class OffsetCube(Cube):
def __init__(self, expand_coef, offcenter_z, yaw, pitch, w, h, equi_img, bmp_fn=None):
self.expand_coef = expand_coef
self.offcenter_z = offcenter_z
self.yaw = yaw # in rad
self.pitch = pitch # in rad
self.fw = w # face width
self.fh = h # face height
self.bmp_fn = bmp_fn
d= {}
d['left'] = (0, 3*np.pi/2)
d['front'] = (0, 0)
d['right'] = (0, np.pi/2)
d['back'] = (0, np.pi)
d['top'] = (np.pi/2, np.pi)
d['bottom'] = (-np.pi/2, np.pi)
self.d = d
self.init_cube(equi_img)
def init_cube(self, equi_img):
NO_ROTATE = 0
a = np.array
self.faces = [
Face('left', project_offset_face_np(self.pitch, self.yaw, self.d['left'], self.fh, self.fw, self.expand_coef, self.offcenter_z, equi_img), a([-1., 0., 0.]), a([ 0., 0., 1.]), a([0., 1., 0.]), self.expand_coef, NO_ROTATE, self.yaw, self.pitch),
Face('front', project_offset_face_np(self.pitch, self.yaw, self.d['front'], self.fh, self.fw, self.expand_coef, self.offcenter_z, equi_img), a([ 0., 0., 1.]), a([ 1., 0., 0.]), a([0., 1., 0.]), self.expand_coef, NO_ROTATE, self.yaw, self.pitch),
Face('right', project_offset_face_np(self.pitch, self.yaw, self.d['right'], self.fh, self.fw, self.expand_coef, self.offcenter_z, equi_img), a([ 1., 0., 0.]), a([ 0., 0., -1.]), a([0., 1., 0.]), self.expand_coef, NO_ROTATE, self.yaw, self.pitch),
Face('top', project_offset_face_np(self.pitch, self.yaw, self.d['top'], self.fh, self.fw, self.expand_coef, self.offcenter_z, equi_img), a([ 0., 1., 0.]), a([-1., 0., 0.]), a([0., 0., 1.]), self.expand_coef, NO_ROTATE, self.yaw, self.pitch),
Face('back', project_offset_face_np(self.pitch, self.yaw, self.d['back'], self.fh, self.fw, self.expand_coef, self.offcenter_z, equi_img), a([ 0., 0., -1.]), a([-1., 0., 0.]), a([0., 1., 0.]), self.expand_coef, NO_ROTATE, self.yaw, self.pitch),
Face('bottom', project_offset_face_np(self.pitch, self.yaw, self.d['bottom'],self.fh, self.fw, self.expand_coef, self.offcenter_z, equi_img), a([ 0., -1., 0.]), a([-1., 0., 0.]), a([0., 0.,-1.]), self.expand_coef, NO_ROTATE, self.yaw, self.pitch),
]
self.front_face = self.faces[1].pv
self.face_vecs = np.zeros((3,6))
for i, f in enumerate(self.faces):
self.face_vecs[:, i] = f.pv / f.k
def project_offset_face_np(theta0, phi0, f_info, height, width, expand_coef, offcenter_z, img):
"""
theta0 is front pitch
phi0 is front yaw
both in radiant
"""
theta1, phi1= f_info
m = np.dot( yrotation( phi0 ), xrotation( theta0 ) )
n = np.dot( yrotation( phi1 ), xrotation( theta1 ) )
mn = np.dot( m, n )
(base_height, base_width, _) = img.shape
new_img = np.zeros((height, width, 3), np.uint8)
DI = np.ones((height*width, 3), np.int)
trans = np.array([[2./float(width)*expand_coef, 0., -expand_coef],
[0.,-2./float(height)*expand_coef, expand_coef]])
xx, yy = np.meshgrid(np.arange(width), np.arange(height))
DI[:, 0] = xx.reshape(height*width)
DI[:, 1] = yy.reshape(height*width)
v = np.ones((height*width, 3), np.float)
v[:, :2] = np.dot(DI, trans.T)
v = np.dot(v, mn.T)
pv = np.dot(np.array([0, 0, 1.]), m.T)
off = offcenter_z * pv
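    # Ray/sphere intersection: solve |t*v - off| = 1 for t (a quadratic in t),
    # i.e. find where each viewing ray, shifted by the off-centre offset, meets
    # the unit sphere before converting to spherical angles.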
a = v[:,0]**2 + v[:,1]**2 + v[:,2]**2
b = -2 * (v[:,0]*off[0] + v[:,1]*off[1] + v[:,2]*off[2])
c = np.sum(off*off)-1
t = (-b+np.sqrt(b*b - 4*a*c))/(2*a)
v = v*t[:,None] - off
diag = np.sqrt(v[:,2]**2 + v[:,0]**2)
theta = np.pi/2 - np.arctan2(v[:,1],diag)
phi = np.arctan2(v[:,0],v[:,2]) + np.pi
ey = np.rint(theta*base_height/np.pi ).astype(np.int)
ex = np.rint(phi*base_width/(2*np.pi) ).astype(np.int)
ex[ex >= base_width] = base_width - 1
ey[ey >= base_height] = base_height - 1
new_img[DI[:, 1], DI[:, 0]] = img[ey, ex]
return new_img
def equi_to_offset_cube(image):
cube_yaw = 0
cube_pitch = 0
face_size = 656
off_cb = OffsetCube(1.025, -0.7, deg2rad(cube_yaw), deg2rad(cube_pitch), face_size, face_size, image)
write_to_cb_img(off_cb, face_size, 'g_offsetcube.jpg')
def equi_to_cube(image):
cube_yaw = 0
cube_pitch = 0
face_size = 656
off_cb = OffsetCube(1.01, 0., deg2rad(cube_yaw), deg2rad(cube_pitch), face_size, face_size, image)
write_to_cb_img(off_cb, face_size, 'g_cube.jpg')
def write_to_cb_img(off_cb, face_size, name):
cube_img_h = face_size * 3
cube_img_w = face_size * 2
cube_img = np.zeros((cube_img_h, cube_img_w, 3), np.uint8)
for face in off_cb.faces:
if face.descr == 'top':
cube_img[:cube_img_h / 3, cube_img_w / 2:] = face.img.copy()
elif face.descr == 'front':
cube_img[cube_img_h / 3:cube_img_h * 2 / 3, :cube_img_w / 2] = rotate_image(face.img).copy()
elif face.descr == 'right':
cube_img[cube_img_h * 2 / 3:, :cube_img_w / 2] = rotate_image(face.img).copy()
elif face.descr == 'back':
cube_img[cube_img_h / 3:cube_img_h * 2 / 3, cube_img_w / 2:] = face.img.copy()
elif face.descr == 'left':
cube_img[:cube_img_h / 3, :cube_img_w / 2] = rotate_image(face.img).copy()
elif face.descr == 'bottom':
cube_img[cube_img_h * 2 / 3:, cube_img_w / 2:] = face.img.copy()
cv2.imwrite(name, cube_img)
if __name__== '__main__':
image = cv2.imread('../youtube_equi_image.jpg')
cube_yaw = 0
cube_pitch = 0
off_cb = OffsetCube(1.03125, -0.7, deg2rad(cube_yaw), deg2rad(cube_pitch), 528, 528, image)
for face in off_cb.faces:
cv2.imwrite('generated_offset_cube_%s.bmp'%face.descr, face.img)
``` |
{
"source": "21stio/collectd-docker-stats",
"score": 2
} |
#### File: lib/Docker/DependencyResolver.py
```python
from DictHelper import DictHelper
from ContainerStatsStreamPool import ContainerStatsStreamPool
from DockerFormatter import DockerFormatter
from DockerStatsClient import DockerStatsClient
import docker
from distutils.version import StrictVersion
import logging
import sys
class DependencyResolver:
resolver = None
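    # Process-wide singleton; created lazily by get_Resolver() so all callers
    # share one docker client and one stats stream pool.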
@classmethod
def get_Resolver(cls, logger=None, socket_url=None, timeout=None):
if cls.resolver is None:
cls.resolver = DependencyResolver(logger, socket_url, timeout)
return cls.resolver
def __init__(self, logger=None, socket_url=None, timeout=None):
self.logger = logger or self.get_std_out_logger()
self.socket_url = socket_url or 'unix://var/run/docker.sock'
self.timeout = timeout or 3
self.min_api_version = '1.17'
self.dockerClient = None
self.dictHelper = None
self.dockerFormatter = None
self.containerStatsStreamPool = None
self.dockerStatsClient = None
def get_DockerClient(self):
if self.dockerClient is None:
self.dockerClient = docker.Client(
base_url=self.socket_url,
version=self.min_api_version
)
self.dockerClient.timeout = self.timeout
daemon_version = self.dockerClient.version()['ApiVersion']
if StrictVersion(daemon_version) < StrictVersion(self.min_api_version):
raise Exception('Docker daemon at {0} does not support container statistics!'.format(self.socket_url))
self.logger.info("started docker client socket_url: {0} version: {1} timeout: {2}".format(self.socket_url, self.min_api_version, self.timeout))
return self.dockerClient
def get_ContainerStatsStreamPool(self):
if self.containerStatsStreamPool is None:
self.containerStatsStreamPool = ContainerStatsStreamPool(self.logger, self.get_DockerClient())
return self.containerStatsStreamPool
def get_DictHelper(self):
if self.dictHelper is None:
self.dictHelper = DictHelper()
return self.dictHelper
def get_DockerFormatter(self):
if self.dockerFormatter is None:
self.dockerFormatter = DockerFormatter(self.get_DictHelper())
return self.dockerFormatter
def get_DockerStatsClient(self):
if self.dockerStatsClient is None:
self.dockerStatsClient = DockerStatsClient(self.get_DockerClient(), self.get_ContainerStatsStreamPool())
return self.dockerStatsClient
def get_std_out_logger(self):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
return logger
```
#### File: lib/Docker/DockerStatsClient.py
```python
class DockerStatsClient:
def __init__(self, dockerClient, containerStatsStreamPool):
self.dockerClient = dockerClient
self.containerStatsStreamPool = containerStatsStreamPool
def get_containers(self):
return self.dockerClient.containers()
def get_container_stats(self, container_ids):
stats = {}
for containers_id in container_ids:
stream = self.containerStatsStreamPool.get_ContainerStatsStream(containers_id)
stats[containers_id] = stream.get_stats()
return stats
```
#### File: tests/integration_docker/ContainerStatsStreamTest.py
```python
import unittest
from lib.Docker.DependencyResolver import DependencyResolver
class ContainerStatsStreamTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.containerStatsStreamPool = DependencyResolver.get_Resolver().get_ContainerStatsStreamPool()
cls.dockerFormatter = DependencyResolver.get_Resolver().get_DockerFormatter()
cls.dockerStatsClient = DependencyResolver.get_Resolver().get_DockerStatsClient()
def setUp(self):
self.cls = ContainerStatsStreamTest
def test_get_stats(self):
containers = self.cls.dockerStatsClient.get_containers()
container_name = self.cls.dockerFormatter.get_container_name(containers[0])
stats = self.cls.containerStatsStreamPool.get_ContainerStatsStream(container_name).get_stats()
self.assertTrue('cpu_stats' in stats)
self.assertTrue('memory_stats' in stats)
```
#### File: tests/integration_docker/DockerFormatterTest.py
```python
import unittest
from lib.Docker.DependencyResolver import DependencyResolver
class DockerFormatterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dockerStatsClient = DependencyResolver.get_Resolver().get_DockerStatsClient()
cls.dockerFormatter = DependencyResolver.get_Resolver().get_DockerFormatter()
cls.containers = cls.dockerStatsClient.get_containers()
cls.container_names = cls.dockerFormatter.get_container_names(cls.containers)
def setUp(self):
self.cls = DockerFormatterTest
def test_process_stats(self):
stats = self.cls.dockerStatsClient.get_container_stats(self.cls.container_names)
processed_stats = self.cls.dockerFormatter.process_stats(stats)
self.assertTrue('memory_stats.limit' in processed_stats['all'])
del(processed_stats['all'])
self.assertTrue('memory_stats.limit' not in processed_stats.values()[0])
self.assertTrue('memory_stats.usage' in processed_stats.values()[0])
``` |
{
"source": "21stio/python-handson-ml",
"score": 2
} |
#### File: src/three/minst.py
```python
from beeprint import pp
from plotly import tools
from sklearn.base import clone, BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, precision_recall_curve, roc_curve
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import fetch_mldata
import numpy as np
from sklearn.linear_model import SGDClassifier
import plotly.graph_objs as go
import plotly.offline as py
from sklearn.preprocessing import StandardScaler
mnist = fetch_mldata('MNIST original')
X, y = mnist["data"], mnist["target"]
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
def simple_training(model):
model.fit(X_train, y_train_5)
p = model.predict(X_train[:20])
def own_cross_training(model):
skfolds = StratifiedKFold(n_splits=3, random_state=42)
for train_index, test_index in skfolds.split(X_train, y_train_5):
clone_clf = clone(model)
X_train_folds = X_train[train_index]
y_train_folds = y_train_5[train_index]
X_test_folds = X_train[test_index]
y_test_folds = y_train_5[test_index]
clone_clf.fit(X_train_folds, y_train_folds)
y_hat = clone_clf.predict(X_test_folds)
n_correct = sum(y_hat == y_test_folds)
print(n_correct / len(y_hat))
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + empty_cell, end=" ")
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
class Never5Classifier(BaseEstimator):
def fit(self, X, y=None):
pass
def predict(self, X):
return np.zeros((len(X), 1), dtype=bool)
def cross_training(model, X_train, y_train):
s = cross_val_score(model, X_train, y_train, cv=3, scoring="accuracy")
pp(s)
def cross_eval(model, X_train, y_train):
y_hat = cross_val_predict(model, X_train, y_train, cv=3)
eval(y_train, y_hat)
plot_confusion_matrix_heatmap(y_train, y_hat)
def eval(y_true, y_hat):
cm = confusion_matrix(y_true, y_hat)
pp(cm)
ps = precision_score(y_true, y_hat, average='micro')
rs = recall_score(y_true, y_hat, average='micro')
f1 = f1_score(y_true, y_hat, average='micro')
print("Precision: {:.2f}, Recall: {:.2f} F1 Score: {:.2f}".format(ps, rs, f1))
def plot_precision_recall_vs_threshold(y_true, y_scores, labels):
data = []
for i, y_score in enumerate(y_scores):
precisions, recalls, thresholds = precision_recall_curve(y_true, y_score)
        data.append(go.Scatter(x=thresholds, y=precisions[:-1], name=labels[i] + ' precision', mode='lines'))
        data.append(go.Scatter(x=thresholds, y=recalls[:-1], name=labels[i] + ' recall', mode='lines'))
layout = go.Layout(
title='Plot Title',
xaxis=dict(
title='Recall',
),
yaxis=dict(
title='Precision',
)
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='/tmp/precision_recall_vs_threshold.html')
def predict_with_threshold(y_scores, y_true, threshold):
y_hat = y_scores > threshold
eval(y_true, y_hat)
def plot_roc_curve(y_true, y_scores, labels):
data = []
for i, y_score in enumerate(y_scores):
fpr, tpr, thresholds = roc_curve(y_true, y_score)
        data.append(go.Scatter(x=fpr, y=tpr, name=labels[i], mode='lines'))
layout = go.Layout(
title='Plot Title',
xaxis=dict(
title='False Positive Rate',
),
yaxis=dict(
title='True Positive Rate',
)
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='/tmp/roc_curve.html')
def plot_confusion_matrix_heatmap(y_true, y_hat):
cm = confusion_matrix(y_true, y_hat)
py.plot([
go.Heatmap(z=cm[::-1], name='', colorscale='Viridis'),
], filename='/tmp/confusion_matrix_heatmap.html')
cmn = cm * 1
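    # Copy the matrix, zero the diagonal (correct predictions) and normalise
    # each row by the true-class count so the second heatmap shows only the
    # error rates.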
np.fill_diagonal(cmn, 0)
row_sums = cm.sum(axis=1, keepdims=True)
cmn = cmn / row_sums
py.plot([
go.Heatmap(z=cmn[::-1], name='', colorscale='Viridis'),
], filename='/tmp/confusion_matrix_heatmap_normal.html')
def train_score(model, X_train, y_train):
s = cross_val_score(model, X_train, y_train, cv=3, scoring='accuracy')
pp(s)
def get_scores(model, X_train, y_train, method):
scores = cross_val_predict(model, X_train, y_train, cv=3, method=method)
def run1():
sgd_clf = SGDClassifier(random_state=42)
sgd_y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method='decision_function')
forest_clf = RandomForestClassifier(random_state=42)
y_scores_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method="predict_proba")[:, 1]
plot_roc_curve(y_train_5, [sgd_y_scores, y_scores_forest], ['sgd', 'random forest'])
plot_precision_recall_vs_threshold(y_train_5, [sgd_y_scores, y_scores_forest], ['sgd', 'random forest'])
sgd_clf = SGDClassifier(random_state=42)
forest_clf = RandomForestClassifier(random_state=42)
std = StandardScaler()
# X_train = std.fit_transform(X_train.astype(np.float64))
# train_score(sgd_clf, X_train, y_train)
# train_score(forest_clf, X_train, y_train)
cross_eval(forest_clf, X_train, y_train)
```
#### File: src/two/2_end_to_end.py
```python
import hashlib
import os, tarfile, pandas as pd, numpy as np
import plotly.figure_factory as ff
import plotly.offline as py
from beeprint import pp
import cufflinks as cf
from numpy.polynomial import Polynomial
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, LabelBinarizer, StandardScaler, MinMaxScaler, PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from two.transformers import CombinedAttributesAdder, CategoricalEncoder, IndexSelector, ColumnToIndexTransformer, Inspector, PassThrough
cf.set_config_file(offline=True, theme='pearl')
HOUSING_PATH = "../datasets/housing"
def load_housing_df(housing_path=HOUSING_PATH):
p = os.path.join(housing_path, "housing.csv")
df = pd.read_csv(p)
return df
def prep_housing_df(df):
def prep_index(df):
df.reset_index(inplace=True)
df["id"] = df["longitude"] * 1000 + df["latitude"]
return df
def prep_income_cat(df):
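        # Bucket median income into at most 5 strata (capped at 5) so the data
        # can later be split with stratified sampling on income.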
df["income_cat"] = np.ceil(df["median_income"] / 1.5)
df["income_cat"].where(df["income_cat"] < 5, 5.0, inplace=True)
return df
def prep_feature_combis(df):
df["rooms_per_household"] = df["total_rooms"] / df["households"]
df["bedrooms_per_room"] = df["total_bedrooms"] / df["total_rooms"]
df["bedrooms_per_household"] = df["total_bedrooms"] / df["households"]
df["population_per_household"] = df["population"] / df["households"]
return df
df = prep_index(df)
df = prep_income_cat(df)
df = prep_feature_combis(df)
return df
def get_num_df(df):
return df.drop("ocean_proximity", axis=1)
def clean_df(df):
def remove_nan_rows(df):
df.dropna(subset=["total_bedrooms"], inplace=True)
return df, np.nan
def remove_feature(df):
df.drop("total_bedrooms", axis=1, inplace=True)
return df, np.nan
def fill_nan(df):
m = df["total_bedrooms"].median()
df["total_bedrooms"].fillna(m, inplace=True)
return df, m
def impute_nan(df):
num_df = get_num_df(df)
imputer = Imputer(strategy="median")
imputer.fit(num_df)
X = imputer.transform(num_df)
new_df = pd.DataFrame(X, columns=df.columns)
return new_df, np.nan
return remove_nan_rows(df)
def encode_df(df):
def manual(df):
l_encoder = LabelEncoder()
housing_cat = df["ocean_proximity"]
housing_cat_encoded = l_encoder.fit_transform(housing_cat)
oh_encoder = OneHotEncoder()
housing_cat_1hot = oh_encoder.fit_transform(housing_cat_encoded.reshape(-1, 1))
return housing_cat_1hot
def auto(df):
housing_cat = df["ocean_proximity"]
encoder = LabelBinarizer(sparse_output=True)
housing_cat_1hot = encoder.fit_transform(housing_cat)
return housing_cat_1hot
return auto(df)
def visualize(df):
df.iplot(kind='histogram', bins=50, subplots=True, filename='/tmp/histogram-subplots.html', asPlot=True)
# df.scatter_matrix(filename='/tmp/scatter-matrix.html')
df.iplot(
kind="scatter",
x="longitude",
y="latitude",
filename='/tmp/loc.html',
asPlot=True,
)
fig = ff.create_scatterplotmatrix(df[["housing_median_age", "total_rooms", "median_income", "median_house_value", ]], diag='histogram', width=1000, height=1000)
py.plot(fig, filename='/tmp/scatterplotmatrix.html')
def inspect(df):
print("\n\nHEAD")
pp(df.head())
print("\n\nINFO")
pp(df.info())
print("\n\nINCOME_CAT_DIST")
pp(df["income_cat"].value_counts() / len(df))
print("\n\nCORR median_house_value")
corr_matrix = df.corr()
pp(corr_matrix["median_house_value"].sort_values(ascending=False))
def inspect_train_test_sets(train, test):
print(len(train), "train +", len(test), "test")
def split_train_test(df, test_ratio):
shuffled_indices = np.random.permutation(len(df))
test_set_size = int(len(df) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return df.iloc[train_indices], df.iloc[test_indices]
def test_set_check(identifier, test_ration, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ration
def split_train_test_by_id(df, test_ratio, id_column, hash=hashlib.md5):
ids = df[id_column]
in_test_set = ids.apply(lambda _id: test_set_check(_id, test_ratio, hash))
return df.loc[~in_test_set], df.loc[in_test_set]
def stratified_train_test_split(df):
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
train_sets = []
test_sets = []
for train_indices, test_indices in split.split(df, df["income_cat"]):
train_sets.append(df.loc[train_indices])
test_sets.append(df.loc[test_indices])
return train_sets, test_sets
def split(X, y):
# train_set, test_set = split_train_test(df, test_ratio=0.2)
# train_set, test_set = split_train_test_by_id(df, test_ratio=0.2, id_column="index")
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=42)
# train_sets, test_sets = stratified_train_test_split(df)
# train_set, test_set = train_sets[0], test_sets[0]
return train_X, test_X, train_y, test_y
def get_X_y(df, x_cols, y_cols):
X = df[x_cols].values
y = df[y_cols].values
return X, y
def get_X_pipeline(num_ix, cat_ix):
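    # Numeric columns go through imputation, combined-attribute generation and
    # polynomial expansion; categorical columns are one-hot encoded; the two
    # sub-pipelines are joined with a FeatureUnion.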
num_pipeline = Pipeline([
('selector', IndexSelector(num_ix)),
# ('inspector', Inspector('num_inspector')),
('imputer', Imputer(strategy='median')),
('attribs_adder', CombinedAttributesAdder()),
('poly', PolynomialFeatures(degree=1)),
# ('std_scaler', MinMaxScaler())
])
cat_pipeline = Pipeline([
('selector', IndexSelector(cat_ix)),
# ('inspector', Inspector('cat_inspector')),
('label_binarizer', CategoricalEncoder(encoding="onehot-dense")),
])
union_pipeline = FeatureUnion(transformer_list=[
('num_pipeline', num_pipeline),
('cat_pipeline', cat_pipeline),
])
p = Pipeline([
# ('inspector:before', Inspector('top_inspector:before')),
('union', union_pipeline),
# ('inspector:after', Inspector('top_inspector:after')),
])
return p
def get_y_pipeline():
p = Pipeline([
# ('std_scaler', MinMaxScaler()),
('pass_through', PassThrough())
])
return p
def evaluate_error(model, y_pipeline, train_X, test_X, train_y, test_y):
y_hat = model.predict(train_X).reshape(-1, 1)
# train_y = y_pipeline.inverse_transform(train_y)
# y_hat = y_pipeline.inverse_transform(y_hat)
train_rmse = mean_squared_error(train_y, y_hat) ** 0.5
print("train rmse: {}".format(train_rmse))
y_hat = model.predict(test_X).reshape(-1, 1)
# test_y = y_pipeline.inverse_transform(test_y)
# y_hat = y_pipeline.inverse_transform(y_hat)
test_rmse = mean_squared_error(test_y, y_hat) ** 0.5
print("test rmse: {}".format(test_rmse))
def predict(model, y_pipeline, X, y_true):
y_hat = model.predict(X).reshape(-1, 1)
print("y_hat: \n")
y_hat = y_pipeline.inverse_transform(y_hat)
print(y_hat)
print("y_true: \n")
y_true = y_pipeline.inverse_transform(y_true)
print(y_true)
def run():
housing_df = load_housing_df()
y_cols = ["median_house_value"]
x_cols = [x for x in list(housing_df.columns) if x not in y_cols]
cat_attribs = ["ocean_proximity"]
num_attribs = [x for x in x_cols if x not in cat_attribs]
X, y = get_X_y(housing_df, x_cols, y_cols)
x_cti_trans = ColumnToIndexTransformer(full_columns=list(x_cols))
cat_ix = x_cti_trans.transform(cat_attribs)
num_ix = x_cti_trans.transform(num_attribs)
train_X, test_X, train_y, test_y = split(X, y)
x_pipeline = get_X_pipeline(num_ix, cat_ix)
train_X = x_pipeline.fit_transform(train_X)
test_X = x_pipeline.transform(test_X)
y_pipeline = get_y_pipeline()
train_y = y_pipeline.fit_transform(train_y)
test_y = y_pipeline.transform(test_y)
model = RandomForestRegressor(warm_start=False, bootstrap=False, max_features=6, n_estimators=80)
def simple_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y):
model.fit(train_X, train_y)
evaluate_error(model, y_pipeline, train_X, test_X, train_y, test_y)
predict(model, y_pipeline, x_pipeline.transform(X[[17606, 18632, 14650, 3230, 3555]]), y_pipeline.transform(y[[17606, 18632, 14650, 3230, 3555]]))
predict(model, y_pipeline, test_X[:5], test_y[:5])
def cross_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y):
scores = cross_val_score(model, train_X, train_y, scoring="neg_mean_squared_error", cv=10)
rmse_scores = np.sqrt(-scores) # ** 0.5
def display_scores(scores):
print("Scores:", y_pipeline.inverse_transform(scores.reshape(-1, 1)))
print("Mean:", y_pipeline.inverse_transform([[scores.mean()]]))
print("Standard deviation:", y_pipeline.inverse_transform([[scores.std()]]))
display_scores(rmse_scores)
def grid_search(model, train_X, train_y):
from sklearn.model_selection import GridSearchCV
param_grid = [
{'n_estimators': [3, 10, 30, 50, 80], 'max_features': [2, 4, 6, 8, 10]},
{'bootstrap': [False], 'n_estimators': [3, 10, 30, 50, 80], 'max_features': [2, 4, 6, 8, 10]},
]
grid_search = GridSearchCV(model, param_grid, cv=5, n_jobs=os.cpu_count(), scoring="neg_mean_squared_error")
grid_search.fit(train_X, train_y)
pp(grid_search.best_params_)
pp(grid_search.best_estimator_)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
def feature_importance(model, x_pipeline):
i = model.feature_importances_
extra_attrs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
# encoder = get_pipeline_object(x_pipeline, ["union", "cat_pipeline", "label_binarizer"])
encoder = x_pipeline.get_params()["union__cat_pipeline__label_binarizer"]
one_hot_classes = list(np.array(encoder.categories_).ravel())
attrs = num_attribs + extra_attrs + one_hot_classes
pp(sorted(zip(i, attrs), reverse=True))
simple_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y)
feature_importance(model, x_pipeline)
# cross_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y)
# grid_search(model, train_X, train_y)
run()
``` |
{
"source": "21strun/django-coverage",
"score": 2
} |
#### File: django-coverage/django_coverage/coverage_runner.py
```python
import os
import sys
import django
if django.VERSION < (1, 2):
msg = """
django-coverage 1.1+ requires django 1.2+.
Please use django-coverage 1.0.3 if you have django 1.1 or django 1.0
"""
raise Exception(msg)
from django.conf import global_settings
from django.db.models import get_app, get_apps
from django.test.utils import get_runner
import coverage
from django_coverage import settings
from django_coverage.utils.coverage_report import html_report
from django_coverage.utils.module_tools import get_all_modules
DjangoTestSuiteRunner = get_runner(global_settings)
class CoverageRunner(DjangoTestSuiteRunner):
"""
Test runner which displays a code coverage report at the end of the run.
"""
def __new__(cls, *args, **kwargs):
"""
Add the original test runner to the front of CoverageRunner's bases,
so that CoverageRunner will inherit from it. This allows it to work
with customized test runners.
"""
# If the test runner was changed by the management command, change it
# back to its original value in order to get the original runner.
if getattr(settings, 'ORIG_TEST_RUNNER', None):
settings.TEST_RUNNER = settings.ORIG_TEST_RUNNER
TestRunner = get_runner(settings)
if (TestRunner != DjangoTestSuiteRunner):
cls.__bases__ = (TestRunner,) + cls.__bases__
return super(CoverageRunner, cls).__new__(cls)
def _get_app_package(self, app_model_module):
"""
Returns the app module name from the app model module.
"""
return '.'.join(app_model_module.__name__.split('.')[:-1])
def run_tests(self, test_labels, extra_tests=None, **kwargs):
coverage.use_cache(settings.COVERAGE_USE_CACHE)
for e in settings.COVERAGE_CODE_EXCLUDES:
coverage.exclude(e)
coverage.start()
results = super(CoverageRunner, self).run_tests(test_labels,
extra_tests, **kwargs)
coverage.stop()
coverage_modules = []
if test_labels:
for label in test_labels:
label = label.split('.')[-1]
app = get_app(label)
coverage_modules.append(self._get_app_package(app))
else:
for app in get_apps():
coverage_modules.append(self._get_app_package(app))
coverage_modules.extend(settings.COVERAGE_ADDITIONAL_MODULES)
packages, modules, excludes, errors = get_all_modules(
coverage_modules, settings.COVERAGE_MODULE_EXCLUDES,
settings.COVERAGE_PATH_EXCLUDES)
if settings.COVERAGE_USE_STDOUT:
coverage.report(list(modules.values()), show_missing=1)
if excludes:
message = "The following packages or modules were excluded:"
print("")
print(message)
for e in excludes:
print(e)
print("")
if errors:
message = "There were problems with the following packages "
message += "or modules:"
print("")
print(message)
for e in errors:
print(e)
print("")
outdir = settings.COVERAGE_REPORT_HTML_OUTPUT_DIR
if outdir:
outdir = os.path.abspath(outdir)
if settings.COVERAGE_CUSTOM_REPORTS:
html_report(outdir, modules, excludes, errors)
else:
coverage._the_coverage.html_report(list(modules.values()), outdir)
print("")
print("HTML reports were output to '%s'" %outdir)
return results
``` |
{
"source": "21tushar/Fourier-Transform",
"score": 3
} |
#### File: 21tushar/Fourier-Transform/utils.py
```python
import numpy as np
import matplotlib.pyplot as plt
def signal(sinuses):
    # Create the signal as a sum of different sinusoids
t = np.linspace(0, 0.5, 800)
s=0
for i in range(len(sinuses)):
s += np.sin(sinuses[i] * 2 * np.pi * t)
# Plot the signal
plt.style.use('seaborn')
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot(1,1,1)
ax.plot(t, s, label = r'$y=f(x)$')
ax.set_title(" Signal ", fontsize = 20)
ax.set_ylabel("Amplitude")
ax.set_xlabel("Time [s]")
ax.legend(loc='best')
ax.grid(True)
plt.show()
fig.savefig('signal.png')
return s, t
def Fourier(s, t):
    # Perform the Fourier Transform
    fft = np.fft.fft(s)
    T = t[1] - t[0]  # sample spacing (seconds per sample)
    N = s.size
    # 1/T is the sampling frequency, so the frequency axis spans 0 .. 1/T
f = np.linspace(0, 1 / T, N)
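    # Only the first N // 2 bins are plotted below: for a real-valued signal
    # the spectrum is symmetric, so the upper half mirrors these frequencies.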
# Plot the signal Decomposition
plt.style.use('seaborn')
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot(1,1,1)
ax.set_title(" Decomposed Signal ", fontsize = 20)
ax.set_ylabel("Amplitude")
ax.set_xlabel("Frequency [Hz]")
ax.bar(f[:N // 2], np.abs(fft)[:N // 2] * 1 / N, width=1.5) # 1 / N is a normalization factor
ax.grid(False)
plt.show()
fig.savefig("Decomposed_signal.png")
``` |
{
"source": "21vcloud/Controller",
"score": 2
} |
#### File: management/commands/base_button_data.py
```python
import os
from django.core.management.base import BaseCommand
from common.access_control.base import AccessSet
class Command(BaseCommand):
def handle(self, *args, **options):
module_dir = os.path.dirname(__file__)
file_path = os.path.join(module_dir, 'cfg/base_button_data')
info_list = open(file_path, 'rb').read()
AccessSet.create_button_data(eval(info_list))
# button_info = open(file_path, 'rb').read()
# button_dict = eval(button_info)
# button_list = []
# for x in button_dict:
# button_list.extend(button_dict[x])
# AccessSet.create_button_data(button_list)
```
#### File: app/access_control/views.py
```python
import os
import logging
from django.http import HttpResponse
from rest_framework.views import APIView
from drf_yasg.utils import swagger_auto_schema
from BareMetalControllerBackend.settings import (DATA_CFG_LIST, ROLE_CFG_LIST, USER_CFG_LIST)
from account.auth import utils as auth_utils
from account.repository.serializers import TokenParameter
from account.repository.auth_models import (BmProject, BmUserInfo, BmMappingUserProject)
from common.access_control.base import *
from common.lark_common.model.common_model import ResponseObj
from access_control.models import (RoleUser, RolePermission, Element, Role)
from access_control import serializers
# Create your views here.
class BaseClass(APIView):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.module_dir = os.path.dirname(__file__)
self.logger = logging.getLogger(__name__)
@staticmethod
def get_project_obj(response_obj, project_id, project_name):
project_obj = None
if not (project_id or project_name):
pass
elif project_id:
try:
project_obj = BmProject.objects.get(id=project_id)
except BmProject.DoesNotExist:
response_obj.content = "not find project."
response_obj.is_ok = False
response_obj.no = 404
else:
try:
project_obj = BmProject.objects.get(project_name=project_name)
except BmProject.DoesNotExist:
response_obj.content = "not find project."
response_obj.is_ok = False
response_obj.no = 404
return response_obj, project_obj
class Console(BaseClass):
@swagger_auto_schema(
manual_parameters=TokenParameter,
query_serializer=serializers.ConsoleQueryListSerializer,
responses={200: serializers.ConsoleListSerializer},
operation_description="获取前台权限接口"
)
@auth_utils.token_verify
# @check_element_permission
def get(self, request):
response_obj = ResponseObj()
query_serializer = serializers.ConsoleQueryListSerializer(data=request.query_params.dict())
query_serializer.is_valid(True)
query_data = query_serializer.validated_data
user = request.session["username"]
element_type = query_data['type']
owner_path = query_data['owner_path']
project_name = query_data['project_name']
permission_type = 'element'
try:
owner = BmProject.objects.get(project_name=project_name)
except BmProject.DoesNotExist:
response_obj.content = "Can not find project_name."
response_obj.is_ok = False
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
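        # Resolve the user's roles scoped to this project; if none exist, fall
        # back to any role assigned to the account, then collect the element
        # permissions those roles grant.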
role_id_list = RoleUser.objects.filter(
user__account=user,
role__owner=owner.id
).values_list(
'role__id',
flat=True
)
if not role_id_list:
role_id_list = RoleUser.objects.filter(
user__account=user
).values_list(
'role__id',
flat=True
)
element_uuid_list = RolePermission.objects.filter(
role__id__in=role_id_list,
permission__type=permission_type
).values_list(
'permission__type_id',
flat=True
)
res_list = Element.objects.filter(
uuid__in=element_uuid_list
).filter(
owner_path=owner_path,
type=element_type
).order_by('sort_id')
permission_list = res_list.values_list('url_path', flat=True)
serializer = serializers.ConsoleListSerializer(res_list, many=True)
bar_list = serializer.data
response_obj.content = {
"permission_list": list(permission_list),
"bar_list": bar_list
}
return HttpResponse(response_obj.to_json(), content_type="application/json")
class Button(BaseClass):
@swagger_auto_schema(
manual_parameters=TokenParameter,
query_serializer=serializers.ButtonQueryListSerializer,
operation_description="获取button权限接口"
)
@auth_utils.token_verify
def get(self, request):
response_obj = ResponseObj()
query_serializer = serializers.ButtonQueryListSerializer(data=request.query_params.dict())
query_serializer.is_valid(True)
query_data = query_serializer.validated_data
user = request.session["username"]
owner_path = query_data['owner_path']
project_name = query_data['project_name']
permission_type = 'element'
try:
owner = BmProject.objects.get(project_name=project_name)
except BmProject.DoesNotExist:
response_obj.content = "Can not find project_name."
response_obj.is_ok = False
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
role_id_list = RoleUser.objects.filter(
user__account=user,
role__owner=owner.id
).values_list(
'role__id',
flat=True
)
if not role_id_list:
role_id_list = RoleUser.objects.filter(
user__account=user
).values_list(
'role__id',
flat=True
)
element_uuid_list = RolePermission.objects.filter(
role__id__in=role_id_list,
permission__type=permission_type
).values_list(
'permission__type_id',
flat=True
)
res_list = Element.objects.filter(
uuid__in=element_uuid_list
).filter(
owner_path=owner_path,
type="button"
).order_by('sort_id')
element_list = Element.objects.filter(owner_path=owner_path, type="button").order_by('sort_id')
submit_list = []
select_list = []
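        # Every button defined for the page is returned; buttons not granted by
        # the user's roles are flagged as disabled, and each button is grouped
        # into the submit or select list according to the type in its info field.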
for button_info in element_list:
if button_info in res_list:
disable = False
else:
disable = True
button_info_dict = {
"disabled": disable,
"element_id": button_info.element_id,
"html_laber": button_info.html_laber,
"id": button_info.id,
"info": button_info.info,
"owner_path": button_info.owner_path
}
button_dict = eval(button_info.info)
if button_dict['type'] == 'submit':
submit_list.append(button_info_dict)
elif button_dict['type'] == 'list':
select_list.append(button_info_dict)
response_obj.content = {
"submit_list": submit_list,
"select_list": select_list
}
return HttpResponse(response_obj.to_json(), content_type="application/json")
class RoleListView(BaseClass):
@swagger_auto_schema(
manual_parameters=TokenParameter,
request_body=serializers.RolePostSerializer,
responses={200: "successful"},
operation_description="项目角色初始化"
)
@auth_utils.token_verify
@check_element_permission
def post(self, request):
response_obj = ResponseObj()
response_obj.content = "successful."
response_obj.no = 200
data_serializer = serializers.RolePostSerializer(data=request.data)
data_serializer.is_valid(True)
query_data = data_serializer.validated_data
project_id = query_data.get("project_id", None)
project_name = query_data.get("project_name", None)
response_obj, project_obj = self.get_project_obj(response_obj, project_id, project_name)
if not response_obj.no == 200:
return HttpResponse(response_obj.to_json(), content_type="application/json")
access_set_obj = AccessSet()
for cfg in ROLE_CFG_LIST:
file_path = os.path.join(self.module_dir, 'management/commands/cfg/%s' % cfg)
info_list = open(file_path, 'rb').read()
if project_obj:
res = access_set_obj.switch_role(cfg, eval(info_list), project_obj.id)
else:
res = access_set_obj.switch_role(cfg, eval(info_list))
if not res:
response_obj.is_ok = res
response_obj.content = "error cfg_type."
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
@swagger_auto_schema(
manual_parameters=TokenParameter,
request_body=serializers.RoleDeleteSerializer,
responses={200: "successful"},
operation_description="项目角色删除"
)
@auth_utils.token_verify
@check_element_permission
def delete(self, request):
response_obj = ResponseObj()
response_obj.content = "successful."
response_obj.no = 200
data_serializer = serializers.RoleDeleteSerializer(data=request.data)
data_serializer.is_valid(True)
query_data = data_serializer.validated_data
project_id = query_data["project_id"]
try:
owner = BmProject.objects.get(id=project_id)
except BmProject.DoesNotExist:
response_obj.content = "not find project."
response_obj.is_ok = False
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
access_set_obj = AccessSet()
for cfg in ROLE_CFG_LIST:
file_path = os.path.join(self.module_dir, 'management/commands/cfg/%s' % cfg)
info_list = open(file_path, 'rb').read()
res = access_set_obj.delete_role(eval(info_list), owner.id)
if not res:
response_obj.is_ok = res
response_obj.content = "error cfg_type."
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
class PermissionListView(BaseClass):
@swagger_auto_schema(
manual_parameters=TokenParameter,
responses={200: "successful"},
operation_description="权限元素初始化"
)
@auth_utils.token_verify
@check_element_permission
def post(self, request):
response_obj = ResponseObj()
access_set_obj = AccessSet()
response_obj.content = "successful."
response_obj.no = 200
for cfg in DATA_CFG_LIST:
file_path = os.path.join(self.module_dir, 'management/commands/cfg/%s' % cfg)
info_list = open(file_path, 'rb').read()
res = access_set_obj.switch_data(cfg, eval(info_list))
if not res:
response_obj.is_ok = res
response_obj.content = "error cfg_type."
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
class BaseRoleUserListView(BaseClass):
@swagger_auto_schema(
manual_parameters=TokenParameter,
request_body=serializers.BaseRoleUserPostSerializer,
responses={200: "successful"},
operation_description="用户授权"
)
# @auth_utils.token_verify
# @check_element_permission
def post(self, request):
response_obj = ResponseObj()
response_obj.content = "successful."
response_obj.no = 200
data_serializer = serializers.BaseRoleUserPostSerializer(data=request.data)
data_serializer.is_valid(True)
query_data = data_serializer.validated_data
email = query_data["email"]
project_name = query_data.get("project_name", None)
project_id = query_data.get("project_id", None)
level = query_data["level"]
response_obj, project_obj = self.get_project_obj(response_obj, project_id, project_name)
if not response_obj.no == 200:
return HttpResponse(response_obj.to_json(), content_type="application/json")
try:
user = BmUserInfo.objects.get(account=email)
except BmUserInfo.DoesNotExist:
response_obj.is_ok = False
response_obj.content = "%s user mail not find" % email
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
for cfg in USER_CFG_LIST:
file_path = os.path.join(self.module_dir, 'management/commands/cfg/%s' % cfg)
info_list = eval(open(file_path, 'rb').read()).get(level)
if project_obj:
res = AccessSet.add_role_user(user, info_list, project_obj.id)
else:
res = AccessSet.add_role_user(user, info_list)
if not res:
response_obj.is_ok = res
response_obj.content = "error cfg_type."
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
@swagger_auto_schema(
manual_parameters=TokenParameter,
request_body=serializers.BaseRoleUserDeleteSerializer,
responses={200: "successful"},
operation_description="用户授权"
)
@auth_utils.token_verify
@check_element_permission
def delete(self, request):
response_obj = ResponseObj()
response_obj.content = "successful."
response_obj.no = 200
data_serializer = serializers.BaseRoleUserDeleteSerializer(data=request.data)
data_serializer.is_valid(True)
query_data = data_serializer.validated_data
email = query_data["email"]
project_id = query_data["project_id"]
try:
project_obj = BmProject.objects.get(id=project_id)
except BmProject.DoesNotExist:
response_obj.is_ok = False
response_obj.content = "not find project."
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
try:
user = BmUserInfo.objects.get(account=email)
except BmUserInfo.DoesNotExist:
response_obj.is_ok = False
response_obj.content = "%s user mail not find" % email
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
for cfg in USER_CFG_LIST:
file_path = os.path.join(self.module_dir, 'management/commands/cfg/%s' % cfg)
info_list = eval(open(file_path, 'rb').read()).get('1')
res = AccessSet.delete_role_user(user, info_list, project_obj.id)
if not res:
response_obj.is_ok = res
response_obj.content = "error cfg_type."
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
class RoleUserListView(BaseClass):
@swagger_auto_schema(
manual_parameters=TokenParameter,
request_body=serializers.RoleUserPostSerializer,
responses={200: "successful"},
operation_description="用户授权"
)
# @auth_utils.token_verify
def post(self, request):
response_obj = ResponseObj()
response_obj.content = "successful."
response_obj.no = 200
data_serializer = serializers.RoleUserPostSerializer(data=request.data)
data_serializer.is_valid(True)
query_data = data_serializer.validated_data
email = query_data["email"]
role_name = query_data["role_name"]
try:
user_obj = BmUserInfo.objects.get(account=email)
except BmUserInfo.DoesNotExist:
response_obj.is_ok = False
response_obj.content = "can not find user: %s." % email
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
try:
role_obj = Role.objects.get(name=role_name)
except Role.DoesNotExist:
response_obj.is_ok = False
response_obj.content = "can not find user: %s." % email
response_obj.no = 404
return HttpResponse(response_obj.to_json(), content_type="application/json")
try:
RoleUser.objects.create(user=user_obj, role=role_obj)
except Exception as ex:
print(ex)
response_obj.is_ok = False
response_obj.content = str(ex)
response_obj.no = 500
return HttpResponse(response_obj.to_json(), content_type="application/json")
return HttpResponse(response_obj.to_json(), content_type="application/json")
```
#### File: app/account/cache_test.py
```python
from __future__ import absolute_import
from django.core.cache import cache
from django.shortcuts import reverse
from django.http import HttpRequest
from django.utils.cache import get_cache_key
def expire_page_cache(view, curreq, args=None, key_prefix=None):
"""
Removes cache created by cache_page functionality.
Parameters are used as they are in reverse()
"""
if args is None:
path = reverse(view)
else:
path = reverse(view, args=args)
http_host = curreq.META.get("HTTP_HOST", "")
if len(http_host.split(":")) == 1:
server_name, server_port = http_host, "80"
else:
server_name, server_port = http_host.split(":")
request = HttpRequest()
request.META = {'SERVER_NAME': server_name, 'SERVER_PORT': server_port}
request.META.update(dict((header, value) for (header, value) in
curreq.META.items() if header.startswith('HTTP_')))
request.path = path
key = get_cache_key(request, key_prefix=key_prefix)
value = cache.get(key)
if key and cache.get(key):
a = cache.set(key, None, 0)
b = cache.get(key)
return a
```
#### File: app/api_tests/test_api_request.py
```python
import jsonpickle
import requests
from common.lark_common.kong_provider import APIHandler
from common.lark_common.model.common_model import ResponseObj
from common.lark_common.utils import EncodeHandler, JsonUtils
from common.lark_common.utils import AESCipher
class RequestClient(APIHandler):
def __init__(self, endpoint=None, api_key=None, logger=None):
APIHandler.__init__(self, endpoint=endpoint, api_key=api_key)
self.__post_header = self.header
self.__post_header['Accept'] = u'application/json'
self.__post_header['Content-type'] = u'application/json'
self.__post_header['Connection'] = u'close'
self.__get_header = self.header
self.logger = logger
self.aes_cipher = AESCipher()
def __getattr__(self, name):
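        # Any attribute not defined on the client becomes a request helper:
        # calling client.<path>(params, method, token) issues a GET or POST to
        # endpoint + "<path>" via make_request_by_operation.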
def handler_function(dict_param, method, token=None):
return self.make_request_by_operation(name, dict_param, method, token)
return handler_function
def make_request_by_operation(self, path_url, args, method, token):
if method and method == "POST":
self.__post_header["token"] = token
ret = self.__post(url=path_url, post_data=args)
self.__post_header.pop("token")
else:
self.__get_header["token"] = token
ret = self.__get(url=path_url, param=args)
self.__get_header.pop("token")
return ret
def __get(self, url, param):
"""
        HTTP GET helper.
        :param url: target URL path
        :param param: query parameters
        :return: response object, normally a ResponseObj
"""
response_obj = ResponseObj()
url = self.endpoint + url
http_response = requests.get(url, headers=self.__get_header, verify=False, params=param)
response_obj.no = http_response.status_code
if http_response.status_code != 200:
response_obj.is_ok = False
response_obj.message = http_response.reason
else:
response_obj.is_ok = True
http_content = EncodeHandler.byteify(jsonpickle.loads(http_response.content))
response_obj = JsonUtils.convert_dict_to_object(http_content)
response_obj.json = http_content
return response_obj
def __post(self, url, post_data):
"""
HTTP POST 方法工具类
:param url: 目标地址
:param post_data: 参数
:return: 返回对象,通常为Response Obj
"""
response_obj = ResponseObj()
# Encrypt the request payload before posting
encrypt_post_data = self.aes_cipher.encrypt(post_data)
json_data = {"aesRequest": encrypt_post_data.decode()}
json_data = jsonpickle.dumps(json_data)
post_url = self.endpoint + url
http_response = requests.post(post_url, headers=self.__post_header, data=json_data, verify=False)
if http_response.status_code != 200:
response_obj.is_ok = False
response_obj.no = http_response.status_code
response_obj.message = http_response.reason
return response_obj
http_content = EncodeHandler.byteify(jsonpickle.loads(http_response.content))
response_obj = JsonUtils.convert_dict_to_object(http_content)
response_obj.json = http_content
return response_obj
def __post_list(self, url, post_data):
"""
HTTP Post支持批量数据提交,例如批量添加。函数内会将对象类型转化为list对象
:param url: POST提交地址
:param post_data: 批量提交数据
:return: 返回值,通常为Response Obj
"""
post_data_list = []
if isinstance(post_data, list):
post_data_list = post_data
else:
post_data_list.append(post_data)
return self.__post(url=url, post_data=post_data_list)
```
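A hedged usage sketch of the dynamic dispatch implemented by `RequestClient` above; the endpoint URL, token and the `instance_list` operation name are placeholders, not real routes:

```python
# Sketch only: endpoint, token and the "instance_list" operation are assumptions.
client = RequestClient(endpoint="https://example.com/baremetal/", api_key="")

# __getattr__ maps the attribute access to a GET against endpoint + "instance_list".
result = client.instance_list(dict_param={"page": 1}, method="GET", token="demo-token")
if result.is_ok:
    print(result.json)
```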
#### File: app/api_tests/tests_suite.py
```python
import unittest
import os
def init_django():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BareMetalControllerBackend.settings")
import django
django.setup()
if __name__ == "__main__":
init_django()
from account.tests import test_views_account
from baremetal_service.tests import test_ceate_listener
from baremetal_service.tests import test_add_member
from baremetal_service.tests import test_delete_cmdb
suite = unittest.TestSuite()
tests = list()
# account
# tests.append(test_views_account.TestAccountClient("token_verify"))
# tests.append(test_views_account.TestAccountClient("token_verify_not_exit"))
# tests.append(test_views_account.TestAccountClient("token_verify_not_timeout"))
# tests.append(test_views_account.TestAccountClient("register"))
# tests.append(test_views_account.TestAccountClient("register_fail"))
# tests.append(test_views_account.TestAccountClient("login"))
# tests.append(test_views_account.TestAccountClient("login_fail"))
# tests.append(test_views_account.TestAccountClient("logout"))
# tests.append(test_views_account.TestAccountClient("update_password"))
# tests.append(test_views_account.TestAccountClient("confirm_update_password"))
# tests.append(test_views_account.TestVpcs("test_get_vpcs_from_db"))
# tests.append(test_ceate_listener.testLb("test"))
# tests.append(test_add_member.testAddMember("test"))
tests.append(test_delete_cmdb.testCmdb("instance_create"))
tests.append(test_delete_cmdb.testCmdb("tag_create"))
tests.append(test_delete_cmdb.testCmdb("instance_put"))
tests.append(test_delete_cmdb.testCmdb("instance_delete"))
suite.addTests(tests=tests)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
```
#### File: baremetal_dashboard/tests/test_views_baremental.py
```python
from django.test import TestCase
from BareMentalConsole.utils.env import env_config
from common.api_request import RequestClient
class TestAccountClient(TestCase):
def setUp(self):
self.infra_machine_client = RequestClient(
# DEV
endpoint=env_config.baremetal_endpoint,
# endpoint="http://172.16.31.10:9001/account/",
# UAT
# endpoint="http://172.16.31.10:8000/uat/baremental/",
api_key="")
def get_info_by_key(self):
apollo_info_obj = self.infra_machine_client.get_info_by_key(
dict_param={"key_value": "introList"}, method="GET")
self.assertTrue(apollo_info_obj.is_ok)
def create_contact_to_us(self):
dict_param = {
"customer_name": "string",
"phone": "string",
"email": "string",
"company": "string",
"department": "string",
"job_position": "string",
"issue_content": "string",
"issue_type": "string",
"created_at": "2019-04-23T02:23:49.682Z"
}
contact_to_us_obj = self.infra_machine_client.contact_to_us(dict_param=dict_param, method="POST")
self.assertTrue(contact_to_us_obj.is_ok)
```
#### File: baremetal_openstack/repository/nova_provider.py
```python
import logging
from keystoneauth1 import exceptions as keystone_exceptions
from BareMetalControllerBackend.conf.env import EnvConfig
from common.lark_common.model.common_model import ResponseObj
from common import exceptions as exc
from common import utils
from openstack import connection
config = EnvConfig()
LOG = logging.getLogger(__name__)
class OpenstackClientProvider(object):
def __init__(self, openstack_client=None):
self.openstack_client = openstack_client
def get_openstack_client_by_request(self, request):
try:
self.openstack_client = utils.get_openstack_client(request)
except keystone_exceptions.NotFound:
# Maybe token has expired,Get client use password
self.openstack_client = utils.get_openstack_client(request, auth_plugin='password')
else:
if not self.openstack_client:
raise exc.SessionNotFound()
return self.openstack_client
def get_admin_openstack_client(self):
admin_conn = connection.Connection(
region_name=config.openstack_admin_region_name,
auth=dict(
auth_url=config.openstack_admin_auth_url,
username=config.openstack_admin_username,
password=config.openstack_admin_password,
project_id=config.openstack_admin_project_id,
user_domain_id=config.openstack_admin_user_domain_id
),
compute_api_version=config.openstack_admin_compute_api_version,
identity_interface=config.openstack_admin_identity_interface
)
return admin_conn
def create_server(self, name, image_id, flavor, network):
response_obj = ResponseObj()
try:
# openstack_client = utils.get_openstack_client()
server = self.openstack_client.create_server(name=name, image=image_id, flavor=flavor, network=network)
except Exception as ex:
# Report the failure instead of falling through to an undefined "server".
response_obj.message = str(ex)
response_obj.no = 500
response_obj.is_ok = False
return response_obj
return server
```
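A hedged usage sketch for the provider above; it assumes the `EnvConfig` values referenced in the constructor are populated and otherwise only relies on the standard openstacksdk compute proxy:

```python
# Sketch only: assumes the openstack_admin_* settings in EnvConfig are valid.
provider = OpenstackClientProvider()
admin_conn = provider.get_admin_openstack_client()

# List servers through the openstacksdk compute proxy.
for server in admin_conn.compute.servers():
    print(server.id, server.status)
```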
#### File: baremetal_service/repository/service_model.py
```python
import uuid
from django.db import models
from django.db.models.fields.related import ManyToManyField
from common.lark_common import random_object_id
from account.repository.auth_models import BmContract, BmProject
ORDER_TYPE_INCREASE = "increase"
ORDER_TYPE_ALTERATION = "alteration"
ORDER_STATUS_DELIVERED = "delivered"
ORDER_STATUS_DELIVERING = "delivering"
ORDER_TYPE_DICT = {"increase": "新购", "alteration": "变更"}
FLAVOR_ACTIVE = "active"
VOLUME_ACTIVE = "active"
VOLUME_BACKUP_ACTIVE = "active"
FLOATINGIP_ACTIVE = "active"
LB_ERROR = "ERROR"
resource_type_volume = "volume"
resource_type_volume_backup = "volume_backup"
resource_type_flaoting_ip = "flaoting_ip"
resource_type_ecbm = "ECBM"
resource_contract_info = "contract_info"
FLOATINGIP_AATTACH_ECBM = "ECBM"
FLOATINGIP_AATTACH_SLB = "LoadBalance"
class BmServiceOrder(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_bm_service_order_code)
account_id = models.CharField(max_length=64, blank=True, null=True,
help_text="客户id 示例:3f4cb35aeec544d3af33150f38b55286")
contract_number = models.CharField(max_length=64, blank=True, null=True, help_text="合同编码 示例:ding11")
project = models.ForeignKey(BmProject, models.DO_NOTHING, blank=True, null=True, help_text="项目 示例:test")
# contract_number = models.CharField(max_length=64, blank=True, null=True)
# project_id = models.CharField(max_length=64, blank=True, null=True)
# project_name = models.CharField(max_length=64, blank=True, null=True)
region = models.CharField(max_length=64, blank=True, null=True, help_text="可用区 示例:华北区")
order_type = models.CharField(max_length=64, blank=True, null=True, default=ORDER_TYPE_INCREASE,
help_text="订单类型 示例:increase")
order_price = models.FloatField(blank=True, null=True, help_text="订单价格 示例:1300")
product_type = models.CharField(max_length=64, blank=True, null=True, help_text="产品类型 示例:ECBM")
product_info = models.TextField(blank=True, null=True, help_text="产品详细信息")
billing_model = models.CharField(max_length=64, blank=True, null=True, help_text="绑定的合同类型 示例:框架合同 ")
service_count = models.IntegerField(blank=True, null=True, help_text="所购买的服务数量 示例:3")
delivery_status = models.CharField(max_length=64, blank=True, null=True, help_text="")
create_at = models.DateTimeField(blank=True, null=True, help_text="")
update_at = models.DateTimeField(blank=True, null=True, help_text="")
deleted = models.CharField(max_length=11, blank=True, null=True, help_text="")
class Meta:
managed = False
db_table = 'bm_service_order'
class BmServiceMachine(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
order = models.ForeignKey('BmServiceOrder', models.DO_NOTHING, blank=True, null=True)
uuid = models.CharField(max_length=64, blank=True, null=True)
flavor_name = models.CharField(max_length=255, blank=True, null=True, help_text="机器名称 示例:戴尔")
flavor_id = models.CharField(max_length=64, blank=True, null=True,
help_text="机器id 示例:11a2c533-73cc-4f95-8e7b-0055b7ec18a7")
image_name = models.CharField(max_length=255, blank=True, null=True, help_text="镜像名称 示例:Windows2016")
image_id = models.CharField(max_length=64, blank=True, null=True,
help_text="镜像id 示例:198e0048-c8b2-4db9-9f08-395ea005af21")
monitoring = models.CharField(max_length=11, blank=True, null=True, help_text="是否携带监控 示例:True/False")
vulnerability_scanning = models.CharField(max_length=11, blank=True, null=True, help_text="是否携带扫描 示例:True/False")
disk_info = models.TextField(blank=True, null=True, help_text="磁盘信息")
network_path_type = models.CharField(max_length=64, blank=True, null=True, help_text="网络类型")
network = models.CharField(max_length=64, blank=True, null=True, help_text="网络名称")
network_id = models.CharField(max_length=64, blank=True, null=True, help_text="网络id")
floating_ip_info = models.TextField(blank=True, null=True, help_text="弹性公网IP信息")
floating_ip_allocation = models.BooleanField(blank=True, null=True, help_text="弹性公网IP可用量")
floating_ip_bandwidth = models.CharField(max_length=64, blank=True, null=True, help_text="弹性公网IP带宽")
floating_ip_line = models.CharField(max_length=64, blank=True, null=True, help_text="弹性公网IP类型")
firewall_id = models.CharField(max_length=64, blank=True, null=True, help_text="防火墙id")
firewall_name = models.CharField(max_length=64, blank=True, null=True, help_text="防火墙名称")
service_name = models.CharField(max_length=64, blank=True, null=True, help_text="所生成的服务器名称")
login_method = models.CharField(max_length=64, blank=True, null=True,
help_text="登陆方式(user_password-密码登陆)(keypair-密钥登陆)")
service_username = models.CharField(max_length=64, blank=True, null=True, help_text="所生成服务器的登录名")
service_password = models.CharField(max_length=64, blank=True, null=True, help_text="所生成服务器的密码")
public_key = models.TextField(blank=True, null=True, help_text="公钥信息")
create_at = models.DateTimeField(blank=True, null=True, help_text="服务创建时间")
update_at = models.DateTimeField(blank=True, null=True, help_text="服务更新时间")
deleted = models.CharField(max_length=11, blank=True, null=True, help_text="服务删除时间")
status = models.CharField(max_length=64, blank=True, null=True, help_text="状态(active-激活)(DELETED-删除)")
job_model = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_machine'
class BmRequestLog(models.Model):
account_id = models.CharField(max_length=64, blank=True, null=True)
account_name = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
object_id = models.CharField(max_length=64, blank=True, null=True)
object_name = models.CharField(max_length=255, blank=True, null=True)
object_type = models.CharField(max_length=255, blank=True, null=True)
action = models.CharField(max_length=255, blank=True, null=True)
uri = models.CharField(max_length=255, blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
request_info = models.TextField(blank=True, null=True)
extra = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_request_log'
class BmServiceInstance(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
account_id = models.CharField(max_length=64, blank=True, null=True,
help_text="客户id 示例:3f4cb35aeec544d3af33150f38b55286")
project_id = models.CharField(max_length=64, blank=True, null=True,
help_text="项目id 示例:7ae5a60714014778baddea703b85cd93")
region = models.CharField(max_length=64, blank=True, null=True, help_text="区域 示例:regionOne")
monitoring = models.CharField(max_length=11, blank=True, null=True, help_text="是否带监控 示例:True")
contract_number = models.CharField(max_length=64, blank=True, null=True, help_text="合同编号 示例:ding111")
product_type = models.CharField(max_length=64, blank=True, null=True, help_text="产品类型 示例:ECBM")
billing_model = models.CharField(max_length=64, blank=True, null=True, help_text="绑定合同类型 示例:标准合同")
uuid = models.CharField(unique=True, max_length=64, help_text="对应机器id 示例:c4130c54-bc4b-4249-928d-c014827653db")
name = models.CharField(max_length=255, help_text="机器名称", blank=True, null=True)
status = models.CharField(max_length=64, blank=True, null=True, help_text="状态 示例:active/DELETED")
task = models.CharField(max_length=64, blank=True, null=True, help_text="实例创建结果 示例:success/instance_build")
create_at = models.DateTimeField(blank=True, null=True, help_text="实例创建时间")
update_at = models.DateTimeField(blank=True, null=True, help_text="实例更新时间")
deleted = models.NullBooleanField(blank=True, null=True, default=False, help_text="实例删除时间")
class Meta:
managed = False
db_table = 'bm_service_instance'
class BmServiceFloatingIp(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id, help_text="")
order_id = models.CharField(max_length=64, blank=True, null=True)
account_id = models.CharField(max_length=64, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
contract_number = models.CharField(max_length=64, blank=True, null=True)
external_line_type = models.CharField(max_length=64, blank=True, null=True, help_text="进出网络类型 "
"示例:three_line_ip")
external_name = models.CharField(max_length=255, blank=True, null=True)
external_name_id = models.CharField(max_length=255, blank=True, null=True)
floating_ip = models.CharField(max_length=64, blank=True, null=True)
floating_ip_id = models.CharField(max_length=64, blank=True, null=True)
attached = models.NullBooleanField(blank=True, null=True, default=False)
instance_uuid = models.CharField(max_length=64, blank=True, null=True)
instance_name = models.CharField(max_length=255, blank=True, null=True)
fixed_address = models.CharField(max_length=255, blank=True, null=True)
shared_qos_policy_type = models.BooleanField(default=False)
qos_policy_name = models.CharField(max_length=64, blank=True, null=True, help_text="带宽大小 示例:100M")
qos_policy_id = models.CharField(max_length=64, blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
first_create_at = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=64, blank=True, null=True)
is_measure_end = models.NullBooleanField(blank=True, null=True, default=False)
attached_type = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_floating_ip'
class BmContractFloatingIpMaterial(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
material_number = models.CharField(max_length=64, blank=True, null=True)
floating_ip_type_name = models.CharField(max_length=255, blank=True, null=True)
floating_ip_type = models.CharField(max_length=255, blank=True, null=True)
external_line_type = models.CharField(max_length=255, blank=True, null=True)
charge_type = models.CharField(max_length=255, blank=True, null=True)
base_price = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
floating_ip_available = models.IntegerField(blank=True, null=True)
floating_ip_capacity = models.IntegerField(blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_material_floating_ip'
class BmContractBandWidthaterial(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
material_number = models.CharField(max_length=64, blank=True, null=True)
band_width_type_name = models.CharField(max_length=255, blank=True, null=True)
band_width_type = models.CharField(max_length=255, blank=True, null=True)
external_line_type = models.CharField(max_length=255, blank=True, null=True)
charge_type = models.CharField(max_length=255, blank=True, null=True)
base_price = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
band_width_available = models.IntegerField(blank=True, null=True)
band_width_capacity = models.IntegerField(blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_material_band_width'
class BmServiceLoadbalanceFlavor(models.Model):
id = models.CharField(primary_key=True, max_length=64)
type = models.CharField(max_length=255, blank=True, null=True)
max_connect = models.CharField(max_length=255, blank=True, null=True)
new_connetc = models.CharField(max_length=255, blank=True, null=True)
second_query = models.CharField(max_length=255, blank=True, null=True)
openstack_name = models.CharField(max_length=255, blank=True, null=True)
memory = models.CharField(max_length=255, blank=True, null=True)
disk = models.CharField(max_length=255, blank=True, null=True)
vcpus = models.CharField(max_length=255, blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_loadbalance_flavor'
class BmServiceMaterialVolume(models.Model):
id = models.CharField(primary_key=True, max_length=64)
material_number = models.CharField(max_length=64, blank=True, null=True)
volume_type_name = models.CharField(max_length=255, blank=True, null=True)
volume_type = models.CharField(max_length=255, blank=True, null=True)
openstack_volume_type = models.CharField(max_length=255, blank=True, null=True)
base_price = models.FloatField(blank=True, null=True)
volume_available = models.IntegerField(blank=True, null=True)
volume_capacity = models.IntegerField(blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_material_volume'
class BmServiceMaterialNat(models.Model):
id = models.CharField(primary_key=True, max_length=64)
material_number = models.CharField(max_length=64, blank=True, null=True)
nat_getway_type_name = models.CharField(max_length=255, blank=True, null=True)
charge_type = models.CharField(max_length=255, blank=True, null=True)
base_price = models.FloatField(blank=True, null=True)
nat_getway_available = models.IntegerField(blank=True, null=True)
nat_getway_capacity = models.IntegerField(blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
nat_getway_type = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_material_nat'
class BmServiceMaterialLb(models.Model):
id = models.CharField(primary_key=True, max_length=64)
material_number = models.CharField(max_length=64, blank=True, null=True)
lb_type_name = models.CharField(max_length=255, blank=True, null=True)
charge_type = models.CharField(max_length=255, blank=True, null=True)
base_price = models.FloatField(blank=True, null=True)
lb_available = models.IntegerField(blank=True, null=True)
lb_capacity = models.IntegerField(blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
lb_type = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_material_lb'
class BmServiceMaterialBandWidth(models.Model):
id = models.CharField(primary_key=True, max_length=64)
material_number = models.CharField(max_length=64, blank=True, null=True)
band_width_type_name = models.CharField(max_length=255, blank=True, null=True)
band_width_type = models.CharField(max_length=255, blank=True, null=True)
external_line_type = models.CharField(max_length=255, blank=True, null=True)
charge_type = models.CharField(max_length=255, blank=True, null=True)
base_price = models.FloatField(blank=True, null=True)
band_width_available = models.IntegerField(blank=True, null=True)
band_width_capacity = models.IntegerField(blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_material_band_width'
class BmMaterialFloatingIpSeasonal(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
account_id = models.CharField(max_length=255, blank=True, null=True)
identity_name = models.CharField(max_length=255, blank=True, null=True)
user_account = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
date = models.DateTimeField(blank=True, null=True)
band_material_number = models.CharField(max_length=255, blank=True, null=True)
band_width_type_name = models.CharField(max_length=255, blank=True, null=True)
material_band_base_price = models.CharField(max_length=255, blank=True, null=True)
material_number = models.CharField(max_length=255, blank=True, null=True)
floating_ip_type_name = models.CharField(max_length=255, blank=True, null=True)
base_price = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
sales = models.CharField(max_length=255, blank=True, null=True)
enterprise_number = models.CharField(max_length=255, blank=True, null=True)
location = models.CharField(max_length=255, blank=True, null=True)
customer_service_name = models.CharField(max_length=255, blank=True, null=True)
service_expired_time = models.CharField(max_length=255, blank=True, null=True)
service_start_time = models.CharField(max_length=255, blank=True, null=True)
contract_start_date = models.CharField(max_length=255, blank=True, null=True)
contract_expire_date = models.CharField(max_length=255, blank=True, null=True)
contract_customer_name = models.CharField(max_length=255, blank=True, null=True)
contract_authorizer = models.CharField(max_length=255, blank=True, null=True)
contract_type = models.CharField(max_length=255, blank=True, null=True)
contract_authorizer_account = models.CharField(max_length=255, blank=True, null=True)
external_line_type = models.CharField(max_length=255, blank=True, null=True)
external_name = models.CharField(max_length=255, blank=True, null=True)
external_name_id = models.CharField(max_length=255, blank=True, null=True)
floating_ip = models.CharField(max_length=255, blank=True, null=True)
floating_ip_id = models.CharField(max_length=255, blank=True, null=True)
order_id = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=255, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True)
max_band = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_material_floating_ip_seasonal'
class BmMaterialVolumeSeasonal(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
account_id = models.CharField(max_length=255, blank=True, null=True)
user_account = models.CharField(max_length=255, blank=True, null=True)
identity_name = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
create_at = models.CharField(max_length=255, blank=True, null=True)
name = models.CharField(max_length=255, blank=True, null=True)
order_id = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=255, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
size = models.CharField(max_length=255, blank=True, null=True)
end_at = models.CharField(max_length=255, blank=True, null=True)
volume_id = models.CharField(max_length=255, blank=True, null=True)
volume_type = models.CharField(max_length=255, blank=True, null=True)
contract_customer_name = models.CharField(max_length=255, blank=True, null=True)
contract_authorizer = models.CharField(max_length=255, blank=True, null=True)
contract_type = models.CharField(max_length=255, blank=True, null=True)
sales = models.CharField(max_length=255, blank=True, null=True)
enterprise_number = models.CharField(max_length=255, blank=True, null=True)
location = models.CharField(max_length=255, blank=True, null=True)
customer_service_name = models.CharField(max_length=255, blank=True, null=True)
account = models.CharField(max_length=255, blank=True, null=True)
material_number = models.CharField(max_length=255, blank=True, null=True)
volume_type_name = models.CharField(max_length=255, blank=True, null=True)
base_price = models.CharField(max_length=255, blank=True, null=True)
date = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_material_volume_seasonal'
class BmMaterialLbSeasonal(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
account_id = models.CharField(max_length=255, blank=True, null=True)
user_account = models.CharField(max_length=255, blank=True, null=True)
identity_name = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
create_at = models.CharField(max_length=255, blank=True, null=True)
order_id = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=255, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
delete_at = models.CharField(max_length=255, blank=True, null=True)
ip_adress = models.CharField(max_length=255, blank=True, null=True)
loadbalance_id = models.CharField(max_length=255, blank=True, null=True)
loadbalance_name = models.CharField(max_length=255, blank=True, null=True)
contract_customer_name = models.CharField(max_length=255, blank=True, null=True)
contract_authorizer = models.CharField(max_length=255, blank=True, null=True)
contract_type = models.CharField(max_length=255, blank=True, null=True)
sales = models.CharField(max_length=255, blank=True, null=True)
enterprise_number = models.CharField(max_length=255, blank=True, null=True)
location = models.CharField(max_length=255, blank=True, null=True)
customer_service_name = models.CharField(max_length=255, blank=True, null=True)
account = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
material_number = models.CharField(max_length=255, blank=True, null=True)
lb_type_name = models.CharField(max_length=255, blank=True, null=True)
base_price = models.CharField(max_length=255, blank=True, null=True)
date = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_material_lb_seasonal'
class BmMaterialNetGetwaySeasonal(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
account_id = models.CharField(max_length=255, blank=True, null=True)
user_account = models.CharField(max_length=255, blank=True, null=True)
identity_name = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
create_at = models.CharField(max_length=255, blank=True, null=True)
order_id = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=255, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
delete_at = models.CharField(max_length=255, blank=True, null=True)
net_getway_id = models.CharField(max_length=255, blank=True, null=True)
net_getway_name = models.CharField(max_length=255, blank=True, null=True)
contract_customer_name = models.CharField(max_length=255, blank=True, null=True)
contract_authorizer = models.CharField(max_length=255, blank=True, null=True)
contract_type = models.CharField(max_length=255, blank=True, null=True)
sales = models.CharField(max_length=255, blank=True, null=True)
enterprise_number = models.CharField(max_length=255, blank=True, null=True)
location = models.CharField(max_length=255, blank=True, null=True)
customer_service_name = models.CharField(max_length=255, blank=True, null=True)
account = models.CharField(max_length=255, blank=True, null=True)
material_number = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
base_price = models.CharField(max_length=255, blank=True, null=True)
date = models.CharField(max_length=255, blank=True, null=True)
nat_getway_type_name = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_material_net_getway_seasonal'
class BmMaterialMachineSeasonal(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
account_id = models.CharField(max_length=255, blank=True, null=True)
user_account = models.CharField(max_length=255, blank=True, null=True)
identity_name = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
create_at = models.CharField(max_length=255, blank=True, null=True)
flavor_name = models.CharField(max_length=255, blank=True, null=True)
image_name = models.CharField(max_length=255, blank=True, null=True)
monitoring = models.CharField(max_length=255, blank=True, null=True)
vulnerability_scanning = models.CharField(max_length=255, blank=True, null=True)
network = models.CharField(max_length=255, blank=True, null=True)
network_path_type = models.CharField(max_length=255, blank=True, null=True)
service_name = models.CharField(max_length=255, blank=True, null=True)
order_id = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=255, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
product_type = models.CharField(max_length=255, blank=True, null=True)
service_count = models.CharField(max_length=255, blank=True, null=True)
delete_at = models.CharField(max_length=255, blank=True, null=True)
contract_customer_name = models.CharField(max_length=255, blank=True, null=True)
contract_authorizer = models.CharField(max_length=255, blank=True, null=True)
contract_type = models.CharField(max_length=255, blank=True, null=True)
sales = models.CharField(max_length=255, blank=True, null=True)
enterprise_number = models.CharField(max_length=255, blank=True, null=True)
location = models.CharField(max_length=255, blank=True, null=True)
customer_service_name = models.CharField(max_length=255, blank=True, null=True)
account = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
material_number = models.CharField(max_length=255, blank=True, null=True)
type = models.CharField(max_length=255, blank=True, null=True)
base_price = models.CharField(max_length=255, blank=True, null=True)
flavor_info = models.CharField(max_length=255, blank=True, null=True)
cpu_model = models.CharField(max_length=255, blank=True, null=True)
cpu_core = models.CharField(max_length=255, blank=True, null=True)
cpu_hz = models.CharField(max_length=255, blank=True, null=True)
ram = models.CharField(max_length=255, blank=True, null=True)
disk = models.CharField(max_length=255, blank=True, null=True)
date = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_material_machine_seasonal'
class BmMaterialVolumeBakSeasonal(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
account_id = models.CharField(max_length=255, blank=True, null=True)
user_account = models.CharField(max_length=255, blank=True, null=True)
identity_name = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
create_at = models.CharField(max_length=255, blank=True, null=True)
backup_name = models.CharField(max_length=255, blank=True, null=True)
order_id = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=255, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
service_count = models.CharField(max_length=255, blank=True, null=True)
delete_time = models.CharField(max_length=255, blank=True, null=True)
volume_id = models.CharField(max_length=255, blank=True, null=True)
contract_customer_name = models.CharField(max_length=255, blank=True, null=True)
contract_authorizer = models.CharField(max_length=255, blank=True, null=True)
contract_type = models.CharField(max_length=255, blank=True, null=True)
sales = models.CharField(max_length=255, blank=True, null=True)
enterprise_number = models.CharField(max_length=255, blank=True, null=True)
location = models.CharField(max_length=255, blank=True, null=True)
customer_service_name = models.CharField(max_length=255, blank=True, null=True)
account = models.CharField(max_length=255, blank=True, null=True)
material_number = models.CharField(max_length=255, blank=True, null=True)
volume_type_name = models.CharField(max_length=255, blank=True, null=True)
base_price = models.CharField(max_length=255, blank=True, null=True)
date = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_material_volume_bak_seasonal'
class BmContractFloatingIp(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
order_id = models.CharField(max_length=64, blank=True, null=True)
account_id = models.CharField(max_length=64, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
contract_number = models.CharField(max_length=64, blank=True, null=True)
external_line_type = models.CharField(max_length=64, blank=True, null=True)
external_name = models.CharField(max_length=255, blank=True, null=True)
external_name_id = models.CharField(max_length=255, blank=True, null=True)
floating_ip = models.CharField(max_length=64, blank=True, null=True)
floating_ip_id = models.CharField(max_length=64, blank=True, null=True)
attached = models.NullBooleanField(blank=True, null=True, default=False)
instance_uuid = models.CharField(max_length=64, blank=True, null=True)
instance_name = models.CharField(max_length=255, blank=True, null=True)
fixed_address = models.CharField(max_length=255, blank=True, null=True)
qos_policy_name = models.CharField(max_length=64, blank=True, null=True)
qos_policy_id = models.CharField(max_length=64, blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=64, blank=True, null=True)
is_measure_end = models.NullBooleanField(blank=True, null=True, default=False)
class Meta:
managed = False
db_table = 'bm_contract_floating_ip'
class BmServiceFlavor(models.Model):
id = models.CharField(primary_key=True, max_length=64)
material_number = models.CharField(max_length=64, blank=True, null=True)
name = models.CharField(max_length=64, blank=True, null=True)
openstack_flavor_name = models.CharField(max_length=64, blank=True, null=True)
type = models.CharField(max_length=64, blank=True, null=True)
type_name = models.CharField(max_length=64, blank=True, null=True)
resource_class = models.CharField(max_length=255, blank=True, null=True)
cpu_model = models.CharField(max_length=64, blank=True, null=True)
base_price = models.FloatField(blank=True, null=True)
cpu_core = models.CharField(max_length=64, blank=True, null=True)
cpu_hz = models.CharField(max_length=64, blank=True, null=True)
gpu = models.CharField(max_length=255, blank=True, null=True)
ram = models.CharField(max_length=64, blank=True, null=True)
disk = models.CharField(max_length=255, blank=True, null=True)
flavor_info = models.CharField(max_length=255, blank=True, null=True)
count = models.IntegerField(blank=True, null=True)
status = models.CharField(max_length=64, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_flavor'
class BmServiceImages(models.Model):
id = models.CharField(primary_key=True, max_length=64)
type = models.CharField(max_length=64, blank=True, null=True)
image_name = models.CharField(max_length=64, blank=True, null=True)
openstack_image_name = models.CharField(max_length=64, blank=True, null=True)
status = models.CharField(max_length=64, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_images'
class BmServiceQosPolicy(models.Model):
# id = models.CharField(primary_key=True, max_length=64)
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
qos_policy_name = models.CharField(max_length=64, blank=True, null=True)
qos_policy_count = models.IntegerField(blank=True, null=True)
shared = models.IntegerField(blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
project_name = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_qos_policy'
class Networks(models.Model):
network_id = models.CharField(primary_key=True, max_length=64)
name = models.CharField(max_length=255, blank=True, null=True)
cidr = models.CharField(max_length=64, blank=True, null=True)
enable_dhcp = models.NullBooleanField(blank=True, null=True, default=True)
gateway_ip = models.CharField(max_length=64, blank=True, null=True)
dns = models.TextField(max_length=255, blank=True, null=True)
vpc = models.ForeignKey('Vpc', on_delete=models.CASCADE)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
def to_dict(self):
opts = self._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if self.pk is None:
data[f.name] = []
else:
data[f.name] = list(f.value_from_object(self).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(self)
return data
class Meta:
managed = False
db_table = 'bm_networks'
class Vpc(models.Model):
vpc_name = models.CharField(max_length=255)
region = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=32, blank=True, null=True)
deleted = models.IntegerField(blank=True, null=True)
router_id = models.CharField(max_length=64, blank=True, null=True)
def to_dict(self):
opts = self._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if self.pk is None:
data[f.name] = []
else:
data[f.name] = list(f.value_from_object(self).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(self)
return data
class Meta:
managed = False
db_table = 'bm_vpc'
unique_together = (('vpc_name', 'project_id'),)
class FirewallRule(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
firewall = models.ForeignKey('Firewalls', models.CASCADE, blank=True, null=True)
direction = models.CharField(max_length=255, blank=True, null=True)
action = models.CharField(max_length=255, blank=True, null=True)
protocol = models.CharField(max_length=255, blank=True, null=True)
remote_ip = models.CharField(max_length=255, blank=True, null=True)
remote_port = models.TextField(max_length=255, blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
def to_dict(self):
opts = self._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if self.pk is None:
data[f.name] = []
else:
data[f.name] = list(f.value_from_object(self).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(self)
return data
class Meta:
managed = False
db_table = 'bm_firewall_rule'
class Firewalls(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
name = models.CharField(max_length=255, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
enabled = models.IntegerField(blank=True, null=True)
description = models.CharField(max_length=255, blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
def to_dict(self):
opts = self._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if self.pk is None:
data[f.name] = []
else:
data[f.name] = list(f.value_from_object(self).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(self)
return data
class Meta:
managed = False
db_table = 'bm_firewalls'
class BmServiceLoadbalance(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
order_id = models.CharField(max_length=64, blank=True, null=True)
account_id = models.CharField(max_length=64, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
contract_number = models.CharField(max_length=64, blank=True, null=True)
loadbalance_id = models.CharField(max_length=64, blank=True, null=True)
loadbalance_name = models.CharField(max_length=255, blank=True, null=True)
location = models.CharField(max_length=64, blank=True, null=True)
region = models.CharField(max_length=64, blank=True, null=True)
flavor_id = models.CharField(max_length=64, blank=True, null=True)
is_public = models.IntegerField(blank=True, null=True)
network_name = models.CharField(max_length=255, blank=True, null=True)
vip_network_id = models.CharField(max_length=255, blank=True, null=True)
vpc_id = models.CharField(max_length=64, blank=True, null=True)
created_at = models.DateTimeField(blank=True, null=True)
first_create_at = models.DateTimeField(blank=True, null=True)
updated_at = models.DateTimeField(blank=True, null=True)
listener_count = models.IntegerField(blank=True, null=True)
pool_count = models.IntegerField(blank=True, null=True)
deleted = models.IntegerField(blank=True, null=True)
is_measure_end = models.IntegerField(blank=True, null=True)
is_new_ip = models.IntegerField(blank=True, null=True)
error_type = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_loadbalance'
class BmNetworks(models.Model):
network_id = models.CharField(primary_key=True, max_length=64)
name = models.CharField(max_length=255, blank=True, null=True)
cidr = models.CharField(max_length=64, blank=True, null=True)
enable_dhcp = models.IntegerField(blank=True, null=True)
gateway_ip = models.CharField(max_length=64, blank=True, null=True)
dns = models.TextField(blank=True, null=True)
vpc = models.ForeignKey('BmVpc', models.DO_NOTHING)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_networks'
class BmVpc(models.Model):
vpc_name = models.CharField(max_length=255)
region = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=32, blank=True, null=True)
deleted = models.IntegerField(blank=True, null=True)
router_id = models.CharField(max_length=64, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_vpc'
unique_together = (('vpc_name', 'project_id'),)
class BmServiceVolume(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
order_id = models.CharField(max_length=64, blank=True, null=True)
account_id = models.CharField(max_length=64, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
volume_type = models.CharField(max_length=255, blank=True, null=True)
volume_id = models.CharField(max_length=255, blank=True, null=True)
size = models.CharField(max_length=255, blank=True, null=True)
name = models.CharField(max_length=255, blank=True, null=True)
create_at = models.DateTimeField(null=True)
update_at = models.DateTimeField(null=True)
is_measure_end = models.IntegerField(blank=True, null=True, default=0)
region = models.CharField(max_length=255, blank=True, null=True)
attached_type = models.CharField(max_length=255, blank=True, null=True)
instance_uuid = models.CharField(max_length=64, blank=True, null=True)
instance_name = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
first_create_at = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_service_volume'
class BmFloatingipfirewallMapping(models.Model):
floating_ip_id = models.CharField(max_length=64, blank=True, null=True)
firewall = models.ForeignKey(Firewalls, models.CASCADE, blank=True, null=True)
floating_ip = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_floatingipfirewall_mapping'
class BmInstanceMemberMapping(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=random_object_id.gen_random_object_id)
instance_id = models.CharField(max_length=255, blank=True, null=True)
pool_id = models.CharField(max_length=255, blank=True, null=True)
loadbancer_id = models.CharField(max_length=255, blank=True, null=True)
member_id = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_instance_member_mapping'
class BmServiceVolumeBackup(models.Model):
order_id = models.CharField(max_length=64, blank=True, null=True)
account_id = models.CharField(max_length=64, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
region = models.CharField(max_length=64, blank=True, null=True)
backup_id = models.CharField(max_length=64, primary_key=True)
volume_id = models.CharField(max_length=64, blank=True, null=True)
volume_name = models.CharField(max_length=64, blank=True, null=True)
is_incremental = models.BooleanField(default=False)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
deleted = models.NullBooleanField(blank=True, null=True, default=False)
status = models.CharField(max_length=255, blank=True, null=True)
backup_name = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
is_measure_end = models.IntegerField(blank=True, null=True, default=0)
def to_dict(self):
opts = self._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if self.pk is None:
data[f.name] = []
else:
data[f.name] = list(f.value_from_object(self).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(self)
return data
class Meta:
managed = False
db_table = 'bm_service_volume_backup'
class UUIDTools(object):
@staticmethod
def uuid4_hex():
# retun uuid4 hex string
return uuid.uuid4().hex
class BmServiceNatGateway(models.Model):
id = models.CharField(primary_key=True, max_length=36, default=UUIDTools.uuid4_hex)
order_id = models.CharField(max_length=64, blank=True, null=True)
account_id = models.CharField(max_length=64, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
name = models.CharField(max_length=255, blank=True, null=True)
vpc = models.ForeignKey('Vpc', models.DO_NOTHING, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
is_measure_end = models.BooleanField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
deleted = models.BooleanField(blank=True, null=True, default=False)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
delete_at = models.DateTimeField(blank=True, null=True)
def to_dict(self):
opts = self._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if self.pk is None:
data[f.name] = []
else:
data[f.name] = list(f.value_from_object(self).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(self)
return data
class Meta:
managed = False
db_table = 'bm_nat_gateway'
class NatRule(models.Model):
nat_rule_id = models.CharField(primary_key=True, max_length=36)
nat_gateway = models.ForeignKey('BmServiceNatGateway', on_delete=models.PROTECT, related_name='nat_rule')
floatingip_id = models.CharField(max_length=36)
floating_ip_address = models.CharField(max_length=64, null=True, blank=True)
scenes = models.CharField(max_length=64, blank=True, null=True, default='vpc')
external_port = models.IntegerField(blank=True, null=True)
protocol = models.CharField(max_length=36, blank=True, null=True)
internal_ip_address = models.CharField(max_length=64, blank=True, null=True)
internal_port_id = models.CharField(max_length=64, blank=True, null=True)
internal_port = models.IntegerField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
def to_dict(self):
opts = self._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if self.pk is None:
data[f.name] = []
else:
data[f.name] = list(f.value_from_object(self).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(self)
return data
class Meta:
managed = False
db_table = 'bm_nat_rule'
class BmShareBandWidth(models.Model):
id = models.CharField(primary_key=True, max_length=64, default=UUIDTools.uuid4_hex, help_text="")
shared_bandwidth_id = models.CharField(max_length=64, help_text="")
billing_type = models.CharField(max_length=64, blank=True, null=True)
order_id = models.CharField(max_length=64, blank=True, null=True)
account_id = models.CharField(max_length=64, blank=True, null=True)
contract_id = models.CharField(max_length=255, blank=True, null=True)
contract_number = models.CharField(max_length=255, blank=True, null=True)
project_id = models.CharField(max_length=64, blank=True, null=True)
name = models.CharField(max_length=255, blank=True, null=True)
max_kbps = models.IntegerField(blank=True, null=True)
create_at = models.DateTimeField(blank=True, null=True)
update_at = models.DateTimeField(blank=True, null=True)
is_measure_end = models.IntegerField(blank=True, null=True, default=0)
first_create_at = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=64, blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'bm_share_bandwidth'
class ShareBandWidthQuota(models.Model):
project_id = models.CharField(unique=True, max_length=64)
share_bandwidth_count = models.IntegerField(default=5)
floating_ip_count = models.IntegerField(default=20)
class Meta:
managed = False
db_table = 'share_bandwidth_quota'
```
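A short, hedged example of how these unmanaged models are typically queried; the filter values are placeholders:

```python
# Sketch only: project_id is a placeholder value.
from baremetal_service.repository import service_model

def active_floating_ips(project_id):
    # Floating IPs whose measurement period is still open for this project.
    fips = service_model.BmServiceFloatingIp.objects.filter(
        project_id=project_id, is_measure_end=False)
    return [{"ip": f.floating_ip, "attached": f.attached} for f in fips]

# Models that define to_dict() (Vpc, Networks, Firewalls, ...) can be
# serialized directly, including many-to-many primary keys:
# vpc_dict = service_model.Vpc.objects.first().to_dict()
```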
#### File: baremetal_service/repository/service_provider.py
```python
import datetime
import logging
import requests
# Only use in current version
from cinderclient import client as volume_client
from baremetal_openstack.repository.nova_provider import OpenstackClientProvider
from django.db.models import Count
from django.db import connections
from BareMetalControllerBackend.conf.env import EnvConfig
from common.api_request import RequestClient, RequestClientTwo
from BareMetalControllerBackend.conf.env import env_config
from account.repository import auth_models
from baremetal_service.repository import service_model
from common import exceptions as exc
from common import utils
from common.lark_common.model.common_model import ResponseObj
from common.openstack.baremetal_volume import baremetal_detach_volume
LOG = logging.getLogger(__name__)
REQUEST_INFO_LOGGER = logging.getLogger("request_info")
FIP_SERVICE_TYPE = 'network:floatingip'
# Server-related interfaces
class ServiceProvider(object):
def __init__(self):
pass
@staticmethod
def baremetal_service_order_create(param_info):
response_obj = ResponseObj()
try:
request = service_model.BmServiceOrder.objects.using("controller").create(**param_info)
response_obj.no = 200
response_obj.is_ok = True
response_obj.content = request
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "提交失败!", "admin_info": str(ex)}
return response_obj
@staticmethod
def get_qos_policy_info(qos_policy_name):
response_obj = ResponseObj()
try:
request = service_model.BmServiceQosPolicy.objects.get(qos_policy_name=qos_policy_name)
response_obj.no = 200
response_obj.is_ok = True
response_obj.content = request
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "查询带宽信息失败!", "admin_info": str(ex)}
return response_obj
# Floating IP related interfaces
class ServiceFloatingProvider(object):
def __init__(self):
self.cmdb_provider = ServiceCmdbProvider()
def floating_ip_list(self, openstack_client):
floating_ip_obj = openstack_client.list_floating_ips()
attached_port_list = []
for floating_ip in floating_ip_obj:
if floating_ip.attached:
attached_port_list.append(floating_ip.fixed_ip_address)
# Return the fixed addresses that currently have a floating IP attached.
return attached_port_list
def detach_ip_from_server(self, openstack_client, instance_uuid, floating_ip_id):
result = openstack_client.detach_ip_from_server(server_id=instance_uuid, floating_ip_id=floating_ip_id)
if not result:
raise Exception(result)
# Update the instance attachment info in the platform database
service_attach_obj = service_model.BmServiceFloatingIp.objects.get(floating_ip_id=floating_ip_id,
is_measure_end=False)
instance_uuid = service_attach_obj.instance_uuid
service_attach_obj.attached = False
service_attach_obj.attached_type = None
service_attach_obj.instance_uuid = None
service_attach_obj.instance_name = None
service_attach_obj.fixed_address = None
# service_attach_obj.update_at = datetime.datetime.now()
service_attach_obj.save()
cmdb_result = self.cmdb_provider.cmdb_data_sys_put(instance_uuid=instance_uuid, status=None)
LOG.info("CMDB the ip detach to server sys result: %s" % cmdb_result)
return service_attach_obj
def get_floatingip_quota(self, openstack_client, project_id):
LOG.info("Get floatingip quota for project %s", project_id)
network_quota = openstack_client.network.get_quota(project_id, details=True)
LOG.info("network quota %s", network_quota)
REQUEST_INFO_LOGGER.info("Get network quota %s", network_quota.floating_ips)
available_fip_quota = network_quota.floating_ips['limit'] - \
network_quota.floating_ips['used'] - int(network_quota.floating_ips['reserved'])
return {"available_fip_quota": available_fip_quota}
def check_fip_quota(self, openstack_client, fip_count, project_id):
LOG.info("Check network quota with %s count %s", project_id, fip_count)
check_result = True
available_fip_quota = self.get_floatingip_quota(openstack_client, project_id).get('available_fip_quota')
if fip_count > available_fip_quota:
check_result = False
return check_result, available_fip_quota
def check_fip_pools(self, fip_count, external_net_id):
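"""Check whether the external network still has at least fip_count
allocatable floating IPs, preferring floating-ip service subnets."""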
LOG.info("Check fip pools")
admin_client = utils.get_admin_client()
LOG.info("Get available_ips")
available_ips = admin_client.network.get_network_ip_availability(external_net_id)
REQUEST_INFO_LOGGER.info("Get fip pools %s", available_ips)
subnet_ip_availability_dict = {}
for ip_available in available_ips.subnet_ip_availability:
subnet_ip_availability_dict[ip_available['subnet_id']] = ip_available
LOG.debug("Find floatingip subnets begin")
subnets = admin_client.network.subnets(network_id=external_net_id)
fip_subnet_ids = []
for subnet in subnets:
if FIP_SERVICE_TYPE in subnet.service_types:
fip_subnet_ids.append(subnet.id)
LOG.info("Floaginip subnets %s", fip_subnet_ids)
ip_available_count = 0
if fip_subnet_ids:
for subnet_id in fip_subnet_ids:
                ip_availability = subnet_ip_availability_dict.get(subnet_id)
                if ip_availability:
                    ip_available_count += ip_availability['total_ips'] - ip_availability['used_ips']
        else:
            for ip_availability in subnet_ip_availability_dict.values():
                ip_available_count += ip_availability['total_ips'] - ip_availability['used_ips']
if fip_count > ip_available_count:
return False
return True
@staticmethod
def get_qos_max_kbps(service_fip_object):
"""
Get fip qos max kbps
:param service_fip_object:
:return: max_kbps
"""
REQUEST_INFO_LOGGER.info("Get fip qos max kbps %s", service_fip_object)
        if service_fip_object.shared_qos_policy_type:
            shared_bandwidth = service_model.BmShareBandWidth.objects.filter(
                shared_bandwidth_id=service_fip_object.qos_policy_id, is_measure_end=False).first()
            if not shared_bandwidth:
                REQUEST_INFO_LOGGER.error("No shared bandwidth found, falling back to a measure-end query")
                shared_bandwidth = service_model.BmShareBandWidth.objects.filter(
                    shared_bandwidth_id=service_fip_object.qos_policy_id).first()
if shared_bandwidth:
return shared_bandwidth.max_kbps
else:
return None
else:
qos_policy_name = service_fip_object.qos_policy_name
if not qos_policy_name.endswith("M"):
qos_policy_name = service_model.BmServiceQosPolicy.objects.get(
id=service_fip_object.qos_policy_id).qos_policy_name
return int(qos_policy_name.rstrip("M")) * 1024
class ServiceCmdbProvider(object):
def __init__(self):
self.env_config = EnvConfig()
self.logger = logging.getLogger(__name__)
self.request_info_logger = logging.getLogger("request_info")
self.openstack_provider = OpenstackClientProvider()
self.openstack_client = self.openstack_provider.get_admin_openstack_client()
self.request_cmdb_customize_client = RequestClientTwo(endpoint=self.env_config.cmdb_customize_service,
api_key="")
self.request_cmdb_asset_client = RequestClientTwo(endpoint=self.env_config.cmdb_asset_service,
api_key="")
def cmdb_data_sys_post(self, instance_uuid, zone, project_id):
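        """Push a newly provisioned baremetal node into the CMDB.

        Looks up the Ironic node(s) behind instance_uuid and POSTs the node id,
        zone and project_id to the CMDB customize service. Returns False if any
        node fails to sync.
        """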
is_success = True
try:
query_dict = {"instance_uuid": instance_uuid}
node_obj = self.openstack_client.baremetal.nodes(details=True, **query_dict)
node_info_list = list(node_obj)
for node_info in node_info_list:
dict_info = {
"uuid": node_info.id,
"zone": zone,
"project_id": project_id
}
result = self.request_cmdb_customize_client.baremetal(dict_param=dict_info, method="POST", is_add=True)
self.logger.error("The instance post into cmdb result %s" % result)
if not result.success:
is_success = False
except Exception as ex:
self.logger.error("Fail to sys the instance post into cmdb %s" % ex)
return is_success
def cmdb_data_sys_put(self, instance_uuid, status):
success = True
try:
query_dict = {"instance_uuid": instance_uuid}
node_obj = self.openstack_client.baremetal.nodes(details=True, **query_dict)
node_info_list = list(node_obj)
for node_info in node_info_list:
dict_info = {
"uuid": node_info.id
}
if status:
dict_info["status"] = status
result = self.request_cmdb_customize_client.baremetal(dict_param=dict_info, method="PUT")
self.logger.error("The instance put into cmdb result %s" % result)
if not result.success:
success = False
except Exception as ex:
self.logger.error("Fail to sys the instance put into cmdb %s" % ex)
return success
def cmdb_data_sys_put2(self, instance_uuid, status):
success = True
try:
query_dict = {"instance_uuid": instance_uuid}
node_obj = self.openstack_client.baremetal.nodes(details=True, **query_dict)
node_info_list = list(node_obj)
for node_info in node_info_list:
dict_info = {
"uuid": node_info.id
}
if status:
dict_info["net_status"] = status
result = self.request_cmdb_customize_client.baremetal(dict_param=dict_info, method="PUT")
self.logger.error("The instance put into cmdb result %s" % result)
if not result.success:
success = False
except Exception as ex:
self.logger.error("Fail to sys the instance put into cmdb %s" % ex)
return success
def cmdb_data_sys_del(self, instance_uuid):
try:
query_dict = {"instance_uuid": instance_uuid}
node_obj = self.openstack_client.baremetal.nodes(details=True, **query_dict)
node_info_list = list(node_obj)
for node_info in node_info_list:
result = self.request_cmdb_customize_client.baremetal(dict_param={}, method="DELETE",
url_param_list=[node_info.id + "/"])
self.logger.error("The instance delete into cmdb result %s" % result)
except Exception as ex:
self.logger.error("Fail to sys the instance delete into cmdb %s" % ex)
return True
def cmdb_data_tag_create(self, instance_uuid):
is_success = True
try:
query_dict = {"instance_uuid": instance_uuid}
node_obj = self.openstack_client.baremetal.nodes(details=True, **query_dict)
node_info_list = list(node_obj)
for node_info in node_info_list:
dict_info = {
"abstract_custom": node_info.id,
"tag_name": "zabbix",
"tag_type": "agent"
}
result = self.request_cmdb_asset_client.abstract_tag(dict_param=dict_info, method="POST", is_add=True)
self.logger.error("The instance tag post into cmdb result %s" % result)
if not result.success:
is_success = False
except Exception as ex:
self.logger.error("Fail to sys the tag create into cmdb %s" % ex)
return is_success
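# Usage sketch (illustrative only): a typical CMDB sync after a node is provisioned.
# The instance UUID, zone and project id below are hypothetical placeholders.
#
#   cmdb = ServiceCmdbProvider()
#   cmdb.cmdb_data_sys_post(instance_uuid="11111111-2222-3333-4444-555555555555",
#                           zone="az1", project_id="demo-project")
#   cmdb.cmdb_data_tag_create(instance_uuid="11111111-2222-3333-4444-555555555555")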
class ServiceVolumeProvider(object):
def __init__(self):
pass
def detach_volume_from_server(self, openstack_client, volume_id, server_id,
volume_result=None, server_result=None):
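        """Detach a volume from a server and clear the attachment fields on the local record.

        Baremetal servers (tagged with env_config.baremetal_tag) go through
        baremetal_detach_volume(); other servers use the regular Cinder detach.
        :return: the updated BmServiceVolume record
        """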
bm_service_volume = service_model.BmServiceVolume.objects.get(volume_id=volume_id, is_measure_end=False)
if not server_result:
server_result = openstack_client.compute.get_server(server_id)
if env_config.baremetal_tag in server_result.tags:
baremetal_detach_volume(server_result, volume_id, openstack_client)
# openstack_client.delete_block_mapping(server_id, volume_id)
else:
if not volume_result:
volume_result = openstack_client.get_volume(volume_id)
openstack_client.detach_volume(server=server_result, volume=volume_result)
bm_service_volume.attached_type = None
bm_service_volume.instance_uuid = None
bm_service_volume.instance_name = None
bm_service_volume.save()
return bm_service_volume
def get_volume_quota(self, openstack_client, project_id):
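        """Return the project's remaining volume quota.

        Uses the Cinder quota detail and returns
        {"volume_available_count": <int>, "volume_available_size": <GB>},
        each computed as limit - in_use - reserved.
        """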
LOG.info("Get %s volume quota", project_id)
cinder = volume_client.Client('3.44', session=openstack_client.session)
volume_quota = cinder.quotas.get(project_id, usage=True)
volume_available_count = int(volume_quota.volumes['limit']) - int(volume_quota.volumes['in_use']) - int(
volume_quota.volumes['reserved'])
volume_available_size = int(volume_quota.gigabytes['limit']) - int(volume_quota.gigabytes['in_use']) - int(
volume_quota.gigabytes['reserved'])
return {"volume_available_count": volume_available_count, "volume_available_size": volume_available_size}
def check_volume_quota(self, openstack_client, need_single_size, need_count, project_id):
"""
        Check whether the volume quota and the backend storage pool can satisfy the request.
        :param openstack_client: authenticated OpenStack client
        :param need_single_size: size of a single volume (GB)
        :param need_count: number of volumes requested
        :param project_id: project id
        :return: (check_result, volume_available_count, volume_available_size)
"""
check_result = True
LOG.info("Check volume quota with single size %s, count %s for project %s",
need_single_size, need_count, project_id)
volume_quota = self.get_volume_quota(openstack_client, project_id)
volume_available_count = int(volume_quota.get('volume_available_count'))
volume_available_size = int(volume_quota.get('volume_available_size'))
if need_count > volume_available_count or need_single_size * need_count > volume_available_size:
check_result = False
if not check_result:
return check_result, volume_available_count, volume_available_size
return check_result, None, None
def get_pool_size(self, volume_type='inspure_iscsi'):
LOG.info("Check backend store size")
admin_client = utils.get_admin_client()
cinder = volume_client.Client('2', session=admin_client.session)
pool = cinder.volumes.get_pools(detail=True, )
volume_pools_info = {}
for info in pool._info['pools']:
volume_pools_info[info['capabilities']['volume_backend_name']] = info['capabilities']
# TODO it's only in dev environment
if volume_type not in volume_pools_info:
volume_type = 'tripleo_iscsi'
backend_info = volume_pools_info.get(volume_type, None)
# if not backend_info or backend_info.get('backend_state', 'down') != 'up':
if not backend_info:
raise exc.VolumeTypeInvalid
free_capacity_gb = float(backend_info['free_capacity_gb'])
reserved_percentage = float(backend_info['reserved_percentage'])
max_over_subscription_ratio = float(backend_info['max_over_subscription_ratio'])
pool_available_size = (free_capacity_gb * (100 - reserved_percentage) / 100) * max_over_subscription_ratio
return pool_available_size
def check_pool(self, need_single_size, need_count, volume_type='inspure_iscsi'):
check_result = True
pool_available_size = self.get_pool_size(volume_type=volume_type)
if need_single_size * need_count > pool_available_size:
check_result = False
return check_result
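# Usage sketch (illustrative only): the pre-creation checks a caller might run before
# ordering volumes. The client, sizes, counts and project id are hypothetical examples.
#
#   provider = ServiceVolumeProvider()
#   ok, free_count, free_size = provider.check_volume_quota(
#       openstack_client, need_single_size=100, need_count=2, project_id="demo-project")
#   if ok and provider.check_pool(need_single_size=100, need_count=2):
#       pass  # quota and backend pool can both satisfy the request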
# Material (metering/billing) related interfaces
class BmMaterialProvider(object):
def __init__(self):
pass
@staticmethod
def contract_volume_bak_seasonal(start_date, end_date):
response_obj = ResponseObj()
try:
            # Select all volume backup records in the given date range
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)
volume_list_obj = service_model.BmServiceVolumeBackup.objects.filter(
create_at__lte=end_date,
create_at__gte=start_date)
if not volume_list_obj:
response_obj.content = []
return response_obj
            # Build the material record for each backup
volume_list = []
for volume_obj in volume_list_obj:
                # Service end time
if volume_obj.deleted:
delete_time = volume_obj.updated_at
else:
delete_time = ""
                # Look up contract information
bm_contract_material_obj = auth_models.BmContract.objects.filter(
contract_number=volume_obj.contract_number).first()
if bm_contract_material_obj:
bm_contract_material = bm_contract_material_obj.__dict__
bm_contract_material_customer_id = bm_contract_material.get("customer_id", "")
bm_contract_material_account_id = bm_contract_material.get("account_id", "")
bm_contract_material_customer_name = bm_contract_material.get("customer_name", "")
bm_contract_material_authorizer = bm_contract_material.get("authorizer", "")
bm_contract_material_contract_type = bm_contract_material.get("contract_type", "")
else:
bm_contract_material_customer_id = ""
bm_contract_material_account_id = ""
bm_contract_material_customer_name = ""
bm_contract_material_authorizer = ""
bm_contract_material_contract_type = ""
                # Look up customer information
bm_contract_user = auth_models.BmEnterprise.objects.filter(id=bm_contract_material_customer_id).first()
if not bm_contract_user:
sales = ""
enterprise_number = ""
location = ""
customer_service_name = ""
else:
sales = bm_contract_user.sales
enterprise_number = bm_contract_user.enterprise_number
location = bm_contract_user.location
customer_service_name = bm_contract_user.customer_service_name
                # Look up the contract authorizer
bm_contract_authorizer = auth_models.BmUserInfo.objects.filter(
id=bm_contract_material_account_id).first()
if not bm_contract_authorizer:
account = ""
else:
account = bm_contract_authorizer.account
                # Look up order information
bm_contract_order = service_model.BmServiceOrder.objects.filter(id=volume_obj.order_id).first()
if not bm_contract_order:
region = ""
service_count = ""
else:
region = bm_contract_order.region
service_count = bm_contract_order.service_count
                # Look up the ordering account
bm_account_info = auth_models.BmUserInfo.objects.filter(id=volume_obj.account_id).first()
if not bm_account_info:
user_account = ""
identity_name = ""
else:
user_account = bm_account_info.account
identity_name = bm_account_info.identity_name
                # Look up project information
bm_contract_project = service_model.BmProject.objects.filter(id=volume_obj.project_id).first()
if not bm_contract_project:
project_name = ""
else:
project_name = bm_contract_project.project_name
                # Look up volume backup material information
bm_contract_volume = service_model.BmServiceMaterialVolume.objects.filter(
openstack_volume_type=auth_models.VOLUME_BACK_TYPE).first()
if not bm_contract_volume:
material_number = ""
volume_type_name = ""
base_price = ""
else:
material_number = bm_contract_volume.material_number
volume_type_name = bm_contract_volume.volume_type_name
base_price = str(bm_contract_volume.base_price)
material_volume_dict_all = {
"用户ID": volume_obj.account_id,
"用户邮箱": user_account,
"用户姓名": identity_name,
"合同编码": volume_obj.contract_number,
"备份时间": volume_obj.create_at,
"云硬盘备份名称": volume_obj.backup_name,
"订单号": volume_obj.order_id,
"项目ID": volume_obj.project_id,
"项目名称": project_name,
"云硬盘大小": service_count,
"服务结束时间": delete_time,
"云硬盘id": volume_obj.volume_id,
"客户名称": bm_contract_material_customer_name,
"合同授权人": bm_contract_material_authorizer,
"合同类型": bm_contract_material_contract_type,
"销售姓名": sales,
"公司编码": enterprise_number,
"公司所属区": location,
"客服姓名": customer_service_name,
"合同授权人邮箱": account,
"合同可用区": region,
"云硬盘备份物料编码": material_number,
"云硬盘物料类型": volume_type_name,
"基础价格": base_price
}
volume_list.append(material_volume_dict_all)
response_obj.content = volume_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "云硬盘物料查询失败!",
"admin_info": str(ex)}
return response_obj
def contract_nat_seasonal(self, start_date, end_date):
response_obj = ResponseObj()
try:
            # Select all NAT gateway records in the given date range
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)
nat_list_obj = service_model.BmServiceNatGateway.objects.filter(
create_at__lte=end_date,
create_at__gte=start_date)
if not nat_list_obj:
response_obj.content = []
return response_obj
nat_list = []
for nat_obj in nat_list_obj:
# 查找合同的一些信息
bm_contract_material_obj = auth_models.BmContract.objects.filter(
contract_number=nat_obj.contract_number).first()
if bm_contract_material_obj:
bm_contract_material = bm_contract_material_obj.__dict__
bm_contract_material_customer_id = bm_contract_material.get("customer_id", "")
bm_contract_material_account_id = bm_contract_material.get("account_id", "")
bm_contract_material_customer_name = bm_contract_material.get("customer_name", "")
bm_contract_material_authorizer = bm_contract_material.get("authorizer", "")
bm_contract_material_contract_type = bm_contract_material.get("contract_type", "")
else:
bm_contract_material_customer_id = ""
bm_contract_material_account_id = ""
bm_contract_material_customer_name = ""
bm_contract_material_authorizer = ""
bm_contract_material_contract_type = ""
# 查找客户的信息
bm_contract_user = auth_models.BmEnterprise.objects.filter(id=bm_contract_material_customer_id).first()
if not bm_contract_user:
sales = ""
enterprise_number = ""
location = ""
customer_service_name = ""
else:
sales = bm_contract_user.sales
enterprise_number = bm_contract_user.enterprise_number
location = bm_contract_user.location
customer_service_name = bm_contract_user.customer_service_name
# 查找授权人的信息
bm_contract_authorizer = auth_models.BmUserInfo.objects.filter(
id=bm_contract_material_account_id).first()
if not bm_contract_authorizer:
account = ""
else:
account = bm_contract_authorizer.account
# 查找订单信息
bm_contract_order = service_model.BmServiceOrder.objects.filter(id=nat_obj.order_id).first()
if not bm_contract_order:
region = ""
else:
region = bm_contract_order.region
# 查找下单者的一些信息
bm_account_info = auth_models.BmUserInfo.objects.filter(id=nat_obj.account_id).first()
if not bm_account_info:
user_account = ""
identity_name = ""
else:
user_account = bm_account_info.account
identity_name = bm_account_info.identity_name
# 查找项目信息
bm_contract_project = service_model.BmProject.objects.filter(id=nat_obj.project_id).first()
if not bm_contract_project:
project_name = ""
else:
project_name = bm_contract_project.project_name
                # Look up NAT gateway material information
bm_material_nat = service_model.BmServiceMaterialNat.objects.filter(
charge_type=auth_models.CHAR_TYPE).first()
if not bm_material_nat:
material_number = ""
nat_getway_type_name = ""
base_price = ""
else:
material_number = bm_material_nat.material_number
nat_getway_type_name = bm_material_nat.nat_getway_type_name
base_price = str(bm_material_nat.base_price)
material_nat_dict_all = {
"用户ID": nat_obj.account_id,
"用户邮箱": user_account,
"用户姓名": identity_name,
"合同编码": nat_obj.contract_number,
"创建时间": nat_obj.create_at,
"订单号": nat_obj.order_id,
"项目ID": nat_obj.project_id,
"项目名称": project_name,
"服务结束时间": nat_obj.delete_at,
"NAT网关id": nat_obj.id,
"NAT网关名称": nat_obj.name,
"客户名称": bm_contract_material_customer_name,
"合同授权人": bm_contract_material_authorizer,
"合同类型": bm_contract_material_contract_type,
"销售姓名": sales,
"公司编码": enterprise_number,
"公司所属区": location,
"客服姓名": customer_service_name,
"合同授权人邮箱": account,
"合同可用区": region,
"NAT物料编码": material_number,
"NAT物料类型": nat_getway_type_name,
"基础价格": base_price
}
nat_list.append(material_nat_dict_all)
response_obj.content = nat_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "NAT网关物料查询失败!",
"admin_info": str(ex)}
return response_obj
@staticmethod
def contract_machine_seasonal(start_date, end_date):
response_obj = ResponseObj()
try:
            # Select all instance records in the given date range
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)
instance_list_obj = service_model.BmServiceInstance.objects.filter(task="success", create_at__lte=end_date,
create_at__gte=start_date)
instance_id_list = []
for instance_uuid in instance_list_obj:
if instance_uuid.uuid not in instance_id_list:
instance_id_list.append(instance_uuid.uuid)
            # Collect the machine configuration info for each instance
            machine_list = []
            if instance_id_list:
for instance_id in instance_id_list:
machine_list_obj = service_model.BmServiceMachine.objects.filter(
uuid=instance_id).first()
if not machine_list_obj:
response_obj.message = {
"user_info": "查询为空!uuid不存在"
}
continue
if machine_list_obj.deleted:
delete_at = machine_list_obj.update_at
else:
delete_at = ""
                    # Look up service (instance) information
bm_contract_service_obj = service_model.BmServiceInstance.objects.filter(uuid=instance_id).first()
if bm_contract_service_obj:
bm_contract_service = bm_contract_service_obj.__dict__
bm_contract_service_contract_number = bm_contract_service.get("contract_number", "")
bm_contract_service_account_id = bm_contract_service.get("account_id", "")
bm_contract_service_project_id = bm_contract_service.get("project_id", "")
bm_contract_service_product_type = bm_contract_service.get("product_type", "")
else:
bm_contract_service_contract_number = ""
bm_contract_service_account_id = ""
bm_contract_service_project_id = ""
bm_contract_service_product_type = ""
# 查找合同的一些信息
bm_contract_material_obj = auth_models.BmContract.objects.filter(
contract_number=bm_contract_service_contract_number).first()
if bm_contract_material_obj:
bm_contract_material = bm_contract_material_obj.__dict__
bm_contract_material_customer_id = bm_contract_material.get("customer_id", "")
bm_contract_material_account_id = bm_contract_material.get("account_id", "")
bm_contract_material_customer_name = bm_contract_material.get("customer_name", "")
bm_contract_material_authorizer = bm_contract_material.get("authorizer", "")
bm_contract_material_contract_type = bm_contract_material.get("contract_type", "")
else:
bm_contract_material_customer_id = ""
bm_contract_material_account_id = ""
bm_contract_material_customer_name = ""
bm_contract_material_authorizer = ""
bm_contract_material_contract_type = ""
# 查找客户的信息
bm_contract_user = auth_models.BmEnterprise.objects.filter(
id=bm_contract_material_customer_id).first()
if not bm_contract_user:
sales = ""
enterprise_number = ""
location = ""
customer_service_name = ""
else:
sales = bm_contract_user.sales
enterprise_number = bm_contract_user.enterprise_number
location = bm_contract_user.location
customer_service_name = bm_contract_user.customer_service_name
# 查找授权人的信息
bm_contract_authorizer = auth_models.BmUserInfo.objects.filter(
id=bm_contract_material_account_id).first()
if not bm_contract_authorizer:
account = ""
else:
account = bm_contract_authorizer.account
# 查找订单信息
bm_contract_order = service_model.BmServiceOrder.objects.filter(
id=machine_list_obj.order_id).first()
if not bm_contract_order:
region = ""
service_count = ""
else:
region = bm_contract_order.region
service_count = bm_contract_order.service_count
                    # Look up flavor material information
bm_contract_machine = service_model.BmServiceFlavor.objects.filter(
id=machine_list_obj.flavor_id).first()
if not bm_contract_machine:
material_number = ""
type = ""
base_price = ""
flavor_info = ""
cpu_model = ""
cpu_core = ""
cpu_hz = ""
ram = ""
disk = ""
else:
material_number = bm_contract_machine.material_number
type = bm_contract_machine.type
base_price = str(bm_contract_machine.base_price)
flavor_info = bm_contract_machine.flavor_info
cpu_model = bm_contract_machine.cpu_model
cpu_core = bm_contract_machine.cpu_core
cpu_hz = bm_contract_machine.cpu_hz
ram = bm_contract_machine.ram
disk = bm_contract_machine.disk
# 查找下单者的一些信息
bm_account_info = auth_models.BmUserInfo.objects.filter(id=bm_contract_service_account_id).first()
if not bm_account_info:
user_account = ""
identity_name = ""
else:
user_account = bm_account_info.account
identity_name = bm_account_info.identity_name
# 查找项目信息
bm_contract_project = service_model.BmProject.objects.filter(
id=bm_contract_service_project_id).first()
if not bm_contract_project:
project_name = ""
else:
project_name = bm_contract_project.project_name
material_machine_dict_all = {
"用户ID": bm_contract_service_account_id,
"用户邮箱": user_account,
"用户姓名": identity_name,
"合同编号": bm_contract_service_contract_number,
"机器创建时间": machine_list_obj.create_at,
"flavor名称": machine_list_obj.flavor_name,
"镜像名称": machine_list_obj.image_name,
"是否携带监控": machine_list_obj.monitoring,
"是否携带漏洞扫描": machine_list_obj.vulnerability_scanning,
"网络名称": machine_list_obj.network,
"网络类型名称": machine_list_obj.network_path_type,
"机器名称": machine_list_obj.service_name,
"订单号": machine_list_obj.order_id,
"项目ID": bm_contract_service_project_id,
"项目名称": project_name,
"产品类型": bm_contract_service_product_type,
"机器数量": service_count,
"服务结束时间": delete_at,
"客户名称": bm_contract_material_customer_name,
"合同授权人": bm_contract_material_authorizer,
"合同类型": bm_contract_material_contract_type,
"销售姓名": sales,
"公司编号": enterprise_number,
"公司所属区": location,
"客服姓名": customer_service_name,
"合同授权人邮箱": account,
"合同可用区": region,
"Flavor物料编码": material_number,
"flavor类型": type,
"基础价格": base_price,
"机器配置信息": flavor_info,
"CPU": cpu_model,
"内核": cpu_core,
"主频": cpu_hz,
"内存": ram,
"磁盘信息": disk
}
machine_list.append(material_machine_dict_all)
response_obj.content = machine_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "machine物料查询失败!",
"admin_info": str(ex)}
return response_obj
@staticmethod
def contract_volume_seasonal(start_date, end_date):
response_obj = ResponseObj()
try:
            # Select all volume records in the given date range
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)
volume_list_obj = service_model.BmServiceVolume.objects.filter(
create_at__lte=end_date,
create_at__gte=start_date)
volume_id_list = []
for volume_ids in volume_list_obj:
if volume_ids.volume_id not in volume_id_list:
volume_id_list.append(volume_ids.volume_id)
            # Take the latest record for each volume within the range
volume_list = []
for volume_id_1 in volume_id_list:
volume_list_obj = service_model.BmServiceVolume.objects.filter(
volume_id=volume_id_1, create_at__lte=end_date,
create_at__gte=start_date, ).last()
if not volume_list_obj:
response_obj.message = {
"user_info": "查询为空!"
}
return response_obj
# 查找合同的一些信息
bm_contract_material_obj = auth_models.BmContract.objects.filter(
contract_number=volume_list_obj.contract_number).first()
if bm_contract_material_obj:
bm_contract_material = bm_contract_material_obj.__dict__
bm_contract_material_customer_id = bm_contract_material.get("customer_id", "")
bm_contract_material_account_id = bm_contract_material.get("account_id", "")
bm_contract_material_customer_name = bm_contract_material.get("customer_name", "")
bm_contract_material_authorizer = bm_contract_material.get("authorizer", "")
bm_contract_material_contract_type = bm_contract_material.get("contract_type", "")
else:
bm_contract_material_customer_id = ""
bm_contract_material_account_id = ""
bm_contract_material_customer_name = ""
bm_contract_material_authorizer = ""
bm_contract_material_contract_type = ""
# 查找客户的信息
bm_contract_user = auth_models.BmEnterprise.objects.filter(id=bm_contract_material_customer_id).first()
if not bm_contract_user:
sales = ""
enterprise_number = ""
location = ""
customer_service_name = ""
else:
sales = bm_contract_user.sales
enterprise_number = bm_contract_user.enterprise_number
location = bm_contract_user.location
customer_service_name = bm_contract_user.customer_service_name
# 查找授权人的信息
bm_contract_authorizer = auth_models.BmUserInfo.objects.filter(
id=bm_contract_material_account_id).first()
if not bm_contract_authorizer:
account = ""
else:
account = bm_contract_authorizer.account
# 查找订单信息
bm_contract_order = service_model.BmServiceOrder.objects.filter(id=volume_list_obj.order_id).first()
if not bm_contract_order:
region = ""
else:
region = bm_contract_order.region
                # Look up volume material information
bm_contract_volume = service_model.BmServiceMaterialVolume.objects.filter(
openstack_volume_type=volume_list_obj.volume_type).first()
if not bm_contract_volume:
material_number = ""
volume_type_name = ""
base_price = ""
else:
material_number = bm_contract_volume.material_number
volume_type_name = bm_contract_volume.volume_type_name
base_price = str(bm_contract_volume.base_price)
# 查找下单者的一些信息
bm_account_info = auth_models.BmUserInfo.objects.filter(id=volume_list_obj.account_id).first()
if not bm_account_info:
user_account = ""
identity_name = ""
else:
user_account = bm_account_info.account
identity_name = bm_account_info.identity_name
# 查找项目信息
bm_contract_project = service_model.BmProject.objects.filter(id=volume_list_obj.project_id).first()
if not bm_contract_project:
project_name = ""
else:
project_name = bm_contract_project.project_name
material_volume_dict_all = {
"用户ID": volume_list_obj.account_id,
"用户邮箱": user_account,
"用户姓名": identity_name,
"合同编码": volume_list_obj.contract_number,
"云硬盘创建时间": volume_list_obj.create_at,
"云硬盘名称": volume_list_obj.name,
"订单号": volume_list_obj.order_id,
"项目ID": volume_list_obj.project_id,
"项目名称": project_name,
"云硬盘大小": volume_list_obj.size,
"服务结束时间": volume_list_obj.update_at,
"云硬盘id": volume_list_obj.volume_id,
"云硬盘类型": volume_list_obj.volume_type,
"客户名称": bm_contract_material_customer_name,
"合同授权人": bm_contract_material_authorizer,
"合同类型": bm_contract_material_contract_type,
"销售姓名": sales,
"公司编码": enterprise_number,
"公司所属区": location,
"客服名称": customer_service_name,
"合同授权人邮箱": account,
"合同可用区": region,
"云硬盘物料编码": material_number,
"云硬盘类型名称": volume_type_name,
"基础价格": base_price
}
volume_list.append(material_volume_dict_all)
response_obj.content = volume_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "云硬盘物料查询失败!",
"admin_info": str(ex)}
return response_obj
def material_ip_calculate_in_range(self, start_date, end_date, param_dict=None):
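        """Collect floating IP metering info for every day in [start_date, end_date].

        Dates are '%Y-%m-%d' strings; param_dict is an optional extra filter applied
        to the BmServiceFloatingIp queryset. The content of the returned response
        object is a flat list of per-day metering dicts.
        """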
response_obj = ResponseObj()
try:
            # Query the metering info day by day over the range
material_ip_list = []
begin_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
while begin_date <= end_date:
material_ip_date = datetime.datetime.strftime(begin_date, "%Y-%m-%d")
                # Query the floating IP metering info for this day
material_day_ip_obj = self.material_ip_calculate_in_one_day(
material_ip_date=material_ip_date, param_dict=param_dict)
if not material_day_ip_obj.is_ok:
raise Exception(material_day_ip_obj)
# 查询结果为空跳过
if material_day_ip_obj.content:
material_ip_list.extend(material_day_ip_obj.content)
begin_date += datetime.timedelta(days=1)
response_obj.content = material_ip_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "contract_number 查询失败!",
"admin_info": str(ex)}
return response_obj
    # Query the floating IP metering info for a single day
def material_ip_calculate_in_one_day(self, material_ip_date, param_dict=None):
response_obj = ResponseObj()
try:
            # Query the floating IPs that need to be billed
create_at = datetime.datetime.strptime(material_ip_date, '%Y-%m-%d') + datetime.timedelta(days=1)
floating_ip_id_list_obj = service_model.BmServiceFloatingIp.objects.filter(
create_at__lte=create_at).values('floating_ip_id').annotate(Count=Count('floating_ip_id'))
            # Apply extra filters to the floating IP list
if param_dict:
floating_ip_id_list_obj = floating_ip_id_list_obj.filter(**param_dict).values(
'floating_ip_id').annotate(Count=Count('floating_ip_id'))
            # Find the day's maximum bandwidth and the related IP info
material_ip_list = []
for floating_ip_dict in floating_ip_id_list_obj:
floating_ip_id = floating_ip_dict.get("floating_ip_id")
if not floating_ip_id:
continue
material_day_ip_obj = self.material_ip_day_by_ip_id(floating_ip_id, material_ip_date)
# 查询结果异常 跳出
if not material_day_ip_obj.is_ok:
raise Exception(material_day_ip_obj.message)
# 查询结果为空跳过
if not material_day_ip_obj.content:
continue
material_ip_list.append(material_day_ip_obj.content)
response_obj.content = material_ip_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "contract_number 查询失败!",
"admin_info": str(ex)}
return response_obj
    # Optimized retrieval of floating public IP metering data
def material_ip_seasonal(self, start_date, end_date):
response_obj = ResponseObj()
try:
            # Select all IPs in the current period
material_ip_list = []
material_ip_id_list = []
begin_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d") + datetime.timedelta(days=1)
start_date = begin_date.replace(day=1)
            # IPs created in the current month
floating_ip_id_list_obj = service_model.BmServiceFloatingIp.objects.filter(
create_at__lte=end_date, create_at__gte=start_date).values('floating_ip_id').annotate(
Count=Count('floating_ip_id'))
for floating_ip_dict in floating_ip_id_list_obj:
floating_ip_id = floating_ip_dict.get("floating_ip_id")
if not floating_ip_id:
continue
material_ip_id_list.append(floating_ip_id)
            # IPs created before this month whose billing has not ended must also be billed in this month
floating_ip_id_list_before_obj = service_model.BmServiceFloatingIp.objects.filter(
create_at__lte=start_date, is_measure_end=False).values('floating_ip_id').annotate(
Count=Count('floating_ip_id'))
for floating_ip_dict in floating_ip_id_list_before_obj:
floating_ip_id = floating_ip_dict.get("floating_ip_id")
if not floating_ip_id:
continue
material_ip_id_list.append(floating_ip_id)
while begin_date < end_date:
material_ip_date = datetime.datetime.strftime(begin_date, "%Y-%m-%d")
# 查询弹性IP某一天的计量信息
for floating_ip_id_single in material_ip_id_list:
material_day_ip_obj = self.material_ip_day_by_ip_id(
material_ip_date=material_ip_date, floating_ip_id=floating_ip_id_single)
if not material_day_ip_obj.is_ok:
response_obj.no = 500
response_obj.is_ok = False
response_obj.message = {"user_info": "物料IP信息查询失败!",
"admin_info": material_ip_date}
return response_obj
# 查询结果为空跳过
if material_day_ip_obj.content:
material_ip_list.append(material_day_ip_obj.content)
begin_date += datetime.timedelta(days=1)
response_obj.content = material_ip_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "物料IP信息查询失败!",
"admin_info": str(ex)}
return response_obj
def material_ip_day_by_ip_id(self, floating_ip_id, material_ip_date):
"""
        Query a single floating IP's metering info for a given day.
        :param floating_ip_id: floating IP id
        :param material_ip_date: date string in '%Y-%m-%d' format
        :return: response object whose content is the day's metering dict
"""
response_obj = ResponseObj()
try:
            # Gather all records for this floating IP
floating_ip_list_obj = service_model.BmServiceFloatingIp.objects.filter(
floating_ip_id=floating_ip_id).order_by("create_at")
floating_ip_list = [u.__dict__ for u in floating_ip_list_obj]
ip_first_obj = floating_ip_list_obj.first()
service_start_time = ip_first_obj.create_at
bm_material_ip_null = floating_ip_list_obj.filter(is_measure_end=False)
ip_update_null = [u.__dict__ for u in bm_material_ip_null]
            # Work out the service expiry time
service_expired_time = "-"
for material_ip in floating_ip_list:
if material_ip["status"] == "deleted":
service_expired_time = material_ip["update_at"]
break
            # Filter the records for the requested day
date_ip = material_ip_date
create_at = datetime.datetime.strptime(material_ip_date, '%Y-%m-%d') + datetime.timedelta(days=1)
update_at = datetime.datetime.strptime(material_ip_date, '%Y-%m-%d')
bm_material_ip_info = service_model.BmServiceFloatingIp.objects.filter(
create_at__lte=create_at, update_at__gte=update_at, floating_ip_id=floating_ip_id).order_by("create_at")
material_ip_list_info = [u.__dict__ for u in bm_material_ip_info]
            # Merge in records whose billing has not ended to complete the day's data
if bm_material_ip_null:
if ip_update_null[0]["create_at"] <= create_at:
material_ip_list_info.extend(ip_update_null)
            # Handle an empty result
if not material_ip_list_info:
response_obj.message = {"user_info": "{date_ip}查询为空!".format(date_ip=date_ip)}
return response_obj
            # Find the day's maximum bandwidth
            max_band = -1
            max_band_ip_dict = dict()
            max_band_personal = -1
            max_band_ip_dict_personal = {}
            max_band_ip_dict_share = {}
            for material_ip in material_ip_list_info:
                # Shared bandwidth has no dedicated maximum bandwidth of its own
                if material_ip["shared_qos_policy_type"]:
                    max_band_ip_dict_share = material_ip
                else:
                    material_ip_band = int(material_ip["qos_policy_name"].split("M")[0])
                    if material_ip_band >= max_band_personal:
                        max_band_personal = material_ip_band
                        max_band_ip_dict_personal = material_ip
            if max_band_personal != -1:
                max_band = max_band_personal
                max_band_ip_dict = max_band_ip_dict_personal
            else:
                max_band = 0
                max_band_ip_dict = max_band_ip_dict_share
# 查找合同的一些信息
bm_contract_material_obj = auth_models.BmContract.objects.filter(
contract_number=max_band_ip_dict.get("contract_number")).first()
if bm_contract_material_obj:
bm_contract_material = bm_contract_material_obj.__dict__
bm_contract_material_customer_id = bm_contract_material.get("customer_id", "")
bm_contract_material_account_id = bm_contract_material.get("account_id", "")
bm_contract_material_customer_name = bm_contract_material.get("customer_name", "")
bm_contract_material_authorizer = bm_contract_material.get("authorizer", "")
bm_contract_material_contract_type = bm_contract_material.get("contract_type", "")
bm_contract_material_expire_date = bm_contract_material.get("expire_date", "")
bm_contract_material_start_date = bm_contract_material.get("start_date", "")
else:
bm_contract_material_customer_id = ""
bm_contract_material_account_id = ""
bm_contract_material_customer_name = ""
bm_contract_material_authorizer = ""
bm_contract_material_contract_type = ""
bm_contract_material_start_date = ""
bm_contract_material_expire_date = ""
# 查找客户的信息
bm_contract_user = auth_models.BmEnterprise.objects.filter(id=bm_contract_material_customer_id).first()
if not bm_contract_user:
sales = ""
enterprise_number = ""
location = ""
customer_service_name = ""
else:
sales = bm_contract_user.sales
enterprise_number = bm_contract_user.enterprise_number
location = bm_contract_user.location
customer_service_name = bm_contract_user.customer_service_name
            # Look up floating IP material information
bm_contract_material_ip = service_model.BmContractFloatingIpMaterial.objects.filter(
external_line_type=max_band_ip_dict.get("external_line_type")).first()
if not bm_contract_material_ip:
material_number = ""
floating_ip_type_name = ""
base_price = ""
else:
material_number = bm_contract_material_ip.material_number
floating_ip_type_name = bm_contract_material_ip.floating_ip_type_name
base_price = str(bm_contract_material_ip.base_price)
            # Look up bandwidth material information
if max_band_ip_dict.get("shared_qos_policy_type"):
material_band_material_number = "CBMS-N-S003"
band_width_type_name = "共享带宽"
material_band_base_price = "根据实际共享带宽大小而定"
else:
if max_band > 5:
band_type = "base2"
else:
band_type = "base1"
bm_contract_material_band = service_model.BmContractBandWidthaterial.objects.filter(
band_width_type="band_width", charge_type=band_type).first()
if not bm_contract_material_band:
material_band_material_number = ""
band_width_type_name = ""
material_band_base_price = ""
else:
material_band_material_number = bm_contract_material_band.material_number
band_width_type_name = bm_contract_material_band.band_width_type_name
material_band_base_price = str(bm_contract_material_band.base_price)
# 查找授权人的信息
bm_contract_authorizer = auth_models.BmUserInfo.objects.filter(id=bm_contract_material_account_id).first()
if not bm_contract_authorizer:
account = ""
else:
account = bm_contract_authorizer.account
# 查找下单者的一些信息
bm_account_info = auth_models.BmUserInfo.objects.filter(id=max_band_ip_dict.get("account_id")).first()
if not bm_account_info:
identity_name = ""
user_account = ""
else:
identity_name = bm_account_info.identity_name
user_account = bm_account_info.account
# 查找订单信息
bm_contract_order = service_model.BmServiceOrder.objects.filter(id=max_band_ip_dict.get("order_id")).first()
if not bm_contract_order:
region = ""
else:
region = bm_contract_order.region
# 查找项目信息
bm_contract_project = service_model.BmProject.objects.filter(id=max_band_ip_dict.get("project_id")).first()
project_name = ""
if bm_contract_project:
project_name = bm_contract_project.project_name
material_ip_dict = {
"用户ID": max_band_ip_dict.get("account_id"),
"用户名称": identity_name,
"用户邮箱": user_account,
"合同编号": max_band_ip_dict.get("contract_number"),
"查询日期": date_ip,
"带宽物料编码": material_band_material_number,
"带宽类型": band_width_type_name,
"带宽价格": material_band_base_price,
"IP物料编码": material_number,
"IP物料类型": floating_ip_type_name,
"IP价格": base_price,
"合同可用区": region,
"销售姓名": sales,
"公司编码": enterprise_number,
"公司所属区": location,
"客服姓名": customer_service_name,
"服务到期时间": service_expired_time,
"服务开始时间": service_start_time,
"合同开始时间": bm_contract_material_start_date,
"合同到期时间": bm_contract_material_expire_date,
"客户名称": bm_contract_material_customer_name,
"合同授权人": bm_contract_material_authorizer,
"合同类型": bm_contract_material_contract_type,
"合同授权人邮箱": account,
"IP类型": max_band_ip_dict.get("external_line_type"),
"IP类型名称": max_band_ip_dict.get("external_name"),
"IP类型ID": max_band_ip_dict.get("external_name_id"),
"IP": max_band_ip_dict.get("floating_ip"),
"ip_id": max_band_ip_dict.get("floating_ip_id"),
"订单号": max_band_ip_dict.get("order_id"),
"项目ID": max_band_ip_dict.get("project_id"),
"项目名称": project_name,
"服务状态": max_band_ip_dict.get("status"),
"最大带宽": max_band,
}
response_obj.content = material_ip_dict
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "floating_ip_id %s 查询失败!" % floating_ip_id,
"admin_info": str(ex)}
return response_obj
@staticmethod
def contract_lb_seasonal(start_date, end_date):
response_obj = ResponseObj()
try:
begin_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d") + datetime.timedelta(days=1)
lb_list_obj = service_model.BmServiceLoadbalance.objects.filter(
created_at__lte=end_date,
created_at__gte=begin_date,
)
if not lb_list_obj:
response_obj.content = []
return response_obj
lb_list = []
for lb_obj in lb_list_obj:
                # Work out the LB deletion time
if lb_obj.deleted:
delete_time = lb_obj.updated_at
else:
delete_time = ""
# 查找合同的一些信息
bm_contract_material_obj = auth_models.BmContract.objects.filter(
contract_number=lb_obj.contract_number).first()
if bm_contract_material_obj:
bm_contract_material = bm_contract_material_obj.__dict__
bm_contract_material_customer_id = bm_contract_material.get("customer_id", "")
bm_contract_material_account_id = bm_contract_material.get("account_id", "")
bm_contract_material_customer_name = bm_contract_material.get("customer_name", "")
bm_contract_material_authorizer = bm_contract_material.get("authorizer", "")
bm_contract_material_contract_type = bm_contract_material.get("contract_type", "")
else:
bm_contract_material_customer_id = ""
bm_contract_material_account_id = ""
bm_contract_material_customer_name = ""
bm_contract_material_authorizer = ""
bm_contract_material_contract_type = ""
# 查找客户的信息
bm_contract_user = auth_models.BmEnterprise.objects.filter(id=bm_contract_material_customer_id).first()
if not bm_contract_user:
sales = ""
enterprise_number = ""
location = ""
customer_service_name = ""
else:
sales = bm_contract_user.sales
enterprise_number = bm_contract_user.enterprise_number
location = bm_contract_user.location
customer_service_name = bm_contract_user.customer_service_name
# 查找授权人的信息
bm_contract_authorizer = auth_models.BmUserInfo.objects.filter(
id=bm_contract_material_account_id).first()
if not bm_contract_authorizer:
account = ""
else:
account = bm_contract_authorizer.account
# 查找订单信息
bm_contract_order = service_model.BmServiceOrder.objects.filter(id=lb_obj.order_id).first()
if not bm_contract_order:
region = ""
else:
region = bm_contract_order.region
# 查找下单者的一些信息
bm_account_info = auth_models.BmUserInfo.objects.filter(id=lb_obj.account_id).first()
if not bm_account_info:
user_account = ""
identity_name = ""
else:
user_account = bm_account_info.account
identity_name = bm_account_info.identity_name
# 查找项目信息
bm_contract_project = service_model.BmProject.objects.filter(id=lb_obj.project_id).first()
if not bm_contract_project:
project_name = ""
else:
project_name = bm_contract_project.project_name
                # Look up the bound floating IP address
                ip_adress = ""
                if lb_obj.is_public:
                    bm_floating_ip = service_model.BmServiceFloatingIp.objects.filter(
                        instance_uuid=lb_obj.loadbalance_id).first()
                    if bm_floating_ip:
                        ip_adress = bm_floating_ip.floating_ip
                # Look up LB material information
bm_material_lb = service_model.BmServiceMaterialLb.objects.filter(
charge_type=auth_models.CHAR_TYPE).first()
if not bm_material_lb:
material_number = ""
lb_type_name = ""
base_price = ""
else:
material_number = bm_material_lb.material_number
lb_type_name = bm_material_lb.lb_type_name
base_price = str(bm_material_lb.base_price)
material_lb_dict_all = {
"用户ID": lb_obj.account_id,
"用户邮箱": user_account,
"用户姓名": identity_name,
"合同编码": lb_obj.contract_number,
"创建时间": lb_obj.first_create_at,
"订单号": lb_obj.order_id,
"项目ID": lb_obj.project_id,
"项目名称": project_name,
"服务结束时间": delete_time,
"所绑定的IP地址": ip_adress,
"负载均衡实例id": lb_obj.loadbalance_id,
"LB名称": lb_obj.loadbalance_name,
"客户名称": bm_contract_material_customer_name,
"合同授权人": bm_contract_material_authorizer,
"合同类型": bm_contract_material_contract_type,
"销售姓名": sales,
"公司编码": enterprise_number,
"公司所属区": location,
"客服姓名": customer_service_name,
"合同授权人邮箱": account,
"合同可用区": region,
"LB物料编码": material_number,
"LB物料类型": lb_type_name,
"基础价格": base_price
}
lb_list.append(material_lb_dict_all)
response_obj.content = lb_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "NAT网关物料查询失败!",
"admin_info": str(ex)}
return response_obj
    # Query each material type's daily metering info and store it in the database
def material_into_mysql(self, start_date, end_date):
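        """Collect per-day metering info for all material types and persist it.

        For every day between start_date and end_date the floating IP, volume,
        volume backup, machine, NAT gateway and load balancer metering records
        are computed and written to their corresponding Bm*Seasonal tables.
        Floating IPs are selected from those created since the 1st of the month
        plus those whose billing had not yet ended.
        """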
response_obj = ResponseObj()
try:
            # Select all IPs in the current period
material_ip_id_list = []
begin_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
start_date = begin_date.replace(day=1)
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d") + datetime.timedelta(days=1)
            # IPs created within the given day/period
floating_ip_id_list_obj = service_model.BmServiceFloatingIp.objects.filter(
create_at__lte=end_date, create_at__gte=start_date).values('floating_ip_id').annotate(
Count=Count('floating_ip_id'))
for floating_ip_dict in floating_ip_id_list_obj:
floating_ip_id = floating_ip_dict.get("floating_ip_id")
if not floating_ip_id:
continue
material_ip_id_list.append(floating_ip_id)
            # IPs created before the start date whose billing has not ended must also be billed in this period
floating_ip_id_list_before_obj = service_model.BmServiceFloatingIp.objects.filter(
create_at__lte=start_date, is_measure_end=False).values('floating_ip_id').annotate(
Count=Count('floating_ip_id'))
for floating_ip_dict in floating_ip_id_list_before_obj:
floating_ip_id = floating_ip_dict.get("floating_ip_id")
if not floating_ip_id:
continue
material_ip_id_list.append(floating_ip_id)
while begin_date < end_date:
material_ip_date = datetime.datetime.strftime(begin_date, "%Y-%m-%d")
# 查询弹性IP某一天的计量信息
for floating_ip_id_single in material_ip_id_list:
material_day_ip_obj = self.material_ip_day_by_ip_id(
material_ip_date=material_ip_date, floating_ip_id=floating_ip_id_single)
if not material_day_ip_obj.is_ok:
raise Exception(material_day_ip_obj)
# 查询结果为空跳过
if material_day_ip_obj.content:
float_ip_dict = material_day_ip_obj.content
server_ip__dict = {
"account_id": float_ip_dict["用户ID"],
"identity_name": float_ip_dict["用户名称"],
"user_account": float_ip_dict["用户邮箱"],
"contract_number": float_ip_dict["合同编号"],
"date": float_ip_dict["查询日期"],
"band_material_number": float_ip_dict["带宽物料编码"],
"band_width_type_name": float_ip_dict["带宽类型"],
"material_band_base_price": str(float_ip_dict["带宽价格"]),
"material_number": float_ip_dict["IP物料编码"],
"floating_ip_type_name": float_ip_dict["IP物料类型"],
"base_price": str(float_ip_dict["IP价格"]),
"region": float_ip_dict["合同可用区"],
"sales": float_ip_dict["销售姓名"],
"enterprise_number": float_ip_dict["公司编码"],
"location": float_ip_dict["公司所属区"],
"customer_service_name": float_ip_dict["客服姓名"],
"service_expired_time": float_ip_dict["服务到期时间"],
"service_start_time": float_ip_dict["服务开始时间"],
"contract_start_date": float_ip_dict["合同开始时间"],
"contract_expire_date": float_ip_dict["合同到期时间"],
"contract_customer_name": float_ip_dict["客户名称"],
"contract_authorizer": float_ip_dict["合同授权人"],
"contract_type": float_ip_dict["合同类型"],
"contract_authorizer_account": float_ip_dict["合同授权人邮箱"],
"external_line_type": float_ip_dict["IP类型"],
"external_name": float_ip_dict["IP类型名称"],
"external_name_id": float_ip_dict["IP类型ID"],
"floating_ip": float_ip_dict["IP"],
"floating_ip_id": float_ip_dict["ip_id"],
"order_id": float_ip_dict["订单号"],
"project_id": float_ip_dict["项目ID"],
"project_name": float_ip_dict["项目名称"],
"status": float_ip_dict["服务状态"],
"max_band": float_ip_dict["最大带宽"]
}
ip_result = service_model.BmMaterialFloatingIpSeasonal.objects.create(
**server_ip__dict)
                # Query the volume material info for this day
material_day_volume_obj = self.contract_volume_seasonal(start_date=material_ip_date,
end_date=material_ip_date)
if not material_day_volume_obj.is_ok:
raise Exception(material_day_volume_obj)
# 查询结果为空跳过
if material_day_volume_obj.content:
volume_dict_list = material_day_volume_obj.content
for volume_dict in volume_dict_list:
server_volume__dict = {
"account_id": volume_dict["用户ID"],
"user_account": volume_dict["用户姓名"],
"identity_name": volume_dict["用户邮箱"],
"contract_number": volume_dict["合同编码"],
"create_at": volume_dict["云硬盘创建时间"],
"name": volume_dict["云硬盘名称"],
"order_id": volume_dict["订单号"],
"project_id": volume_dict["项目ID"],
"project_name": volume_dict["项目名称"],
"size": volume_dict["云硬盘大小"],
"end_at": volume_dict["服务结束时间"],
"volume_id": volume_dict["云硬盘id"],
"volume_type": volume_dict["云硬盘类型"],
"contract_customer_name": volume_dict["客户名称"],
"contract_authorizer": volume_dict["合同授权人"],
"contract_type": volume_dict["合同类型"],
"sales": volume_dict["销售姓名"],
"enterprise_number": volume_dict["公司编码"],
"location": volume_dict["公司所属区"],
"customer_service_name": volume_dict["客服名称"],
"account": volume_dict["合同授权人邮箱"],
"region": volume_dict["合同可用区"],
"material_number": volume_dict["云硬盘物料编码"],
"volume_type_name": volume_dict["云硬盘类型名称"],
"base_price": str(volume_dict["基础价格"]),
"date": material_ip_date
}
volume_result = service_model.BmMaterialVolumeSeasonal.objects.create(
**server_volume__dict)
                # Query the volume backup material info for this day
material_day_volume_bak_obj = self.contract_volume_bak_seasonal(start_date=material_ip_date,
end_date=material_ip_date)
if not material_day_volume_bak_obj.is_ok:
raise Exception(material_day_volume_bak_obj)
# 查询结果为空跳过
if material_day_volume_bak_obj.content:
volume_back_list = material_day_volume_bak_obj.content
for volume_back_dict in volume_back_list:
material_volume_back_dict_all = {
"account_id": volume_back_dict["用户ID"],
"user_account": volume_back_dict["用户邮箱"],
"identity_name": volume_back_dict["用户姓名"],
"contract_number": volume_back_dict["合同编码"],
"create_at": volume_back_dict["备份时间"],
"backup_name": volume_back_dict["云硬盘备份名称"],
"order_id": volume_back_dict["订单号"],
"project_id": volume_back_dict["项目ID"],
"project_name": volume_back_dict["项目名称"],
"service_count": volume_back_dict["云硬盘大小"],
"delete_time": volume_back_dict["服务结束时间"],
"volume_id": volume_back_dict["云硬盘id"],
"contract_customer_name": volume_back_dict["客户名称"],
"contract_authorizer": volume_back_dict["合同授权人"],
"contract_type": volume_back_dict["合同类型"],
"sales": volume_back_dict["销售姓名"],
"enterprise_number": volume_back_dict["公司编码"],
"location": volume_back_dict["公司所属区"],
"customer_service_name": volume_back_dict["客服姓名"],
"account": volume_back_dict["合同授权人邮箱"],
"region": volume_back_dict["合同可用区"],
"material_number": volume_back_dict["云硬盘备份物料编码"],
"volume_type_name": volume_back_dict["云硬盘物料类型"],
"base_price": str(volume_back_dict["基础价格"]),
"date": material_ip_date
}
volume_bak_result = service_model.BmMaterialVolumeBakSeasonal.objects.create(
**material_volume_back_dict_all)
                # Query the bare metal machine material info for this day
material_day_machine_obj = self.contract_machine_seasonal(start_date=material_ip_date,
end_date=material_ip_date)
if not material_day_machine_obj.is_ok:
raise Exception(material_day_machine_obj)
# 查询结果为空跳过
if material_day_machine_obj.content:
machine_dict_list = material_day_machine_obj.content
for machine_dict in machine_dict_list:
material_machine_dict_all = {
"account_id": machine_dict["用户ID"],
"user_account": machine_dict["用户邮箱"],
"identity_name": machine_dict["用户姓名"],
"contract_number": machine_dict["合同编号"],
"create_at": machine_dict["机器创建时间"],
"flavor_name": machine_dict["flavor名称"],
"image_name": machine_dict["镜像名称"],
"monitoring": machine_dict["是否携带监控"],
"vulnerability_scanning": machine_dict["是否携带漏洞扫描"],
"network": machine_dict["网络名称"],
"network_path_type": machine_dict["网络类型名称"],
"service_name": machine_dict["机器名称"],
"order_id": machine_dict["订单号"],
"project_id": machine_dict["项目ID"],
"project_name": machine_dict["项目名称"],
"product_type": machine_dict["产品类型"],
"service_count": machine_dict["机器数量"],
"delete_at": machine_dict["服务结束时间"],
"contract_customer_name": machine_dict["客户名称"],
"contract_authorizer": machine_dict["合同授权人"],
"contract_type": machine_dict["合同类型"],
"sales": machine_dict["销售姓名"],
"enterprise_number": machine_dict["公司编号"],
"location": machine_dict["公司所属区"],
"customer_service_name": machine_dict["客服姓名"],
"account": machine_dict["合同授权人邮箱"],
"region": machine_dict["合同可用区"],
"material_number": machine_dict["Flavor物料编码"],
"type": machine_dict["flavor类型"],
"base_price": str(machine_dict["基础价格"]),
"flavor_info": machine_dict["机器配置信息"],
"cpu_model": machine_dict["CPU"],
"cpu_core": machine_dict["内核"],
"cpu_hz": machine_dict["主频"],
"ram": machine_dict["内存"],
"disk": machine_dict["磁盘信息"],
"date": material_ip_date
}
machine_result = service_model.BmMaterialMachineSeasonal.objects.create(
**material_machine_dict_all)
                # Query the NAT gateway material info for this day
material_day_nat_getway_obj = self.contract_nat_seasonal(start_date=material_ip_date,
end_date=material_ip_date)
if not material_day_nat_getway_obj.is_ok:
raise Exception(material_day_nat_getway_obj)
# 查询结果为空跳过
if material_day_nat_getway_obj.content:
net_getway_list = material_day_nat_getway_obj.content
for net_getway_dict in net_getway_list:
material_nat_dict_all = {
"account_id": net_getway_dict["用户ID"],
"user_account": net_getway_dict["用户邮箱"],
"identity_name": net_getway_dict["用户姓名"],
"contract_number": net_getway_dict["合同编码"],
"create_at": net_getway_dict["创建时间"],
"order_id": net_getway_dict["订单号"],
"project_id": net_getway_dict["项目ID"],
"project_name": net_getway_dict["项目名称"],
"delete_at": net_getway_dict["服务结束时间"],
"net_getway_id": net_getway_dict["NAT网关id"],
"net_getway_name": net_getway_dict["NAT网关名称"],
"contract_customer_name": net_getway_dict["客户名称"],
"contract_authorizer": net_getway_dict["合同授权人"],
"contract_type": net_getway_dict["合同类型"],
"sales": net_getway_dict["销售姓名"],
"enterprise_number": net_getway_dict["公司编码"],
"location": net_getway_dict["公司所属区"],
"customer_service_name": net_getway_dict["客服姓名"],
"account": net_getway_dict["合同授权人邮箱"],
"region": net_getway_dict["合同可用区"],
"material_number": net_getway_dict["NAT物料编码"],
"nat_getway_type_name": net_getway_dict["NAT物料类型"],
"base_price": str(net_getway_dict["基础价格"]),
"date": material_ip_date
}
nat_getway_result = service_model.BmMaterialNetGetwaySeasonal.objects.create(
**material_nat_dict_all)
                # Query the load balancer material info for this day
material_day_lb_obj = self.contract_lb_seasonal(start_date=material_ip_date,
end_date=material_ip_date)
if not material_day_lb_obj.is_ok:
raise Exception(material_day_lb_obj)
# 查询结果为空跳过
if material_day_lb_obj.content:
lb_list = material_day_lb_obj.content
for lb_dict in lb_list:
material_lb_dict_all = {
"account_id": lb_dict["用户ID"],
"user_account": lb_dict["用户邮箱"],
"identity_name": lb_dict["用户姓名"],
"contract_number": lb_dict["合同编码"],
"create_at": lb_dict["创建时间"],
"order_id": lb_dict["订单号"],
"project_id": lb_dict["项目ID"],
"project_name": lb_dict["项目名称"],
"delete_at": lb_dict["服务结束时间"],
"ip_adress": lb_dict["所绑定的IP地址"],
"loadbalance_id": lb_dict["负载均衡实例id"],
"loadbalance_name": lb_dict["LB名称"],
"contract_customer_name": lb_dict["客户名称"],
"contract_authorizer": lb_dict["合同授权人"],
"contract_type": lb_dict["合同类型"],
"sales": lb_dict["销售姓名"],
"enterprise_number": lb_dict["公司编码"],
"location": lb_dict["公司所属区"],
"customer_service_name": lb_dict["客服姓名"],
"account": lb_dict["合同授权人邮箱"],
"region": lb_dict["合同可用区"],
"material_number": lb_dict["LB物料编码"],
"lb_type_name": lb_dict["LB物料类型"],
"base_price": str(lb_dict["基础价格"]),
"date": material_ip_date
}
lb_result = service_model.BmMaterialLbSeasonal.objects.create(
**material_lb_dict_all)
begin_date += datetime.timedelta(days=1)
response_obj.content = "数据导入数据库成功!!!"
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "数据导入数据库失败!",
"admin_info": str(ex)}
return response_obj
def get_material_from_mysql(self, start_date, end_date):
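        """Read the persisted per-day metering records for a date range.

        Loads the floating IP, volume, volume backup, machine, NAT gateway and
        load balancer rows written by material_into_mysql and re-keys them into
        the same Chinese-labelled dicts used by the seasonal query methods.
        """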
response_obj = ResponseObj()
try:
ip_all_list = []
volume_all_list = []
volume_bak_all_list = []
machine_all_list = []
nat_all_list = []
lb_all_list = []
            # Parse the query date range
begin_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)
            # Query floating IP metering records for the period
ip_list_obj = service_model.BmMaterialFloatingIpSeasonal.objects.filter(
date__lte=end_date,
date__gte=begin_date)
if ip_list_obj:
for ip_dict in ip_list_obj:
server_ip__dict = {
"用户ID": ip_dict.account_id,
"用户名称": ip_dict.identity_name,
"用户邮箱": ip_dict.user_account,
"合同编号": ip_dict.contract_number,
"查询日期": ip_dict.date,
"带宽物料编码": ip_dict.band_material_number,
"带宽类型": ip_dict.band_width_type_name,
"带宽价格": ip_dict.material_band_base_price,
"IP物料编码": ip_dict.material_number,
"IP物料类型": ip_dict.floating_ip_type_name,
"IP价格": ip_dict.base_price,
"合同可用区": ip_dict.region,
"销售姓名": ip_dict.sales,
"公司编码": ip_dict.enterprise_number,
"公司所属区": ip_dict.location,
"客服姓名": ip_dict.customer_service_name,
"服务到期时间": ip_dict.service_expired_time,
"服务开始时间": ip_dict.service_start_time,
"合同开始时间": ip_dict.contract_start_date,
"合同到期时间": ip_dict.contract_expire_date,
"客户名称": ip_dict.contract_customer_name,
"合同授权人": ip_dict.contract_authorizer,
"合同类型": ip_dict.contract_type,
"合同授权人邮箱": ip_dict.contract_authorizer_account,
"IP类型": ip_dict.external_line_type,
"IP类型名称": ip_dict.external_name,
"IP类型ID": ip_dict.external_name_id,
"IP": ip_dict.floating_ip,
"ip_id": ip_dict.floating_ip_id,
"订单号": ip_dict.order_id,
"项目ID": ip_dict.project_id,
"项目名称": ip_dict.project_name,
"服务状态": ip_dict.status,
"最大带宽": ip_dict.max_band,
}
ip_all_list.append(server_ip__dict)
            # Query volume metering records for the period
volume_list_obj = service_model.BmMaterialVolumeSeasonal.objects.filter(
date__lte=end_date,
date__gte=begin_date)
if volume_list_obj:
for volume_dict in volume_list_obj:
material_volume_dict_all = {
"用户ID": volume_dict.account_id,
"用户邮箱": volume_dict.user_account,
"用户姓名": volume_dict.identity_name,
"合同编码": volume_dict.contract_number,
"云硬盘创建时间": volume_dict.create_at,
"云硬盘名称": volume_dict.name,
"订单号": volume_dict.order_id,
"项目ID": volume_dict.project_id,
"项目名称": volume_dict.project_name,
"云硬盘大小": volume_dict.size,
"服务结束时间": volume_dict.end_at,
"云硬盘id": volume_dict.volume_id,
"云硬盘类型": volume_dict.volume_type,
"客户名称": volume_dict.contract_customer_name,
"合同授权人": volume_dict.contract_authorizer,
"合同类型": volume_dict.contract_type,
"销售姓名": volume_dict.sales,
"公司编码": volume_dict.enterprise_number,
"公司所属区": volume_dict.location,
"客服名称": volume_dict.customer_service_name,
"合同授权人邮箱": volume_dict.account,
"合同可用区": volume_dict.region,
"云硬盘物料编码": volume_dict.material_number,
"云硬盘类型名称": volume_dict.volume_type_name,
"基础价格": volume_dict.base_price
}
volume_all_list.append(material_volume_dict_all)
            # Query volume backup metering records for the period
volume_bak_list_obj = service_model.BmMaterialVolumeBakSeasonal.objects.filter(
date__lte=end_date,
date__gte=begin_date)
if volume_bak_list_obj:
for volume_dict in volume_bak_list_obj:
material_volume_dict_all = {
"用户ID": volume_dict.account_id,
"用户邮箱": volume_dict.user_account,
"用户姓名": volume_dict.identity_name,
"合同编码": volume_dict.volume_obj.contract_number,
"备份时间": volume_dict.volume_obj.create_at,
"云硬盘备份名称": volume_dict.volume_obj.backup_name,
"订单号": volume_dict.volume_obj.order_id,
"项目ID": volume_dict.volume_obj.project_id,
"项目名称": volume_dict.project_name,
"可用区": volume_dict.region,
"云硬盘大小": volume_dict.service_count,
"服务结束时间": volume_dict.delete_time,
"云硬盘id": volume_dict.volume_id,
"客户名称": volume_dict.contract_customer_name,
"合同授权人": volume_dict.contract_authorizer,
"合同类型": volume_dict.contract_type,
"销售姓名": volume_dict.sales,
"公司编码": volume_dict.enterprise_number,
"公司所属区": volume_dict.location,
"客服姓名": volume_dict.customer_service_name,
"合同授权人邮箱": volume_dict.account,
"合同可用区": volume_dict.region,
"云硬盘备份物料编码": volume_dict.material_number,
"云硬盘物料类型": volume_dict.volume_type_name,
"基础价格": volume_dict.base_price
}
volume_bak_all_list.append(material_volume_dict_all)
            # Query bare metal machine metering records for the given time range
machine_list_obj = service_model.BmMaterialMachineSeasonal.objects.filter(
date__lte=end_date,
date__gte=begin_date)
if machine_list_obj:
for machine_dict in machine_list_obj:
material_machine_dict_all = {
"用户ID": machine_dict.account_id,
"用户邮箱": machine_dict.user_account,
"用户姓名": machine_dict.identity_name,
"合同编号": machine_dict.contract_number,
"机器创建时间": machine_dict.create_at,
"flavor名称": machine_dict.flavor_name,
"镜像名称": machine_dict.image_name,
"是否携带监控": machine_dict.monitoring,
"是否携带漏洞扫描": machine_dict.vulnerability_scanning,
"网络名称": machine_dict.network,
"网络类型名称": machine_dict.network_path_type,
"机器名称": machine_dict.service_name,
"订单号": machine_dict.order_id,
"项目ID": machine_dict.project_id,
"项目名称": machine_dict.project_name,
"产品类型": machine_dict.product_type,
"机器数量": machine_dict.service_count,
"服务结束时间": machine_dict.delete_at,
"客户名称": machine_dict.contract_customer_name,
"合同授权人": machine_dict.contract_authorizer,
"合同类型": machine_dict.contract_type,
"销售姓名": machine_dict.sales,
"公司编号": machine_dict.enterprise_number,
"公司所属区": machine_dict.location,
"客服姓名": machine_dict.customer_service_name,
"合同授权人邮箱": machine_dict.account,
"合同可用区": machine_dict.region,
"Flavor物料编码": machine_dict.material_number,
"flavor类型": machine_dict.type,
"基础价格": machine_dict.base_price,
"机器配置信息": machine_dict.flavor_info,
"CPU": machine_dict.cpu_model,
"内核": machine_dict.cpu_core,
"主频": machine_dict.cpu_hz,
"内存": machine_dict.ram,
"磁盘信息": machine_dict.disk
}
machine_all_list.append(material_machine_dict_all)
            # Query NAT gateway metering records for the given time range
nat_list_obj = service_model.BmMaterialNetGetwaySeasonal.objects.filter(
date__lte=end_date,
date__gte=begin_date)
if nat_list_obj:
for nat_obj in nat_list_obj:
material_nat_dict_all = {
"用户ID": nat_obj.account_id,
"用户邮箱": nat_obj.user_account,
"用户姓名": nat_obj.identity_name,
"合同编码": nat_obj.contract_number,
"创建时间": nat_obj.create_at,
"订单号": nat_obj.order_id,
"项目ID": nat_obj.project_id,
"项目名称": nat_obj.project_name,
"服务结束时间": nat_obj.delete_at,
"NAT网关id": nat_obj.net_getway_id,
"NAT网关名称": nat_obj.net_getway_name,
"客户名称": nat_obj.contract_customer_name,
"合同授权人": nat_obj.contract_authorizer,
"合同类型": nat_obj.contract_type,
"销售姓名": nat_obj.sales,
"公司编码": nat_obj.enterprise_number,
"公司所属区": nat_obj.location,
"客服姓名": nat_obj.customer_service_name,
"合同授权人邮箱": nat_obj.account,
"合同可用区": nat_obj.region,
"NAT物料编码": nat_obj.material_number,
"NAT物料类型": nat_obj.nat_getway_type_name,
"基础价格": nat_obj.base_price
}
nat_all_list.append(material_nat_dict_all)
            # Query load balancer metering records for the given time range
lb_list_obj = service_model.BmMaterialLbSeasonal.objects.filter(
date__lte=end_date,
date__gte=begin_date)
if lb_list_obj:
for lb_obj in lb_list_obj:
material_lb_dict_all = {
"用户ID": lb_obj.account_id,
"用户邮箱": lb_obj.user_account,
"用户姓名": lb_obj.identity_name,
"合同编码": lb_obj.contract_number,
"创建时间": lb_obj.create_at,
"订单号": lb_obj.order_id,
"项目ID": lb_obj.project_id,
"项目名称": lb_obj.project_name,
"服务结束时间": lb_obj.delete_at,
"所绑定的IP地址": lb_obj.ip_adress,
"负载均衡实例id": lb_obj.loadbalance_id,
"LB名称": lb_obj.loadbalance_name,
"客户名称": lb_obj.contract_customer_name,
"合同授权人": lb_obj.contract_authorizer,
"合同类型": lb_obj.contract_type,
"销售姓名": lb_obj.sales,
"公司编码": lb_obj.enterprise_number,
"公司所属区": lb_obj.location,
"客服姓名": lb_obj.customer_service_name,
"合同授权人邮箱": lb_obj.account,
"合同可用区": lb_obj.region,
"LB物料编码": lb_obj.material_number,
"LB物料类型": lb_obj.lb_type_name,
"基础价格": lb_obj.base_price
}
lb_all_list.append(material_lb_dict_all)
material_dict = {
"material_ip": ip_all_list,
"material_volume": volume_all_list,
"material_volume_bak": volume_bak_all_list,
"material_machine": machine_all_list,
"material_nat_getway": nat_all_list,
"material_lb": lb_all_list
}
response_obj.content = material_dict
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "数据导出失败!",
"admin_info": str(ex)}
return response_obj
def material_share_band_seasonal(self, start_date, end_date):
response_obj = ResponseObj()
try:
            # Collect all shared bandwidths within the current billing period
material_share_band_list = []
material_share_band_id_list = []
begin_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d") + datetime.timedelta(days=1)
            # Shared bandwidths created within the current month
share_band_list_obj = service_model.BmShareBandWidth.objects.filter(
create_at__lte=end_date, create_at__gte=begin_date).values('shared_bandwidth_id').annotate(
Count=Count('shared_bandwidth_id'))
for share_band_dict in share_band_list_obj:
share_band_id = share_band_dict.get("shared_bandwidth_id")
if not share_band_id:
continue
material_share_band_id_list.append(share_band_id)
            # Shared bandwidths created before this month whose billing has not ended -- these must be added to this month's data
share_band_id_list_before_obj = service_model.BmShareBandWidth.objects.filter(
create_at__lte=begin_date, is_measure_end=False).values('shared_bandwidth_id').annotate(
Count=Count('shared_bandwidth_id'))
for share_band_dict in share_band_id_list_before_obj:
share_band_id = share_band_dict.get("shared_bandwidth_id")
if not share_band_id:
continue
material_share_band_id_list.append(share_band_id)
while begin_date < end_date:
material_date = datetime.datetime.strftime(begin_date, "%Y-%m-%d")
                # Query the per-day metering info for each shared bandwidth
for share_band_id_single in material_share_band_id_list:
material_day_share_band_obj = self.material_share_band_day_by_ip_id(
material_date=material_date, shared_bandwidth_id=share_band_id_single)
if not material_day_share_band_obj.is_ok:
raise Exception(material_day_share_band_obj)
                    # Skip days with empty query results
if material_day_share_band_obj.content:
material_share_band_list.append(material_day_share_band_obj.content)
begin_date += datetime.timedelta(days=1)
response_obj.content = material_share_band_list
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "物料共享带宽信息查询失败!",
"admin_info": str(ex)}
return response_obj
def material_share_band_day_by_ip_id(self, material_date, shared_bandwidth_id):
response_obj = ResponseObj()
try:
            # Prepare the data
share_band_list_obj = service_model.BmShareBandWidth.objects.filter(
shared_bandwidth_id=shared_bandwidth_id).order_by("create_at")
share_band_list = [u.__dict__ for u in share_band_list_obj]
share_band_first_obj = share_band_list_obj.first()
service_start_time = share_band_first_obj.first_create_at
bm_material_share_band_null = share_band_list_obj.filter(update_at__isnull=True)
share_band_update_null = [u.__dict__ for u in bm_material_share_band_null]
            # Work out the service expiry time
bm_material_share_band_delete = share_band_list_obj.filter(status="deleted").first()
if bm_material_share_band_delete:
service_expired_time = bm_material_share_band_delete.deleted_at
else:
service_expired_time = "-"
# for material_share_band in share_band_list:
# if material_share_band["status"] == "deleted":
# service_expired_time = material_share_band["update_at"]
# break
            # Filter out the records for the given day
create_at = datetime.datetime.strptime(material_date, '%Y-%m-%d') + datetime.timedelta(days=1)
update_at = datetime.datetime.strptime(material_date, '%Y-%m-%d')
bm_material_share_band_info = service_model.BmShareBandWidth.objects.filter(
create_at__lte=create_at, update_at__gte=update_at, shared_bandwidth_id=shared_bandwidth_id).order_by(
"create_at")
material_share_band_list_info = [u.__dict__ for u in bm_material_share_band_info]
            # Merge the records to obtain the complete shared-bandwidth data for the day
if bm_material_share_band_null:
if share_band_update_null[0]["create_at"] <= create_at:
material_share_band_list_info.extend(share_band_update_null)
            # Handle the case where no records were found
if not material_share_band_list_info:
response_obj.message = {"user_info": "{material_date}查询为空!".format(material_date=material_date)}
return response_obj
            # Find the maximum bandwidth for the day
max_band = -1
max_band_dict = dict()
for material_share_band in material_share_band_list_info:
material_share_band_size = int(material_share_band["max_kbps"])
if max_band <= material_share_band_size:
max_band = material_share_band_size
max_band_dict = material_share_band
            # Look up contract information
bm_contract_material_obj = auth_models.BmContract.objects.filter(
contract_number=max_band_dict.get("contract_number")).first()
if bm_contract_material_obj:
bm_contract_material = bm_contract_material_obj.__dict__
bm_contract_material_customer_id = bm_contract_material.get("customer_id", "")
bm_contract_material_account_id = bm_contract_material.get("account_id", "")
bm_contract_material_customer_name = bm_contract_material.get("customer_name", "")
bm_contract_material_authorizer = bm_contract_material.get("authorizer", "")
bm_contract_material_contract_type = bm_contract_material.get("contract_type", "")
bm_contract_material_expire_date = bm_contract_material.get("expire_date", "")
bm_contract_material_start_date = bm_contract_material.get("start_date", "")
else:
bm_contract_material_customer_id = ""
bm_contract_material_account_id = ""
bm_contract_material_customer_name = ""
bm_contract_material_authorizer = ""
bm_contract_material_contract_type = ""
bm_contract_material_start_date = ""
bm_contract_material_expire_date = ""
            # Look up customer (enterprise) information
bm_contract_user = auth_models.BmEnterprise.objects.filter(id=bm_contract_material_customer_id).first()
if not bm_contract_user:
sales = ""
enterprise_number = ""
location = ""
customer_service_name = ""
else:
sales = bm_contract_user.sales
enterprise_number = bm_contract_user.enterprise_number
location = bm_contract_user.location
customer_service_name = bm_contract_user.customer_service_name
            # Look up bandwidth material information
if max_band > 5120:
band_type = "base2"
else:
band_type = "base1"
bm_contract_material_band = service_model.BmContractBandWidthaterial.objects.filter(
band_width_type="shared_band_width", charge_type=band_type).first()
if not bm_contract_material_band:
material_band_material_number = ""
band_width_type_name = ""
material_band_base_price = ""
else:
material_band_material_number = bm_contract_material_band.material_number
band_width_type_name = bm_contract_material_band.band_width_type_name
material_band_base_price = str(bm_contract_material_band.base_price)
            # Look up the contract authorizer's information
bm_contract_authorizer = auth_models.BmUserInfo.objects.filter(id=bm_contract_material_account_id).first()
if not bm_contract_authorizer:
account = ""
else:
account = bm_contract_authorizer.account
            # Look up the ordering user's information
bm_account_info = auth_models.BmUserInfo.objects.filter(id=max_band_dict.get("account_id")).first()
if not bm_account_info:
identity_name = ""
user_account = ""
else:
identity_name = bm_account_info.identity_name
user_account = bm_account_info.account
            # Look up order information
bm_contract_order = service_model.BmServiceOrder.objects.filter(id=max_band_dict.get("order_id")).first()
if not bm_contract_order:
region = ""
else:
region = bm_contract_order.region
            # Look up project information
bm_contract_project = service_model.BmProject.objects.filter(id=max_band_dict.get("project_id")).first()
project_name = ""
if bm_contract_project:
project_name = bm_contract_project.project_name
material_share_band_dict = {
"用户ID": max_band_dict.get("account_id"),
"用户名称": identity_name,
"用户邮箱": user_account,
"合同编号": max_band_dict.get("contract_number"),
"查询日期": material_date,
"带宽物料编码": material_band_material_number,
"带宽类型": band_width_type_name,
"带宽价格": material_band_base_price,
"合同可用区": region,
"销售姓名": sales,
"公司编码": enterprise_number,
"公司所属区": location,
"客服姓名": customer_service_name,
"服务到期时间": service_expired_time,
"服务开始时间": service_start_time,
"合同开始时间": bm_contract_material_start_date,
"合同到期时间": bm_contract_material_expire_date,
"客户名称": bm_contract_material_customer_name,
"合同授权人": bm_contract_material_authorizer,
"合同类型": bm_contract_material_contract_type,
"合同授权人邮箱": account,
"共享带宽id": max_band_dict.get("shared_bandwidth_id"),
"共享带宽名称": max_band_dict.get("name"),
"订单号": max_band_dict.get("order_id"),
"项目ID": max_band_dict.get("project_id"),
"项目名称": project_name,
"最大带宽": max_band,
}
response_obj.content = material_share_band_dict
except Exception as ex:
response_obj.no = 505
response_obj.is_ok = False
response_obj.message = {"user_info": "shared_bandwidth_id %s 查询失败!" % shared_bandwidth_id,
"admin_info": str(ex)}
return response_obj
class BmTimeTaskProvider(object):
def __init__(self):
self.__db_conn = connections['default']
def query_service_instance_in_use(self):
model_list = ["01-客户名称", "02-客户公司", "03-账号名称", "04-账号邮箱", "05-项目", "06-合同号",
"07-服务器类型", "08-物料编码", "09-服务器镜像", "10-服务器名称", "11-服务UUID",
"12-服务器状态", "13-服务器开通时间", "14-合同截至日期"]
sql = """
SELECT
bm_contract.authorizer, bm_contract.customer_name, bm_userinfo.identity_name, bm_userinfo.account,
bm_project.project_name, bm_contract.contract_number,
bm_service_flavor.type, bm_service_flavor.material_number, bm_service_machine.image_name,
bm_service_machine.service_name, bm_service_instance.uuid, bm_service_instance.`status`,
bm_service_instance.create_at, bm_contract.expire_date
from bm_service_instance
LEFT JOIN bm_service_machine on bm_service_instance.uuid = bm_service_machine.uuid
left join bm_service_flavor on bm_service_flavor.id = bm_service_machine.flavor_id
left JOIN bm_userinfo on bm_service_instance.account_id = bm_userinfo.id
LEFT join bm_project on bm_service_instance.project_id = bm_project.id
LEFT JOIN bm_contract on bm_service_instance.contract_number = bm_contract.contract_number
where bm_service_instance.`status` in ("SHUTOFF", "active") ORDER BY authorizer
"""
with self.__db_conn.cursor() as cursor:
cursor.execute(sql)
result_ids = cursor.fetchall()
dict_result = list(map(lambda x: dict(zip(model_list, x)), result_ids))
return dict_result
```
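The seasonal metering methods above all follow the same day-by-day pattern: parse the start and end dates, make the end date inclusive by adding one day, then walk the range one day at a time, collecting a per-day record and skipping empty days. Below is a minimal standalone sketch of that loop; `collect_day` is a hypothetical callback standing in for per-day query helpers such as `material_share_band_day_by_ip_id`.
```python
import datetime

def iterate_days(start_date, end_date, collect_day):
    """Walk the inclusive [start_date, end_date] range one day at a time."""
    begin = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    # The methods above make end_date inclusive by adding one day before looping.
    end = datetime.datetime.strptime(end_date, "%Y-%m-%d") + datetime.timedelta(days=1)
    records = []
    while begin < end:
        day = datetime.datetime.strftime(begin, "%Y-%m-%d")
        record = collect_day(day)  # hypothetical per-day query, may return None
        if record:
            records.append(record)
        begin += datetime.timedelta(days=1)
    return records

# Example: iterate_days("2019-01-01", "2019-01-03", lambda d: {"date": d}) returns three records.
```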
#### File: common/emial_server/send_email.py
```python
import smtplib
import urllib.request
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import common.lark_common.model.common_model as common_model
import common.lark_common.utils as lark_utils
from jinja2 import Template
from BareMetalControllerBackend.conf import env
class ModelEmailNotification:
def __init__(self):
self.env_config = env.EnvConfig()
self.from_address = None
self.to_address = []
self.cc_address = []
self.bcc_address = []
self.subject = None
self.email_template_uri = None
self.image_uri = {}
self.parameter = {}
self.smtp_server = self.env_config.notification_smtp_server
self.smtp_ssl_port = self.env_config.notification_smtp_ssl_port
self.smtp_login_required = self.env_config.notification_smtp_login_required
self.smtp_ssl_enable = self.env_config.notification_smtp_ssl_enable
self.smtp_username = self.env_config.notification_smtp_username
self.smtp_password = self.env_config.notification_smtp_password
class ProviderEmailNotification(object):
def __init__(self, model_notification=None):
self.model_notification = model_notification
self.env_config = env.EnvConfig()
def __generate_body(self):
"""
        Generate the email body from the template and parameters held by the notification model.
        :return: the rendered email body
"""
response = common_model.ResponseObj()
try:
email_template_stream = urllib.request.urlopen(self.model_notification.email_template_uri).read().decode('utf-8')
# email_template_stream = urllib.request.install_opener("static/reset_apssword.html").read().decode('utf-8')
email_template = Template(email_template_stream)
parameter_dict = self.model_notification.parameter
if not isinstance(self.model_notification.parameter, dict):
parameter_dict = lark_utils.JsonUtils.convert_object_to_dict(self.model_notification.parameter)
email_body = email_template.render(parameter_dict)
response.is_ok = True
response.content = email_body
except Exception as ex:
# self.env_config.logger.error(ex)
response.is_ok = False
response.message = u"email template is emtpy or can not be downloaded."
return response
return response
def __generate_message(self):
response = common_model.ResponseObj()
msg = MIMEMultipart('related')
response_body = self.__generate_body()
if not response_body.is_ok:
return response_body
content = MIMEText(response_body.content, 'html', 'utf-8')
msg.attach(content)
msg['Subject'] = self.model_notification.subject
msg['From'] = self.model_notification.from_address
msg['To'] = ','.join(self.model_notification.to_address)
msg['Cc'] = ','.join(self.model_notification.cc_address)
msg['Bcc'] = ','.join(self.model_notification.bcc_address)
try:
image_uri_dict = self.model_notification.image_uri
if image_uri_dict and len(image_uri_dict) > 0:
for (image_key, image_uri) in image_uri_dict.items():
image_content = urllib.request.urlopen(image_uri).read()
mime_image = MIMEImage(image_content)
mime_image.add_header('Content-ID', image_key)
msg.attach(mime_image)
except Exception as ex:
return ex
# self.env_config.logger.error("can not download and read image. " + str(ex))
response.content = msg.as_string()
response.is_ok = True
return response
def send(self):
response_obj = common_model.ResponseObj()
try:
            # Build the email subject and body
response_message = self.__generate_message()
if not response_message.is_ok:
return response_message
            # Recipient list
receiver = self.model_notification.to_address + self.model_notification.cc_address + self.model_notification.bcc_address
            # Connect to the SMTP server
if not self.model_notification.smtp_ssl_enable:
smtp_server = smtplib.SMTP(self.model_notification.smtp_server)
else:
smtp_server = smtplib.SMTP_SSL(self.model_notification.smtp_server, self.model_notification.smtp_ssl_port)
            # Log in to the SMTP server
if self.model_notification.smtp_login_required:
smtp_server.login(self.model_notification.smtp_username, self.model_notification.smtp_password)
            # Send the email
smtp_server.sendmail(self.model_notification.from_address, receiver, msg=response_message.content)
smtp_server.quit()
except Exception as ex:
response_obj.message = "Fail to send email! Error : %s " % str(ex)
response_obj.is_ok = False
response_obj.no = 500
return response_obj
response_obj.is_ok = True
response_obj.content = "email send success!"
return response_obj
```
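A minimal usage sketch of the notification classes above, assuming the environment config can be loaded, the SMTP server is reachable, and the Jinja2 template is downloadable over HTTP; the addresses and URLs below are placeholders.
```python
notification = ModelEmailNotification()
notification.from_address = "noreply@example.com"       # placeholder sender
notification.to_address = ["user@example.com"]          # placeholder recipient list
notification.subject = "Password reset"
notification.email_template_uri = "http://example.com/reset_password.html"  # placeholder template URL
notification.parameter = {"username": "demo"}           # values rendered into the Jinja2 template

result = ProviderEmailNotification(model_notification=notification).send()
if not result.is_ok:
    print(result.message)
```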
#### File: innerpyapollo/cache/filecache.py
```python
"File-based cache backend"
import errno
import glob
import io
import json
import os
import tempfile
import zlib
from .basecache import BaseCache
try:
import cPickle as pickle
except ImportError:
import pickle
class FileBasedCache(BaseCache):
cache_suffix = '.cfg.cache'
def __init__(self, dir):
super(FileBasedCache, self).__init__()
self._dir = os.path.abspath(dir)
self._createdir()
def refresh(self, namespace, value):
self._createdir() # Cache dir can be deleted at any time.
fname = self._ns_to_file(namespace)
fd, tmp_path = tempfile.mkstemp(dir=self._dir)
renamed = False
try:
with io.open(fd, 'wb') as f:
f.write(zlib.compress(pickle.dumps(value), -1))
os.rename(tmp_path, fname)
renamed = True
finally:
if not renamed:
os.remove(tmp_path)
def load(self, namespace, default=None):
fname = self._ns_to_file(namespace)
if os.path.exists(fname):
try:
with io.open(fname, 'rb') as f:
return pickle.loads(zlib.decompress(f.read()))
except IOError as e:
if e.errno == errno.ENOENT:
pass # Cache file was removed after the exists check
return default
def delete(self, namespace):
return self._delete(self._ns_to_file(namespace))
def _delete(self, fname):
if not fname.startswith(self._dir) or not os.path.exists(fname):
return
try:
os.remove(fname)
except OSError as e:
# ENOENT can happen if the cache file is removed (by another
# process) after the os.path.exists check.
if e.errno != errno.ENOENT:
raise
def exists(self, namespace):
fname = self._ns_to_file(namespace)
if os.path.exists(fname):
return True
return False
def _createdir(self):
if not os.path.exists(self._dir):
try:
os.makedirs(self._dir, 0o700)
except OSError as e:
if e.errno != errno.EEXIST:
raise EnvironmentError("Cache directory '%s' does not exist and could not be created'" % self._dir)
def _ns_to_file(self, namespace):
"""
        Convert a namespace into a cache file path. Basically this is the
        root cache path joined with the namespace and the cache suffix.
"""
return os.path.join(self._dir, ''.join([namespace, self.cache_suffix]))
def clear(self):
"""
Remove all the cache files.
"""
if not os.path.exists(self._dir):
return
for fname in self._list_cache_files():
self._delete(fname)
def _list_cache_files(self):
"""
Get a list of paths to all the cache files. These are all the files
in the root cache dir that end on the cache_suffix.
"""
if not os.path.exists(self._dir):
return []
return [os.path.join(self._dir, fname) for fname in glob.glob1(self._dir, '*%s' % self.cache_suffix)]
if __name__ == '__main__':
data = json.loads(
'{"a":1,"cluster": "default", "namespaceName": "application", "releaseKey": "20180724164952-d3ee4cedfd6bde9e", "configurations": {"test1": "aaaa", "switch": "123"}, "appId": "1001"}'
)
cache = FileBasedCache(dir="/home/vagrant/test/cache")
cache.refresh("ns1", data)
val = cache.load("ns1")
print(val, type(val))
```
#### File: compute/v2/keypair.py
```python
from openstack import resource
class Keypair(resource.Resource):
resource_key = 'keypair'
resources_key = 'keypairs'
base_path = '/os-keypairs'
# capabilities
allow_create = True
allow_fetch = True
allow_delete = True
allow_list = True
# Properties
#: The short fingerprint associated with the ``public_key`` for
#: this keypair.
fingerprint = resource.Body('fingerprint')
# NOTE: There is in fact an 'id' field. However, it's not useful
# because all operations use the 'name' as an identifier.
# Additionally, the 'id' field only appears *after* creation,
# so suddenly you have an 'id' field filled in after the fact,
# and it just gets in the way. We need to cover this up by listing
# name as alternate_id and listing id as coming from name.
#: The id identifying the keypair
id = resource.Body('name')
#: A name identifying the keypair
name = resource.Body('name', alternate_id=True)
#: The private key for the keypair
private_key = resource.Body('private_key')
#: The SSH public key that is paired with the server.
public_key = resource.Body('public_key')
def _consume_attrs(self, mapping, attrs):
# TODO(mordred) This should not be required. However, without doing
# it **SOMETIMES** keypair picks up id and not name. This is a hammer.
if 'id' in attrs:
attrs.setdefault('name', attrs.pop('id'))
return super(Keypair, self)._consume_attrs(mapping, attrs)
@classmethod
def list(cls, session, paginated=False, base_path=None):
if base_path is None:
base_path = cls.base_path
resp = session.get(base_path,
headers={"Accept": "application/json"})
resp = resp.json()
resp = resp[cls.resources_key]
for data in resp:
value = cls.existing(**data[cls.resource_key])
yield value
```
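A brief sketch of exercising the overridden `list` classmethod above, assuming `sess` is a session adapter bound to the compute endpoint (for example the `conn.compute` proxy of an authenticated openstacksdk connection); each yielded `Keypair` exposes the server-side name as both `name` and `id`, as the comments above explain.
```python
# `conn` is assumed to be an authenticated openstack.connection.Connection.
sess = conn.compute
for kp in Keypair.list(sess):
    print(kp.name, kp.fingerprint)
```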
#### File: image/v2/_proxy.py
```python
import json
import jsonpatch
import operator
import time
import warnings
from openstack.cloud import exc
from openstack import exceptions
from openstack.image import _base_proxy
from openstack.image.v2 import image as _image
from openstack.image.v2 import member as _member
from openstack.image.v2 import schema as _schema
from openstack.image.v2 import task as _task
from openstack import resource
from openstack import utils
# Rackspace returns this for intermittent import errors
_IMAGE_ERROR_396 = "Image cannot be imported. Error code: '396'"
_INT_PROPERTIES = ('min_disk', 'min_ram', 'size', 'virtual_size')
class Proxy(_base_proxy.BaseImageProxy):
def import_image(self, image, method='glance-direct', uri=None):
"""Import data to an existing image
        The interoperable image import process was introduced in Image API
        v2.6. It mainly allows an image to be imported from an external URL
        and lets the Image service download it by itself, without sending
        binary data at image creation.
:param image: The value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:param method: Method to use for importing the image.
A valid value is glance-direct or web-download.
:param uri: Required only if using the web-download import method.
This url is where the data is made available to the Image
service.
:returns: None
"""
image = self._get_resource(_image.Image, image)
# as for the standard image upload function, container_format and
# disk_format are required for using image import process
if not all([image.container_format, image.disk_format]):
raise exceptions.InvalidRequest(
"Both container_format and disk_format are required for"
" importing an image")
image.import_image(self, method=method, uri=uri)
def upload_image(self, container_format=None, disk_format=None,
data=None, **attrs):
"""Create and upload a new image from attributes
.. warning:
This method is deprecated - and also doesn't work very well.
Please stop using it immediately and switch to
`create_image`.
:param container_format: Format of the container.
A valid value is ami, ari, aki, bare,
ovf, ova, or docker.
:param disk_format: The format of the disk. A valid value is ami,
ari, aki, vhd, vmdk, raw, qcow2, vdi, or iso.
:param data: The data to be uploaded as an image.
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.image.v2.image.Image`,
comprised of the properties on the Image class.
:returns: The results of image creation
:rtype: :class:`~openstack.image.v2.image.Image`
"""
warnings.warn("upload_image is deprecated. Use create_image instead.")
# container_format and disk_format are required to be set
# on the image by the time upload_image is called, but they're not
# required by the _create call. Enforce them here so that we don't
# need to handle a failure in _create, as upload_image will
# return a 400 with a message about disk_format and container_format
# not being set.
if not all([container_format, disk_format]):
raise exceptions.InvalidRequest(
"Both container_format and disk_format are required")
img = self._create(_image.Image, disk_format=disk_format,
container_format=container_format,
**attrs)
# TODO(briancurtin): Perhaps we should run img.upload_image
# in a background thread and just return what is called by
# self._create, especially because the upload_image call doesn't
# return anything anyway. Otherwise this blocks while uploading
# significant amounts of image data.
img.data = data
img.upload(self)
return img
def _upload_image(
self, name, filename=None,
meta=None, **kwargs):
# We can never have nice things. Glance v1 took "is_public" as a
# boolean. Glance v2 takes "visibility". If the user gives us
# is_public, we know what they mean. If they give us visibility, they
        # know what they mean.
if 'is_public' in kwargs['properties']:
is_public = kwargs['properties'].pop('is_public')
if is_public:
kwargs['visibility'] = 'public'
else:
kwargs['visibility'] = 'private'
try:
# This makes me want to die inside
if self._connection.image_api_use_tasks:
return self._upload_image_task(
name, filename,
meta=meta, **kwargs)
else:
return self._upload_image_put(
name, filename, meta=meta,
**kwargs)
except exc.OpenStackCloudException:
self._connection.log.debug("Image creation failed", exc_info=True)
raise
except Exception as e:
raise exc.OpenStackCloudException(
"Image creation failed: {message}".format(message=str(e)))
def _make_v2_image_params(self, meta, properties):
ret = {}
for k, v in iter(properties.items()):
if k in _INT_PROPERTIES:
ret[k] = int(v)
elif k == 'protected':
ret[k] = v
else:
if v is None:
ret[k] = None
else:
ret[k] = str(v)
ret.update(meta)
return ret
def _upload_image_put(
self, name, filename, meta, wait, timeout, **image_kwargs):
image_data = open(filename, 'rb')
properties = image_kwargs.pop('properties', {})
image_kwargs.update(self._make_v2_image_params(meta, properties))
image_kwargs['name'] = name
data = self.post('/images', json=image_kwargs)
image = self._connection._get_and_munchify(key=None, data=data)
try:
response = self.put(
'/images/{id}/file'.format(id=image.id),
headers={'Content-Type': 'application/octet-stream'},
data=image_data)
exceptions.raise_from_response(response)
except Exception:
self._connection.log.debug(
"Deleting failed upload of image %s", name)
try:
response = self.delete(
'/images/{id}'.format(id=image.id))
exceptions.raise_from_response(response)
except exc.OpenStackCloudHTTPError:
# We're just trying to clean up - if it doesn't work - shrug
self._connection.log.warning(
"Failed deleting image after we failed uploading it.",
exc_info=True)
raise
return self._connection._normalize_image(image)
def _upload_image_task(
self, name, filename,
wait, timeout, meta, **image_kwargs):
if not self._connection.has_service('object-store'):
raise exc.OpenStackCloudException(
"The cloud {cloud} is configured to use tasks for image"
" upload, but no object-store service is available."
" Aborting.".format(cloud=self._connection.config.name))
properties = image_kwargs.pop('properties', {})
md5 = properties[self._connection._IMAGE_MD5_KEY]
sha256 = properties[self._connection._IMAGE_SHA256_KEY]
container = properties[
self._connection._IMAGE_OBJECT_KEY].split('/', 1)[0]
image_kwargs.update(properties)
image_kwargs.pop('disk_format', None)
image_kwargs.pop('container_format', None)
self._connection.create_container(container)
self._connection.create_object(
container, name, filename,
md5=md5, sha256=sha256,
metadata={self._connection._OBJECT_AUTOCREATE_KEY: 'true'},
**{'content-type': 'application/octet-stream'})
# TODO(mordred): Can we do something similar to what nodepool does
# using glance properties to not delete then upload but instead make a
# new "good" image and then mark the old one as "bad"
task_args = dict(
type='import', input=dict(
import_from='{container}/{name}'.format(
container=container, name=name),
image_properties=dict(name=name)))
data = self.post('/tasks', json=task_args)
glance_task = self._connection._get_and_munchify(key=None, data=data)
self._connection.list_images.invalidate(self)
if wait:
start = time.time()
image_id = None
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for the image to import."):
if image_id is None:
response = self.get(
'/tasks/{id}'.format(id=glance_task.id))
status = self._connection._get_and_munchify(
key=None, data=response)
if status['status'] == 'success':
image_id = status['result']['image_id']
image = self._connection.get_image(image_id)
if image is None:
continue
self.update_image_properties(
image=image, meta=meta, **image_kwargs)
self._connection.log.debug(
"Image Task %s imported %s in %s",
glance_task.id, image_id, (time.time() - start))
# Clean up after ourselves. The object we created is not
# needed after the import is done.
self._connection.delete_object(container, name)
return self._connection.get_image(image_id)
elif status['status'] == 'failure':
if status['message'] == _IMAGE_ERROR_396:
glance_task = self.post('/tasks', data=task_args)
self._connection.list_images.invalidate(self)
else:
# Clean up after ourselves. The image did not import
# and this isn't a 'just retry' error - glance didn't
# like the content. So we don't want to keep it for
# next time.
self._connection.delete_object(container, name)
raise exc.OpenStackCloudException(
"Image creation failed: {message}".format(
message=status['message']),
extra_data=status)
else:
return glance_task
def _update_image_properties(self, image, meta, properties):
img_props = image.properties.copy()
for k, v in iter(self._make_v2_image_params(meta, properties).items()):
if image.get(k, None) != v:
img_props[k] = v
if not img_props:
return False
headers = {
'Content-Type': 'application/openstack-images-v2.1-json-patch'}
patch = sorted(list(jsonpatch.JsonPatch.from_diff(
image.properties, img_props)), key=operator.itemgetter('value'))
# No need to fire an API call if there is an empty patch
if patch:
self.patch(
'/images/{id}'.format(id=image.id),
headers=headers,
data=json.dumps(patch))
self._connection.list_images.invalidate(self._connection)
return True
def _existing_image(self, **kwargs):
return _image.Image.existing(connection=self._connection, **kwargs)
def download_image(self, image, stream=False):
"""Download an image
This will download an image to memory when ``stream=False``, or allow
streaming downloads using an iterator when ``stream=True``.
For examples of working with streamed responses, see
:ref:`download_image-stream-true`.
:param image: The value can be either the ID of an image or a
:class:`~openstack.image.v2.image.Image` instance.
:param bool stream: When ``True``, return a :class:`requests.Response`
instance allowing you to iterate over the
response data stream instead of storing its entire
contents in memory. See
:meth:`requests.Response.iter_content` for more
details. *NOTE*: If you do not consume
the entirety of the response you must explicitly
call :meth:`requests.Response.close` or otherwise
risk inefficiencies with the ``requests``
library's handling of connections.
When ``False``, return the entire
contents of the response.
:returns: The bytes comprising the given Image when stream is
False, otherwise a :class:`requests.Response`
instance.
"""
image = self._get_resource(_image.Image, image)
return image.download(self, stream=stream)
def delete_image(self, image, ignore_missing=True):
"""Delete an image
:param image: The value can be either the ID of an image or a
:class:`~openstack.image.v2.image.Image` instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the image does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent image.
:returns: ``None``
"""
self._delete(_image.Image, image, ignore_missing=ignore_missing)
def find_image(self, name_or_id, ignore_missing=True):
"""Find a single image
:param name_or_id: The name or ID of a image.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:returns: One :class:`~openstack.image.v2.image.Image` or None
"""
return self._find(_image.Image, name_or_id,
ignore_missing=ignore_missing)
def get_image(self, image):
"""Get a single image
:param image: The value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:returns: One :class:`~openstack.image.v2.image.Image`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_image.Image, image)
def images(self, **query):
"""Return a generator of images
:param kwargs query: Optional query parameters to be sent to limit
the resources being returned.
:returns: A generator of image objects
:rtype: :class:`~openstack.image.v2.image.Image`
"""
return self._list(_image.Image, **query)
def update_image(self, image, **attrs):
"""Update a image
:param image: Either the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:attrs kwargs: The attributes to update on the image represented
by ``value``.
:returns: The updated image
:rtype: :class:`~openstack.image.v2.image.Image`
"""
return self._update(_image.Image, image, **attrs)
def deactivate_image(self, image):
"""Deactivate an image
:param image: Either the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:returns: None
"""
image = self._get_resource(_image.Image, image)
image.deactivate(self)
def reactivate_image(self, image):
"""Deactivate an image
:param image: Either the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:returns: None
"""
image = self._get_resource(_image.Image, image)
image.reactivate(self)
def add_tag(self, image, tag):
"""Add a tag to an image
:param image: The value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance
that the member will be created for.
:param str tag: The tag to be added
:returns: None
"""
image = self._get_resource(_image.Image, image)
image.add_tag(self, tag)
def remove_tag(self, image, tag):
"""Remove a tag to an image
:param image: The value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance
that the member will be created for.
:param str tag: The tag to be removed
:returns: None
"""
image = self._get_resource(_image.Image, image)
image.remove_tag(self, tag)
def add_member(self, image, **attrs):
"""Create a new member from attributes
:param image: The value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance
that the member will be created for.
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.image.v2.member.Member`,
comprised of the properties on the Member class.
:returns: The results of member creation
:rtype: :class:`~openstack.image.v2.member.Member`
"""
image_id = resource.Resource._get_id(image)
return self._create(_member.Member, image_id=image_id, **attrs)
def remove_member(self, member, image, ignore_missing=True):
"""Delete a member
:param member: The value can be either the ID of a member or a
:class:`~openstack.image.v2.member.Member` instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the member does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent member.
:returns: ``None``
"""
image_id = resource.Resource._get_id(image)
member_id = resource.Resource._get_id(member)
self._delete(_member.Member, member_id=member_id, image_id=image_id,
ignore_missing=ignore_missing)
def find_member(self, name_or_id, image, ignore_missing=True):
"""Find a single member
:param name_or_id: The name or ID of a member.
:param image: This is the image that the member belongs to,
the value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:returns: One :class:`~openstack.image.v2.member.Member` or None
"""
image_id = resource.Resource._get_id(image)
return self._find(_member.Member, name_or_id, image_id=image_id,
ignore_missing=ignore_missing)
def get_member(self, member, image):
"""Get a single member on an image
:param member: The value can be the ID of a member or a
:class:`~openstack.image.v2.member.Member` instance.
:param image: This is the image that the member belongs to.
The value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:returns: One :class:`~openstack.image.v2.member.Member`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
member_id = resource.Resource._get_id(member)
image_id = resource.Resource._get_id(image)
return self._get(_member.Member, member_id=member_id,
image_id=image_id)
def members(self, image):
"""Return a generator of members
:param image: This is the image that the member belongs to,
the value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:returns: A generator of member objects
:rtype: :class:`~openstack.image.v2.member.Member`
"""
image_id = resource.Resource._get_id(image)
return self._list(_member.Member, image_id=image_id)
def update_member(self, member, image, **attrs):
"""Update the member of an image
:param member: Either the ID of a member or a
:class:`~openstack.image.v2.member.Member` instance.
:param image: This is the image that the member belongs to.
The value can be the ID of a image or a
:class:`~openstack.image.v2.image.Image` instance.
:attrs kwargs: The attributes to update on the member represented
by ``value``.
:returns: The updated member
:rtype: :class:`~openstack.image.v2.member.Member`
"""
member_id = resource.Resource._get_id(member)
image_id = resource.Resource._get_id(image)
return self._update(_member.Member, member_id=member_id,
image_id=image_id, **attrs)
def get_images_schema(self):
"""Get images schema
:returns: One :class:`~openstack.image.v2.schema.Schema`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_schema.Schema, requires_id=False,
base_path='/schemas/images')
def get_image_schema(self):
"""Get single image schema
:returns: One :class:`~openstack.image.v2.schema.Schema`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_schema.Schema, requires_id=False,
base_path='/schemas/image')
def get_members_schema(self):
"""Get image members schema
:returns: One :class:`~openstack.image.v2.schema.Schema`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_schema.Schema, requires_id=False,
base_path='/schemas/members')
def get_member_schema(self):
"""Get image member schema
:returns: One :class:`~openstack.image.v2.schema.Schema`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_schema.Schema, requires_id=False,
base_path='/schemas/member')
def tasks(self, **query):
"""Return a generator of tasks
:param kwargs query: Optional query parameters to be sent to limit
the resources being returned.
:returns: A generator of task objects
:rtype: :class:`~openstack.image.v2.task.Task`
"""
return self._list(_task.Task, **query)
def get_task(self, task):
"""Get task details
:param task: The value can be the ID of a task or a
:class:`~openstack.image.v2.task.Task` instance.
:returns: One :class:`~openstack.image.v2.task.Task`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_task.Task, task)
def create_task(self, **attrs):
"""Create a new task from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.image.v2.task.Task`,
comprised of the properties on the Task class.
:returns: The results of task creation
:rtype: :class:`~openstack.image.v2.task.Task`
"""
return self._create(_task.Task, **attrs)
def wait_for_task(self, task, status='success', failures=None,
interval=2, wait=120):
"""Wait for a task to be in a particular status.
:param task: The resource to wait on to reach the specified status.
The resource must have a ``status`` attribute.
:type resource: A :class:`~openstack.resource.Resource` object.
:param status: Desired status.
:param failures: Statuses that would be interpreted as failures.
:type failures: :py:class:`list`
        :param interval: Number of seconds to wait between two consecutive
            checks. Defaults to 2.
        :param wait: Maximum number of seconds to wait for the status change.
            Defaults to 120.
:returns: The resource is returned on success.
:raises: :class:`~openstack.exceptions.ResourceTimeout` if transition
to the desired status failed to occur in specified seconds.
:raises: :class:`~openstack.exceptions.ResourceFailure` if the resource
            has transitioned to one of the failure statuses.
:raises: :class:`~AttributeError` if the resource does not have a
``status`` attribute.
"""
failures = ['failure'] if failures is None else failures
return resource.wait_for_status(
self, task, status, failures, interval, wait)
def get_tasks_schema(self):
"""Get image tasks schema
:returns: One :class:`~openstack.image.v2.schema.Schema`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_schema.Schema, requires_id=False,
base_path='/schemas/tasks')
def get_task_schema(self):
"""Get image task schema
:returns: One :class:`~openstack.image.v2.schema.Schema`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_schema.Schema, requires_id=False,
base_path='/schemas/task')
```
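A hedged usage sketch of a few of the proxy methods above, assuming `conn` is an `openstack.connection.Connection` whose image service resolves to this proxy; the image name, output path, and import URI are placeholders.
```python
# Find an existing image and stream its contents to a local file.
image = conn.image.find_image("cirros")                  # placeholder image name
if image is not None:
    resp = conn.image.download_image(image, stream=True)
    try:
        with open("cirros.img", "wb") as out:             # placeholder output path
            for chunk in resp.iter_content(chunk_size=1024 * 1024):
                out.write(chunk)
    finally:
        resp.close()  # always close a streamed response, see the docstring above

# Trigger an interoperable import from an external URL (Image API v2.6+);
# container_format and disk_format must already be set on the image.
conn.image.import_image(image, method="web-download",
                        uri="http://example.com/cirros.img")  # placeholder URI
```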
#### File: tests/unit/test_connection.py
```python
import os
import fixtures
from keystoneauth1 import session
import mock
from openstack import connection
import openstack.config
from openstack.tests.unit import base
CONFIG_AUTH_URL = "https://identity.example.com/"
CONFIG_USERNAME = "BozoTheClown"
CONFIG_PASSWORD = "<PASSWORD>"
CONFIG_PROJECT = "TheGrandPrizeGame"
CONFIG_CACERT = "TrustMe"
CLOUD_CONFIG = """
clouds:
sample-cloud:
region_name: RegionOne
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
insecure-cloud:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
verify: False
insecure-cloud-alternative-format:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
insecure: True
cacert-cloud:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
""".format(auth_url=CONFIG_AUTH_URL, username=CONFIG_USERNAME,
           password=CONFIG_PASSWORD, project=CONFIG_PROJECT,
cacert=CONFIG_CACERT)
class TestConnection(base.TestCase):
def setUp(self):
super(TestConnection, self).setUp()
# Create a temporary directory where our test config will live
# and insert it into the search path via OS_CLIENT_CONFIG_FILE.
config_dir = self.useFixture(fixtures.TempDir()).path
config_path = os.path.join(config_dir, "clouds.yaml")
with open(config_path, "w") as conf:
conf.write(CLOUD_CONFIG)
self.useFixture(fixtures.EnvironmentVariable(
"OS_CLIENT_CONFIG_FILE", config_path))
self.use_keystone_v2()
def test_other_parameters(self):
conn = connection.Connection(cloud='sample-cloud', cert='cert')
self.assertEqual(conn.session.cert, 'cert')
def test_session_provided(self):
mock_session = mock.Mock(spec=session.Session)
mock_session.auth = mock.Mock()
mock_session.auth.auth_url = 'https://auth.example.com'
conn = connection.Connection(session=mock_session, cert='cert')
self.assertEqual(mock_session, conn.session)
self.assertEqual('auth.example.com', conn.config.name)
def test_create_session(self):
conn = connection.Connection(cloud='sample-cloud')
self.assertIsNotNone(conn)
# TODO(mordred) Rework this - we need to provide requests-mock
# entries for each of the proxies below
# self.assertEqual('openstack.proxy',
# conn.alarm.__class__.__module__)
# self.assertEqual('openstack.clustering.v1._proxy',
# conn.clustering.__class__.__module__)
# self.assertEqual('openstack.compute.v2._proxy',
# conn.compute.__class__.__module__)
# self.assertEqual('openstack.database.v1._proxy',
# conn.database.__class__.__module__)
# self.assertEqual('openstack.identity.v2._proxy',
# conn.identity.__class__.__module__)
# self.assertEqual('openstack.image.v2._proxy',
# conn.image.__class__.__module__)
# self.assertEqual('openstack.object_store.v1._proxy',
# conn.object_store.__class__.__module__)
# self.assertEqual('openstack.load_balancer.v2._proxy',
# conn.load_balancer.__class__.__module__)
# self.assertEqual('openstack.orchestration.v1._proxy',
# conn.orchestration.__class__.__module__)
# self.assertEqual('openstack.workflow.v2._proxy',
# conn.workflow.__class__.__module__)
def test_create_connection_version_param_default(self):
c1 = connection.Connection(cloud='sample-cloud')
conn = connection.Connection(session=c1.session)
self.assertEqual('openstack.identity.v3._proxy',
conn.identity.__class__.__module__)
def test_create_connection_version_param_string(self):
c1 = connection.Connection(cloud='sample-cloud')
conn = connection.Connection(
session=c1.session, identity_api_version='2')
self.assertEqual('openstack.identity.v2._proxy',
conn.identity.__class__.__module__)
def test_create_connection_version_param_int(self):
c1 = connection.Connection(cloud='sample-cloud')
conn = connection.Connection(
session=c1.session, identity_api_version=3)
self.assertEqual('openstack.identity.v3._proxy',
conn.identity.__class__.__module__)
def test_create_connection_version_param_bogus(self):
c1 = connection.Connection(cloud='sample-cloud')
conn = connection.Connection(
session=c1.session, identity_api_version='red')
# TODO(mordred) This is obviously silly behavior
self.assertEqual('openstack.identity.v3._proxy',
conn.identity.__class__.__module__)
def test_from_config_given_config(self):
cloud_region = (openstack.config.OpenStackConfig().
get_one("sample-cloud"))
sot = connection.from_config(config=cloud_region)
self.assertEqual(CONFIG_USERNAME,
sot.config.config['auth']['username'])
self.assertEqual(CONFIG_PASSWORD,
sot.config.config['auth']['password'])
self.assertEqual(CONFIG_AUTH_URL,
sot.config.config['auth']['auth_url'])
self.assertEqual(CONFIG_PROJECT,
sot.config.config['auth']['project_name'])
def test_from_config_given_cloud(self):
sot = connection.from_config(cloud="sample-cloud")
self.assertEqual(CONFIG_USERNAME,
sot.config.config['auth']['username'])
self.assertEqual(CONFIG_PASSWORD,
sot.config.config['auth']['password'])
self.assertEqual(CONFIG_AUTH_URL,
sot.config.config['auth']['auth_url'])
self.assertEqual(CONFIG_PROJECT,
sot.config.config['auth']['project_name'])
def test_from_config_given_cloud_config(self):
cloud_region = (openstack.config.OpenStackConfig().
get_one("sample-cloud"))
sot = connection.from_config(cloud_config=cloud_region)
self.assertEqual(CONFIG_USERNAME,
sot.config.config['auth']['username'])
self.assertEqual(CONFIG_PASSWORD,
sot.config.config['auth']['password'])
self.assertEqual(CONFIG_AUTH_URL,
sot.config.config['auth']['auth_url'])
self.assertEqual(CONFIG_PROJECT,
sot.config.config['auth']['project_name'])
def test_from_config_given_cloud_name(self):
sot = connection.from_config(cloud_name="sample-cloud")
self.assertEqual(CONFIG_USERNAME,
sot.config.config['auth']['username'])
self.assertEqual(CONFIG_PASSWORD,
sot.config.config['auth']['password'])
self.assertEqual(CONFIG_AUTH_URL,
sot.config.config['auth']['auth_url'])
self.assertEqual(CONFIG_PROJECT,
sot.config.config['auth']['project_name'])
def test_from_config_verify(self):
sot = connection.from_config(cloud="insecure-cloud")
self.assertFalse(sot.session.verify)
sot = connection.from_config(cloud="cacert-cloud")
self.assertEqual(CONFIG_CACERT, sot.session.verify)
def test_from_config_insecure(self):
# Ensure that the "insecure=True" flag implies "verify=False"
sot = connection.from_config("insecure-cloud-alternative-format")
self.assertFalse(sot.session.verify)
class TestNetworkConnection(base.TestCase):
# Verify that if the catalog has the suffix we don't mess things up.
def test_network_proxy(self):
self.use_keystone_v3(catalog='catalog-v3-suffix.json')
self.assertEqual(
'openstack.network.v2._proxy',
self.cloud.network.__class__.__module__)
self.assert_calls()
self.assertEqual(
"https://network.example.com/v2.0",
self.cloud.network.get_endpoint())
class TestNetworkConnectionSuffix(base.TestCase):
# We need to do the neutron adapter test differently because it needs
# to actually get a catalog.
def test_network_proxy(self):
self.assertEqual(
'openstack.network.v2._proxy',
self.cloud.network.__class__.__module__)
self.assert_calls()
self.assertEqual(
"https://network.example.com/v2.0",
self.cloud.network.get_endpoint())
class TestAuthorize(base.TestCase):
def test_authorize_works(self):
res = self.cloud.authorize()
self.assertEqual('KeystoneToken-1', res)
def test_authorize_failure(self):
self.use_broken_keystone()
self.assertRaises(openstack.exceptions.HttpException,
self.cloud.authorize)
```
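Outside the test fixture, the same configuration-driven flow can be sketched as follows, assuming a `clouds.yaml` similar to `CLOUD_CONFIG` above is already on the config search path (for example via the `OS_CLIENT_CONFIG_FILE` environment variable).
```python
import openstack.config
from openstack import connection

# Either let openstack.config locate clouds.yaml itself...
conn = connection.from_config(cloud="sample-cloud")

# ...or resolve the cloud region explicitly and pass it in.
region = openstack.config.OpenStackConfig().get_one("sample-cloud")
conn = connection.from_config(config=region)
```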
#### File: app/rest_api/swift_handler.py
```python
from datetime import datetime, timedelta
import functools
import swiftclient
from keystoneauth1 import exceptions as keystone_exceptions
import six.moves.urllib.parse as urlparse
from BareMetalControllerBackend.conf.env import env_config
from common import exceptions as exc
from common import utils
GLOBAL_READ_ACL = ".r:*"
LIST_CONTENTS_ACL = ".rlistings"
FOLDER_DELIMITER = "/"
CHUNK_SIZE = 512*1024
OBJECT_ENDPOINT = env_config.object_endpoint
def utctime_to_localtime(utc, utc_format='%Y-%m-%dT%H:%M:%S.%fZ'):
if not utc:
return None
utc_time = datetime.strptime(utc, utc_format)
local_time = utc_time + timedelta(hours=8)
return local_time.strftime("%Y-%m-%d %H:%M:%S")
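# A quick illustration of the helper above: the conversion is a fixed +8 hour
# shift from UTC (local time is assumed to be UTC+8), e.g.
#   utctime_to_localtime('2019-01-01T04:00:00.000000Z') -> '2019-01-01 12:00:00'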
class APIDictWrapper(object):
"""Simple wrapper for api dictionaries
Some api calls return dictionaries. This class provides identical
behavior as APIResourceWrapper, except that it will also behave as a
dictionary, in addition to attribute accesses.
Attribute access is the preferred method of access, to be
consistent with api resource objects from novaclient.
"""
_apidict = {} # Make sure _apidict is there even in __init__.
def __init__(self, apidict):
self._apidict = apidict
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._apidict:
raise
return self._apidict[attr]
def __getitem__(self, item):
try:
return getattr(self, item)
except (AttributeError, TypeError) as e:
# caller is expecting a KeyError
raise KeyError(e)
def __contains__(self, item):
try:
return hasattr(self, item)
except TypeError:
return False
def get(self, item, default=None):
try:
return getattr(self, item)
except (AttributeError, TypeError):
return default
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._apidict)
def to_dict(self):
return self._apidict
class Container(APIDictWrapper):
pass
class PseudoFolder(APIDictWrapper):
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
@property
def id(self):
return '%s/%s' % (self.container_name, self.name)
@property
def name(self):
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
return 0
@property
def content_type(self):
return "application/pseudo-folder"
class StorageObject(APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
@property
def id(self):
return self.name
def safe_swift_exception(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except swiftclient.client.ClientException as e:
e.http_scheme = e.http_host = e.http_port = ''
raise e
return wrapper
def _objectify(items, container_name):
"""Splits a listing of objects into their appropriate wrapper classes."""
objects = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("subdir", None) is not None:
object_cls = PseudoFolder
else:
object_cls = StorageObject
objects.append(object_cls(item, container_name))
return objects
def _metadata_to_header(metadata):
headers = {}
public = metadata.get('is_public')
if public is True:
public_container_acls = [GLOBAL_READ_ACL, LIST_CONTENTS_ACL]
headers['x-container-read'] = ",".join(public_container_acls)
elif public is False:
headers['x-container-read'] = ""
return headers
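# A quick illustration of the ACL headers produced by the helper above:
#   _metadata_to_header({'is_public': True})  -> {'x-container-read': '.r:*,.rlistings'}
#   _metadata_to_header({'is_public': False}) -> {'x-container-read': ''}
#   _metadata_to_header({})                   -> {}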
class ObjectClientProvider(object):
def __init__(self, request):
try:
self.openstack_client = utils.get_openstack_client(request)
self.swift_client = swiftclient.client.Connection(session=self.openstack_client.session)
except keystone_exceptions.NotFound:
            # The token may have expired; fall back to password authentication.
            self.openstack_client = utils.get_openstack_client(request, auth_plugin='password')
            self.swift_client = swiftclient.client.Connection(session=self.openstack_client.session)
else:
if not self.swift_client:
raise exc.SessionNotFound()
self.region = request.session.get('region', 'regionOne')
def swift_container_exist(self, container_name):
try:
self.swift_client.head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(self, container_name, object_name):
try:
self.swift_client.head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
@safe_swift_exception
def swift_create_container(self, container_name, metadata):
if self.swift_container_exist(container_name):
raise exc.ContainserAlreadyExists(name=container_name)
headers = _metadata_to_header(metadata or {})
self.swift_client.put_container(container_name, headers=headers)
new_headers = self.swift_client.head_container(container_name)
public_url = None
if metadata.get('is_public'):
parameters = urlparse.quote(container_name.encode('utf8'))
public_url = OBJECT_ENDPOINT.get('regionOne') + '/' + parameters
ts_float = float(new_headers.get('x-timestamp'))
timestamp = datetime.utcfromtimestamp(ts_float).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
container_info = {
'name': container_name,
'container_object_count': new_headers.get('x-container-object-count'),
'container_bytes_used': new_headers.get('x-container-bytes-used'),
'timestamp': utctime_to_localtime(timestamp),
'is_public': metadata.get('is_public'),
'public_url': public_url,
}
return Container(container_info)
@safe_swift_exception
def swift_get_containers(self):
headers, containers = self.swift_client.get_account(full_listing=True)
container_objs = []
for c in containers:
container = self.swift_get_container(c['name'])
container_objs.append(container)
return container_objs
@safe_swift_exception
def swift_get_container(self, container_name,):
headers = self.swift_client.head_container(container_name)
timestamp = None
is_public = False
public_url = None
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
parameters = urlparse.quote(container_name.encode('utf8'))
if is_public:
public_url = OBJECT_ENDPOINT.get('regionOne') + '/' + parameters
ts_float = float(headers.get('x-timestamp'))
timestamp = datetime.utcfromtimestamp(ts_float).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': utctime_to_localtime(timestamp),
'is_public': is_public,
'public_url': public_url,
}
return Container(container_info)
@safe_swift_exception
def swift_delete_container(self, container_name):
self.swift_client.delete_container(container_name)
@safe_swift_exception
def swift_update_container(self, container_name, metadata=None):
headers = _metadata_to_header(metadata or {})
self.swift_client.post_container(container_name, headers=headers)
new_headers = self.swift_client.head_container(container_name)
public_url = None
if metadata.get('is_public'):
parameters = urlparse.quote(container_name.encode('utf8'))
public_url = OBJECT_ENDPOINT.get('regionOne') + '/' + parameters
ts_float = float(new_headers.get('x-timestamp'))
timestamp = datetime.utcfromtimestamp(ts_float).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
container_info = {
'name': container_name,
'container_object_count': new_headers.get('x-container-object-count'),
'container_bytes_used': new_headers.get('x-container-bytes-used'),
'timestamp': utctime_to_localtime(timestamp),
'is_public': metadata.get('is_public'),
'public_url': public_url,
}
return Container(container_info)
@safe_swift_exception
def swift_get_objects(self, container_name, prefix=None, path=None):
kwargs = dict(prefix=prefix,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = self.swift_client.get_container(container_name, **kwargs)
object_objs = _objectify(objects, container_name)
contents = [{
'path': o.subdir if isinstance(o, PseudoFolder) else o.name,
'name': o.name.split('/')[-1],
'bytes': o.bytes,
'is_subdir': isinstance(o, PseudoFolder),
'is_object': not isinstance(o, PseudoFolder),
'content_type': getattr(o, 'content_type', None),
'timestamp': utctime_to_localtime(getattr(o, 'last_modified', None)),
} for o in object_objs if o.name != path]
return contents
@safe_swift_exception
def swift_create_pseudo_folder(self, container_name, pseudo_folder_name):
if self.swift_object_exists(container_name, pseudo_folder_name):
            name = pseudo_folder_name.strip('/')
raise exc.ObjectAlreadyExist(model='folder', name=name)
headers = {}
etag = self.swift_client.put_object(container_name,
pseudo_folder_name,
None,
headers=headers)
obj_info = {
'name': pseudo_folder_name.strip('/'),
'etag': etag,
'is_subdir': True,
'is_object': False,
'content_type': 'application/pseudo-folder',
'path': pseudo_folder_name
}
return PseudoFolder(obj_info, container_name)
@safe_swift_exception
def swift_delete_object(self, container_name, object_name):
self.swift_client.delete_object(container_name, object_name)
return True
@safe_swift_exception
def swift_get_object(self, container_name, object_name, with_data=True,
resp_chunk_size=CHUNK_SIZE):
if with_data:
headers, data = self.swift_client.get_object(
container_name, object_name, resp_chunk_size=resp_chunk_size)
else:
data = None
headers = self.swift_client.head_object(container_name,
object_name)
orig_name = headers.get("x-object-meta-orig-filename")
timestamp = None
try:
ts_float = float(headers.get('x-timestamp'))
timestamp = datetime.utcfromtimestamp(ts_float).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
except Exception:
pass
obj_info = {
'name': object_name,
'bytes': headers.get('content-length'),
'content_type': headers.get('content-type'),
'etag': headers.get('etag'),
'timestamp': utctime_to_localtime(timestamp),
}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
@safe_swift_exception
def swift_delete_folder(self, container_name, object_name):
objects = self.swift_get_objects(container_name, prefix=object_name)
        for obj in objects:
            self.swift_client.delete_object(container_name, obj.get('path'))
@safe_swift_exception
def swift_upload_object(self, container_name, object_name,
object_file=None):
headers = {}
size = 0
if object_file:
headers['X-Object-Meta-Orig-Filename'] = object_file.name
size = object_file.size
if not object_name:
object_name = object_file.name
if object_name[-1] == '/':
object_name = object_name + object_file.name
etag = self.swift_client.put_object(container_name,
object_name,
object_file,
content_length=size,
headers=headers)
        obj = self.swift_get_object(container_name, object_name, with_data=False)
        result = obj.to_dict()
result['path'] = object_name
return result
@safe_swift_exception
def swift_copy_object(self, orig_container_name, orig_object_name,
new_container_name, new_object_name):
if self.swift_object_exists(new_container_name, new_object_name):
raise exc.ObjectAlreadyExist(model='object', name=new_object_name)
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
etag = self.swift_client.put_object(new_container_name,
new_object_name,
None,
headers=headers)
obj_info = {'name': new_object_name, 'etag': etag}
return StorageObject(obj_info, new_container_name)
```
#### File: app/rest_api/tests.py
```python
import base64
import datetime
from hashlib import sha1
import hmac
import json
import requests
from urllib.parse import quote as urlencode
class BackendRequest(object):
def __init__(self, AccessKeyId, SecretKey, endpoint):
self.AccessKeyId = AccessKeyId
self.SecretKey = SecretKey
self.version = 'v1'
        self.USER_AGENT = 'BareMetalBackend-agent'
self.endpoint = endpoint
def concat_url(self, endpoint, path):
return "%s%s" % (endpoint, path)
    # Signature algorithm: computed by the client
def client_sign(self, method, path, params=None, body=None):
timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
data = {
"Access-Key": self.AccessKeyId,
"Secret-Key": self.SecretKey
}
# Basic Signed Headers
host = "Host: {}\n".format(self.endpoint)
content_type = "Content-Type: {}\n".format('application/json')
signed_header = host + content_type
uri = "{} {}".format(method, path)
canonical_uri = urlencode(uri)
# CanonicalizedRequest
# URL Encoding
if method == 'POST':
param_info = ""
else:
param_info = params
canonical_query_str = urlencode(param_info)
# StringToSign
string_to_sign = method + '&' + \
signed_header + '&' + \
canonical_uri + '&' + \
canonical_query_str + '&' + \
timestamp
# Calculate Signature
hmac_data = hmac.new(
self.SecretKey.encode("utf8"),
"{}{}".format(string_to_sign.lower(), data).encode("utf8"),
sha1
).digest()
# b64code = base64.b64encode(hmac_data)
# b64code = b64code.replace('/', '_').replace('+', '-')
b64code = base64.urlsafe_b64encode(hmac_data)
signature = urlencode(b64code)
return timestamp, signature
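    # Note: the server side is presumably expected to rebuild the same string-to-sign
    # from the request and compare HMAC-SHA1 signatures; only the client half is shown here.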
def http_request(self, method, path, **kwargs):
path = '/' + self.version + path
kwargs.setdefault('headers', kwargs.get('headers', {}))
        kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
timestamp, signature = self.client_sign(method, path, body=kwargs.get('body'),
params=kwargs.get('params'))
kwargs['headers']['AUTHORIZATION'] = signature
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['body']['timestamp'] = timestamp
kwargs['data'] = json.dumps(kwargs['body'])
del kwargs['body']
else:
kwargs.setdefault('params', kwargs.get('params', {}))
kwargs['params']['timestamp'] = timestamp
url = self.concat_url(self.endpoint, path)
resp = requests.request(method, url, **kwargs)
        if resp.status_code != 200:
            resp.raise_for_status()
        return resp.json()
def access_login(self):
body = {
"AccessKeyId": self.AccessKeyId
}
url_path = '/iam/access_login'
self.http_request('POST', url_path, body=body)
if __name__ == '__main__':
my_client = BackendRequest(AccessKeyId='ca038fe789854a748e7930610229b7c5',
SecretKey='<KEY>',
endpoint="http://127.0.0.1:8002")
my_client.access_login()
``` |
{
"source": "21WelfareForEveryone/WelfareForEveryone",
"score": 2
} |
#### File: WelfareForEveryone/test/ksbert.py
```python
from locust import HttpUser, between, task
from config import config
import json
class test(HttpUser):
wait_time = between(5, 10)
@task
def connect(self):
payload = {
"token": config.token,
"chat_message": "<PASSWORD>"
}
headers = {'content-type': 'application/json'}
self.client.post(config.chatbot, data=json.dumps(payload), headers=headers)
```
#### File: WelfareForEveryone/test/login.py
```python
from locust import HttpUser, between, task
from config import config
import json
class test(HttpUser):
wait_time = between(1, 5)
@task
def connect(self):
payload = {
"user_id": config.user_id,
"user_password": <PASSWORD>
}
headers = {'content-type': 'application/json'}
self.client.post(config.login, data=json.dumps(payload), headers=headers)
``` |
{
"source": "220111/Sam-s-Quest",
"score": 4
} |
#### File: 220111/Sam-s-Quest/setup.py
```python
import random
import sys
import time
from graphics import title
from graphics import monimg
def game():
sama = 1
mone = 3
def one():
time.sleep(1)
mone = 3
samone = input("will SAM attack?")
if samone == "yes":
samaone = random.randint(0, 1)
if samaone == 1:
time.sleep(1)
print("yes a hit!")
                if mone == 3:
                    mone = 2
                    two()
                elif mone == 2:
                    mone = 1
                    thr()
                elif mone == 1:
                    mone = 0
                    time.sleep(1)
                    print("you Win")
                    sys.exit()
else:
time.sleep(1)
print("no a miss")
monaone = random.randint(0, 25)
if monaone == 1:
time.sleep(1)
print ("the monster hit you")
print ("you died")
                    sys.exit()
else:
time.sleep(1)
print("the monster missed")
if mone == 3:
one()
elif mone == 2:
two()
elif mone == 1:
thr()
def two():
time.sleep(1)
mone = 2
samtwo = input("will SAM attack again?")
if samtwo == "yes":
samatwo = random.randint(0, 1)
if samatwo == 1:
time.sleep(1)
print("yes a hit!")
                if mone == 3:
                    mone = 2
                    two()
                elif mone == 2:
                    mone = 1
                    thr()
                elif mone == 1:
                    mone = 0
                    time.sleep(1)
                    print("you Win")
                    sys.exit()
else:
time.sleep(1)
print("no a miss")
monaone = random.randint(0, 10)
if monaone == 1:
time.sleep(1)
print ("the monster hit you")
print ("you died")
                    sys.exit()
else:
time.sleep(1)
print("the monster missed")
if mone == 3:
one()
elif mone == 2:
two()
elif mone == 1:
thr()
def thr():
time.sleep(1)
mone = 1
samr = input("will SAM attack again?")
if samr == "yes":
samar = random.randint(0, 1)
if samar == 1:
time.sleep(1)
print("yes a hit!")
                if mone == 3:
                    mone = 2
                    two()
                elif mone == 2:
                    mone = 1
                    thr()
                elif mone == 1:
                    mone = 0
                    time.sleep(1)
                    print("you Win")
                    sys.exit()
else:
time.sleep(1)
print("no a miss")
monaone = random.randint(0, 5)
if monaone == 1:
time.sleep(1)
print ("the monster hit you")
print ("you died")
                    sys.exit()
else:
time.sleep(1)
print("the monster missed")
if mone == 3:
one()
elif mone == 2:
two()
elif mone == 1:
thr()
    def start():
        title()
        time.sleep(1)
        choice = input("(s)tart, (q)uit")
        if choice == 'S':
            print("please use lowercase letters")
            start()
        elif choice == 's':
            time.sleep(1)
            print("A monster approaches")
            mone = 3
            one()
        else:
            sys.exit()
    start()
game()
``` |
{
"source": "22014471/information_flask",
"score": 2
} |
#### File: modules/index/views.py
```python
from flask import current_app
from flask import render_template
from info import redis_store
from . import index_blu
@index_blu.route('/')
def index():
return render_template('news/index.html')
# When a page is opened, the browser automatically requests root path + favicon.ico as the site tab icon
# send_static_file is the method Flask calls to look up a given static file
@index_blu.route('/favicon.ico')
def favicon():
return current_app.send_static_file('news/favicon.ico')
```
#### File: modules/passport/views.py
```python
import random
import re
from flask import abort, jsonify
from flask import current_app
from flask import json
from flask import make_response
from flask import request
from info import constants
from info import redis_store
from info.libs.yuntongxun.sms import CCP
from info.utils.response_code import RET, error_map
from . import passport_blu
from info.utils.captcha.captcha import captcha
@passport_blu.route('/sms_code', methods=["POST"])
def send_sms_code():
"""
发送短信的逻辑
1. 获取参数:手机号,图片验证码内容,图片验证码的编号 (随机值)
2. 校验参数(参数是否符合规则,判断是否有值)
3. 先从redis中取出真实的验证码内容
4. 与用户的验证码内容进行对比,如果对比不一致,那么返回验证码输入错误
5. 如果一致,生成验证码的内容(随机数据)
6. 发送短信验证码
7. 告知发送结果
:return:
"""
'{"mobiel": "18811111111", "image_code": "AAAA", "image_code_id": "u23jksdhjfkjh2jh4jhdsj"}'
    # 1. Get parameters: mobile number, image captcha text, image captcha id (random value)
# params_dict = json.loads(request.data)
params_dict = request.json
mobile = params_dict.get("mobile")
image_code = params_dict.get("image_code")
image_code_id = params_dict.get("image_code_id")
    # 2. Validate parameters (check they are present and well formed)
    # Check that all parameters have values
if not all([mobile, image_code, image_code_id]):
# {"errno": "4100", "errmsg": "参数有误"}
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
    # Check the mobile number format
if not re.match('1[35678]\\d{9}', mobile):
return jsonify(errno=RET.PARAMERR, errmsg="手机号格式不正确")
    # 3. Fetch the real captcha text from redis
try:
real_image_code = redis_store.get("ImageCodeId_" + image_code_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据查询失败")
if not real_image_code:
return jsonify(errno=RET.NODATA, errmsg="图片验证码已过期")
    # 4. Compare it with the user's input; if they differ, return a captcha error
if real_image_code.upper() != image_code.upper():
return jsonify(errno=RET.DATAERR, errmsg="验证码输入错误")
    # 5. If they match, generate the SMS code content (random data)
    # Random number, padded with leading zeros to 6 digits
sms_code_str = "%06d" % random.randint(0, 999999)
    current_app.logger.debug("SMS verification code is: %s" % sms_code_str)
    # 6. Send the SMS verification code
result = CCP().send_template_sms(mobile, [sms_code_str, constants.SMS_CODE_REDIS_EXPIRES / 5], "1")
if result != 0:
        # Non-zero means the send failed
return jsonify(errno=RET.THIRDERR, errmsg="发送短信失败")
    # Save the SMS code to redis
try:
redis_store.set("SMS_" + mobile, sms_code_str, constants.SMS_CODE_REDIS_EXPIRES)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据保存失败")
    # 7. Report the result
    # return jsonify(errno=RET.OK, errmsg="Sent successfully")
return jsonify(errno=RET.OK, errmsg=error_map[RET.OK])
@passport_blu.route('/image_code')
def get_image_code():
"""
生成图片验证码并返回
1. 取到参数
2. 判断参数是否有值
3. 生成图片验证码
4. 保存图片验证码文字内容到redis
5. 返回验证码图片
:return:
"""
    # 1. Get the parameter
    # args: the parameters after the ? in the url
image_code_id = request.args.get("imageCodeId", None)
    # 2. Check the parameter has a value
if not image_code_id:
return abort(403)
    # 3. Generate the image captcha
name, text, image = captcha.generate_captcha()
    # 4. Save the captcha text to redis
try:
redis_store.set("ImageCodeId_" + image_code_id, text, constants.IMAGE_CODE_REDIS_EXPIRES)
except Exception as e:
current_app.logger.error(e)
abort(500)
    # 5. Return the captcha image
response = make_response(image)
    # Set the content type so the browser can recognise the data correctly
response.headers["Content-Type"] = "image/jpg"
return response
``` |
{
"source": "22014471/malonghui_Django",
"score": 2
} |
#### File: apps/questions/models.py
```python
from django.db import models
from users.models import User
class QuestionCategory(models.Model):
"""
问题分类模型类
"""
name = models.CharField(max_length=64, verbose_name="问题分类名")
sequence = models.IntegerField(verbose_name="问题分类顺序")
is_deleted = models.BooleanField(default=False, verbose_name="逻辑删除")
class Meta:
db_table = "tb_question_categories"
verbose_name = "问题分类表"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class TagCategory(models.Model):
"""问题标签分类表"""
name = models.CharField(max_length=20, verbose_name="问题标签名称")
is_deleted = models.BooleanField(default=False, verbose_name="逻辑删除")
class Meta:
db_table = "tb_tag_categories"
verbose_name = "问题表"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Tag(models.Model):
"""问题标签模型类"""
name = models.CharField(max_length=20, unique=True, verbose_name="标签名称")
concerns = models.IntegerField(default=0, verbose_name="标签关注人数")
describe = models.CharField(max_length=140, verbose_name="标签描述")
image_url = models.CharField(max_length=256, verbose_name="标签图标url")
category = models.ForeignKey(TagCategory, related_name="category_tags", on_delete=models.PROTECT, verbose_name="标签分类")
is_deleted = models.BooleanField(default=False, verbose_name="逻辑删除")
concern_user = models.ManyToManyField(to=User, blank=True, through='TagConcern', related_name="concern_tags", verbose_name="关注该标签的用户")
class Meta:
db_table = "tb_tags"
verbose_name = "标签表"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Question(models.Model):
"""问题模型类"""
title = models.CharField(max_length=64, unique=True, verbose_name="问题标题")
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='question', verbose_name="问题作者")
content = models.CharField(max_length=120, verbose_name="问题内容")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="问题创建时间")
update_time = models.DateTimeField(auto_now=True, verbose_name="问题更新时间")
category = models.ForeignKey(QuestionCategory, on_delete=models.CASCADE, verbose_name="问题分类")
visits = models.IntegerField(default=0, verbose_name="浏览量")
like_count = models.IntegerField(default=0, verbose_name="问题点赞数")
answer_count = models.IntegerField(default=0, verbose_name="问题解答数")
status = models.IntegerField(default=1, verbose_name="问题状态")
is_deleted = models.BooleanField(default=False, verbose_name="逻辑删除")
latest_answer = models.OneToOneField(to="Answer", null=True, blank=True, on_delete=models.CASCADE, related_name='answer_question', verbose_name="问题最新回答")
question_tags = models.ManyToManyField(to='Tag', blank=True, through='QuestionTag', verbose_name="该问题的标签")
like_users = models.ManyToManyField(to=User, blank=True, through="QuestionLike", related_name="like_questions", verbose_name="点赞该问题的用户")
class Meta:
db_table = "tb_questions"
verbose_name = "问题表"
verbose_name_plural = verbose_name
def __str__(self):
return self.title
class Answer(models.Model):
"""解答模型类"""
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='answer', verbose_name="解答用户")
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='question_answer', verbose_name="问题")
parent = models.ForeignKey('Answer', blank=True, null=True, on_delete=models.CASCADE, related_name='parent_answer', verbose_name="父评论")
content = models.CharField(max_length=200,verbose_name="解答内容")
create_time = models.DateTimeField(auto_now_add=True ,verbose_name="解答时间")
is_deleted = models.BooleanField(default=False, verbose_name="逻辑删除")
like_count = models.IntegerField(default=0, verbose_name="解答点赞数")
like_users = models.ManyToManyField(to=User, blank=True, through="AnswerLike", related_name="like_answers", verbose_name="点赞该解答的用户")
class Meta:
db_table = "tb_answers"
verbose_name = "解答表"
verbose_name_plural = verbose_name
def __str__(self):
return "解答"
class AnswerLike(models.Model):
"""解答模点赞表"""
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="点赞解答的用户")
answer = models.ForeignKey(Answer, on_delete=models.CASCADE, verbose_name="点赞的解答")
class Meta:
db_table = "tb_answer_likes"
verbose_name = "解答点赞表"
verbose_name_plural = verbose_name
class QuestionTag(models.Model):
"""问题的标签表"""
tag = models.ForeignKey(Tag, on_delete=models.CASCADE, related_name='tag_questions', verbose_name="问题的标签")
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='qtags', verbose_name="标签所属的问题")
class Meta:
db_table = "tb_question_tags"
verbose_name = "问题表"
verbose_name_plural = verbose_name
class TagConcern(models.Model):
"""标签关注表"""
tag = models.ForeignKey(Tag, on_delete=models.CASCADE, verbose_name="关注的标签")
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="关注标签的人")
class Meta:
db_table = "tb_tag_concerns"
verbose_name = "标签关注表"
verbose_name_plural = verbose_name
class QuestionLike(models.Model):
"""点赞模型类"""
user = models.ForeignKey(User, on_delete=models.CASCADE,verbose_name="用户")
question = models.ForeignKey(Question, on_delete=models.CASCADE,verbose_name="问题")
class Meta:
db_table = "tb_question_likes"
verbose_name = "点赞表"
verbose_name_plural = verbose_name
```
#### File: apps/questions/serializers.py
```python
from rest_framework import serializers
from questions.models import Question, Tag, Answer, QuestionCategory, QuestionLike, AnswerLike, TagConcern, TagCategory, \
QuestionTag
from users.models import User, Dynamic
class QuestionCategorySerializer(serializers.ModelSerializer):
"""问题分类序列化器"""
class Meta:
model = QuestionCategory
fields = ('id', 'name')
class LatestQASerializer(serializers.ModelSerializer):
"""问题最新回答"""
user = serializers.SlugRelatedField(label="问题作者", read_only=True, slug_field='username')
class Meta:
model = Answer
fields = ('id', 'create_time', 'user')
class QuestionTagsSerializer(serializers.ModelSerializer):
"""该问题所含标签序列化器"""
class Meta:
model = Tag
fields = ('id', 'name')
class QuestionListSerializer(serializers.ModelSerializer):
"""问题列表序列化器"""
author = serializers.SlugRelatedField(label="问题作者", read_only=True, slug_field='username')
latest_answer = LatestQASerializer(read_only=True)
question_tags = QuestionTagsSerializer(read_only=True, many=True)
class Meta:
model = Question
exclude = ('content', 'is_deleted', 'like_users')
class QAAuthorSerializer(serializers.ModelSerializer):
"""该解答的作者序列化器"""
class Meta:
model = User
fields = ('id', 'username', 'avatar')
class ParentAnswerSerializer(serializers.ModelSerializer):
"""父解答序列化器"""
user = QAAuthorSerializer(read_only=True)
class Meta:
model = Answer
exclude = ('like_users',)
class QAnswerSerializer(serializers.ModelSerializer):
"""该问题的解答序列化器"""
user = QAAuthorSerializer(read_only=True)
is_like = serializers.IntegerField(label="用户是否点赞")
is_author = serializers.IntegerField(label="用户是否是作者")
parent = ParentAnswerSerializer()
class Meta:
model = Answer
exclude = ('like_users',)
class QuestionDetailSerializer(serializers.ModelSerializer):
"""问题详情序列化器"""
author = serializers.SlugRelatedField(label="问题作者", read_only=True, slug_field='username')
is_like = serializers.IntegerField(label="用户是否点赞")
question_tags = QuestionTagsSerializer(read_only=True, many=True)
is_author = serializers.IntegerField(label="用户是否作者")
class Meta:
model = Question
exclude = ('visits', 'is_deleted', 'like_users')
class PublishAnswerSerializer(serializers.ModelSerializer):
"""发表解答序列化器"""
question = serializers.CharField(write_only=True, required=True)
parent = serializers.CharField(required=False)
class Meta:
model = Answer
exclude = ('like_users', 'is_deleted')
read_only_fields = ['id', 'user']
extra_kwargs = {
'content':{
'required': True,
},
}
def validate(self, attrs):
question_id = attrs['question']
question = Question.objects.filter(id=question_id).first()
if not question:
raise serializers.ValidationError("问题不存在")
attrs['question'] = question
return attrs
def create(self, validated_data):
content = validated_data['content']
request = self.context['request']
user = request.user
question = validated_data['question']
parent_id = validated_data.get('parent', None)
answer = Answer.objects.create(
content=content,
user=user,
question=question,
parent_id=parent_id,
)
question.answer_count += 1
question.latest_answer = answer
question.save()
        # Record "answered a question" in the user's activity (dynamic) table
if request.user and request.user.is_authenticated():
Dynamic.objects.create(user=request.user, type=1, action="回答了问题", type_id=answer.id)
return answer
class QuestionLikeSerializer(serializers.Serializer):
"""问题点赞序列化器"""
action = serializers.CharField(write_only=True, required=True)
question_id = serializers.CharField(required=False)
user_id = serializers.CharField(read_only=True)
def validate(self, attrs):
question_id = attrs['question_id']
question = Question.objects.filter(id=question_id).first()
if not question:
raise serializers.ValidationError('错误的请求')
action = attrs['action']
if action not in ['like', 'dislike']:
raise serializers.ValidationError('错误的请求')
request = self.context['request']
user = request.user
question_like = QuestionLike.objects.filter(user=user, question=question).first()
if question_like:
raise serializers.ValidationError('一个问题只能点赞或踩一次')
return attrs
def create(self, validated_data):
action = validated_data.get("action")
request = self.context['request']
user = request.user
question_id = validated_data.get('question_id')
question = Question.objects.filter(id=question_id).first()
if not question:
raise serializers.ValidationError('错误的请求')
instance = QuestionLike.objects.create(
user=user,
question=question
)
if action == 'like':
question.like_count += 1
else:
question.like_count -= 1
question.save()
return instance
class AnswerLikeSerializer(serializers.Serializer):
"""解答点赞序列化器"""
action = serializers.CharField(write_only=True, required=True)
answer_id = serializers.CharField(required=False)
user_id = serializers.CharField(read_only=True)
def validate(self, attrs):
answer_id = attrs['answer_id']
answer = Answer.objects.filter(id=answer_id).first()
if not answer:
raise serializers.ValidationError('错误的请求')
action = attrs['action']
if action not in ['like', 'dislike']:
raise serializers.ValidationError('错误的请求')
request = self.context['request']
user = request.user
answer_like = AnswerLike.objects.filter(user=user,answer=answer).first()
if answer_like:
raise serializers.ValidationError('一个解答只能点赞或踩一次')
return attrs
def create(self, validated_data):
action = validated_data.get("action")
request = self.context['request']
user = request.user
answer_id = validated_data.get('answer_id', None)
if answer_id:
answer =Answer.objects.filter(id=answer_id).first()
if not answer:
raise serializers.ValidationError('错误的请求')
instance = AnswerLike.objects.create(
user=user,
answer=answer
)
if action == 'like':
answer.like_count += 1
else:
answer.like_count -= 1
answer.save()
return instance
class TagListSerializer(serializers.ModelSerializer):
"""所有标签序列化器"""
is_like = serializers.IntegerField(label="用户是否关注该标签")
class Meta:
model = Tag
fields = ("id", "name", "concerns", "is_like")
class TagDetailSerializer(serializers.ModelSerializer):
"""标签详情序列化器"""
is_like = serializers.IntegerField(label="用户是否关注该标签")
class Meta:
model = Tag
fields = ('id', 'name', 'describe', 'image_url', 'is_like')
class TagLikeSerializer(serializers.Serializer):
"""标签关注序列化器"""
tag_id = serializers.CharField(required=False)
user_id = serializers.CharField(read_only=True)
def validate(self, attrs):
tag_id = attrs.get("tag_id", None)
if not tag_id:
raise serializers.ValidationError("错误的请求")
tag = Tag.objects.filter(id=tag_id).first()
if not tag:
raise serializers.ValidationError("错误的请求")
request = self.context["request"]
user = request.user
tag_concern = TagConcern.objects.filter(user=user, tag=tag).first()
if tag_concern:
raise serializers.ValidationError("错误的请求")
return attrs
def create(self, validated_data):
request = self.context['request']
user = request.user
tag_id = validated_data.get("tag_id")
tag = Tag.objects.filter(id=tag_id).first()
instance = TagConcern.objects.create(
tag=tag,
user=user,
)
tag.concerns += 1
tag.save()
        # Record "followed a tag" in the user's activity (dynamic) table
if user and user.is_authenticated():
Dynamic.objects.create(user=request.user, type=0, action="关注了标签", type_id=tag_id)
return instance
class CustomTagsSerializer(serializers.ModelSerializer):
"""常用标签序列化器"""
class Meta:
model = Tag
fields = ['id', 'name', 'describe']
class TagCategorySerializer(serializers.ModelSerializer):
"""标签分类序列化器"""
category_tags = CustomTagsSerializer(many=True)
class Meta:
model = TagCategory
fields = "__all__"
class SubmitQuestionSerializer(serializers.Serializer):
"""提交问题序列化器"""
id = serializers.IntegerField(read_only=True)
tags = serializers.ListField(required=True, write_only=True)
content = serializers.CharField(required=True)
title = serializers.CharField(required=True)
category = serializers.IntegerField(required=True)
user = serializers.PrimaryKeyRelatedField(required=False, read_only=True)
def create(self, validated_data):
tags = validated_data.get('tags')
content = validated_data.get('content')
title = validated_data.get("title")
user = self.context['request'].user
instance = Question.objects.create(
content=content,
title=title,
category_id=1,
author=user,
)
for tag_name in tags:
tag = Tag.objects.filter(name=tag_name).first()
if not tag:
raise serializers.ValidationError("错误的请求")
questiontag = QuestionTag.objects.create(
tag=tag,
question=instance
)
questiontag.save()
instance.save()
del validated_data['tags']
return instance
class EditAnswerSerializer(serializers.Serializer):
"""编辑解答序列化器"""
id = serializers.IntegerField(required=True)
question_id = serializers.IntegerField(required=True, write_only=True)
content = serializers.CharField(max_length=120, required=True)
def update(self, instance, validated_data):
user = self.context['request'].user
question_id = validated_data.get('question_id')
question = Question.objects.filter(id=question_id).first()
if not question:
raise serializers.ValidationError("错误的请求")
if user.id != instance.user_id:
raise serializers.ValidationError("错误的请求")
if question.id != instance.question_id:
raise serializers.ValidationError("错误的请求")
content = validated_data.get('content')
instance.content = content
instance.save()
return instance
class EditQuestionSerializer(serializers.Serializer):
"""编辑问题序列化器"""
tags = serializers.ListField(required=True, write_only=True)
content = serializers.CharField(required=True)
title = serializers.CharField(required=True)
def validate(self, attrs):
tags = attrs["tags"]
for tag_name in tags:
tag = Tag.objects.filter(name=tag_name).first()
if not tag:
raise serializers.ValidationError("错误的请求")
return attrs
def update(self, instance, validated_data):
user = self.context['request'].user
question_id = validated_data.get('question_id')
question = Question.objects.filter(id=question_id).first()
if not question:
raise serializers.ValidationError("错误的请求")
if user.id != instance.author_id:
raise serializers.ValidationError("错误的请求")
content = validated_data.get('content')
instance.content = content
instance.save()
return instance
``` |
{
"source": "22037/22037-Camera",
"score": 3
} |
#### File: 22037/22037-Camera/loading.py
```python
import cv2
import numpy as np
import h5py
# fo = h5py.File('data.h5', 'r')
# pi=fo.keys()
# n1 = fo.get('dataset_1')
f = h5py.File('20220418164453.hdf5', 'r')
p= f.keys()
n1 = f.get('dataset_1')
data = np.array(n1[:,:,:])
file = 'test.jpg'
cv2.imwrite(file, data)
# # import imageio
# import numpy as np
# import h5py
# f = h5py.File('the_file.h5', 'r')
# dset = f['key']
# data = np.array(dset[:,:,:])
# file = 'test.png' # or .jpg
# # imageio.imwrite(file, data)
```
#### File: 22037/22037-Camera/qt_serial.py
```python
__author__ = '<NAME> - github.com/dekhosla'
import sys
import tkinter
import serial
import serial.tools.list_ports
import warnings
import cv2
import logging
import time
import numpy as np
from turtle import back, pd
from cv2 import detail_SeamFinder
from xmlrpc.client import Boolean
from tracemalloc import stop
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtGui
from PyQt5.QtGui import QPixmap
from datetime import datetime
from timeit import default_timer as timer
from queue import Queue
from sympy import root
from examples.configs.blackfly_configs import configs
from numba import vectorize, jit, prange
from PyQt5.QtCore import QSize, QRect, QObject, pyqtSignal, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QMainWindow, QWidget, QLabel, QTextEdit, QListWidget, QListView
# Define Variable
display_interval = 1./300. #
# synthetic data
width = 511 # 1920, 720
height = 421 # 1080, 540
test_img = np.random.randint(0, 255,(height, width), 'uint8') # random image
frame = np.zeros((height, width), dtype=np.uint8)
# pre allocate
# Setting up logging
# options are: DEBUG, INFO, ERROR, WARNING
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("Display")
font = cv2.FONT_HERSHEY_SIMPLEX
textLocation0 = (10, 20)
textLocation1 = (10, 60)
fontScale = 1
fontColor = (255, 255, 255)
lineType = 2
stop = pyqtSignal(Boolean)
# #defining ports
# ports = [
# p.device
# for p in serial.tools.list_ports.comports()
# if 'USB' in p.description
# ]
# if not ports:
# raise IOError("There is no device exist on serial port!")
# if len(ports) > 1:
# warnings.warn('Connected....')
# ser = serial.Serial(ports[0],9600)
#Port Detection END
# MULTI-THREADING
class Worker(QObject):
finished = pyqtSignal()
intReady = pyqtSignal(str)
@pyqtSlot()
def __init__(self):
super(Worker, self).__init__()
self.working = True
def work(self):
ser = serial.Serial(self.ports1[0], 9600)
while self.working:
line = ser.readline().decode('utf-8')
print(line)
time.sleep(0.05)
self.intReady.emit(line)
self.finished.emit()
@vectorize(['uint16(uint8, float32, uint8)'], nopython = True, fastmath = True)
def correction_bgff(background, flatfield, data_cube):
return np.multiply(np.subtract(data_cube,background),flatfield)
@vectorize(['uint16(float32, uint8)'], nopython = True, fastmath = True)
def correction_ff(flatfield, data_cube):
return np.multiply(data_cube,flatfield)
@vectorize(['uint16(uint8, uint8)'], nopython = True, fastmath = True)
def correction_bg(background, data_cube):
return np.subtract(data_cube,background)
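# These numba-vectorized kernels broadcast element-wise over whole data cubes, e.g.
#   corrected = correction_bgff(background, flatfield, data_cube)
# (background subtraction followed by flat-field multiplication, per pixel).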
class cameraOnSelected(QObject):
def on_pushButton_CameraStop_clicked(self):
self.stop = True
self.stopFuc = False
ConvertToQtFormat = QtGui.QImage()
class qt(QMainWindow):
finished = pyqtSignal()
intReady = pyqtSignal(str)
@pyqtSlot()
def __init__(self):
QMainWindow.__init__(self)
loadUi('qt.ui', self)
self.thread = None
self.worker = None
# self.find_port()
# self.stop=pyqtSignal(Boolean)
self.stop = False
self.pushButton_StartComm.clicked.connect(self.start_loop)
self.menuBar = self.menuBar()
self.working = True
self.stopFuc = True
self.UiComponents()
# camera start,stop,save
self.pushButton_CameraOn.clicked.connect(self.on_pushButton_CameraOn)
self.pushButton_CameraStop.clicked.connect(self.on_pushButton_CameraStop)
self.pushButton_CameraSave.clicked.connect(self.on_pushButton_CameraSave)
# Button for background,flatfield.databinning
self.onBackground = False
self.onFlatfield = False
self.onDatabinning = False
        self.onBloodPsio = False
self.onPhysiological_BG = False
self.onPhysiological_RG = False
self.pushButton_Background.setCheckable(True)
self.pushButton_Flatfield.setCheckable(True)
self.pushButton_Databinning.setCheckable(True)
self.pushButton_Physiological_BG.setCheckable(True)
self.pushButton_Physiological_RG.setCheckable(True)
self.pushButton_Background.setStyleSheet("background-color : lightgrey")
self.pushButton_Flatfield.setStyleSheet("background-color : lightgrey")
self.pushButton_Databinning.setStyleSheet("background-color : lightgrey")
self.pushButton_Physiological_BG.setStyleSheet("background-color : lightgrey")
self.pushButton_Physiological_RG.setStyleSheet("background-color : lightgrey")
self.pushButton_Background.clicked.connect(self.on_pushButton_Background)
self.pushButton_Flatfield.clicked.connect(self.on_pushButton_Flatfield)
self.pushButton_Databinning.clicked.connect(self.on_pushButton_Databinning)
# Button on ride side of screen
self.pushButton_DefaultView.clicked.connect(self.on_pushButton_DefaultView)
self.pushButton_Analysis.clicked.connect(self.on_pushButton_Analysis)
self.pushButton_Wavelength.clicked.connect(self.on_pushButton_Wavelength)
self.pushButton_Physiological_BG.clicked.connect(self.on_pushButton_Physiological_BG)
self.pushButton_Physiological_RG.clicked.connect(self.on_pushButton_Physiological_RG)
# Add item list to drop down
def UiComponents(self):
channel_list = ["All", "C0_365", "C1_460", "C2_525", "C3_590", "C4_623", "C5_660", "C6_740", "C7_850","C8_950", "C9_1050", "C10_White", "C11_420", "C12_420", "C13_Background"]
self.comboBoxDropDown.addItems(channel_list)
# use for comm port
def work1(self):
ser = serial.Serial(self.ports1[0], 9600)
while self.working:
line = ser.readline().decode('utf-8')
print(line)
time.sleep(0.05)
self.intReady.emit(line)
self.finished.emit()
def on_pushButton_4_clicked(self):
if self.x != 0:
self.textEdit_displayMessage.setText('Settings Saved!')
else:
self.textEdit_displayMessage.setText(
'Please enter port and speed!')
############################################################### First page Start #############################################
# 1. pushButton_CameraOn_
def on_pushButton_CameraOn(self):
self.stop = False
self.hdfSave = False
self.onBackground = False
self.onFlatfield = False
self.onDatabinning = False
self.onPhysiological_BG = False
self.onPhysiological_RG = False
self.onBloodPsio = False
self.onBloodPsio_BG_RG = False
self.wavelengthSelected = 15
self.data_cube_corr = np.zeros((14, 540, 720), 'uint16')
self.frame = np.zeros((540, 720), dtype=np.uint8)
self.label_Status.setText("Status:")
self.label_SuccesMessage.setText("Started!")
self.label_SuccesMessage.setStyleSheet('color: blue')
# call camera function
self.on_camera()
############################################################ CAMERA CODE
def on_camera(self):
# Initialize Variables
frame_idx = 0 # index to create data cube out of individual frames
num_cubes_stored = 0 # keep track of data cubes sent to storage
num_cubes_generated = 0 # keep track of data cubes generated
        last_time = time.perf_counter() # keep track of time to display performance
        last_display = time.perf_counter() # keep track of time to display images
num_frames_received = 0 # keep track of how many captured frames reach the main program
num_frames_displayed = 0 # keep track of how many frames are displayed
measured_dps = 0 # computed in main thread, number of frames displayed per second
proc_time = 0
counter = bin_time = 0
min_fr = 0.0
max_fr = 1.0
self.data_cube_corr = np.zeros((14, 540, 720), 'uint16')
self.frame = np.zeros((540, 720), dtype=np.uint8)
self.data_cube = np.zeros((14, 540, 720), dtype=np.uint8)
# stop=pyqtSignal(Boolean)
# Reducing the image resolution by binning (summing up pixels)
bin_x = 20
bin_y = 20
scale = (bin_x*bin_y*255)
if configs['displayfps'] >= configs['fps']:
display_interval = 0
else:
display_interval = 1.0/configs['displayfps']
dps_measure_time = 5.0 # average measurements over 5 secs
#configs
res = configs['camera_res']
height = res[1]
width = res[0]
measure_time = 5.0 # average measurements over 5 secs
camera_index = 0 # default camera starts at 0 by operating system
# Display
window_name = 'Camera'
font = cv2.FONT_HERSHEY_SIMPLEX
textLocation0 = (10, 480)
textLocation1 = (10, 520)
fontScale = 1
fontColor = (0, 0, 255)
lineType = 2
# Setting up logging
# options are: DEBUG, INFO, ERROR, WARNING
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("Main")
# # Setting up Storage
from camera.streamer.h5storageserver import h5Server
# Create camera interface
from camera.capture.blackflycapture import blackflyCapture
print("Starting Capture")
self.camera = blackflyCapture(configs)
print("Getting Images")
self.camera.start()
# Binning 20 pixels of the 8bit images
# @jit(nopython=True, fastmath=True, parallel=True)
# @vectorize(['uint16(uint8, float32, uint8)'], nopython = True, fastmath = True)
stop = self.stop
i = 0
while(not stop):
stop = self.stop
current_time = time.time()
i = (i+1) % 14
# wait for new image
(self.frame_time, frame) = self.camera.capture.get(
block=True, timeout=None)
self.data_cube[frame_idx, :, :] = frame
num_frames_received += 1
frame_idx += 1
while not self.camera.log.empty():
(level, msg) = self.camera.log.get_nowait()
logger.log(level, msg)
# When we have a complete dataset:
if frame_idx >= 14: # 0...13 is populated
frame_idx = 0
num_cubes_stored = 0
onFlatfield = self.onFlatfield
onBackground = self.onBackground
self.background = np.zeros((540, 720), dtype=np.uint8)
                # A. Condition for flat field and background correction
if onFlatfield or onBackground:
self.data_cube, self.background = self.sort_algorithm()
if onFlatfield and onBackground:
self.data_cube_corr = correction_bgff(self.background, self.flatfield, self.data_cube)
elif onFlatfield:
self.data_cube_corr = correction_ff(self.flatfield, self.data_cube)
else:
self.data_cube_corr = correction_bg(self.background, self.data_cube)
# B. Condition for On binning
onDatabinning = self.onDatabinning
if onDatabinning:
onBloodPsio = self.onBloodPsio
if onBloodPsio:
                        self.data_cube_corr = self.bin20()
                        test = self.blood_psio()
                        self.data_cube_corr = test.astype(np.uint8)
# HDF5
save = self.hdfSave
if save:
frame_idx = 0
num_cubes_generated += 1
now = datetime.now()
filename = now.strftime("%Y%m%d%H%M%S") + ".hdf5"
hdf5 = h5Server("C:\\temp\\" + filename)
print("Starting Storage Server")
hdf5.start()
try:
# self.hdf5.queue.put_nowait((self.frame_time, self.data_cube_corr))
hdf5.queue.put_nowait(
(self.frame_time, self.data_cube_corr))
num_cubes_stored += 1 # executed if above was successful
self.hdfSave = False
save = False
self.label_Status.setText("Status:")
self.label_SuccesMessage.setText("Saved!")
self.label_SuccesMessage.setStyleSheet('color: green')
except:
pass
# logger.log(logging.WARNING, "HDF5:Storage Queue is full!")
hdf5.stop()
# Display performance in main loop
if current_time - last_time >= measure_time:
# how much time did it take to process the data
if num_cubes_generated > 0:
logger.log(logging.INFO, "Status:process time:{:.2f}ms".format(
proc_time*1000./num_cubes_generated))
# how many data cubes did we create
measured_cps_generated = num_cubes_generated/measure_time
logger.log(logging.INFO, "Status:captured cubes generated per second:{}".format(
measured_cps_generated))
num_cubes_generated = 0
# how many data cubes did we send to storage
measured_cps_stored = num_cubes_stored/measure_time
logger.log(logging.INFO, "Status:cubes sent to storage per second:{}".format(
measured_cps_stored))
num_cubes_stored = 0
# how many frames did we display
measured_dps = num_frames_displayed/measure_time
logger.log(
logging.INFO, "Status:frames displayed per second:{}".format(measured_dps))
num_frames_displayed = 0
last_time = current_time
if (current_time - last_display) >= display_interval:
selChannel = self.wavelengthSelected
onBloodPsio=self.onBloodPsio
notRun=False
if self.onBackground or self.onFlatfield or self.onDatabinning or self.onBloodPsio:
if onBloodPsio:
# self.data_cube_corr=cv2.resize(self.data_cube_corr, (540,720), fx=0, fy=0, interpolation = cv2.INTER_NEAREST)
display_frame = np.cast['uint8'](
self.data_cube_corr[:, :])
else :
if selChannel == 15:
notRun=True
for i in range(14):
display_frame = np.cast['uint8'](
self.data_cube_corr[i, :, :])
# This section creates significant delay and we need to throttle the display to maintain max capture and storage rate
# cv2.putText(display_frame, "Capture FPS:{} [Hz]".format(
# self.camera.measured_fps), textLocation0, font, fontScale, 255, lineType)
# cv2.putText(display_frame, "Display FPS:{} [Hz]".format(
# measured_dps), textLocation1, font, fontScale, 255, lineType)
# cv2.imshow(window_name, display_frame)
Image1 = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
FlippedImage = cv2.flip(Image1, 1)
ConvertToQtFormat = QtGui.QImage(
FlippedImage.data, FlippedImage.shape[1], FlippedImage.shape[0], QImage.Format_RGB888)
self.label_CameraDisplay.setPixmap(
QPixmap.fromImage(ConvertToQtFormat))
self.lcdNumber_FPSin.display(self.camera.measured_fps)
self.lcdNumber_FPSout.display(measured_dps)
# quit the program if users enter q or closes the display window
# this likely is the reason that display frame rate is not faster than 60fps.
if cv2.waitKey(1) & 0xFF == ord('q'):
stop = True
last_display = current_time
num_frames_displayed += 1
else:
display_frame = np.cast['uint8'](
self.data_cube_corr[selChannel, :, :])
else:
display_frame=frame
if not notRun:
# This section creates significant delay and we need to throttle the display to maintain max capture and storage rate
# cv2.putText(display_frame, "Capture FPS:{} [Hz]".format(
# self.camera.measured_fps), textLocation0, font, fontScale, 255, lineType)
# cv2.putText(display_frame, "Display FPS:{} [Hz]".format(
# measured_dps), textLocation1, font, fontScale, 255, lineType)
# cv2.imshow(window_name, display_frame)
FlippedImage = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
# FlippedImage = cv2.flip(Image1, 1)
ConvertToQtFormat = QtGui.QImage(
FlippedImage.data, FlippedImage.shape[1], FlippedImage.shape[0], QImage.Format_RGB888)
self.label_CameraDisplay.setPixmap(
QPixmap.fromImage(ConvertToQtFormat))
self.lcdNumber_FPSin.display(self.camera.measured_fps)
self.lcdNumber_FPSout.display(measured_dps)
# quit the program if users enter q or closes the display window
# this likely is the reason that display frame rate is not faster than 60fps.
if cv2.waitKey(1) & 0xFF == ord('q'):
stop = True
last_display = current_time
num_frames_displayed += 1
#self.stopAnimation()
#curve fit function
def curveFitFlatField(self):
#images are stored in BSstandard folder
# pd.read_csv('fit0_2', dtype='float32', sep=',', header =None)
fit0 = np.loadtxt('fit0_2', dtype='float32', delimiter=',')
fit1 = np.loadtxt('fit1_2', dtype='float32', delimiter=',')
fit2 = np.loadtxt('fit2_2', dtype='float32', delimiter=',')
fit3 = np.loadtxt('fit3_2', dtype='float32', delimiter=',')
fit4 = np.loadtxt('fit4_2', dtype='float32', delimiter=',')
fit5 = np.loadtxt('fit5_2', dtype='float32', delimiter=',')
fit6 = np.loadtxt('fit6_2', dtype='float32', delimiter=',')
fit7 = np.loadtxt('fit7_2', dtype='float32', delimiter=',')
fit8 = np.loadtxt('fit8_2', dtype='float32', delimiter=',')
fit9 = np.loadtxt('fit9_2', dtype='float32', delimiter=',')
fit10 = np.loadtxt('fit10_2', dtype='float32', delimiter=',')
# #comment out 11 and 13
fit11 = np.loadtxt('fit12_2', dtype='float32', delimiter=',')
fit12 = np.loadtxt('fit12_2', dtype='float32', delimiter=',')
fit13 = np.loadtxt('background', dtype='float32', delimiter=',')
#Processing
# looptime = 0.0
# use_queue = True
# data_cube = np.zeros((14, 540, 720), dtype=np.uint8)
flatfield = np.zeros((14, 540, 720), dtype=np.float32)
flatfield[0, :, :] = fit0
flatfield[1, :, :] = fit1
flatfield[2, :, :] = fit2
flatfield[3, :, :] = fit3
flatfield[4, :, :] = fit4
flatfield[5, :, :] = fit5
flatfield[6, :, :] = fit6
flatfield[7, :, :] = fit7
flatfield[8, :, :] = fit8
flatfield[9, :, :] = fit9
flatfield[10, :, :] = fit10
flatfield[11, :, :] = fit11
flatfield[12, :, :] = fit12
flatfield[13, :, :] = fit13
self.flatfield = flatfield
# sorting function
def sort_algorithm(self):
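        # Heuristic: the channel with the lowest total intensity (sampled every
        # bg_dx/bg_dy pixels) is taken to be the background/dark frame; the cube
        # is then rolled so that the frame after the background becomes index 0.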
bg_delta: tuple = (64, 64)
bg_dx = bg_delta[1]
bg_dy = bg_delta[0]
inten = np.sum(self.data_cube[:, ::bg_dx, ::bg_dy], axis=(1, 2))
background_indx = np.argmin(inten)
background = self.data_cube[background_indx, :, :]
index_array = np.arange(0, 14)
array_plus_index = index_array + background_indx + 1
ind = array_plus_index % 14
self.data_cube = self.data_cube[ind, :, :]
self.background = background
return self.data_cube, self.background
def bin20(self):
arr_in = self.data_cube_corr
m, n, o = np.shape(arr_in)
arr_tmp = np.empty((m, n//20, o), dtype='uint16')
arr_out = np.empty((m, n//20, o//20), dtype='uint32')
for i in prange(n//20):
arr_tmp[:, i, :] = arr_in[:, i*20, :] + arr_in[:, i*20+1, :] + arr_in[:, i*20+2, :] + arr_in[:, i*20+3, :] + arr_in[:, i*20+4, :] + arr_in[:, i*20+5, :] + \
arr_in[:, i*20+6, :] + arr_in[:, i*20+7, :] + arr_in[:, i*20+8, :] + arr_in[:, i*20+9, :] + arr_in[:, i*20+10, :] + arr_in[:, i*20+11, :] + \
arr_in[:, i*20+12, :] + arr_in[:, i*20+13, :] + arr_in[:, i*20+14, :] + arr_in[:, i*20+15, :] + arr_in[:, i*20+16, :] + arr_in[:, i*20+17, :] + \
arr_in[:, i*20+18, :] + arr_in[:, i*20+19, :]
for j in prange(o//20):
            arr_out[:, :, j] = arr_tmp[:, :, j*20] + arr_tmp[:, :, j*20+1] + arr_tmp[:, :, j*20+2] + arr_tmp[:, :, j*20+3] + arr_tmp[:, :, j*20+4] + arr_tmp[:, :, j*20+5] + \
                arr_tmp[:, :, j*20+6] + arr_tmp[:, :, j*20+7] + arr_tmp[:, :, j*20+8] + arr_tmp[:, :, j*20+9] + arr_tmp[:, :, j*20+10] + arr_tmp[:, :, j*20+11] + \
                arr_tmp[:, :, j*20+12] + arr_tmp[:, :, j*20+13] + arr_tmp[:, :, j*20+14] + arr_tmp[:, :, j*20+15] + arr_tmp[:, :, j*20+16] + arr_tmp[:, :, j*20+17] + \
                arr_tmp[:, :, j*20+18] + arr_tmp[:, :, j*20+19]
self.data_cube_corr = arr_out
return self.data_cube_corr
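    # Note: assuming the frame dimensions are divisible by 20, an equivalent (and
    # usually simpler) 20x20 binning is:
    #   arr_out = arr_in.reshape(m, n//20, 20, o//20, 20).sum(axis=(2, 4))
    # The explicit loops above are kept in a numba/prange-friendly form.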
############################################################END CAMERA CODE
#2. Camera Stop spin view Button
def on_pushButton_CameraStop(self):
self.label_Status.setText("Status:")
self.label_SuccesMessage.setText("Stopped!")
self.label_SuccesMessage.setStyleSheet('color: red')
self.stop = True
self.camera.stop()
ConvertToQtFormat = QtGui.QImage()
self.label_CameraDisplay.setPixmap(QPixmap.fromImage(ConvertToQtFormat))
self.label_CameraDisplay.clear()
#3. Camera Save pushButton_CameraSave
def on_pushButton_CameraSave(self):
self.hdfSave = True
# 4. Display Target or default view pushButton_DefaultView_
def on_pushButton_DefaultView(self):
self.onBackground = False
self.onFlatfield = False
self.onDatabinning = False
self.onPhysiological_BG = False
self.onPhysiological_RG = False
self.pushButton_Background.setStyleSheet(
"background-color : lightgrey")
self.pushButton_Background.setText("Off")
self.pushButton_Flatfield.setStyleSheet(
"background-color : lightgrey")
self.pushButton_Flatfield.setText("Off")
self.pushButton_Databinning.setStyleSheet(
"background-color : lightgrey")
self.pushButton_Databinning.setText("Off")
self.pushButton_Physiological_BG.setStyleSheet(
"background-color : lightgrey")
self.pushButton_Physiological_RG.setStyleSheet(
"background-color : lightgrey")
def target(window_name, display_frame):
cv2.imshow(window_name, display_frame)
# 5. Analysis on_pushButton_Analysis_clicked
def on_pushButton_Analysis(self):
z = 0
# 6. Okay Button pushButton_Wavelength
def on_pushButton_Wavelength(self):
content = self.comboBoxDropDown.currentText()
if(content == 'All'):
self.wavelengthSelected = 15
else:
selected = content.split("_", 1)
self.wavelengthSelected = int(selected[0].split("C", 1)[1])
# 7. pushButton_Physicogical
def on_pushButton_Physiological_BG(self):
if self.pushButton_Physiological_BG.isChecked():
self.pushButton_Physiological_BG.setStyleSheet(
"background-color : limegreen")
self.pushButton_Physiological_RG.setStyleSheet(
"background-color : lightgrey")
self.onPhysiological_BG = True
else:
self.pushButton_Physiological_BG.setStyleSheet(
"background-color : lightgrey")
self.onPhysiological_BG = False
self.onBloodPsio=True
self.onBloodPsio_BG_RG = False
# self.blood_psio()
def on_pushButton_Physiological_RG(self):
if self.pushButton_Physiological_RG.isChecked():
self.pushButton_Physiological_RG.setStyleSheet(
"background-color : limegreen")
self.pushButton_Physiological_BG.setStyleSheet(
"background-color : lightgrey")
self.onPhysiological_RG = True
else:
self.pushButton_Physiological_RG.setStyleSheet(
"background-color : lightgrey")
self.onPhysiological_RG = False
self.onBloodPsio=True
self.onBloodPsio_BG_RG = True
# self.blood_psio()
# 8. check buttons
def on_pushButton_Background(self):
if self.pushButton_Background.isChecked():
self.pushButton_Background.setStyleSheet(
"background-color : limegreen")
self.pushButton_Background.setText("On")
self.onBackground = True
else:
self.pushButton_Background.setStyleSheet(
"background-color : lightgrey")
self.pushButton_Background.setText("Off")
self.onBackground = False
def on_pushButton_Flatfield(self):
if self.pushButton_Flatfield.isChecked():
self.pushButton_Flatfield.setStyleSheet(
"background-color : limegreen")
self.pushButton_Flatfield.setText("On")
self.curveFitFlatField()
self.onFlatfield = True
else:
self.pushButton_Flatfield.setStyleSheet(
"background-color : lightgrey")
self.pushButton_Flatfield.setText("Off")
self.onFlatfield = False
def on_pushButton_Databinning(self):
if self.pushButton_Databinning.isChecked():
self.pushButton_Databinning.setStyleSheet(
"background-color : limegreen")
self.pushButton_Databinning.setText("On")
self.onDatabinning = True
else:
self.pushButton_Databinning.setStyleSheet(
"background-color : lightgrey")
self.pushButton_Databinning.setText("Off")
self.onDatabinning = False
# @jit(nopython=True, fastmath=True, parallel=True)
def blood_psio(self):
counter = bin_time = 0
min_fr = 0.0
max_fr = 1.0
start_time = time.time()
frame_bin = self.data_cube_corr
if (self.onBloodPsio_BG_RG):
frame_ratio = np.divide(frame_bin[1, :, :].astype(np.uint32), frame_bin[6, :, :].astype(np.uint32))
else:
frame_ratio = np.divide(frame_bin[1, :, :].astype(np.uint32), frame_bin[4, :, :].astype(np.uint32))
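        # The per-pixel band ratio (channel 1 over channel 6 or channel 4, depending on the
        # selected mode) appears to serve as a simple physiological contrast map; it is then
        # normalised below with a running min/max before display.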
counter += (time.perf_counter() - start_time)
# Display Ratio Image, make it same size as original image
frame_ratio_01 = (frame_ratio).astype(np.float32)
frame_ratio_01 = np.sqrt(frame_ratio_01)
min_fr = 0.95*min_fr + 0.05*frame_ratio_01.min()
max_fr = 0.95*max_fr + 0.05*frame_ratio_01.max()
frame_ratio_01 = (frame_ratio_01 - min_fr)/(max_fr-min_fr)*10
frame_tmp = cv2.resize(
frame_ratio_01, (540, 720), fx=0, fy=0, interpolation=cv2.INTER_NEAREST)
return (frame_tmp)
############################################################### First page End ###############################################
############################################################### Second page Start ############################################
def find_port(self):
self.label_CameraDisplay.clear()
#defining ports
self.ports1 = [
p.device
for p in serial.tools.list_ports.comports()
if 'USB' in p.description
]
if not self.ports1:
raise IOError("There is no device exist on serial port!")
if len(self.ports1) > 1:
warnings.warn('Connected....')
# self.selectedSerial = serial.Serial(self.ports1[0],9600)
# self.label_11.setText(self.ports1[0])
def on_pushButton_StartComm_clicked(self):
self.completed = 0
while self.completed < 100:
self.completed += 0.001
self.progressBar.setValue(int(self.completed))
self.textEdit_displayMessage.setText('Data Gathering...')
self.label_PortStatus.setText("CONNECTED!")
self.label_PortStatus.setStyleSheet('color: green')
x = 1
self.textEdit_displayMessage.setText(":")
def on_pushButton_StopComm_clicked(self):
self.textEdit_displayMessage.setText(
'Stopped! Please click CONNECT...')
def on_pushButton_SendComm_clicked(self):
# Send data from serial port:
mytext = self.textEdit_TextSendDisplay.toPlainText()
# self.portsetup(self)
print(mytext.encode())
self.selectedSerial.write(mytext.encode())
def stop_loop(self):
self.worker.working = False
def onIntReady(self, i):
self.textEdit_DisplayCommData.append("{}".format(i))
print(i)
def loop_finished(self):
print('Loop Finished')
def start_loop(self):
# self.portsetup()
if self.ports1:
self.worker = Worker() # a new worker to perform those tasks
self.thread = QThread() # a new thread to run our background tasks in
# move the worker into the thread,do this first before connecting the signals
self.worker.moveToThread(self.thread)
# begin our worker object's loop when the thread starts running
            self.thread.started.connect(self.worker.work)
self.worker.intReady.connect(self.onIntReady)
# stop the loop on the stop button click
self.pushButton_StopComm.clicked.connect(self.stop_loop)
# do something in the gui when the worker loop ends
self.worker.finished.connect(self.loop_finished)
# tell the thread it's time to stop running
self.worker.finished.connect(self.thread.quit)
# have worker mark itself for deletion
self.worker.finished.connect(self.worker.deleteLater)
# have thread mark itself for deletion
self.thread.finished.connect(self.thread.deleteLater)
self.thread.start()
if not self.ports1:
self.label_11.setText("Nothing found")
############################################################### Second page End ##############################################
############################################################### Third page Start #############################################
############################################################### Third page End ###############################################
def run():
app = QApplication(sys.argv)
widget = qt()
widget.show()
sys.exit(app.exec_())
if __name__ == "__main__":
run()
```
#### File: 22037-Camera/UI_Buttons/BloodSyntheticImage.py
```python
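# Fragment: assumes the enclosing module provides numpy (np), cv2, numba's jit,
# and the GUI/state objects (self, frame, min_fr, max_fr) referenced below.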
@jit(nopython=True, fastmath=True, parallel=True)
def blood_psio(arr_in,width,height,scale, counter, textLocation0, font, fontScale, fontColor, lineType):
    frame_ratio = (np.divide(arr_in[1,:,:].astype(np.float32), arr_in[2,:,:].astype(np.float32))*255.0).astype(np.uint16)
# Display Ratio Image, make it same size as original image
frame_ratio_01 = (frame_ratio/255).astype(np.float32)
frame_ratio_01 = np.sqrt(frame_ratio_01)
min_fr = 0.95*min_fr + 0.05*frame_ratio_01.min()
max_fr = 0.95*max_fr + 0.05*frame_ratio_01.max()
frame_ratio_01 = (frame_ratio_01 -min_fr)/(max_fr-min_fr)
frame_tmp = cv2.resize(frame_ratio_01, (width,height),fx=0, fy=0, interpolation = cv2.INTER_NEAREST)
cv2.putText(frame_tmp,"Frame:{}".format(counter), textLocation0, font, fontScale, fontColor, lineType)
# cv2.imshow(ratioed_window_name, frame_tmp)
Image1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
FlippedImage = cv2.flip(Image1, 1)
ConvertToQtFormat = QtGui.QImage(FlippedImage.data, FlippedImage.shape[1],FlippedImage.shape[0], QImage.Format_RGB888)
self.label_CameraDisplay.setPixmap(QPixmap.fromImage(ConvertToQtFormat))
self.lcdNumber_FPSin.display(self.camera.measured_fps)
self.lcdNumber_FPSout.display(counter)
return (frame_ratio)
```
#### File: 22037-Camera/UI_Buttons/wavelength.py
```python
def wavelength(data_cube):
channel_1=data_cube[0,:,:]
channel_2=data_cube[1,:,:]
channel_3=data_cube[2,:,:]
channel_4=data_cube[3,:,:]
channel_5=data_cube[4,:,:]
channel_6=data_cube[5,:,:]
channel_7=data_cube[6,:,:]
channel_8=data_cube[7,:,:]
channel_9=data_cube[8,:,:]
channel_10=data_cube[9,:,:]
channel_11=data_cube[10,:,:]
channel_12=data_cube[11,:,:]
channel_13=data_cube[12,:,:]
channel_14=data_cube[13,:,:]
#if button 1 is pressed, display channel_1
#if button 2 is pressed, display channel_2
#etc
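
# The comments above describe a button-to-channel dispatch. A minimal sketch of that idea follows;
# the helper name and the 1-based button index are assumptions, not part of the original file.
def channel_for_button(data_cube, button_index):
    # Button 1 maps to data_cube[0], button 2 to data_cube[1], and so on.
    return data_cube[button_index - 1, :, :]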
``` |
{
"source": "2212221352/Multimodal-Transformer",
"score": 2
} |
#### File: Multimodal-Transformer/src/train.py
```python
import torch
from torch import nn
import sys
from src import models
from src import ctc
from src.utils import *
import torch.optim as optim
import numpy as np
import time
from torch.optim.lr_scheduler import ReduceLROnPlateau
import os
import pickle
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score, f1_score
from src.eval_metrics import *
####################################################################
#
# Construct the model and the CTC module (which may not be needed)
#
####################################################################
def initiate(hyp_params, train_loader, valid_loader, test_loader):
model = getattr(models, hyp_params.model+'Model')(hyp_params)
if hyp_params.use_cuda:
model = model.cuda()
optimizer = getattr(optim, hyp_params.optim)(model.parameters(), lr=hyp_params.lr)
criterion = getattr(nn, hyp_params.criterion)()
if hyp_params.aligned or hyp_params.model=='MULT':
ctc_criterion = None
ctc_a2l_module, ctc_v2l_module = None, None
ctc_a2l_optimizer, ctc_v2l_optimizer = None, None
scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=hyp_params.when, factor=0.1, verbose=True)
settings = {'model': model,
'optimizer': optimizer,
'criterion': criterion,
'ctc_a2l_module': ctc_a2l_module,
'ctc_v2l_module': ctc_v2l_module,
'ctc_a2l_optimizer': ctc_a2l_optimizer,
'ctc_v2l_optimizer': ctc_v2l_optimizer,
'ctc_criterion': ctc_criterion,
'scheduler': scheduler}
return train_model(settings, hyp_params, train_loader, valid_loader, test_loader)
####################################################################
#
# Training and evaluation scripts
#
####################################################################
def train_model(settings, hyp_params, train_loader, valid_loader, test_loader):
model = settings['model']
optimizer = settings['optimizer']
criterion = settings['criterion']
ctc_a2l_module = settings['ctc_a2l_module']
ctc_v2l_module = settings['ctc_v2l_module']
ctc_a2l_optimizer = settings['ctc_a2l_optimizer']
ctc_v2l_optimizer = settings['ctc_v2l_optimizer']
ctc_criterion = settings['ctc_criterion']
scheduler = settings['scheduler']
def train(model, optimizer, criterion, ctc_a2l_module, ctc_v2l_module, ctc_a2l_optimizer, ctc_v2l_optimizer, ctc_criterion):
epoch_loss = 0
model.train()
num_batches = hyp_params.n_train // hyp_params.batch_size
proc_loss, proc_size = 0, 0
start_time = time.time()
for i_batch, (batch_X, batch_Y, batch_META) in enumerate(train_loader):
sample_ind, text, audio, vision = batch_X
eval_attr = batch_Y.squeeze(-1) # if num of labels is 1
model.zero_grad()
if ctc_criterion is not None:
ctc_a2l_module.zero_grad()
ctc_v2l_module.zero_grad()
if hyp_params.use_cuda:
with torch.cuda.device(0):
text, audio, vision, eval_attr = text.cuda(), audio.cuda(), vision.cuda(), eval_attr.cuda()
if hyp_params.dataset == 'iemocap':
eval_attr = eval_attr.long()
batch_size = text.size(0)
batch_chunk = hyp_params.batch_chunk
combined_loss = 0
net = nn.DataParallel(model) if batch_size > 10 else model
preds, hiddens = net(text, audio, vision)
if hyp_params.dataset == 'iemocap':
preds = preds.view(-1, 2)
eval_attr = eval_attr.view(-1)
raw_loss = criterion(preds, eval_attr)
combined_loss = raw_loss
combined_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), hyp_params.clip)
optimizer.step()
proc_loss += raw_loss.item() * batch_size
proc_size += batch_size
epoch_loss += combined_loss.item() * batch_size
if i_batch % hyp_params.log_interval == 0 and i_batch > 0:
avg_loss = proc_loss / proc_size
elapsed_time = time.time() - start_time
print('Epoch {:2d} | Batch {:3d}/{:3d} | Time/Batch(ms) {:5.2f} | Train Loss {:5.4f}'.
format(epoch, i_batch, num_batches, elapsed_time * 1000 / hyp_params.log_interval, avg_loss))
proc_loss, proc_size = 0, 0
start_time = time.time()
return epoch_loss / hyp_params.n_train
def evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=False):
model.eval()
loader = test_loader if test else valid_loader
total_loss = 0.0
results = []
truths = []
with torch.no_grad():
for i_batch, (batch_X, batch_Y, batch_META) in enumerate(loader):
sample_ind, text, audio, vision = batch_X
eval_attr = batch_Y.squeeze(dim=-1) # if num of labels is 1
if hyp_params.use_cuda:
with torch.cuda.device(0):
text, audio, vision, eval_attr = text.cuda(), audio.cuda(), vision.cuda(), eval_attr.cuda()
if hyp_params.dataset == 'iemocap':
eval_attr = eval_attr.long()
batch_size = text.size(0)
net = nn.DataParallel(model) if batch_size > 10 else model
preds, _ = net(text, audio, vision)
if hyp_params.dataset == 'iemocap':
preds = preds.view(-1, 2)
eval_attr = eval_attr.view(-1)
total_loss += criterion(preds, eval_attr).item() * batch_size
# Collect the results into dictionary
results.append(preds)
truths.append(eval_attr)
avg_loss = total_loss / (hyp_params.n_test if test else hyp_params.n_valid)
results = torch.cat(results)
truths = torch.cat(truths)
return avg_loss, results, truths
best_valid = 1e8
for epoch in range(1, hyp_params.num_epochs+1):
start = time.time()
train(model, optimizer, criterion, ctc_a2l_module, ctc_v2l_module, ctc_a2l_optimizer, ctc_v2l_optimizer, ctc_criterion)
val_loss, _, _ = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=False)
test_loss, results, truths = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=True)
end = time.time()
duration = end-start
scheduler.step(val_loss) # Decay learning rate by validation loss
print("-"*50)
print('Epoch {:2d} | Time {:5.4f} sec | Valid Loss {:5.4f} | Test Loss {:5.4f}'.format(epoch, duration, val_loss, test_loss))
print("-"*50)
if val_loss < best_valid:
print(f"Saved model at pre_trained_models/{hyp_params.name}.pt!")
save_model(hyp_params, model, name=hyp_params.name)
best_valid = val_loss
if hyp_params.dataset == "mosei":
eval_mosei_senti(results, truths, True)
elif hyp_params.dataset == 'mosi':
eval_mosi(results, truths, True)
model = load_model(hyp_params, name=hyp_params.name)
_, results, truths = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=True)
if hyp_params.dataset == "mosei_senti":
eval_mosei_senti(results, truths, True)
elif hyp_params.dataset == 'mosi':
eval_mosi(results, truths, True)
elif hyp_params.dataset == 'iemocap':
eval_iemocap(results, truths)
sys.stdout.flush()
input('[Press Any Key to start another run]')
``` |
{
"source": "2218084076/hotpoor_autoclick_xhs",
"score": 3
} |
#### File: hotpoor_autoclick_xhs/get_cdf/get_cdf.py
```python
import sys
import os
import pyautogui
import time
import pyperclip
# product-item-default
# document.getElementsByClassName("product-item-default").length  -> check how many product items the page shows
# Open https://pgy.xiaohongshu.com/solar/advertiser/patterns/kol in Chrome
# Pick a category
# Open the inspect-element (DevTools) panel, docked at 1160px
# Scroll the page all the way to the bottom-right
# (a small screen-size sanity check is sketched right after the page settings below)
page_num = 0
page_num_end = 3
# SK-II starts from the second page
page_with_items = [20,20,20,2]
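
# Added sanity-check sketch (not in the original script): every action below clicks hard-coded
# screen coordinates, so the window layout described in the comments above must match the display
# this script was recorded on. pyautogui.size() reports the primary-screen resolution.
print("screen size:", pyautogui.size())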
action_list = [
{
"x":127,
"y":17,
"sleep":1,
"name":"move_to_click",
"content":"",
"action_name":"切换pgy页面",
},
]
def pyautogui_action(action):
    """Dispatch one UI action dict: move/click, type, copy/paste, open DevTools, etc., then sleep."""
if action["name"] in ["move_to_click"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
elif action["name"] in ["select_all_and_write"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
time.sleep(1)
pyautogui.hotkey("ctrl", "a")
write_content = action.get("content","")
pyautogui.typewrite(write_content)
pyautogui.press('enter')
elif action["name"] in ["select_all_and_js_latest"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.press('backspace')
pyautogui.press('up')
pyautogui.press('enter')
elif action["name"] in ["select_all_and_copy"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "x")
elif action["name"] in ["select_all_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "v")
elif action["name"] in ["select_item_and_close_tab"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "w")
elif action["name"] in ["select_all_and_copy_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
write_content = action.get("content","")
pyperclip.copy(write_content)
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "v")
pyautogui.press('enter')
elif action["name"] in ["open_console"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("f12")
elif action["name"] in ["refresh"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("f5")
elif action["name"] in ["esc"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("esc")
print(action.get("action_name"))
action_sleep = action.get("sleep",0)
time.sleep(action_sleep)
for page in page_with_items:
action_page_change = {
"x":127,
"y":17,
"sleep":0.5,
"name":"move_to_click",
"content":"",
"action_name":"点击选项卡",
}
pyautogui_action(action_page_change)
for item in range(0,page):
action_item_click_list = [
{
"x":1377,
"y":147,
"sleep":0.5,
"name":"move_to_click",
"content":"",
"action_name":"切换console",
},
{
"x":1204,
"y":172,
"sleep":0.5,
"name":"move_to_click",
"content":"",
"action_name":"清空信息console",
},
{
"x":1282,
"y":995,
"sleep":2,
"name":"select_all_and_copy_and_paste",
#document.getElementsByClassName("lamer-product-item")[0].getElementsByTagName("a")[0].click()
# "content": "document.getElementsByClassName(\"lamer-product-item\")[%s].getElementsByTagName(\"a\")[0].click()" % (item),
"content":"document.getElementsByClassName(\"product-item-default\")[%s].children[1].click()"%(item),
"action_name":"切换产品",
},
{
"x":453,
"y":16,
"sleep":0.5,
"name":"open_console",
"content":"",
"action_name":"open_console",
},
{
"x":1377,
"y":147,
"sleep":0.5,
"name":"select_all_and_copy_and_paste",
"content":"",
"action_name":"选择console",
},
{
"x":1204,
"y":172,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "清空信息console",
},
{
"x":1282,
"y":995,
"sleep":0.5,
"name":"select_all_and_copy_and_paste",
"content":
"""
result=[]
result.push(document.getElementsByClassName("detail-box-title")[0].innerText)
result.push(document.getElementsByClassName("product-name")[0].innerText)
result.push(document.getElementsByClassName("product-code-value")[0].innerText)
result.push(document.getElementsByClassName("price-now")[0].innerText)
cxs=document.getElementsByClassName("promotion-item")
cxs_info = []
for (i=0;i<cxs.length;i++){
cxs_info.push(cxs[i].innerText)
}
ths=document.getElementsByClassName("property-item-title")
tds=document.getElementsByClassName("property-item-value")
kv={}
for (i=0;i<ths.length;i++){
kv[ths[i].innerText]=tds[i].innerText
}
result_info = {
"detail-box-title":result[0],
"product-name":result[1],
"product-code-value":result[2],
"price-now":result[3],
"promotion-item":cxs_info,
"property-item":kv,
}
dom=document.createElement("div")
dom.id="wlb_cover"
dom.style.position="fixed"
dom.style.top="0px"
dom.style.right="0px"
dom.innerHTML="<textarea id=\"wlb_cover_textarea\">"+JSON.stringify(result_info)+"</textarea>"
document.body.append(dom)
""",
"action_name":"执行获取内容的JS",
},
{
"x":1023,
"y":152,
"sleep":0.5,
"name":"select_all_and_copy",
"content":"",
"action_name":"copy"
},
{
"x": 443,
"y": 11,
"sleep": 0.5,
"name": "select_item_and_close_tab",
"content": "",
"action_name": "关闭选项卡",
},
{
"x": 443,
"y": 11,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "点击选项卡",
},
{
"x": 443,
"y": 11,
"sleep": 0.5,
"name": "esc",
"content": "",
"action_name": "esc",
},
{
"x": 445,
"y": 232,
"sleep": 0.5,
"name": "select_all_and_paste",
"content": "",
"action_name": "提交",
},
{
"x": 586,
"y": 244,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "submit",
},
{
"x": 127,
"y": 17,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "点击选项卡",
},
{
"x": 127,
"y": 17,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "切换pgy页面",
},
]
for action_item_click in action_item_click_list:
pyautogui_action(action_item_click)
action_page_change_list = [
{
"x":1377,
"y":147,
"sleep":0.5,
"name":"move_to_click",
"content":"",
"action_name":"切换console",
},
{
"x":1204,
"y":172,
"sleep":0.5,
"name":"move_to_click",
"content":"",
"action_name":"清空信息console",
},
{
"x":1282,
"y":995,
"sleep":1,
"name":"select_all_and_copy_and_paste",
"content":'''
document.getElementsByClassName("cm-pagination-next")[0].click()
''',
# "content":'document.getElementsByClassName("lamer-pagination-next")[0].click()',
"action_name":"切换产品页",
},
{
"x": 1282,
"y": 995,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content": '''
scrollBy(0,9999)
''',
# "content":'document.getElementsByClassName("lamer-pagination-next")[0].click()',
"action_name": "切换产品页",
},
]
for action_page_change in action_page_change_list:
pyautogui_action(action_page_change)
'''
result=[]
result.push(document.getElementsByClassName("detail-box-title")[0].innerText)
result.push(document.getElementsByClassName("product-name")[0].innerText)
result.push(document.getElementsByClassName("product-code-value")[0].innerText)
result.push(document.getElementsByClassName("price-now")[0].innerText)
cxs=document.getElementsByClassName("promotion-item")
cxs_info = []
for (i=0;i<cxs.length;i++){
cxs_info.push(cxs[i].innerText)
}
ths=document.getElementsByClassName("property-item-title")
tds=document.getElementsByClassName("property-item-value")
kv={}
for (i=0;i<ths.length;i++){
kv[ths[i].innerText]=tds[i].innerText
}
result_info = {
"detail-box-title":result[0],
"product-name":result[1],
"product-code-value":result[2],
"price-now":result[3],
"promotion-item":cxs_info,
"property-item":kv,
}
dom=document.createElement("div")
dom.id="wlb_cover"
dom.style.position="fixed"
dom.style.top="0px"
dom.style.right="0px"
dom.innerHTML="<textarea id=\"wlb_cover_textarea\">"+JSON.stringify(result_info)+"</textarea>"
document.body.append(dom)
'''
```
#### File: hotpoor_autoclick_xhs/get_cdf/get_new.py
```python
import pyautogui
import time
import pyperclip
# DevTools (inspect element) panel docked at 921.6px
# 2022/01/15
urls = ["tomford-product.html?productId=400463&goodsId=524636&warehouseId=10","tomford-product.html?productId=413813&goodsId=537981&warehouseId=10","tomford-product.html?productId=438131&goodsId=562140&warehouseId=10","tomford-product.html?productId=424801&goodsId=548831&warehouseId=10","tomford-product.html?productId=416242&goodsId=540402&warehouseId=10","tomford-product.html?productId=419681&goodsId=543818&warehouseId=10","tomford-product.html?productId=413809&goodsId=537977&warehouseId=10","tomford-product.html?productId=400469&goodsId=524642&warehouseId=10","tomford-product.html?productId=404550&goodsId=528722&warehouseId=10",
"tomford-product.html?productId=426234&goodsId=550259&warehouseId=10","tomford-product.html?productId=425596&goodsId=549626&warehouseId=10","tomford-product.html?productId=413811&goodsId=537979&warehouseId=10","tomford-product.html?productId=414252&goodsId=538420&warehouseId=10","tomford-product.html?productId=414248&goodsId=538416&warehouseId=10","tomford-product.html?productId=412492&goodsId=536660&warehouseId=10","tomford-product.html?productId=407520&goodsId=531688&warehouseId=10","tomford-product.html?productId=400467&goodsId=524640&warehouseId=10","tomford-product.html?productId=407976&goodsId=532144&warehouseId=10",
"tomford-product.html?productId=406764&goodsId=530934&warehouseId=10","tomford-product.html?productId=406182&goodsId=530353&warehouseId=10","tomford-product.html?productId=404556&goodsId=528728&warehouseId=10","tomford-product.html?productId=400367&goodsId=524540&warehouseId=10","tomford-product.html?productId=400437&goodsId=524610&warehouseId=10"]
def pyautogui_action(action):
if action["name"] in ["move_to_click"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
elif action["name"] in ["select_all_and_write"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
time.sleep(1)
pyautogui.hotkey("ctrl", "a")
write_content = action.get("content","")
pyautogui.typewrite(write_content)
pyautogui.press('enter')
elif action["name"] in ["select_all_and_js_latest"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.press('backspace')
pyautogui.press('up')
pyautogui.press('enter')
elif action["name"] in ["select_all_and_copy"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "c")
elif action["name"] in ["select_all_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "v")
elif action["name"] in ["select_item_and_close_tab"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "w")
elif action["name"] in ["select_all_and_copy_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
write_content = action.get("content","")
pyperclip.copy(write_content)
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "v")
pyautogui.press('enter')
elif action["name"] in ["open_console"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("f12")
elif action["name"] in ["url_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
write_content = action.get("content","")
pyperclip.copy(write_content)
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "l")
pyautogui.hotkey("ctrl", "v")
pyautogui.press('enter')
print(action.get("action_name"))
action_sleep = action.get("sleep",0)
time.sleep(action_sleep)
for u in urls:
print(u)
page={
"x":608,
"y":65,
"sleep":3,
"name":"url_paste",
"content":"http://www.cdfgsanya.com/%s"%(u),
"action_name":"访问链接",
}
pyautogui_action(page)
action_item_click_list = [
{
"x": 1207,
"y": 176,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "清空console",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content":
'''
result=[]
result.push(document.getElementsByClassName("detail-box-title")[0].innerText)
result.push(document.getElementsByClassName("product-name")[0].innerText)
result.push(document.getElementsByClassName("product-code-value")[0].innerText)
result.push(document.getElementsByClassName("price-now")[0].innerText)
cxs=document.getElementsByClassName("promotion-item")
cxs_info = []
for (i=0;i<cxs.length;i++){
cxs_info.push(cxs[i].innerText)
}
ths=document.getElementsByClassName("property-item-title")
tds=document.getElementsByClassName("property-item-value")
kv={}
for (i=0;i<ths.length;i++){
kv[ths[i].innerText]=tds[i].innerText
}
result_info = {
"detail-box-title":result[0],
"product-name":result[1],
"product-code-value":result[2],
"price-now":result[3],
"promotion-item":cxs_info,
"property-item":kv,
}
''',
"action_name": "get店铺信息",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content":
"""
dom=document.createElement("div")
dom.id="wlb_cover"
dom.style.position="fixed"
dom.style.top="0px"
dom.style.right="0px"
dom.style.zIndex=9999999999999999999
""",
"action_name": "写入文本框textarea",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content": rf'dom.innerHTML="<textarea id=\"wlb_cover_textarea\">"+JSON.stringify(result_info)+"</textarea>"',
"action_name": "文本框展示",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content": 'document.body.append(dom)',
"action_name": "文本框展示",
},
{
"x": 1026,
"y": 149,
"sleep": 0.5,
"name": "select_all_and_copy",
"content": "",
"action_name": "copy"
},
{
"x": 457,
"y": 23,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "点击选项卡_pages",
},
{
"x": 445,
"y": 232,
"sleep": 0.5,
"name": "select_all_and_paste",
"content": "",
"action_name": "提交",
},
{
"x": 586,
"y": 244,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "submit",
},
{
"x": 137,
"y": 24,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "切换pgy页面",
},
]
for action_item_click in action_item_click_list:
pyautogui_action(action_item_click)
'''
result=[]
result.push(document.getElementsByClassName("shop-name")[0].innerText.split("\n")[0])
result.push(document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[0].getAttribute("class").split("mid-str")[1])
result.push(document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[1].innerText)
result.push(document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[2].innerText)
result.push(document.getElementsByClassName("tel")[0].innerText)
result.push(document.getElementsByClassName("address")[0].innerText)
result_info = {
"shop-name":result[0],
"star":result[1]*0.1,
"comment":result[2],
"consume":result[3],
"tel":result[4],
"address":result[5]
}
dom=document.createElement("div")
dom.id="wlb_cover"
dom.style.position="fixed"
dom.style.top="0px"
dom.style.right="0px"
dom.style.zIndex=9999999999999999999
dom.innerHTML="<textarea id=\"wlb_cover_textarea\">"+JSON.stringify(result_info)+"</textarea>"
document.body.append(dom)
shop-name = document.getElementsByClassName("shop-name")[0].innerText.split("\n")[0]
star = document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[0].getAttribute("class").split("mid-str")[1]
comment = document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[1].innerText
consume = document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[2].innerText
tel = document.getElementsByClassName("tel")[0].innerText
address = document.getElementsByClassName("address")[0].innerText
'''
```
#### File: hotpoor_autoclick_xhs/office/check_excel.py
```python
import xlrd
import json
filename1 = r"C:\Users\Terry\Desktop\SKUs.xls"
def reader():
data = xlrd.open_workbook(filename1)
table = data.sheets()[0]
company = []
for i in range(0,table.ncols):
company.append(table.col_values(i))
# print(table.col_values(i))
print(company[1])
# print(merged_cells)
reader()
```
#### File: office/excel/check_excel.py
```python
import xlrd
import json
filename = r"D:\github\1\hotpoor_autoclick_xhs\费用报销单模板-12月.xls"
filename1=r"D:\github\1\hotpoor_autoclick_xhs\费用报销单模板-12月.xlsx"
# on_demand=True
def reader():
data = xlrd.open_workbook(filename1)
table = data.sheets()[0]
merged_cells=[]
for i in table.merged_cells:
merged_cells.append(list(i))
company = []
for i in range(0,table.nrows):
company.append(table.row_values(i))
print(table.row_values(i))
# print(company)
# print(merged_cells)
reader()
```
#### File: office/Tornado/web.py
```python
import tornado.ioloop
import tornado.web
import sys
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("welcome!")
class ReadDemoHandler(tornado.web.RequestHandler):
def get(self):
        with open("demo.txt", "r") as f:
            self.write(f.read())
class ExcelDisplayHandler(tornado.web.RequestHandler):
def get(self):
self.render("../excel.html")
class DocxDisplayHandler(tornado.web.RequestHandler):
def get(self):
self.render("../python-docx读取doc.html")
def make_app():
return tornado.web.Application([
(r"/docx",DocxDisplayHandler),
(r"/demo",ReadDemoHandler),
(r"/excel",ExcelDisplayHandler),
(r"/",MainHandler)
])
if __name__ == "__main__":
a=sys.argv
print(a)
if len(a) >2 :
port=int(a[2])
else:
port=8888
app=make_app()
app.listen(port)
tornado.ioloop.IOLoop.current().start()
```
#### File: office/word/get_word.py
```python
import docx
from docx.shared import Length, Pt, RGBColor
file=docx.Document("test.docx")
text_list=[]
font_list=[]
def read_docx(file):
n=1
for p in file.paragraphs:
p_line = {
"paragraph":n,
"text":p.text,
"font_color":[]
}
for run in p.runs:
name = run.font.name
size =run.font.size
bold=run.bold
if bold in [None]:
bold = 0
else:
bold = 1
if name in[None]:
name = '宋体'
if name in "仿宋_GB2312":
name = '仿宋'
if size in [None]:
size = 200000
result_json = {
"color":list(run.font.color.rgb),
"text":run.text,
"size":size,
"name":name,
"bold":bold
}
p_line["font_color"].append(result_json)
font_list.append(p_line)
n+=1
return font_list
print('docx=',read_docx(file))
```
#### File: 2218084076/hotpoor_autoclick_xhs/picture_notes.py
```python
import requests
import json
from selenium import webdriver
import time
import os
import xlwt
driver = webdriver.Chrome()
excel = xlwt.Workbook(encoding='utf-8',style_compression=0)
url_img = 'https://www.qianshanghua.com/api/page/comment/load?chat_id=8e940b2524c843789d29278c8d2b8cdc&comment_id='
table = excel.add_sheet('1',cell_overwrite_ok=True)
url_list = ['http://xhslink.com/6mAmwe']
img_list = []
def download_image(img_url):
aim_url = img_url
print("download:", aim_url)
aim_response = requests.get(aim_url)
    t = int(round(time.time() * 1000))  # millisecond timestamp
f = open(os.path.join(os.path.dirname(__file__),'D:/images/%s.%s' % (time.time(), "jpg")), "ab")
f.write(aim_response.content)
f.close()
def get_lurl (url_img):
a = requests.get(url_img)
a = a.text
b = json.loads(a)
for comment in b["comments"]:
c = comment[4]
if "http://" in c:
d = "http://" + c.split(",")[1].split("http://")[1]
url_list.append(d)
# Parse the short links to the notes out of the JSON response
def get_img (url):
url_list=url
for u in url_list:
print(u)
driver.get(u)
time.sleep(8)
a1 = driver.find_element_by_xpath('/html/body/div/div/div/div/div[2]/div[1]/div[1]/div[2]')
b1 = a1.find_elements_by_tag_name("div")
for b in b1:
c1 = b.find_element_by_tag_name("i")
url_img = c1.get_attribute("style").split("https://")[1].split("\"")[0].split("/2/w")[0]
url_img = "http://"+url_img
print(url_img)
download_image(url_img)
def user_page (url):
url_list = url
for works_u in url_list:
driver.get(works_u)
time.sleep(6)
driver.find_element_by_xpath('//*[@id="app"]/div/div[2]/div[2]/div[1]/span').click()
# get_lurl(url_img)
# print(url_list)
get_img(url_list)
driver.quit()
```
#### File: hotpoor_autoclick_xhs/puco/get_id_list.py
```python
import pyautogui
import pyperclip
import time
def pyautogui_action(action):
if action["name"] in ["move_to_click"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
elif action["name"] in ["select_all_and_write"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
time.sleep(1)
pyautogui.hotkey("ctrl", "a")
write_content = action.get("content","")
pyautogui.typewrite(write_content)
pyautogui.press('enter')
elif action["name"] in ["select_all_and_js_latest"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.press('backspace')
pyautogui.press('up')
pyautogui.press('enter')
elif action["name"] in ["select_all_and_copy"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "x")
elif action["name"] in ["select_all_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "v")
elif action["name"] in ["select_item_and_close_tab"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "w")
elif action["name"] in ["select_all_and_copy_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
write_content = action.get("content","")
pyperclip.copy(write_content)
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "v")
pyautogui.press('enter')
elif action["name"] in ["open_console"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("f12")
elif action["name"] in ["esc"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("esc")
print(action.get("action_name"))
action_sleep = action.get("sleep",0)
time.sleep(action_sleep)
while True:
action_item_click_list = [
{
"x":1377,
"y":147,
"sleep":0.5,
"name":"move_to_click",
"content":"",
"action_name":"切换console",
},
{
"x":1204,
"y":172,
"sleep":0.5,
"name":"move_to_click",
"content":"",
"action_name":"清空信息console",
},
{
"x": 1282,
"y": 995,
"sleep": 2,
"name": "select_all_and_copy_and_paste",
"content":
'''
cards=document.getElementsByClassName("daren-card")
agree=[]
for (var i=0;i<cards.length;i++){
if (cards[i].getElementsByClassName("daren-card-status").length>0){
if (cards[i].getElementsByClassName("daren-card-status")[0].innerText=="已邀约"){
agree.push(cards[i].getAttribute("data-item-uid"))
}
}
}
console.log(agree)
dom=document.createElement("div")
dom.id="wlb_cover"
dom.style.position="fixed"
dom.style.top="0px"
dom.style.right="0px"
dom.style.zIndex=9999999999999999999
''',
"action_name": "获取页面所有博主ID",
},
{
"x": 1282,
"y": 995,
"sleep": 2,
"name": "select_all_and_copy_and_paste",
"content": r'''
if(JSON.stringify(agree) != '[]'){
dom.innerHTML="<textarea id=\"wlb_cover_textarea\">"+JSON.stringify(agree)+"</textarea>"
}
else{
dom.innerHTML="<textarea id=\"wlb_cover_textarea\"></textarea>"
}
''',
"action_name": "展示textarea文本框",
},
{
"x": 1282,
"y": 995,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content": 'document.body.append(dom)',
"action_name": "展示textarea文本框"
},
{
"x": 1023,
"y": 152,
"sleep": 0.5,
"name": "esc",
"content": "",
"action_name": "esc"
},
{
"x": 1023,
"y": 152,
"sleep": 0.5,
"name": "select_all_and_copy",
"content": "",
"action_name": "copy"
},
{
"x": 430,
"y": 17,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "点击选项卡",
},
{
"x": 527,
"y": 196,
"sleep": 1,
"name": "select_all_and_paste",
"content": '',
"action_name": "粘贴"
},
{
"x": 400,
"y": 282,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "submit",
},
{
"x": 97,
"y": 21,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "切换console",
},
{
"x": 1204,
"y": 172,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "清空信息console",
},
{
"x": 1282,
"y": 995,
"sleep": 1,
"name": "select_all_and_copy_and_paste",
"content": 'document.getElementsByClassName("ant-pagination-item-link")[2].click()',
"action_name": "下一页"
},
]
for action_item_click in action_item_click_list:
pyautogui_action(action_item_click)
'''
cards=document.getElementsByClassName("daren-card")
agree=[]
for (var i=0;i<cards.length;i++){
if(cards[i].getElementsByTagName("div").length==32){
if(cards[i].getElementsByTagName("div")[31].innerText=="同意合作"){
agree.push(cards[i].getAttribute("data-item-uid"))
else if()
}}}
'''
```
#### File: hotpoor_autoclick_xhs/puco/test1.py
```python
import pyautogui
import time
import pyperclip
import json
# 921.6
url_list=[]
news_urls = []
def pyautogui_action(action):
if action["name"] in ["move_to_click"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
elif action["name"] in ["select_all_and_copy"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "c")
elif action["name"] in ["select_all_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "a")
pyautogui.hotkey("ctrl", "v")
elif action["name"] in ["select_all_and_copy_and_paste"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
write_content = action.get("content","")
pyperclip.copy(write_content)
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("ctrl", "v")
pyautogui.press('enter')
elif action["name"] in ["open_console"]:
pyautogui.moveTo(x=action.get("x",None), y=action.get("y",None),duration=0, tween=pyautogui.linear)
pyautogui.click(x=action.get("x",None), y=action.get("y",None),clicks=1, button='left')
pyautogui.hotkey("f12")
print(action.get("action_name"))
action_sleep = action.get("sleep",0)
time.sleep(action_sleep)
for n in range(0,1):
for i in range(0,20):
pyautogui.moveTo(x=1209,y=178,duration=0.3)
pyautogui.click(x=1209,y=178, button='left')
pyautogui.moveTo(x=1209,y=178,duration=0.3)
pyautogui.click(x=1209,y=178,button='left')
pyautogui.moveTo(x=1324,y=819,duration=0.3)
pyautogui.click(x=1324,y=819,button='left')
pyperclip.copy('document.getElementsByClassName("daren-card")[%s].click()'%(i))
pyautogui.hotkey('ctrl','v')
        pyautogui.press('enter')
time.sleep(5)
pyautogui.moveTo(x=194,y=617,duration=0.3)
pyautogui.click(x=194,y=617, button='left')
pyautogui.moveTo(x=972,y=533,duration=0.3)
pyautogui.click(x=972,y=533, button='left')
pyautogui.moveTo(x=626,y=440,duration=0.3)
pyautogui.click(x=626,y=440, button='left')
pyautogui.moveTo(x=1162,y=906,duration=0.3)
pyautogui.click(x=1162,y=906, button='left')
pyautogui.moveTo(x=1723,y=976,duration=0.3)
pyautogui.click(x=1723,y=976,button='left')
time.sleep(1)
pyautogui.hotkey('ctrl', 'w')
time.sleep(0.5)
print('document.getElementsByClassName("daren-card")[%s]'%(i))
pyautogui.moveTo(x=1209, y=178, duration=0.3)
pyautogui.click(x=1209, y=178, button='left')
pyautogui.moveTo(x=1209, y=178, duration=0.3)
pyautogui.click(x=1209, y=178, button='left')
pyautogui.moveTo(x=1324, y=819, duration=0.3)
pyautogui.click(x=1324, y=819, button='left')
pyperclip.copy('document.getElementsByClassName("auxo-pagination-item-link")[2].click()')
pyautogui.hotkey('ctrl', 'v')
    pyautogui.press('enter')
    time.sleep(1)
'''
result=[]
result.push($(".daren-overview-base-nameblock").innerText)
result.push($(".daren-overview-base-traitblock").innerText)
result.push(document.getElementsByClassName("data-overview-dashboard-items-item__value")[0].innerText)
result.push(document.getElementsByClassName("data-overview-dashboard-items-item__value")[1].innerText)
result.push(document.getElementsByClassName("data-overview-dashboard-items-item__value")[2].innerText)
result.push(document.getElementsByClassName("data-overview-dashboard-items-item__value")[3].innerText)
result.push(document.getElementsByClassName("data-overview-dashboard-items-item__value")[4].innerText)
result.push(document.getElementsByClassName("data-overview-dashboard-items-item__value")[6].innerText)
result_info = {
"name":result[0],
"traitblock":result[1],
"frequency":result[2],
"days":result[3],
"promote":result[4],
"cooperation":result[5],
"people":result[6],
"sales":result[7],
}
'''
```
#### File: 2218084076/hotpoor_autoclick_xhs/taobao.py
```python
from selenium import webdriver
import time
import xlwt
import xlrd
import datetime
import random
excel = xlwt.Workbook(encoding='utf-8',style_compression=0)
table = excel.add_sheet('小红书博主及文章信息',cell_overwrite_ok=True)
driver = webdriver.Chrome()
driver.maximize_window()
driver.get('https://www.taobao.com/?spm=a1z02.1.1581860521.1.2qUVWK')
driver.find_element_by_xpath('//*[@id="q"]').send_keys('儿童玩具')
time.sleep(2)
driver.find_element_by_class_name('search-button').click()
print('扫码登陆')
time.sleep(20)
i=0
js = "window.scrollTo(0,document.body.scrollHeight)"
driver.execute_script(js)
time.sleep(4)
js = "window.scrollTo(0,document.body.scrollHeight)"
driver.execute_script(js)
print(i)
i+=1
a = driver.find_element_by_id('mainsrp-itemlist').find_element_by_class_name('items').find_elements_by_tag_name('div')
# [0].find_element_by_tag_name('a').get_attribute('href')
time.sleep(5)
print(len(a))
def get_url(tag_list):
s = 1
a = tag_list
    for n in range(0, len(a)):
t = random.randint(5,20)
url = a[n].find_element_by_tag_name('a').get_attribute('href')
time.sleep(t)
table.write(s,0,url)
print(f'num{s}',url)
excel.save(f"D:/Desktop/1.xls")
s+=1
driver.find_element_by_xpath('//*[@id="mainsrp-pager"]/div/div/div/ul/li[9]').click()
driver.quit()
```
#### File: www_local/controller/findmaster_tool.py
```python
import sys
import os
import os.path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/vendor/')
import re
import uuid
import time
import random
import string
import hashlib
import urllib
import copy
from functools import partial
import logging
import datetime
import markdown
import tornado
import tornado.web
import tornado.escape
import tornado.websocket
import tornado.httpclient
import tornado.gen
from tornado.escape import json_encode, json_decode
import nomagic
import nomagic.auth
import nomagic.block
from nomagic.cache import get_user, get_users, update_user, get_doc, get_docs, update_doc, get_aim, get_aims, update_aim, get_entity, get_entities, update_entity
from nomagic.cache import BIG_CACHE
from setting import settings
from setting import conn
# from user_agents import parse as uaparse  # used by KJ early on to detect the client device
from .base import WebRequest
from .base import WebSocket
import pymail
from .data import DataWebSocket
class CountConnectNumAPIHandler(WebRequest):
def get(self):
block_id = self.get_argument("aim_id",None)
connects = DataWebSocket.h_clients.get(block_id,None)
num = 0
exists = True
if not connects:
exists = False
if exists:
num = len(connects)
self.finish({"info":"ok","num":num,"exists":exists})
class AddToTopPageAPIHandler(WebRequest):
def post(self):
if not self.current_user:
self.finish({"info":"error","about":"not login"})
return
block_id = self.get_argument("aim_id",None)
user_id = self.current_user["id"]
user = get_aim(user_id)
pages = user.get("pages",[])
if block_id not in pages:
self.finish({"info":"error","about":"not in pages"})
return
pages_top = user.get("pages_top",[])
if block_id in pages_top and block_id == pages_top[0]:
self.finish({"info":"error","about":"already top"})
return
elif block_id in pages_top:
pages_top.remove(block_id)
pages_top.insert(0,block_id)
user["pages_top"]=pages_top
user["updatetime"]=int(time.time())
update_aim(user_id,user)
self.finish({"info":"ok","about":"add to top success"})
class RemoveFromTopPageAPIHandler(WebRequest):
def post(self):
if not self.current_user:
self.finish({"info":"error","about":"not login"})
return
block_id = self.get_argument("aim_id",None)
user_id = self.current_user["id"]
user = get_aim(user_id)
pages = user.get("pages",[])
if block_id not in pages:
self.finish({"info":"error","about":"not in pages"})
return
pages_top = user.get("pages_top",[])
if block_id not in pages_top:
self.finish({"info":"error","about":"already remove from top"})
return
else:
pages_top.remove(block_id)
pages.remove(block_id)
pages.insert(0,block_id)
user["pages_top"]=pages_top
user["pages"]=pages
user["updatetime"]=int(time.time())
update_aim(user_id,user)
self.finish({"info":"ok","about":"remove from top success"})
class RemoveToTrashAPIHandler(WebRequest):
def post(self):
if not self.current_user:
self.finish({"info":"error","about":"no login"})
return
user_id = self.current_user["id"]
block_id = self.get_argument("aim_id",None)
user = get_aim(user_id)
pages = user.get("pages",[])
pages_top_ids = user.get("pages_top",[])
pages_trash_ids = user.get("pages_trash",[])
update_need = False
if block_id in pages:
update_need = True
pages.remove(block_id)
if block_id in pages_top_ids:
update_need = True
pages_top_ids.remove(block_id)
user["pages"]=pages
user["pages_top"]=pages_top_ids
if not update_need:
self.finish({"info":"error","about":"not in already"})
return
if block_id not in pages_trash_ids:
pages_trash_ids.insert(0,block_id)
user["pages_trash"]=pages_trash_ids
user["updatetime"]=int(time.time())
update_aim(user_id,user)
self.finish({"info":"ok","about":"remove success"})
```
#### File: www_local/nomagic/group.py
```python
import time
import datetime
import pickle
import uuid
import binascii
import zlib
#import gzip
import hashlib
import json
import random
import string
# import __init__ as nomagic
import nomagic
from setting import conn
def create_chat(group):
group["type"] = "group"
group["owner"] = group.get("owner", "")
group["owner_type"] = group.get("owner_type", "entity")
group["editors"] = group.get("editors",[])
group["helpers"] = group.get("helpers",{})
group["comment_members"] = group.get("comment_members",[])
group["notifyers"] = group.get("notifyers",[])
group["blackers"] = group.get("blackers",[])
group["datetime"] = datetime.datetime.now().isoformat()
group["createtime"] = int(time.time())
new_id = nomagic._new_key()
while True:
new_group = nomagic._get_entity_by_id(new_id)
if not new_group:
break
else:
new_id = nomagic._new_key()
rowcount = nomagic._node(new_id).execute_rowcount("INSERT INTO entities (id, body) VALUES(%s, %s)", new_id, nomagic._pack(group))
assert rowcount
return (new_id, group)
def update_group(group_id, data):
rowcount = nomagic._node(group_id).execute_rowcount("UPDATE entities SET body = %s WHERE id = %s", nomagic._pack(data), nomagic._key(group_id))
assert rowcount
``` |