id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses 1 value)
---|---|---|
8028315
|
<gh_stars>10-100
import torch
import numpy as np
class MedianImageMeter(object):
def __init__(self, bit_depth, im_shape, device='cpu'):
self.bit_depth = bit_depth
self.im_shape = list(im_shape)
self.device = device
if bit_depth == 8:
self.dtype = np.uint8
elif bit_depth == 16:
self.dtype = np.uint16
else:
raise NotImplementedError(
"MedianMeter cannot find the median of non 8/16 bit-depth images.")
self.reset()
def reset(self):
self.freqs = self.make_freqs_array()
def add(self, val, mask=1):
self.val = torch.LongTensor( val.astype(np.int64).flatten()[np.newaxis,:] ).to(self.device)
if type(mask) == int:
mask = torch.IntTensor(self.val.size()).fill_(mask).to(self.device)
else:
mask = torch.IntTensor(mask.astype(np.int32).flatten()[np.newaxis,:]).to(self.device)
self.freqs.scatter_add_(0, self.val, mask)
self.saved_val = val
def value(self):
self._avg = np.cumsum(
self.freqs.cpu().numpy(),
axis=0)
self._avg = np.apply_along_axis(
lambda a: a.searchsorted(a[-1] / 2.),
axis=0,
arr=self._avg)\
.reshape(tuple([-1] + self.im_shape))
return np.squeeze(self._avg, 0)
def make_freqs_array(self):
# freqs has shape N_categories x W x H x N_channels
shape = tuple([2**self.bit_depth] + self.im_shape)
freqs = torch.IntTensor(shape[0], int(np.prod(shape[1:]))).zero_()
return freqs.to(self.device)
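# --- Usage sketch (not part of the original source): accumulate a running
# per-pixel median over a stream of random 8-bit frames; shape is illustrative.
if __name__ == '__main__':
    meter = MedianImageMeter(bit_depth=8, im_shape=(4, 4, 3))
    for _ in range(5):
        frame = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.uint8)
        meter.add(frame)
    print(meter.value().shape)  # (4, 4, 3): the per-pixel median image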
|
StarcoderdataPython
|
6658403
|
"""
Contains tests for the fields included in selenium_yaml.fields
"""
from selenium_yaml import fields
from selenium_yaml import exceptions
import pytest
import os
class FieldTestMixin:
""" Contains common methods for testing field validation successs or
failure
"""
def is_successful_validation(self, field, value, step=None):
""" Uses basic assertions to test that the validation was a success
Parameters
----------
field : An instance of a field derived from
selenium_yaml.fields.Field
value : The value that should be passed on to the field
"""
assert field.validate(value) == value
assert isinstance(field.errors, list)
assert len(field.errors) == 0
assert field.value == value
def is_unsuccessful_validation(self, field, value, step=None):
""" Uses basic assertions to test that the validation was a failure
Parameters
----------
field : An instance of a field derived from
selenium_yaml.fields.Field
value : The value that should be passed on to the field
"""
with pytest.raises(exceptions.ValidationError) as _:
field.validate(value)
assert isinstance(field.errors, list)
assert len(field.errors) > 0
class TestCharField(FieldTestMixin):
""" Contains tests for the char field for required, default,
max_length, options and type
"""
def test_char_field_type(self):
""" Tests that the char field fails with a non-string value """
field = fields.CharField()
self.is_unsuccessful_validation(field, 0)
def test_required_char_field_without_default_on_null(self):
""" Tests that a required char field fails without a default on
a null value
"""
field = fields.CharField(required=True)
self.is_unsuccessful_validation(field, None)
def test_required_char_field_with_default_on_null(self):
""" Tests that a required char field succeeds with a valid string
default on a null value
"""
field = fields.CharField(required=True, default="Test")
self.is_successful_validation(field, field.default)
def test_char_field_max_length_exceeded(self):
""" Tests that the char field validation fails if the value is longer
than the max length
"""
field = fields.CharField(max_length=3)
self.is_unsuccessful_validation(field, "Test")
def test_char_field_max_length_valid(self):
""" Tests that the char field validation succeeds if the value is not
longer than the max length
"""
field = fields.CharField(max_length=6)
self.is_successful_validation(field, "Test")
def test_char_field_fails_on_non_option_member(self):
""" Tests that the char field validation fails if a non-option is
passed in
"""
options = ["Test", "4"]
field = fields.CharField(max_length=6, options=options)
self.is_unsuccessful_validation(field, "Fail")
def test_char_field_succeeds_on_option_member(self):
""" Tests that the char field validation succeeds if a valid option is
passed in
"""
options = ["Test", "4"]
field = fields.CharField(max_length=6, options=options)
self.is_successful_validation(field, "Test")
class TestIntegerField(FieldTestMixin):
""" Contains tests for the integer field for required, default and type """
def test_integer_field_type(self):
""" Tests that the integer field fails with a non-integer value """
field = fields.IntegerField()
self.is_unsuccessful_validation(field, "Test")
def test_required_integer_field_without_default_on_null(self):
""" Tests that a required integer field fails without a default """
field = fields.IntegerField(required=True)
self.is_unsuccessful_validation(field, None)
def test_required_integer_field_with_default_on_null(self):
""" Tests that a required integer field succeeds with a valid int
default on a null value
"""
field = fields.IntegerField(required=True, default=10)
self.is_successful_validation(field, field.default)
class TestBooleanField(FieldTestMixin):
""" Contains tests for the Boolean field for required, default and type """
def test_boolean_field_type(self):
""" Tests that the Boolean field fails with a non-boolean value """
field = fields.BooleanField()
self.is_unsuccessful_validation(field, "Test")
def test_required_boolean_field_without_default_on_null(self):
""" Tests that a required Boolean field fails without a default """
field = fields.BooleanField(required=True)
self.is_unsuccessful_validation(field, None)
def test_required_boolean_field_with_default_on_null(self):
""" Tests that a required boolean field succeeds with a valid bool
default on a null value
"""
field = fields.BooleanField(required=True, default=True)
self.is_successful_validation(field, field.default)
class TestDictField(FieldTestMixin):
""" Contains tests for the dict field for required, default and type """
def test_dict_field_type(self):
""" Tests that the dict field fails with a non-dict value """
field = fields.DictField()
self.is_unsuccessful_validation(field, "Test")
def test_required_dict_field_without_default_on_null(self):
""" Tests that a required dict field fails without a default """
field = fields.DictField(required=True)
self.is_unsuccessful_validation(field, None)
def test_required_dict_field_with_default_on_null(self):
""" Tests that a required dict field succeeds with a valid dict
default on a null value
"""
field = fields.DictField(required=True, default={"test": 1})
self.is_successful_validation(field, field.default)
class TestFilePathField(FieldTestMixin):
""" Contains tests for the FilePathField """
def test_invalid_on_invalid_filepath(self):
""" Tests that the validation fails on non-existent fpaths """
value = os.path.join(os.getcwd(), "thispathshouldnotexist.txt")
field = fields.FilePathField()
self.is_unsuccessful_validation(field, value)
def test_validator_on_valid_filepath(self):
""" Tests that the validation succeeds on valid fpaths """
value = os.path.join(os.getcwd(), ".gitignore")
field = fields.FilePathField()
self.is_successful_validation(field, value)
|
StarcoderdataPython
|
9708914
|
import csv
from urllib.request import Request, urlopen
import dateutil.parser
import re
from sys import argv
from bs4 import BeautifulSoup
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
base_url += 'index.cfm'
report_path = ['?show=10&mid=7', '?show=10&mid=8']
strip_char = ';,.# \n\t'
def get_sale_title(this_report):
"""Return the title of the livestock sale."""
title_string = this_report.find('tr').find('td').find('span').get_text()
try:
separator = re.compile('from', flags=re.IGNORECASE)
topprice, sale_title = separator.split(title_string)
sale_title = sale_title.strip(strip_char)
except ValueError:
sale_title = title_string
return sale_title
def get_sale_date(this_report):
"""Return the date of the livestock sale."""
tr = this_report.find_all('tr')
dateheader_string = tr[1].find('td').string
dateheader_string = re.sub(r'\s',' ',dateheader_string)
date_string, head_string = re.split(r' {2,}', dateheader_string)
sale_date = dateutil.parser.parse(date_string)
return sale_date
def get_sale_head(this_report):
"""Return the head of the livestock sale."""
tr = this_report.find_all('tr')
dateheader_string = tr[1].find('td').string
dateheader_string = re.sub(r'\s',' ',dateheader_string)
date_string, head_string = re.split(r' {2,}', dateheader_string)
match = re.search(r'([0-9]+)', head_string)
if match:
return match.group(1)
def is_empty(this_line):
empty = True
for x in this_line:
if is_not_blank(x.string):
empty = False
break
return empty
def is_description(this_line):
"""Determine whether a given line is a description of the sale."""
non_blank_word = list(idx for idx in range(len(this_line)) if is_not_blank(this_line[idx].string))
is_succinct = len(non_blank_word) < 3
is_not_bolded = True
for x in this_line:
if x.find('strong'):
is_not_bolded = False
break
return bool(is_succinct and is_not_bolded)
def is_heading(this_line):
"""Determine whether a given line is a section header
that describes subsequent lines of a report.
"""
cattle_clue = '(bulls?|steers?|cows?|heiferettes?|heifers?|calves|pairs?)'
has_cattle = re.search(cattle_clue, this_line[0].string, re.IGNORECASE)
non_blank_word = list(idx for idx in range(len(this_line)) if is_not_blank(this_line[idx].string))
is_succinct = len(non_blank_word) < 3
is_bolded = False
for x in this_line:
if x.find('strong'):
is_bolded = True
break
return bool(has_cattle and is_succinct and is_bolded)
def is_sale(this_line):
"""Determine whether a given line describes a sale of cattle."""
has_price = False
for x in this_line:
if re.search(r'[0-9]+\.[0-9]{2}', x.string):
has_price = True
break
non_blank_word = list(idx for idx in range(len(this_line)) if is_not_blank(this_line[idx].string))
is_not_succinct = len(non_blank_word) > 3
return bool(has_price and is_not_succinct)
def get_sale_location(word):
"""Convert address strings into a list of address components."""
sale_location = ' '.join(word)
if ',' in sale_location:
sale_location = sale_location.split(',')
else:
match = re.search(r'(.*?)(' + scrape_util.state + ')', sale_location)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [sale_location]
return sale_location
def is_number(string):
"""Test whether a string is numeric. Ignoring units like 'cwt' and 'hd'."""
string = re.sub(r'\$|[,-/]|#|cwt|he?a?d?', '', string, flags = re.IGNORECASE)
try:
float(string)
return True
except ValueError:
return False
def is_not_blank(string):
"""Test whether a string is not blank."""
string = re.sub(r'\s','',string)
if string == '':
return False
else:
return True
def get_sale(word, cattle):
"""Convert the input into a dictionary, with keys matching
the CSV column headers in the scrape_util module.
"""
number_word = list(idx for idx in range(len(word)) if is_number(word[idx]))
sale_location = get_sale_location(word[:1])
sale = {
'consignor_city': sale_location.pop(0).strip(strip_char).title(),
'cattle_cattle': cattle + ' ' + word[2]
}
if sale_location:
sale['consignor_state'] = sale_location.pop().strip(strip_char)
head_string = word[number_word[0]].strip(strip_char).replace(',','')
try:
float(head_string)
sale.update({'cattle_head': head_string})
except ValueError:
pass
if len(number_word) > 2:
weight_string = word[number_word[1]].strip(strip_char).replace(',', '')
try:
float(weight_string)
sale.update({'cattle_avg_weight': weight_string})
except ValueError:
pass
price_string = word[number_word[len(number_word)-1]]
match = False
if not match:
match = re.search(r'([0-9,.]+) ?/?he?a?d?', price_string, re.IGNORECASE)
key = 'cattle_price'
if not match:
match = re.search(r'([0-9,.]+) ?/?c?w?t?', price_string, re.IGNORECASE)
key = 'cattle_price_cwt'
if match:
sale[key] = match.group(1).replace(',','').strip(strip_char)
sale = {k:v for k,v in sale.items() if v}
return sale
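# --- Illustration (not from the original source): a hypothetical `word` list as
# built by write_sale from one table row; the real column layout depends on the
# scraped report. A call like
#   get_sale(['smith ranch, mt', '10', 'blk strs', '550', '165.00'], 'steers')
# could yield something like:
#   {'consignor_city': 'Smith Ranch', 'consignor_state': 'mt',
#    'cattle_cattle': 'steers blk strs', 'cattle_head': '10',
#    'cattle_avg_weight': '550', 'cattle_price': '165.00'}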
def write_sale(line, this_default_sale, writer):
"""Extract sales from a list of report lines and write them to a CSV file."""
cattle = ''
for this_line in line:
if is_empty(this_line):
pass
elif is_description(this_line):
pass
elif is_heading(this_line):
cattle = this_line[0].string
elif is_sale(this_line):
sale = this_default_sale.copy()
word = []
for x in this_line:
word.append(x.string)
sale.update(get_sale(word, cattle))
writer.writerow(sale)
def main():
# Get URLs for all reports
for this_report_path in report_path:
request = Request(
base_url + this_report_path,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
# content = soup.find('table', id = 'mainWrapper')
# report = content.find('table').find('table').find_all('table')
report = [table for table in soup.find_all('tbody') if not table.tbody]
# Locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
# Write a CSV file for each report not in the archive
for this_report in report:
sale_date = get_sale_date(this_report)
io_name = archive.new_csv(sale_date)
# Skip iteration if this report is already archived
if not io_name:
continue
# Initialize the default sale dictionary
this_default_sale = default_sale.copy()
sale_title = get_sale_title(this_report)
sale_head = get_sale_head(this_report)
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
'sale_title': sale_title,
'sale_head': sale_head,
})
# Read the report text into a list of lines
line1 = []
line2 = []
for tr in this_report.find_all('tr'):
td = tr.find_all('td')
if len(td) == 11:
pass
else:
for x in range(10):
try:
if int(td[x]['colspan']) > 1:
for y in range(x+1, x+int(td[x]['colspan'])):
newtag = soup.new_tag("td")
newtag.string = ' '
td.insert(y,newtag)
except KeyError:
continue
newline = [td[idx] for idx in range(5)]
newline2 = [td[idx] for idx in range(6, 11)]
line1.append(newline)
line2.append(newline2)
line = line1 + line2
line[0:2] = []
# Open a new CSV file and write each sale
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3420968
|
# -*- coding: utf-8 -*-
import tensorflow as tf
def fixed_dropout(xs, keep_prob, noise_shape, seed=None):
"""
Apply dropout with same mask over all inputs
Args:
xs: list of tensors
keep_prob:
noise_shape:
seed:
Returns:
list of dropped inputs
"""
with tf.name_scope("dropout", values=xs):
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, seed=seed, dtype=xs[0].dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
outputs = []
for x in xs:
ret = tf.div(x, keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
outputs.append(ret)
return outputs
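# --- Usage sketch (not part of the original source; assumes a TF 1.x runtime,
# since tf.random_uniform and tf.div are TF1 APIs): every tensor in the list is
# zeroed at the same positions, unlike independent tf.nn.dropout calls.
xs = [tf.ones([32, 128]) for _ in range(10)]  # e.g. 10 RNN timesteps
dropped = fixed_dropout(xs, keep_prob=0.8, noise_shape=[32, 128])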
|
StarcoderdataPython
|
8125329
|
<filename>leetcode/0024/answer.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/12/6 12:17
# @Author : weihuchao
class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution(object):
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next:
return head
head_next = head.next
head.next = self.swapPairs(head_next.next)
head_next.next = head
return head_next
if __name__ == '__main__':
# Solution().swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
Solution().swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4)))))
|
StarcoderdataPython
|
200957
|
<reponame>Chemscribbler/Netrunner
import requests
import csv
import json
root_address = "https://netrunnerdb.com/api/2.0/"
# For pulling from next rotation (AKA Mumbad-Gateway)
valid_codes = [
"sansan",
"honor-and-profit",
"order-and-chaos",
"data-and-destiny",
"mumbad",
"flashpoint",
"red-sand",
"kitara",
"reign-and-reverie",
"magnum-opus",
"ashes",
"sc19",
]
# Requesting all cards, then filtering on valid codes. Will add each card name, side, and faction to a dictionary
# response = requests.get(root_address+"public/cards")
# with open("cards.json",'w',newline='') as jsonfile:
# json.dump(response.json(),jsonfile)
# response = requests.get(root_address+"public/packs")
# with open("packs.json",'w',newline='') as jsonfile:
# json.dump(response.json(),jsonfile)
with open("packs.json") as f:
all_packs = json.load(f)
filtered_packs = []
for pack in all_packs["data"]:
if pack["cycle_code"] in valid_codes:
filtered_packs.append(pack["code"])
with open("cards.json") as f:
all_cards = json.load(f)
legal_cards = {}
for card in all_cards["data"]:
if card["pack_code"] in filtered_packs:
legal_cards[card["title"]] = {
"faction": card["faction_code"],
"card_type": card["type_code"],
}
with open("legal_cards.csv", "w", newline="", encoding="utf-8") as f:
w = csv.writer(f)
for key, value in legal_cards.items():
w.writerow([key, value["faction"], value["card_type"]])
|
StarcoderdataPython
|
1824890
|
<reponame>stefets/mpyg321
import pexpect
import sys
from threading import Thread
from pexpect import exceptions
import time
mpg_outs = [
{
"mpg_code": "@P 0",
"action": "music_stop",
"description": """For mpg123, it corresponds to any stop
For mpg312 it corresponds to user stop only"""
},
{
"mpg_code": "@P 1",
"action": "user_pause",
"description": "Music has been paused by the user."
},
{
"mpg_code": "@P 2",
"action": "user_start_or_resume",
"description": "Music has been started resumed by the user."
},
{
"mpg_code": "@P 3",
"action": "end_of_song",
"description": "Player has reached the end of the song."
},
{
"mpg_code": "@E *",
"action": "error",
"description": "Player has encountered an error."
},
{
"mpg_code": "@silence",
"action": None,
"description": "Player has been silenced by the user."
},
{
"mpg_code": r"@V [0-9\.\s%]*",
"action": None,
"description": "Volume change event.",
},
{
"mpg_code": r"@S [a-zA-Z0-9\.\s-]*",
"action": None,
"description": "Stereo info event."
},
{
"mpg_code": "@I *",
"action": None,
"description": "Information event."
},
{
"mpg_code": pexpect.TIMEOUT,
"action": None,
"description": "Timeout event."
},
{
"mpg_code": "@mute",
"action": "user_mute",
"description": "Player has been muted by the user."
},
{
"mpg_code": "@unmute",
"action": "user_unmute",
"description": "Player has been unmuted by the user."
},
]
mpg_codes = [v["mpg_code"] for v in mpg_outs]
mpg_errors = [
{
"message": "empty list name",
"action": "generic_error"
},
{
"message": "No track loaded!",
"action": "generic_error"
},
{
"message": "Error opening stream",
"action": "file_error"
},
{
"message": "failed to parse given eq file:",
"action": "file_error"
},
{
"message": "Corrupted file:",
"action": "file_error"
},
{
"message": "Unknown command:",
"action": "command_error"
},
{
"message": "Unfinished command:",
"action": "command_error"
},
{
"message": "Unknown command or no arguments:",
"action": "argument_error"
},
{
"message": "invalid arguments for",
"action": "argument_error"
},
{
"message": "Missing argument to",
"action": "argument_error"
},
{
"message": "failed to set eq:",
"action": "eq_error"
},
{
"message": "Error while seeking",
"action": "seek_error"
},
]
suitable_versions = ["mpg123", "mpg321"]
# # # Errors # # #
class MPyg321Error(RuntimeError):
"""Base class for any errors encountered by the player during runtime"""
pass
class MPyg321FileError(MPyg321Error):
"""Errors encountered by the player related to files"""
pass
class MPyg321CommandError(MPyg321Error):
"""Errors encountered by the player related to player commands"""
pass
class MPyg321ArgumentError(MPyg321Error):
"""Errors encountered by the player related to arguments for commands"""
pass
class MPyg321EQError(MPyg321Error):
"""Errors encountered by the player related to the equalizer"""
pass
class MPyg321SeekError(MPyg321Error):
"""Errors encountered by the player related to the seek"""
pass
class MPyg321WrongPlayerPathError(MPyg321Error):
"""Errors encountered when a wrong player path is provided in the
constructor"""
pass
class MPyg321NoPlayerFoundError(MPyg321Error):
"""Errors encountered when no suitable player is found"""
pass
class PlayerStatus:
INSTANCIATED = 0
PLAYING = 1
PAUSED = 2
RESUMING = 3
STOPPING = 4
STOPPED = 5
QUITTED = 6
class MPyg321Player:
"""Main class for mpg321 player management"""
player = None
player_version = "mpg123"
status = None
output_processor = None
song_path = ""
loop = False
performance_mode = True
def __init__(self, player=None, audiodevice=None, performance_mode=True):
"""Builds the player and creates the callbacks"""
self.set_player(player, audiodevice)
self.output_processor = Thread(target=self.process_output)
self.output_processor.daemon = True
self.performance_mode = performance_mode
self.output_processor.start()
self.silence_mpyg_output()
def set_version_and_get_player(self, player):
"""Gets the player """
version_process = None
valid_player = None
if player is not None:
try:
version_process = pexpect.spawn(str(player) + " --version")
valid_player = str(player)
except pexpect.exceptions.ExceptionPexpect:
raise MPyg321WrongPlayerPathError(
"""Invalid file path provided""")
else:
try:
version_process = pexpect.spawn("mpg123 ---version")
valid_player = "mpg123"
except pexpect.exceptions.ExceptionPexpect:
try:
version_process = pexpect.spawn("mpg321 --version")
valid_player = "mpg321"
except pexpect.exceptions.ExceptionPexpect:
raise MPyg321NoPlayerFoundError(
"""No suitable player found""")
index = version_process.expect(suitable_versions)
try:
self.player_version = suitable_versions[index]
except IndexError:
raise MPyg321NoPlayerFoundError("""No suitable player found""")
return valid_player
def set_player(self, player, audiodevice):
"""Sets the player"""
player = self.set_version_and_get_player(player)
args = "--remote" if self.player_version == "mpg123" else "-R test"
args += " --audiodevice " + audiodevice if audiodevice else ""
self.player = pexpect.spawn(str(player) + " " + args)
self.player.delaybeforesend = None
self.status = PlayerStatus.INSTANCIATED
def process_output(self):
"""Parses the output"""
while True:
index = self.player.expect(mpg_codes)
action = mpg_outs[index]["action"]
if action == "music_stop":
self.on_music_stop_int()
if action == "user_pause":
self.on_user_pause_int()
if action == "user_start_or_resume":
self.on_user_start_or_resume_int()
if action == "end_of_song":
self.on_end_of_song_int()
if action == "user_mute":
self.on_user_mute()
if action == "user_unmute":
self.on_user_unmute()
if action == "error":
self.on_error()
def play_song(self, path, loop=False):
"""Plays the song"""
self.loop = loop
self.set_song(path)
self.play()
def play(self):
"""Starts playing the song"""
self.player.sendline("LOAD " + self.song_path)
self.status = PlayerStatus.PLAYING
def pause(self):
"""Pauses the player"""
if self.status == PlayerStatus.PLAYING:
self.player.sendline("PAUSE")
self.status = PlayerStatus.PAUSED
def resume(self):
"""Resume the player"""
if self.status == PlayerStatus.PAUSED:
self.player.sendline("PAUSE")
self.on_user_resume()
def stop(self):
"""Stops the player"""
self.player.sendline("STOP")
if self.player_version == "mpg321":
self.status = PlayerStatus.STOPPED
else:
self.status = PlayerStatus.STOPPING
def quit(self):
"""Quits the player"""
self.player.sendline("QUIT")
self.status = PlayerStatus.QUITTED
def jump(self, pos):
"""Jump to position"""
self.player.sendline("JUMP " + str(pos))
def volume(self, percent):
"""Adjust player's volume"""
if self.player_version == "mpg123":
self.player.sendline("VOLUME {}".format(percent))
if self.player_version == "mpg321":
self.player.sendline("GAIN {}".format(percent))
def mute(self):
"""Mutes the player"""
if self.player_version == "mpg123":
self.player.sendline("MUTE")
def unmute(self):
"""Unmutes the player"""
if self.player_version == "mpg123":
self.player.sendline("UNMUTE")
def silence_mpyg_output(self):
"""Improves performance by silencing the mpg123 process frame output"""
if self.player_version == "mpg123" and not self.performance_mode:
self.player.sendline("SILENCE")
def load_list(self, entry, filepath):
"""Load an entry in a list
Parameters:
entry (int): index of the song in the list - first is 0
filepath: URL/Path to the list
"""
if self.player_version == "mpg123":
self.player.sendline("LOADLIST {} {}".format(entry, filepath))
self.status = PlayerStatus.PLAYING
def on_error(self):
"""Process errors encountered by the player"""
output = self.player.readline().decode("utf-8")
# Check error in list of errors
for mpg_error in mpg_errors:
if mpg_error["message"] in output:
action = mpg_error["action"]
if action == "generic_error":
raise MPyg321Error(output)
if action == "file_error":
raise MPyg321FileError(output)
if action == "command_error":
raise MPyg321CommandError(output)
if action == "argument_error":
raise MPyg321ArgumentError(output)
if action == "eq_error":
raise MPyg321EQError
if action == "seek_error":
raise MPyg321SeekError
# Some other error occurred
raise MPyg321Error(output)
def set_song(self, path):
"""song_path setter"""
self.song_path = path
def set_loop(self, loop):
""""loop setter"""
self.loop = loop
# # # Internal Callbacks # # #
def on_music_stop_int(self):
"""Internal callback when user stops the music"""
if self.player_version == "mpg123":
if self.status == PlayerStatus.STOPPING:
self.status = PlayerStatus.STOPPED
self.on_user_stop_int()
else:
# If not stopped by the user, it is the end of the song
# the on_any_stop function is called inside on_end_of_song_int
self.on_end_of_song_int()
else:
self.on_user_stop_int()
def on_user_stop_int(self):
"""Internal callback when the user stops the music."""
self.on_any_stop()
self.on_user_stop()
def on_user_pause_int(self):
"""Internal callback when user pauses the music"""
self.on_any_stop()
self.on_user_pause()
def on_user_start_or_resume_int(self):
"""Internal callback when user resumes the music"""
self.status = PlayerStatus.PLAYING
def on_end_of_song_int(self):
"""Internal callback when the song ends"""
if(self.loop):
self.play()
else:
# The music doesn't stop if it is looped
self.on_any_stop()
self.on_music_end()
# # # Public Callbacks # # #
def on_any_stop(self):
"""Callback when the music stops for any reason"""
pass
def on_user_pause(self):
"""Callback when user pauses the music"""
pass
def on_user_resume(self):
"""Callback when user resumes the music"""
pass
def on_user_stop(self):
"""Callback when user stops music"""
pass
def on_user_mute(self):
"""Callback when user mutes player"""
pass
def on_user_unmute(self):
"""Callback when user unmutes player"""
pass
def on_music_end(self):
"""Callback when music ends"""
pass
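# --- Usage sketch (not part of the original module): the public callbacks above
# are meant to be overridden in a subclass; the mp3 path is hypothetical and
# mpg123 or mpg321 must be installed for the commented calls to work.
class DemoPlayer(MPyg321Player):
    def on_music_end(self):
        print("song finished")

# player = DemoPlayer()
# player.play_song("/tmp/song.mp3")
# time.sleep(5)
# player.pause()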
|
StarcoderdataPython
|
9769738
|
import mayaUtils
from selection import Selection
#import vertexColorUtils
|
StarcoderdataPython
|
11213863
|
from django.apps import AppConfig
class JssConfig(AppConfig):
name = 'jss'
|
StarcoderdataPython
|
8042308
|
<filename>Curso_de_Python/Mundo_01/Aula_08/Exercicios/ex016.py
# Corrected
# Import block
from math import trunc
print('Exercício 016')
print()
# Input block
num = float(input('Informe um número real: '))
# Calculation block
inteiro = trunc(num)
# Output block
print('A porção inteira de {} é igual a {}.'.format(num, inteiro))
# Discarded block
'''num = float(input('Informe um número qualquer: '))
print('O valor digitado foi {} e sua porção inteira é {}.'.format(num, int(num)))'''
|
StarcoderdataPython
|
5127778
|
class Solution:
def longestValidParentheses(self, s: str) -> int:
left=0
tag=[]
ready=[]
i=0
for t in s:
if t=='(':
left+=1
ready.append(i)  # index of this '('
tag.append('1')
i+=1
elif t==')':
if left>0:
left-=1
tag[ready[len(ready)-1]]='2'
ready.pop()  # matched pair, pop from the stack
else:  # unmatched right parenthesis
ready.append(i)
tag.append('1')
i+=1
# '1' marks an unmatched position, '2' a matched '('; splitting on '1' leaves
# runs of '2', each contributing length 2 to a valid substring
tag=[len(item)*2 for item in ''.join(tag).split('1')]
return sorted(tag)[-1]
obj=Solution()
print(obj.longestValidParentheses(")()())"))
|
StarcoderdataPython
|
4861693
|
<gh_stars>0
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.base import TemplateView
from django.contrib import admin
admin.autodiscover()
import settings
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name="homepage.html"), name="home"),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': settings.LOGIN_URL}),
url(r'^accounts/', include('registration.urls')),
url(r'^qa/', include('qatrack.qa.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('genericdropdown.urls')),
)
|
StarcoderdataPython
|
3566779
|
from __future__ import division, print_function, unicode_literals
import os
from os import listdir
import json
import sys
import random
import cv2
import pickle
import numpy as np
seed = 13
random.seed(seed)
np.random.seed(seed)
sys.path.insert(0, "..")
from utils import Timer
from recognition.constants import (
HEIGHT,
WIDTH,
DATASET_FOLDER,
PICKLE_DATASET,
DATA_NAME,
RANDOM_IDX)
class Dataset():
def __init__(self, data_folder, data_name):
# load dataset from pickle files
self.X = None
self.Y = None
self._load_data(data_folder, data_name)
def get_dataset(self):
if not RANDOM_IDX:
idx_list = list(range(self.X.shape[0]))
random.shuffle(idx_list)
with open("random_idx", "w") as f:
f.write(json.dumps(idx_list))
else:
with open("random_idx", "r") as f:
idx_list = json.loads(f.read())
idx_list = np.array(idx_list, dtype='int')
m = idx_list.shape[0]
X = np.zeros((m, HEIGHT, WIDTH, 1), dtype='float32')
Y = np.zeros((m, self.Y.shape[1]), dtype='int')
for i in range(m):
X[i] = self.X[idx_list[i]]
Y[i] = self.Y[idx_list[i]]
return X, Y
def _load_data(self, data_folder, data_name):
with open(data_folder + data_name, "rb") as f:
self.X = pickle.load(f)
self.Y = pickle.load(f)
if __name__ == "__main__": # process raw data
all_labels = listdir(DATASET_FOLDER)
all_labels.sort()
X = np.zeros((0, HEIGHT * WIDTH))
Y = np.zeros((0, len(all_labels)), dtype='int')
t = Timer()
t.start("Processing...")
for i in range(len(all_labels)):
all_names = []
all_img = listdir(DATASET_FOLDER + all_labels[i])
all_names = np.append(all_names, ["{}/{}".format(all_labels[i], k) for k in all_img])
m = all_names.shape[0]
for j in range(m):
img_src = DATASET_FOLDER + all_names[j]
# load image as grayscale
img = cv2.imread(img_src, cv2.IMREAD_GRAYSCALE)
# resize image
img = cv2.resize(img, dsize=(WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)
# flat image
img_flat = img.flatten() / 255.0 # normalize from [0, 255] to [0, 1]
X = np.vstack((X, img_flat))
Y = np.vstack((Y, np.zeros((1, len(all_labels)), dtype="int")))
Y[-1][i] = 1
X = X.reshape(X.shape[0], HEIGHT, WIDTH, 1).astype('float32')
print("Saving...")
if not os.path.exists(PICKLE_DATASET):
os.makedirs(PICKLE_DATASET)
with open("{}/{}".format(PICKLE_DATASET, DATA_NAME), "wb") as f:
pickle.dump(X, f, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(Y, f, protocol=pickle.HIGHEST_PROTOCOL)
t.stop()
|
StarcoderdataPython
|
5189185
|
<reponame>celestialteapot/nnc
import gym
import numpy as np
import torch
from torch import nn
from torchdiffeq import odeint
def get_a_conv(in_channel, out_channel):
"""
Generate a convolutional layer based on the torch.nn.Conv2d class
:param in_channel: the number of input channels
:param out_channel: the number of output channels
:return: a torch.nn.Conv2d layer with a 1x3 kernel
"""
res = torch.nn.Conv2d(
in_channel,
out_channel,
[1, 3],
stride=[1, 1],
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
)
return res
class SIRXEnv(gym.Env):
def __init__(self, env_config):
"""
Gym environment for SIRX, in env config one can find everything required for the dynamics.
Please check the jupyter notebooks for more details.
:param env_config:
"""
self.sirx = env_config['sirx']
target_nodes = env_config['target_nodes']
self.n_nodes = self.sirx.adjacency_matrix.shape[1]
self.n_drivers = self.sirx.driver_matrix.shape[1]
self.target_nodes = [target_nodes, np.arange(self.n_nodes)][target_nodes is None]
self.state_size = 4 * self.n_nodes
self.action_size = self.n_drivers
self.x_init = env_config['x0']
self.state = self.x_init[0].detach().cpu().numpy()
self.dt = env_config['dt']
self.total_time = env_config['T']
self.time_steps = torch.linspace(0, self.total_time, int(self.total_time // self.dt),
device=self.x_init.device)
self.budget = env_config['budget']
self.action_space = gym.spaces.Box(low=-100, high=100, shape=[
self.n_drivers]) # these are logits so any closed space in R would do
self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=[self.state_size])
self.spaces = [self.action_space, self.observation_space]
self.reward_range = (-np.inf, 0)
self.ode_solve_method = env_config['ode_solve_method']
if "reward_type" in env_config.keys():
self.reward_type = env_config['reward_type']
else:
self.reward_type = 'minus_l2'
self.reset()
def step(self, actions, is_logit=True):
# action preprocessing for compatibility with batched versions of dynamics and ODE integrators
if not isinstance(actions, torch.Tensor):
actions = torch.tensor(actions).to(device=self.x_init.device, dtype=torch.float)
if len(actions.shape) == 1:
actions = actions.unsqueeze(0)
if actions.shape[-1] == self.n_drivers:
# go from N to M by taking into account the driver matrix related values (driver message inputs)
actions = torch.matmul(self.sirx.driver_matrix.unsqueeze(0),
actions.unsqueeze(-1)).squeeze(-1)
# provided actions are considered to be logits for the softmax
# which does budget assignment and produces final u
if is_logit:
# softmax to apply budget constraint. Again relevant to the specific SIRX case.
u = torch.nn.functional.softmax(actions, dim=-1) \
* self.budget \
* self.sirx.driver_matrix.sum(-1)
else:
u = actions
# time for numerical ode solve
integr_time = self.time_steps[self.c_timestep: self.c_timestep + 2]
# print(integr_time)
deriv = lambda t, x: self.sirx(t, x, u=u)
if self.ode_solve_method == 'rk4':
next_state = odeint(deriv, self.state, integr_time, method='rk4')[-1]
else: # e.g. dopri5, adams
next_state = odeint(deriv, self.state, integr_time, method=self.ode_solve_method)[-1]
# state update
self.state = next_state
# determining when done
done = self.c_timestep == self.time_steps.shape[0] - 1
# reward calculation
# for now old skool type without oop and general coding stuff
reward = None
if self.reward_type == "minus_l2":
reward = self.minus_l2(next_state)
elif self.reward_type == "sparse_end":
reward = self.sparse_end(next_state)
elif self.reward_type == 'sum_to_max':
reward = self.sum_to_max(next_state)
else:
raise ValueError(
"Wrong reward type provided! Please choose 'minus_l2', 'sparse_end' or 'sum_to_max'")
self.c_timestep += 1
return self.state.detach().cpu().numpy()[0], reward.detach().cpu().item(), done, {}
def minus_l2(self, state):
# motivation is that the return of this reward will calculate the integral till t*
return -((state[:, self.target_nodes] ** 2) * self.dt).mean()
def sparse_end(self, state):
if self.c_timestep == 0:
self.max_inf = torch.tensor([0.0]).to(device=state.device)
mean_inf = state[:, self.target_nodes].mean()
self.max_inf = [self.max_inf, mean_inf][mean_inf > self.max_inf]
# motivation: the reward is minus the squared peak infection rate
if self.c_timestep == self.time_steps.shape[0] - 1:
return -(self.max_inf) ** 2
else:
return torch.tensor([0.0]).to(device=state.device)
def sum_to_max(self, state):
if self.c_timestep == 0:
self.max_inf = torch.tensor([0.0]).to(device=state.device)
mean_inf = state[:, self.target_nodes].mean()
prev_inf = self.max_inf + 0  # adding 0 creates a copy of the tensor (lazy copy)
self.max_inf = [self.max_inf, mean_inf][mean_inf > self.max_inf]
# motivation: summed over steps, this telescopes to minus the squared peak infection rate
if prev_inf != self.max_inf:
return -(self.max_inf) ** 2 + prev_inf ** 2
else:
return torch.tensor([0.0]).to(device=state.device)
def reset(self):
self.c_timestep = 0
self.state = self.x_init
return self.x_init[0].detach().cpu().numpy()
def render(self):
raise ValueError("Not implemented yet.")
def close(self):
# raise ValueError("Not implemented yet.")
pass
def seed(self, seed):
pass
# raise ValueError("Not implemented yet.")
class RLGCNN(torch.nn.Module):
"""Graph convolutional network for RL."""
def __init__(self, adjacency_matrix, driver_matrix, input_preprocessor,
in_channels=1, feat_channels=5, message_passes=4):
super().__init__()
self.adjacency_matrix = adjacency_matrix
self.driver_matrix = driver_matrix
self.drivers = torch.where(self.driver_matrix == 1)[0]
self.n_nodes = self.adjacency_matrix.shape[0]
self.n_drivers = self.driver_matrix.shape[0]
self.max_degree = self.adjacency_matrix.sum(-1).max().to(torch.long)
self.message_passes = message_passes
self.input_preprocessor = input_preprocessor
j = self.max_degree
self.modlist = torch.nn.ModuleList()
in_chans = in_channels
while j > 2:
if j - 2 > 2:
self.modlist.append(get_a_conv(in_chans, feat_channels))
else:
self.modlist.append(get_a_conv(in_chans, in_channels))
in_chans = feat_channels
j -= 2
self.modlist.append(torch.nn.AvgPool2d(
[1, 2],
stride=[1, 1],
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
))
def forward(self, x, t=torch.zeros([1])[0]):
z = x
# input preparation done
# message pass for 4 turns
for j in range(self.message_passes):
i = 0
z = self.input_preprocessor(z) # go from flat to channels, relevant for SIRX.
for m in self.modlist:
# do convolutions until N+1 shape is reached, extending feature space
z = m(z)
if i < len(self.modlist) - 1:
# after last convolution average pool over features
z = torch.relu(z)
i += 1
if j < self.message_passes - 1:
# before last message pass, flatten and preserve batch for following message passes.
z = z.view(x.shape[0], -1)
z = z.mean(1).squeeze(1) # flatten channels and have a shape of: batch x N
# go from N to M by taking into account the driver matrix related values (driver message inputs)
u = z[:, self.drivers, :].squeeze(-1)
# softmax would be here, but instead we return the logits now
return u
class Actor(nn.Module):
"""
Simple actor network based on tianshou implementation
"""
def __init__(self, model, device='cuda:0'):
super().__init__()
self.device = device
self.model = model
def forward(self, s, **kwargs):
s = torch.tensor(s, device=self.device, dtype=torch.float)
batch = s.shape[0]
s = s.view(batch, -1)
logits = self.model(s)
return logits, None
class ActorProb(nn.Module):
def __init__(self, model, action_shape, device='cpu'):
super().__init__()
self.device = device
self.model = model
self.mu = nn.Linear(np.prod(action_shape), np.prod(action_shape))
self.sigma = nn.Linear(np.prod(action_shape), np.prod(action_shape))
def forward(self, s, **kwargs):
if not isinstance(s, torch.Tensor):
s = torch.tensor(s, device=self.device, dtype=torch.float)
batch = s.shape[0]
s = s.view(batch, -1)
logits = self.model(s)
mu = torch.tanh(self.mu(logits))
sigma = torch.exp(self.sigma(logits))
return (mu, sigma), None
class Critic(nn.Module):
"""
Simple critic network based on tianshou implementation
"""
def __init__(self, layer_num, state_shape, action_shape=0, device='cpu'):
super().__init__()
self.device = device
self.model = [
nn.Linear(state_shape + action_shape, state_shape),
nn.ReLU(inplace=True)]
for i in range(layer_num):
self.model += [nn.Linear(state_shape, state_shape), nn.ReLU(inplace=True)]
self.model += [nn.Linear(state_shape, 1)]
self.model = nn.Sequential(*self.model)
def forward(self, s, a=None):
if not isinstance(s, torch.Tensor):
s = torch.tensor(s, device=self.device, dtype=torch.float)
if a is not None and not isinstance(a, torch.Tensor):
a = torch.tensor(a, device=self.device, dtype=torch.float)
batch = s.shape[0]
s = s.view(batch, -1)
if a is None:
logits = self.model(s)
else:
a = a.view(batch, -1)
logits = self.model(torch.cat([s, a], dim=1))
return logits
def transform_u(logits, driver_matrix, budget):
"""
A function that transforms RL logits to valid controls.
:param logits: RL action logits
:param driver_matrix: driver matrix for control selection
:param budget: total budget available
"""
logits = torch.matmul(driver_matrix.unsqueeze(0), logits.unsqueeze(-1)).squeeze(-1)
u = torch.nn.functional.softmax(logits, dim=-1) \
* budget \
* driver_matrix.sum(-1)
return u
|
StarcoderdataPython
|
3587814
|
<reponame>friedforfun/DocumentTracking
import pytest
from unittest.mock import patch, mock_open
from DocuTrace.Analysis.DataCollector import DataCollector, ReadingData
mock_file_content = '{"visitor_uuid": "745409913574d4c6", "env_doc_id": "130705172251-3a2a725b2bbd5aa3f2af810acf0aeabb", "visitor_country": "MX", "event_readtime": 797, "visitor_useragent":"Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3"}'
json_dict = {
"visitor_country": "MX",
"visitor_uuid": "745409913574d4c6",
"subject_doc_id": "130705172251-3a2a725b2bbd5aa3f2af810acf0aeabb",
"event_readtime": 797,
"event_type": "read",
"visitor_useragent": "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3"}
def _setup_Views_class_():
m = mock_open(read_data=mock_file_content)
with patch('DocuTrace.Analysis.FileRead.open', m) as _file:
loc_views = DataCollector(path='path')
_file.assert_not_called()
return loc_views
def test_count():
m = mock_open(read_data=mock_file_content)
with patch('DocuTrace.Analysis.FileRead.open', m) as _:
loc_views = _setup_Views_class_()
assert loc_views.counted == False
loc_views.gather_data()
assert loc_views.counted == True
def test_count_error():
loc_views = DataCollector()
with pytest.raises(AttributeError):
loc_views.gather_data()
def test_count_countries():
views = DataCollector()
assert 'MX' not in views.countries.keys()
views.count_countries(json_dict)
assert views.countries['MX'] == 1
def test_count_continents():
views = DataCollector()
assert 'North America' not in views.continents.keys()
views.count_continents(json_dict)
assert views.continents['North America'] == 1
def test_count_browsers():
views = DataCollector()
assert 'Mobile Safari' not in views.browser_families.keys()
views.count_browsers(json_dict)
assert views.browser_families['Mobile Safari'] == 1
def test_count_user_reads():
views = DataCollector()
assert '745409913574d4c6' not in views.reader_profiles.keys()
views.collect_reading_data(json_dict)
assert views.reader_profiles['745409913574d4c6'].read_time == 797
def test_collect_document_readers():
views = DataCollector()
assert '130705172251-3a2a725b2bbd5aa3f2af810acf0aeabb' not in views.document_readers.keys()
assert '745409913574d4c6' not in views.visitor_documents.keys()
views.collect_document_readers(json_dict)
assert views.document_readers['130705172251-3a2a725b2bbd5aa3f2af810acf0aeabb'] == ['745409913574d4c6']
assert views.visitor_documents['745409913574d4c6'] == ['130705172251-3a2a725b2bbd5aa3f2af810acf0aeabb']
|
StarcoderdataPython
|
8098629
|
from setuptools import setup, find_packages
setup(name='coconut',
version='0.0.1',
url='https://github.com/imzeki/coconut',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Coconut is a module for simplifying simple things and chunk them down into even simpler tasks',
packages=find_packages(exclude=['tests']),
long_description=open('README.md').read(),
zip_safe=False)
|
StarcoderdataPython
|
4826280
|
#!/usr/bin/env python3
"""exfi.io.read_bed.py: BED importer"""
import logging
import pandas as pd
from exfi.io.bed import BED3_COLS, BED3_DTYPES
def read_bed3(filename):
"""Read a BED file and return the BED3 dataframe."""
logging.info('Reading BED3 from disk')
bed3 = pd.read_csv(
filepath_or_buffer=filename,
header=None,
sep='\t',
usecols=[0, 1, 2],
names=BED3_COLS,
engine='c'
).astype(BED3_DTYPES)
logging.info('Done')
return bed3
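# --- Usage sketch (not part of the original file; the path is hypothetical):
# bed3 = read_bed3('exons.bed')  # DataFrame with the BED3_COLS columns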
|
StarcoderdataPython
|
6415221
|
import logging
logger = logging.getLogger(__name__)
def parameterized(dec):
"""
Meta decorator.
Decorate a decorator that accepts the decorated function as first argument,
and then other arguments with this decorator, to be able to pass it
arguments.
Source: http://stackoverflow.com/a/26151604
>>> @parameterized
... def multiply(f, n):
... def dec(*args, **kwargs):
... return f(*args, **kwargs) * n
... return dec
>>> @multiply(5)
... def add(a, b):
... return a + b
>>> add(3, 2)
25
"""
def layer(*args, **kwargs):
def repl(f):
return dec(f, *args, **kwargs)
return repl
return layer
class KeyMap:
"""
A keymap contains two things:
A list of commands, defined using the `@keybind` decorator,
and a list of key to command mappings.
"""
def __init__(self, keys={}):
self.commands = {}
self.keys = {}
for key, commands in keys.items():
self.add_key(key, commands)
def add_command(self, name, func):
"""
Add a command to the command map.
If the command already exists, the function will be added to the list
"""
if name in self.commands.keys():
self.commands[name].append(func)
else:
self.commands[name] = [func]
def add_key(self, key, command):
"""
Map `key` to `command`
command can be one of the following:
* A string, the name of the command
* A tuple `(command, args...)`. `args` will be passed to the
functions
* A list of any of the two above, can be mixed
If the key is already mapped, the command will be added to its list
"""
if not isinstance(command, list) and not isinstance(command, set):
command = {command}
for c in command:
if not isinstance(c, tuple):
c = (c,)
if key in self.keys.keys():
self.keys[key].append(c)
else:
self.keys[key] = [c]
def command(self, func):
"""
Decorator, used to bind `func` to `command`
"""
self.add_command(func.__name__, func)
return func
def keypress(self, func):
"""
Decorator
Calls the decorated function before calling the appropriate commands
for the keypress. The value returned by `func` will be sent to the
commands as the new `key`
"""
def dec(widget, size, key):
key = func(widget, size, key)
return self.press_key(key, widget)
return dec
def press_key(self, key, widget):
"""
Calls the commands associated with `key`
Will return `None` or the result of the last command that wasn't `None`
"""
k = key
if key in self.keys.keys():
k = None
for command in self.keys[key]:
assert command[0] in self.commands
if len(command) > 1:
nk = self.call_command(command[0], widget, *command[1:])
else:
nk = self.call_command(command[0], widget)
if nk is not None:
k = nk
return k
def call_command(self, command, *args, **kwargs):
"""
Call command by name and pass it `*args` and `**kwargs`
Will return `None` or the result of the last function that wasn't `None`
"""
key = None
logger.debug("Calling %d commands for '%s'",
len(self.commands[command]), command)
for fn in self.commands[command]:
k = fn(*args, **kwargs)
if k is not None:
key = k
return key
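# --- Usage sketch (not part of the original module): bind a command to a key
# and dispatch a keypress; the widget argument follows the urwid-style
# signature assumed by `keypress` above.
demo_map = KeyMap()

@demo_map.command
def quit_app(widget):
    logger.info("quitting")

demo_map.add_key('q', 'quit_app')
demo_map.press_key('q', widget=None)  # invokes quit_app(None)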
|
StarcoderdataPython
|
5061289
|
<reponame>jcapriot/simpeg
from .code_utils import *
deprecate_module("codeutils", "code_utils", "0.15.0")
|
StarcoderdataPython
|
6507594
|
<reponame>MalikKeio/valkyrie-anatomia-script
JP = 0
EN = 1
STATUS = -1
TRANSLATED = 1
INPROGRESS = 2
NOSTORY = 3
CHAPTERS = {
1: ["戦乙女の目覚め", "The Awakening of the Battle Maiden", TRANSLATED],
2: ["魂の律動:剣を振る理由", "Spiritual Concentration: What to Wield the Sword For", INPROGRESS],
3: ["魂の律動:禁じられた歌声", "Spiritual Concentration: Forbidden Voice"],
4: ["運命の女神", "The Goddess of Destiny"],
5: ["魂の律動:見果てぬ夢は海を越えて", "Spiritual Concentration: An Unfulfilled Dream Crosses the Sea"],
6: ["魂の律動:約束", "Spiritual Concentration: Promise"],
7: ["魂の律動:還るべき場所", "Spiritual Concentration: A Place to Come Home to"],
# I deliberately remove 先 as it just refers to where the light is guiding (the moon light is guiding Altveer out of the cell)
8: ["魂の律動:光の導く先へ", "Spiritual Concentration: Guiding Light"],
9: ["魂の律動:痛みのワケ", "Spiritual Concentration: <i>Why</i> Pain?"],
10:["魂の律動:命の在処、心の在処", "Spiritual Concentration: The Whereabouts of Life and the Mind"],
11:["魂の律動:無敵の剣", "Spiritual Concentration: The Invincible Sword"],
12:["父と子と", "Father and Son"],
13:["魂の律動:目標を外さなかった男", "Spiritual Concentration: The Man Who Never Missed the Mark"],
14:["魂の律動:鎧が守りしもの", "He That the Armor Shields"],
15:["魂の律動:愛はさだめ", "Love is Karma"],
16:["魂の律動:赦されぬ罪", "Unpardonable Sin"],
# 誰が為に is an archaic word meaning "For whom". The literal translation of this title is "For whom is the king's might".
17:["魂の律動:王の力は誰が為に", "Worthy of the King's Might"],
# I think that "Karmic Repayment" may be a good translation as well. However, it does not fit well with the story, as Altveer never atone for anything (and no one atoned in that story). Altveer was not a sinner to begin with.
# I think that "巡りて" hints at the circulation of karma, especially to the fact that Altveer and Anelian are united by fate.
18:["魂の律動:因果は巡りて", "All Is Fated"],
19:["迷妄の地", "The Land of Illusion"],
20:["魂の律動:狂った歯車", "Spiritual Concentration: Machine Running Amok"],
21:["トラキシア戦記", "Traxian Chronicles"],
22:["轟く雷", "Roaring Thunder"],
23:["シャイロ二世の誕生", "The Birth of Shiloh II"],
24:["オーディンの秘密", "Odin's Secret"]
}
EINHERJAR = "EINHERJAR"
STORIES = "STORIES"
SIDE_STORY_CHAPTERS = [
{EINHERJAR: "アーリィ", STORIES: [
["漆黒の戦乙女", "The Black Battle Maiden", TRANSLATED]
]},
{EINHERJAR: "ルーファス", STORIES: [
["神の器", "The Vessel of the Gods"]
]},
{EINHERJAR: "バルゴ", STORIES: [
["戦い続ける意味", ""],
["業を背負いし者", ""]
]},
{EINHERJAR: "那智", STORIES: [
["神託の少女", ""],
["神様の救い", ""]
]},
{EINHERJAR: "リウ", STORIES: [
["忘却の竜", ""],
["盗まれた運命", ""]
]},
{EINHERJAR: "ノルン", STORIES: [
["運命を紡ぐ糸", ""], #https://www.youtube.com/watch?v=I1CkD-IVFkM
["運命の歯車", "The Cogs of Destiny"],
["運命の子", "The Fated Girl"]
]},
{EINHERJAR: "ソー", STORIES: [
["雷の行方", "The Thunder God's Whereabouts"], # https://youtu.be/KZ6DOIfSs5M?t=56s
["悩める雷神", "A Worried Thunder God"], # https://www.youtube.com/watch?v=4jpM63yKEsk
["雷神の戦", "The Thunder God's Struggle"] # https://www.youtube.com/watch?v=lyPGVveBQBw
]},
{EINHERJAR: "ルチア", STORIES: [
["母の遺した旋律", ""],
["ルチアの決意", ""]
]},
{EINHERJAR: "ヴァルヴァロア", STORIES: [
["新大陸を目指せ", ""],
["老兵の教え", ""]
]},
{EINHERJAR: "ランヴァルド", STORIES: [
["神と人", ""],
["死者の村", ""]
]},
{EINHERJAR: "クラウシュ", STORIES: [
["魔の海域", ""],
["鎧に宿る霊", ""]
]},
{EINHERJAR: "クロエ", STORIES: [
["不死者を残滅せよ", ""],
["湖に潜むドラゴンを討伐せよ", ""]
]},
{EINHERJAR: "ダリネ", STORIES: [
["出既損ないの反魂香", ""],
["再会", ""]
]},
{EINHERJAR: "アネリアン", STORIES: [
["力の代償", ""],
["偽りの魔法", ""]
]},
{EINHERJAR: "ジャンヌ", STORIES: [
["この身に纏うは", ""],
["生き様と死に様", "How to Live and How to Die"]
]},
{EINHERJAR: "マクシミリアン", STORIES: [
["連れ去られた少女を救え", "Save the Abducted Girl"],
["父の背中", ""]
]},
{EINHERJAR: "イングリット", STORIES: [
["連銀術師の野望を阻止せよ", ""],
["囚われた人魚を救え", "Save the Captured Mermaid"]
]},
{EINHERJAR: "クルト", STORIES: [
["夢の残骸", "Remnants of Dream"],
["夢のつづき", "The Dream Continues"]
]},
{EINHERJAR: "セナ", STORIES: [
["闘う理由", ""],
["貴婦人の構え", ""]
]},
{EINHERJAR: "カラドック", STORIES: [
["伝説の剣を求めて", ""],
["剣の神", "The God of the Sword"]
]},
{EINHERJAR: "カチナ", STORIES: [
["災いの種", ""],
["神々の置き土産", ""]
]},
{EINHERJAR: "マルヴァイナ", STORIES: [
["亡国の騎士団", ""],
["迫る危機", ""]
]},
{EINHERJAR: "アルトフェイル", STORIES: [
["森に消えた兄の行方", ""],
["超古代文明の謎を解き明かせ", ""]
]},
{EINHERJAR: "フリー", STORIES: [
["試練の道・初級", "Path of Trials: Elementary Level", NOSTORY],
["試練の道・中級", "Path of Trials: Middle Level", NOSTORY],
["試練の道・上級", "Path of Trials: Upper Level", NOSTORY]
]}
]
SIDE_STORY_CHAPTERS_LEN = 0
for dic in SIDE_STORY_CHAPTERS:
SIDE_STORY_CHAPTERS_LEN += len(dic[STORIES])
OTHER_STORIES = {
1: ["戦乙女再臨! 彼方よりの来訪者", "The Second Advent of the Battle Maiden! A Visitor from Beyond", TRANSLATED],
2: ["魂の律動:偽るモノ、偽らざるモノ", "Spiritual Concentration: Lies and Truths"],
3: ["第1回 ヴァルハラ防衛線", "Valhalla Line of Defence 1", NOSTORY], # There was no story here
4: ["魂の律動:答えなき祈り", "Spiritual Concentration: Unanswered Prayers"],
5: ["第2回 ヴァルハラ防衛線", "Valhalla Line of Defence 2"], # https://www.youtube.com/watch?v=S6Jt_Ac2ey8
6: ["名もなき花", "Flower Without Even a Name"], # https://www.youtube.com/watch?v=XBnudQQcZVE
7: ["深淵の門", "Gate of the Abyss", NOSTORY],
8: ["第3回 ヴァルハラ防衛線", "Valhalla Line of Defence 3"], # https://youtu.be/obwhq6IeGtY?t=5m41s
9: ["女王の教育係", "The Queen's Pedagogue"], # https://youtu.be/CbkbENIuT64?t=3m1s
10: ["神が遺した錬金術", "The Alchemy the Gods Left Behind"], # not found yet
11: ["黒き戦乙女の挑戦状", "The Challenge of the Black Battle Maiden"] # https://www.youtube.com/watch?v=OJwHFrVp27E
}
CHARACTERS = {
"???": "???",
"オーディン": "Odin",
"レナス": "Lenneth",
"ゼフィロス": "Zephyros",
"ヴァン神族": "Vanir",
"フギン": "Huginn",
"ムニン": "Muninn",
"ジャンヌ": "Jeanne",
# Choose raven over crow because that's how Huginn and Muginn are called in Scandinavian literature.
"カラス1": "Raven 1",
"カラス2": "Raven 2",
# Senna
"セナ": "Senna",
"カラドック": "Caradoc",
"店主": "Shopkeeper",
"常連客": "Regular customer",
"ガラの悪い客1": "Ill-bred customer 1",
"ガラの悪い客2": "Ill-bred customer 2"
}
CHARACTERS.update({
"アーリィ": "Hrist",
"ルーファス": "Rufus",
# Another tricky name. バルゴ is the common transcription of "Virgo" (the zodiac
# sign). But naming a heavy manly warrior that deals dark damage Virgo would
# sound odd, wouldn't it? Or maybe it shows the complexity of his character
# *chuckles*.
"バルゴ": "Virgo",
"那智": "Nachi",
# Liu sounds Chinese. It could be Riu, I do not really see any difference.
"リウ": "Liu",
"ノルン": "Norn",
"ソー": "Thor",
# Most likely an Italian name
"ルチア": "Lucia",
# Most likely a gratuitous French name. Choose this spelling to look like the House of Valois.
"ヴァルヴァロア": "Valvalois",
# Ragnvald is an old norse first name: https://en.wikipedia.org/wiki/Rognvald
"ランヴァルド": "Ragnvald",
# The Japanese says Kuraushu. However, I was unable to find anything good to write it in Roman characters. So let's go with the German "Klaus"
"クラウシュ": "Klaus",
# A French name?
"クロエ": "Chloé",
"ダリネ": "Darine",
# I chose the R because Anerian sounds more like as a name (at least as a surname, it does exist)
"アネリアン": "Anerian",
"イングリット": "Ingrid",
"クルト": "Kurt",
"マクシミリアン": "Maximilien",
# I hesitate between ch and ts. Katsina sounds more exotic. Hopi Spirits are called Kachina or Katsina (two spellings exist) while lots of place in the world are called Katsina.
"カチナ": "Katsina",
"マルヴァイナ": "Malvina",
# Another (gratuitous?) German-like name. I cannot find anything from which it is likely to originate. Altveer is a very rare surname that sounds a like (probably of Dutch origin, see Alteveer, though v is pronounced v, and not f, in Dutch).
"アルトフェイル": "Altveer",
"フリー": "Free"
})
|
StarcoderdataPython
|
5065028
|
from tempfile import NamedTemporaryFile
from typing import List
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class EcoindexSpider(CrawlSpider):
name = "EcoindexSpider"
custom_settings = {"LOG_ENABLED": False}
rules = (Rule(LinkExtractor(), callback="parse_item", follow=True),)
def __init__(
self,
allowed_domains: List[str],
start_urls: List[str],
temp_file: NamedTemporaryFile,
*a,
**kw,
):
self.links = set()
self.allowed_domains = allowed_domains
self.start_urls = start_urls
self.temp_file = temp_file
super().__init__(*a, **kw)
def parse_item(self, response):
self.temp_file.write(f"{response.url}\n")
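# --- Usage sketch (not part of the original file): running the spider with
# Scrapy's CrawlerProcess; the domain and URL are placeholders.
# from scrapy.crawler import CrawlerProcess
# temp = NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
# process = CrawlerProcess()
# process.crawl(EcoindexSpider, allowed_domains=["example.com"],
#               start_urls=["https://example.com"], temp_file=temp)
# process.start()  # blocks until the crawl finishes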
|
StarcoderdataPython
|
5030239
|
<gh_stars>0
# Part of Escala.
# Written by <NAME>.
from sys import argv as ARGS
# Constants that make up a complete SQL script.
SQL_SCRIPT = "add_skills.sql"
INSERT_NODE = (
"INSERT INTO skillNodes VALUES"
" ('{0}', {1}, '{2}', '{3}', {4}, {5}, {6}, {7});\n"
)
INSERT_EDGE = "INSERT INTO skillEdges VALUES ('{0}', {1}, {2});\n"
HEADER = (
"/*\n"
"Part of Escala.\n"
"Written by <NAME>.\n"
"This script has been automatically generated.\n"
"*/\n\n"
"CONNECT 'jdbc:derby:../data/tables';\n"
)
FOOTER = "\nDISCONNECT;\nExit;\n"
class Node:
"""Hold information about graph nodes."""
def __init__(self, tree, identifier, targets, name, description,
cost, logistics_effect, marketing_effect, efficiency_effect):
"""Initialize the graph node."""
self.tree = tree
self.identifier = int(identifier)
self.targets = [int(target) for target in targets.split()]
self.name = name
self.description = description
self.cost = float(cost)
self.logistics_effect = logistics_effect
self.marketing_effect = marketing_effect
self.efficiency_effect = efficiency_effect
def get_node_insertion_command(self):
"""Format an SQL insertion command with the appropriate node information."""
return INSERT_NODE.format(
self.tree,
self.identifier,
self.name,
self.description,
self.cost,
self.logistics_effect,
self.marketing_effect,
self.efficiency_effect
)
def get_edge_insertion_commands(self):
"""Format SQL insertion commands with appropriate node edge information."""
commands = []
if self.targets[0] != -1:
for target in self.targets:
commands.append(
INSERT_EDGE.format(self.tree, self.identifier, target)
)
return commands
def chunk(items, size):
"""Return chunks of a specified size of a list."""
for start in range(0, len(items), size):
yield items[start:start + size]
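# --- Illustration (not in the original file): chunk() groups the flat list of
# node parameters into per-node slices, matching the 8 parameters that Node's
# constructor takes (see the usage below).
# list(chunk([1, 2, 3, 4, 5, 6], 2)) -> [[1, 2], [3, 4], [5, 6]]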
# Open a destination SQL script and write SQL commands to it.
with open(SQL_SCRIPT, "w") as destination:
destination.write(HEADER)
# Add information from all provided plaintext files to the destination
# SQL script.
for arg in ARGS[1:]:
destination.write("\n")
# Open the source, parse it, create Node objects, and write insertion
# commands for the destination SQL script.
with open(arg, "r") as source:
            lines = [item for item in source.read().splitlines() if item != ""]  # skip blank lines
tree_id = lines.pop(0)
nodes = [Node(tree_id, *parameters) for parameters in chunk(lines, 8)]
for node in nodes:
destination.write(node.get_node_insertion_command())
destination.write("\n")
for node in nodes:
destination.writelines(node.get_edge_insertion_commands())
# Add a footer at the end of the file.
destination.write(FOOTER)
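# Expected input format, inferred from the parsing above: the first
# non-empty line is the tree id, followed by one node per 8 lines:
#   identifier
#   targets (space-separated ids, or "-1" for none)
#   name
#   description
#   cost
#   logistics_effect
#   marketing_effect
#   efficiency_effect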
|
StarcoderdataPython
|
5186539
|
'''
Lucky numbers are a subset of the integers. Rather than going into much
theory, let us see the process of arriving at lucky numbers.
Take the set of integers:
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, ...
First, delete every second number; we get the following reduced set:
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, ...
Now, delete every third number; we get:
1, 3, 7, 9, 13, 15, 19, ...
Continue this process indefinitely.
Any number that does NOT get deleted by the above process is called "lucky".
'''
def isLucky(n, k=2):
    # n is the current position of the number in the surviving sequence.
    # In round k, every k-th element is deleted, so n is eliminated when
    # n % k == 0; otherwise its position shrinks to n - n//k.
    if k > n:
        return 1
    elif n % k == 0:
        return 0
    return isLucky(n - n//k, k + 1)
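# Examples: isLucky(7) -> 1 (7 survives every deletion round),
# while isLucky(4) -> 0 (4 is removed in the first round).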
def getMax(arr, curr_max=1, n=5):
    """Return the maximum product of n elements of arr (brute-force recursion)."""
    if len(arr) == n:
        # All remaining elements must be taken; fold them into the product
        # accumulated so far. (The original dropped curr_max here, which
        # gave wrong results whenever earlier elements had been taken.)
        prod = curr_max
        for i in arr:
            prod *= i
        return prod
    if len(arr) < n:
        # Not enough elements left on this branch; returning None here,
        # as the original did, would break the max() call below.
        return float('-inf')
    # Either take arr[0] (n-1 still needed) or skip it (n still needed).
    return max(getMax(arr[1:], curr_max * arr[0], n - 1),
               getMax(arr[1:], curr_max, n))
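# Example: for [-1, -2, -3, 1, 2, -1] the maximum product of five
# elements is 12, obtained by leaving out the lone 1.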
if __name__ == '__main__':
# t=int(input())
# for tcs in range(t):
# n=int(input())
# print(isLucky(n))
print(getMax([-1, -2, -3, 1, 2, -1]))
|
StarcoderdataPython
|
306709
|
# log21.Levels.py
# CodeWriter21
import logging as _logging
__all__ = ['CRITICAL', 'FATAL', 'ERROR', 'WARNING', 'WARN', 'INFO', 'DEBUG', 'NOTSET']
CRITICAL = _logging.CRITICAL
FATAL = CRITICAL
ERROR = _logging.ERROR
WARNING = _logging.WARNING
WARN = WARNING
INFO = _logging.INFO
DEBUG = _logging.DEBUG
NOTSET = _logging.NOTSET
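# Usage sketch: these constants mirror the standard library's values, so
# they work anywhere a logging level is expected, e.g.:
#   import logging
#   logging.basicConfig(level=DEBUG)  # DEBUG here is log21.Levels.DEBUG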
|
StarcoderdataPython
|
5197685
|
# coding:utf-8
from LxMaBasic import maBscCfg
class KeyframeOp(maBscCfg.MaUtility):
    """Keyframe operation bound to a root node name (appears to be a minimal stub)."""
    def __init__(self, rootStr):
        self._rootStr = rootStr
|
StarcoderdataPython
|
5140097
|
from mdal import Datasource, MDAL_DataLocation
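# Overview: this script exercises the mdal Python bindings by copying a
# volumetric dataset group from a TUFLOW-FV NetCDF mesh into a new PLY
# mesh, then reloading it to check the round trip.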
ds = Datasource("data/tuflowfv/withMaxes/trap_steady_05_3D.nc")
mesh = ds.load()
group = mesh.groups[1]
a, b, c = group.volumetric(0)
ds2 = Datasource("test_vol.ply")
mesh2 = ds2.add_mesh()
mesh2.vertices = mesh.vertices
mesh2.faces = mesh.faces
print(f"Vertex Count :{mesh.vertex_count}")
print(f"Face Count : {mesh.face_count}")
group2 = mesh2.add_group("test", location=MDAL_DataLocation.DataOnVolumes)
group2.add_volumetric(group.data(), a, b)
print(f"Level Count: {group2.level_count}")
print(f"Location: {group2.location}")
print(f"MinMax: {group2.minmax}")
print(f"Dataset Count: {group2.dataset_count}")
data = group2.data(0)
print(f"Data Value Count: {len(data)}")
print(f"{data}")
print(f"{group2.volumetric(0)}")
a, b, c = group2.volumetric(0)
print(f"Number of Extrusion values : {len(b)}")
mesh2.save()
mesh3 = ds2.load()
mesh3.info()
group3 = mesh3.groups[1]
print(f"{group3.location}")
d, e, f = group3.volumetric(0)
print(f"{group3.volumetric(0)}")
print(f"{group3.data(0)}")
print("Mesh Equality : {mesh2 == mesh3}")
|
StarcoderdataPython
|
5081403
|
<reponame>fpsantosx/DesenWeb<filename>SomarNumeros.py<gh_stars>0
def soma(n1, n2, n3):
    return n1 + n2 + n3
print(soma(5, 6, 7))
|
StarcoderdataPython
|
229775
|
from nose.plugins.skip import SkipTest
from nose.tools import raises, assert_equal, assert_is, assert_list_equal
from slivka.server.forms.fields import IntegerField, ValidationError, \
IntegerArrayField
class TestValue:
def setup(self):
self.field = IntegerField("name")
def test_int(self):
assert_equal(self.field.validate(10), 10)
assert_equal(self.field.validate(-8), -8)
@raises(ValidationError)
def test_float(self):
self.field.validate(2.43)
def test_int_str(self):
assert_equal(self.field.validate('15'), 15)
assert_equal(self.field.validate('-3'), -3)
@raises(ValidationError)
def test_decimal_str(self):
self.field.validate('0.65')
@raises(ValidationError)
def test_invalid_str_conversion(self):
self.field.validate('xyz')
def test_none_optional(self):
self.field.required = False
assert_is(self.field.validate(None), None)
@raises(ValidationError)
def test_none_required(self):
self.field.validate(None)
def test_empty(self):
self.field.required = False
assert_is(self.field.validate(''), None)
@raises(ValidationError)
def test_true(self):
self.field.validate(True)
@raises(ValidationError)
def test_false(self):
self.field.validate(False)
def test_zero(self):
# checks if 0 is not accidentally converted to None or False
assert_is(self.field.validate(0), 0)
class TestBoundedValue:
def setup(self):
self.field = IntegerField("name", min=3, max=7)
@raises(ValidationError)
def test_validate_more_than_max(self):
self.field.validate(8)
def test_validate_equal_to_max(self):
self.field.validate(7)
@raises(ValidationError)
def test_validate_less_than_min(self):
self.field.validate(1)
def test_validate_equal_to_min(self):
self.field.validate(3)
def test_validate_within_bounds(self):
self.field.validate(5)
# validation with default
class TestDefault:
def setup(self):
raise SkipTest("default value substitution no longer applies")
self.field = IntegerField("name", default=49, min=-1)
def test_none(self):
assert_equal(self.field.validate(None), 49)
def test_zero_default(self):
# make sure that default = 0 is not treated as undefined
field = IntegerField('name', default=0)
assert_equal(field.validate(None), 0)
def test_empty(self):
assert_equal(self.field.validate(''), 49)
assert_equal(self.field.validate(()), 49)
def test_valid(self):
assert_equal(self.field.validate(1), 1)
def test_valid_zero(self):
# make sure that 0 is not treated as False
assert_equal(self.field.validate(0), 0)
@raises(ValidationError)
def test_invalid(self):
self.field.validate(-20)
# multiple values validation
def test_multiple_valid_values():
field = IntegerArrayField('name')
assert_list_equal(field.validate([1, 2, 4, 8]), [1, 2, 4, 8])
assert_list_equal(field.validate(['1', 4, '6']), [1, 4, 6])
@raises(ValidationError)
def test_multiple_invalid_value():
field = IntegerArrayField('name')
field.validate([4, 5, 'a'])
|
StarcoderdataPython
|
3408185
|
<filename>pythonocc/lib/OCC/RWStepRepr.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _RWStepRepr.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_RWStepRepr', [dirname(__file__)])
except ImportError:
import _RWStepRepr
return _RWStepRepr
if fp is not None:
try:
_mod = imp.load_module('_RWStepRepr', fp, pathname, description)
finally:
fp.close()
return _mod
_RWStepRepr = swig_import_helper()
del swig_import_helper
else:
import _RWStepRepr
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _RWStepRepr.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_RWStepRepr.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_RWStepRepr.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_RWStepRepr.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_RWStepRepr.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_RWStepRepr.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_RWStepRepr.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_RWStepRepr.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_RWStepRepr.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_RWStepRepr.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_RWStepRepr.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_RWStepRepr.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_RWStepRepr.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_RWStepRepr.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_RWStepRepr.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_RWStepRepr.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_RWStepRepr.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _RWStepRepr.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.Standard
import OCC.Interface
import OCC.TCollection
import OCC.MMgt
import OCC.TColStd
import OCC.Message
import OCC.StepRepr
import OCC.StepBasic
def register_handle(handle, base_object):
"""
Inserts the handle into the base object to
prevent memory corruption in certain cases
"""
try:
if base_object.IsKind("Standard_Transient"):
base_object.thisHandle = handle
base_object.thisown = False
except:
pass
class RWStepRepr_RWAssemblyComponentUsage(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_swiginit(self,_RWStepRepr.new_RWStepRepr_RWAssemblyComponentUsage(*args))
def ReadStep(self, *args):
"""
* Reads AssemblyComponentUsage
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_AssemblyComponentUsage &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes AssemblyComponentUsage
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_AssemblyComponentUsage &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_AssemblyComponentUsage &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWAssemblyComponentUsage
RWStepRepr_RWAssemblyComponentUsage.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_ReadStep,None,RWStepRepr_RWAssemblyComponentUsage)
RWStepRepr_RWAssemblyComponentUsage.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_WriteStep,None,RWStepRepr_RWAssemblyComponentUsage)
RWStepRepr_RWAssemblyComponentUsage.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_Share,None,RWStepRepr_RWAssemblyComponentUsage)
RWStepRepr_RWAssemblyComponentUsage_swigregister = _RWStepRepr.RWStepRepr_RWAssemblyComponentUsage_swigregister
RWStepRepr_RWAssemblyComponentUsage_swigregister(RWStepRepr_RWAssemblyComponentUsage)
class RWStepRepr_RWAssemblyComponentUsageSubstitute(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_swiginit(self,_RWStepRepr.new_RWStepRepr_RWAssemblyComponentUsageSubstitute(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_AssemblyComponentUsageSubstitute &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_AssemblyComponentUsageSubstitute &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_AssemblyComponentUsageSubstitute &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWAssemblyComponentUsageSubstitute
RWStepRepr_RWAssemblyComponentUsageSubstitute.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_ReadStep,None,RWStepRepr_RWAssemblyComponentUsageSubstitute)
RWStepRepr_RWAssemblyComponentUsageSubstitute.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_WriteStep,None,RWStepRepr_RWAssemblyComponentUsageSubstitute)
RWStepRepr_RWAssemblyComponentUsageSubstitute.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_Share,None,RWStepRepr_RWAssemblyComponentUsageSubstitute)
RWStepRepr_RWAssemblyComponentUsageSubstitute_swigregister = _RWStepRepr.RWStepRepr_RWAssemblyComponentUsageSubstitute_swigregister
RWStepRepr_RWAssemblyComponentUsageSubstitute_swigregister(RWStepRepr_RWAssemblyComponentUsageSubstitute)
class RWStepRepr_RWCompositeShapeAspect(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWCompositeShapeAspect_swiginit(self,_RWStepRepr.new_RWStepRepr_RWCompositeShapeAspect(*args))
def ReadStep(self, *args):
"""
* Reads CompositeShapeAspect
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_CompositeShapeAspect &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWCompositeShapeAspect_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes CompositeShapeAspect
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_CompositeShapeAspect &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWCompositeShapeAspect_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_CompositeShapeAspect &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWCompositeShapeAspect_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWCompositeShapeAspect
RWStepRepr_RWCompositeShapeAspect.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWCompositeShapeAspect_ReadStep,None,RWStepRepr_RWCompositeShapeAspect)
RWStepRepr_RWCompositeShapeAspect.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWCompositeShapeAspect_WriteStep,None,RWStepRepr_RWCompositeShapeAspect)
RWStepRepr_RWCompositeShapeAspect.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWCompositeShapeAspect_Share,None,RWStepRepr_RWCompositeShapeAspect)
RWStepRepr_RWCompositeShapeAspect_swigregister = _RWStepRepr.RWStepRepr_RWCompositeShapeAspect_swigregister
RWStepRepr_RWCompositeShapeAspect_swigregister(RWStepRepr_RWCompositeShapeAspect)
class RWStepRepr_RWCompoundRepresentationItem(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_swiginit(self,_RWStepRepr.new_RWStepRepr_RWCompoundRepresentationItem(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_CompoundRepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_CompoundRepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_CompoundRepresentationItem &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWCompoundRepresentationItem
RWStepRepr_RWCompoundRepresentationItem.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_ReadStep,None,RWStepRepr_RWCompoundRepresentationItem)
RWStepRepr_RWCompoundRepresentationItem.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_WriteStep,None,RWStepRepr_RWCompoundRepresentationItem)
RWStepRepr_RWCompoundRepresentationItem.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_Share,None,RWStepRepr_RWCompoundRepresentationItem)
RWStepRepr_RWCompoundRepresentationItem_swigregister = _RWStepRepr.RWStepRepr_RWCompoundRepresentationItem_swigregister
RWStepRepr_RWCompoundRepresentationItem_swigregister(RWStepRepr_RWCompoundRepresentationItem)
class RWStepRepr_RWConfigurationDesign(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWConfigurationDesign_swiginit(self,_RWStepRepr.new_RWStepRepr_RWConfigurationDesign(*args))
def ReadStep(self, *args):
"""
* Reads ConfigurationDesign
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ConfigurationDesign &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationDesign_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ConfigurationDesign
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ConfigurationDesign &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationDesign_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ConfigurationDesign &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationDesign_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWConfigurationDesign
RWStepRepr_RWConfigurationDesign.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationDesign_ReadStep,None,RWStepRepr_RWConfigurationDesign)
RWStepRepr_RWConfigurationDesign.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationDesign_WriteStep,None,RWStepRepr_RWConfigurationDesign)
RWStepRepr_RWConfigurationDesign.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationDesign_Share,None,RWStepRepr_RWConfigurationDesign)
RWStepRepr_RWConfigurationDesign_swigregister = _RWStepRepr.RWStepRepr_RWConfigurationDesign_swigregister
RWStepRepr_RWConfigurationDesign_swigregister(RWStepRepr_RWConfigurationDesign)
class RWStepRepr_RWConfigurationEffectivity(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWConfigurationEffectivity_swiginit(self,_RWStepRepr.new_RWStepRepr_RWConfigurationEffectivity(*args))
def ReadStep(self, *args):
"""
* Reads ConfigurationEffectivity
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ConfigurationEffectivity &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationEffectivity_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ConfigurationEffectivity
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ConfigurationEffectivity &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationEffectivity_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ConfigurationEffectivity &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationEffectivity_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWConfigurationEffectivity
RWStepRepr_RWConfigurationEffectivity.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationEffectivity_ReadStep,None,RWStepRepr_RWConfigurationEffectivity)
RWStepRepr_RWConfigurationEffectivity.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationEffectivity_WriteStep,None,RWStepRepr_RWConfigurationEffectivity)
RWStepRepr_RWConfigurationEffectivity.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationEffectivity_Share,None,RWStepRepr_RWConfigurationEffectivity)
RWStepRepr_RWConfigurationEffectivity_swigregister = _RWStepRepr.RWStepRepr_RWConfigurationEffectivity_swigregister
RWStepRepr_RWConfigurationEffectivity_swigregister(RWStepRepr_RWConfigurationEffectivity)
class RWStepRepr_RWConfigurationItem(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWConfigurationItem_swiginit(self,_RWStepRepr.new_RWStepRepr_RWConfigurationItem(*args))
def ReadStep(self, *args):
"""
* Reads ConfigurationItem
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ConfigurationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationItem_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ConfigurationItem
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ConfigurationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationItem_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ConfigurationItem &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWConfigurationItem_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWConfigurationItem
RWStepRepr_RWConfigurationItem.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationItem_ReadStep,None,RWStepRepr_RWConfigurationItem)
RWStepRepr_RWConfigurationItem.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationItem_WriteStep,None,RWStepRepr_RWConfigurationItem)
RWStepRepr_RWConfigurationItem.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWConfigurationItem_Share,None,RWStepRepr_RWConfigurationItem)
RWStepRepr_RWConfigurationItem_swigregister = _RWStepRepr.RWStepRepr_RWConfigurationItem_swigregister
RWStepRepr_RWConfigurationItem_swigregister(RWStepRepr_RWConfigurationItem)
class RWStepRepr_RWDataEnvironment(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWDataEnvironment_swiginit(self,_RWStepRepr.new_RWStepRepr_RWDataEnvironment(*args))
def ReadStep(self, *args):
"""
* Reads DataEnvironment
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_DataEnvironment &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDataEnvironment_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes DataEnvironment
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_DataEnvironment &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDataEnvironment_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_DataEnvironment &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDataEnvironment_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWDataEnvironment
RWStepRepr_RWDataEnvironment.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDataEnvironment_ReadStep,None,RWStepRepr_RWDataEnvironment)
RWStepRepr_RWDataEnvironment.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDataEnvironment_WriteStep,None,RWStepRepr_RWDataEnvironment)
RWStepRepr_RWDataEnvironment.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWDataEnvironment_Share,None,RWStepRepr_RWDataEnvironment)
RWStepRepr_RWDataEnvironment_swigregister = _RWStepRepr.RWStepRepr_RWDataEnvironment_swigregister
RWStepRepr_RWDataEnvironment_swigregister(RWStepRepr_RWDataEnvironment)
class RWStepRepr_RWDefinitionalRepresentation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWDefinitionalRepresentation(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_DefinitionalRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_DefinitionalRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_DefinitionalRepresentation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWDefinitionalRepresentation
RWStepRepr_RWDefinitionalRepresentation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_ReadStep,None,RWStepRepr_RWDefinitionalRepresentation)
RWStepRepr_RWDefinitionalRepresentation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_WriteStep,None,RWStepRepr_RWDefinitionalRepresentation)
RWStepRepr_RWDefinitionalRepresentation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_Share,None,RWStepRepr_RWDefinitionalRepresentation)
RWStepRepr_RWDefinitionalRepresentation_swigregister = _RWStepRepr.RWStepRepr_RWDefinitionalRepresentation_swigregister
RWStepRepr_RWDefinitionalRepresentation_swigregister(RWStepRepr_RWDefinitionalRepresentation)
class RWStepRepr_RWDerivedShapeAspect(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWDerivedShapeAspect_swiginit(self,_RWStepRepr.new_RWStepRepr_RWDerivedShapeAspect(*args))
def ReadStep(self, *args):
"""
* Reads DerivedShapeAspect
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_DerivedShapeAspect &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDerivedShapeAspect_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes DerivedShapeAspect
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_DerivedShapeAspect &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDerivedShapeAspect_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_DerivedShapeAspect &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDerivedShapeAspect_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWDerivedShapeAspect
RWStepRepr_RWDerivedShapeAspect.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDerivedShapeAspect_ReadStep,None,RWStepRepr_RWDerivedShapeAspect)
RWStepRepr_RWDerivedShapeAspect.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDerivedShapeAspect_WriteStep,None,RWStepRepr_RWDerivedShapeAspect)
RWStepRepr_RWDerivedShapeAspect.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWDerivedShapeAspect_Share,None,RWStepRepr_RWDerivedShapeAspect)
RWStepRepr_RWDerivedShapeAspect_swigregister = _RWStepRepr.RWStepRepr_RWDerivedShapeAspect_swigregister
RWStepRepr_RWDerivedShapeAspect_swigregister(RWStepRepr_RWDerivedShapeAspect)
class RWStepRepr_RWDescriptiveRepresentationItem(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWDescriptiveRepresentationItem_swiginit(self,_RWStepRepr.new_RWStepRepr_RWDescriptiveRepresentationItem(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_DescriptiveRepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDescriptiveRepresentationItem_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_DescriptiveRepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWDescriptiveRepresentationItem_WriteStep(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWDescriptiveRepresentationItem
RWStepRepr_RWDescriptiveRepresentationItem.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDescriptiveRepresentationItem_ReadStep,None,RWStepRepr_RWDescriptiveRepresentationItem)
RWStepRepr_RWDescriptiveRepresentationItem.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWDescriptiveRepresentationItem_WriteStep,None,RWStepRepr_RWDescriptiveRepresentationItem)
RWStepRepr_RWDescriptiveRepresentationItem_swigregister = _RWStepRepr.RWStepRepr_RWDescriptiveRepresentationItem_swigregister
RWStepRepr_RWDescriptiveRepresentationItem_swigregister(RWStepRepr_RWDescriptiveRepresentationItem)
class RWStepRepr_RWExtension(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWExtension_swiginit(self,_RWStepRepr.new_RWStepRepr_RWExtension(*args))
def ReadStep(self, *args):
"""
* Reads Extension
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_Extension &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWExtension_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes Extension
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_Extension &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWExtension_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_Extension &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWExtension_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWExtension
RWStepRepr_RWExtension.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWExtension_ReadStep,None,RWStepRepr_RWExtension)
RWStepRepr_RWExtension.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWExtension_WriteStep,None,RWStepRepr_RWExtension)
RWStepRepr_RWExtension.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWExtension_Share,None,RWStepRepr_RWExtension)
RWStepRepr_RWExtension_swigregister = _RWStepRepr.RWStepRepr_RWExtension_swigregister
RWStepRepr_RWExtension_swigregister(RWStepRepr_RWExtension)
class RWStepRepr_RWFunctionallyDefinedTransformation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWFunctionallyDefinedTransformation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWFunctionallyDefinedTransformation(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_FunctionallyDefinedTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWFunctionallyDefinedTransformation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_FunctionallyDefinedTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWFunctionallyDefinedTransformation_WriteStep(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWFunctionallyDefinedTransformation
RWStepRepr_RWFunctionallyDefinedTransformation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWFunctionallyDefinedTransformation_ReadStep,None,RWStepRepr_RWFunctionallyDefinedTransformation)
RWStepRepr_RWFunctionallyDefinedTransformation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWFunctionallyDefinedTransformation_WriteStep,None,RWStepRepr_RWFunctionallyDefinedTransformation)
RWStepRepr_RWFunctionallyDefinedTransformation_swigregister = _RWStepRepr.RWStepRepr_RWFunctionallyDefinedTransformation_swigregister
RWStepRepr_RWFunctionallyDefinedTransformation_swigregister(RWStepRepr_RWFunctionallyDefinedTransformation)
class RWStepRepr_RWGlobalUncertaintyAssignedContext(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_swiginit(self,_RWStepRepr.new_RWStepRepr_RWGlobalUncertaintyAssignedContext(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_GlobalUncertaintyAssignedContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_GlobalUncertaintyAssignedContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_GlobalUncertaintyAssignedContext &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWGlobalUncertaintyAssignedContext
RWStepRepr_RWGlobalUncertaintyAssignedContext.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_ReadStep,None,RWStepRepr_RWGlobalUncertaintyAssignedContext)
RWStepRepr_RWGlobalUncertaintyAssignedContext.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_WriteStep,None,RWStepRepr_RWGlobalUncertaintyAssignedContext)
RWStepRepr_RWGlobalUncertaintyAssignedContext.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_Share,None,RWStepRepr_RWGlobalUncertaintyAssignedContext)
RWStepRepr_RWGlobalUncertaintyAssignedContext_swigregister = _RWStepRepr.RWStepRepr_RWGlobalUncertaintyAssignedContext_swigregister
RWStepRepr_RWGlobalUncertaintyAssignedContext_swigregister(RWStepRepr_RWGlobalUncertaintyAssignedContext)
class RWStepRepr_RWGlobalUnitAssignedContext(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_swiginit(self,_RWStepRepr.new_RWStepRepr_RWGlobalUnitAssignedContext(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_GlobalUnitAssignedContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_GlobalUnitAssignedContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_GlobalUnitAssignedContext &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWGlobalUnitAssignedContext
RWStepRepr_RWGlobalUnitAssignedContext.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_ReadStep,None,RWStepRepr_RWGlobalUnitAssignedContext)
RWStepRepr_RWGlobalUnitAssignedContext.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_WriteStep,None,RWStepRepr_RWGlobalUnitAssignedContext)
RWStepRepr_RWGlobalUnitAssignedContext.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_Share,None,RWStepRepr_RWGlobalUnitAssignedContext)
RWStepRepr_RWGlobalUnitAssignedContext_swigregister = _RWStepRepr.RWStepRepr_RWGlobalUnitAssignedContext_swigregister
RWStepRepr_RWGlobalUnitAssignedContext_swigregister(RWStepRepr_RWGlobalUnitAssignedContext)
class RWStepRepr_RWItemDefinedTransformation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWItemDefinedTransformation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWItemDefinedTransformation(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ItemDefinedTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWItemDefinedTransformation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ItemDefinedTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWItemDefinedTransformation_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_ItemDefinedTransformation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWItemDefinedTransformation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWItemDefinedTransformation
RWStepRepr_RWItemDefinedTransformation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWItemDefinedTransformation_ReadStep,None,RWStepRepr_RWItemDefinedTransformation)
RWStepRepr_RWItemDefinedTransformation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWItemDefinedTransformation_WriteStep,None,RWStepRepr_RWItemDefinedTransformation)
RWStepRepr_RWItemDefinedTransformation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWItemDefinedTransformation_Share,None,RWStepRepr_RWItemDefinedTransformation)
RWStepRepr_RWItemDefinedTransformation_swigregister = _RWStepRepr.RWStepRepr_RWItemDefinedTransformation_swigregister
RWStepRepr_RWItemDefinedTransformation_swigregister(RWStepRepr_RWItemDefinedTransformation)
class RWStepRepr_RWMakeFromUsageOption(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWMakeFromUsageOption_swiginit(self,_RWStepRepr.new_RWStepRepr_RWMakeFromUsageOption(*args))
def ReadStep(self, *args):
"""
* Reads MakeFromUsageOption
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_MakeFromUsageOption &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMakeFromUsageOption_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes MakeFromUsageOption
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_MakeFromUsageOption &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMakeFromUsageOption_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_MakeFromUsageOption &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMakeFromUsageOption_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWMakeFromUsageOption
RWStepRepr_RWMakeFromUsageOption.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMakeFromUsageOption_ReadStep,None,RWStepRepr_RWMakeFromUsageOption)
RWStepRepr_RWMakeFromUsageOption.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMakeFromUsageOption_WriteStep,None,RWStepRepr_RWMakeFromUsageOption)
RWStepRepr_RWMakeFromUsageOption.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWMakeFromUsageOption_Share,None,RWStepRepr_RWMakeFromUsageOption)
RWStepRepr_RWMakeFromUsageOption_swigregister = _RWStepRepr.RWStepRepr_RWMakeFromUsageOption_swigregister
RWStepRepr_RWMakeFromUsageOption_swigregister(RWStepRepr_RWMakeFromUsageOption)
class RWStepRepr_RWMappedItem(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWMappedItem_swiginit(self,_RWStepRepr.new_RWStepRepr_RWMappedItem(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_MappedItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMappedItem_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_MappedItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMappedItem_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_MappedItem &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMappedItem_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWMappedItem
RWStepRepr_RWMappedItem.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMappedItem_ReadStep,None,RWStepRepr_RWMappedItem)
RWStepRepr_RWMappedItem.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMappedItem_WriteStep,None,RWStepRepr_RWMappedItem)
RWStepRepr_RWMappedItem.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWMappedItem_Share,None,RWStepRepr_RWMappedItem)
RWStepRepr_RWMappedItem_swigregister = _RWStepRepr.RWStepRepr_RWMappedItem_swigregister
RWStepRepr_RWMappedItem_swigregister(RWStepRepr_RWMappedItem)
class RWStepRepr_RWMaterialDesignation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWMaterialDesignation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWMaterialDesignation(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_MaterialDesignation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialDesignation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_MaterialDesignation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialDesignation_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_MaterialDesignation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialDesignation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWMaterialDesignation
RWStepRepr_RWMaterialDesignation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialDesignation_ReadStep,None,RWStepRepr_RWMaterialDesignation)
RWStepRepr_RWMaterialDesignation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialDesignation_WriteStep,None,RWStepRepr_RWMaterialDesignation)
RWStepRepr_RWMaterialDesignation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialDesignation_Share,None,RWStepRepr_RWMaterialDesignation)
RWStepRepr_RWMaterialDesignation_swigregister = _RWStepRepr.RWStepRepr_RWMaterialDesignation_swigregister
RWStepRepr_RWMaterialDesignation_swigregister(RWStepRepr_RWMaterialDesignation)
class RWStepRepr_RWMaterialProperty(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWMaterialProperty_swiginit(self,_RWStepRepr.new_RWStepRepr_RWMaterialProperty(*args))
def ReadStep(self, *args):
"""
* Reads MaterialProperty
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_MaterialProperty &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialProperty_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes MaterialProperty
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_MaterialProperty &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialProperty_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_MaterialProperty &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialProperty_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWMaterialProperty
RWStepRepr_RWMaterialProperty.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialProperty_ReadStep,None,RWStepRepr_RWMaterialProperty)
RWStepRepr_RWMaterialProperty.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialProperty_WriteStep,None,RWStepRepr_RWMaterialProperty)
RWStepRepr_RWMaterialProperty.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialProperty_Share,None,RWStepRepr_RWMaterialProperty)
RWStepRepr_RWMaterialProperty_swigregister = _RWStepRepr.RWStepRepr_RWMaterialProperty_swigregister
RWStepRepr_RWMaterialProperty_swigregister(RWStepRepr_RWMaterialProperty)
class RWStepRepr_RWMaterialPropertyRepresentation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWMaterialPropertyRepresentation(*args))
def ReadStep(self, *args):
"""
* Reads MaterialPropertyRepresentation
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_MaterialPropertyRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes MaterialPropertyRepresentation
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_MaterialPropertyRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_MaterialPropertyRepresentation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWMaterialPropertyRepresentation
RWStepRepr_RWMaterialPropertyRepresentation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_ReadStep,None,RWStepRepr_RWMaterialPropertyRepresentation)
RWStepRepr_RWMaterialPropertyRepresentation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_WriteStep,None,RWStepRepr_RWMaterialPropertyRepresentation)
RWStepRepr_RWMaterialPropertyRepresentation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_Share,None,RWStepRepr_RWMaterialPropertyRepresentation)
RWStepRepr_RWMaterialPropertyRepresentation_swigregister = _RWStepRepr.RWStepRepr_RWMaterialPropertyRepresentation_swigregister
RWStepRepr_RWMaterialPropertyRepresentation_swigregister(RWStepRepr_RWMaterialPropertyRepresentation)
class RWStepRepr_RWMeasureRepresentationItem(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_swiginit(self,_RWStepRepr.new_RWStepRepr_RWMeasureRepresentationItem(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_MeasureRepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_MeasureRepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_MeasureRepresentationItem &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWMeasureRepresentationItem
RWStepRepr_RWMeasureRepresentationItem.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_ReadStep,None,RWStepRepr_RWMeasureRepresentationItem)
RWStepRepr_RWMeasureRepresentationItem.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_WriteStep,None,RWStepRepr_RWMeasureRepresentationItem)
RWStepRepr_RWMeasureRepresentationItem.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_Share,None,RWStepRepr_RWMeasureRepresentationItem)
RWStepRepr_RWMeasureRepresentationItem_swigregister = _RWStepRepr.RWStepRepr_RWMeasureRepresentationItem_swigregister
RWStepRepr_RWMeasureRepresentationItem_swigregister(RWStepRepr_RWMeasureRepresentationItem)
class RWStepRepr_RWParametricRepresentationContext(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWParametricRepresentationContext_swiginit(self,_RWStepRepr.new_RWStepRepr_RWParametricRepresentationContext(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ParametricRepresentationContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWParametricRepresentationContext_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ParametricRepresentationContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWParametricRepresentationContext_WriteStep(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWParametricRepresentationContext
RWStepRepr_RWParametricRepresentationContext.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWParametricRepresentationContext_ReadStep,None,RWStepRepr_RWParametricRepresentationContext)
RWStepRepr_RWParametricRepresentationContext.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWParametricRepresentationContext_WriteStep,None,RWStepRepr_RWParametricRepresentationContext)
RWStepRepr_RWParametricRepresentationContext_swigregister = _RWStepRepr.RWStepRepr_RWParametricRepresentationContext_swigregister
RWStepRepr_RWParametricRepresentationContext_swigregister(RWStepRepr_RWParametricRepresentationContext)
class RWStepRepr_RWProductConcept(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWProductConcept_swiginit(self,_RWStepRepr.new_RWStepRepr_RWProductConcept(*args))
def ReadStep(self, *args):
"""
* Reads ProductConcept
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ProductConcept &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWProductConcept_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ProductConcept
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ProductConcept &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWProductConcept_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ProductConcept &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWProductConcept_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWProductConcept
RWStepRepr_RWProductConcept.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWProductConcept_ReadStep,None,RWStepRepr_RWProductConcept)
RWStepRepr_RWProductConcept.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWProductConcept_WriteStep,None,RWStepRepr_RWProductConcept)
RWStepRepr_RWProductConcept.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWProductConcept_Share,None,RWStepRepr_RWProductConcept)
RWStepRepr_RWProductConcept_swigregister = _RWStepRepr.RWStepRepr_RWProductConcept_swigregister
RWStepRepr_RWProductConcept_swigregister(RWStepRepr_RWProductConcept)
class RWStepRepr_RWProductDefinitionShape(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWProductDefinitionShape_swiginit(self,_RWStepRepr.new_RWStepRepr_RWProductDefinitionShape(*args))
def ReadStep(self, *args):
"""
* Reads ProductDefinitionShape
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ProductDefinitionShape &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWProductDefinitionShape_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ProductDefinitionShape
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ProductDefinitionShape &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWProductDefinitionShape_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ProductDefinitionShape &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWProductDefinitionShape_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWProductDefinitionShape
RWStepRepr_RWProductDefinitionShape.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWProductDefinitionShape_ReadStep,None,RWStepRepr_RWProductDefinitionShape)
RWStepRepr_RWProductDefinitionShape.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWProductDefinitionShape_WriteStep,None,RWStepRepr_RWProductDefinitionShape)
RWStepRepr_RWProductDefinitionShape.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWProductDefinitionShape_Share,None,RWStepRepr_RWProductDefinitionShape)
RWStepRepr_RWProductDefinitionShape_swigregister = _RWStepRepr.RWStepRepr_RWProductDefinitionShape_swigregister
RWStepRepr_RWProductDefinitionShape_swigregister(RWStepRepr_RWProductDefinitionShape)
class RWStepRepr_RWPropertyDefinition(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWPropertyDefinition_swiginit(self,_RWStepRepr.new_RWStepRepr_RWPropertyDefinition(*args))
def ReadStep(self, *args):
"""
* Reads PropertyDefinition
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_PropertyDefinition &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinition_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes PropertyDefinition
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_PropertyDefinition &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinition_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_PropertyDefinition &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinition_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWPropertyDefinition
RWStepRepr_RWPropertyDefinition.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinition_ReadStep,None,RWStepRepr_RWPropertyDefinition)
RWStepRepr_RWPropertyDefinition.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinition_WriteStep,None,RWStepRepr_RWPropertyDefinition)
RWStepRepr_RWPropertyDefinition.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinition_Share,None,RWStepRepr_RWPropertyDefinition)
RWStepRepr_RWPropertyDefinition_swigregister = _RWStepRepr.RWStepRepr_RWPropertyDefinition_swigregister
RWStepRepr_RWPropertyDefinition_swigregister(RWStepRepr_RWPropertyDefinition)
class RWStepRepr_RWPropertyDefinitionRelationship(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_swiginit(self,_RWStepRepr.new_RWStepRepr_RWPropertyDefinitionRelationship(*args))
def ReadStep(self, *args):
"""
* Reads PropertyDefinitionRelationship
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_PropertyDefinitionRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes PropertyDefinitionRelationship
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_PropertyDefinitionRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_PropertyDefinitionRelationship &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWPropertyDefinitionRelationship
RWStepRepr_RWPropertyDefinitionRelationship.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_ReadStep,None,RWStepRepr_RWPropertyDefinitionRelationship)
RWStepRepr_RWPropertyDefinitionRelationship.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_WriteStep,None,RWStepRepr_RWPropertyDefinitionRelationship)
RWStepRepr_RWPropertyDefinitionRelationship.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_Share,None,RWStepRepr_RWPropertyDefinitionRelationship)
RWStepRepr_RWPropertyDefinitionRelationship_swigregister = _RWStepRepr.RWStepRepr_RWPropertyDefinitionRelationship_swigregister
RWStepRepr_RWPropertyDefinitionRelationship_swigregister(RWStepRepr_RWPropertyDefinitionRelationship)
class RWStepRepr_RWPropertyDefinitionRepresentation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWPropertyDefinitionRepresentation(*args))
def ReadStep(self, *args):
"""
* Reads PropertyDefinitionRepresentation
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_PropertyDefinitionRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes PropertyDefinitionRepresentation
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_PropertyDefinitionRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_PropertyDefinitionRepresentation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWPropertyDefinitionRepresentation
RWStepRepr_RWPropertyDefinitionRepresentation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_ReadStep,None,RWStepRepr_RWPropertyDefinitionRepresentation)
RWStepRepr_RWPropertyDefinitionRepresentation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_WriteStep,None,RWStepRepr_RWPropertyDefinitionRepresentation)
RWStepRepr_RWPropertyDefinitionRepresentation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_Share,None,RWStepRepr_RWPropertyDefinitionRepresentation)
RWStepRepr_RWPropertyDefinitionRepresentation_swigregister = _RWStepRepr.RWStepRepr_RWPropertyDefinitionRepresentation_swigregister
RWStepRepr_RWPropertyDefinitionRepresentation_swigregister(RWStepRepr_RWPropertyDefinitionRepresentation)
class RWStepRepr_RWQuantifiedAssemblyComponentUsage(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_swiginit(self,_RWStepRepr.new_RWStepRepr_RWQuantifiedAssemblyComponentUsage(*args))
def ReadStep(self, *args):
"""
* Reads QuantifiedAssemblyComponentUsage
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_QuantifiedAssemblyComponentUsage &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes QuantifiedAssemblyComponentUsage
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_QuantifiedAssemblyComponentUsage &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_QuantifiedAssemblyComponentUsage &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWQuantifiedAssemblyComponentUsage
RWStepRepr_RWQuantifiedAssemblyComponentUsage.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_ReadStep,None,RWStepRepr_RWQuantifiedAssemblyComponentUsage)
RWStepRepr_RWQuantifiedAssemblyComponentUsage.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_WriteStep,None,RWStepRepr_RWQuantifiedAssemblyComponentUsage)
RWStepRepr_RWQuantifiedAssemblyComponentUsage.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_Share,None,RWStepRepr_RWQuantifiedAssemblyComponentUsage)
RWStepRepr_RWQuantifiedAssemblyComponentUsage_swigregister = _RWStepRepr.RWStepRepr_RWQuantifiedAssemblyComponentUsage_swigregister
RWStepRepr_RWQuantifiedAssemblyComponentUsage_swigregister(RWStepRepr_RWQuantifiedAssemblyComponentUsage)
class RWStepRepr_RWReprItemAndLengthMeasureWithUnit(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWReprItemAndLengthMeasureWithUnit_swiginit(self,_RWStepRepr.new_RWStepRepr_RWReprItemAndLengthMeasureWithUnit(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ReprItemAndLengthMeasureWithUnit &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWReprItemAndLengthMeasureWithUnit_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ReprItemAndLengthMeasureWithUnit &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWReprItemAndLengthMeasureWithUnit_WriteStep(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWReprItemAndLengthMeasureWithUnit
RWStepRepr_RWReprItemAndLengthMeasureWithUnit.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWReprItemAndLengthMeasureWithUnit_ReadStep,None,RWStepRepr_RWReprItemAndLengthMeasureWithUnit)
RWStepRepr_RWReprItemAndLengthMeasureWithUnit.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWReprItemAndLengthMeasureWithUnit_WriteStep,None,RWStepRepr_RWReprItemAndLengthMeasureWithUnit)
RWStepRepr_RWReprItemAndLengthMeasureWithUnit_swigregister = _RWStepRepr.RWStepRepr_RWReprItemAndLengthMeasureWithUnit_swigregister
RWStepRepr_RWReprItemAndLengthMeasureWithUnit_swigregister(RWStepRepr_RWReprItemAndLengthMeasureWithUnit)
class RWStepRepr_RWRepresentation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWRepresentation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWRepresentation(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_Representation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_Representation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentation_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_Representation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWRepresentation
RWStepRepr_RWRepresentation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentation_ReadStep,None,RWStepRepr_RWRepresentation)
RWStepRepr_RWRepresentation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentation_WriteStep,None,RWStepRepr_RWRepresentation)
RWStepRepr_RWRepresentation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentation_Share,None,RWStepRepr_RWRepresentation)
RWStepRepr_RWRepresentation_swigregister = _RWStepRepr.RWStepRepr_RWRepresentation_swigregister
RWStepRepr_RWRepresentation_swigregister(RWStepRepr_RWRepresentation)
class RWStepRepr_RWRepresentationContext(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWRepresentationContext_swiginit(self,_RWStepRepr.new_RWStepRepr_RWRepresentationContext(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_RepresentationContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationContext_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_RepresentationContext &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationContext_WriteStep(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWRepresentationContext
RWStepRepr_RWRepresentationContext.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationContext_ReadStep,None,RWStepRepr_RWRepresentationContext)
RWStepRepr_RWRepresentationContext.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationContext_WriteStep,None,RWStepRepr_RWRepresentationContext)
RWStepRepr_RWRepresentationContext_swigregister = _RWStepRepr.RWStepRepr_RWRepresentationContext_swigregister
RWStepRepr_RWRepresentationContext_swigregister(RWStepRepr_RWRepresentationContext)
class RWStepRepr_RWRepresentationItem(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWRepresentationItem_swiginit(self,_RWStepRepr.new_RWStepRepr_RWRepresentationItem(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_RepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationItem_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_RepresentationItem &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationItem_WriteStep(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWRepresentationItem
RWStepRepr_RWRepresentationItem.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationItem_ReadStep,None,RWStepRepr_RWRepresentationItem)
RWStepRepr_RWRepresentationItem.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationItem_WriteStep,None,RWStepRepr_RWRepresentationItem)
RWStepRepr_RWRepresentationItem_swigregister = _RWStepRepr.RWStepRepr_RWRepresentationItem_swigregister
RWStepRepr_RWRepresentationItem_swigregister(RWStepRepr_RWRepresentationItem)
class RWStepRepr_RWRepresentationMap(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWRepresentationMap_swiginit(self,_RWStepRepr.new_RWStepRepr_RWRepresentationMap(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_RepresentationMap &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationMap_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_RepresentationMap &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationMap_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_RepresentationMap &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationMap_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWRepresentationMap
RWStepRepr_RWRepresentationMap.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationMap_ReadStep,None,RWStepRepr_RWRepresentationMap)
RWStepRepr_RWRepresentationMap.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationMap_WriteStep,None,RWStepRepr_RWRepresentationMap)
RWStepRepr_RWRepresentationMap.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationMap_Share,None,RWStepRepr_RWRepresentationMap)
RWStepRepr_RWRepresentationMap_swigregister = _RWStepRepr.RWStepRepr_RWRepresentationMap_swigregister
RWStepRepr_RWRepresentationMap_swigregister(RWStepRepr_RWRepresentationMap)
class RWStepRepr_RWRepresentationRelationship(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWRepresentationRelationship_swiginit(self,_RWStepRepr.new_RWStepRepr_RWRepresentationRelationship(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_RepresentationRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationRelationship_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_RepresentationRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationRelationship_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_RepresentationRelationship &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationRelationship_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWRepresentationRelationship
RWStepRepr_RWRepresentationRelationship.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationRelationship_ReadStep,None,RWStepRepr_RWRepresentationRelationship)
RWStepRepr_RWRepresentationRelationship.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationRelationship_WriteStep,None,RWStepRepr_RWRepresentationRelationship)
RWStepRepr_RWRepresentationRelationship.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationRelationship_Share,None,RWStepRepr_RWRepresentationRelationship)
RWStepRepr_RWRepresentationRelationship_swigregister = _RWStepRepr.RWStepRepr_RWRepresentationRelationship_swigregister
RWStepRepr_RWRepresentationRelationship_swigregister(RWStepRepr_RWRepresentationRelationship)
class RWStepRepr_RWRepresentationRelationshipWithTransformation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWRepresentationRelationshipWithTransformation(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_RepresentationRelationshipWithTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_RepresentationRelationshipWithTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_RepresentationRelationshipWithTransformation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWRepresentationRelationshipWithTransformation
RWStepRepr_RWRepresentationRelationshipWithTransformation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_ReadStep,None,RWStepRepr_RWRepresentationRelationshipWithTransformation)
RWStepRepr_RWRepresentationRelationshipWithTransformation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_WriteStep,None,RWStepRepr_RWRepresentationRelationshipWithTransformation)
RWStepRepr_RWRepresentationRelationshipWithTransformation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_Share,None,RWStepRepr_RWRepresentationRelationshipWithTransformation)
RWStepRepr_RWRepresentationRelationshipWithTransformation_swigregister = _RWStepRepr.RWStepRepr_RWRepresentationRelationshipWithTransformation_swigregister
RWStepRepr_RWRepresentationRelationshipWithTransformation_swigregister(RWStepRepr_RWRepresentationRelationshipWithTransformation)
class RWStepRepr_RWShapeAspect(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWShapeAspect_swiginit(self,_RWStepRepr.new_RWStepRepr_RWShapeAspect(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ShapeAspect &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspect_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ShapeAspect &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspect_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_ShapeAspect &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspect_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWShapeAspect
RWStepRepr_RWShapeAspect.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspect_ReadStep,None,RWStepRepr_RWShapeAspect)
RWStepRepr_RWShapeAspect.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspect_WriteStep,None,RWStepRepr_RWShapeAspect)
RWStepRepr_RWShapeAspect.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspect_Share,None,RWStepRepr_RWShapeAspect)
RWStepRepr_RWShapeAspect_swigregister = _RWStepRepr.RWStepRepr_RWShapeAspect_swigregister
RWStepRepr_RWShapeAspect_swigregister(RWStepRepr_RWShapeAspect)
class RWStepRepr_RWShapeAspectDerivingRelationship(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_swiginit(self,_RWStepRepr.new_RWStepRepr_RWShapeAspectDerivingRelationship(*args))
def ReadStep(self, *args):
"""
* Reads ShapeAspectDerivingRelationship
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ShapeAspectDerivingRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ShapeAspectDerivingRelationship
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ShapeAspectDerivingRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ShapeAspectDerivingRelationship &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWShapeAspectDerivingRelationship
RWStepRepr_RWShapeAspectDerivingRelationship.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_ReadStep,None,RWStepRepr_RWShapeAspectDerivingRelationship)
RWStepRepr_RWShapeAspectDerivingRelationship.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_WriteStep,None,RWStepRepr_RWShapeAspectDerivingRelationship)
RWStepRepr_RWShapeAspectDerivingRelationship.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_Share,None,RWStepRepr_RWShapeAspectDerivingRelationship)
RWStepRepr_RWShapeAspectDerivingRelationship_swigregister = _RWStepRepr.RWStepRepr_RWShapeAspectDerivingRelationship_swigregister
RWStepRepr_RWShapeAspectDerivingRelationship_swigregister(RWStepRepr_RWShapeAspectDerivingRelationship)
class RWStepRepr_RWShapeAspectRelationship(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWShapeAspectRelationship_swiginit(self,_RWStepRepr.new_RWStepRepr_RWShapeAspectRelationship(*args))
def ReadStep(self, *args):
"""
* Reads ShapeAspectRelationship
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ShapeAspectRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectRelationship_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ShapeAspectRelationship
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ShapeAspectRelationship &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectRelationship_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ShapeAspectRelationship &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectRelationship_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWShapeAspectRelationship
RWStepRepr_RWShapeAspectRelationship.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectRelationship_ReadStep,None,RWStepRepr_RWShapeAspectRelationship)
RWStepRepr_RWShapeAspectRelationship.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectRelationship_WriteStep,None,RWStepRepr_RWShapeAspectRelationship)
RWStepRepr_RWShapeAspectRelationship.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectRelationship_Share,None,RWStepRepr_RWShapeAspectRelationship)
RWStepRepr_RWShapeAspectRelationship_swigregister = _RWStepRepr.RWStepRepr_RWShapeAspectRelationship_swigregister
RWStepRepr_RWShapeAspectRelationship_swigregister(RWStepRepr_RWShapeAspectRelationship)
class RWStepRepr_RWShapeAspectTransition(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWShapeAspectTransition_swiginit(self,_RWStepRepr.new_RWStepRepr_RWShapeAspectTransition(*args))
def ReadStep(self, *args):
"""
* Reads ShapeAspectTransition
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ShapeAspectTransition &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectTransition_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes ShapeAspectTransition
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ShapeAspectTransition &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectTransition_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_ShapeAspectTransition &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeAspectTransition_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWShapeAspectTransition
RWStepRepr_RWShapeAspectTransition.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectTransition_ReadStep,None,RWStepRepr_RWShapeAspectTransition)
RWStepRepr_RWShapeAspectTransition.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectTransition_WriteStep,None,RWStepRepr_RWShapeAspectTransition)
RWStepRepr_RWShapeAspectTransition.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeAspectTransition_Share,None,RWStepRepr_RWShapeAspectTransition)
RWStepRepr_RWShapeAspectTransition_swigregister = _RWStepRepr.RWStepRepr_RWShapeAspectTransition_swigregister
RWStepRepr_RWShapeAspectTransition_swigregister(RWStepRepr_RWShapeAspectTransition)
class RWStepRepr_RWShapeRepresentationRelationshipWithTransformation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWShapeRepresentationRelationshipWithTransformation(*args))
def ReadStep(self, *args):
"""
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_ShapeRepresentationRelationshipWithTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_ShapeRepresentationRelationshipWithTransformation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_WriteStep(self, *args)
def Share(self, *args):
"""
:param ent:
:type ent: Handle_StepRepr_ShapeRepresentationRelationshipWithTransformation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWShapeRepresentationRelationshipWithTransformation
RWStepRepr_RWShapeRepresentationRelationshipWithTransformation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_ReadStep,None,RWStepRepr_RWShapeRepresentationRelationshipWithTransformation)
RWStepRepr_RWShapeRepresentationRelationshipWithTransformation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_WriteStep,None,RWStepRepr_RWShapeRepresentationRelationshipWithTransformation)
RWStepRepr_RWShapeRepresentationRelationshipWithTransformation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_Share,None,RWStepRepr_RWShapeRepresentationRelationshipWithTransformation)
RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_swigregister = _RWStepRepr.RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_swigregister
RWStepRepr_RWShapeRepresentationRelationshipWithTransformation_swigregister(RWStepRepr_RWShapeRepresentationRelationshipWithTransformation)
class RWStepRepr_RWSpecifiedHigherUsageOccurrence(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_swiginit(self,_RWStepRepr.new_RWStepRepr_RWSpecifiedHigherUsageOccurrence(*args))
def ReadStep(self, *args):
"""
* Reads SpecifiedHigherUsageOccurrence
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_SpecifiedHigherUsageOccurrence &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes SpecifiedHigherUsageOccurrence
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_SpecifiedHigherUsageOccurrence &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_SpecifiedHigherUsageOccurrence &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWSpecifiedHigherUsageOccurrence
RWStepRepr_RWSpecifiedHigherUsageOccurrence.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_ReadStep,None,RWStepRepr_RWSpecifiedHigherUsageOccurrence)
RWStepRepr_RWSpecifiedHigherUsageOccurrence.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_WriteStep,None,RWStepRepr_RWSpecifiedHigherUsageOccurrence)
RWStepRepr_RWSpecifiedHigherUsageOccurrence.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_Share,None,RWStepRepr_RWSpecifiedHigherUsageOccurrence)
RWStepRepr_RWSpecifiedHigherUsageOccurrence_swigregister = _RWStepRepr.RWStepRepr_RWSpecifiedHigherUsageOccurrence_swigregister
RWStepRepr_RWSpecifiedHigherUsageOccurrence_swigregister(RWStepRepr_RWSpecifiedHigherUsageOccurrence)
class RWStepRepr_RWStructuralResponseProperty(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWStructuralResponseProperty_swiginit(self,_RWStepRepr.new_RWStepRepr_RWStructuralResponseProperty(*args))
def ReadStep(self, *args):
"""
* Reads StructuralResponseProperty
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_StructuralResponseProperty &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWStructuralResponseProperty_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes StructuralResponseProperty
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_StructuralResponseProperty &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWStructuralResponseProperty_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_StructuralResponseProperty &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWStructuralResponseProperty_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWStructuralResponseProperty
RWStepRepr_RWStructuralResponseProperty.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWStructuralResponseProperty_ReadStep,None,RWStepRepr_RWStructuralResponseProperty)
RWStepRepr_RWStructuralResponseProperty.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWStructuralResponseProperty_WriteStep,None,RWStepRepr_RWStructuralResponseProperty)
RWStepRepr_RWStructuralResponseProperty.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWStructuralResponseProperty_Share,None,RWStepRepr_RWStructuralResponseProperty)
RWStepRepr_RWStructuralResponseProperty_swigregister = _RWStepRepr.RWStepRepr_RWStructuralResponseProperty_swigregister
RWStepRepr_RWStructuralResponseProperty_swigregister(RWStepRepr_RWStructuralResponseProperty)
class RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor
:rtype: None
"""
_RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_swiginit(self,_RWStepRepr.new_RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation(*args))
def ReadStep(self, *args):
"""
* Reads StructuralResponsePropertyDefinitionRepresentation
:param data:
:type data: Handle_StepData_StepReaderData &
:param num:
:type num: int
:param ach:
:type ach: Handle_Interface_Check &
:param ent:
:type ent: Handle_StepRepr_StructuralResponsePropertyDefinitionRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_ReadStep(self, *args)
def WriteStep(self, *args):
"""
* Writes StructuralResponsePropertyDefinitionRepresentation
:param SW:
:type SW: StepData_StepWriter &
:param ent:
:type ent: Handle_StepRepr_StructuralResponsePropertyDefinitionRepresentation &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_WriteStep(self, *args)
def Share(self, *args):
"""
* Fills data for graph (shared items)
:param ent:
:type ent: Handle_StepRepr_StructuralResponsePropertyDefinitionRepresentation &
:param iter:
:type iter: Interface_EntityIterator &
:rtype: None
"""
return _RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_Share(self, *args)
__swig_destroy__ = _RWStepRepr.delete_RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation
RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation.ReadStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_ReadStep,None,RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation)
RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation.WriteStep = new_instancemethod(_RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_WriteStep,None,RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation)
RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation.Share = new_instancemethod(_RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_Share,None,RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation)
RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_swigregister = _RWStepRepr.RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_swigregister
RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation_swigregister(RWStepRepr_RWStructuralResponsePropertyDefinitionRepresentation)
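# Usage sketch (assumption: running inside a pythonocc/OCC environment where
# the StepData and Interface packages provide the reader data, check object
# and entity handles; the names below are hypothetical placeholders).
# Every RW* tool class above follows the same protocol:
#     tool = RWStepRepr_RWRepresentation()
#     tool.ReadStep(reader_data, num, ach, ent)   # decode entity number `num` into `ent`
#     tool.WriteStep(step_writer, ent)            # encode `ent` back to a STEP record
#     tool.Share(ent, entity_iterator)            # collect entities referenced by `ent`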
|
StarcoderdataPython
|
11224887
|
import os
import re
from typing import Dict, List
Index = Dict[str, Dict[int, List[str]]]
def index(in_path: str) -> Index:
    """Index test directory names by workload, then client count, to techniques."""
    workload_index: Index = {}
for test in os.listdir(in_path):
workload = get_workload(test)
clients = get_client_count(test)
technique = get_technique(test)
client_index = workload_index.get(workload, {})
technique_index = client_index.get(clients, [])
technique_index.append(technique)
client_index[clients] = technique_index
workload_index[workload] = client_index
return workload_index
def get_client_count(test_name: str) -> int:
result = re.search(r'\D(\d+)$', test_name)
return int(result.group(1))
def get_technique(test_name: str) -> str:
result = re.search(r'(sa|otel|di|control)', test_name)
return result.group(1)
def get_workload(test_name: str) -> str:
result = re.search(r'(sa|otel|di|control)([\D_]+)\d', test_name)
return result.group(2)
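# Usage sketch (assumption: the target directory's entries are named
# technique + workload + client count, e.g. a hypothetical "otel_readheavy_16"):
if __name__ == '__main__':
    import sys
    target = sys.argv[1] if len(sys.argv) > 1 else 'results'
    for workload, by_clients in index(target).items():
        for clients, techniques in sorted(by_clients.items()):
            print(workload, clients, techniques)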
|
StarcoderdataPython
|
11223526
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import astropy.units as u
from .. import data
class TestOHFluorescenceSA88:
def test_linear_interpolation(self, monkeypatch):
monkeypatch.setattr(data, 'scipy', None)
model = data.OHFluorescenceSA88('0-0')
LN = model(-1 * u.km / u.s)
assert np.isclose(LN.value, 1.54e-15)
def test_tau(self):
model = data.OHFluorescenceSA88('0-0')
assert np.isclose(model.tau[0].value, 2.87e5)
def test_inversion(self):
model = data.OHFluorescenceSA88('0-0')
assert np.isclose(model.inversion[0], -0.304)
def test_rdot_error(self):
model = data.OHFluorescenceSA88('0-0')
with pytest.raises(ValueError):
model(-61 * u.km / u.s)
def test_rh_error(self):
model = data.OHFluorescenceSA88('0-0')
with pytest.raises(ValueError):
model({'rdot': 1 * u.km / u.s, 'rh': 0.4 * u.au})
|
StarcoderdataPython
|
302576
|
"""Packaging logic for betamax."""
import os
import re
import sys
import setuptools
packages = setuptools.find_packages(
"src",
exclude=["tests", "tests.integration"],
)
requires = ["requests >= 2.0"]
__version__ = ""
with open("src/betamax/__init__.py", "r") as fd:
    reg = re.compile(r"__version__ = [\'\"]([^\'\"]*)[\'\"]")
for line in fd:
m = reg.match(line)
if m:
__version__ = m.group(1)
break
if not __version__:
raise RuntimeError("Cannot find version information")
if sys.argv[-1] in ["submit", "publish"]:
os.system("python setup.py sdist bdist_wheel upload")
sys.exit()
def data_for(filename):
"""Read the file data for a filename."""
with open(filename) as fd:
content = fd.read()
return content
setuptools.setup(
name="betamax",
version=__version__,
description="A VCR imitation for python-requests",
long_description="\n\n".join([data_for("README.rst"),
data_for("HISTORY.rst")]),
license="Apache 2.0",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/sigmavirus24/betamax",
packages=packages,
package_dir={"": "src"},
package_data={"": ["LICENSE", "AUTHORS.rst"]},
include_package_data=True,
install_requires=requires,
entry_points={
"pytest11": ["pytest-betamax = betamax.fixtures.pytest"]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
]
)
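# Build/installation sketch (assumption: run from the repository root):
#     python setup.py sdist bdist_wheel   # build source and wheel distributions
#     pip install -e .                    # editable install for development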
|
StarcoderdataPython
|
5062795
|
"""mutes"""
from twitter.core import UserData
class Mutes:
"""mutes"""
def __init__(self, twitter):
self.twitter = twitter
def users_ids(self, params):
"""Hello"""
url = "/".join(["mutes", "users", "ids"])
result = self.twitter.get(url, params=params)
result.data = result.texts["ids"]
result.next_cursor = result.texts["next_cursor"]
result.previous_cursor = result.texts["previous_cursor"]
result.get_texts_array = None
result.get_texts_tuple = None
return result
def users_list(self, params):
"""H"""
url = "/".join(["mutes", "users", "list"])
result = self.twitter.get(url, params=params)
result.data = [UserData(url, text) for text in result.texts["users"]]
result.next_cursor = result.texts["next_cursor"]
result.previous_cursor = result.texts["previous_cursor"]
return result
def users_create(self, params):
"""H"""
url = "/".join(["mutes", "users", "create"])
result = self.twitter.post(url, params=params)
result.data = [UserData(url, result.texts)]
return result
def users_destroy(self, params):
"""H"""
url = "/".join(["mutes", "users", "destroy"])
result = self.twitter.post(url, params=params)
result.data = [UserData(url, result.texts)]
return result
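# Usage sketch (assumption: ``twitter`` is a hypothetical authenticated client
# exposing the get/post methods used above):
#     mutes = Mutes(twitter)
#     result = mutes.users_ids({'cursor': -1})
#     print(result.data, result.next_cursor)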
|
StarcoderdataPython
|
1826873
|
import json
class Debug_JSON():
def __init__(self, settings, device):
self.config = settings
self.device = device
def get_debug_json(self, base_url):
debugjson = {
"base_url": base_url,
"total channels": self.device.channels.get_station_total(),
"tuner status": self.device.tuners.status(),
}
return json.dumps(debugjson, indent=4)
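# Usage sketch (assumption: ``settings`` and ``device`` are hypothetical
# objects providing the channels/tuners attributes used above):
#     debug = Debug_JSON(settings, device)
#     print(debug.get_debug_json("http://127.0.0.1:5004"))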
|
StarcoderdataPython
|
6516727
|
# repo: dukagjinramosaj1/python_exercises
#1 - Import the data
#2 - Clean the data
#3 - Split the data: Training set and Test set.
#4 - Create a Model
#5 - Check the Output
#6 - Improve
|
StarcoderdataPython
|
11204374
|
#This example shows the effects of some of the different PSD parameters
import numpy as np
import matplotlib.pyplot as plt
dt = np.pi / 100.
fs = 1. / dt
t = np.arange(0, 8, dt)
y = 10. * np.sin(2 * np.pi * 4 * t) + 5. * np.sin(2 * np.pi * 4.25 * t)
y = y + np.random.randn(*t.shape)
#Plot the raw time series
fig = plt.figure()
fig.subplots_adjust(hspace=0.45, wspace=0.3)
ax = fig.add_subplot(2, 1, 1)
ax.plot(t, y)
#Plot the PSD with different amounts of zero padding. This uses the entire
#time series at once
ax2 = fig.add_subplot(2, 3, 4)
ax2.psd(y, NFFT=len(t), pad_to=len(t), Fs=fs)
ax2.psd(y, NFFT=len(t), pad_to=len(t)*2, Fs=fs)
ax2.psd(y, NFFT=len(t), pad_to=len(t)*4, Fs=fs)
plt.title('zero padding')
#Plot the PSD with different block sizes, zero-padding to the length of the
#original data sequence.
ax3 = fig.add_subplot(2, 3, 5, sharex=ax2, sharey=ax2)
ax3.psd(y, NFFT=len(t), pad_to=len(t), Fs=fs)
ax3.psd(y, NFFT=len(t)//2, pad_to=len(t), Fs=fs)
ax3.psd(y, NFFT=len(t)//4, pad_to=len(t), Fs=fs)
ax3.set_ylabel('')
plt.title('block size')
#Plot the PSD with different amounts of overlap between blocks
ax4 = fig.add_subplot(2, 3, 6, sharex=ax2, sharey=ax2)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=0, Fs=fs)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=int(0.05*len(t)/2.), Fs=fs)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=int(0.2*len(t)/2.), Fs=fs)
ax4.set_ylabel('')
plt.title('overlap')
plt.show()
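# Interpretation notes: increasing ``pad_to`` only interpolates the frequency
# axis (a smoother-looking curve, no new information); a smaller ``NFFT``
# averages more blocks, trading frequency resolution for lower variance; and
# ``noverlap`` reuses samples between blocks to regain averaging without
# shortening the blocks.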
|
StarcoderdataPython
|
6632150
|
#!/usr/bin/env python3
import pytest
import schemathesis
def pytest_addoption(parser):
parser.addoption(
"--local-url", action="store", default="http://127.0.0.1:8053/api/v0"
)
parser.addoption(
"--compare-url", action="store", default="https://guild.koios.rest/api/v0"
)
parser.addoption(
"--api-schema-file",
action="store",
default="../specs/results/koiosapi-guild.yaml",
)
@pytest.fixture
def local_url(request):
return request.config.getoption("--local-url")
@pytest.fixture
def compare_url(request):
return request.config.getoption("--compare-url")
@pytest.fixture
def api_schema(request):
schema = schemathesis.from_path(request.config.getoption("--api-schema-file"))
return schema
|
StarcoderdataPython
|
5134282
|
from flask import make_response
import flask_restful as restful  # flask.ext was removed in Flask 1.0; import the extension directly
from json import dumps
from hiphack import app
def output_json(obj, code, headers=None):
resp = make_response(dumps(obj), code)
resp.headers.extend(headers or {})
return resp
DEFAULT_REPRESENTATIONS = {'application/json': output_json}
api = restful.Api(app)
api.representations = DEFAULT_REPRESENTATIONS
import hiphack.api.resources
|
StarcoderdataPython
|
6659736
|
# repo: cybertraining-dsc/test
# file: azure_cli.py
import json
from cloudmesh.common.Shell import Shell
from textwrap import dedent
from pprint import pprint
class Provider(object):
def __init__(self):
self.debug = True
def login(self):
print("\nconnecting to azure...\n")
r = Shell.live("az login")
r = Shell.execute("az account show", shell = True)
        data = json.loads(r)  # the CLI prints JSON; parse it rather than eval()
print("\ndata:",data)
datalist=[]
for key, value in data.items():
temp = [key,value]
datalist.append(temp)
print("\ntype of data:",type(datalist))
print(datalist)
print("\nazure has been connected\n")
#create a resource group
def resource_group(self):
print("\ncreating a resource group...")
r = Shell.live("az group create --name test --location eastus")
print("\nThe resource group named test has been created")
#create a vm
def create_vm(self, **kwargs):
print("\ncreate a vm...")
command = dedent("""
az vm create \
--resource-group {resourcegroup} \
--name {name} \
--image {image} \
--admin-username {username} \
--generate-ssh-keys
""".format(**kwargs))
print(command)
r = Shell.live(command)
print("\nthe vm has been created")
def get_ip(self, **kwargs):
print("get ip address:")
command = dedent("""
az vm list-ip-addresses \
--resource-group {resourcegroup} \
--name {name}
""".format(**kwargs))
r = Shell.execute(command, shell = True)
print("r:\n",r)
        data = json.loads(r)  # parse the JSON output instead of eval()
print("\ntype of data:",type(data))
print("\ndata:",data)
for entry in data:
pprint(entry)
#connect to vm
def connect_vm(self):
print("connecting to vm...")
r = Shell.live(
"ssh azureuser1@{publicIdAddress}".format(publicIdAddress='172.16.17.32'))
#list vm
def list(self):
print("list all virtual machine:")
r = Shell.live("az vm list")
def stop(self, **kwargs):
print("stopping a virtual machine...")
command = dedent("""
az vm stop \
--resource-group {resourcegroup} \
--name {name}
""".format(**kwargs))
r = Shell.live(command)
print("the vm has been stopped")
def restart(self, **kwargs):
print("restarting a virtual machine...")
command = dedent("""
az vm restart \
--resource-group {resourcegroup} \
--name {name}
""".format(**kwargs))
r = Shell.live(command)
print("the vm has been restarted")
def delete(self, **kwargs):
print("deleting a virtual machine...")
command = dedent("""
az vm delete \
--resource-group {resourcegroup} \
--name {name}
""".format(**kwargs))
r = Shell.live(command)
print("the vm has been deleted")
p = Provider()
p.login()
#p.resource_group()
'''
p.create_vm(resourcegroup = 'test',
name = 'testvm1',
image = 'UbuntuLTS',
username = 'azureuser1')
'''
'''
p.get_ip(resourcegroup = 'test',
name = 'testvm1')
'''
#p.connect_vm()
#p.list()
'''
p.stop(resourcegroup = 'test',
name = 'testvm1')
'''
'''
p.restart(resourcegroup = 'test',
name = 'testvm1')
'''
'''
p.delete(resourcegroup = 'test',
name = 'testvm1')
'''
|
StarcoderdataPython
|
4976139
|
# file: custom/onse/tasks.py
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import date, datetime
from time import sleep
from typing import Iterable, List, Optional, Tuple, Union
from urllib.error import HTTPError
import attr
from celery.schedules import crontab
from celery.task import periodic_task, task
from dateutil.relativedelta import relativedelta
from requests import RequestException
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from dimagi.utils.chunked import chunked
from corehq.apps.domain.dbaccessors import domain_exists
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.motech.models import ConnectionSettings
from corehq.util.soft_assert import soft_assert
from custom.onse.const import (
CASE_TYPE,
CONNECTION_SETTINGS_NAME,
DOMAIN,
LAST_IMPORTED_PROPERTY,
MAX_RETRY_ATTEMPTS,
TASK_RETRY_FACTOR
)
from custom.onse.models import iter_mappings
# The production DHIS2 server is on the other side of an
# interoperability service that changes the URL schema from
# "base_url/api/resource" to "service/dhis2core/api/v0/resource".
# Its ConnectionSettings instance uses URL "service/dhis2core/api/v0/"
# Set ``DROP_API_PREFIX = True`` to drop the "/api" before "/resource",
# so that resource URLs end up as "service/dhis2core/api/v0/resource".
DROP_API_PREFIX = True
MAX_THREAD_WORKERS = 10
_soft_assert = soft_assert('@'.join(('nhooper', 'dimagi.com')))
@attr.s(auto_attribs=True)
class CassiusMarcellus: # TODO: Come up with a better name. Please!
"""
Stores a case, and its updates.
Allows us to read current case property values and build a CaseBlock
"""
case: Union[CommCareCase, CommCareCaseSQL]
updates: dict = attr.Factory(dict)
@property
def case_block(self):
return CaseBlock(
case_id=self.case.case_id,
external_id=self.case.external_id,
case_type=CASE_TYPE,
case_name=self.case.name,
update=self.updates,
)
@periodic_task(
# Run on the 5th day of every quarter
run_every=crontab(day_of_month=5, month_of_year='1,4,7,10',
hour=22, minute=30),
queue='background_queue',
)
def update_facility_cases_from_dhis2_data_elements():
_update_facility_cases_from_dhis2_data_elements.delay()
@task(bind=True, max_retries=MAX_RETRY_ATTEMPTS)
def _update_facility_cases_from_dhis2_data_elements(self, period: Optional[str] = None, print_notifications: bool = False):
if not domain_exists(DOMAIN):
return
dhis2_server = get_dhis2_server(print_notifications)
server_status = check_server_status(dhis2_server)
if server_status['ready']:
execute_update_facility_cases_from_dhis2_data_elements(dhis2_server, period, print_notifications)
else:
exception = server_status['error']
retry_days = 2 ** self.request.retries
message = f'Importing {DOMAIN.upper()} cases from {CONNECTION_SETTINGS_NAME} failed: {exception}. ' \
f'Retrying in {retry_days} days'
_notify_message(print_notifications, message, dhis2_server, exception)
self.retry(countdown=(retry_days * TASK_RETRY_FACTOR))
def check_server_status(dhis2_server: ConnectionSettings):
server_status = {
'ready': True,
'error': None
}
requests = dhis2_server.get_requests()
try:
requests.send_request("HEAD", dhis2_server.url, raise_for_status=True)
except HTTPError as e:
if e.response.status_code != 405: # ignore method not allowed
server_status['ready'] = False
server_status['error'] = e
except RequestException as re:
server_status['ready'] = False
server_status['error'] = re
return server_status
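# Flow note: a HEAD request probes the server; a 405 (method not allowed)
# response still counts as "ready" because it proves the endpoint is reachable,
# while connection-level failures are recorded so the calling task can retry
# with exponential backoff.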
def execute_update_facility_cases_from_dhis2_data_elements(
dhis2_server: ConnectionSettings,
period: Optional[str] = None,
print_notifications: bool = False,
):
"""
Update facility_supervision cases with indicators collected in DHIS2
over the last quarter.
:param dhis2_server: The ConnectionSettings instance to connect to
the remote API.
:param period: The period of data to import. e.g. "2020Q1". Defaults
to last quarter.
:param print_notifications: If True, notifications are printed,
otherwise they are emailed.
"""
try:
clays = get_clays()
with ThreadPoolExecutor(max_workers=MAX_THREAD_WORKERS) as executor:
futures = (executor.submit(set_case_updates,
dhis2_server, clay, period)
for clay in clays)
for futures_chunk in chunked(as_completed(futures), 100):
case_blocks_chunk = [f.result() for f in futures_chunk]
save_cases(case_blocks_chunk)
except Exception as err:
handle_error(err, dhis2_server, print_notifications)
else:
handle_success(dhis2_server, print_notifications)
def get_dhis2_server(
print_notifications: bool = False
) -> ConnectionSettings:
try:
return ConnectionSettings.objects.get(domain=DOMAIN,
name=CONNECTION_SETTINGS_NAME)
except ConnectionSettings.DoesNotExist:
message = (f'ConnectionSettings {CONNECTION_SETTINGS_NAME!r} not '
f'found in domain {DOMAIN!r} for importing DHIS2 data '
f'elements.')
if print_notifications:
print(message, file=sys.stderr)
else:
_soft_assert(False, message)
raise
def get_clays() -> Iterable[CassiusMarcellus]:
case_accessors = CaseAccessors(DOMAIN)
for case_id in case_accessors.get_case_ids_in_domain(type=CASE_TYPE):
case = case_accessors.get_case(case_id)
if not case.external_id:
# This case is not mapped to a facility in DHIS2.
continue
yield CassiusMarcellus(case)
def set_case_updates(
dhis2_server: ConnectionSettings,
clay: CassiusMarcellus,
requested_period: Optional[str],
) -> CassiusMarcellus:
"""
    Fetch data sets of data elements for the last quarter from ``dhis2_server``
    and update the case properties corresponding to those data elements on
    ``clay`` in place.
"""
last_imported = clay.case.get_case_property(LAST_IMPORTED_PROPERTY)
if last_imported:
last_imported = datetime.strptime(last_imported, '%Y-%m-%d').date()
# Several of the data elements we want belong to the same data
# sets. Only fetch a data set if we don't already have it.
data_set_cache = {}
for mapping in iter_mappings():
if not mapping.data_set_id:
raise ValueError(
f'Mapping {mapping} does not include data set ID. '
                'Use the **fetch_onse_data_set_ids** command.')
for period in get_periods(requested_period, last_imported):
data_set_cache.setdefault(period, {})
if mapping.data_set_id not in data_set_cache[period]:
data_set_cache[period][mapping.data_set_id] = fetch_data_set(
dhis2_server,
mapping.data_set_id,
# facility case external_id is set to its DHIS2 org
# unit. This is the DHIS2 facility whose data we
# want to import.
org_unit_id=clay.case.external_id,
period=period,
)
data_values = data_set_cache[period][mapping.data_set_id]
if data_values is None:
continue # No data for this facility. Try previous quarter
found, total = get_data_element_total(
mapping.data_element_id,
data_values,
)
if found:
clay.updates[mapping.case_property] = total
break
# else: look for values in previous quarter
clay.updates[LAST_IMPORTED_PROPERTY] = date.today().isoformat()
return clay
def get_periods(
requested_period: Optional[str],
last_imported: Optional[date],
) -> Iterable[str]:
if requested_period:
return [requested_period]
if last_imported:
return previous_quarters_up_to(last_imported)
return previous_quarters_up_to(five_years_ago())
def previous_quarters_up_to(some_date: date) -> Iterable[str]:
"""
Returns quarters in DHIS2 web API `period format`_ in reverse
chronological order.
.. _period format: https://docs.dhis2.org/master/en/developer/html/webapi_date_perid_format.html
"""
current_date = datetime.utcnow().date()
while current_date > some_date:
yield previous_quarter(current_date)
current_date -= relativedelta(months=3)
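# Illustrative example (dates hypothetical): if today were 2020-07-01, then
# previous_quarters_up_to(date(2020, 1, 1)) would yield '2020Q2' and then
# '2020Q1', stepping back three months per iteration until some_date is reached.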
def five_years_ago():
"""
Returns the date five years ago today.
"""
return datetime.utcnow().date() - relativedelta(years=5)
def fetch_data_set(
dhis2_server: ConnectionSettings,
data_set_id: str,
org_unit_id: str,
period: str,
) -> Optional[List[dict]]:
"""
    Returns a list of `DHIS2 data values`_, or ``None`` if the given
    org unit has no data collected for the requested period.
Raises exceptions on connection timeout or non-200 response status.
.. _DHIS2 data values: https://docs.dhis2.org/master/en/developer/html/webapi_data_values.html
"""
max_attempts = 3
backoff_seconds = 3 * 60
requests = dhis2_server.get_requests()
endpoint = '/dataValueSets' if DROP_API_PREFIX else '/api/dataValueSets'
params = {
'period': period,
'dataSet': data_set_id,
'orgUnit': org_unit_id,
}
attempt = 0
while True:
attempt += 1
try:
response = requests.get(endpoint, params, raise_for_status=True)
except (RequestException, HTTPError):
if attempt < max_attempts:
sleep(backoff_seconds * attempt)
else:
raise
else:
break
return response.json().get('dataValues', None)
def previous_quarter(some_date: date) -> str:
"""
Returns the previous quarter in DHIS2 web API `period format`_.
e.g. "2004Q1"
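    Doctest examples (dates illustrative; assumes ``date`` from ``datetime``):
    >>> previous_quarter(date(2020, 5, 1))
    '2020Q1'
    >>> previous_quarter(date(2020, 1, 15))
    '2019Q4'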
.. _period format: https://docs.dhis2.org/master/en/developer/html/webapi_date_perid_format.html
"""
year = some_date.year
quarter = (some_date.month - 1) // 3
if quarter == 0:
year -= 1
quarter = 4
return f"{year}Q{quarter}"
def get_data_element_total(
data_element_id: str,
data_values: List[dict],
) -> Tuple[bool, int]:
"""
A DHIS2 data element may be broken down by category options, and
``data_values`` can contain multiple entries for the same data
element. This function returns whether ``data_element_id`` is found
in ``data_values``, and its total.
The following doctest shows an example value for ``data_values`` as
might be returned by DHIS2:
>>> data_values = [
... {
... "dataElement": "f7n9E0hX8qk",
... "period": "2014Q1",
... "orgUnit": "DiszpKrYNg8",
... "categoryOption": "FNnj3jKGS7i",
... "value": "12"
... },
... {
... "dataElement": "f7n9E0hX8qk",
... "period": "2014Q1",
... "orgUnit": "DiszpKrYNg8",
... "categoryOption": "Jkhdsf8sdf4",
... "value": "16"
... }
... ]
>>> get_data_element_total('f7n9E0hX8qk', data_values)
(True, 28)
"""
found = False
value = 0
for data_value in data_values:
if data_value['dataElement'] == data_element_id:
found = True
value += int(data_value['value'])
return found, value
def save_cases(clays: List[CassiusMarcellus]):
today = date.today().isoformat()
submit_case_blocks(
[clay.case_block.as_text() for clay in clays],
DOMAIN,
xmlns='http://commcarehq.org/dhis2-import',
device_id=f"dhis2-import-{DOMAIN}-{today}",
)
def handle_error(
err: Exception,
dhis2_server: ConnectionSettings,
print_notifications: bool,
):
message = f'Importing {DOMAIN.upper()} cases from {CONNECTION_SETTINGS_NAME} failed: {err}'
_notify_message(print_notifications, message, dhis2_server, err)
def handle_success(
dhis2_server: ConnectionSettings,
print_notifications: bool,
):
message = f'Successfully imported {DOMAIN.upper()} cases from {CONNECTION_SETTINGS_NAME}'
_notify_message(print_notifications, message, dhis2_server)
def _notify_message(print_notifications, message, connection_settings, exception=None):
if print_notifications:
print(message, file=sys.stderr)
else:
if exception is not None:
connection_settings.get_requests().notify_exception(message)
raise exception
else:
# For most things we pass silently. But we can repurpose
# `notify_error()` to tell admins that the import went through,
# because it only happens once a quarter.
connection_settings.get_requests().notify_error(message)
|
StarcoderdataPython
|
1951939
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME> & QuatroPe
# License: BSD-3-Clause
# Full Text: https://github.com/quatrope/djmpl/blob/master/LICENSE
# =============================================================================
# DOCS
# =============================================================================
# =============================================================================
# IMPORTS
# =============================================================================
import django_matplotlib as djmpl
from django.views.generic.base import TemplateView
# =============================================================================
# THE VIEWS
# =============================================================================
class PlotMixinTestView(djmpl.PlotMixin, TemplateView):
plot_data = [1, 2, 3]
template_name = "test_djmpl/SinglePlot.html"
def plot(self, data, fig, ax):
ax.plot(data)
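# Note (inferred from the mixin contract, not verified against djmpl docs):
# PlotMixin presumably injects a Matplotlib figure/axes pair and passes
# plot_data through as ``data`` when rendering the template.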
|
StarcoderdataPython
|
1849666
|
<reponame>agilescientific/bruges<gh_stars>1-10
"""
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
from .energy import energy
from .discontinuity import discontinuity
from .discontinuity import similarity
from .dipsteer import dipsteer
from .spectrogram import spectrogram
from .spectraldecomp import spectraldecomp
from .complex import instantaneous_amplitude
from .complex import reflection_strength
from .complex import envelope
from .complex import instantaneous_phase
from .complex import instantaneous_frequency
from .complex import quadrature
from .horizon import *
|
StarcoderdataPython
|
9739540
|
<reponame>YEZHIAN1996/pythonstudy
from PIL import Image
image = Image.open('')
|
StarcoderdataPython
|
1727313
|
import os
import pickle
import time
from functools import partial
from typing import List, Optional, Dict, Tuple, Any, Callable
import jax
import jax.numpy as jnp
import numpy as np
import wandb
from jax import jit, value_and_grad
from jax.experimental.optimizers import adam, sgd
from jax.tree_util import tree_leaves
from sklearn.preprocessing import StandardScaler
from dgm.dynamics.dynamics_model import get_dynamics
from dgm.objectives.objective_builder import get_objective_builder
from dgm.plotter.plotter import Plotter
from dgm.schedules.betas import get_betas, BetaSchedule
from dgm.schedules.learning_rate import get_learning_rate
from dgm.schedules.weight_decay import get_weight_decay
from dgm.simulator.simulator import get_simulator
from dgm.smoother.smoother import get_smoother
from dgm.utils.helper_functions import unroll_dictionary, replace_str
from dgm.utils.representatives import Optimizer
Schedule = Callable[[int], float]
Range = Optional[Tuple[float, float]]
pytree = Any
class LearnSystem:
def __init__(
self,
seed: int,
data_generation: Dict,
smoother: Dict,
dynamics: Dict,
betas: Dict,
optimizer: Dict,
priors: Dict,
logging: Dict,
numerical_correction: float,
num_derivative_points_per_trajectory: int
):
self.numerical_correction = numerical_correction
self.track_just_loss = logging["track_just_loss"]
self.core_type = smoother["core"]["type"]
self.core_kwargs = smoother["core"]["kwargs"]
        # Adding kwargs here seems a bit hacky; it works, but if you think
        # of a better solution, don't hesitate to implement it.
self.core_kwargs['weight_key'] = "core"
self.mean_head_type = smoother["mean_head"]["type"]
self.mean_head_kwargs = smoother["mean_head"]["kwargs"]
self.mean_head_kwargs['weight_key'] = "mean_head"
self.kernel_core_type = smoother["kernel_core"]["type"]
self.kernel_core_kwargs = smoother["kernel_core"]["kwargs"]
self.kernel_core_kwargs['weight_key'] = "kernel_core"
self.kernel_head_type = smoother['kernel_head']['type']
self.kernel_head_kwargs = smoother["kernel_head"]["kwargs"]
self.kernel_head_kwargs['weight_key'] = "kernel_head"
self.learning_rate: Schedule = get_learning_rate(optimizer["learning_rate"]["type"],
optimizer["learning_rate"]["kwargs"])
self.time_normalizer = StandardScaler()
self.state_normalizer = StandardScaler()
self.parameters = None
self.kernel_type = smoother["kernel"]["type"]
self.kernel_kwargs = smoother["kernel"]["kwargs"]
self.optimizer_type = optimizer["type"]
self.dynamics_model = dynamics["type"]
self.dynamics_kwargs = dynamics['kwargs']
self.simulator_type = data_generation["type"]
self.times = data_generation["times"]
self.test_times = data_generation["test_times"]
self.initial_conditions = data_generation["initial_conditions"]
self.num_trajectories = len(self.initial_conditions)
self.state_dimension = len(self.initial_conditions[0])
simulation_noise = data_generation['noise']
self.simulation_noise = [None] * self.num_trajectories if simulation_noise is None else simulation_noise
self.simulator_parameters = data_generation["parameters"]
self.current_rng = jax.random.PRNGKey(seed)
self.betas: BetaSchedule = get_betas(betas["type"], betas["kwargs"])
self.wd_core = get_weight_decay(priors['wd_core']['type'], priors['wd_core']['kwargs'])
        self.wd_kernel_core = get_weight_decay(priors['wd_kernel_core']['type'], priors['wd_kernel_core']['kwargs'])
        self.wd_kernel_head = get_weight_decay(priors['wd_kernel_head']['type'], priors['wd_kernel_head']['kwargs'])
        self.wd_mean_head = get_weight_decay(priors['wd_mean_head']['type'], priors['wd_mean_head']['kwargs'])
self.wd_obs_noise = get_weight_decay(priors['wd_pure_kernel']['observation_noise']['type'],
priors['wd_pure_kernel']['observation_noise']['kwargs'])
self.wd_kernel_variance = get_weight_decay(priors['wd_pure_kernel']['kernel_variance']['type'],
priors['wd_pure_kernel']['kernel_variance']['kwargs'])
self.wd_kernel_lengthscales = get_weight_decay(priors['wd_pure_kernel']['kernel_lengthscale']['type'],
priors['wd_pure_kernel']['kernel_lengthscale']['kwargs'])
self.wd_dynamics: Schedule = get_weight_decay(priors["wd_dynamics"]['type'], priors["wd_dynamics"]['kwargs'])
self.track_wandb = logging["track_wandb"]
self.num_derivative_points_per_trajectory = num_derivative_points_per_trajectory
self.simulator = get_simulator(simulator=self.simulator_type, **self.simulator_parameters)
self._prepare_observed_data()
self._normalize_data()
self._prepare_data_for_training()
self._prepare_smoother()
self._prepare_dynamics_model()
self._prepare_objective_builder()
self._prepare_optimizer()
self.plotter = Plotter(simulator=self.simulator, initial_conditions=self.initial_conditions)
def _prepare_observed_data(self):
self.current_rng, key = jax.random.split(self.current_rng)
time_before_data = time.time()
self.observations, self.ground_truth_states, self.ground_truth_derivatives = self.simulator.simulate_trajectories(
initial_conditions=self.initial_conditions, times=self.times, sigmas=self.simulation_noise, rng=key)
print("Time for data preparation", time.time() - time_before_data)
def _normalize_data(self):
time_before_normalization = time.time()
all_times = jnp.concatenate(self.times, axis=0)
all_observations = jnp.concatenate(self.observations, axis=0)
self.time_normalizer.fit(all_times.reshape(-1, 1))
self.state_normalizer.fit(all_observations)
self.normalized_times = []
self.normalized_observations = []
self.normalized_initial_conditions = []
self.normalized_test_times = []
self.normalized_ground_truth_states = []
self.normalized_ground_truth_derivatives = []
derivative_scale = self.time_normalizer.scale_ / self.state_normalizer.scale_
for i in range(self.num_trajectories):
current_normalized_times = self.time_normalizer.transform(self.times[i].reshape(-1, 1))
current_normalized_test_times = self.time_normalizer.transform(self.test_times[i].reshape(-1, 1))
current_normalized_states = self.state_normalizer.transform(self.observations[i])
current_normalized_initial_conditions = self.state_normalizer.transform(
self.initial_conditions[i].reshape(1, -1))
current_normalized_ground_truth_states = self.state_normalizer.transform(self.ground_truth_states[i])
current_normalized_ground_truth_derivatives = derivative_scale * self.ground_truth_derivatives[i]
self.normalized_times.append(jnp.array(current_normalized_times).reshape(-1))
self.normalized_test_times.append(jnp.array(current_normalized_test_times).reshape(-1))
self.normalized_observations.append(jnp.array(current_normalized_states))
self.normalized_initial_conditions.append(jnp.array(current_normalized_initial_conditions.reshape(-1)))
self.normalized_ground_truth_states.append(jnp.array(current_normalized_ground_truth_states))
self.normalized_ground_truth_derivatives.append(jnp.array(current_normalized_ground_truth_derivatives))
print("Time for normalization", time.time() - time_before_normalization)
def _prepare_data_for_training(self):
self.joint_normalized_test_times = jnp.concatenate(self.normalized_test_times)
self.joint_normalized_times = jnp.concatenate(self.normalized_times)
self.joint_normalized_observations = jnp.concatenate(self.normalized_observations)
times_for_derivatives = []
for traj_id in range(self.num_trajectories):
min_time, max_time = jnp.min(self.normalized_times[traj_id]), jnp.max(self.normalized_times[traj_id])
times_for_derivatives.append(jnp.linspace(min_time, max_time, self.num_derivative_points_per_trajectory))
self.joint_normalized_times_for_derivatives = jnp.concatenate(times_for_derivatives)
initial_conditions_to_pass = []
initial_conditions_for_derivatives = []
initial_conditions_for_test = []
for traj_id in range(self.num_trajectories):
initial_conditions_to_pass.append(
jnp.repeat(self.normalized_initial_conditions[traj_id].reshape(1, -1),
self.normalized_times[traj_id].size, axis=0)
)
initial_conditions_for_derivatives.append(
jnp.repeat(self.normalized_initial_conditions[traj_id].reshape(1, -1),
times_for_derivatives[traj_id].size, axis=0)
)
initial_conditions_for_test.append(
jnp.repeat(self.normalized_initial_conditions[traj_id].reshape(1, -1),
self.test_times[traj_id].size, axis=0)
)
self.joint_repeated_normalized_initial_conditions = jnp.concatenate(initial_conditions_to_pass, axis=0)
self.joint_repeated_normalized_initial_conditions_derivatives = jnp.concatenate(
initial_conditions_for_derivatives,
axis=0)
self.joint_repeated_normalized_test_initial_conditions = jnp.concatenate(initial_conditions_for_test, axis=0)
def _prepare_smoother(self):
time_smoother = time.time()
(
self.smoother_init,
self.smoother_apply,
self.smoother_get_means_and_covariances_test,
self.get_smoother_regularization,
) = get_smoother(kernel=self.kernel_type, kernel_kwargs=self.kernel_kwargs,
core_type=self.core_type, core_kwargs=self.core_kwargs,
mean_head_type=self.mean_head_type, mean_head_kwargs=self.mean_head_kwargs,
kernel_core_type=self.kernel_core_type, kernel_core_kwargs=self.kernel_core_kwargs,
kernel_head_type=self.kernel_head_type, kernel_head_kwargs=self.kernel_head_kwargs,
n_dim=self.state_dimension, numerical_correction=self.numerical_correction)
print("Time for smoother preparation: ", time.time() - time_smoother)
def _prepare_dynamics_model(self):
time_dynamics = time.time()
(
self.dynamics_model_init,
self.dynamics_model_apply,
self.dynamics_for_plotting,
self.dynamics_sample_trajectories,
self.get_dynamics_regularization
) = get_dynamics(dynamics_model=self.dynamics_model, state_normalizer=self.state_normalizer,
time_normalizer=self.time_normalizer, state_dimension=self.state_dimension,
dynamics_kwargs=self.dynamics_kwargs)
print("Time for dynamics preparation: ", time.time() - time_dynamics)
def _prepare_objective_builder(self):
time_objective_builder = time.time()
self.current_rng, *keys = jax.random.split(self.current_rng, 3)
dynamics_parameters = self.dynamics_model_init(keys[0])
smoother_parameters = self.smoother_init(keys[1], self.state_dimension)
self.parameters = {"smoother": smoother_parameters, "dynamics": dynamics_parameters, }
self.num_dynamics_parameters = 0
self.num_smoother_parameters = 0
for leave in tree_leaves(dynamics_parameters):
self.num_dynamics_parameters += leave.size
for leave in tree_leaves(smoother_parameters):
self.num_smoother_parameters += leave.size
self.num_parameters = self.num_smoother_parameters + self.num_dynamics_parameters
self.objective_builder = get_objective_builder(apply_smoother=self.smoother_apply,
apply_dynamics=self.dynamics_model_apply,
get_dynamics_regularization=self.get_dynamics_regularization,
get_smoother_regularization=self.get_smoother_regularization)
print("Time to prepare objective builder", time.time() - time_objective_builder)
time_objective_builder = time.time()
self.values_and_grad = jit(value_and_grad(self.objective_builder, 0))
print("Time to jit: ", time.time() - time_objective_builder)
def _prepare_optimizer(self):
if self.optimizer_type == Optimizer.ADAM:
self.optimizer = adam
elif self.optimizer_type == Optimizer.SGD:
self.optimizer = sgd
def train(self, number_of_steps):
current_time = time.time()
initial_time = current_time
opt_init, opt_update, get_params = self.optimizer(self.learning_rate)
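        # jax.experimental.optimizers exposes an (init, update, get_params)
        # triple; the parameters live inside the opaque optimizer state.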
params = opt_init(self.parameters)
@jit
def do_step(step, params):
weights = {
"kernel_variance": self.wd_kernel_variance(step),
"kernel_lengthscale": self.wd_kernel_lengthscales(step),
"obs_noise": self.wd_obs_noise(step),
"dynamics": self.wd_dynamics(step),
"core": self.wd_core(step),
'kernel_core': self.wd_kernel_core(step),
"kernel_head": self.wd_kernel_head(step),
"mean_head": self.wd_mean_head(step)
}
loss, params_grad = self.values_and_grad(
get_params(params),
self.joint_normalized_times,
self.joint_normalized_times_for_derivatives,
self.joint_repeated_normalized_initial_conditions,
self.joint_repeated_normalized_initial_conditions_derivatives,
self.joint_normalized_observations,
self.betas(step),
weights
)
return loss, opt_update(step, params_grad, params)
for step in range(number_of_steps):
if step < 10:
next_time = time.time()
print("Time for step {}:".format(step), next_time - current_time)
current_time = next_time
loss, params = do_step(step, params)
if self.track_wandb:
if self.track_just_loss:
variables_dict = dict()
variables_dict["Loss"] = float(loss)
else:
variables_dict = unroll_dictionary(get_params(params))
variables_dict["Loss"] = float(loss)
wandb.log(variables_dict)
time_spent_for_training = time.time() - initial_time
print("Time spent for training:", time_spent_for_training, "seconds")
self.parameters = get_params(params)
# Save parameters_for_dgm
if self.track_wandb:
directory = os.path.join(wandb.run.dir, 'models')
if not os.path.exists(directory):
os.makedirs(directory)
model_path = os.path.join('models', 'final_parameters.pkl')
with open(os.path.join(wandb.run.dir, model_path), 'wb') as handle:
pickle.dump(get_params(params), handle)
wandb.save(os.path.join(wandb.run.dir, model_path), wandb.run.dir)
def _compute_nll_per_dimension(self, denormalized_state_means, denormalized_state_variances,
denormalized_derivative_means, denormalized_derivative_variances,
denormalized_dynamics_means, denormalized_dynamics_variances,
test_states, test_derivatives):
nll_state = []
nll_derivatives_smoother = []
nll_derivatives_dynamics = []
        # Compute the average (over dimensions) NLL score per trajectory.
        # We iterate over the number of evaluated trajectories, which may
        # differ from self.num_trajectories.
for i in range(len(denormalized_state_means)):
nll_state.append(
self._mean_nll(test_states[i], denormalized_state_means[i], denormalized_state_variances[i]))
nll_derivatives_smoother.append(self._mean_nll(test_derivatives[i], denormalized_derivative_means[i],
denormalized_derivative_variances[i]))
nll_derivatives_dynamics.append(self._mean_nll(test_derivatives[i], denormalized_dynamics_means[i],
denormalized_dynamics_variances[i]))
return nll_state, nll_derivatives_smoother, nll_derivatives_dynamics
@staticmethod
def _mean_nll(test, mean_prediction, variance_prediction):
mean_diff = (test - mean_prediction)
nll_state_current = 0.5 * jnp.mean(mean_diff * mean_diff / variance_prediction)
nll_state_current += 0.5 * jnp.mean(jnp.log(variance_prediction))
nll_state_current += 0.5 * jnp.log(2 * jnp.pi)
return nll_state_current
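    # _mean_nll above is the elementwise Gaussian negative log-likelihood,
    # averaged over all entries:
    #     NLL = 0.5 * mean((x - mu)^2 / var) + 0.5 * mean(log var) + 0.5 * log(2*pi)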
@staticmethod
def _prepare_nll_for_wandb(nll_state, nll_derivatives_smoother, nll_derivatives_dynamics, quantile):
nll_state = jnp.array(nll_state)
nll_derivatives_smoother = jnp.array(nll_derivatives_smoother)
nll_derivatives_dynamics = jnp.array(nll_derivatives_dynamics)
nll_state_median = jnp.median(nll_state)
nll_derivatives_smoother_median = jnp.median(nll_derivatives_smoother)
nll_derivatives_dynamics_median = jnp.median(nll_derivatives_dynamics)
nll_state_lower_q = jnp.quantile(nll_state, q=1 - quantile)
nll_derivatives_smoother_lower_q = jnp.quantile(nll_derivatives_smoother, q=1 - quantile)
nll_derivatives_dynamics_lower_q = jnp.quantile(nll_derivatives_dynamics, q=1 - quantile)
nll_state_upper_q = jnp.quantile(nll_state, q=quantile)
nll_derivatives_smoother_upper_q = jnp.quantile(nll_derivatives_smoother, q=quantile)
nll_derivatives_dynamics_upper_q = jnp.quantile(nll_derivatives_dynamics, q=quantile)
variables_dict = dict()
variables_dict['nll_state_mean'] = float(jnp.mean(nll_state))
variables_dict['nll_derivatives_smoother_mean'] = float(jnp.mean(nll_derivatives_smoother))
variables_dict['nll_derivatives_dynamics_mean'] = float(jnp.mean(nll_derivatives_dynamics))
variables_dict['nll_state_median'] = float(nll_state_median)
variables_dict['nll_derivatives_smoother_median'] = float(nll_derivatives_smoother_median)
variables_dict['nll_derivatives_dynamics_median'] = float(nll_derivatives_dynamics_median)
variables_dict['nll_state_lower_q'] = float(nll_state_lower_q)
variables_dict['nll_derivatives_smoother_lower_q'] = float(nll_derivatives_smoother_lower_q)
variables_dict['nll_derivatives_dynamics_lower_q'] = float(nll_derivatives_dynamics_lower_q)
variables_dict['nll_state_upper_q'] = float(nll_state_upper_q)
variables_dict['nll_derivatives_smoother_upper_q'] = float(nll_derivatives_smoother_upper_q)
variables_dict['nll_derivatives_dynamics_upper_q'] = float(nll_derivatives_dynamics_upper_q)
return variables_dict
def _denormalize(self, state_means, state_variances, derivative_means, derivative_variances, dynamics_means,
dynamics_variances):
denormalized_state_means = self.state_normalizer.inverse_transform(state_means)
denormalized_state_variances = self.state_normalizer.scale_ ** 2 * state_variances
derivative_scale = self.state_normalizer.scale_ / self.time_normalizer.scale_
denormalized_derivative_means = derivative_scale * derivative_means
denormalized_derivative_variances = derivative_scale ** 2 * derivative_variances
denormalized_dynamics_means = derivative_scale * dynamics_means
denormalized_dynamics_variances = derivative_scale ** 2 * dynamics_variances
return denormalized_state_means, denormalized_state_variances, denormalized_derivative_means, \
denormalized_derivative_variances, denormalized_dynamics_means, denormalized_dynamics_variances
@staticmethod
def join_trajectories(initial_conditions: List[jnp.array], times: List[jnp.array]) -> Tuple[
pytree, jnp.array, jnp.array]:
# initial_conditions are of shape (num_dim, )
# times are of shape (num_times, )
n_trajectories = len(times)
joint_times = jnp.concatenate(times)
joint_initial_conditions = []
for traj_id in range(n_trajectories):
joint_initial_conditions.append(
jnp.repeat(initial_conditions[traj_id].reshape(1, -1), times[traj_id].size, axis=0))
joint_initial_conditions = jnp.concatenate(joint_initial_conditions, axis=0)
return list(map(len, times)), joint_times, joint_initial_conditions
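    # Illustrative example (shapes hypothetical): two trajectories with 2 and 3
    # time points give trajectory_lengths [2, 3], joint_times of shape (5,), and
    # joint_initial_conditions of shape (5, num_dim), each trajectory's initial
    # condition repeated once per time point.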
@staticmethod
def split_trajectories(trajectory_lengths, *data) -> List[List[jnp.array]]:
start_index = 0
n_data = len(data)
separated_data = [[] for _ in range(n_data)]
for length in trajectory_lengths:
for index, datum in enumerate(data):
separated_data[index].append(datum[start_index: start_index + length, :])
start_index += length
return separated_data
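    # Illustrative example (shapes hypothetical): with trajectory_lengths=[2, 3]
    # and a single array of shape (5, d), split_trajectories returns
    # [[array[0:2], array[2:5]]] -- one inner list per datum, one chunk per
    # trajectory.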
def _all_predictions(self, joint_normalized_times, joint_repeated_normalized_initial_conditions,
trajectory_lengths):
state_means, state_variances, \
derivative_means, derivative_variances = self.smoother_get_means_and_covariances_test(
joint_normalized_times,
self.joint_normalized_times,
joint_repeated_normalized_initial_conditions,
self.joint_repeated_normalized_initial_conditions,
self.joint_normalized_observations,
self.parameters["smoother"],
)
dynamics_means, dynamics_variances = self.dynamics_model_apply(self.parameters["dynamics"], state_means)
# Denormalize everything
denormalized_state_means, denormalized_state_variances, denormalized_derivative_means, \
denormalized_derivative_variances, denormalized_dynamics_means, denormalized_dynamics_variances = self._denormalize(
state_means, state_variances, derivative_means, derivative_variances, dynamics_means, dynamics_variances)
        # All data are one big jnp.array here; split it back per trajectory
        # so we can perform per-trajectory analysis.
return self.split_trajectories(trajectory_lengths, denormalized_state_means, denormalized_state_variances,
denormalized_derivative_means, denormalized_derivative_variances,
denormalized_dynamics_means, denormalized_dynamics_variances)
def evaluate_models(self, ground_truth: bool = True, initial_conditions: Optional = None, times: Optional = None,
quantile=0.8):
if ground_truth:
initial_conditions = self.initial_conditions
times = self.test_times
trajectory_lengths, joint_times, joint_repeated_initial_conditions = self.join_trajectories(initial_conditions,
times)
joint_normalized_times = self.time_normalizer.transform(joint_times.reshape(-1, 1)).reshape(-1)
joint_repeated_normalized_initial_conditions = self.state_normalizer.transform(
joint_repeated_initial_conditions)
denormalized_state_means, denormalized_state_variances, \
denormalized_derivative_means, denormalized_derivative_variances, \
denormalized_dynamics_means, denormalized_dynamics_variances = self._all_predictions(
joint_normalized_times,
joint_repeated_normalized_initial_conditions,
trajectory_lengths)
# Prepare (not normalized) ground truth prediction
self.current_rng, subkey = jax.random.split(self.current_rng)
test_states, test_derivatives = self.simulator.simulate_trajectories(
initial_conditions=initial_conditions, times=times, sigmas=[None] * len(times), rng=subkey)[1:]
        # Compute the average (per dimension) NLL score
nll_state, nll_derivatives_smoother, nll_derivatives_dynamics = self._compute_nll_per_dimension(
denormalized_state_means, denormalized_state_variances,
denormalized_derivative_means, denormalized_derivative_variances,
denormalized_dynamics_means, denormalized_dynamics_variances,
test_states, test_derivatives)
variables_dict = self._prepare_nll_for_wandb(nll_state, nll_derivatives_smoother, nll_derivatives_dynamics,
quantile)
if self.track_wandb:
wandb.log(variables_dict)
return variables_dict
def plot_learned_vector_field(self, x_range: Range = None, y_range: Range = None):
get_dynamics_derivatives = partial(self.dynamics_for_plotting, self.parameters["dynamics"])
num_trajectories = len(self.observations)
max_per_trajectory = [jnp.max(self.observations[i], 0) for i in range(num_trajectories)]
max_all_trajectories = jnp.max(jnp.vstack(max_per_trajectory), 0)
if x_range is None:
x_range = (0, max_all_trajectories[0] * 1.1)
if y_range is None:
y_range = (0, max_all_trajectories[1] * 1.1)
x, y = jnp.meshgrid(
jnp.linspace(x_range[0], x_range[1], 20),
jnp.linspace(y_range[0], y_range[1], 20),
)
u_mean_learned, v_mean_learned, norm_mean_learned, volume_covariance_learned, max_covariance_eigenvalue = get_dynamics_derivatives(
x, y)
u_true, v_true, norm_true = self.simulator.prepare_vector_field_for_plotting(x, y)
fig = self.plotter.plot_learned_vector_field(
initial_conditions=self.initial_conditions,
observations=self.observations,
grid=(x, y),
true_vector_field=(u_true, v_true, norm_true),
learned_vector_field_mean=(u_mean_learned, v_mean_learned, norm_mean_learned),
volume_covariance_learned=volume_covariance_learned,
max_covariance_eigenvalue=max_covariance_eigenvalue,
)
fig.tight_layout()
if self.track_wandb:
wandb.log({'vector_field_plot': wandb.Image(fig)})
def plot_trajectories_at_times(self, add_all_trajectories: bool = False):
print('Before computing the values for plotting')
trajectory_lengths, joint_normalized_test_times, joint_repeated_normalized_test_initial_conditions = self.join_trajectories(
self.normalized_initial_conditions,
self.normalized_test_times)
denormalized_state_means, denormalized_state_variances, \
denormalized_derivative_means, denormalized_derivative_variances, \
denormalized_dynamics_means, denormalized_dynamics_variances = self._all_predictions(
joint_normalized_test_times,
joint_repeated_normalized_test_initial_conditions,
trajectory_lengths)
print("Plotting")
figure_smoother_states, figure_smoother_derivatives, figure_dynamics_derivatives = self.plotter.plot_at_times(
self.test_times,
denormalized_state_means,
denormalized_state_variances,
denormalized_derivative_means,
denormalized_derivative_variances,
denormalized_dynamics_means,
denormalized_dynamics_variances,
train_times=self.times,
observations=self.observations,
all_initial_conditions=self.initial_conditions if add_all_trajectories else None
)
figure_smoother_states.tight_layout()
figure_smoother_derivatives.tight_layout()
figure_dynamics_derivatives.tight_layout()
state_filename = 'smoother_states_with_all_trajectories' if add_all_trajectories else 'smoother_states'
if self.track_wandb:
wandb.log({state_filename: wandb.Image(figure_smoother_states),
'smoother_derivatives': wandb.Image(figure_smoother_derivatives),
'dynamics_derivatives': wandb.Image(figure_dynamics_derivatives)})
def save_data_for_plotting(self):
trajectory_lengths, joint_normalized_test_times, joint_repeated_normalized_test_initial_conditions = self.join_trajectories(
self.normalized_initial_conditions,
self.normalized_test_times)
denormalized_state_means, denormalized_state_variances, \
denormalized_derivative_means, denormalized_derivative_variances, \
denormalized_dynamics_means, denormalized_dynamics_variances = self._all_predictions(
joint_normalized_test_times,
joint_repeated_normalized_test_initial_conditions,
trajectory_lengths)
data = self.plotter.save_data(
self.test_times,
denormalized_state_means,
denormalized_state_variances,
denormalized_derivative_means,
denormalized_derivative_variances,
denormalized_dynamics_means,
denormalized_dynamics_variances,
train_times=self.times,
observations=self.observations,
all_initial_conditions=self.initial_conditions
)
if self.track_wandb:
directory = os.path.join(wandb.run.dir, 'data')
if not os.path.exists(directory):
os.makedirs(directory)
data_path = os.path.join('data', 'test_plot_data.pkl')
with open(os.path.join(wandb.run.dir, data_path), 'wb') as handle:
pickle.dump(data, handle)
wandb.save(os.path.join(wandb.run.dir, data_path), wandb.run.dir)
return data
def bayesian_path_prediction_from_dynamics(self, rng: np.ndarray, initial_condition: jnp.array, times: jnp.array,
num_samples: int, q: float = 0.7, add_all_trajectories: bool = False):
normalized_times = self.time_normalizer.transform(times.reshape(-1, 1)).reshape(-1)
normalized_initial_conditions = self.state_normalizer.transform(initial_condition.reshape(1, -1)).reshape(-1)
median, upper_quantile, lower_quantile = self.dynamics_sample_trajectories(rng, self.parameters['dynamics'],
normalized_initial_conditions,
normalized_times, num_samples, q)
denormalized_mean = self.state_normalizer.inverse_transform(median)
denormalized_lower_quantile = self.state_normalizer.inverse_transform(lower_quantile)
denormalized_upper_quantile = self.state_normalizer.inverse_transform(upper_quantile)
figure = self.plotter.plot_sample_trajectories(initial_condition, times, denormalized_mean,
denormalized_lower_quantile, denormalized_upper_quantile, q,
all_initial_conditions=self.initial_conditions if add_all_trajectories else None)
if self.track_wandb:
key = 'Bayesian integration from initial condition: ' + replace_str(str(initial_condition))
wandb.log({key: wandb.Image(figure)})
def predict_trajectory(self, initial_condition, times, add_all_trajectories: bool = False):
normalized_initial_condition = self.state_normalizer.transform(initial_condition.reshape(1, -1))
normalized_times = self.time_normalizer.transform(times.reshape(-1, 1)).reshape(-1)
repeated_normalized_initial_condition = jnp.repeat(normalized_initial_condition, normalized_times.size, axis=0)
denormalized_state_means, denormalized_state_variances, \
denormalized_derivative_means, denormalized_derivative_variances, \
denormalized_dynamics_means, denormalized_dynamics_variances = self._all_predictions(
normalized_times,
repeated_normalized_initial_condition,
[len(times)])
figure_prediction = self.plotter.plot_predicted_trajectory(
initial_condition=[initial_condition],
test_times=[times],
state_means=denormalized_state_means,
state_variances=denormalized_state_variances,
derivative_means=denormalized_derivative_means,
derivative_variances=denormalized_derivative_variances,
dynamics_means=denormalized_dynamics_means,
dynamics_variances=denormalized_dynamics_variances,
all_initial_conditions=self.initial_conditions if add_all_trajectories else None
)
if self.track_wandb:
key = 'Initial condition: ' + replace_str(str(initial_condition))
wandb.log({key: wandb.Image(figure_prediction)})
def save_predicted_trajectory(self, initial_condition, times):
# initial condition is one dimensional jnp.array, times is one dimensional jnp.array
normalized_initial_condition = self.state_normalizer.transform(initial_condition.reshape(1, -1))
normalized_times = self.time_normalizer.transform(times.reshape(-1, 1)).reshape(-1)
repeated_normalized_initial_condition = jnp.repeat(normalized_initial_condition, normalized_times.size, axis=0)
denormalized_state_means, denormalized_state_variances, \
denormalized_derivative_means, denormalized_derivative_variances, \
denormalized_dynamics_means, denormalized_dynamics_variances = self._all_predictions(
normalized_times,
repeated_normalized_initial_condition,
[len(times)])
data = self.plotter.save_plot_predicted_trajectory(
initial_condition=[initial_condition],
test_times=[times],
state_means=denormalized_state_means,
state_variances=denormalized_state_variances,
derivative_means=denormalized_derivative_means,
derivative_variances=denormalized_derivative_variances,
dynamics_means=denormalized_dynamics_means,
dynamics_variances=denormalized_dynamics_variances,
all_initial_conditions=self.initial_conditions
)
if self.track_wandb:
directory = os.path.join(wandb.run.dir, 'data')
if not os.path.exists(directory):
os.makedirs(directory)
data_path = os.path.join('data', 'predicted_trajectory{}.pkl'.format(replace_str(str(initial_condition))))
with open(os.path.join(wandb.run.dir, data_path), 'wb') as handle:
pickle.dump(data, handle)
wandb.save(os.path.join(wandb.run.dir, data_path), wandb.run.dir)
return data
if __name__ == "__main__":
pass
|
StarcoderdataPython
|
1669801
|
from django.contrib import admin
from ldap_login.models import *
class UsersInline(admin.TabularInline):
    model = user
    extra = 1
class userAdmin(admin.ModelAdmin):
    search_fields = ['username', 'fullname']
class groupAdmin(admin.ModelAdmin):
    inlines = [UsersInline]
admin.site.register(group)
admin.site.register(Role)
admin.site.register(Permission)
admin.site.register(user, userAdmin)
|
StarcoderdataPython
|
11351465
|
# Take a list of WARC files containing video/mp4 content and extract one still image from each video.
import argparse
import base64
import hashlib
import tempfile
import os
import sys
from warcio.archiveiterator import ArchiveIterator
from gluish.utils import shellout
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("f", nargs="*", metavar="FILE", help="files")
parser.add_argument("-p", "--prefix", default="stills-")
args = parser.parse_args()
# Note the seen payload digests.
seen = set()
for fn in args.f:
with open(fn, "rb") as stream:
for record in ArchiveIterator(stream):
if record.rec_type != "response":
continue
rh = record.rec_headers
hh = record.http_headers
if hh.get("Content-Type") not in ("video/mp4",):
continue
payload_digest = rh.get("WARC-Payload-Digest")
if not payload_digest:
continue
hv = payload_digest.split(":")[1]
if hv in seen:
continue
seen.add(hv)
with tempfile.NamedTemporaryFile(delete=False) as tf:
data = record.raw_stream.read()
tf.write(data)
dst = "{}{}.jpg".format(args.prefix, hv)
if os.path.exists(dst):
continue
# Generate a still.
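                # -ss 1 seeks one second in, -vframes 1 grabs a single frame,
                # and -f image2 writes it out as a JPEG still.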
output = shellout(
""" ffmpeg -hide_banner -loglevel panic -y -ss 1 -i {video} -vframes 1 -f image2 {output} """,
video=tf.name,
)
os.rename(output, dst)
os.remove(tf.name) # remove extracted video
|
StarcoderdataPython
|
8152731
|
<filename>cleanup_config.toml.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Lexicographically sort the nicknames in config.toml
Dependencies:
Python 3.4+
The toml package (python -m pip install toml)
Example: Dry run in default location
python cleanup_config.toml.py --dry-run
Example: Overwrite config.toml in working directory
(Windows) python cleanup_config.toml.py --dir=%CD%
(Unix-like) python cleanup_config.toml.py --dir=$PWD
"""
import os
import sys
import unicodedata
from argparse import ArgumentParser, RawTextHelpFormatter
from os.path import expanduser, realpath
import toml
def fullpath(*args):
return realpath(os.path.join(*args))
def parsed_args():
doc_lines = __doc__.strip().split('\n')
parser = ArgumentParser(description=doc_lines[0],
epilog='\n'.join(doc_lines[1:]),
formatter_class=RawTextHelpFormatter)
parser.add_argument('-d', '--dir',
action='store',
dest='dir',
default=fullpath(expanduser('~'), '.abv'),
help='directory containing config.toml (default: $HOME/.abv)')
parser.add_argument('--dry-run',
dest='dry_run',
action='store_const',
const=True,
default=False,
help='prints the resulting config.toml file to stdout rather than overwriting')
args = parser.parse_args()
return args
def unpack_table(lines):
table_toml = toml.loads(''.join(lines))
name = list(table_toml)[0]
# Format table title
title = '[{name}]\n'.format(name=name)
# Define a whitespace insertion method based on the longest key
longest = max([len(key) for key in table_toml[name]])
def align(str_):
return ' '*(longest-len(str_)+1)
# Lexicographically sort and whitespace-align table items
table = sorted(table_toml[name].items(), key=lambda x: x[0])
line = '"{key}"{ws}= "{val}"'
items = '\n'.join([line.format(key=key, ws=align(key), val=val) for key, val in table])
return title + items + '\n\n'
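# Illustrative example (hypothetical table): given the lines
#     ['[nicknames]\n', '"ipa" = "India Pale Ale"\n', '"apa" = "American Pale Ale"\n']
# unpack_table returns the table with keys sorted and whitespace-aligned:
#     [nicknames]
#     "apa" = "American Pale Ale"
#     "ipa" = "India Pale Ale"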
def parse_toml(lines):
final = []
inside_table = False
for line in lines:
if line.strip().startswith('#'):
if not inside_table:
final.append(line)
continue
elif line.strip().startswith('['):
if inside_table:
final.append(unpack_table(table_lines))
inside_table = True
table_lines = [line]
continue
elif line.strip().startswith('"') and inside_table:
table_lines.append(line)
continue
elif line.strip() == '':
if not inside_table:
final.append(line)
continue
else:
inside_table = False
final.append(line)
if inside_table:
final.append(unpack_table(table_lines))
text = ''.join(final)
return text.strip() + '\n'
def clean_config(filepath):
with open(filepath, mode='r', encoding='utf8') as file_:
lines = file_.readlines()
text = parse_toml(lines)
text = unicodedata.normalize('NFC', text)
return text
def main():
args = parsed_args()
filepath = fullpath(args.dir, 'config.toml')
text = clean_config(filepath)
if args.dry_run:
print(text)
else:
with open(filepath, mode='w', encoding='utf8') as file_:
file_.write(text)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11319601
|
<filename>src/constants.py
from typing import Final
from options import FrameType
DEBUG: bool = False
FFMPEG_COMMAND: Final = 'ffmpeg -y -f concat -safe 0 -i {path_listfile} -i {path_audio} -c:v libx264 -c:a copy -vf fps={framerate},format=yuv420p -shortest -hide_banner {path_output}'
FFMPEG_LOG_FILE: Final = "./ffmpeg.log"
FRAMERATE: Final = 25
PATH_LISTFILE: Final = "./out_listfile.txt"
PATH_AUDIO_ORIGINAL: Final = "./audio.aac"
PATH_OUTPUT: Final = "./out.mp4"
# TODO: Fix timings
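# Each TIMINGS entry pairs a FrameType with a duration ('dur'), presumably in
# frames at FRAMERATE fps; long runs of RANDOM frames are expanded inline via
# list multiplication.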
TIMINGS: Final = [
{'type': FrameType.IDLE, 'dur': 50},
{'type': FrameType.GETDOWN, 'dur': 12},
{'type': FrameType.GETDOWN, 'dur': 12},
*[{'type': FrameType.RANDOM, 'dur': 1}] * 216,
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
*[{'type': FrameType.RANDOM, 'dur': 1}] * 26,
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 2},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 2},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 11},
{'type': FrameType.CHIKAU, 'dur': 11},
{'type': FrameType.CHIKAU, 'dur': 12},
{'type': FrameType.CHIKAU, 'dur': 12},
*[{'type': FrameType.RANDOM, 'dur': 1}] * 216,
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
*[{'type': FrameType.RANDOM, 'dur': 1}] * 26,
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPSHAKE, 'dur': 4},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 2},
{'type': FrameType.HIPSHAKE, 'dur': 1},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 2},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 2},
{'type': FrameType.HIPTHRUST, 'dur': 4},
{'type': FrameType.HIPTHRUST, 'dur': 3},
{'type': FrameType.HIPTHRUST, 'dur': 11},
{'type': FrameType.CHIKAU, 'dur': 11},
{'type': FrameType.CHIKAU, 'dur': 12},
{'type': FrameType.CHIKAU, 'dur': 12},
*[{'type': FrameType.RANDOM, 'dur': 1}] * 101,
{'type': FrameType.IDLE, 'dur': 93},
{'type': FrameType.IDLE, 'dur': 0},
]
|
StarcoderdataPython
|
1984528
|
<filename>flaskee/core/environment.py
"""
Flaskee is an open-source project for microservices.
Developed by <NAME> | https://nadeengamage.com | <EMAIL>
"""
DEBUG = True
SECRET_KEY = 'super-secret-key'
SQLALCHEMY_TRACK_MODIFICATIONS=False
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/flaskee'
MAIL_DEFAULT_SENDER = ''
MAIL_SERVER = ''
MAIL_PORT = 25
MAIL_USE_TLS = True
MAIL_USERNAME = 'username'
MAIL_PASSWORD = 'password'
|
StarcoderdataPython
|
6530388
|
# -*- coding: utf-8 -*-
import os
from . import CleoTestCase
from cleo.terminal import Terminal
class TerminalTest(CleoTestCase):
def test_dimensions(self):
os.environ['COLUMNS'] = '100'
os.environ['LINES'] = '50'
terminal = Terminal()
self.assertEqual(100, terminal.width)
self.assertEqual(50, terminal.height)
|
StarcoderdataPython
|
9747666
|
<gh_stars>1-10
import numpy as np
import six
import tensorflow as tf
import tensorflow.keras.initializers as tfki
import tensorflow.keras.layers as tfkl
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object, deserialize_keras_object
from tensorflow.python.ops import variables as tf_variables
from dynastes.ops.t2t_common import shape_list
from dynastes.weight_normalizers.spectral import SpectralNormalization
def serialize(normalizer):
return serialize_keras_object(normalizer)
def deserialize(config, custom_objects=None):
    custom_objects = {**(custom_objects or {}), **object_scope}
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='normalizer')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
identifier = str(identifier)
if identifier == 'spectral':
return SpectralNormalization()
elif identifier == 'spectral_t':
return SpectralNormalization(transposed=True)
elif identifier == 'wscale':
return WscaleNormalizer()
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret normalizer identifier:', identifier)
@tf.keras.utils.register_keras_serializable(package='Dynastes')
class WscaleNormalizer(tfkl.Layer):
def __init__(self,
lrmul=1.,
gain=np.sqrt(2),
next_layer=tfkl.Activation('linear'),
trainable=False,
**kwargs):
if 'trainable' in kwargs:
kwargs.pop('trainable')
super(WscaleNormalizer, self).__init__(trainable=False, **kwargs)
self.next_layer = get(next_layer)
self.gain = gain
self.lrmul = lrmul
def call(self, inputs, training=None):
input_shape = shape_list(inputs)
if len(input_shape) > 1:
fan_in = np.prod(input_shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = self.gain / np.sqrt(fan_in) # He init
runtime_coef = he_std * self.lrmul
else:
runtime_coef = self.lrmul
return self.next_layer(inputs * runtime_coef, training=training)
def get_config(self):
config = {
'lrmul': self.lrmul,
'gain': self.gain,
'next_layer': serialize(self.next_layer)
}
base_config = super(WscaleNormalizer, self).get_config()
return {**base_config, **config}
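# WscaleNormalizer implements the "equalized learning rate" trick from
# Progressive GAN / StyleGAN: the He-init constant (gain / sqrt(fan_in)) is
# applied at runtime rather than folded into the initializer, and lrmul
# rescales the effective per-layer update size.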
cs = tf.CriticalSection(name='init_mutex')
@tf.keras.utils.register_keras_serializable(package='Dynastes')
class WeightNormalizer(tfkl.Layer):
def __init__(self,
weight_initializer,
next_layer=tfkl.Activation('linear'),
**kwargs):
super(WeightNormalizer, self).__init__(**kwargs)
self.orig_weight_initializer = tfki.get(weight_initializer)
self.next_layer = get(next_layer)
self._init_critical_section = cs
self.g = None
def build(self, input_shape):
self.layer_depth = int(input_shape[-1])
self.kernel_norm_axes = list(range(len(input_shape) - 1))
"""
self._initialized_g = self.add_weight(
name='initialized_g',
shape=None,
initializer="zeros",
dtype=tf.dtypes.bool,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA,
)"""
# For now it's a workaround, maybe works, who knows ¯\_(ツ)_/¯
def get_weight_init(input_shape, original_init):
def weight_init(target_shape, dtype, input_shape, original_init):
var_init = original_init(input_shape, dtype)
var_norm = tf.norm(tf.reshape(var_init, [-1, target_shape[-1]]), axis=0)
return tf.random.normal(shape=target_shape, mean=tf.reduce_mean(var_norm),
stddev=tf.math.reduce_std(var_norm))
return lambda x, dtype: weight_init(x, dtype, input_shape, original_init)
self.g = self.add_weight(
name="g",
shape=(self.layer_depth,),
synchronization=tf_variables.VariableSynchronization.AUTO,
initializer=get_weight_init(input_shape, self.orig_weight_initializer),
trainable=True,
)
self.built = True
def call(self, inputs, training=None, **kwargs):
# TODO: Fix this when TensorFlow developers learn how to code if I haven't switched to PyTorch by that time
# For now it's a workaround, maybe works, who knows ¯\_(ツ)_/¯
# def _update_or_return_vars():
# return tf.identity(self.g)
# def _init_g():
# V_norm = tf.norm(tf.reshape(inputs, [-1, self.layer_depth]), axis=0)
# with tf.control_dependencies([self.g.assign(V_norm), self._initialized_g.assign(True)]):
# return tf.identity(self.g)
# g = self._init_critical_section.execute(lambda: tf.cond(self._initialized_g, _update_or_return_vars, _init_g))
g = self.g
V_norm = tf.norm(tf.reshape(inputs, [-1, self.layer_depth]), axis=0)
scaler = tf.reshape(tf.math.divide_no_nan(g, V_norm),
list([1] * len(self.kernel_norm_axes)) + [self.layer_depth])
return inputs * scaler
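    # The call above follows weight normalization (Salimans & Kingma, 2016):
    # w = g * v / ||v||, with one learnable g per output channel.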
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'weight_initializer': tfki.serialize(self.orig_weight_initializer),
'next_layer': serialize(self.next_layer)
}
base_config = super(WeightNormalizer, self).get_config()
return {**base_config, **config}
object_scope = {
'SpectralNormalization': SpectralNormalization,
'WscaleNormalizer': WscaleNormalizer,
'Activation': tfkl.Activation
}
|
StarcoderdataPython
|
9770595
|
<gh_stars>1-10
DATACASH = 'SystemPay'
VERSION = '0.0.1'
|
StarcoderdataPython
|
215915
|
from app.api.v2.managers.base_api_manager import BaseApiManager
from app.objects.c_adversary import Adversary
class AdversaryApiManager(BaseApiManager):
def __init__(self, data_svc, file_svc):
super().__init__(data_svc=data_svc, file_svc=file_svc)
async def verify_adversary(self, adversary: Adversary):
adversary.verify(log=self.log, abilities=self._data_svc.ram['abilities'],
objectives=self._data_svc.ram['objectives'])
return adversary
|
StarcoderdataPython
|
4952911
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
# @file name : fusion_img.py
# @author : JLChen
# @date : 2020-03-11
# @brief : Use the portrait dataset as foreground and the COCO dataset as background
"""
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '..'))
import matplotlib.pyplot as plt
import pylab as pl
import cv2
from tools.coco_tool import CocoImg
import random
import shutil
def get_img_list(root):
"""
    Read the (image path, label path) pairs of the portrait2000 dataset.
:param root:
:return:
"""
file_list = os.listdir(root)
file_list = list(filter(lambda x: x.endswith("_matte.png"), file_list))
label_lst = [os.path.join(root, name) for name in file_list]
img_lst = [string.replace("_matte.png", ".png") for string in label_lst]
data_lst = [(path_img, path_label) for path_img, path_label in zip(img_lst, label_lst)]
return data_lst
def fusion(fore_path, mask_path, back_path):
"""
    Fuse a foreground portrait with a background image.
    :param fore_path: path to the raw image in portrait2000
    :param mask_path: path to the label (matte) image in portrait2000
    :param back_path: path to an image from the COCO dataset
:return:
"""
raw_img = cv2.imread(fore_path)
mask_img = cv2.imread(mask_path) / 255
back_img = cv2.imread(back_path)
fore_img = np.clip(raw_img * mask_img, a_min=0, a_max=255).astype(np.uint8)
h, w, c = fore_img.shape
back_img = cv2.resize(back_img, (w, h))
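    # Per-pixel alpha composite: result = fore * alpha + back * (1 - alpha),
    # where alpha is the matte scaled to [0, 1].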
result = np.clip(fore_img * mask_img + back_img * (1 - mask_img), a_min=0, a_max=255).astype(np.uint8)
return result
def gen_img(img_list, coco_genertor, out_dir, img_num=100):
"""
    Generate the fused images.
    :param img_list: list of (portrait image path, label image path) pairs from portrait2000
    :param coco_genertor: a CocoImg instance used to fetch COCO image paths
    :param out_dir: output directory
    :param img_num: number of images to generate
:return:
"""
for i in range(img_num):
fore_path, mask_path = random.choice(img_list)
        # fore_path, mask_path = img_list[0]  # for debugging: generate multiple images from a single foreground
_, back_path = random.choice(coco_genertor)
fusion_img = fusion(fore_path, mask_path, back_path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_name = "{0:08d}.png".format(i)
msk_name = "{0:08d}_matte.png".format(i)
img_path = os.path.join(out_dir, img_name)
mask_path_dst = os.path.join(out_dir, msk_name)
cv2.imwrite(img_path, fusion_img)
shutil.copyfile(mask_path, mask_path_dst)
print(f"{i}/{img_num}")
if __name__ == '__main__':
    img_num = 17000  # number of images to generate
out_dir = r"/home/elimen/Data/deepshare/Segmentation/Portrait_dataset/data_aug_{}".format(img_num)
portarit_root = r"/home/elimen/Data/deepshare/Segmentation/Portrait_dataset/Portrait-dataset-2000/dataset/training"
coco_root = r"/home/elimen/Data/deepshare/Segmentation/coco2017"
data_type = "train2017" # train2017::118287 张图片, val2017::5000
super_cats_in = ["outdoor", "indoor"]
super_cats_out = ["person"]
    # Step 1: create the COCO dataset generator
coco_genertor = CocoImg(coco_root, data_type, super_cats_in, super_cats_out)
    # Step 2: get the portrait image list
img_list = get_img_list(portarit_root)
    # Step 3: run the generation
gen_img(img_list, coco_genertor, out_dir, img_num=img_num)
|
StarcoderdataPython
|
122657
|
<reponame>gwangyi/pygritia
"""Pavement for Pygritia"""
import shlex
import sys
import paver.doctools # pylint: disable=unused-import
import paver.virtual # pylint: disable=unused-import
from paver.easy import * # pylint: disable=unused-wildcard-import,wildcard-import
from paver.options import Bunch
from paver.path import path
from paver.setuputils import setup
if sys.version_info < (3, 7):
sys.exit("Pygritia requires Python >= 3.7")
options = environment.options # pylint: disable=invalid-name
HERE = path(__file__).dirname().abspath()
setup_params = dict( # pylint: disable=invalid-name
name="pygritia",
version="0.2.0",
description="Pygritia: Lazy Symbolic Evaluation",
long_description=open(HERE / 'README.md').read(),
long_description_content_type="text/markdown",
url="https://github.com/gwangyi/pygritia",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
package_data={'pygritia': ['py.typed']},
packages=['pygritia'],
)
options(
minilib=Bunch(
extra_files=['doctools'],
versioned_name=False,
extra_packages=[],
),
sphinx=Bunch(
docroot='sphinx',
builddir='build',
sourcedir='source',
apidoc_opts=['-e'],
),
)
setup(**setup_params)
@task
@no_help
def env():
"""Ready env"""
import os
def pathify(key, *args):
paths = os.environ.get(key, '').split(os.path.pathsep)
os.environ[key] = os.path.pathsep.join(list(args) + paths)
pathify('MYPYPATH', HERE)
pathify('PYTHONPATH', HERE, *sys.path)
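    # Prepending here lets mypy and the in-repo test run resolve the local
    # package ahead of any installed copy.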
@task
@needs(['env'])
def test():
"""Run unittest"""
try:
import pytest # type: ignore
except ImportError:
raise BuildFailure('install pytest to test')
pytestopts = ['--cov=' + setup_params['name'], '--cov-report=html', '--cov-report=term']
dry('pytest {}'.format(' '.join(pytestopts)), pytest.main, pytestopts)
@task
@needs(['env'])
def typecheck():
"""Run mypy"""
try:
import mypy.main
except ImportError:
raise BuildFailure('install mypy to typecheck')
mypyopts = ['--strict', '-p', setup_params['name']]
dry('mypy {}'.format(' '.join(mypyopts)), mypy.main.main, None, mypyopts)
@task
@needs(['env'])
def lint():
"""Run mypy and pylint"""
try:
import pylint.lint # type: ignore
except ImportError:
raise BuildFailure('install pylint to lint')
pylintopts = ['pavement.py', 'paverlib', setup_params['name']]
dry('pylint {}'.format(' '.join(pylintopts)), pylint.lint.Run, pylintopts)
@task
@needs(['env'])
@consume_args
def shell(args):
"""Run shell"""
import os
sh(' '.join(shlex.quote(arg) for arg in args)
if args else os.environ.get('SHELL', '/bin/bash'))
@task
@needs(['env'])
def nvim():
"""Launch neovim with env"""
import os
os.environ['BULLETTRAIN_VIRTUALENV_PREFIX'] = 'py'
sh('nvim "+bot sp +terminal" +NERDTreeToggle')
@task
@needs(['paverlib.doctools.apidoc', 'paverlib.doctools.html'])
def html():
"""Override html task to copy result to 'docs' directory"""
import shutil
try:
shutil.rmtree(HERE / 'docs')
except FileNotFoundError:
pass
shutil.copytree(
HERE /
options.sphinx.docroot /
options.sphinx.builddir /
'html',
HERE /
'docs')
@task
@needs('generate_setup', 'minilib', 'setuptools.command.sdist')
def sdist():
"""Overrides sdist to make sure that our setup.py is generated."""
|
StarcoderdataPython
|
11333273
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""classify.py
Code to classify primer sets by predicted specificity
(c) The James Hutton Institute 2018
Author: <NAME>
Contact: <EMAIL>
<NAME>,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2018 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os
from collections import defaultdict, namedtuple
from Bio.Emboss.Primer3 import Primers
from diagnostic_primers import load_primers, PrimersEncoder, write_primers
from diagnostic_primers.primersearch import parse_output
# Convenience struct for collection group data
GroupData = namedtuple("GroupData", "names groups groupdata")
class PDPDiagnosticPrimersEncoder(json.JSONEncoder):
"""JSON encoder for PDPDiagnosticPrimers objects"""
def default(self, o): # pylint: disable=E0202
if isinstance(o, Primers):
encoder = PrimersEncoder()
return encoder.default(o)
if not isinstance(o, PDPDiagnosticPrimers):
return json.JSONEncoder.default(self, o)
# Convert PDPDiagnosticPrimers object to serialisable dictionary
# and return
return o.__dict__
class PDPDiagnosticPrimers:
"""Collection of diagnostic primers."""
def __init__(self, name):
self.name = str(name)
self._groups = defaultdict(list) # Groups with diagnostic primers
self._primers = dict()
def add_diagnostic_primer(self, primer, group):
"""Add a diagnostic primer set, specifying the group
- primer Eprimer3 object describing the primer set
- group the group it's specific to
"""
self._groups[group].append(primer)
self._primers[primer.name] = primer
def diagnostic_primer(self, group):
"""Returns list of primers diagnostic for the passed group"""
return self._groups[group]
@property
def groups(self):
"""List of groups in the diagnostic primer set"""
return sorted(list(self._groups.keys()))
@property
def primers(self):
"""List of primers in the diagnostic primer set"""
return sorted(list(self._primers.keys()))
def process_groups(coll):
"""Return GroupData namedtuple of group information
:param coll: PDPCollection
    Parse the passed collection and generate a local dictionary with a key
    for each group in the collection, whose values are the sets of names of
    the members of those groups.
"""
names = set() # set of all genome names
groups = defaultdict(set) # group name: set of genome names
for genome in coll.data:
for group in genome.groups:
groups[group].add(genome.name)
names.add(genome.name)
groupdata = [(members, name) for (name, members) in groups.items()]
return GroupData(names, groups, groupdata)
def process_crosshyb(coll, limits):
"""Return primers and cross-hybridisation data
:param coll: PDPCollection
    Parse the collection, follow the linked primersearch JSON file, and
    populate a dictionary of paths to each input genome, keyed by name.
"""
crosshyb = defaultdict(set) # holds cross-hybridisation data
genomepaths = {} # dictionary of paths to each genome file, keyed by name
for item in coll.data:
genomepaths[item.name] = item.seqfile
primers = {}
for genome in coll.data:
# All primers amplify their own source genome. Load the list
# of primers and populate the crosshyb dictionaries
for primer in load_primers(genome.primers, fmt="json"):
crosshyb[primer.name].add(genome.name)
primers[primer.name] = primer
# Load the data for primersearch cross-hybridisation, and populate
# the crosshyb dictionary
with open(genome.primersearch, "r") as ifh:
psdata = json.load(ifh)
# Each key other than "query" and "primers" is the name of
# the genome being tested against, and has a PrimerSearch
# output file
# crosshybnames is a list of target genome names
crosshybnames = [_ for _ in psdata.keys() if _ not in ("primers", "query")]
for name in crosshybnames:
data = parse_output(
psdata[name], genomepaths[name]
) # primersearch results
for primer in data:
for amplimer in primer.amplimers:
if limits[0] < len(amplimer) < limits[1]:
crosshyb[primer.name].add(name)
crosshybdata = [(targets, primer) for (primer, targets) in crosshyb.items()]
return (primers, crosshybdata)
def classify_primers(coll, min_amplicon, max_amplicon):
"""Classifies each of the primer sets referred to in the passed collection
:param coll: PDPCollection
describes the genomes in the run, with links to the predicted primer
sets, and their matches to other genomes post-primersearch/mapping
:param min_amplicon: The minimum length of an amplicon to consider as
a false positive
:param max_amplicon: The maximum length of an amplicon to consider as
a false positive
First, the collection data is parsed, and a dictionary generated, keyed
by group, with values being a set of genome names belonging to the
group.
For each genome in the passed PDPCollection, the path to the
PrimerSearch JSON file corresponding to the primers generated from that
genome is followed, and the file parsed to obtain the
path to the relevant PrimerSearch output.
Each PrimerSearch output file is parsed and a dictionary
populated:
- dictionary keyed by primer ID, with value being a set of the names
of genomes where an amplicon is theoretically produced (filtered for
amplicon length).
Using set logic, each primer's set of potential amplified genomes is
compared against the sets of specific groups to determine if there is
an exact match.
The primers that amplify exactly those genomes which are members of one
of the defined classes are returned as a PDPDiagnosticPrimers object that
is a collection of Primer3.Primers objects.
"""
# Get data about groups from the collection
groupdata = process_groups(coll)
# Create dictionary to hold primer cross-hybridisation targets
# - keyed by primer name with value a set of all genome target
primers, crosshybdata = process_crosshyb(coll, (min_amplicon, max_amplicon))
# To determine group-specific primer sets, we loop through the group
# data, and compare their members to each of the targets. As we find
# specific primer sets, we remove them from the pool.
results = PDPDiagnosticPrimers(coll.name)
for members, group in groupdata.groupdata:
for targets, primer in crosshybdata:
if members == targets: # Primers are specific
results.add_diagnostic_primer(primers[primer], group)
return results
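# Toy illustration (not from the original module) of the set-logic comparison
# above: a primer is diagnostic for a group exactly when the set of genomes it
# amplifies equals the set of genomes in the group.
#
#   group_members = {"genomeA", "genomeB"}
#   primer_targets = {"genomeA", "genomeB"}
#   group_members == primer_targets   # True: primer is group-specific
#   primer_targets = {"genomeA", "genomeB", "genomeC"}
#   group_members == primer_targets   # False: cross-hybridises, rejected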
def write_results(results, outfilename, fmt="json"):
"""Writes files describing PDPDiagnosticPrimers object data to outdir
- results PDPDiagnosticPrimers object
- outfilename Path to output file/directory
- fmt Result format
Several output formats may be written:
json
====
JSON representation of the complete PDPDiagnosticPrimers object
summary
=======
writes primers, and also tab-separated plain text table with the columns:
Group (specifically-amplified group)
NumPrimers (number of primer sets specifically amplifying that group)
Primers (path to file describing specific primers)
primers
=======
ePrimer3 and JSON format files describing specific primers, one per group
"""
funcs = {
"json": __write_results_json,
"summary": __write_results_summary,
"primers": __write_results_primers,
}
funcs[fmt](results, outfilename)
def __write_results_json(results, outfilename):
"""Write PDPDiagnosticPrimers JSON representation
- results PDPDiagnosticPrimers object
- outfilename path to output file
"""
with open(outfilename, "w") as ofh:
json.dump(results, ofh, cls=PDPDiagnosticPrimersEncoder)
def __write_results_primers(results, outdir):
"""Write JSON/ePrimer3 files describing diagnostic primers
- results PDPDiagnosticPrimers object
- outdir path to directory for output files
"""
for group in results.groups:
outstem = os.path.join(outdir, "%s_primers" % group)
write_primers(results.diagnostic_primer(group), outstem + ".json", "json")
write_primers(results.diagnostic_primer(group), outstem + ".ePrimer3", "ep3")
# We don't write .bed files using write_primers(), as this reports the primer
# sets in the context of the genomes they're defined from. However, for
# classify (and extract), we want locations on each genome they amplify from
# write_primers(results.diagnostic_primer(group), outstem + ".bed", "bed")
def __write_results_summary(results, outfilename):
"""Write summary table of diagnostic primer results"""
# Write the diagnostic primer sets first
outdir = os.path.split(outfilename)[0]
__write_results_primers(results, outdir)
# Write the summary table
outstr = ["\t".join(["Group", "NumPrimers", "Primers"])]
for group in results.groups:
outstr.append(
"\t".join(
[
group,
str(len(results.diagnostic_primer(group))),
os.path.join(outdir, "%s_primers.json" % group),
]
)
)
with open(outfilename, "w") as ofh:
ofh.write("\n".join(outstr) + "\n")
|
StarcoderdataPython
|
5059359
|
# Generated by Django 2.0.7 on 2018-08-19 14:41
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='notification',
options={'ordering': ['-created_at']},
),
migrations.AddField(
model_name='notification',
name='comment',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='notification',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='notification',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='notification',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='images.Image'),
),
]
|
StarcoderdataPython
|
5182106
|
<reponame>jperez999/systems-1
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import sys
import traceback
from typing import List
import triton_python_backend_utils as pb_utils
from triton_python_backend_utils import (
InferenceRequest,
InferenceResponse,
Tensor,
get_input_tensor_by_name,
)
from merlin.systems.dag.op_runner import OperatorRunner
from merlin.systems.dag.ops.operator import InferenceDataFrame
class TritonPythonModel:
def initialize(self, args):
self.model_config = json.loads(args["model_config"])
self.runner = OperatorRunner(self.model_config)
def execute(self, requests: List[InferenceRequest]) -> List[InferenceResponse]:
params = self.model_config["parameters"]
op_names = json.loads(params["operator_names"]["string_value"])
first_operator_name = op_names[0]
operator_params = json.loads(params[first_operator_name]["string_value"])
input_column_names = list(json.loads(operator_params["input_dict"]).keys())
responses = []
for request in requests:
try:
# transform the triton tensors to a dict of name:numpy tensor
input_tensors = {
name: get_input_tensor_by_name(request, name).as_numpy()
for name in input_column_names
}
inf_df = InferenceDataFrame(input_tensors)
raw_tensor_tuples = self.runner.execute(inf_df)
tensors = {
name: (data.get() if hasattr(data, "get") else data)
for name, data in raw_tensor_tuples
}
result = [Tensor(name, data) for name, data in tensors.items()]
responses.append(InferenceResponse(result))
except Exception: # pylint: disable=broad-except
exc_type, exc_value, exc_traceback = sys.exc_info()
tb_string = repr(traceback.extract_tb(exc_traceback))
responses.append(
pb_utils.InferenceResponse(
tensors=[], error=f"{exc_type}, {exc_value}, {tb_string}"
)
)
return responses
|
StarcoderdataPython
|
1704066
|
import json
from lambda_assistant.handlers.event_handler import EventHandler
from lambda_assistant.errors import LambdaError, InternalServerError
from lambda_assistant.response.headers import CORSHeaders
from lambda_assistant.types import APIGatewayProxyResult
def buildResponse(statusCode, headers: dict, body: dict):
return APIGatewayProxyResult(
HTTPStatus=statusCode,
Headers=headers,
Body= body
)
def buildBody(operation, response):
return {
'operationResource': operation,
'response': json.dumps(response, default=str)
}
|
StarcoderdataPython
|
6407951
|
# Generated by Django 2.2.7 on 2019-11-22 00:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Menu',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Nome')),
('description', models.TextField(blank=True, verbose_name='Descrição')),
],
),
migrations.CreateModel(
name='MenuCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(verbose_name='ordem')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Category')),
('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.Menu')),
],
options={
'ordering': ('order',),
'unique_together': {('menu', 'category')},
},
),
]
|
StarcoderdataPython
|
226564
|
import inspect
from copy import deepcopy
from inspect import Parameter, Signature
from typing import (
Any,
Callable,
Dict,
Final,
Mapping,
Tuple,
get_args,
get_origin,
)
from pydantic.tools import schema_of
# FIXME: name and key seem to be the same??!
_PACKAGE_NAME: Final = "ofs" # __name__.split(".")[0]
_TEMPLATE_META: Final = {
"name": "TO_BE_DEFINED",
"thumbnail": "https://upload.wikimedia.org/wikipedia/commons/thumb/b/bd/Test.svg/315px-Test.svg.png",
"description": "",
"key": "simcore/services/comp/TO_BE_DEFINED",
"version": "TO_BE_DEFINED",
"integration-version": "1.0.0",
"type": "computational",
"authors": [
{
"name": "<NAME>",
"email": "<EMAIL>",
"affiliation": "IT'IS Foundation",
},
],
"contact": "<EMAIL>",
"inputs": {},
"outputs": {},
}
# TODO: class MetaDict(TypedDict):
# name: str
# thumbnail: str
# description: str
MetaDict = Dict[str, Any]
def _name_type(parameter_annotation):
try:
if issubclass(parameter_annotation, float):
name = "number"
elif issubclass(parameter_annotation, int):
name = "integer"
elif issubclass(parameter_annotation, str):
name = "string"
else:
name = f"{parameter_annotation}".replace("typing.", "")
except TypeError:
name = f"{parameter_annotation}".replace("typing.", "")
return name
def validate_inputs(parameters: Mapping[str, Parameter]) -> Dict[str, Any]:
inputs = {}
for parameter in parameters.values():
# should only allow keyword argument
assert parameter.kind == parameter.KEYWORD_ONLY
assert parameter.annotation != Parameter.empty
# build each input
description = getattr(
parameter.annotation,
"description",
parameter.name.replace("_", " ").capitalize(),
)
# FIXME: files are represented differently!
content_schema = schema_of(
parameter.annotation,
title=parameter.name.capitalize(),
)
data = {
"label": parameter.name,
"description": description,
"type": "ref_contentSchema",
"contentSchema": content_schema,
}
if parameter.default != Parameter.empty:
# TODO: what if partial-field defaults?
data["defaultValue"] = parameter.default
inputs[parameter.name] = data
return inputs
def _as_args_tuple(return_annotation: Any) -> Tuple:
if return_annotation == Signature.empty:
return tuple()
origin = get_origin(return_annotation)
if origin and origin is tuple:
# multiple outputs
return_args_types = get_args(return_annotation)
else:
# single output
return_args_types = (return_annotation,)
return return_args_types
def validate_outputs(return_annotation: Any) -> Dict[str, Any]:
# TODO: add extra info on outputs?
outputs = {}
return_args_types = _as_args_tuple(return_annotation)
for index, return_type in enumerate(return_args_types, start=1):
name = f"out_{index}"
display_name = f"Out{index} {_name_type(return_type)}"
data = {
"label": display_name,
"description": "",
"type": "ref_contentSchema",
"contentSchema": schema_of(
return_type,
title=display_name,
),
}
outputs[name] = data
return outputs
def create_meta(func: Callable, service_version: str) -> MetaDict:
if inspect.isgeneratorfunction(func):
raise NotImplementedError(f"Cannot process function iterators as {func}")
signature = inspect.signature(func)
inputs = validate_inputs(signature.parameters)
outputs = validate_outputs(signature.return_annotation)
service_name = f"{_PACKAGE_NAME}-{func.__name__}"
meta = deepcopy(_TEMPLATE_META)
meta["name"] = service_name
meta["key"] = f"simcore/services/comp/ofs-{func.__name__}"
meta["version"] = service_version
meta["inputs"] = inputs
meta["outputs"] = outputs
return meta
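# Minimal usage sketch (illustration only; the function below is hypothetical).
# validate_inputs() requires keyword-only, annotated parameters:
#
#   def scale(*, x: float, factor: float = 2.0) -> float:
#       return x * factor
#
#   meta = create_meta(scale, service_version="1.0.0")
#   # meta["inputs"] gains entries for 'x' and 'factor' (defaultValue 2.0);
#   # meta["outputs"] gains a single 'out_1' entry typed as a number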
|
StarcoderdataPython
|
9780241
|
<gh_stars>0
from flask import render_template,request,redirect,url_for,abort
from . import main
from flask_login import login_required,current_user
from ..models import User,Pitch,Comment,Upvote,Downvote
from .forms import UpdateProfile,PitchForm,CommentForm
from .. import db, photos  # photos: assumed flask_uploads UploadSet defined in the app package
import datetime
@main.route('/')
def index():
pitches = Pitch.query.all()
advertisement = Pitch.query.filter_by(category = 'Advertisement').all()
interview = Pitch.query.filter_by(category = 'Interview').all()
product = Pitch.query.filter_by(category = 'Product').all()
technology = Pitch.query.filter_by(category = 'Technology').all()
return render_template('index.html', interview = interview,product = product, pitches = pitches,advertisement= advertisement,technology=technology)
@main.route('/create_new', methods = ['POST','GET'])
@login_required
def new_pitch():
form = PitchForm()
if form.validate_on_submit():
title = form.title.data
post = form.post.data
category = form.category.data
new_pitch = Pitch(title = title,post=post,category=category,user=current_user)
new_pitch.save_pitch()
return redirect(url_for('main.index'))
return render_template('new_pitch.html', form = form)
@main.route('/comment/<int:pitch_id>', methods = ['POST','GET'])
@login_required
def comment(pitch_id):
form = CommentForm()
pitch = Pitch.query.get(pitch_id)
comments = Comment.query.filter_by(pitch_id = pitch_id).all()
if form.validate_on_submit():
comment = form.comment.data
pitch_id = pitch_id
new_comment = Comment(comment = comment,pitch_id = pitch_id,user=current_user)
new_comment.save_comment()
return redirect(url_for('.comment', pitch_id = pitch_id))
return render_template('comment.html', form =form, pitch = pitch,comments=comments)
@main.route('/user/<uname>')
@login_required
def profile(uname):
user = User.query.filter_by(username = uname).first()
posts = Pitch.query.filter_by(user = current_user).all()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user,posts=posts)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/like/<int:id>',methods = ['POST','GET'])
@login_required
def upvote(id):
pitches = Upvote.get_upvotes(id)
usr_id = f'{current_user.id}:{id}'
for pitch in pitches:
to_string = f'{pitch}'
if usr_id == to_string:
return redirect(url_for('main.index',id=id))
else:
continue
new_vote = Upvote(user = current_user, pitch_id=id)
new_vote.save()
return redirect(url_for('main.index',id=id))
@main.route('/dislike/<int:id>',methods = ['POST','GET'])
@login_required
def downvote(id):
pitches = Downvote.get_downvotes(id)
usr_id = f'{current_user.id}:{id}'
for pitch in pitches:
to_string = f'{pitch}'
if usr_id == to_string:
return redirect(url_for('main.index',id=id))
else:
continue
new_downvote = Downvote(user = current_user, pitch_id=id)
new_downvote.save()
return redirect(url_for('main.index',id = id))
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
|
StarcoderdataPython
|
8181099
|
from pymodbus.interfaces import IModbusFramer
import struct
# Unit ID, Function Code
BYTE_ORDER = '>'
FRAME_HEADER = 'BB'
# Transaction Id, Protocol ID, Length, Unit ID, Function Code
SOCKET_FRAME_HEADER = BYTE_ORDER + 'HHH' + FRAME_HEADER
# Function Code
TLS_FRAME_HEADER = BYTE_ORDER + 'B'
class ModbusFramer(IModbusFramer):
"""
Base Framer class
"""
def _validate_unit_id(self, units, single):
"""
Validates if the received data is valid for the client
:param units: list of unit id for which the transaction is valid
:param single: Set to true to treat this as a single context
:return: """
if single:
return True
else:
if 0 in units or 0xFF in units:
                # Handle Modbus TCP unit identifier (0x00 or 0xFF)
# in asynchronous requests
return True
return self._header['uid'] in units
def sendPacket(self, message):
"""
Sends packets on the bus with 3.5char delay between frames
:param message: Message to be sent over the bus
:return:
"""
return self.client.send(message)
def recvPacket(self, size):
"""
Receives packet from the bus with specified len
:param size: Number of bytes to read
:return:
"""
return self.client.recv(size)
|
StarcoderdataPython
|
6655460
|
import mmcv
import torch
from copy import deepcopy
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from os import path as osp
from mmdet3d.core import Box3DMode, show_result
from mmdet3d.core.bbox import get_box_type
from mmdet3d.datasets.pipelines import Compose
from mmdet3d.models import build_detector
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
device (str): Device to use.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = config.class_names
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def inference_detector(model, pcd):
"""Inference point cloud with the detector.
Args:
model (nn.Module): The loaded detector.
pcd (str): Point cloud files.
Returns:
tuple: Predicted results and data from pipeline.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = deepcopy(cfg.data.test.pipeline)
test_pipeline = Compose(test_pipeline)
box_type_3d, box_mode_3d = get_box_type(cfg.data.test.box_type_3d)
data = dict(
pts_filename=pcd,
box_type_3d=box_type_3d,
box_mode_3d=box_mode_3d,
img_fields=[],
bbox3d_fields=[],
pts_mask_fields=[],
pts_seg_fields=[],
bbox_fields=[],
mask_fields=[],
seg_fields=[])
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device.index])[0]
else:
# this is a workaround to avoid the bug of MMDataParallel
data['img_metas'] = data['img_metas'][0].data
data['points'] = data['points'][0].data
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result, data
def show_result_meshlab(data, result, out_dir):
"""Show result by meshlab.
Args:
data (dict): Contain data from pipeline.
result (dict): Predicted result from model.
out_dir (str): Directory to save visualized result.
"""
points = data['points'][0][0].cpu().numpy()
pts_filename = data['img_metas'][0][0]['pts_filename']
file_name = osp.split(pts_filename)[-1].split('.')[0]
assert out_dir is not None, 'Expect out_dir, got none.'
pred_bboxes = result['boxes_3d'].tensor.numpy()
# for now we convert points into depth mode
if data['img_metas'][0][0]['box_mode_3d'] != Box3DMode.DEPTH:
points = points[..., [1, 0, 2]]
points[..., 0] *= -1
pred_bboxes = Box3DMode.convert(pred_bboxes,
data['img_metas'][0][0]['box_mode_3d'],
Box3DMode.DEPTH)
pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
else:
pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
show_result(points, None, pred_bboxes, out_dir, file_name)
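# Minimal end-to-end sketch (illustration only; config/checkpoint/point-cloud
# paths are placeholders). Depending on the detector, the result may be a list
# of per-sample dicts, hence result[0]:
#
#   model = init_detector('configs/some_config.py', 'checkpoints/model.pth',
#                         device='cuda:0')
#   result, data = inference_detector(model, 'demo/sample_points.bin')
#   show_result_meshlab(data, result[0], out_dir='demo_out')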
|
StarcoderdataPython
|
3341977
|
<filename>Machine Learning Scientist with Python Track/11. Model Validation in Python/ch4_exercises.py
# Exercise_1
#1
# Review the parameters of rfr
print(rfr.get_params())
#2
# Review the parameters of rfr
print(rfr.get_params())
# Maximum Depth
max_depth = [4, 8, 12]
# Minimum samples for a split
min_samples_split = [2, 5, 10]
#3
# Review the parameters of rfr
print(rfr.get_params())
# Maximum Depth
max_depth = [4, 8, 12]
# Minimum samples for a split
min_samples_split = [2, 5, 10]
# Max features
max_features = [4,6,8,10]
--------------------------------------------------
# Exercise_2
from sklearn.ensemble import RandomForestRegressor
# Fill in rfr using your variables
rfr = RandomForestRegressor(
n_estimators=100,
max_depth=random.choice(max_depth),
min_samples_split=random.choice(min_samples_split),
max_features=random.choice(max_features))
# Print out the parameters
print(rfr.get_params())
--------------------------------------------------
# Exercise_3
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import make_scorer, mean_squared_error
# Finish the dictionary by adding the max_depth parameter
param_dist = {"max_depth": [2,4,6,8],
"max_features": [2, 4, 6, 8, 10],
"min_samples_split": [2, 4, 8, 16]}
# Create a random forest regression model
rfr = RandomForestRegressor(n_estimators=10, random_state =1111)
# Create a scorer to use (use the mean squared error)
scorer = make_scorer(mean_squared_error)
--------------------------------------------------
# Exercise_4
# Import the method for random search
from sklearn.model_selection import RandomizedSearchCV
# Build a random search using param_dist, rfr, and scorer
random_search =\
RandomizedSearchCV(
estimator=rfr,
param_distributions=param_dist,
n_iter=10,
cv=5,
scoring=scorer)
--------------------------------------------------
# Exercise_5
from sklearn.metrics import precision_score, make_scorer
# Create a precision scorer
precision = make_scorer(precision_score)
# Finalize the random search
rs = RandomizedSearchCV(
estimator=rfc, param_distributions=param_dist,
scoring = precision,
cv=5, n_iter=10, random_state=1111)
rs.fit(X, y)
# print the mean test scores:
print('The accuracy for each run was: {}.'.format(rs.cv_results_['mean_test_score']))
# print the best model score:
print('The best accuracy for a single model was: {}'.format(rs.best_score_))
--------------------------------------------------
|
StarcoderdataPython
|
8038452
|
<gh_stars>0
class Solution:
def deleteNode(self, node):
node.val = node.next.val
node.next = node.next.next
|
StarcoderdataPython
|
11278589
|
import pathlib
def cache(string):
    with open(pathlib.Path() / "tests" / "test_bot" / ".cache", "a") as cache_file:
        cache_file.write(f"{string}\n")
|
StarcoderdataPython
|
9787465
|
import random
print('this is a dice rolling simulator'.upper())
x = 'y'
while x == 'y':
number = random.randint(1, 6)
if number == 1:
print("---------")
print('| |')
print('| 0 |')
print('| |')
print("---------")
elif number == 2:
print("---------")
print('| |')
print('| 00 |')
print('| |')
print("---------")
elif number == 3:
print("---------")
print('| 0 |')
print('| |')
print('| 0 0 |')
print("---------")
elif number == 4:
print("---------")
print('| 0 0 |')
print('| |')
print('| 0 0 |')
print("---------")
elif number == 5:
print("---------")
print('| 0 0 |')
print('| 0 |')
print('| 0 0 |')
print("---------")
elif number == 6:
print("---------")
print('| 0 0 |')
print('| 0 0 |')
print('| 0 0 |')
print("---------")
x = input("press y to roll it again")
|
StarcoderdataPython
|
4824885
|
<reponame>andocoyote/AndoEconAPIs<filename>MaximumRevenue/__init__.py
from ..Common import Calculations as calc
import json
import logging
import sympy
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
symbols = ''
fx = ''
try:
req_body = req.get_json()
# Example symbols = 'x'
symbols = req_body.get('symbols')
# Example: fx = '10 - 0.001*x'
fx = req_body.get('fx')
if symbols and fx:
demandFunction = fx
# Revenue is quantity x * price p which is given by the demand function (price function)
revenueFunction = symbols + ' * (' + demandFunction + ')'
# MR is the derivative of the revenue function
marginalRevenueFunction = calc.Derivative(symbols, revenueFunction)
# Solve the equation set to zero and obtain the result
optimumQuantitySet = calc.Solve(symbols, marginalRevenueFunction)
optimumQuantity = [float(num) for num in optimumQuantitySet]
# Evaluate the demand function using optimum quantity to obtain price per item
itemPrice = calc.Evalutate(symbols, demandFunction, {'x': optimumQuantity[0]})
# Evaluate the revenue function using optimum quantity to obtain total revenue
totalRevenue = calc.Evalutate(symbols, revenueFunction, {'x': optimumQuantity[0]})
# Format the response body JSON
response = json.dumps(
{'demandFunction': str(demandFunction),
'revenueFunction': str(revenueFunction),
'marginalRevenueFunction': str(marginalRevenueFunction),
'optimumQuantity': float(optimumQuantity[0]),
'itemPrice': float(itemPrice),
'totalRevenue': float(totalRevenue)})
            return func.HttpResponse(response, status_code=200)
else:
return func.HttpResponse(
"This HTTP triggered function executed successfully. Supply symbols and fx.",
status_code=200
)
    except Exception:
        return func.HttpResponse(
            'Error: failed to calculate result from symbols {0} and fx {1}'
            .format(symbols, fx),
            status_code=400)
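# Worked example (illustration only) using the sample demand function quoted in
# the comments above, fx = '10 - 0.001*x':
#   revenue          R(x)  = x * (10 - 0.001*x) = 10*x - 0.001*x**2
#   marginal revenue R'(x) = 10 - 0.002*x
#   R'(x) = 0  =>  optimumQuantity x = 5000
#   itemPrice    = 10 - 0.001*5000 = 5.0
#   totalRevenue = 5000 * 5.0 = 25000.0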
|
StarcoderdataPython
|
9744397
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'example.settings')
app = Celery('example', broker=settings.BROKER_URL)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
StarcoderdataPython
|
9635684
|
from django.urls import path
from .views import CreateUser, Me, get_csrf, login_set_cookie, login_view, logout_view
app_name = 'user'
urlpatterns = [
path('login_set_cookie', login_set_cookie, name='login_set_cookie'),
path('get_csrf', get_csrf, name='get_csrf'),
path('login', login_view, name='login'),
path('logout', logout_view, name='logout'),
path('create', CreateUser.as_view(), name='create'),
path('me', Me.as_view(), name='check_current_user'),
]
|
StarcoderdataPython
|
8023624
|
<filename>src/django_globals/__init__.py
import threading
globals = threading.local()
|
StarcoderdataPython
|
146001
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GraphSAGE tests
"""
from tensorflow import keras
from tensorflow.keras import initializers, regularizers
import numpy as np
import pytest
from stellargraph.mapper import GraphSAGENodeGenerator
from stellargraph.layer.graphsage import (
GraphSAGE,
MeanAggregator,
MaxPoolingAggregator,
MeanPoolingAggregator,
AttentionalAggregator,
)
from ..test_utils.graphs import example_graph
from .. import test_utils
pytestmark = test_utils.ignore_stellargraph_experimental_mark
# Mean aggregator tests
def test_mean_agg_constructor():
agg = MeanAggregator(2)
assert agg.output_dim == 2
assert not agg.has_bias
# Check config
config = agg.get_config()
assert config["output_dim"] == 2
assert config["bias"] is False
assert config["act"] == "relu"
def test_mean_agg_constructor_1():
agg = MeanAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
assert agg.output_dim == 4
assert agg.has_bias
assert agg.act(2) == 3
def test_mean_agg_apply():
agg = MeanAggregator(5, bias=True, act=lambda x: x, kernel_initializer="ones")
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(1, 2, 2))
out = agg([inp1, inp2])
assert agg.weight_dims == [3, 2]
model = keras.Model(inputs=[inp1, inp2], outputs=out)
x1 = np.array([[[1, 1]]])
x2 = np.array([[[[2, 2], [3, 3]]]])
actual = model.predict([x1, x2])
expected = np.array([[[2, 2, 2, 5, 5]]])
assert expected == pytest.approx(actual)
def test_mean_agg_apply_groups():
agg = MeanAggregator(11, bias=True, act=lambda x: x, kernel_initializer="ones")
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(1, 2, 2))
inp3 = keras.Input(shape=(1, 2, 2))
out = agg([inp1, inp2, inp3])
assert agg.weight_dims == [5, 3, 3]
model = keras.Model(inputs=[inp1, inp2, inp3], outputs=out)
x1 = np.array([[[1, 1]]])
x2 = np.array([[[[2, 2], [3, 3]]]])
x3 = np.array([[[[5, 5], [4, 4]]]])
actual = model.predict([x1, x2, x3])
print(actual)
expected = np.array([[[2] * 5 + [5] * 3 + [9] * 3]])
assert expected == pytest.approx(actual)
def test_mean_agg_zero_neighbours():
agg = MeanAggregator(4, bias=False, act=lambda x: x, kernel_initializer="ones")
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(1, 0, 2))
out = agg([inp1, inp2])
model = keras.Model(inputs=[inp1, inp2], outputs=out)
x1 = np.array([[[1, 1]]])
x2 = np.zeros((1, 1, 0, 2))
actual = model.predict([x1, x2])
expected = np.array([[[2, 2, 2, 2]]])
assert expected == pytest.approx(actual)
# MaxPooling aggregator tests
def test_maxpool_agg_constructor():
agg = MaxPoolingAggregator(2, bias=False)
assert agg.output_dim == 2
assert agg.hidden_dim == 2
assert not agg.has_bias
assert agg.act.__name__ == "relu"
assert agg.hidden_act.__name__ == "relu"
# Check config
config = agg.get_config()
assert config["output_dim"] == 2
assert config["bias"] == False
assert config["act"] == "relu"
def test_maxpool_agg_constructor_1():
agg = MaxPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
assert agg.output_dim == 4
assert agg.hidden_dim == 4
assert agg.has_bias
assert agg.act(2) == 3
def test_maxpool_agg_apply_hidden_bias():
# Specifying bias_initializer="ones" initialises all bias terms to ones;
    # using bias=False turns off the outer bias but retains the hidden bias.
agg = MaxPoolingAggregator(
2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
)
assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"
# Self features
inp1 = keras.Input(shape=(1, 2))
# Neighbour features
inp2 = keras.Input(shape=(1, 2, 2))
out = agg([inp1, inp2])
# Check sizes
assert agg.weight_dims == [1, 1]
# Numerical test values
x1 = np.array([[[1, 1]]])
x2 = np.array([[[[2, 2], [3, 3]]]])
# Agg output:
# neigh_agg = max(relu(x2 · ones(2x2)) + ones(2)), axis=1) = max([[5,5],[7,7]]) = [[7,7]]
# from_self = K.dot(x1, ones) = [[2]]
# from_neigh = K.dot(neigh_agg, ones) = [[14]]
model = keras.Model(inputs=[inp1, inp2], outputs=out)
actual = model.predict([x1, x2])
expected = np.array([[[2, 14]]])
assert expected == pytest.approx(actual)
def test_maxpool_agg_apply_no_bias():
# By default, bias_initializers="zeros", so all bias terms are initialised to zeros.
agg = MaxPoolingAggregator(2, act="linear", kernel_initializer="ones")
assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"
# Self features
inp1 = keras.Input(shape=(1, 2))
# Neighbour features
inp2 = keras.Input(shape=(1, 2, 2))
out = agg([inp1, inp2])
# Check sizes
assert agg.weight_dims == [1, 1]
# Numerical test values
x1 = np.array([[[1, 1]]])
x2 = np.array([[[[2, 2], [3, 3]]]])
# Agg output:
# neigh_agg = max(relu(x2 · ones(2x2)) + zeros(2)), axis=1) = max([[4,4],[6,6]]) = [[6,6]]
# from_self = K.dot(x1, ones) = [[2]]
# from_neigh = K.dot(neigh_agg, ones) = [[12]]
model = keras.Model(inputs=[inp1, inp2], outputs=out)
actual = model.predict([x1, x2])
expected = np.array([[[2, 12]]])
assert expected == pytest.approx(actual)
def test_maxpool_agg_zero_neighbours():
agg = MaxPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(1, 0, 2))
out = agg([inp1, inp2])
model = keras.Model(inputs=[inp1, inp2], outputs=out)
x1 = np.array([[[1, 1]]])
x2 = np.zeros((1, 1, 0, 2))
actual = model.predict([x1, x2])
expected = np.array([[[2, 2, 2, 2]]])
assert expected == pytest.approx(actual)
# MeanPooling aggregator tests
def test_meanpool_agg_constructor():
agg = MeanPoolingAggregator(2, bias=False)
assert agg.output_dim == 2
assert agg.hidden_dim == 2
assert not agg.has_bias
assert agg.act.__name__ == "relu"
assert agg.hidden_act.__name__ == "relu"
# Check config
config = agg.get_config()
assert config["output_dim"] == 2
assert config["bias"] is False
assert config["act"] == "relu"
def test_meanpool_agg_constructor_1():
agg = MeanPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
assert agg.output_dim == 4
assert agg.hidden_dim == 4
assert agg.has_bias
assert agg.act(2) == 3
def test_meanpool_agg_apply_hidden_bias():
# Specifying bias_initializer="ones" initialises all bias terms to ones;
    # using bias=False turns off the outer bias but retains the hidden bias.
agg = MeanPoolingAggregator(
2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
)
assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"
# Self features
inp1 = keras.Input(shape=(1, 2))
# Neighbour features
inp2 = keras.Input(shape=(1, 2, 2))
out = agg([inp1, inp2])
# Check sizes
assert agg.weight_dims == [1, 1]
# Numerical test values
x1 = np.array([[[1, 1]]])
x2 = np.array([[[[2, 2], [3, 3]]]])
# Agg output:
# neigh_agg = mean(relu(x2 · ones(2x2) + ones(2)), axis=1)
# = mean([[5,5],[7,7]]) = [[6,6]]
# from_self = K.dot(x1, ones) = [[2]]
# from_neigh = K.dot(neigh_agg, ones(2x1)) = [[12]]
model = keras.Model(inputs=[inp1, inp2], outputs=out)
actual = model.predict([x1, x2])
expected = np.array([[[2, 12]]])
assert expected == pytest.approx(actual)
def test_meanpool_agg_apply_no_bias():
# By default, bias_initializers="zeros", so all bias terms are initialised to zeros.
agg = MeanPoolingAggregator(2, act="linear", kernel_initializer="ones")
assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"
# Self features
inp1 = keras.Input(shape=(1, 2))
# Neighbour features
inp2 = keras.Input(shape=(1, 2, 2))
out = agg([inp1, inp2])
# Check sizes
assert agg.weight_dims == [1, 1]
# Numerical test values
x1 = np.array([[[1, 1]]])
x2 = np.array([[[[2, 2], [3, 3]]]])
# Agg output:
# neigh_agg = mean(relu(x2 · ones(2x2) + zeros(2)), axis=1)
# = mean([[4,4],[6,6]]) = [[5,5]]
# from_self = K.dot(x1, ones) = [[2]]
# from_neigh = K.dot(neigh_agg, ones) = [[10]]
model = keras.Model(inputs=[inp1, inp2], outputs=out)
actual = model.predict([x1, x2])
expected = np.array([[[2, 10]]])
assert expected == pytest.approx(actual)
def test_meanpool_agg_zero_neighbours():
agg = MeanPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(1, 0, 2))
out = agg([inp1, inp2])
    # With a zero-sized neighbour input the aggregator can only draw on the
    # head-node features, so the first weight group takes the full output size.
    assert agg.weight_dims == [4, 0]
model = keras.Model(inputs=[inp1, inp2], outputs=out)
x1 = np.array([[[1, 1]]])
x2 = np.zeros((1, 1, 0, 2))
actual = model.predict([x1, x2])
expected = np.array([[[2, 2, 2, 2]]])
assert expected == pytest.approx(actual)
# Attentional aggregator tests
def test_attn_agg_constructor():
agg = AttentionalAggregator(2, bias=False)
assert agg.output_dim == 2
assert not agg.has_bias
assert agg.act.__name__ == "relu"
# assert agg.attn_act.__name__ == "relu"
# Check config
config = agg.get_config()
assert config["output_dim"] == 2
assert config["bias"] is False
assert config["act"] == "relu"
def test_attn_agg_constructor_1():
agg = AttentionalAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
assert agg.output_dim == 4
assert agg.has_bias
assert agg.act(2) == 3
def test_attn_agg_apply():
agg = AttentionalAggregator(2, bias=False, act="linear", kernel_initializer="ones")
agg.attn_act = keras.activations.get("linear")
# Self features
inp1 = keras.Input(shape=(1, 2))
# Neighbour features
inp2 = keras.Input(shape=(1, 2, 2))
out = agg([inp1, inp2])
    # The AttentionalAggregator implementation is a hack at the moment; it doesn't
# assign any dimensions in the output to head-node features.
assert agg.weight_dims == [0, 2]
# Numerical test values
x1 = np.array([[[1, 1]]])
x2 = np.array([[[[2, 2], [3, 3]]]])
# Agg output:
# hs = relu(x1 · ones(2x2)) = [2,2]
# hn = relu(x2 · ones(2x2)) = [[2,2], [4,4], [6,6]]
# attn_u = ones(2) · hs + ones(2) · hn = [8, 12, 16]
# attn = softmax(attn_u) = [3.3e-4, 1.8e-4, 9.81e-1]
# hout = attn · hn = [5.96, 5.96]
model = keras.Model(inputs=[inp1, inp2], outputs=out)
actual = model.predict([x1, x2])
expected = np.array([[[5.963, 5.963]]])
assert expected == pytest.approx(actual, rel=1e-4)
def test_attn_agg_zero_neighbours():
agg = AttentionalAggregator(4, bias=False, act="linear", kernel_initializer="ones")
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(1, 0, 2))
out = agg([inp1, inp2])
model = keras.Model(inputs=[inp1, inp2], outputs=out)
x1 = np.array([[[1, 1]]])
x2 = np.zeros((1, 1, 0, 2))
actual = model.predict([x1, x2])
expected = np.array([[[2, 2, 2, 2]]])
assert expected == pytest.approx(actual)
def test_graphsage_constructor():
gs = GraphSAGE(
layer_sizes=[4], n_samples=[2], input_dim=2, normalize="l2", multiplicity=1
)
assert gs.dims == [2, 4]
assert gs.n_samples == [2]
assert gs.max_hops == 1
assert gs.bias
assert len(gs._aggs) == 1
# Check incorrect normalization flag
with pytest.raises(ValueError):
GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
normalize=lambda x: x,
multiplicity=1,
)
with pytest.raises(ValueError):
GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
normalize="unknown",
multiplicity=1,
)
# Check requirement for generator or n_samples
with pytest.raises(KeyError):
GraphSAGE(layer_sizes=[4])
# Construction from generator
G = example_graph(feature_size=3)
gen = GraphSAGENodeGenerator(G, batch_size=2, num_samples=[2, 2])
gs = GraphSAGE(layer_sizes=[4, 8], generator=gen, bias=True)
# The GraphSAGE should no longer accept a Sequence
t_gen = gen.flow([1, 2])
with pytest.raises(TypeError):
gs = GraphSAGE(layer_sizes=[4, 8], generator=t_gen, bias=True)
assert gs.dims == [3, 4, 8]
assert gs.n_samples == [2, 2]
assert gs.max_hops == 2
assert gs.bias
assert len(gs._aggs) == 2
def test_graphsage_constructor_passing_aggregator():
gs = GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
multiplicity=1,
aggregator=MeanAggregator,
)
assert gs.dims == [2, 4]
assert gs.n_samples == [2]
assert gs.max_hops == 1
assert gs.bias
assert len(gs._aggs) == 1
with pytest.raises(TypeError):
GraphSAGE(
layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1, aggregator=1
)
def test_graphsage_constructor_1():
gs = GraphSAGE(
layer_sizes=[4, 6, 8],
n_samples=[2, 4, 6],
input_dim=2,
multiplicity=1,
bias=True,
dropout=0.5,
)
assert gs.dims == [2, 4, 6, 8]
assert gs.n_samples == [2, 4, 6]
assert gs.max_hops == 3
assert gs.bias
assert len(gs._aggs) == 3
def test_graphsage_apply():
gs = GraphSAGE(
layer_sizes=[4],
n_samples=[2],
bias=False,
input_dim=2,
multiplicity=1,
normalize=None,
kernel_initializer="ones",
)
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(2, 2))
out = gs([inp1, inp2])
model = keras.Model(inputs=[inp1, inp2], outputs=out)
def test_graphsage_apply_1():
gs = GraphSAGE(
layer_sizes=[2, 2, 2],
n_samples=[2, 2, 2],
bias=False,
input_dim=2,
multiplicity=1,
normalize=None,
kernel_initializer="ones",
)
inp = [keras.Input(shape=(i, 2)) for i in [1, 2, 4, 8]]
out = gs(inp)
model = keras.Model(inputs=inp, outputs=out)
x = [
np.array([[[1, 1]]]),
np.array([[[2, 2], [2, 2]]]),
np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
np.array([[[4, 4], [4, 4], [4, 4], [4, 4], [5, 5], [5, 5], [5, 5], [5, 5]]]),
]
expected = np.array([[16, 25]])
actual = model.predict(x)
assert expected == pytest.approx(actual)
# Use the node model:
xinp, xout = gs.build()
model2 = keras.Model(inputs=xinp, outputs=xout)
assert pytest.approx(expected) == model2.predict(x)
def test_graphsage_serialize():
gs = GraphSAGE(
layer_sizes=[4],
n_samples=[2],
bias=False,
input_dim=2,
multiplicity=1,
normalize=None,
)
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(2, 2))
out = gs([inp1, inp2])
model = keras.Model(inputs=[inp1, inp2], outputs=out)
# Save model
model_json = model.to_json()
# Set all weights to one
model_weights = [np.ones_like(w) for w in model.get_weights()]
# Load model from json & set all weights
model2 = keras.models.model_from_json(
model_json, custom_objects={"MeanAggregator": MeanAggregator}
)
model2.set_weights(model_weights)
# Test loaded model
x1 = np.array([[[1, 1]]])
x2 = np.array([[[2, 2], [3, 3]]])
expected = np.array([[2, 2, 5, 5]])
actual = model2.predict([x1, x2])
assert expected == pytest.approx(actual)
def test_graphsage_zero_neighbours():
gs = GraphSAGE(
layer_sizes=[2, 2],
n_samples=[0, 0],
bias=False,
input_dim=2,
multiplicity=1,
normalize="none",
kernel_initializer="ones",
)
inp = [keras.Input(shape=(i, 2)) for i in [1, 0, 0]]
out = gs(inp)
model = keras.Model(inputs=inp, outputs=out)
x = [np.array([[[1.5, 1]]]), np.zeros((1, 0, 2)), np.zeros((1, 0, 2))]
actual = model.predict(x)
expected = np.array([[5, 5]])
assert actual == pytest.approx(expected)
def test_graphsage_passing_activations():
gs = GraphSAGE(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1)
assert gs.activations == ["linear"]
gs = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
assert gs.activations == ["relu", "linear"]
gs = GraphSAGE(
layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2, multiplicity=1
)
assert gs.activations == ["relu", "relu", "linear"]
with pytest.raises(ValueError):
GraphSAGE(
layer_sizes=[4, 4, 4],
n_samples=[2, 2, 2],
input_dim=2,
multiplicity=1,
activations=["relu"],
)
with pytest.raises(ValueError):
GraphSAGE(
layer_sizes=[4, 4, 4],
n_samples=[2, 2, 2],
input_dim=2,
multiplicity=1,
activations=["relu"] * 2,
)
with pytest.raises(ValueError):
GraphSAGE(
layer_sizes=[4, 4, 4],
n_samples=[2, 2, 2],
input_dim=2,
multiplicity=1,
activations=["fred", "wilma", "barney"],
)
gs = GraphSAGE(
layer_sizes=[4, 4, 4],
n_samples=[2, 2, 2],
input_dim=2,
multiplicity=1,
activations=["linear"] * 3,
)
assert gs.activations == ["linear"] * 3
def test_graphsage_passing_regularisers():
with pytest.raises(ValueError):
GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
multiplicity=1,
kernel_initializer="fred",
)
GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
multiplicity=1,
kernel_initializer="ones",
)
GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
multiplicity=1,
kernel_initializer=initializers.ones(),
)
GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
multiplicity=1,
kernel_regularizer=regularizers.l2(0.01),
)
with pytest.raises(ValueError):
GraphSAGE(
layer_sizes=[4],
n_samples=[2],
input_dim=2,
multiplicity=1,
kernel_regularizer="wilma",
)
|
StarcoderdataPython
|
9724877
|
<reponame>Crashillo/CatAtom2Osm
import os
from qgis.core import QgsFeature, QgsField, QgsFields
from qgis.PyQt.QtCore import QVariant
from catatom2osm.geo.geometry import Geometry
from catatom2osm.geo.layer.base import BaseLayer
from catatom2osm.geo.types import WKBPoint
class DebugWriter:
"""A QgsVectorFileWriter for debugging purposess."""
def __init__(
self, filename, layer, driver_name="ESRI Shapefile", geom_type=WKBPoint
):
"""
Construct.
Args:
filename (str): File name of this layer
layer (QgsVectorLayer): A layer to test.
driver_name (str): Defaults to ESRI Shapefile.
"""
fpath = os.path.join(
os.path.dirname(layer.dataProvider().dataSourceUri()), filename
)
fields = QgsFields()
fields.append(QgsField("note", QVariant.String, len=100))
writer = BaseLayer.get_writer(fpath, layer.crs(), fields, geom_type)
self.fields = fields
self.writer = writer
def add_point(self, point, note=None):
"""Add a point to the layer with the attribute note."""
feat = QgsFeature(QgsFields(self.fields))
geom = Geometry.fromPointXY(point)
feat.setGeometry(geom)
if note:
feat.setAttribute("note", note[:254])
return self.addFeature(feat)
def addFeature(self, *args, **kwargs):
self.writer.addFeature(*args, **kwargs)
|
StarcoderdataPython
|
1632613
|
import json
import os
import threading
from os.path import exists, join
from typing import List, Optional, Callable, Dict
from entity import Item, from_dict
from tool4log import logger
from tool4time import now_str
def load_data(filename):
with open(filename, "r", encoding="utf8") as f:
return json.load(f)
class MemoryDataBase:
DATABASE_FOLDER = "data/database"
DATA_FILE = join(DATABASE_FOLDER, "data.json")
def __init__(self):
self.lock = threading.Lock()
self.__init_folder()
self.filename = MemoryDataBase.DATA_FILE
self.data: List = []
if exists(self.filename):
self.data = load_data(self.filename)
logger.info(f"{MemoryDataBase.__name__}: Load Data From File")
if self.data:
self.current_id = max(map(lambda i: i['id'], self.data))
else:
            # Start from 1 by default to avoid problems caused by a zero id
self.current_id = 1
def __init_folder(self):
if not exists(self.DATABASE_FOLDER):
os.mkdir(self.DATABASE_FOLDER)
def __next_id(self):
with self.lock:
self.current_id = self.current_id + 1
return self.current_id
def select(self, iid: int) -> Optional[dict]:
for item in self.data:
if item['id'] == iid:
return item
return None
def select_by(self, where: Callable[[dict], bool] = lambda _: True,
select: Callable[[dict], tuple] = lambda x: from_dict(x)) -> list:
return list(map(select, filter(where, self.data)))
def select_one(self, where: Callable[[dict], bool] = lambda _: True,
select: Callable[[dict], tuple] = lambda x: from_dict(x)):
return self.select_by(where, select)[0]
def update_by(self, where: Callable[[dict], bool], update: Callable[[dict], None]) -> int:
ans = len(list(map(update, filter(where, self.data))))
self.save2file()
return ans
def select_group_by(self, ans: Dict[str, List], f: Callable[[dict], str]) -> Dict[str, List]:
for item in self.data:
key = f(item)
if key in ans:
ans[key].append(from_dict(item))
return ans
def insert(self, item: Item) -> int:
item.id = self.__next_id()
item.create_time = now_str()
with self.lock:
self.data.append(item.to_dict())
logger.info(f"{MemoryDataBase.__name__}: Insert Item: {item}")
self.save2file()
return item.id
def __get_idx(self, item: Item) -> Optional[int]:
for idx, dic in enumerate(self.data):
if dic['id'] == item.id:
return idx
return None
def remove(self, item: Item):
with self.lock:
idx = self.__get_idx(item)
            # "is not None" and a plain truthiness check usually agree,
            # but a bare check is wrong when idx equals 0
if idx is not None:
self.data.pop(idx)
self.save2file()
def save2file(self):
json_data = json.dumps(self.data)
with open(self.filename, "w", encoding="utf8") as f:
f.write(json_data)
logger.info(f"{MemoryDataBase.__name__}: Success Save Date to File")
|
StarcoderdataPython
|
5092649
|
<filename>csv2json.py
import csv, json, sys
csvFilePath = sys.argv[1]
jsonFilePath = sys.argv[2]
data = {}
with open(csvFilePath) as csvFile:
csvReader = csv.DictReader(csvFile)
for rows in csvReader:
id_ = rows['id']
data[id_] = rows
with open(jsonFilePath, 'w') as jsonFile:
jsonFile.write(json.dumps(data, indent=4))
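# Usage sketch (illustration only): python csv2json.py input.csv output.json
# The CSV is expected to contain an 'id' column, which becomes the JSON key.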
|
StarcoderdataPython
|
3485642
|
"""
Test query of case recorder file.
"""
import glob
import os.path
import unittest
from math import isnan
from openmdao.main.api import Assembly, Component, VariableTree, set_as_top
from openmdao.main.datatypes.api import Array, Float, VarTree
from openmdao.lib.casehandlers.api import CaseDataset, \
JSONCaseRecorder, BSONCaseRecorder
from openmdao.lib.drivers.api import FixedPointIterator, SLSQPdriver
from openmdao.lib.optproblems import sellar
from openmdao.util.testutil import assert_rel_error
class States(VariableTree):
y = Array([0.0, 0.0])
class Globals(VariableTree):
z1 = Float(0.0)
z2 = Float(0.0)
class Half(Component):
z2a = Float(0.0, iotype='in')
z2b = Float(0.0, iotype='out')
def execute(self):
self.z2b = 0.5*self.z2a
class SellarMDF(Assembly):
""" Optimization of the Sellar problem using MDF
Disciplines coupled with FixedPointIterator.
"""
def configure(self):
""" Creates a new Assembly with this problem
Optimal Design at (1.9776, 0, 0)
Optimal Objective = 3.18339"""
# Sub assembly
sub = self.add('sub', Assembly())
# Inner Loop - Full Multidisciplinary Solve via fixed point iteration
sub.add('driver', FixedPointIterator())
sub.add('dis1', sellar.Discipline1())
sub.add('dis2', sellar.Discipline2())
sub.driver.workflow.add(['dis1', 'dis2'])
# Make all connections
sub.connect('dis1.y1', 'dis2.y1')
sub.connect('dis1.z1', 'dis2.z1')
# Iteration loop
sub.driver.add_parameter('dis1.y2')
sub.driver.add_constraint('dis2.y2 = dis1.y2')
# Solver settings
sub.driver.max_iteration = 100
sub.driver.tolerance = .00001
sub.driver.print_convergence = False
# Subassy boundaries
sub.add('globals', VarTree(Globals(), iotype='in'))
sub.add('states', VarTree(States(), iotype='out'))
sub.connect('globals.z1', 'dis1.z1')
# Note, dis1.z2 is connected by input-input conn
sub.connect('globals.z2', 'dis1.z2')
sub.connect('globals.z2', 'dis2.z2')
sub.create_passthrough('dis1.x1')
sub.connect('dis1.y1', 'states.y[0]')
sub.connect('dis2.y2', 'states.y[1]')
# Global Optimization
self.add('driver', SLSQPdriver())
self.driver.gradient_options.force_fd = True
#self.driver.iprint = 3
# Extra comp
self.add('half', Half())
self.connect('half.z2b', 'sub.globals.z2')
self.driver.workflow.add(['half', 'sub'])
# Add Parameters to optimizer
self.driver.add_parameter('sub.globals.z1', low=-10.0, high=10.0)
self.driver.add_parameter('half.z2a', low=0.0, high=10.0)
self.driver.add_parameter('sub.x1', low=0.0, high=10.0)
# Optimization parameters
self.driver.add_objective('(sub.x1)**2 + sub.globals.z2 + sub.states.y[0] + math.exp(-sub.states.y[1])')
self.driver.add_constraint('3.16 < sub.states.y[0]')
self.driver.add_constraint('sub.states.y[1] < 24.0')
self.sub.globals.z1 = 5.0
self.half.z2a = 2.0
self.sub.x1 = 1.0
def create_files():
""" Create/update test data files. """
prob = set_as_top(SellarMDF())
prob.name = "top"
prob.recorders = [JSONCaseRecorder('sellar_json.new'),
BSONCaseRecorder('sellar_bson.new')]
prob.run()
class TestCase(unittest.TestCase):
def setUp(self):
# create_files() # Uncomment to create 'sellar.new'
path = os.path.join(os.path.dirname(__file__), 'sellar.json')
self.cds = CaseDataset(path, 'json')
def tearDown(self):
self.cds = None
for path in glob.glob('cases.*'):
try:
os.remove(path)
            except OSError:  # WindowsError only exists on Windows; OSError covers both
# Still in use (recorder or dataset hasn't been deleted yet).
pass
def test_query(self):
# Full dataset.
vnames = self.cds.data.var_names().fetch()
expected = [
'_driver_id', '_id', '_parent_id', '_pseudo_0', '_pseudo_1',
'_pseudo_2', 'driver.workflow.itername', 'error_message',
'error_status', 'half.derivative_exec_count', 'half.exec_count',
'half.itername', 'half.z2a', 'half.z2b', 'sub._pseudo_0',
'sub.derivative_exec_count', 'sub.dis1.derivative_exec_count',
'sub.dis1.exec_count', 'sub.dis1.itername', 'sub.dis1.y1',
'sub.dis1.y2', 'sub.dis2.derivative_exec_count',
'sub.dis2.exec_count', 'sub.dis2.itername', 'sub.dis2.y2',
'sub.driver.workflow.itername', 'sub.exec_count', 'sub.globals.z1',
'sub.itername', 'sub.states', 'sub.states.y[0]', 'sub.states.y[1]',
'sub.x1', 'timestamp']
self.assertEqual(vnames, expected)
cases = self.cds.data.fetch()
self.assertEqual(len(cases), 142)
self.assertEqual(len(cases[0]), len(expected))
# Specific variables.
names = ['half.z2a', 'sub.dis1.y1', 'sub.dis2.y2', 'sub.x1']
vnames = self.cds.data.vars(names).var_names().fetch()
self.assertEqual(vnames, names)
cases = self.cds.data.vars(names).fetch()
self.assertEqual(len(cases), 142)
self.assertEqual(len(cases[0]), len(names))
iteration_case_142 = {
"half.z2a": 3.2649235987085278e-15,
"sub.dis1.y1": 3.1600041592009194,
"sub.dis2.y2": 3.755280110989017,
"sub.x1": 2.8984826597319301e-15
}
for name, val in zip(names, cases[-1]):
self.assertAlmostEqual(val, iteration_case_142[name])
# Local to driver.
# For some reason the top-level driver isn't the last recorded.
cases = self.cds.data.local().vars(names).fetch()
self.assertEqual(len(cases), 142)
last = cases[-1]
self.assertEqual(len(last), len(names))
for name in ('half.z2a', 'sub.x1'):
self.assertTrue(isnan(last[name]))
for name in ('sub.dis1.y1', 'sub.dis2.y2'):
assert_rel_error(self, last[name], iteration_case_142[name], 0.001)
# Transposed.
vars = self.cds.data.local().vars(names).by_variable().fetch()
self.assertEqual(len(vars), len(names))
for name in ('half.z2a', 'sub.x1'):
self.assertEqual(len(vars[name]), 142)
self.assertTrue(isnan(vars[name][-1]))
for name in ('sub.dis1.y1', 'sub.dis2.y2'):
self.assertEqual(len(vars[name]), 142)
assert_rel_error(self, vars[name][-1], iteration_case_142[name], 0.001)
def test_parent(self):
# Full dataset names by specifying a top-level case.
parent = 'e52a477a-588e-11e4-8355-080027a1f086' # iteration_case_6
vnames = self.cds.data.parent_case(parent).var_names().fetch()
expected = [
'_driver_id', '_id', '_parent_id', '_pseudo_0', '_pseudo_1',
'_pseudo_2', 'driver.workflow.itername', 'error_message',
'error_status', 'half.derivative_exec_count', 'half.exec_count',
'half.itername', 'half.z2a', 'half.z2b', 'sub._pseudo_0',
'sub.derivative_exec_count', 'sub.dis1.derivative_exec_count',
'sub.dis1.exec_count', 'sub.dis1.itername', 'sub.dis1.y1',
'sub.dis1.y2', 'sub.dis2.derivative_exec_count',
'sub.dis2.exec_count', 'sub.dis2.itername', 'sub.dis2.y2',
'sub.driver.workflow.itername', 'sub.exec_count', 'sub.globals.z1',
'sub.itername', 'sub.states', 'sub.states.y[0]', 'sub.states.y[1]',
'sub.x1', 'timestamp']
self.assertEqual(vnames, expected)
cases = self.cds.data.parent_case(parent).fetch()
self.assertEqual(len(cases), 6)
self.assertEqual(len(cases[0]), len(expected))
iteration_case_1 = {
"sub._pseudo_0": 10.176871642217915,
"sub.dis1.derivative_exec_count": 0,
"sub.dis1.exec_count": 1,
"sub.dis1.itername": "1-sub.1-dis1",
"sub.dis1.y1": 26.8,
"sub.dis1.y2": 1.0,
"sub.dis2.derivative_exec_count": 0,
"sub.dis2.exec_count": 1,
"sub.dis2.itername": "1-sub.1-dis2",
"sub.dis2.y2": 11.176871642217915,
"sub.driver.workflow.itername": "1-sub.1"
}
self.verify(vnames, cases[0], iteration_case_1)
iteration_case_6 = {
# Data from parent.
"_pseudo_0": 26.803946487677322,
"_pseudo_1": -21.643929454616536,
"_pseudo_2": -13.019645649693533,
"driver.workflow.itername": "1",
"half.derivative_exec_count": 0,
"half.exec_count": 1,
"half.itername": "1-half",
"half.z2a": 2.0,
"half.z2b": 1.0,
"sub.derivative_exec_count": 0,
"sub.exec_count": 1,
"sub.globals.z1": 5.0,
"sub.itername": "1-sub",
"sub.states": {
"y": [
24.803929454616537,
10.980354350306467
]
},
"sub.states.y[0]": 24.803929454616537,
"sub.states.y[1]": 10.980354350306467,
"sub.x1": 1.0,
# Last data from sub.
"sub._pseudo_0": 1.6233891457773097e-06,
"sub.dis1.derivative_exec_count": 0,
"sub.dis1.exec_count": 5,
"sub.dis1.itername": "1-sub.5-dis1",
"sub.dis1.y1": 24.803929454616537,
"sub.dis1.y2": 10.980352726917321,
"sub.dis2.derivative_exec_count": 0,
"sub.dis2.exec_count": 5,
"sub.dis2.itername": "1-sub.5-dis2",
"sub.dis2.y2": 10.980354350306467,
"sub.driver.workflow.itername": "1-sub.5"
}
self.verify(vnames, cases[-1], iteration_case_6)
def verify(self, names, case, expected):
for name, value in expected.items():
i = names.index(name)
if isinstance(value, float):
assert_rel_error(self, case[i], value, 0.001)
else:
self.assertEqual(case[i], value)
def test_driver(self):
# Dataset of a driver.
vnames = self.cds.data.driver('sub.driver').var_names().fetch()
expected = [
'_driver_id', '_id', '_parent_id', 'error_message', 'error_status',
'sub._pseudo_0', 'sub.dis1.derivative_exec_count',
'sub.dis1.exec_count', 'sub.dis1.itername', 'sub.dis1.y1',
'sub.dis1.y2', 'sub.dis2.derivative_exec_count',
'sub.dis2.exec_count', 'sub.dis2.itername', 'sub.dis2.y2',
'sub.driver.workflow.itername', 'timestamp']
self.assertEqual(vnames, expected)
cases = self.cds.data.driver('sub.driver').fetch()
self.assertEqual(len(cases), 126)
self.assertEqual(len(cases[0]), len(expected))
def test_bson(self):
# Simple check of _BSONReader.
names = ['half.z2a', 'sub.dis1.y1', 'sub.dis2.y2', 'sub.x1']
path = os.path.join(os.path.dirname(__file__), 'sellar.json')
json_cases = CaseDataset(path, 'json').data.vars(names).fetch()
path = os.path.join(os.path.dirname(__file__), 'sellar.bson')
bson_cases = CaseDataset(path, 'bson').data.vars(*names).fetch()
for json_case, bson_case in zip(json_cases, bson_cases):
for json_val, bson_val in zip(json_case, bson_case):
if isnan(json_val):
self.assertTrue(isnan(bson_val))
else:
self.assertEqual(bson_val, json_val)
def test_json(self):
# Simple check of _JSONReader.
path = os.path.join(os.path.dirname(__file__), 'jsonrecorder.json')
cases = CaseDataset(path, 'json').data.fetch()
self.assertEqual(len(cases), 10)
path = os.path.join(os.path.dirname(__file__), 'truncated.json')
cases = CaseDataset(path, 'json').data.fetch()
self.assertEqual(len(cases), 7)
def test_restore(self):
# Restore from case, run, verify outputs match expected.
top = set_as_top(SellarMDF())
top.name = 'top'
top.recorders = [JSONCaseRecorder()]
top.run()
assert_rel_error(self, top.sub.globals.z1, 1.977639, .0001)
assert_rel_error(self, top.half.z2a, 0., .0001)
assert_rel_error(self, top.sub.x1, 0., .0001)
assert_rel_error(self, top.sub.states.y[0], 3.160004, .0001)
assert_rel_error(self, top.sub.states.y[1], 3.755280, .0001)
assert_rel_error(self, top.driver.eval_objective(), 3.18339413394, .0001)
cds = CaseDataset('cases.json', 'json')
cases = cds.data.fetch()
n_orig = len(cases) # Typically 142
top = set_as_top(SellarMDF())
cds.restore(top, cases[-1]['_id'])
top.recorders = [JSONCaseRecorder('cases.restored')]
top.run()
assert_rel_error(self, top.sub.globals.z1, 1.977639, .0001)
assert_rel_error(self, top.half.z2a, 0., .0001)
assert_rel_error(self, top.sub.x1, 0., .0001)
assert_rel_error(self, top.sub.states.y[0], 3.160000, .0001)
assert_rel_error(self, top.sub.states.y[1], 3.755278, .0001)
assert_rel_error(self, top.driver.eval_objective(), 3.18339397762, .0001)
cases = CaseDataset('cases.restored', 'json').data.fetch()
# Exact case counts are unreliable, just assure restore was quicker.
self.assertTrue(len(cases) < n_orig/4) # Typically 15
def test_write(self):
# Read in a dataset and write out a selected portion of it.
path = os.path.join(os.path.dirname(__file__), 'jsonrecorder.json')
cases = CaseDataset(path, 'json').data.fetch()
self.assertEqual(len(cases), 10)
self.assertEqual(len(cases[0]), 19)
names = ('comp1.x', 'comp1.y', 'comp1.z', 'comp2.z')
CaseDataset(path, 'json').data.vars(names).write('cases.reduced')
reduced = CaseDataset('cases.reduced', 'json').data.fetch()
self.assertEqual(len(reduced), 10)
self.assertEqual(len(reduced[0]), 10)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
6623623
|
<reponame>hvnobug/stock
# -*-coding=utf-8-*-
__author__ = 'Rocky'
'''
http://30daydo.com
Contact: <EMAIL>
'''
'''
Record the daily profit and loss. Completion: 100%
'''
import pandas as pd
import os
import tushare as ts
import datetime
def getCodeFromExcel(filename):
    # Read the stock codes from the Excel sheet, zero-padding them to 6 digits
    # Get the number of stocks
df=pd.read_excel(filename)
code_list = df['证券代码'].values
quantity_list=df['股票余额'].values
code=[]
quantity=[]
for i in range(len(code_list)):
code.append(str(code_list[i]).zfill(6))
        # A map() call could do this in one step
quantity.append(quantity_list[i])
return code,quantity
def calc(code):
settlement = df[df['code']==code]['settlement'].values
percentage = df[df['code']==code]['changepercent'].values
trade = df[df['code']==code]['trade'].values
#print(percentage)
#settlement=df[df['code'==code]]['settlement'].values
#percentage=df[df['code'==code].index]['changepercent'].values
    # The rounding is applied later, in today_win_lost()
return settlement,percentage,trade
def today_win_lost(filename_path):
filename=os.path.join(filename_path,'ownstock.xls')
code,quantity=getCodeFromExcel(filename)
result=[]
percentage_list=[]
trade_list=[]
for i in range(len(code)):
settlement,percentage,trade=calc(code[i])
print("settlement", settlement)
print("percent", percentage)
print("trade", trade)
profit=round(settlement[0]*percentage[0]*quantity[i]*0.01,1)
result.append(profit)
percentage_list.append(percentage[0])
trade_list.append(trade[0])
return result,code,percentage_list,trade_list
def join_dataframe(filename,today):
current_profile=today+'当天贡献'
    # today_win_lost needs the folder holding ownstock.xls; assume it sits
    # next to the profile spreadsheet
    result, code, percentage_list, trade_list = today_win_lost(os.path.dirname(filename))
s1=pd.DataFrame({current_profile:result})
#s2=pd.DataFrame({'当天涨幅':percentage_list})
#s3=pd.DataFrame({'当天价钱':trade_list})
#print(s)
df=pd.read_excel(filename)
#del df['交易市场']
#del df['股东帐户']
#del df['盈亏比(%)']
#del df['在途数量']
#del df['当天贡献']
#del df['']
#del df['']
df['证券代码']=code
#print(code)
df['市价']=trade_list
df['当天涨幅']=percentage_list
    # A column's values can be replaced directly like this
#df=df.join(s2,how='right')
df=df.join(s1,how='right')
#df=df.join(s3,how='right')
return df
def main(today):
path=os.path.join(os.path.dirname(__file__),'data')
filename=os.path.join(path,'each_day_profile.xls')
org_filename=os.path.join(path,'2016-09-30_all_.xls')
#df_filename=os.path.join(path,'each_day_profile.xls')
#df=pd.read_excel(org_filename)
    global df  # calc() reads df at module scope, so publish it there
    df = ts.get_today_all()
new_df=join_dataframe(filename,today)
save_name=os.path.join(path,"each_day_profile.xls")
    # Note: this overwrites any existing file at save_name
new_df.to_excel(save_name)
if __name__ == "__main__":
today=datetime.datetime.now().strftime("%Y-%m-%d")
if not ts.is_holiday(today):
main(today)
|
StarcoderdataPython
|
328882
|
#########################################################################
# #
# Grupo Developers #
# #
# GNU General Public License v3 #
# #
#########################################################################
from models.message import Message
from models.chat import Chat
from models.user import User
class MessagesController:
def __init__(self):
self.chats = []
self.messages = []
self.users = []
def add(self, telegram_message):
message = extract_message_object(telegram_message)
chat = extract_chat_object(telegram_message)
user = extract_user_object(telegram_message)
if chat not in self.chats:
self.chats.append(chat)
if user not in self.users:
self.users.append(user)
self.messages.append(message)
for chat in self.chats:
if chat.chat_id == message.chat_id:
chat.messages.append(message)
def autoclear(self):
self.chats = []
self.messages = []
self.users = []
def extract_message_object(telegram_message):
return Message(
message_id=telegram_message.message_id,
text=telegram_message.text,
date=telegram_message.date,
chat_id=telegram_message.chat.id,
author_id=telegram_message['from'].id,
reply_message_id=telegram_message.reply_to_message if hasattr(
telegram_message, 'reply_to_message') else ""
)
def extract_chat_object(telegram_message):
return Chat(
chat_id=telegram_message.chat.id,
chat_type=telegram_message.chat.type,
title=telegram_message.chat.title if hasattr(
telegram_message.chat, 'title') else ""
)
def extract_user_object(telegram_message):
return User(
telegram_id=telegram_message['from'].id,
first_name=telegram_message['from'].first_name,
last_name=telegram_message['from'].last_name or '',
username=telegram_message['from'].username or '',
is_bot=telegram_message['from'].is_bot
)
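# Usage sketch (telegram_message is a hypothetical message object from a
# Telegram bot library, shaped as the extractors above assume):
# controller = MessagesController()
# controller.add(telegram_message)   # files the message under its chat
# controller.autoclear()             # drops all cached chats/messages/users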
|
StarcoderdataPython
|
3353183
|
import math
import pickle
import torch
from torch.utils import data
from tensorfn import load_arg_config
from tqdm import tqdm
import lmdb
from torch_imputer import best_alignment
from config import CTCASR
from dataset import ASRDataset, collate_data
from model import Transformer
from evaluate import ctc_decode
def get_symbol(state, targets_list):
if state % 2 == 0:
symbol = 0
else:
symbol = targets_list[state // 2]
return symbol
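# Example: for targets [5, 9] the CTC state lattice alternates blanks and
# labels, so get_symbol maps state 0 -> 0 (blank), 1 -> 5, 2 -> 0 (blank),
# 3 -> 9, 4 -> 0 (blank).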
if __name__ == "__main__":
device = "cuda"
conf = load_arg_config(CTCASR)
with open("trainval_indices.pkl", "rb") as f:
split_indices = pickle.load(f)
train_set = ASRDataset(conf.dataset.path, indices=split_indices["train"])
model = Transformer(
conf.dataset.n_vocab,
conf.model.delta,
conf.dataset.n_mels,
conf.model.feature_channel,
conf.model.dim,
conf.model.dim_ff,
conf.model.n_layer,
conf.model.n_head,
conf.model.dropout,
).to(device)
ckpt = torch.load(conf.ckpt, map_location=lambda storage, loc: storage)
model.load_state_dict(ckpt["model"])
model.eval()
train_loader = conf.training.dataloader.make(train_set, collate_fn=collate_data)
pbar = tqdm(train_loader)
show_sample = 0
db_i = 0
with torch.no_grad() as no_grad, lmdb.open(
conf.dataset.alignment, map_size=1024 ** 4, readahead=False
) as env:
for mels, tokens, mel_lengths, token_lengths, texts, files in pbar:
mels = mels.to(device)
tokens = tokens.to(device).to("cpu")
mel_len_reduce = torch.ceil(
mel_lengths.to(torch.float32) / conf.model.reduction
).to(torch.int64)
align_in = tokens.new_ones(
mels.shape[0], math.ceil(mels.shape[1] / conf.model.reduction)
).to(device)
log_p = torch.log_softmax(model(mels, align_in), 2)
# log_p = log_p.to("cpu")
tokens = tokens.to("cpu")
best_align = best_alignment(
log_p.transpose(0, 1),
tokens,
mel_len_reduce,
token_lengths,
zero_infinity=True,
)
align_probs = []
for l_p, best_a, toks in zip(log_p.to("cpu"), best_align, tokens.tolist()):
align_p = []
for p, a in zip(l_p, best_a):
align_p.append(p[get_symbol(a, toks)].item())
align_probs.append(align_p)
for model_align, mel_l, b_align, b_p, file, toks in zip(
log_p, mel_len_reduce, best_align, align_probs, files, tokens.tolist()
):
model_p, model_align = model_align.max(1)
model_p = model_p[:mel_l].sum().item()
model_align = model_align[:mel_l].tolist()
b_p = sum(b_p)
with env.begin(write=True) as txn:
txn.put(
str(db_i).encode("utf-8"),
pickle.dumps((b_align, b_p, model_align, model_p, file)),
)
db_i += 1
if show_sample < 8:
model_align = train_set.decode(ctc_decode(model_align))
b_align = train_set.decode(
ctc_decode([get_symbol(a, toks) for a in b_align])
)
print(
f"model: {model_align} ({model_p:.3f})\nbest: {b_align} ({b_p:.3f})"
)
show_sample += 1
with env.begin(write=True) as txn:
txn.put(b"length", str(db_i).encode("utf-8"))
txn.put(b"meta", pickle.dumps({"conf": conf}))
|
StarcoderdataPython
|
153330
|
<reponame>alex4acre/ab-python<filename>main.py
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import json
import pylogix
from pylogix import PLC
import datalayer
from datalayer.variant import Variant
from app.ab_provider_node import ABnode
bPLCPresent = False
testJson = """{"tag":
[
{"name":"SinCounter","type":"REAL"},
{"name":"Line1_OEE","type":"REAL"},
{"name":"mySINReflection_x10","type":"REAL"},
{"name":"myLINT","type":"LINT"},
{"name":"MyString","type":"STRING"},
{"name":"MyControllerBOOL","type":"BOOL"},
{"name":"MyControllerBOOL1","type":"BOOL"},
{"name":"MyControllerBOOL2","type":"REAL"},
{"name":"MyControllerBOOL3","type":"REAL"},
{"name":"MyControllerBOOL4","type":"REAL"},
{"name":"MyControllerBOOL5","type":"REAL"},
{"name":"MyControllerBOOL6","type":"REAL"}
]
}"""
# {"name":"MyControllerReal","type":"REAL"}
#{"name":"MyControllerBOOL","type":"BOOL"}
def main():
with datalayer.system.System("") as datalayer_system:
datalayer_system.start(False)
# This is the connection string for TCP in the format: tcp://USER:PASSWORD@IP_ADDRESS:PORT
        # Please check and change according to your environment:
# - USER: Enter your user name here - default is boschrexroth
# - PASSWORD: Enter your password here - default is <PASSWORD>
# - IP_ADDRESS: 127.0.0.1 If you develop in WSL and you want to connect to a ctrlX CORE virtual with port forwarding
# 10.0.2.2 If you develop in a VM (Virtual Box, QEMU,...) and you want to connect to a ctrlX virtual with port forwarding
        # 192.168.1.1 If you are using a ctrlX CORE or ctrlX CORE virtual with TAP adapter
#connectionProvider = "tcp://boschrexroth:[email protected]:2070"
connectionProvider = "tcp://boschrexroth:[email protected]:2070"
if 'SNAP' in os.environ:
connectionProvider = "ipc://"
if bPLCPresent:
print("PLC is present")
#Load the json from file
else:
print("PLC is not present")
jsonData = json.loads(testJson)
#the list that contains all of the read data
my_list = []
print("Connecting", connectionProvider)
with datalayer_system.factory().create_provider(connectionProvider) as provider:
result = provider.start()
if result is not datalayer.variant.Result.OK:
print("ERROR Starting Data Layer Provider failed with:", result)
return
myVariantList = []
myTaglist = []
#parse the tag list
tagList = jsonData['tag']
for idx, tag in enumerate(tagList):
print(idx)
print(tag['name'])
if bPLCPresent:
with PLC("192.168.1.9") as comm:
ret = comm.Read(tag['name'])
my_list.append(ret.Value)
else:
data = "testData"
if tag['type'] == "BOOL":
data = True
elif tag['type'] == "SINT":
data = -1
elif tag['type'] == "INT":
data = -100
elif tag['type'] == "DINT":
data = -1000
elif tag['type'] == "LINT":
data = -10000
elif tag['type'] == "USINT":
data = 1
elif tag['type'] == "UINT":
data = 10
elif tag['type'] == "UDINT":
data = 100
elif tag['type'] == "LWORD":
data = 1000
elif tag['type'] == "REAL":
data = 1.2345
elif tag['type'] == "LREAL":
data = 123456789.0123
elif tag['type'] == "DWORD":
data = 1000
elif tag['type'] == "STRING":
data = "test data"
print("appended data: " + str(data) + ", Type :" + str(type(data)))
my_list.append(data)
#segmentation faults occur if there are not distinct nodes
myTaglist.append(ABnode(provider, tag['name'], my_list, idx, tag['type']))
myTaglist[idx].register_node()
#print("Start provider")
#provider.start()
print("Running endless loop...")
counter = 0
while provider.is_connected() and counter < 6:
time.sleep(1.0) # Seconds
counter = counter + 1
print("ERROR Data Layer Provider is disconnected")
print("Stopping Data Layer Provider: ", end=" ")
result = provider.stop()
print(result)
for idx, tag in enumerate(tagList):
print("Unregister provider Node", tag['name'], end=" ")
result = provider.unregister_node("AB/" + tag['name'])
print(result)
datalayer_system.stop(True)
def provide_string(provider: datalayer.provider, name: str, abTagValues : list, listIndex : int, datatype : str):
# Create and register simple string provider node
print("Creating string provider node")
variantString = Variant()
variantString.set_string("Enter SQL script here. Use ';' as the last character to suppress result")
provider_node_str = ABnode(provider, name, variantString, abTagValues, listIndex, datatype)
provider_node_str.register_node()
return provider_node_str
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
106723
|
import os
import json
from pathlib import Path
import pem
from Crypto.PublicKey import RSA
from jupyterhub.handlers import BaseHandler
from illumidesk.authenticators.utils import LTIUtils
from illumidesk.lti13.auth import get_jwk
from tornado import web
from urllib.parse import urlencode
from urllib.parse import quote
class LTI13ConfigHandler(BaseHandler):
"""
Handles JSON configuration file for LTI 1.3
"""
async def get(self) -> None:
"""
Gets the JSON config which is used by LTI platforms
to install the external tool.
- The extensions key contains settings for specific vendors, such as canvas,
moodle, edx, among others.
- The tool uses public settings by default. Users that wish to install the tool with
private settings should either copy/paste the json or toggle the application to private
after it is installed with the platform.
- Usernames are obtained by first attempting to get and normalize values sent when
tools are installed with public settings. If private, the username is set using the
        anonymized user data when requests are sent with private installation settings.
"""
lti_utils = LTIUtils()
self.set_header('Content-Type', 'application/json')
# get the origin protocol
protocol = lti_utils.get_client_protocol(self)
self.log.debug('Origin protocol is: %s' % protocol)
# build the full target link url value required for the jwks endpoint
target_link_url = f'{protocol}://{self.request.host}/'
self.log.debug('Target link url is: %s' % target_link_url)
keys = {
'title': 'IllumiDesk',
'scopes': [
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem',
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/result.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/score',
'https://purl.imsglobal.org/spec/lti-nrps/scope/contextmembership.readonly',
'https://canvas.instructure.com/lti/public_jwk/scope/update',
'https://canvas.instructure.com/lti/data_services/scope/create',
'https://canvas.instructure.com/lti/data_services/scope/show',
'https://canvas.instructure.com/lti/data_services/scope/update',
'https://canvas.instructure.com/lti/data_services/scope/list',
'https://canvas.instructure.com/lti/data_services/scope/destroy',
'https://canvas.instructure.com/lti/data_services/scope/list_event_types',
'https://canvas.instructure.com/lti/feature_flags/scope/show',
'https://canvas.instructure.com/lti/account_lookup/scope/show',
],
'extensions': [
{
'platform': 'canvas.instructure.com',
'settings': {
'platform': 'canvas.instructure.com',
'placements': [
{
'placement': 'course_navigation',
'message_type': 'LtiResourceLinkRequest',
'windowTarget': '_blank',
'target_link_uri': target_link_url,
'custom_fields': {
'email': '$Person.email.primary',
'lms_user_id': '$User.id',
}, # noqa: E231
},
{
'placement': 'assignment_selection',
'message_type': 'LtiResourceLinkRequest',
'target_link_uri': target_link_url,
},
],
},
'privacy_level': 'public',
}
],
'description': 'IllumiDesk Learning Tools Interoperability (LTI) v1.3 tool.',
'custom_fields': {
'email': '$Person.email.primary',
'lms_user_id': '$User.id',
}, # noqa: E231
'public_jwk_url': f'{target_link_url}hub/lti13/jwks',
'target_link_uri': target_link_url,
'oidc_initiation_url': f'{target_link_url}hub/oauth_login',
}
self.write(json.dumps(keys))
class LTI13JWKSHandler(BaseHandler):
"""
Handler to serve our JWKS
"""
def get(self) -> None:
"""
- This method requires that the LTI13_PRIVATE_KEY environment variable
is set with the full path to the RSA private key in PEM format.
"""
if not os.environ.get('LTI13_PRIVATE_KEY'):
raise EnvironmentError('LTI13_PRIVATE_KEY environment variable not set')
key_path = os.environ.get('LTI13_PRIVATE_KEY')
# check the pem permission
if not os.access(key_path, os.R_OK):
            self.log.error(f'The pem file {key_path} cannot be loaded')
raise PermissionError()
private_key = pem.parse_file(key_path)
public_key = RSA.import_key(private_key[0].as_text()).publickey().exportKey()
self.log.debug('public_key is %s' % public_key)
jwk = get_jwk(public_key)
self.log.debug('the jwks is %s' % jwk)
keys_obj = {'keys': []}
keys_obj['keys'].append(jwk)
# we do not need to use json.dumps because tornado is converting our dict automatically and adding the content-type as json
# https://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write
self.write(keys_obj)
class FileSelectHandler(BaseHandler):
@web.authenticated
async def get(self):
"""Return a sorted list of notebooks recursively found in shared path"""
user = self.current_user
auth_state = await user.get_auth_state()
self.log.debug('Current user for file select handler is %s' % user.name)
# decoded = self.authenticator.decoded
self.course_id = auth_state['course_id']
self.grader_name = f'grader-{self.course_id}'
self.grader_root = Path(
'/home',
self.grader_name,
)
self.course_root = self.grader_root / self.course_id
self.course_shared_folder = Path('/shared', self.course_id)
link_item_files = []
notebooks = list(self.course_shared_folder.glob('**/*.ipynb'))
notebooks.sort()
for f in notebooks:
fpath = str(f.relative_to(self.course_shared_folder))
self.log.debug('Getting files fpath %s' % fpath)
if fpath.startswith('.') or f.name.startswith('.'):
self.log.debug('Ignoring file %s' % fpath)
continue
# generate the assignment link that uses gitpuller
user_redirect_path = quote('/user-redirect/git-pull', safe='')
assignment_link_path = f'?next={user_redirect_path}'
urlpath_workspace = f'tree/{self.course_id}/{fpath}'
self.log.debug(f'urlpath_workspace:{urlpath_workspace}')
query_params_for_git = [
('repo', f'/home/jovyan/shared/{self.course_id}'),
('branch', 'master'),
('urlpath', urlpath_workspace),
]
encoded_query_params_without_safe_chars = quote(urlencode(query_params_for_git), safe='')
url = f'https://{self.request.host}/{assignment_link_path}?{encoded_query_params_without_safe_chars}'
self.log.debug('URL to fetch files is %s' % url)
link_item_files.append(
{
'path': fpath,
'content_items': json.dumps(
{
"@context": "http://purl.imsglobal.org/ctx/lti/v1/ContentItem",
"@graph": [
{
"@type": "LtiLinkItem",
"@id": url,
"url": url,
"title": f.name,
"text": f.name,
"mediaType": "application/vnd.ims.lti.v1.ltilink",
"placementAdvice": {"presentationDocumentTarget": "frame"},
}
],
}
),
}
)
self.log.debug('Rendering file-select.html template')
html = self.render_template(
'file_select.html',
files=link_item_files,
action_url=auth_state['launch_return_url'],
)
self.finish(html)
|
StarcoderdataPython
|
93287
|
<reponame>Xeratec/crazyflie-stepStabilizer
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# vicon_wrapper.py
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Copyright (C) 2021 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import logging
import time
import math
import numpy as np
from datetime import datetime
from threading import Thread
import cflib.crtp
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
logger = logging.getLogger(__name__)
try:
sys.path.append("../extern/pyvicon/")
from pyvicon.pyvicon import PyVicon, StreamMode, Direction, Result
except Exception:
logger.exception("No PyVicon available!")
pass
class ViconWrapper(Thread):
def __init__(self, ip, period, subjects, time0, filename, exposeVicon = 'None', sendDataToCF=None):
Thread.__init__(self)
self.name = "ViconWrapper"
self.ip = ip
self.period = period
self.vicon = []
self.subjects = subjects
self.exposeVicon = None if exposeVicon == 'None' else exposeVicon
self.send_data = sendDataToCF
self.data_log = np.array([])
self.data_logging_en = False
self.text = " "
self.position = dict([])
self.quaternions = dict([])
self.t0 = time0
self.is_running = False
self.filename = filename
def run(self):
self.connect()
self.loop()
def connect(self):
self.vicon = PyVicon()
logger.info("SDK version : {}".format(self.vicon.__version__))
logger.info("{}".format(self.vicon.connect(self.ip)))
logger.info("Vicon connection status : {}".format(self.vicon.is_connected()))
self.vicon.set_stream_mode(StreamMode.ServerPush)
self.vicon.enable_segment_data()
self.vicon.enable_marker_data()
self.vicon.enable_unlabeled_marker_data()
self.vicon.enable_device_data()
self.vicon.set_axis_mapping(Direction.Forward, Direction.Left, Direction.Up)
def loop(self):
self.is_running = True
while self.is_running:
while self.vicon.get_frame() != Result.Success:
time.sleep(self.period)
subj_count = self.vicon.get_subject_count()
for i in range(0, subj_count):
name = self.vicon.get_subject_name(i)
if name in self.subjects:
timestamp = round(1000 * (datetime.now() - self.t0).total_seconds(), 3)
pos = self.vicon.get_segment_global_translation(name, name)
quat = self.vicon.get_segment_global_quaternion(name, name)
if quat is not None:
pos = pos / 1000.0
self.position[name] = pos
self.quaternions[name] = quat
if self.data_logging_en:
self.log(timestamp, name + "_" + "posx", pos[0])
self.log(timestamp, name + "_" + "posy", pos[1])
self.log(timestamp, name + "_" + "posz", pos[2])
self.log(timestamp, name + "_" + "qw", quat[0])
self.log(timestamp, name + "_" + "qx", quat[1])
self.log(timestamp, name + "_" + "qy", quat[2])
self.log(timestamp, name + "_" + "qz", quat[3])
if (self.exposeVicon is not None and self.send_data):
if name == self.exposeVicon:
self.send_data(pos, 0)
time.sleep(self.period / 1000.0)
def log(self, timestamp, id_var, value):
data_row = np.array([timestamp, id_var, value]).reshape(1, -1)
if self.data_log.shape[0] == 0:
self.data_log = data_row
else:
self.data_log = np.append(self.data_log, data_row, axis=0)
def save_log(self):
if self.filename != "":
np.savetxt(self.filename + "_vicon.csv", self.data_log, fmt='%s', delimiter=',')
logger.info("Log saved to {}".format(self.filename + "_vicon.csv"))
self.is_running = False
def save_log_noExit(self):
if self.filename != "":
np.savetxt(self.filename + "_vicon.csv", self.data_log, fmt='%s', delimiter=',')
logger.info("Log saved to {}".format(self.filename + "_vicon.csv"))
def logging_enabled(self, val):
if val == 0:
self.data_logging_en = False
else:
self.data_logging_en = True
|
StarcoderdataPython
|
3594622
|
<reponame>bluesky0960/AlgorithmTest<filename>AlgorithmTest/BOJ_STEP_PYTHON/Step2/BOJ14681.py
#https://www.acmicpc.net/problem/14681
x = int(input())
y = int(input())
if x > 0 and y > 0:
    print(1)
elif x > 0 and y < 0:
    print(4)
elif x < 0 and y < 0:
    print(3)
else:
    print(2)
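# Example: input x=12, y=5 lies in quadrant 1, so the program prints 1.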
|
StarcoderdataPython
|
8077701
|
# coding: utf-8
from apiclient.discovery import build
from apiclient.http import MediaIoBaseDownload
from httplib2 import Http
from oauth2client import file, client, tools
import io,os
from re import match
store = file.Storage('token.json')
creds = store.get()
service = build(serviceName='drive', version='v3', http=creds.authorize(Http()))
project_id = service.files().list( q="name contains '<NAME>' " ).execute().get('files')[0].get('id')
project_id = '0B0M5IL0AEOXidHZmTjZzMHlLLWc'  # NOTE: hardcoded id overrides the lookup above
project_root_items = service.files().list( q=f"'{project_id}' in parents").execute()
# Get species data table
print("Getting species data table")
species_table = list(filter( lambda x: 'tabela_especies' in x.get('name'), project_root_items.get('files') ))[0]
file_content = service.files().export_media( fileId=species_table.get('id'), mimeType='text/csv').execute()
fname = 'speciesdata.csv'
with open (fname, 'wb') as f:
f.write(file_content)
print(f"Done! Created file {fname} with species data.\n")
# Get species and families articles
print("Getting species and families articles ids")
articlesFolderId = list(filter( lambda x: x.get('name')=='Perfil', project_root_items.get('files')))[0].get('id')
def getFamiliesFoldersIds():
results = service.files().list( q=f"'{articlesFolderId}' in parents and mimeType contains 'vnd.google-apps.folder'",
fields='files(id,name)').execute().get('files')
return { r['name']: r['id'] for r in results if match('^[a-zA-Z]*ae$',r['name'])}
def getFamilyDocuments( family, families_folders_ids ):
family_folder_id = families_folders_ids.get(family)
results = service.files().list(
q=f"'{family_folder_id}' in parents and mimeType contains 'document'",
fields='files(id,name)').execute().get('files')
return results
families_folders_ids = getFamiliesFoldersIds()
ids_to_download = [ f.get('id') for f in getFamilyDocuments( 'leptodactylidae', families_folders_ids ) ]
family_folders_ids = families_folders_ids  # reuse the lookup above instead of a second API call
data = { family: [doc for doc in getFamilyDocuments(family, family_folders_ids)] for family, family_id in family_folders_ids.items()}
print("Done\n")
# Update species pages
print("Getting articles contents and updating species pages...")
basepath = '../especies/'
if not os.path.exists(basepath):
os.makedirs(basepath)
for family in data:
for doc in [ doc for doc in data[family] if doc['name']!='familia' ]:
docid = doc['id']
species = doc['name']
print(f"Requesting article for species {species} ({family})")
request = service.files().export_media(fileId=docid, mimeType='text/plain')
fname = basepath+f'{species}.md'
with open(fname, 'wb') as f:
f.write(request.execute())
print(f"Wrote file {fname}")
print("Done")
print("Updating families pages")
basepath = '../familias/'
if not os.path.exists(basepath):
os.makedirs(basepath)
for family in data:
for doc in [doc for doc in data[family] if doc['name']=='familia']:
docid=doc['id']
print(f"Requesting article for family {family}")
request = service.files().export_media(fileId=docid, mimeType='text/plain')
fname = basepath+f'{family}.md'
with open(fname, 'wb') as f:
f.write(request.execute())
print(f"wrote file {fname}")
print("Done")
|
StarcoderdataPython
|
204931
|
# -*- coding: utf-8 -*-
"""
Pipe Catalogue Data - Single Steel Pipe by LOGSTOR
Created on Mon Nov 2 20:14:25 2020
@author: <NAME>, PhD
References:
[1] LOGSTOR, Product Catalogue Version 2018.12.
https://www.logstor.com/media/6115/product-catalogue-uk-201812.pdf
"""
def LayerDiameters(DN,IS):
# DN: Nominal pipe diameter
# IS: Insulation series
DN_l=[20,25,32,40,50,65,80,100,125]
if DN not in DN_l:
raise TypeError("Nominal Pipe Diameter can be:", DN_l)
d1_l=[21.7,28.5,37.2,43.1,54.5,70.3,82.5,107.1,132.5]
d2_l=[26.9,33.7,42.4,48.3,60.3,76.1,88.9,114.3,139.7]
if IS==1:
d3_l=[84,84,104,104,119,134,154,193.6,218.2]
d4_l=[90,90,110,110,125,140,160,200,225]
elif IS==2:
d3_l=[104,104,119,119,134,154,174,218.2,242.8]
d4_l=[110,110,125,125,140,160,180,225,250]
elif IS==3:
d3_l=[119,119,134,134,154,174,193.6,242.8,272.2]
d4_l=[125,125,140,140,160,180,200,250,280]
else:
raise TypeError("Insulation Series (IS) can be one of (poor) 1, 2, or 3 (good)")
ind=DN_l.index(DN)
return d1_l[ind]*0.001,d2_l[ind]*0.001,d3_l[ind]*0.001,d4_l[ind]*0.001
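# Example call (values taken from the catalogue tables above; the four
# returned values are assumed to be steel-pipe and casing diameters in metres):
# d1, d2, d3, d4 = LayerDiameters(DN=50, IS=2)
# -> (0.0545, 0.0603, 0.134, 0.140)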
|
StarcoderdataPython
|
1831965
|
from trainerhost.trainerhostSlack import TrainerHost
if __name__ == "__main__":
trainer_host = TrainerHost()
|
StarcoderdataPython
|
3309316
|
class RingBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.list = []
self.to_be_popped = 0 # Keeps track of the oldest index
def append(self, item):
if len(self.list) < self.capacity:
self.list.append(item)
else:
self.list.pop(self.to_be_popped)
self.list.insert(self.to_be_popped, item)
if self.to_be_popped == self.capacity - 1:
self.to_be_popped = 0
else:
self.to_be_popped += 1
def get(self):
return self.list
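# Quick demonstration of the overwrite behaviour:
# buf = RingBuffer(3)
# for x in [1, 2, 3, 4, 5]:
#     buf.append(x)
# buf.get()  # -> [4, 5, 3]: the oldest items are replaced in place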
|
StarcoderdataPython
|
8135211
|
<filename>mne/tests/test_event.py
import os
import os.path as op
from numpy.testing import assert_array_almost_equal
import mne
fname = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data',
'test-eve.fif')
def test_io_events():
    """Test IO (read/write round-trip) for event files
    """
events = mne.read_events(fname)
mne.write_events('events.fif', events)
    events2 = mne.read_events('events.fif')
assert_array_almost_equal(events, events2)
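    # events is an (n_events, 3) int array [sample, previous value, event id],
    # so exact array equality is a complete round-trip check.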
|
StarcoderdataPython
|
293683
|
## =========================================================
## nsl/go/utils.py
## ---------------------------------------------------------
import sys
import os
from nsl.go.__about__ import __version__
from nsl.go import gotypes
## =========================================================
## Version
## ---------------------------------------------------------
def get_version():
"""Return package version as defined in `setup.py` (ex: 1.2.3)."""
return __version__
def get_version_long():
"""Return long package version (ex: 1.2.3 (Python 3.4.5))."""
return '{} (Python {})'.format(get_version(), sys.version[:5])
## =========================================================
## COLS
## ---------------------------------------------------------
COLS = 'ABCDEFGHJKLMNOPQRST'
## =========================================================
## STONE_TO_CHAR
## ---------------------------------------------------------
STONE_TO_CHAR = {
None: '.',
gotypes.Player.black: 'x',
gotypes.Player.white: 'o',
}
## =========================================================
## clear_screen()
## ---------------------------------------------------------
def clear_screen():
"""Clear the screen.
"""
# Use `cls' on Windows;
# and `clear' on Unix systems.
os.system('cls' if os.name == 'nt' else 'clear')
## =========================================================
## print_move()
## ---------------------------------------------------------
def print_move(player, move, prefix=' '):
if move.is_pass:
move_str = 'passes'
elif move.is_resign:
move_str = 'resigns'
else:
move_str = '%s%d' % (COLS[move.point.col - 1], move.point.row)
print('%s%s %s' % (prefix, player, move_str))
## =========================================================
## print_board()
## ---------------------------------------------------------
def print_board(board, prefix=' '):
for row in range(board.num_rows, 0, -1):
bump = " " if row <= 9 else ""
line = []
for col in range(1, board.num_cols + 1):
stone = board.get(gotypes.Point(row=row, col=col))
line.append(' ' + STONE_TO_CHAR[stone])
print('%s%s%d %s' % (prefix, bump, row, ''.join(line)))
print('')
print(prefix + ' ' + ' '.join(COLS[:board.num_cols]))
## =========================================================
## point_from_coords()
## ---------------------------------------------------------
def point_from_coords(coords):
# Exit when asked for
if coords == 'e' or coords == 'exit' or \
coords == 'q' or coords == 'quit':
print('')
exit()
# Converting the column letter to the corresponding column index
col = COLS.index(coords[0].upper()) + 1
# Getting the row index
row = int(coords[1:])
# Returning the corresponding Point
return gotypes.Point(row=row, col=col)
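# Example: point_from_coords('C3') returns gotypes.Point(row=3, col=3);
# note that 'I' is skipped in COLS, matching Go board conventions.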
## =========================================================
## =========================================================
## fin.
|
StarcoderdataPython
|
4817963
|
<gh_stars>1-10
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
PIPELINE_ROOT = './build'
DATA_ROOT = '/tmp'
GCP_AI_PLATFORM_SERVING_ARGS = {
'model_name': 'my_pipeline',
'project_id': '',
'regions': [""],
}
GCP_AI_PLATFORM_TRAINING_ARGS = {
'project': '',
'region': "",
'masterConfig': {
'imageUri': 'gcr.io/' + 'project' + '/pipeline-name'
},
}
SYNTHETIC_DATASET = {
'local-bootstrap': f'{DATA_ROOT}/simple-data-5-step/tfx-data/data/',
'local-raw': f'{DATA_ROOT}/simple-data-5-step/tfx-data/data/'
}
|
StarcoderdataPython
|
6703959
|
<reponame>bopopescu/webrtc-streaming-node
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain. It also conveniently sets the CHECKOUT_SOURCE_ROOT
environment variable.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(__file__)
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir,
os.pardir))
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(SRC_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner # pylint: disable=W0406
from pylib.gtest import gtest_test_instance
def main():
# Set our own paths to the .isolate files.
# pylint: disable=protected-access
gtest_test_instance._DEFAULT_ISOLATE_FILE_PATHS.update({
'audio_decoder_unittests':
'webrtc/modules/audio_decoder_unittests.isolate',
'common_audio_unittests':
'webrtc/common_audio/common_audio_unittests.isolate',
'common_video_unittests':
'webrtc/common_video/common_video_unittests.isolate',
'modules_tests': 'webrtc/modules/modules_tests.isolate',
'modules_unittests': 'webrtc/modules/modules_unittests.isolate',
'rtc_unittests': 'webrtc/rtc_unittests.isolate',
'system_wrappers_unittests':
'webrtc/system_wrappers/system_wrappers_unittests.isolate',
'test_support_unittests': 'webrtc/test/test_support_unittests.isolate',
'tools_unittests': 'webrtc/tools/tools_unittests.isolate',
'video_capture_tests':
'webrtc/modules/video_capture/video_capture_tests.isolate',
'video_engine_tests': 'webrtc/video_engine_tests.isolate',
'video_engine_core_unittests':
'webrtc/video_engine/video_engine_core_unittests.isolate',
'voice_engine_unittests':
'webrtc/voice_engine/voice_engine_unittests.isolate',
'webrtc_perf_tests': 'webrtc/webrtc_perf_tests.isolate',
})
# Override environment variable to make it possible for the scripts to find
# the root directory (our symlinking of the Chromium build toolchain would
# otherwise make them fail to do so).
os.environ['CHECKOUT_SOURCE_ROOT'] = SRC_DIR
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
11291448
|
<gh_stars>0
import logging
from natcap.invest.ui import inputs, model
from natcap.invest import pollination, MODEL_METADATA
LOGGER = logging.getLogger(__name__)
class Pollination(model.InVESTModel):
def __init__(self):
model.InVESTModel.__init__(
self,
label=MODEL_METADATA['pollination'].model_title,
target=pollination.execute,
validator=pollination.validate,
localdoc=MODEL_METADATA['pollination'].userguide)
self.landcover_raster_path = inputs.File(
args_key='landcover_raster_path',
helptext=(
"This is the landcover map that's used to map "
"biophyiscal properties about habitat and floral "
"resources of landcover types to a spatial layout."),
label='Land Cover Map (Raster)',
validator=self.validator)
self.add_input(self.landcover_raster_path)
self.landcover_biophysical_table_path = inputs.File(
args_key='landcover_biophysical_table_path',
helptext=(
"A CSV table mapping landcover codes in the landcover "
"raster to indexes of nesting availability for each "
"nesting substrate referenced in guilds table as well "
"as indexes of abundance of floral resources on that "
"landcover type per season in the bee activity columns "
"of the guild table.<br/>All indexes are in the range "
"[0.0, 1.0].<br/>Columns in the table must be at "
"least<br/>* 'lucode': representing all the unique "
"landcover codes in the raster st "
"`args['landcover_path']`<br/>* For every nesting "
"matching _NESTING_SUITABILITY_PATTERN in the guild "
"stable, a column matching the pattern in "
"`_LANDCOVER_NESTING_INDEX_HEADER`.<br/>* For every "
"season matching _FORAGING_ACTIVITY_PATTERN in the "
"guilds table, a column matching the pattern in "
"`_LANDCOVER_FLORAL_RESOURCES_INDEX_HEADER`."),
label='Land Cover Biophysical Table (CSV)',
validator=self.validator)
self.add_input(self.landcover_biophysical_table_path)
self.guild_table_path = inputs.File(
args_key='guild_table_path',
helptext=(
"A table indicating the bee species to analyze in "
"this model run. Table headers must include:<br/>* "
"'species': a bee species whose column string names "
"will be referred to in other tables and the model "
"will output analyses per species.<br/> * any number "
"of columns matching _NESTING_SUITABILITY_PATTERN with "
"values in the range [0.0, 1.0] indicating the "
"suitability of the given species to nest in a "
"particular substrate.<br/>* any number of "
"_FORAGING_ACTIVITY_PATTERN columns with values in the "
"range [0.0, 1.0] indicating the relative level of "
"foraging activity for that species during a "
"particular season.<br/>* 'alpha': the sigma average "
"flight distance of that bee species in meters.<br/>* "
"'relative_abundance': a weight indicating the "
"relative abundance of the particular species with "
"respect to the sum of all relative abundance weights "
"in the table."),
label='Guild Table (CSV)',
validator=self.validator)
self.add_input(self.guild_table_path)
self.farm_vector_path = inputs.File(
args_key='farm_vector_path',
helptext=(
"This is a layer of polygons representing farm sites "
"to be analyzed. The shapefile must have at least the "
"following fields:<br/><br/>* season (string): season "
"in which the farm needs pollination.<br/>* half_sat "
"(float): a real in the range [0.0, 1.0] representing "
"the proportion of wild pollinators to achieve a 50% "
"yield of that crop.<br/>* p_wild_dep (float): a "
"number in the range [0.0, 1.0] representing the "
"proportion of yield dependent on pollinators.<br/>* "
"p_managed (float): proportion of pollinators that "
"come from non-native/managed hives.<br/>* f_[season] "
"(float): any number of fields that match this pattern "
"such that `season` also matches the season headers in "
"the biophysical and guild table. Any areas that "
"overlap the landcover map will replace seasonal "
"floral resources with this value. Ranges from "
"0..1.<br/>* n_[substrate] (float): any number of "
"fields that match this pattern such that `substrate` "
"also matches the nesting substrate headers in the "
"biophysical and guild table. Any areas that overlap "
"the landcover map will replace nesting substrate "
"suitability with this value. Ranges from 0..1."),
label='Farm Vector (Vector) (optional)',
validator=self.validator)
self.add_input(self.farm_vector_path)
def assemble_args(self):
args = {
self.workspace.args_key: self.workspace.value(),
self.suffix.args_key: self.suffix.value(),
self.landcover_raster_path.args_key:
self.landcover_raster_path.value(),
self.landcover_biophysical_table_path.args_key:
self.landcover_biophysical_table_path.value(),
self.guild_table_path.args_key: self.guild_table_path.value(),
self.farm_vector_path.args_key: self.farm_vector_path.value(),
}
return args
|
StarcoderdataPython
|
60060
|
from django.contrib import admin
from .models import Profile
# Register your models here.
class ProfileAdmin(admin.ModelAdmin):
    # ModelAdmin does not read an inner Meta class; it shows all editable
    # fields by default, so no extra options are needed here.
    pass
admin.site.register(Profile, ProfileAdmin)
|
StarcoderdataPython
|
11313708
|
# No unittest
|
StarcoderdataPython
|
11262189
|
<gh_stars>0
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^apps/$', views.apps_list,name="apps_list"),
url(r'^apps/model/$', views.apps_model,name="apps_model"),
url(r'^apps/run/$', views.ansible_run,name="ansible_run"),
url(r'^apps/log/$', views.ansible_log,name="ansible_log"),
url(r'^apps/log/(?P<model>[a-z]+)/(?P<id>[0-9]+)/$', views.ansible_log_view,name="ansible_log"),
url(r'^apps/playbook/add/$', views.apps_add,name="ansible_add"),
url(r'^apps/playbook/file/(?P<pid>[0-9]+)/$', views.apps_playbook_file,name="apps_playbook_file"),
url(r'^apps/playbook/run/(?P<pid>[0-9]+)/$', views.apps_playbook_run,name="app_playbook_run"),
url(r'^apps/playbook/modf/(?P<pid>[0-9]+)/$', views.apps_playbook_modf,name="app_playbook_modf"),
#url(r'test',views.test,name='test'),
]
|
StarcoderdataPython
|
4862021
|
import soundfile as sf
import sounddevice as sd
from scipy.io.wavfile import write
def record_voice():
"""This function records your voice and saves the output as .wav file."""
fs = 44100 # Sample rate
seconds = 3 # Duration of recording
# sd.default.device = "Built-in Audio" # Speakers full name here
print("Say something:")
myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)
sd.wait() # Wait until recording is finished
write("speech_emotion_recognition/recordings/myvoice.wav", fs, myrecording)
print("Voice recording saved.")
|
StarcoderdataPython
|
3406046
|
from __future__ import unicode_literals
from django.apps import AppConfig
class CeleryExampleConfig(AppConfig):
name = 'celery_example'
|
StarcoderdataPython
|
5076646
|
<filename>cap_5/exercicios/5.1.py<gh_stars>1-10
# Conditional tests: write a series of conditional tests. For each one, print a
# sentence describing the test and the predicted result. Your code should look
# similar to the example below. Write at least 10 tests, with at least 5 that
# evaluate to True and 5 that evaluate to False.
# Look closely at your results and make sure you understand why each line
# evaluates to True or False:
print('Example question:') # Print a message noting that this block is the worked example.
car = 'subaru' # Create the car variable
print("Is car == 'subaru'? I predict True.") # Ask whether car equals 'subaru'.
print(car == 'subaru') # If so, Python prints True
print("\nIs car == 'audi'? I predict False.")
print(car == 'audi') # Since car differs from 'audi' here, Python prints False
print('\nFirst answer:')
favorite = 'Ice Cream'
print("My favorite == 'Ice Cream'? I predict True.")
print(favorite == 'Ice Cream')
print("\nMy favorite == 'waffles'? I predict False.")
print(favorite == 'waffles')
print('\nSecond answer:')
disciplina = 'programação'
print("Is disciplina == 'programação'? I predict True!")
print(disciplina == 'programação')
print("\nIs disciplina == 'hardware'? I predict False!")
print(disciplina == 'hardware')
print('\nThird answer:')
livro = 'Stalker'
print("Is livro == 'Stalker'? I predict True!")
print(livro == 'Stalker')
print("\nIs livro == 'Salem'? I predict False!")
print(livro == 'Salem')
print('\nFourth answer:')
favorit_autor = '<NAME>'
print("Is favorit_autor == '<NAME>'? I predict True!")
print(favorit_autor == '<NAME>')
print("\nIs favorit_autor == '<NAME>'? I predict False!")
print(favorit_autor == '<NAME>')
print('\nFifth answer:')
favorit_movie = 'Interestelar'
print("Is favorit_movie == 'Interestelar'? I predict True!")
print(favorit_movie == 'Interestelar')
print("\nIs favorit_movie == 'Interestelar'? I predict False!")
print(favorit_movie == 'Interestelar')
|
StarcoderdataPython
|
11341002
|
"""Longer tests for simplifier module
"""
# pylint: disable=relative-import
import unittest
import os
from sspam import simplifier
from templates import SimplifierTest
class TestSimplifierLong(SimplifierTest):
"""
Longer tests for simplifier module.
"""
def test_long_basics(self):
'Long basic test'
input_str = """a = 3 + x + 0
b = 4 + x - x + x
c = - 7 + a + b"""
ref_str = """a = (3 + x)
b = (4 + x)
c = (2 * x)"""
self.generic_test(input_str, ref_str)
def test_samples(self):
'Test all samples'
samples_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"samples")
for samplefilename in os.listdir(samples_dir):
fname = os.path.join(samples_dir, samplefilename)
samplefile = open(fname, 'r')
refstring = samplefile.readline()[2:-1]
output_string = simplifier.simplify(fname).split('\n')[-1]
self.assertTrue(refstring == output_string,
"Processing file %s: %s is not equal to %s"
% (samplefilename, refstring, output_string))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|