max_stars_repo_path (stringlengths 4-286) | max_stars_repo_name (stringlengths 5-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.03M) | content_cleaned (stringlengths 6-1.03M) | language (stringclasses, 111 values) | language_score (float64 0.03-1) | comments (stringlengths 0-556k) | edu_score (float64 0.32-5.03) | edu_int_score (int64 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
programmers/monthly_challenge/bit.py | mrbartrns/swacademy_structure | 0 | 6630951 | # Bits that differ by at most two
def solution(numbers):
answer = []
for number in numbers:
if not (number & 1):
answer.append(number + 1)
else:
idx = 1
while True:
if not (number & (1 << idx)):
number |= 1 << idx
number ^= 1 << (idx - 1)
answer.append(number)
break
idx += 1
return answer
if __name__ == "__main__":
numbers = [2, 7]
print(solution(numbers)) | # Bits that differ by at most two
def solution(numbers):
answer = []
for number in numbers:
if not (number & 1):
answer.append(number + 1)
else:
idx = 1
while True:
if not (number & (1 << idx)):
number |= 1 << idx
number ^= 1 << (idx - 1)
answer.append(number)
break
idx += 1
return answer
if __name__ == "__main__":
numbers = [2, 7]
print(solution(numbers)) | ko | 1.00007 | # Bits that differ by at most two | 3.436757 | 3 |
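A quick hand-check of the bit trick above (plain arithmetic, using only what the row already defines): an even input just gains its lowest bit, and for an odd input such as 7 the loop sets the first clear bit and clears the bit below it.
assert 2 + 1 == 3                       # even case: 2 (0b10) -> 3 (0b11), one differing bit
assert (7 | (1 << 3)) ^ (1 << 2) == 11  # odd case: 7 (0b111) -> 11 (0b1011), two differing bits
# so solution([2, 7]) should print [3, 11]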
user/admin.py | simonprast/bestconnect-backend | 0 | 6630952 | <gh_stars>0
from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from .models import EmailAddress, EmailToken, EmailTokenSpamBlock, PhoneNumber, SystemMessage, User
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username',)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
raise forms.ValidationError('Passwords don\'t match')
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data['<PASSWORD>'])
if commit:
user.save()
return user
class UserAdmin(BaseUserAdmin):
# The custom form handling for creating a user
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = (
'id',
'first_name',
'last_name',
'get_main_email',
'is_active',
'is_admin',
'default_superuser'
)
list_filter = (
'is_active',
'is_admin',
'default_superuser'
)
readonly_fields = ('is_admin', 'default_superuser', 'created_at')
fieldsets = (
(
None, {
'fields': (
'username',
'is_active',
'ban_reason'
)
}
),
(
'Personal info', {
'fields': (
'first_name',
'last_name'
)
}
),
(
'Contact data anti-spam', {
'fields': (
'last_phone_request',
'last_phone_code_request',
'last_email_request'
)
}
),
(
'Permissions', {
'fields': (
'utype',
'is_admin',
'default_superuser'
)
}
),
(
'Meta', {
'fields': (
'created_at',
'last_logout_all'
)
}
)
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(
None, {
'fields': (
'username',
'utype',
'<PASSWORD>',
'<PASSWORD>'
)
}
),
)
search_fields = (
'id',
'username',
'email'
)
ordering = (
'id',
)
filter_horizontal = ()
def get_main_email(self, obj):
return str(obj.primary_email)
get_main_email.short_description = 'Main Email Address'
get_main_email.admin_order_field = 'main_email_address'
# Now register the new UserAdmin...
admin.site.register(User, UserAdmin)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
class EmailAddressAdmin(admin.ModelAdmin):
list_display = (
'email_address',
'user',
'primary',
'verified'
)
admin.site.register(EmailAddress, EmailAddressAdmin)
class EmailTokenAdmin(admin.ModelAdmin):
list_display = (
'token',
'email_address',
'user',
'created_at'
)
admin.site.register(EmailToken, EmailTokenAdmin)
class EmailTokenSpamBlockAdmin(admin.ModelAdmin):
list_display = (
'email_address',
'last_email_code_request'
)
admin.site.register(EmailTokenSpamBlock, EmailTokenSpamBlockAdmin)
# Add PhoneNumberAdmin
class PhoneNumberAdmin(admin.ModelAdmin):
list_display = (
'phone_number',
'user',
'primary',
'verified'
)
admin.site.register(PhoneNumber, PhoneNumberAdmin)
# Add SystemMessageAdmin
class SystemMessageAdmin(admin.ModelAdmin):
list_display = (
'message',
'code',
'user',
'created_at'
)
admin.site.register(SystemMessage, SystemMessageAdmin)
| from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from .models import EmailAddress, EmailToken, EmailTokenSpamBlock, PhoneNumber, SystemMessage, User
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username',)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
raise forms.ValidationError('Passwords don\'t match')
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data['<PASSWORD>'])
if commit:
user.save()
return user
class UserAdmin(BaseUserAdmin):
# The custom form handling for creating a user
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = (
'id',
'first_name',
'last_name',
'get_main_email',
'is_active',
'is_admin',
'default_superuser'
)
list_filter = (
'is_active',
'is_admin',
'default_superuser'
)
readonly_fields = ('is_admin', 'default_superuser', 'created_at')
fieldsets = (
(
None, {
'fields': (
'username',
'is_active',
'ban_reason'
)
}
),
(
'Personal info', {
'fields': (
'first_name',
'last_name'
)
}
),
(
'Contact data anti-spam', {
'fields': (
'last_phone_request',
'last_phone_code_request',
'last_email_request'
)
}
),
(
'Permissions', {
'fields': (
'utype',
'is_admin',
'default_superuser'
)
}
),
(
'Meta', {
'fields': (
'created_at',
'last_logout_all'
)
}
)
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(
None, {
'fields': (
'username',
'utype',
'<PASSWORD>',
'<PASSWORD>'
)
}
),
)
search_fields = (
'id',
'username',
'email'
)
ordering = (
'id',
)
filter_horizontal = ()
def get_main_email(self, obj):
return str(obj.primary_email)
get_main_email.short_description = 'Main Email Address'
get_main_email.admin_order_field = 'main_email_address'
# Now register the new UserAdmin...
admin.site.register(User, UserAdmin)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
class EmailAddressAdmin(admin.ModelAdmin):
list_display = (
'email_address',
'user',
'primary',
'verified'
)
admin.site.register(EmailAddress, EmailAddressAdmin)
class EmailTokenAdmin(admin.ModelAdmin):
list_display = (
'token',
'email_address',
'user',
'created_at'
)
admin.site.register(EmailToken, EmailTokenAdmin)
class EmailTokenSpamBlockAdmin(admin.ModelAdmin):
list_display = (
'email_address',
'last_email_code_request'
)
admin.site.register(EmailTokenSpamBlock, EmailTokenSpamBlockAdmin)
# Add PhoneNumberAdmin
class PhoneNumberAdmin(admin.ModelAdmin):
list_display = (
'phone_number',
'user',
'primary',
'verified'
)
admin.site.register(PhoneNumber, PhoneNumberAdmin)
# Add SystemMessageAdmin
class SystemMessageAdmin(admin.ModelAdmin):
list_display = (
'message',
'code',
'user',
'created_at'
)
admin.site.register(SystemMessage, SystemMessageAdmin) | en | 0.750688 | A form for creating new users. Includes all the required fields, plus a repeated password. # Check that the two password entries match # Save the provided password in hashed format # The custom form handling for creating a user # The fields to be used in displaying the User model. # These override the definitions on the base UserAdmin # that reference specific fields on auth.User. # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin # overrides get_fieldsets to use this attribute when creating a user. # Now register the new UserAdmin... # ... and, since we're not using Django's built-in permissions, # unregister the Group model from admin. # Add PhoneNumberAdmin # Add SystemMessageAdmin | 2.64994 | 3 |
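The clean_password2 method above reduces to one boolean rule: reject only when both passwords are present and differ. A flat, self-contained restatement of that rule (the literal values are illustrative, not taken from the row):
password1, password2 = "secret", "other"
assert bool(password1 and password2 and password1 != password2)   # mismatch -> the ValidationError branch
password2 = "secret"
assert not (password1 and password2 and password1 != password2)   # match -> password2 is returned
assert not (password1 and "" and password1 != "")                 # an empty confirmation is left to field-level validation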
alchemyst/ui/routes.py | alexdmoss/alchemyst | 0 | 6630953 | import yaml
import requests
from datetime import datetime
from flask import render_template, request, Response, send_from_directory
from alchemyst import app, cache
from alchemyst.ui.note import note_view
from alchemyst.api.routes import note, notes, notes_by_category
from alchemyst.api.notes import note_from_dict, notes_from_dicts
from alchemyst.api.document import get_document
with open('app-config.yaml') as app_cfg_file:
app_cfg = yaml.load(app_cfg_file, Loader=yaml.FullLoader)
layout = app_cfg['layout']
layout['year'] = datetime.now().year
bucket = app_cfg['bucket']
@app.template_filter('display_document')
def fetch_note_from_doc_id(id):
return get_document(id)
@app.route('/', methods=['GET'])
@app.route('/home', methods=['GET'])
def index():
return render_template('index.html', title='Home', layout=layout)
@app.route('/contact', methods=['GET', 'POST'])
def contact():
return render_template('contact.html', title='Contact', layout=layout)
@app.route('/links', methods=['GET'])
def links():
return render_template('links.html', title='Links', layout=layout)
@app.route('/about', methods=['GET'])
def about():
return render_template('about.html', title='About', layout=layout)
@app.route('/privacy', methods=['GET'])
def privacy():
return render_template('privacy.html', title='Privacy Notice', layout=layout)
@app.route('/tags', methods=['GET'])
def tags():
return render_template('tags.html', title='Tags', layout=layout)
@app.route('/search', methods=['GET'])
def search():
return render_template('search.html', title='Search', layout=layout)
@app.route('/notes', methods=['GET'])
@cache.cached()
def display_notes():
url_path = request.path
notes_as_dict = notes().get_json()
notes_list = notes_from_dicts(notes_as_dict["notes"])
view = [note_view(note) for note in notes_list]
return render_template('notes.html', notes=view, title='Notes', layout=layout, path=url_path)
@app.route('/notes/<category>', methods=['GET'])
@cache.cached()
def display_notes_by_category(category):
url_path = request.path
notes_as_dict = notes_by_category(category).get_json()
notes_list = notes_from_dicts(notes_as_dict["notes"])
view = [note_view(note) for note in notes_list]
return render_template('notes.html', notes=view, title='Notes', layout=layout, path=url_path)
@app.route('/note/<note_name>', methods=['GET'])
@cache.cached()
def display_note(note_name):
note_as_dict = note(note_name).get_json()
note_obj = note_from_dict(note_as_dict)
view = note_view(note_obj)
return render_template('note.html', note=view, title='Note', layout=layout)
@app.route('/pdf/<category>/<pdf_file>', methods=['GET'])
@cache.cached()
def download_pdf(category, pdf_file):
resp = requests.request(
method=request.method,
url=request.url.replace(request.host_url, f'https://storage.googleapis.com/{bucket}/'),
headers={key: value for (key, value) in request.headers if key != 'Host'},
data=request.get_data(),
cookies=request.cookies,
allow_redirects=False)
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items()
if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
return response
@app.route('/robots.txt')
@app.route('/favicon.ico')
@app.route('/apple-touch-icon-precomposed.png')
@app.route('/apple-touch-icon.png')
def static_from_root():
return send_from_directory("static", request.path[1:])
| import yaml
import requests
from datetime import datetime
from flask import render_template, request, Response, send_from_directory
from alchemyst import app, cache
from alchemyst.ui.note import note_view
from alchemyst.api.routes import note, notes, notes_by_category
from alchemyst.api.notes import note_from_dict, notes_from_dicts
from alchemyst.api.document import get_document
with open('app-config.yaml') as app_cfg_file:
app_cfg = yaml.load(app_cfg_file, Loader=yaml.FullLoader)
layout = app_cfg['layout']
layout['year'] = datetime.now().year
bucket = app_cfg['bucket']
@app.template_filter('display_document')
def fetch_note_from_doc_id(id):
return get_document(id)
@app.route('/', methods=['GET'])
@app.route('/home', methods=['GET'])
def index():
return render_template('index.html', title='Home', layout=layout)
@app.route('/contact', methods=['GET', 'POST'])
def contact():
return render_template('contact.html', title='Contact', layout=layout)
@app.route('/links', methods=['GET'])
def links():
return render_template('links.html', title='Links', layout=layout)
@app.route('/about', methods=['GET'])
def about():
return render_template('about.html', title='About', layout=layout)
@app.route('/privacy', methods=['GET'])
def privacy():
return render_template('privacy.html', title='Privacy Notice', layout=layout)
@app.route('/tags', methods=['GET'])
def tags():
return render_template('tags.html', title='Tags', layout=layout)
@app.route('/search', methods=['GET'])
def search():
return render_template('search.html', title='Search', layout=layout)
@app.route('/notes', methods=['GET'])
@cache.cached()
def display_notes():
url_path = request.path
notes_as_dict = notes().get_json()
notes_list = notes_from_dicts(notes_as_dict["notes"])
view = [note_view(note) for note in notes_list]
return render_template('notes.html', notes=view, title='Notes', layout=layout, path=url_path)
@app.route('/notes/<category>', methods=['GET'])
@cache.cached()
def display_notes_by_category(category):
url_path = request.path
notes_as_dict = notes_by_category(category).get_json()
notes_list = notes_from_dicts(notes_as_dict["notes"])
view = [note_view(note) for note in notes_list]
return render_template('notes.html', notes=view, title='Notes', layout=layout, path=url_path)
@app.route('/note/<note_name>', methods=['GET'])
@cache.cached()
def display_note(note_name):
note_as_dict = note(note_name).get_json()
note_obj = note_from_dict(note_as_dict)
view = note_view(note_obj)
return render_template('note.html', note=view, title='Note', layout=layout)
@app.route('/pdf/<category>/<pdf_file>', methods=['GET'])
@cache.cached()
def download_pdf(category, pdf_file):
resp = requests.request(
method=request.method,
url=request.url.replace(request.host_url, f'https://storage.googleapis.com/{bucket}/'),
headers={key: value for (key, value) in request.headers if key != 'Host'},
data=request.get_data(),
cookies=request.cookies,
allow_redirects=False)
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items()
if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
return response
@app.route('/robots.txt')
@app.route('/favicon.ico')
@app.route('/apple-touch-icon-precomposed.png')
@app.route('/apple-touch-icon.png')
def static_from_root():
return send_from_directory("static", request.path[1:])
| none | 1 | 2.194965 | 2 |
|
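The download_pdf view above is a small reverse proxy to Google Cloud Storage; the key step is dropping hop-by-hop headers before rebuilding the response. The same filtering in isolation (the sample upstream headers are made up for illustration):
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
upstream = {'Content-Type': 'application/pdf', 'Content-Length': '1234', 'Connection': 'keep-alive'}
forwarded = [(name, value) for (name, value) in upstream.items() if name.lower() not in excluded_headers]
assert forwarded == [('Content-Type', 'application/pdf')]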
ABC/146/b.py | fumiyanll23/AtCoder | 0 | 6630954 | <filename>ABC/146/b.py
def rot_n(s, n):
answer = ''
for letter in s:
answer += chr(ord('A') + (ord(letter)-ord('A')+n) % 26)
return answer
### https://qiita.com/TodayInsane/items/94f495db5ba143a8d3e0
N = int(input())
S = str(input())
print(rot_n(S, N)) | <filename>ABC/146/b.py
def rot_n(s, n):
answer = ''
for letter in s:
answer += chr(ord('A') + (ord(letter)-ord('A')+n) % 26)
return answer
### https://qiita.com/TodayInsane/items/94f495db5ba143a8d3e0
N = int(input())
S = str(input())
print(rot_n(S, N)) | en | 0.565759 | ### https://qiita.com/TodayInsane/items/94f495db5ba143a8d3e0 | 3.3019 | 3 |
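rot_n above is a plain Caesar shift over uppercase letters; a flat, self-contained check of the same expression (test strings chosen here, not taken from the problem input):
assert ''.join(chr(ord('A') + (ord(c) - ord('A') + 1) % 26) for c in "ABC") == "BCD"
assert ''.join(chr(ord('A') + (ord(c) - ord('A') + 3) % 26) for c in "XYZ") == "ABC"  # wraps around past 'Z'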
dev/src/load/__init__.py | iamjli/AnswerALS_QTL | 0 | 6630955 | #!/usr/bin/env python3
__all__ = ["aals", "data", "hg38"]
# module-wide singletons for accessing data
from src.load.aals_data import aals
from src.load.external_data import data
from src.load.genome_data import hg38 | #!/usr/bin/env python3
__all__ = ["aals", "data", "hg38"]
# module-wide singletons for accessing data
from src.load.aals_data import aals
from src.load.external_data import data
from src.load.genome_data import hg38 | en | 0.275299 | #!/usr/bin/env python3 # module-wide singletons for accessing data | 1.195293 | 1 |
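The package above only re-exports three module-level singletons, so downstream code would presumably import them directly; a one-line usage guess based only on the names in the row:
from src.load import aals, data, hg38  # shared module-wide instances, created on first import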
gnome/hsctrl2gnome.py | LarsVomMars/hsctrl2X | 1 | 6630956 | <gh_stars>1-10
#!/bin/env python
from gi import require_version
require_version('Gtk', '3.0')
require_version('AppIndicator3', '0.1')
require_version('Notify', '0.7')
from subprocess import check_output, CalledProcessError
from gi.repository import Gtk, GLib, AppIndicator3, Notify
Notify.init("headset-charge-notify")
MAX_BATTERY_LIFE = 16
class Handler:
notified = False
charging = False
charge_notify = Notify.Notification.new(
"hsctrl2gnome", "Battery low", "dialog-warning"
)
panel = None
@staticmethod
def get_battery():
try:
return int(check_output(["headsetcontrol", "-b", "-c"]))
except CalledProcessError:
return -2
@staticmethod
def update(_=None):
battery = Handler.get_battery()
battery_state = None
if battery == -2:
battery_state = "Off"
elif battery == -1:
battery_state = "Charging"
Handler.notified = True
Handler.charging = True
elif 0 <= battery <= 100:
Handler.charging = False
battery_state = f"{battery}% (~{'{:.2f}'.format(round(battery * (MAX_BATTERY_LIFE / 100), 2))}h)"
else:
battery_state = "W8, what?"
Handler.panel.set_label(battery_state, "100%")
if 0 <= battery < 10 and not Handler.notified and not Handler.charging:
Handler.charge_notify.set_timeout(0)
Handler.charge_notify.show()
Handler.notified = True
return True
Handler.panel = AppIndicator3.Indicator.new(
"headset-charge",
"audio-headset",
AppIndicator3.IndicatorCategory.HARDWARE
)
Handler.panel.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
menu = Gtk.Menu()
Handler.panel.set_menu(menu)
GLib.timeout_add(60000, Handler.update, None)
Handler.update()
Gtk.main()
| #!/bin/env python
from gi import require_version
require_version('Gtk', '3.0')
require_version('AppIndicator3', '0.1')
require_version('Notify', '0.7')
from subprocess import check_output, CalledProcessError
from gi.repository import Gtk, GLib, AppIndicator3, Notify
Notify.init("headset-charge-notify")
MAX_BATTERY_LIFE = 16
class Handler:
notified = False
charging = False
charge_notify = Notify.Notification.new(
"hsctrl2gnome", "Battery low", "dialog-warning"
)
panel = None
@staticmethod
def get_battery():
try:
return int(check_output(["headsetcontrol", "-b", "-c"]))
except CalledProcessError:
return -2
@staticmethod
def update(_=None):
battery = Handler.get_battery()
battery_state = None
if battery == -2:
battery_state = "Off"
elif battery == -1:
battery_state = "Charging"
Handler.notified = True
Handler.charging = True
elif 0 <= battery <= 100:
Handler.charging = False
battery_state = f"{battery}% (~{'{:.2f}'.format(round(battery * (MAX_BATTERY_LIFE / 100), 2))}h)"
else:
battery_state = "W8, what?"
Handler.panel.set_label(battery_state, "100%")
if 0 <= battery < 10 and not Handler.notified and not Handler.charging:
Handler.charge_notify.set_timeout(0)
Handler.charge_notify.show()
Handler.notified = True
return True
Handler.panel = AppIndicator3.Indicator.new(
"headset-charge",
"audio-headset",
AppIndicator3.IndicatorCategory.HARDWARE
)
Handler.panel.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
menu = Gtk.Menu()
Handler.panel.set_menu(menu)
GLib.timeout_add(60000, Handler.update, None)
Handler.update()
Gtk.main() | ru | 0.206726 | #!/bin/env python | 2.438191 | 2 |
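The label built in Handler.update estimates remaining hours as battery * (MAX_BATTERY_LIFE / 100); the arithmetic for a hypothetical 50% reading, checked flat:
MAX_BATTERY_LIFE = 16
battery = 50
hours = round(battery * (MAX_BATTERY_LIFE / 100), 2)
assert hours == 8.0
assert f"{battery}% (~{'{:.2f}'.format(hours)}h)" == "50% (~8.00h)"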
scrape/scrape_history.py | MOOC-Learner-Project/MOOC-Learner-BigQuery-Data-Science-Analytics | 0 | 6630957 | <gh_stars>0
import pandas as pd
import pickle
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException, NoSuchWindowException, InvalidElementStateException, UnexpectedAlertPresentException
from selenium.webdriver.common.by import By
import sys
from threading import Thread
import time
import login_cred
import problem_params
course_info = {
'2017' : ['Name', 'Offering', 'URL'],
}
yr = sys.argv[1]
unit = int(sys.argv[2])
pb = int(sys.argv[3])
i = int(sys.argv[4])
#sys.argv[5] is 'reload', giving option to restart scraping from a later index
tmpName = '../../data/pickle/{}_unit{}_pb{}_history.pkl'.format(yr, unit, pb)
# points to data dirs with csvs of usernames
unameData = 'u_data/{}_name_id.csv'.format(yr)
cnum, tnum, unit_url = course_info[yr][0], course_info[yr][1], course_info[yr][2]
urls = {
1 : '{}{}{}'.format(unit_url, cnum, tnum),
}
pset1 = problem_params.pset1
pset2 = problem_params.pset2
pset3 = problem_params.pset3
pset4 = problem_params.pset4
pset5 = problem_params.pset5
url = urls[unit] # url for unit pset
# get problem tab webpage id and problem id
(tab_id, problem_id) = eval('pset{}'.format(unit))[pb]
# get button, modal, and form history webpage ids
button_id = '{}_history_trig'.format(problem_id)
modal_id = '{}_history_student_username'.format(problem_id)
form_history_id = '{}_history_text'.format(problem_id)
# get usernames
zusernames = pd.read_csv(unameData)
usernames = list(map(str, zusernames.username))
user_ids = list(map(str, zusernames.user_id))
results = {}
# command line option to reload - start from particular index
if sys.argv[5] == 'reload':
with open(tmpName, "rb") as f:
i, results = pickle.load(f)
browsers = []
browserIdx = 0
def addBrowser():
path_to_chromedriver = 'chromedriver' # change path as needed
browser = webdriver.Chrome(executable_path = path_to_chromedriver)
browser.get(url) # send browser to correct page for unit pset
browser.find_element_by_id("login-email").send_keys(login_cred.login_u);
browser.find_element_by_id("login-password").send_keys(<PASSWORD>);
browser.find_element_by_id("login-password").send_keys(Keys.ENTER);
time.sleep(15) # wait for problem page to load
# once on page for pset, navigate to problem tab
tab = browser.find_element_by_id(tab_id)
tab.click();
time.sleep(2)
# click button to view submission history
button = browser.find_element_by_id(button_id)
button.click();
time.sleep(2)
browsers.append(browser)
def killBrowser(bIdx):
browsers[bIdx].quit()
if bIdx + 2 > len(browsers):
print("adding two browsers")
Thread(target = addBrowser).start()
Thread(target = addBrowser).start()
Thread(target = addBrowser).start()
time.sleep(15)
return bIdx + 1
addBrowser()
addBrowser()
new_window = True
while i < len(usernames):
u, u_id = usernames[i], user_ids[i]
print("%i, %s of %i" % (i, u, len(usernames)))
browser = browsers[browserIdx]
try:
# enter the username in the form and hit enter
modal = browser.find_element_by_id(modal_id)
modal.clear(); # clears the last username
modal.send_keys(u);
modal.send_keys(Keys.ENTER);
time.sleep(10)
submissionsElt = browser.find_element_by_id(form_history_id)
except (StaleElementReferenceException, InvalidElementStateException, NoSuchElementException) as e:
browserIdx = killBrowser(browserIdx)
print("caught exception, retrying...")
print(e)
continue
try:
# get submission history from form HTML
response = submissionsElt.get_attribute("innerHTML")
except UnexpectedAlertPresentException as e:
response = ''
browserIdx = killBrowser(browserIdx)
# save response and write to file only if attempted
if 'attempts' in response:
results[u_id] = response
with open(tmpName, "wb") as f:
pickle.dump((i, results), f)
print("Wrote response for user {} ({}).".format(u, u_id))
else:
print("{} ({}) did not attempt".format(u, u_id))
i += 1
for bi in range(len(browsers)):
browsers[bi].quit()
| import pandas as pd
import pickle
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException, NoSuchWindowException, InvalidElementStateException, UnexpectedAlertPresentException
from selenium.webdriver.common.by import By
import sys
from threading import Thread
import time
import login_cred
import problem_params
course_info = {
'2017' : ['Name', 'Offering', 'URL'],
}
yr = sys.argv[1]
unit = int(sys.argv[2])
pb = int(sys.argv[3])
i = int(sys.argv[4])
#sys.argv[5] is 'reload', giving option to restart scraping from a later index
tmpName = '../../data/pickle/{}_unit{}_pb{}_history.pkl'.format(yr, unit, pb)
# points to data dirs with csvs of usernames
unameData = 'u_data/{}_name_id.csv'.format(yr)
cnum, tnum, unit_url = course_info[yr][0], course_info[yr][1], course_info[yr][2]
urls = {
1 : '{}{}{}'.format(unit_url, cnum, tnum),
}
pset1 = problem_params.pset1
pset2 = problem_params.pset2
pset3 = problem_params.pset3
pset4 = problem_params.pset4
pset5 = problem_params.pset5
url = urls[unit] # url for unit pset
# get problem tab webpage id and problem id
(tab_id, problem_id) = eval('pset{}'.format(unit))[pb]
# get button, modal, and form history webpage ids
button_id = '{}_history_trig'.format(problem_id)
modal_id = '{}_history_student_username'.format(problem_id)
form_history_id = '{}_history_text'.format(problem_id)
# get usernames
zusernames = pd.read_csv(unameData)
usernames = list(map(str, zusernames.username))
user_ids = list(map(str, zusernames.user_id))
results = {}
# command line option to reload - start from particular index
if sys.argv[5] == 'reload':
with open(tmpName, "rb") as f:
i, results = pickle.load(f)
browsers = []
browserIdx = 0
def addBrowser():
path_to_chromedriver = 'chromedriver' # change path as needed
browser = webdriver.Chrome(executable_path = path_to_chromedriver)
browser.get(url) # send browser to correct page for unit pset
browser.find_element_by_id("login-email").send_keys(login_cred.login_u);
browser.find_element_by_id("login-password").send_keys(<PASSWORD>);
browser.find_element_by_id("login-password").send_keys(Keys.ENTER);
time.sleep(15) # wait for problem page to load
# once on page for pset, navigate to problem tab
tab = browser.find_element_by_id(tab_id)
tab.click();
time.sleep(2)
# click button to view submission history
button = browser.find_element_by_id(button_id)
button.click();
time.sleep(2)
browsers.append(browser)
def killBrowser(bIdx):
browsers[bIdx].quit()
if bIdx + 2 > len(browsers):
print("adding two browsers")
Thread(target = addBrowser).start()
Thread(target = addBrowser).start()
Thread(target = addBrowser).start()
time.sleep(15)
return bIdx + 1
addBrowser()
addBrowser()
new_window = True
while i < len(usernames):
u, u_id = usernames[i], user_ids[i]
print("%i, %s of %i" % (i, u, len(usernames)))
browser = browsers[browserIdx]
try:
# enter the username in the form and hit enter
modal = browser.find_element_by_id(modal_id)
modal.clear(); # clears the last username
modal.send_keys(u);
modal.send_keys(Keys.ENTER);
time.sleep(10)
submissionsElt = browser.find_element_by_id(form_history_id)
except (StaleElementReferenceException, InvalidElementStateException, NoSuchElementException) as e:
browserIdx = killBrowser(browserIdx)
print("caught exception, retrying...")
print(e)
continue
try:
# get submission history from form HTML
response = submissionsElt.get_attribute("innerHTML")
except UnexpectedAlertPresentException as e:
response = ''
browserIdx = killBrowser(browserIdx)
# save response and write to file only if attempted
if 'attempts' in response:
results[u_id] = response
with open(tmpName, "wb") as f:
pickle.dump((i, results), f)
print("Wrote response for user {} ({}).".format(u, u_id))
else:
print("{} ({}) did not attempt".format(u, u_id))
i += 1
for bi in range(len(browsers)):
browsers[bi].quit() | en | 0.841441 | #sys.argv[5] is 'reload', giving option to restart scraping from a later index # points to data dirs with csvs of usernames # url for unit pset # get problem tab webpage id and problem id # get button, modal, and form history webpage ids # get usernames # command line option to reload - start from particular index # change path as needed # send browser to correct page for unit pset # wait for problem page to load # once on page for pset, navigate to problem tab # click button to view submission history # enter the username in the form and hit enter # clears the last username # get submission history from form HTML # save response and write to file only if attempted | 2.440285 | 2 |
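The scraper above checkpoints an (index, results) tuple to a pickle after every user, which is what makes the 'reload' option work; a minimal sketch of inspecting such a checkpoint (the file name follows the script's naming pattern with yr=2017, unit=1, pb=1 and is assumed to exist):
import pickle
f = open('../../data/pickle/2017_unit1_pb1_history.pkl', 'rb')
i, results = pickle.load(f)
f.close()
print('resume index:', i, '| users with submission history:', len(results))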
packages/conan/recipes/python/test_package/testpy.py | boberfly/aswf-docker | 3 | 6630958 | <filename>packages/conan/recipes/python/test_package/testpy.py
import sys
print(sys.version_info)
print(sys.path)
| <filename>packages/conan/recipes/python/test_package/testpy.py
import sys
print(sys.version_info)
print(sys.path)
| none | 1 | 1.134171 | 1 |
|
python/raft/setup.py | kaatish/raft | 0 | 6630959 | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
import os
import shutil
import sys
import sysconfig
# Must import in this order:
# setuptools -> Cython.Distutils.build_ext -> setuptools.command.build_ext
# Otherwise, setuptools.command.build_ext ends up inheriting from
# Cython.Distutils.old_build_ext which we do not want
import setuptools
try:
from Cython.Distutils.build_ext import new_build_ext as _build_ext
except ImportError:
from setuptools.command.build_ext import build_ext as _build_ext
from distutils.sysconfig import get_python_lib
import setuptools.command.build_ext
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuputils import clean_folder
from setuputils import get_environment_option
from setuputils import get_cli_option
from pathlib import Path
import versioneer
##############################################################################
# - Dependencies include and lib folder setup --------------------------------
install_requires = [
'cython'
]
cuda_home = get_environment_option("CUDA_HOME")
clean_artifacts = get_cli_option('clean')
single_gpu_build = get_cli_option('--singlegpu')
if not cuda_home:
cuda_home = (
os.popen('echo "$(dirname $(dirname $(which nvcc)))"').read().strip()
)
print("-- Using nvcc to detect CUDA, found at " + str(cuda_home))
cuda_include_dir = os.path.join(cuda_home, "include")
cuda_lib_dir = os.path.join(cuda_home, "lib64")
##############################################################################
# - Clean target -------------------------------------------------------------
if clean_artifacts:
print("-- Cleaning all Python and Cython build artifacts...")
try:
setup_file_path = str(Path(__file__).parent.absolute())
shutil.rmtree(setup_file_path + '/.pytest_cache', ignore_errors=True)
shutil.rmtree(setup_file_path + '/_external_repositories',
ignore_errors=True)
shutil.rmtree(setup_file_path + '/raft.egg-info', ignore_errors=True)
shutil.rmtree(setup_file_path + '/__pycache__', ignore_errors=True)
clean_folder(setup_file_path + '/raft')
shutil.rmtree(setup_file_path + '/build')
except IOError:
pass
# need to terminate script so cythonizing doesn't get triggered after
# cleanup unintendedly
sys.argv.remove("clean")
if "--all" in sys.argv:
sys.argv.remove("--all")
if len(sys.argv) == 1:
sys.exit(0)
##############################################################################
# - Cython extensions build and parameters -----------------------------------
libs = ['cudart', "nccl", "cusolver", "cusparse", "cublas"]
include_dirs = [cuda_include_dir,
numpy.get_include(),
"../../cpp/include/",
os.path.dirname(sysconfig.get_path("include"))]
extensions = [
Extension("*",
sources=["raft/**/*.pyx"],
include_dirs=include_dirs,
library_dirs=[get_python_lib()],
runtime_library_dirs=[cuda_lib_dir,
os.path.join(os.sys.prefix, "lib")],
libraries=libs,
language='c++',
extra_compile_args=['-std=c++17'])
]
class build_ext_no_debug(_build_ext):
def build_extensions(self):
def remove_flags(compiler, *flags):
for flag in flags:
try:
compiler.compiler_so = list(
filter((flag).__ne__, compiler.compiler_so)
)
except Exception:
pass
# Full optimization
self.compiler.compiler_so.append("-O3")
# Ignore deprecation declaration warnings
self.compiler.compiler_so.append("-Wno-deprecated-declarations")
# No debug symbols, full optimization, no '-Wstrict-prototypes' warning
remove_flags(
self.compiler, "-g", "-G", "-O1", "-O2", "-Wstrict-prototypes"
)
super().build_extensions()
def finalize_options(self):
if self.distribution.ext_modules:
# Delay import this to allow for Cython-less installs
from Cython.Build.Dependencies import cythonize
nthreads = getattr(self, "parallel", None) # -j option in Py3.5+
nthreads = int(nthreads) if nthreads else None
self.distribution.ext_modules = cythonize(
self.distribution.ext_modules,
nthreads=nthreads,
force=self.force,
gdb_debug=False,
compiler_directives=dict(
profile=False, language_level=3, embedsignature=True
),
)
# Skip calling super() and jump straight to setuptools
setuptools.command.build_ext.build_ext.finalize_options(self)
cmdclass = dict()
cmdclass.update(versioneer.get_cmdclass())
cmdclass["build_ext"] = build_ext_no_debug
##############################################################################
# - Python package generation ------------------------------------------------
setup(name='raft',
description="RAPIDS Analytics Frameworks Toolset",
version=versioneer.get_version(),
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
],
author="<NAME>",
setup_requires=['cython'],
ext_modules=extensions,
package_data=dict.fromkeys(
find_packages(include=["raft.dask.common",
"raft.dask.common.includes",
"raft.common",
"raft.common.includes"]),
["*.hpp", "*.pxd"],
),
packages=find_packages(include=['raft', 'raft.*']),
install_requires=install_requires,
license="Apache",
cmdclass=cmdclass,
zip_safe=False
)
| #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
import os
import shutil
import sys
import sysconfig
# Must import in this order:
# setuptools -> Cython.Distutils.build_ext -> setuptools.command.build_ext
# Otherwise, setuptools.command.build_ext ends up inheriting from
# Cython.Distutils.old_build_ext which we do not want
import setuptools
try:
from Cython.Distutils.build_ext import new_build_ext as _build_ext
except ImportError:
from setuptools.command.build_ext import build_ext as _build_ext
from distutils.sysconfig import get_python_lib
import setuptools.command.build_ext
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuputils import clean_folder
from setuputils import get_environment_option
from setuputils import get_cli_option
from pathlib import Path
import versioneer
##############################################################################
# - Dependencies include and lib folder setup --------------------------------
install_requires = [
'cython'
]
cuda_home = get_environment_option("CUDA_HOME")
clean_artifacts = get_cli_option('clean')
single_gpu_build = get_cli_option('--singlegpu')
if not cuda_home:
cuda_home = (
os.popen('echo "$(dirname $(dirname $(which nvcc)))"').read().strip()
)
print("-- Using nvcc to detect CUDA, found at " + str(cuda_home))
cuda_include_dir = os.path.join(cuda_home, "include")
cuda_lib_dir = os.path.join(cuda_home, "lib64")
##############################################################################
# - Clean target -------------------------------------------------------------
if clean_artifacts:
print("-- Cleaning all Python and Cython build artifacts...")
try:
setup_file_path = str(Path(__file__).parent.absolute())
shutil.rmtree(setup_file_path + '/.pytest_cache', ignore_errors=True)
shutil.rmtree(setup_file_path + '/_external_repositories',
ignore_errors=True)
shutil.rmtree(setup_file_path + '/raft.egg-info', ignore_errors=True)
shutil.rmtree(setup_file_path + '/__pycache__', ignore_errors=True)
clean_folder(setup_file_path + '/raft')
shutil.rmtree(setup_file_path + '/build')
except IOError:
pass
# need to terminate script so cythonizing doesn't get triggered after
# cleanup unintendedly
sys.argv.remove("clean")
if "--all" in sys.argv:
sys.argv.remove("--all")
if len(sys.argv) == 1:
sys.exit(0)
##############################################################################
# - Cython extensions build and parameters -----------------------------------
libs = ['cudart', "nccl", "cusolver", "cusparse", "cublas"]
include_dirs = [cuda_include_dir,
numpy.get_include(),
"../../cpp/include/",
os.path.dirname(sysconfig.get_path("include"))]
extensions = [
Extension("*",
sources=["raft/**/*.pyx"],
include_dirs=include_dirs,
library_dirs=[get_python_lib()],
runtime_library_dirs=[cuda_lib_dir,
os.path.join(os.sys.prefix, "lib")],
libraries=libs,
language='c++',
extra_compile_args=['-std=c++17'])
]
class build_ext_no_debug(_build_ext):
def build_extensions(self):
def remove_flags(compiler, *flags):
for flag in flags:
try:
compiler.compiler_so = list(
filter((flag).__ne__, compiler.compiler_so)
)
except Exception:
pass
# Full optimization
self.compiler.compiler_so.append("-O3")
# Ignore deprecation declaration warnings
self.compiler.compiler_so.append("-Wno-deprecated-declarations")
# No debug symbols, full optimization, no '-Wstrict-prototypes' warning
remove_flags(
self.compiler, "-g", "-G", "-O1", "-O2", "-Wstrict-prototypes"
)
super().build_extensions()
def finalize_options(self):
if self.distribution.ext_modules:
# Delay import this to allow for Cython-less installs
from Cython.Build.Dependencies import cythonize
nthreads = getattr(self, "parallel", None) # -j option in Py3.5+
nthreads = int(nthreads) if nthreads else None
self.distribution.ext_modules = cythonize(
self.distribution.ext_modules,
nthreads=nthreads,
force=self.force,
gdb_debug=False,
compiler_directives=dict(
profile=False, language_level=3, embedsignature=True
),
)
# Skip calling super() and jump straight to setuptools
setuptools.command.build_ext.build_ext.finalize_options(self)
cmdclass = dict()
cmdclass.update(versioneer.get_cmdclass())
cmdclass["build_ext"] = build_ext_no_debug
##############################################################################
# - Python package generation ------------------------------------------------
setup(name='raft',
description="RAPIDS Analytics Frameworks Toolset",
version=versioneer.get_version(),
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
],
author="<NAME>",
setup_requires=['cython'],
ext_modules=extensions,
package_data=dict.fromkeys(
find_packages(include=["raft.dask.common",
"raft.dask.common.includes",
"raft.common",
"raft.common.includes"]),
["*.hpp", "*.pxd"],
),
packages=find_packages(include=['raft', 'raft.*']),
install_requires=install_requires,
license="Apache",
cmdclass=cmdclass,
zip_safe=False
)
| en | 0.497851 | # # Copyright (c) 2020-2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Must import in this order: # setuptools -> Cython.Distutils.build_ext -> setuptools.command.build_ext # Otherwise, setuptools.command.build_ext ends up inheriting from # Cython.Distutils.old_build_ext which we do not want ############################################################################## # - Dependencies include and lib folder setup -------------------------------- ############################################################################## # - Clean target ------------------------------------------------------------- # need to terminate script so cythonizing doesn't get triggered after # cleanup unintendedly ############################################################################## # - Cython extensions build and parameters ----------------------------------- # Full optimization # Ignore deprecation declaration warnings # No debug symbols, full optimization, no '-Wstrict-prototypes' warning # Delay import this to allow for Cython-less installs # -j option in Py3.5+ # Skip calling super() and jump straight to setuptools ############################################################################## # - Python package generation ------------------------------------------------ | 1.401652 | 1 |
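The remove_flags helper in build_ext_no_debug strips compiler flags with list(filter((flag).__ne__, ...)); the same idiom applied one flag at a time to a made-up flag list:
flags = ["-g", "-O2", "-fPIC", "-Wstrict-prototypes", "-O3"]
flags = list(filter(("-g").__ne__, flags))
flags = list(filter(("-O2").__ne__, flags))
flags = list(filter(("-Wstrict-prototypes").__ne__, flags))
assert flags == ["-fPIC", "-O3"]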
translation.py | navaneethrkrishna007/Rex-TelegramOrgRoBot | 0 | 6630960 | <reponame>navaneethrkrishna007/Rex-TelegramOrgRoBot
class Translation(object):
START_TEXT = """Hi!
I am a simple My.telegram.org Bot.
To Get API ID & API HASH Enter your Telegram Phone Number With Country Code.
🤖 𝙱𝚘𝚝 𝚄𝚙𝚍𝚊𝚝𝚎𝚜 : @Madez_Offical
Click /Start To Restart The Progress"""
AFTER_RECVD_CODE_TEXT = """I see!
Now please send the Telegram code that you received from Telegram!
This code is only used for the purpose of getting the APP ID from my.telegram.org
Click /Start To Restart The Progress"""
BEFORE_SUCC_LOGIN = "Received code. Scraping web page ..."
ERRED_PAGE = "Something went wrong: failed to get the app ID. \n\n@Rex_Bots_Support\n\nHow To Get API Code For Website"
CANCELLED_MESG = "Bye! Please re /start the bot conversation"
IN_VALID_CODE_PVDED = "Send me the code that you received from Telegram"
IN_VALID_PHNO_PVDED = "Hey, Send me your Phone Number"
| class Translation(object):
START_TEXT = """Hi!
I am a simple My.telegram.org Bot.
To Get API ID & API HASH Enter your Telegram Phone Number With Country Code.
🤖 𝙱𝚘𝚝 𝚄𝚙𝚍𝚊𝚝𝚎𝚜 : @Madez_Offical
Click /Start To Restart The Progress"""
AFTER_RECVD_CODE_TEXT = """I see!
Now please send the Telegram code that you received from Telegram!
This code is only used for the purpose of getting the APP ID from my.telegram.org
Click /Start To Restart The Progress"""
BEFORE_SUCC_LOGIN = "Received code. Scraping web page ..."
ERRED_PAGE = "Something went wrong: failed to get the app ID. \n\n@Rex_Bots_Support\n\nHow To Get API Code For Website"
CANCELLED_MESG = "Bye! Please re /start the bot conversation"
IN_VALID_CODE_PVDED = "Send me the code that you received from Telegram"
IN_VALID_PHNO_PVDED = "Hey, Send me your Phone Number" | en | 0.693052 | Hi! Hai Iam a Simple My.telegram.org Bot. To Get API ID & API HASH Enter your Telegram Phone Number With Country Code. 🤖 𝙱𝚘𝚝 𝚄𝚙𝚍𝚊𝚝𝚎𝚜 : @Madez_Offical Click /Start To Restart The Progress I see! Now please send the Telegram code that you received from Telegram! This code is only used for the purpose of getting the APP ID from my.telegram.org Click /Start To Restart The Progress | 2.945044 | 3 |
migrations/versions/ccd22863e633_create_seft_instrument_table.py | ONSdigital/ras-collection-instrument | 2 | 6630961 | """create seft instrument table
Revision ID: <KEY>
Revises: 72912058602c
Create Date: 2018-02-20 13:22:14.773113
"""
import sqlalchemy as sa
from alembic import op
from application.models import GUID
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "72912058602c"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
sql_query = "ALTER TABLE ras_ci.instrument ADD CONSTRAINT U_instrument_id UNIQUE(instrument_id)"
conn.execute(sql_query)
op.create_table(
"seft_instrument",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("data", sa.LargeBinary),
sa.Column("len", sa.Integer),
sa.Column("file_name", sa.String(32)),
sa.Column("instrument_id", GUID),
sa.ForeignKeyConstraint(["instrument_id"], ["ras_ci.instrument.instrument_id"]),
schema="ras_ci",
)
sql_query = (
"INSERT INTO ras_ci.seft_instrument (instrument_id, data, file_name, len) "
"SELECT instrument_id, data, file_name, len FROM ras_ci.instrument"
)
conn.execute(sql_query)
op.drop_column("instrument", "file_name", schema="ras_ci")
op.drop_column("instrument", "data", schema="ras_ci")
op.drop_column("instrument", "len", schema="ras_ci")
op.add_column("instrument", sa.Column("type", sa.String(8)), schema="ras_ci")
sql_query = "UPDATE ras_ci.instrument SET type = 'SEFT'"
conn.execute(sql_query)
def downgrade():
pass
| """create seft instrument table
Revision ID: <KEY>
Revises: 72912058602c
Create Date: 2018-02-20 13:22:14.773113
"""
import sqlalchemy as sa
from alembic import op
from application.models import GUID
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "72912058602c"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
sql_query = "ALTER TABLE ras_ci.instrument ADD CONSTRAINT U_instrument_id UNIQUE(instrument_id)"
conn.execute(sql_query)
op.create_table(
"seft_instrument",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("data", sa.LargeBinary),
sa.Column("len", sa.Integer),
sa.Column("file_name", sa.String(32)),
sa.Column("instrument_id", GUID),
sa.ForeignKeyConstraint(["instrument_id"], ["ras_ci.instrument.instrument_id"]),
schema="ras_ci",
)
sql_query = (
"INSERT INTO ras_ci.seft_instrument (instrument_id, data, file_name, len) "
"SELECT instrument_id, data, file_name, len FROM ras_ci.instrument"
)
conn.execute(sql_query)
op.drop_column("instrument", "file_name", schema="ras_ci")
op.drop_column("instrument", "data", schema="ras_ci")
op.drop_column("instrument", "len", schema="ras_ci")
op.add_column("instrument", sa.Column("type", sa.String(8)), schema="ras_ci")
sql_query = "UPDATE ras_ci.instrument SET type = 'SEFT'"
conn.execute(sql_query)
def downgrade():
pass
| en | 0.378338 | create seft instrument table Revision ID: <KEY> Revises: 72912058602c Create Date: 2018-02-20 13:22:14.773113 # revision identifiers, used by Alembic. | 1.543696 | 2 |
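downgrade() above is left as a no-op; a reverse migration, if one were ever wanted, would roughly mirror upgrade(): re-add the columns, copy the data back, drop the new table, and drop the unique constraint. A hedged sketch of that body (PostgreSQL UPDATE ... FROM syntax assumed, constraint name assumed to be folded to lower case, untested against the real schema):
op.add_column("instrument", sa.Column("data", sa.LargeBinary), schema="ras_ci")
op.add_column("instrument", sa.Column("len", sa.Integer), schema="ras_ci")
op.add_column("instrument", sa.Column("file_name", sa.String(32)), schema="ras_ci")
op.get_bind().execute(
    "UPDATE ras_ci.instrument i SET data = s.data, len = s.len, file_name = s.file_name "
    "FROM ras_ci.seft_instrument s WHERE s.instrument_id = i.instrument_id"
)
op.drop_column("instrument", "type", schema="ras_ci")
op.drop_table("seft_instrument", schema="ras_ci")
op.drop_constraint("u_instrument_id", "instrument", schema="ras_ci", type_="unique")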
backend/pose_estimation_consumer_sync.py | j4qfrost/pose-estimation-stream | 0 | 6630962 | import subprocess, sys, time, os
import json, numpy
import cv2
from twitchstream.outputvideo import TwitchBufferedOutputStream
from pose_estimation import PoseProcessor
# import tensorflow as tf
import torch
import posenet
FFMPEG= 'ffmpeg'
FFPROBE = 'ffprobe'
def get_stream_resolution(stream_name):
metadata = {}
while 'streams' not in metadata:
info = subprocess.run([FFPROBE, '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', stream_name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = info.stdout
if out:
metadata = json.loads(out.decode('utf-8'))
time.sleep(1)
print('Grabbed resolution!')
return metadata['streams'][0]['width'], metadata['streams'][0]['height']
def get_frame_from_stream(resolution, pipe):
width, height = resolution
raw_image = pipe.stdout.read(width * height * 3) # read width*height*3 bytes (= 1 raw RGB frame)
if len(raw_image) == 0:
return None
return numpy.frombuffer(raw_image, dtype=numpy.uint8).reshape((height, width, 3))
def loop_send_frame(streamkey, resolution, stream, pose_processor):
width, height = resolution
try:
# config = tf.ConfigProto()
# config.intra_op_parallelism_threads = 4
# config.inter_op_parallelism_threads = 4
with TwitchBufferedOutputStream(
twitch_stream_key=streamkey,
width=width,
height=height,
fps=30.,
enable_audio=False,
verbose=True) as videostream:
# with tf.Session(config=config) as sess:
# model_cfg, model_outputs = posenet.load_model(3, sess)
# frame = tf.placeholder(tf.uint8, shape=(height, width, 3))
# input_image = tf.placeholder(tf.uint8, shape=(1, height + 1, width + 1, 3))
# while True:
# frame = get_frame_from_stream(resolution, stream)
# if frame is not None:
# start = time.time()
# output_stride = model_cfg['output_stride']
# input_image, frame, output_scale = posenet.process_input(
# frame, output_stride=output_stride)
# heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
# model_outputs,
# feed_dict={'image:0': input_image}
# )
# pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
# heatmaps_result.squeeze(axis=0),
# offsets_result.squeeze(axis=0),
# displacement_fwd_result.squeeze(axis=0),
# displacement_bwd_result.squeeze(axis=0),
# output_stride=output_stride,
# max_pose_detections=1, min_pose_score=0.10)
# keypoint_coords *= output_scale
# frame = posenet.draw_skel_and_kp(
# frame, pose_scores, keypoint_scores, keypoint_coords,
# min_pose_score=0.10, min_part_score=0.10)
# videostream.send_video_frame(frame)
# print(time.time() - start)
model = posenet.load_model(101)
model = model.cuda()
output_stride = model.output_stride
while True:
frame = get_frame_from_stream(resolution, stream)
input_image, frame, output_scale = posenet.process_input(
frame, output_stride=output_stride)
with torch.no_grad():
input_image = torch.Tensor(input_image).cuda()
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(input_image)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
heatmaps_result.squeeze(0),
offsets_result.squeeze(0),
displacement_fwd_result.squeeze(0),
displacement_bwd_result.squeeze(0),
output_stride=output_stride,
max_pose_detections=1,
min_pose_score=0.1)
keypoint_coords *= output_scale
# TODO this isn't particularly fast, use GL for drawing and display someday...
frame = 255 - posenet.draw_skel_and_kp(
frame, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.1, min_part_score=0.1)
videostream.send_video_frame(frame)
# save_image(frame)
except Exception as e:
raise
def save_image(img):
cv2.imwrite('test.jpg', img)
def main(stream_name):
print('Starting program...')
# stream_name = argv[1]
pose_processor = PoseProcessor('tf')
resolution = get_stream_resolution(stream_name)
stream = subprocess.Popen([FFMPEG,
'-i', stream_name,
'-loglevel', 'quiet', # no text output
'-c:v', 'h264_nvenc',
'-an', # disable audio
'-f', 'image2pipe',
'-pix_fmt', 'yuv420p',
'-vcodec', 'rawvideo', '-'],
stdout = subprocess.PIPE, stderr=subprocess.PIPE)
loop_send_frame('live_173288790_pEOfgLFUAfocVRZdAQ1D8bUubjL4OY', resolution, stream, pose_processor)
# while True:
# frame = get_frame_from_stream(resolution, stream)
# frame = pose_estimation.process_pose_frame(frame)
# if frame is not None:
# L.put(frame)
if __name__ == '__main__':
main(sys.argv[1]) | import subprocess, sys, time, os
import json, numpy
import cv2
from twitchstream.outputvideo import TwitchBufferedOutputStream
from pose_estimation import PoseProcessor
# import tensorflow as tf
import torch
import posenet
FFMPEG= 'ffmpeg'
FFPROBE = 'ffprobe'
def get_stream_resolution(stream_name):
metadata = {}
while 'streams' not in metadata:
info = subprocess.run([FFPROBE, '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', stream_name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = info.stdout
if out:
metadata = json.loads(out.decode('utf-8'))
time.sleep(1)
print('Grabbed resolution!')
return metadata['streams'][0]['width'], metadata['streams'][0]['height']
def get_frame_from_stream(resolution, pipe):
width, height = resolution
raw_image = pipe.stdout.read(width * height * 3) # read width*height*3 bytes (= 1 raw RGB frame)
if len(raw_image) == 0:
return None
return numpy.frombuffer(raw_image, dtype=numpy.uint8).reshape((height, width, 3))
def loop_send_frame(streamkey, resolution, stream, pose_processor):
width, height = resolution
try:
# config = tf.ConfigProto()
# config.intra_op_parallelism_threads = 4
# config.inter_op_parallelism_threads = 4
with TwitchBufferedOutputStream(
twitch_stream_key=streamkey,
width=width,
height=height,
fps=30.,
enable_audio=False,
verbose=True) as videostream:
# with tf.Session(config=config) as sess:
# model_cfg, model_outputs = posenet.load_model(3, sess)
# frame = tf.placeholder(tf.uint8, shape=(height, width, 3))
# input_image = tf.placeholder(tf.uint8, shape=(1, height + 1, width + 1, 3))
# while True:
# frame = get_frame_from_stream(resolution, stream)
# if frame is not None:
# start = time.time()
# output_stride = model_cfg['output_stride']
# input_image, frame, output_scale = posenet.process_input(
# frame, output_stride=output_stride)
# heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
# model_outputs,
# feed_dict={'image:0': input_image}
# )
# pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
# heatmaps_result.squeeze(axis=0),
# offsets_result.squeeze(axis=0),
# displacement_fwd_result.squeeze(axis=0),
# displacement_bwd_result.squeeze(axis=0),
# output_stride=output_stride,
# max_pose_detections=1, min_pose_score=0.10)
# keypoint_coords *= output_scale
# frame = posenet.draw_skel_and_kp(
# frame, pose_scores, keypoint_scores, keypoint_coords,
# min_pose_score=0.10, min_part_score=0.10)
# videostream.send_video_frame(frame)
# print(time.time() - start)
model = posenet.load_model(101)
model = model.cuda()
output_stride = model.output_stride
while True:
frame = get_frame_from_stream(resolution, stream)
input_image, frame, output_scale = posenet.process_input(
frame, output_stride=output_stride)
with torch.no_grad():
input_image = torch.Tensor(input_image).cuda()
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(input_image)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
heatmaps_result.squeeze(0),
offsets_result.squeeze(0),
displacement_fwd_result.squeeze(0),
displacement_bwd_result.squeeze(0),
output_stride=output_stride,
max_pose_detections=1,
min_pose_score=0.1)
keypoint_coords *= output_scale
# TODO this isn't particularly fast, use GL for drawing and display someday...
frame = 255 - posenet.draw_skel_and_kp(
frame, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.1, min_part_score=0.1)
videostream.send_video_frame(frame)
# save_image(frame)
except Exception as e:
raise
def save_image(img):
cv2.imwrite('test.jpg', img)
def main(stream_name):
print('Starting program...')
# stream_name = argv[1]
pose_processor = PoseProcessor('tf')
resolution = get_stream_resolution(stream_name)
stream = subprocess.Popen([FFMPEG,
'-i', stream_name,
'-loglevel', 'quiet', # no text output
'-c:v', 'h264_nvenc',
'-an', # disable audio
'-f', 'image2pipe',
'-pix_fmt', 'yuv420p',
'-vcodec', 'rawvideo', '-'],
stdout = subprocess.PIPE, stderr=subprocess.PIPE)
loop_send_frame('live_173288790_pEOfgLFUAfocVRZdAQ1D8bUubjL4OY', resolution, stream, pose_processor)
# while True:
# frame = get_frame_from_stream(resolution, stream)
# frame = pose_estimation.process_pose_frame(frame)
# if frame is not None:
# L.put(frame)
if __name__ == '__main__':
main(sys.argv[1]) | en | 0.444499 | # import tensorflow as tf # read 432*240*3 bytes (= 1 frame) # config = tf.ConfigProto() # config.intra_op_parallelism_threads = 4 # config.inter_op_parallelism_threads = 4 # with tf.Session(config=config) as sess: # model_cfg, model_outputs = posenet.load_model(3, sess) # frame = tf.placeholder(tf.uint8, shape=(height, width, 3)) # input_image = tf.placeholder(tf.uint8, shape=(1, height + 1, width + 1, 3)) # while True: # frame = get_frame_from_stream(resolution, stream) # if frame is not None: # start = time.time() # output_stride = model_cfg['output_stride'] # input_image, frame, output_scale = posenet.process_input( # frame, output_stride=output_stride) # heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run( # model_outputs, # feed_dict={'image:0': input_image} # ) # pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses( # heatmaps_result.squeeze(axis=0), # offsets_result.squeeze(axis=0), # displacement_fwd_result.squeeze(axis=0), # displacement_bwd_result.squeeze(axis=0), # output_stride=output_stride, # max_pose_detections=1, min_pose_score=0.10) # keypoint_coords *= output_scale # frame = posenet.draw_skel_and_kp( # frame, pose_scores, keypoint_scores, keypoint_coords, # min_pose_score=0.10, min_part_score=0.10) # videostream.send_video_frame(frame) # print(time.time() - start) # TODO this isn't particularly fast, use GL for drawing and display someday... # save_image(frame) # stream_name = argv[1] # no text output # disable audio # while True: # frame = get_frame_from_stream(resolution, stream) # frame = pose_estimation.process_pose_frame(frame) # if frame is not None: # L.put(frame) | 2.253386 | 2 |
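get_frame_from_stream reads exactly width * height * 3 bytes of raw 24-bit RGB per frame; for a hypothetical 432x240 stream that works out to:
width, height = 432, 240
assert width * height * 3 == 311040  # bytes per raw frame pulled off the ffmpeg pipe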
FeatureMapVisualizer/visualizer.py | lukysummer/FeatureVisualizer | 1 | 6630963 | <filename>FeatureMapVisualizer/visualizer.py
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from collections import Counter
import cv2
from PIL import Image, ImageFile
import torch
import torchvision
import torch.nn.functional as F
from torch import nn, optim
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader, Dataset
from torch.autograd import Variable
from .save_features import SaveFeatures
class FeatureMapVisualizer():
def __init__(self,
model,
model_type="resnet",
ec=False,
use_cuda=True):
'''
### Feature Map Visualization class: ###
Contains various functions for visualization methods using convolutional feature maps
|| PARAMETERS ||
model : (PyTorch model)
model_type : (str) must be "resnet" or "vgg"
ec : (bool) True if using encoder, False if using the whole model (encoder + classifier)
'''
        assert model_type in ["resnet", "vgg"], 'model_type must be either "resnet" or "vgg"!'
self.model = model.eval().cuda() if use_cuda else model.eval()
for p in self.model.parameters(): p.requires_grad=False
self.model_type = model_type
self.ec = ec
self.use_cuda = use_cuda
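    # --- Usage sketch (illustrative; not part of the original file) ---
    # Assumes a pretrained torchvision ResNet-50 and an available CUDA device:
    #
    #   from torchvision import models
    #   resnet = models.resnet50(pretrained=True)
    #   viz = FeatureMapVisualizer(resnet, model_type="resnet", ec=False, use_cuda=True)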
def register_hook(self, layer):
''' Register hook in the requested layer '''
if self.model_type == "vgg":
conv_layers = [c for c in list(self.model.children())[0] if isinstance(c, nn.Conv2d)]
activations = SaveFeatures(conv_layers[layer]) # register hook
elif self.model_type == "resnet":
if self.ec:
activations = SaveFeatures(self.model[-2][-2])
else:
activations = SaveFeatures(self.model.layer4[layer])
return activations
def find_unique_filters(self,
layer,
train_dir,
classes,
n_imgs_dict,
n_each_img=25,
n_each_class=25):
'''
Find indices of feature maps that are activated the most when the model
sees images of a particular class, so we can focus on those feature maps
when visualizing.
|| PARAMETERS ||
layer : (int) if using last convolutional layer, use -2 for resnet & 12 for vgg16
train_dir : (str) address of the folder that contains training data including "/" at the end e.g. "train_data/"
classes : (list of strs) list containing (at least two) class names in string e.g. ["cat", "dog"]
n_imgs_dict : (dict) key : class name (str), value : # of training images for that class (int) e.g. {"dog":955, "cat":1857}
n_each_img : (int) # of top feature maps to save for EACH IMAGE
n_each_class : (int) # of top feature maps to save for EACH CLASS
'''
cls_dirs = [train_dir + cls for cls in classes]
top_feature_maps_dict_each_image = {} # dict to save top feature maps for ALL images for each class
n_maps_last_layer = 2048 if self.model_type=="resnet" else 512
########## Top Feature maps for EACH IMAGE ##########
for dir in cls_dirs: # iterate over class
top_filters = []
### for EACH IMAGE of the class ###
for img_path in os.listdir(dir):
### Save activations of ALL feature maps for the image ###
activations_list = self.one_image_N_top_feature_maps(layer, os.path.join(dir, img_path), plot=False, print_logits=False)
### Add top n_each_img most activated feature maps of the image to the "top filters" list ###
top_filters.extend(list(activations_list.detach().cpu().numpy().argsort()[::-1][:n_each_img]))
cls = dir.split("/")[-1] # class name
### Add the aggregated list of the class to the dict ###
top_feature_maps_dict_each_image[cls] = top_filters
print(cls + " done.")
########## Top Feature maps for EACH CLASS ##########
top_feature_map_dict_each_class = {} # dict to save top feature maps for each class
for cls in classes:
### Count the feature maps appearing in each class's aggregated list of top feature maps for ALL images ###
frequency_counters = Counter(top_feature_maps_dict_each_image[cls])
### Calculate the frequency ratio for each feature map
frequency_ratios = [frequency_counters[i]/n_imgs_dict[cls] if i in frequency_counters.keys() else 0. for i in range(n_maps_last_layer)]
### Add top n_each_class most frequent feature maps of the class to the dict ###
top_feature_map_dict_each_class[cls] = np.argsort(frequency_ratios)[::-1][:n_each_class]
### Eliminate feature maps that exist in more than one classes' top feature map lists ###
unique_top_feature_map_dict_each_class = {}
for cls in classes:
dict_without_this_class = {key:list(val) for key, val in top_feature_map_dict_each_class.items() if key != cls}
if len(classes) > 2:
unique_top_feature_map_dict_each_class[cls] = [map for map in top_feature_map_dict_each_class[cls] if map not in set(sum(dict_without_this_class.values(), []))]
elif len(classes) == 2:
unique_top_feature_map_dict_each_class[cls] = [map for map in top_feature_map_dict_each_class[cls] if map not in list(dict_without_this_class.values())[0]]
print("# of top feature maps:", {key:len(val) for key, val in unique_top_feature_map_dict_each_class.items()})
return unique_top_feature_map_dict_each_class
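    # --- Usage sketch for find_unique_filters (paths, class names and counts are placeholders) ---
    # `viz` is the FeatureMapVisualizer instance from the sketch above.
    #
    #   unique_maps = viz.find_unique_filters(
    #       layer=-2,                                  # last conv block for resnet50
    #       train_dir="train_data/",                   # note the trailing "/" (see docstring)
    #       classes=["cat", "dog"],
    #       n_imgs_dict={"cat": 1857, "dog": 955},
    #       n_each_img=25, n_each_class=25)
    #   # e.g. {"cat": [14, 203, ...], "dog": [87, 1501, ...]}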
def visualize_patterns(self,
layer,
filter_n,
init_size=33,
lr=0.2,
opt_steps=20,
upscaling_steps=20,
upscaling_factor=1.2,
print_loss=False,
plot=False):
'''
### VISUALIZATION #1 : ###
Visualize patterns captured by a single feature map
|| PARAMETERS ||
layer : (int) index of the convolutional layer to investigate feature maps
                        *For the last convolutional layer, use -2 for resnet50 & 12 for vgg16
filter_n : (int) index of the feature map to investigate in the layer
        init_size     : (int) initial length of the square random image
lr : (float) learning rate for pixel optimization
opt_steps : (int) number of optimization steps
upscaling_steps : (int) # of upscaling steps
upscaling_factor : (float) >1, upscale factor
print_loss : (bool) if True, log info at each optimizing iteration
                        *if the printed activation stays 0 for all iterations, something is wrong
plot : (bool) if True, plot the generated image at each optimizing iteration
'''
activations = self.register_hook(layer)
### Generate a random image ###
img = np.uint8(np.random.uniform(150, 180, (init_size, init_size, 3)))/255
sz = init_size
if print_loss:
plt.imshow(img)
plt.title("original random image")
plt.show()
### Upscale the image (upscaling_steps) times ###
for upscale_i in range(upscaling_steps):
            ### Attach gradients to the optimized image ###
img_var = torch.autograd.Variable(torch.Tensor(img.transpose((2,0,1))).cuda().unsqueeze(0), requires_grad=True)
### Define Optimizer to update the image pixels ###
optimizer = torch.optim.Adam([img_var], lr=lr, weight_decay=1e-6)
### Update the image's pixel values for (opt_steps) times ###
for n in range(opt_steps):
optimizer.zero_grad()
### Pass the image through the model ###
# Use sigmoid to restrict the image pixels between 0 and 1.
# Without sigmoid, the pixels can become negative.
self.model(torch.sigmoid(img_var))
### Maximize the activation of the (filter_n)th feature map of the requested layer ###
loss = -activations.features[0, filter_n].mean()
if plot:
plt.imshow(activations.features[0, filter_n].detach().cpu().numpy(), cmap="gray")
plt.show()
if print_loss:
print("whole layer shape:", activations.features.shape) # [1, n_filter, intermediate_H, intermediate_W]
print("intermediate feature shape:", activations.features[0, filter_n].shape)
print("parameters shape:", activations.params.shape)
print("activation:", activations.features[0, filter_n].mean().item())
loss.backward()
optimizer.step()
if print_loss:
print()
if upscale_i < upscaling_steps - 1:
img = img_var.detach().cpu().numpy()[0].transpose(1,2,0)
### Scale the optimized image up ###
sz = int(upscaling_factor * sz) # calculate new image size
img = cv2.resize(img, (sz, sz), interpolation = cv2.INTER_CUBIC)
else:
### for the last iteration, convert img_var into a numpy array ###
img = torch.sigmoid(img_var).detach().cpu().numpy()[0].transpose(1,2,0)
### Remove hook ###
activations.close()
### Save the generated image ###
        img_name = "layer_"+str(layer)+"_filter_"+str(filter_n)+".jpg"
plt.imsave(img_name, img)
return img, img_name
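    # --- Usage sketch for visualize_patterns (the filter index is a placeholder) ---
    #   img, img_name = viz.visualize_patterns(layer=-2, filter_n=1234,
    #                                          opt_steps=20, upscaling_steps=20)
    #   # `img` is an HxWx3 float array in [0, 1]; `img_name` is the saved .jpg filename.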
def make_img_var(self, img_path):
'''
Given a path to an image (str), convert the image into a PyTorch variable
'''
img = Image.open(img_path).convert('RGB')
transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
img = transform(img)[:3, :, :].unsqueeze(0)
img_var = torch.autograd.Variable(img.cuda(), requires_grad=True) if self.use_cuda else torch.autograd.Variable(img, requires_grad=True)
return img, img_var
def one_image_N_top_feature_maps(self,
layer,
img_path,
plot=True,
n=100,
n_plots_horiz=10,
n_plots_vert=10,
plot_h=50,
plot_w=50,
print_logits=False,
imagenet=False,
plot_overlay=True,
n_top_classes=5):
'''
### VISUALIZATION #2 : ###
1. Find top n feature maps for a single image.
2. Highlight each top feature map's most attended regions of the image
by overlaying its activation map on top of the image.
|| PARAMETERS ||
layer : (int) index of the convolutional layer to investigate feature maps
                        *For the LAST convolutional layer, use -2 for resnet50 & 12 for vgg16
img_path : (str) path to the image to investigate
plot : (bool) if True, plot the top N feature maps' activation maps on the image
/// MUST BE : n_plots_horiz * n_plots_vert = n ///
n : (int) # of top feature maps to plot
n_plots_horiz : (int) # of feature maps to plot horizontally
n_plots_vert : (int) # of feature maps to plot vertically
/// It's recommended that (n_plots_horiz/n_plots_vert) = (plot_h/plot_w) ///
plot_h : (int) height of the plot
plot_w : (int) width of the plot
print_logits : (bool) if True, print model logits (outputs) for the image
imagenet : (bool) if True, print_logits will print the logits for corresponding imagenet labels
plot_overlay : (bool) if True, overlay the top feature map on top of the image and plot the overlaid image
if False, plot the original feature map only
'''
activations = self.register_hook(layer)
### Convert the image into a pytorch variable ###
img, img_var = self.make_img_var(img_path)
### Pass the image through the model ###
logits = self.model(img_var)
### Save the activations of ALL feature maps in the requested convolutional layer ###
activations_list = activations.features[0].mean((1,2)).detach().cpu()
### Save only the top N most activated feature maps, in order of largest to smallest activations ###
topN_activated_feature_maps = np.array(activations_list).argsort()[::-1][:n]
if plot:
assert n_plots_horiz*n_plots_vert==n, "n_plots_horiz*n_plots_vert must be equal to n!"
### Show the input image ###
plt.imshow(np.transpose(img.squeeze(0).numpy(), (1,2,0)))
plt.title("original image")
plt.show()
### Print model outputs (logits) ###
if print_logits:
if imagenet:
### Download imagenet labels ###
from urllib.request import urlretrieve
os.makedirs("attention_data", exist_ok=True)
if not os.path.isfile("attention_data/ilsvrc2012_wordnet_lemmas.txt"):
urlretrieve("https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt", "attention_data/ilsvrc2012_wordnet_lemmas.txt")
if not os.path.isfile("attention_data/ViT-B_16-224.npz"):
urlretrieve("https://storage.googleapis.com/vit_models/imagenet21k+imagenet2012/ViT-B_16-224.npz", "attention_data/ViT-B_16-224.npz")
imagenet_labels = dict(enumerate(open('attention_data/ilsvrc2012_wordnet_lemmas.txt')))
probs = torch.nn.Softmax(dim=-1)(logits)
top = torch.argsort(probs, dim=-1, descending=True)
for idx in top[0, :n_top_classes]:
print(f'{probs[0, idx.item()]:.5f} : {imagenet_labels[idx.item()]}', end='')
else:
print("prediction: ", logits)
plt.figure(figsize=(plot_w, plot_h))
for top_i in range(n):
plt.subplot(n_plots_horiz, n_plots_vert, top_i+1)
plt.title("layer "+str(layer)+" filter "+str(topN_activated_feature_maps[top_i]))
if plot_overlay:
### Upscale the feature maps to match the image size ###
img_dim = img.size(-1)
mask = np.array(cv2.resize(activations.features[0, topN_activated_feature_maps[top_i]].detach().cpu().numpy(), (img_dim,img_dim)))
if self.model_type == "resnet":
mask = mask*2 ### double the mask signal for resnet50
### Overlay the mask on top of the image ###
overlay = np.array([ch * mask for ch in img.detach().cpu().squeeze(0).numpy()])
plt.imshow(np.transpose(np.clip(overlay,0,1), (1,2,0)), cmap="gray")
else:
mask = activations.features[0, topN_activated_feature_maps[top_i]].detach().cpu().numpy()
plt.imshow(mask, cmap="gray")
plt.show()
### Plot a line plot of average activations of ALL feature maps ###
if plot:
plt.plot(activations_list)
plt.xlabel("filter in layer "+str(layer))
plt.ylabel("mean activation")
plt.show()
### Return the activations of ALL feature maps in the requested convolutional layer ###
return activations_list
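    # --- Usage sketch for one_image_N_top_feature_maps (the image path is a placeholder) ---
    #   acts = viz.one_image_N_top_feature_maps(layer=-2, img_path="test_images/cat.jpg",
    #                                           plot=True, n=100,
    #                                           n_plots_horiz=10, n_plots_vert=10)
    #   top10 = acts.numpy().argsort()[::-1][:10]      # indices of the 10 most activated maps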
def one_feature_map_N_images(self,
layer,
dataloader,
filter_idx,
plot=True,
max_n_imgs_to_plot=100,
plot_overlay=True,
normalize=True,
folder="",
class_name=""):
'''
### VISUALIZATION #3 : ###
Given the index of the feature map to investigate (filter_idx),
plot its activation map for images in the dataloader.
|| PARAMETERS ||
layer : (int) index of the convolutional layer to investigate feature maps
                        *For the last convolutional layer, use -2 for resnet50 & 12 for vgg16
dataloader : (torch.utils.data.dataloader object) dataloader containing images to plot (usually images of a single class)
filter_idx : (int) index of the feature map to investigate in the layer
plot : (bool) if True, plot the feature maps' activation maps on images in the dataloader
max_n_imgs_to_plot : (int) maximum number of images to plot
plot_overlay : (bool) if True, overlay the top feature map on top of the image and plot the overlaid image
if False, plot the original feature map only
normalize : (bool) if True, normalize the mask feature map by dividing by maximum value
folder : (str) name of the folder to save images (only if you want to save the visualizations)
class_name : (str) name of the class the images belong to
'''
activations = self.register_hook(layer)
mean_activations_list = []
if plot:
            n_imgs = min(len(dataloader.dataset), max_n_imgs_to_plot)
n_plots_vert, n_plots_horiz = 10, 2*(int(n_imgs/10)+1)
plot_w, plot_h = 50, (50*n_plots_horiz/10) + 1
plt.figure(figsize=(plot_w, plot_h))
plot_i = 1
for batch_i, (img_batch, _) in enumerate(dataloader):
            if (plot is False) or (batch_i % 2 != 0):  # only process odd batches (not enough RAM)
b = img_batch.size(0)
if self.use_cuda:
img_batch = img_batch.cuda()
### Pass the batch of images through the model ###
self.model(img_batch)
### Save only the requested feature map's activation for the images ###
feat = activations.features[:, filter_idx]
for img_i in range(b):
### Compute the average of the 7x7 activation map ###
mean_activation = feat[img_i].mean((0,1)).item()
mean_activations_list.append(mean_activation)
if plot:
plt.subplot(n_plots_horiz, n_plots_vert, plot_i)
plt.imshow(np.transpose(img_batch[img_i].detach().cpu().numpy(), (1,2,0)))
plot_i += 1
plt.subplot(n_plots_horiz, n_plots_vert, plot_i)
plt.title(str(mean_activation), fontdict={'fontsize':20})
### Upscale the feature maps to match the image size ###
img_dim = img_batch[img_i].size(-1)
mask = np.array(cv2.resize(feat[img_i].detach().cpu().numpy(), (img_dim, img_dim)))
plt.axis("off")
if plot_overlay:
if self.model_type == "resnet":
mask = mask*2 ### double the mask signal for resnet50
else:
if normalize:
mask = mask/mask.max()
### Overlay the mask on top of the image ###
overlay = np.array([ch * mask for ch in img_batch[img_i].detach().cpu().squeeze(0).numpy()])
plt.imshow(np.transpose(np.clip(overlay, 0, 1), (1,2,0)), cmap="gray")
### Save the masked images ###
if folder:
if not os.path.exists(folder):
os.makedirs(folder)
if not os.path.exists(folder+ "/masked_" + class_name):
os.makedirs(folder+ "/masked_" + class_name)
plt.imsave(folder + "/masked_" + class_name + "_" + str(plot_i) + ".jpg",
np.transpose(np.clip(overlay, 0, 1), (1,2,0)))
else:
plt.imshow(mask, cmap="gray")
plot_i += 1
if plot:
plt.show()
return mean_activations_list
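    # --- Usage sketch for one_feature_map_N_images (the dataset path is a placeholder) ---
    #   from torchvision import datasets, transforms
    #   from torch.utils.data import DataLoader
    #   tfm = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
    #   cat_loader = DataLoader(datasets.ImageFolder("test_data/cats/", tfm),
    #                           batch_size=8, shuffle=False)
    #   means = viz.one_feature_map_N_images(layer=-2, dataloader=cat_loader,
    #                                        filter_idx=1234, plot=True)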
def M_feature_maps_N_images(self,
layer,
dataloader,
filter_idxs,
plot=True,
max_n_imgs_to_plot=100,
plot_overlay=True):
'''
### VISUALIZATION #4 : ###
Given the indices of MULTIPLE feature maps to investigate (filter_idxs),
plot the SUM of their activation maps (one on top of each other) for images in the dataloader.
|| PARAMETERS ||
layer : (int) index of the convolutional layer to investigate feature maps
                        *For the last convolutional layer, use -2 for resnet50 & 12 for vgg16
dataloader : (torch.utils.data.dataloader object) dataloader containing images to plot (usually images of a single class)
filter_idxs : (list of ints) index of the feature map to investigate in the layer
plot : (bool) if True, plot the feature maps' activation maps on images in the dataloader
max_n_imgs_to_plot : (int) maximum number of images to plot
plot_overlay : (bool) if True, overlay the top feature map on top of the image and plot the overlaid image
if False, plot the original feature map only
'''
activations = self.register_hook(layer)
mean_activations_list = []
if plot:
n_imgs = min(len(dataloader.dataset), max_n_imgs_to_plot)
n_plots_vert, n_plots_horiz = 10, 2*(int(n_imgs/10)+1)
plot_w, plot_h = 50, (50*n_plots_horiz/10) + 1
plt.figure(figsize=(plot_w, plot_h))
plot_i = 1
save_i = 1
for batch_i, (img, _) in enumerate(dataloader):
            if (plot is False) or (batch_i % 2 != 0):  # only process odd batches (not enough RAM)
b = img.size(0)
if self.use_cuda:
img = img.cuda()
self.model(img)
for img_i in range(b):
mask = np.zeros((224,224))
mean_activation = 0
for filter_idx in filter_idxs:
feat = activations.features[:, filter_idx]
mask += np.array(cv2.resize(feat[img_i].detach().cpu().numpy(), (224, 224)))
mean_activation += feat[img_i].mean((0,1)).item()
mean_activations_list.append(mean_activation)
overlay = np.array([ch * np.clip(mask, 0, 1) for ch in img[img_i].detach().cpu().squeeze(0).numpy()])
plt.imsave("masked_with_gun_filters/knife/masked_{}.jpg".format(save_i), np.transpose(np.clip(overlay, 0, 1)))
save_i+=1
if plot:
plt.subplot(n_plots_horiz, n_plots_vert, plot_i)
plt.imshow(np.transpose(img[img_i].detach().cpu().numpy(), (1,2,0)))
plot_i += 1
plt.subplot(n_plots_horiz, n_plots_vert, plot_i)
plt.title(str(mean_activation), fontdict={'fontsize':20})
plt.axis("off")
if plot_overlay:
#overlay = np.array([ch * np.clip(mask, 0, 1) for ch in img[img_i].detach().cpu().squeeze(0).numpy()])
plt.imshow(np.transpose(np.clip(overlay, 0, 1), (1,2,0)), cmap="gray")
plt.imsave("benign_inch/masked_{}.jpg".format(plot_i), np.transpose(np.clip(overlay, 0, 1)))
else:
plt.imshow(mask, cmap="gray")
plot_i += 1
if plot:
plt.show()
return mean_activations_list
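    # --- Usage sketch for M_feature_maps_N_images (filter indices are placeholders) ---
    # Note that this method also writes overlays to the hard-coded folders referenced above,
    # so those directories must exist before it is called.
    #
    #   means = viz.M_feature_maps_N_images(layer=-2, dataloader=cat_loader,
    #                                       filter_idxs=[1234, 87, 203], plot=True)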
def sum_top_feature_maps_by_class(self,
layer,
transform,
img_dir,
top_feature_maps_dict=None,
training_imgs_dir=None,
classes=None,
n_imgs_dict=None,
plot=True,
colours=[c[4:] for c in list(mcolors.TABLEAU_COLORS)]*1000):
'''
### Visualization #5 ###
Plot the SUM of activations of each class's top feature maps for each image,
for all classes in the same plot
|| PARAMETERS ||
layer : (int) if using last convolutional layer, use -2 for resnet & 12 for vgg16
transform : (torchvision.transforms object) transform to be applied to each test image
img_dir : (str) address of the folder containing image folders
*Image folders' names must be the same as target class names.
/// You MUST either pass `top_feature_maps_dict` or ALL of `train_dir`, `classes`, and `n_imgs_dict`. ///
top_feature_maps_dict : (dict) (key, value)=(class name, list of top feature maps for that class)
e.g. {"cat":[1,3,5], "dog":[2,4,8]}
train_dir : (str) address of the folder that contains training data including "/" at the end e.g. "train_data/"
classes : (list of strs) list containing (at least two) class names in string e.g. ["cat", "dog"]
n_imgs_dict : (dict) key : class name (str), value : # of training images for that class (int) e.g. {"dog":955, "cat":1857}
plot : (bool) show plots if True
'''
if top_feature_maps_dict is None:
top_feature_maps_dict = self.find_unique_filters(layer=layer,
train_dir=training_imgs_dir,
classes=classes,
n_imgs_dict=n_imgs_dict)
sum_dicts_dict = {} # will become a dict of dicts
classes = os.listdir(img_dir)
for cls_i, cls in enumerate(classes):
sum_lists_dict = {_cls:[] for _cls in top_feature_maps_dict.keys()}
for img_path in os.listdir(os.path.join(img_dir, cls)):
# read in the image and transform it into a torch tensor
full_img_path = os.path.join(img_dir, cls, img_path)
img = Image.open(full_img_path).convert('RGB')
img_var = transform(img)[:3, :, :].unsqueeze(0).cuda()
# compute the activations of all feature maps for the image
activations_list = self.one_image_N_top_feature_maps(layer, img_path=full_img_path, plot=False)
# save the sum of only the class top feature maps' activations for each class
for top_feature_map_cls in top_feature_maps_dict.keys():
sum_lists_dict[top_feature_map_cls].append(sum(activations_list[top_feature_maps_dict[top_feature_map_cls]]))
            sum_dicts_dict[cls] = sum_lists_dict
if plot:
c = {cls:colour for cls, colour in zip(classes, colours)}
for top_feature_map_cls in top_feature_maps_dict.keys():
plt.figure(figsize=(10,7))
for cls in classes:
plt.plot(sum_dicts_dict[cls][top_feature_map_cls], marker=".", color=c[cls])
plt.title(top_feature_map_cls+" activations")
plt.legend(classes)
plt.show()
return sum_dicts_dict
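# --- End-to-end usage sketch (directory layout and class names are placeholders) ---
# Assumes a test folder whose sub-folders are named after the classes
# (e.g. test_data/cat/ and test_data/dog/) and a transform matching make_img_var:
#
#   from torchvision import transforms
#   tfm = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
#   sums = viz.sum_top_feature_maps_by_class(
#       layer=-2, transform=tfm, img_dir="test_data/",
#       training_imgs_dir="train_data/", classes=["cat", "dog"],
#       n_imgs_dict={"cat": 1857, "dog": 955})
#   # sums["cat"]["cat"][i] is the summed activation of the cat-specific feature maps
#   # on the i-th image in test_data/cat/.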
thonny/plugins/backend/birdseye_backend.py | rjalif199/thonny | 2 | 6630964 |
import os
from thonny.plugins.cpython.cpython_backend import (
get_backend,
Executor,
return_execution_result,
prepare_hooks,
)
def _cmd_Birdseye(cmd):
backend = get_backend()
backend.switch_env_to_script_mode(cmd)
return backend._execute_file(cmd, BirdsEyeRunner)
class BirdsEyeRunner(Executor):
@return_execution_result
@prepare_hooks
def execute_source(self, source, filename, mode, ast_postprocessors):
import webbrowser
assert mode == "exec"
# ignore ast_postprocessors, because birdseye requires source
if isinstance(source, bytes):
source = source.decode("utf-8")
import __main__ # @UnresolvedImport
global_vars = __main__.__dict__
# Following is a trick, which allows importing birdseye in the backends,
# which doesn't have it installed (provided it is installed for frontend Python)
from birdseye.bird import eye
eye.exec_string(source, filename, globs=global_vars, locs=global_vars, deep=True)
port = os.environ.get("BIRDSEYE_PORT", "7777")
webbrowser.open_new_tab("http://localhost:%s/ipython_call/" % port + eye._last_call_id)
def load_plugin():
try:
os.environ["OUTDATED_IGNORE"] = "1"
# TODO: it would be good to do this here, but it's slow
# import birdseye.bird # need to import at plugin load time, because later it may not be in path
except ImportError:
pass
get_backend().add_command("Birdseye", _cmd_Birdseye)
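# --- Usage note (a sketch based only on the code above) ---
# After each run the backend opens http://localhost:<BIRDSEYE_PORT>/ipython_call/<call_id>,
# so a birdseye server is expected to be listening on that port (7777 by default), e.g.:
#
#   import os
#   os.environ["BIRDSEYE_PORT"] = "7777"   # must match the port the birdseye server uses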
backend/mythbusters/users/views/users_views.py | MayankJ99/MythBuster | 0 | 6630965 |
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from users.serializers import *
from users.models import *
# Create your views here.
from rest_framework import status
from django.contrib.auth.hashers import make_password
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
serializer = UserSerializerWithToken(self.user).data
for k, v in serializer.items():
data[k] = v
return data
class MyTokenObtainPairView(TokenObtainPairView):
serializer_class = MyTokenObtainPairSerializer
#create a GET view to return all users from the User model in Django
@api_view(['GET'])
def get_users(request):
users = CurrentUser.objects.all()
serializer = UserSerializer(users, many=True)
return Response(serializer.data)
#create a GET view to return one particular user from the User model in Django using the pk
@api_view(['GET'])
def get_user_profile(request, pk):
user = CurrentUser.objects.get(pk=pk)
serializer = UserProfileSerializer(user)
return Response(serializer.data)
#create a GET view called get_current_user_profile that will be a protected route to return the current user's profile
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def get_current_user_profile(request):
print(request.user)
user = CurrentUser.objects.get(pk=request.user.id)
serializer = UserProfileSerializer(user)
return Response(serializer.data)
#create a POST view to create a new user in the User model in Django that also creates a UserProfile object associated with the newly created user
@api_view(['POST'])
def create_user(request):
try:
data = request.data
print(data)
user = CurrentUser.objects.create(
username=data['username'],
email = data['email'],
first_name = data['first_name'],
last_name = data['last_name'],
password=make_password(data['password']),
)
user_profile = UserProfile.objects.create(
user=user,
bio=data['bio'],
linkedin=data['linkedin'],
twitter = data['twitter'],
github = data['github'],
occupation = data['occupation'],
location = data['location'],
website = data['website'],
education = data['education'],
)
user.save()
user_profile.save()
serializer = UserSerializerWithToken(user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception as err:
print(err)
message = {'detail': 'User with this email already exists'}
return Response(message, status=status.HTTP_400_BAD_REQUEST)
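# Example request body this POST endpoint expects; the field names are taken
# directly from the code above, the values are illustrative only.
#
#     {
#         "username": "jdoe", "email": "jdoe@example.com", "password": "s3cret",
#         "first_name": "Jane", "last_name": "Doe",
#         "bio": "", "linkedin": "", "twitter": "", "github": "",
#         "occupation": "", "location": "", "website": "", "education": ""
#     }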
#create a PUT call to update a user in the User model in Django. It should include a permission for isauth users to update their own profile
#the PUT call should update the UserProfile model as well. It should only update the fields that are non empty in the request data
@api_view(['PUT'])
@permission_classes((IsAuthenticated,))
def update_user(request):
try:
user = CurrentUser.objects.get(pk=request.user.id)
user_profile = UserProfile.objects.get(user=user)
data = request.data
# if data['username'] != '':
# user.username = data['username']
# if data['email'] != '':
# user.email = data['email']
# if data['first_name'] != '':
# user.first_name = data['first_name']
# if data['last_name'] != '':
# user.last_name = data['last_name']
# if data['bio'] != '':
# user_profile.bio = data['bio']
# if data['linkedin'] != '':
# user_profile.linkedin = data['linkedin']
# if data['twitter'] != '':
# user_profile.twitter = data['twitter']
# if data['github'] != '':
# user_profile.github = data['github']
# if data['occupation'] != '':
# user_profile.occupation = data['occupation']
# if data['location'] != '':
# user_profile.location = data['location']
# if data['website'] != '':
# user_profile.website = data['website']
# if data['education'] != '':
# user_profile.education = data['education']
if 'username' in data:
user.username = data['username']
if 'email' in data:
user.email = data['email']
if 'first_name' in data:
user.first_name = data['first_name']
if 'last_name' in data:
user.last_name = data['last_name']
if 'bio' in data:
user_profile.bio = data['bio']
if 'linkedin' in data:
user_profile.linkedin = data['linkedin']
if 'profile_image' in data:
user_profile.profile_image = data['profile_image']
if 'twitter' in data:
user_profile.twitter = data['twitter']
if 'github' in data:
user_profile.github = data['github']
if 'occupation' in data:
user_profile.occupation = data['occupation']
if 'location' in data:
user_profile.location = data['location']
if 'website' in data:
user_profile.website = data['website']
if 'education' in data:
user_profile.education = data['education']
user.save()
user_profile.save()
serializer = UserProfileSerializer(user)
        return Response(serializer.data, status=status.HTTP_200_OK)  # updating an existing user, so 200 rather than 201
except Exception as err:
message = {"error" : err}
return Response(message, status=status.HTTP_400_BAD_REQUEST)
#create a DELETE call to delete the current user from the User model in Django. Must be protected by a permission class
@api_view(['DELETE'])
@permission_classes((IsAuthenticated,))
def delete_user(request):
try:
user = request.user
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception as err:
message = {"error" : err}
return Response(message, status=status.HTTP_400_BAD_REQUEST)
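# A hedged urls.py sketch for the function-based views in this module (route
# paths and the import path are assumptions; only the view names come from the
# code above):
#
#     from django.urls import path
#     from users.views import users_views
#
#     urlpatterns = [
#         path('users/', users_views.get_users),
#         path('users/<int:pk>/', users_views.get_user_profile),
#         path('users/me/', users_views.get_current_user_profile),
#         path('users/register/', users_views.create_user),
#         path('users/update/', users_views.update_user),
#         path('users/delete/', users_views.delete_user),
#     ]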
| from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from users.serializers import *
from users.models import *
# Create your views here.
from rest_framework import status
from django.contrib.auth.hashers import make_password
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
serializer = UserSerializerWithToken(self.user).data
for k, v in serializer.items():
data[k] = v
return data
class MyTokenObtainPairView(TokenObtainPairView):
serializer_class = MyTokenObtainPairSerializer
#create a GET view to return all users from the User model in Django
@api_view(['GET'])
def get_users(request):
users = CurrentUser.objects.all()
serializer = UserSerializer(users, many=True)
return Response(serializer.data)
#create a GET view to return one particular user from the User model in Django using the pk
@api_view(['GET'])
def get_user_profile(request, pk):
user = CurrentUser.objects.get(pk=pk)
serializer = UserProfileSerializer(user)
return Response(serializer.data)
#create a GET view called get_current_user_profile that will be a protected route to return the current user's profile
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def get_current_user_profile(request):
print(request.user)
user = CurrentUser.objects.get(pk=request.user.id)
serializer = UserProfileSerializer(user)
return Response(serializer.data)
#create a POST view to create a new user in the User model in Django that also creates a UserProfile object associated with the newly created user
@api_view(['POST'])
def create_user(request):
try:
data = request.data
print(data)
user = CurrentUser.objects.create(
username=data['username'],
email = data['email'],
first_name = data['first_name'],
last_name = data['last_name'],
password=make_password(data['password']),
)
user_profile = UserProfile.objects.create(
user=user,
bio=data['bio'],
linkedin=data['linkedin'],
twitter = data['twitter'],
github = data['github'],
occupation = data['occupation'],
location = data['location'],
website = data['website'],
education = data['education'],
)
user.save()
user_profile.save()
serializer = UserSerializerWithToken(user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception as err:
print(err)
message = {'detail': 'User with this email already exists'}
return Response(message, status=status.HTTP_400_BAD_REQUEST)
#create a PUT call to update a user in the User model in Django. It should include a permission for isauth users to update their own profile
#the PUT call should update the UserProfile model as well. It should only update the fields that are non empty in the request data
@api_view(['PUT'])
@permission_classes((IsAuthenticated,))
def update_user(request):
try:
user = CurrentUser.objects.get(pk=request.user.id)
user_profile = UserProfile.objects.get(user=user)
data = request.data
# if data['username'] != '':
# user.username = data['username']
# if data['email'] != '':
# user.email = data['email']
# if data['first_name'] != '':
# user.first_name = data['first_name']
# if data['last_name'] != '':
# user.last_name = data['last_name']
# if data['bio'] != '':
# user_profile.bio = data['bio']
# if data['linkedin'] != '':
# user_profile.linkedin = data['linkedin']
# if data['twitter'] != '':
# user_profile.twitter = data['twitter']
# if data['github'] != '':
# user_profile.github = data['github']
# if data['occupation'] != '':
# user_profile.occupation = data['occupation']
# if data['location'] != '':
# user_profile.location = data['location']
# if data['website'] != '':
# user_profile.website = data['website']
# if data['education'] != '':
# user_profile.education = data['education']
if 'username' in data:
user.username = data['username']
if 'email' in data:
user.email = data['email']
if 'first_name' in data:
user.first_name = data['first_name']
if 'last_name' in data:
user.last_name = data['last_name']
if 'bio' in data:
user_profile.bio = data['bio']
if 'linkedin' in data:
user_profile.linkedin = data['linkedin']
if 'profile_image' in data:
user_profile.profile_image = data['profile_image']
if 'twitter' in data:
user_profile.twitter = data['twitter']
if 'github' in data:
user_profile.github = data['github']
if 'occupation' in data:
user_profile.occupation = data['occupation']
if 'location' in data:
user_profile.location = data['location']
if 'website' in data:
user_profile.website = data['website']
if 'education' in data:
user_profile.education = data['education']
user.save()
user_profile.save()
serializer = UserProfileSerializer(user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception as err:
message = {"error" : err}
return Response(message, status=status.HTTP_400_BAD_REQUEST)
#create a DELETE call to delete the current user from the User model in Django. Must be protected by a permission class
@api_view(['DELETE'])
@permission_classes((IsAuthenticated,))
def delete_user(request):
try:
user = request.user
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception as err:
message = {"error" : err}
return Response(message, status=status.HTTP_400_BAD_REQUEST)
| en | 0.697569 | # Create your views here. #create a GET view to return all users from the User model in Django #create a GET view to return one particular user from the User model in Django using the pk #create a GET view called get_current_user_profile that will be a protected route to return the current user's profile #create a POST view to create a new user in the User model in Django that also creates a UserProfile object associated with the newly created user #create a PUT call to update a user in the User model in Django. It should include a permission for isauth users to update their own profile #the PUT call should update the UserProfile model as well. It should only update the fields that are non empty in the request data # if data['username'] != '': # user.username = data['username'] # if data['email'] != '': # user.email = data['email'] # if data['first_name'] != '': # user.first_name = data['first_name'] # if data['last_name'] != '': # user.last_name = data['last_name'] # if data['bio'] != '': # user_profile.bio = data['bio'] # if data['linkedin'] != '': # user_profile.linkedin = data['linkedin'] # if data['twitter'] != '': # user_profile.twitter = data['twitter'] # if data['github'] != '': # user_profile.github = data['github'] # if data['occupation'] != '': # user_profile.occupation = data['occupation'] # if data['location'] != '': # user_profile.location = data['location'] # if data['website'] != '': # user_profile.website = data['website'] # if data['education'] != '': # user_profile.education = data['education'] #create a DELETE call to delete the current user from the User model in Django. Must be protected by a permission class | 2.223803 | 2 |
togglcmder/toggl/decoders/time_entry_decoder.py | yatesjr/toggl-cmder | 3 | 6630966 | from json import JSONDecoder
from togglcmder.toggl.builders.time_entry_builder import TimeEntryBuilder
class TimeEntryDecoder(JSONDecoder):
def __init__(self, *args: tuple, **kwargs: dict):
JSONDecoder.__init__(self,
object_hook=TimeEntryDecoder.object_hook,
*args,
**kwargs)
@staticmethod
def object_hook(obj: dict):
if 'data' in obj:
return obj['data']
if 'id' in obj:
return TimeEntryBuilder()\
.identifier(obj['id'])\
.description(obj.get('description', None))\
.workspace_identifier(obj['wid'])\
.project_identifier(obj.get('pid', None))\
.start_time(start_time=obj['start'])\
.duration(obj.get('duration', None))\
.stop_time(stop_time=obj.get('stop', None))\
.tags(obj.get('tags', None))\
.last_updated(last_update=obj.get('at', None))\
.build()
return obj
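# A hedged usage sketch: the decoder plugs into the standard json API, e.g.
#
#     import json
#     entries = json.loads(raw_toggl_response, cls=TimeEntryDecoder)
#
# where raw_toggl_response is the JSON text returned by Toggl's time-entries
# endpoint (the variable name is illustrative).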
| from json import JSONDecoder
from togglcmder.toggl.builders.time_entry_builder import TimeEntryBuilder
class TimeEntryDecoder(JSONDecoder):
def __init__(self, *args: tuple, **kwargs: dict):
JSONDecoder.__init__(self,
object_hook=TimeEntryDecoder.object_hook,
*args,
**kwargs)
@staticmethod
def object_hook(obj: dict):
if 'data' in obj:
return obj['data']
if 'id' in obj:
return TimeEntryBuilder()\
.identifier(obj['id'])\
.description(obj.get('description', None))\
.workspace_identifier(obj['wid'])\
.project_identifier(obj.get('pid', None))\
.start_time(start_time=obj['start'])\
.duration(obj.get('duration', None))\
.stop_time(stop_time=obj.get('stop', None))\
.tags(obj.get('tags', None))\
.last_updated(last_update=obj.get('at', None))\
.build()
return obj
| none | 1 | 2.406256 | 2 |
|
pandas/core/algorithms.py | flexlee/pandas | 0 | 6630967 | <gh_stars>0
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
import numpy as np
import pandas.core.common as com
import pandas.lib as lib
import pandas._algos as _algos
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
Returns
-------
match : ndarray of integers
"""
values = com._asarray_tuplesafe(values)
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype='O')
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
return _hashtable_algo(f, values.dtype)
def unique(values):
"""
Compute unique values (not necessarily sorted) efficiently from input array
of values
Parameters
----------
values : array-like
Returns
-------
uniques
"""
values = com._asarray_tuplesafe(values)
f = lambda htype, caster: _unique_generic(values, htype, caster)
return _hashtable_algo(f, values.dtype)
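# A hedged usage sketch of the two helpers above (outputs are illustrative):
#
#     >>> unique(np.array(['a', 'b', 'a']))          # -> array of the distinct values
#     >>> match(['b', 'c'], np.array(['a', 'b']))    # -> array([ 1, -1])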
# def count(values, uniques=None):
# f = lambda htype, caster: _count_generic(values, htype, caster)
# if uniques is not None:
# raise NotImplementedError
# else:
# return _hashtable_algo(f, values.dtype)
def _hashtable_algo(f, dtype):
"""
f(HashTable, type_caster) -> result
"""
if com.is_float_dtype(dtype):
return f(lib.Float64HashTable, com._ensure_float64)
elif com.is_integer_dtype(dtype):
return f(lib.Int64HashTable, com._ensure_int64)
else:
return f(lib.PyObjectHashTable, com._ensure_object)
def _count_generic(values, table_type, type_caster):
from pandas.core.series import Series
values = type_caster(values)
table = table_type(min(len(values), 1000000))
uniques, labels = table.factorize(values)
return Series(counts, index=uniques)
def _match_generic(values, index, table_type, type_caster):
values = type_caster(values)
index = type_caster(index)
table = table_type(min(len(index), 1000000))
table.map_locations(index)
return table.lookup(values)
def _unique_generic(values, table_type, type_caster):
values = type_caster(values)
table = table_type(min(len(values), 1000000))
uniques = table.unique(values)
return type_caster(uniques)
def factorize(values, sort=False, order=None, na_sentinel=-1):
"""
Encode input values as an enumerated type or categorical variable
Parameters
----------
values : sequence
sort :
order :
Returns
-------
"""
values = np.asarray(values)
is_datetime = com.is_datetime64_dtype(values)
(hash_klass, vec_klass), values = _get_data_algo(values, _hashtables)
table = hash_klass(len(values))
uniques = vec_klass()
labels = table.get_labels(values, uniques, 0, na_sentinel)
labels = com._ensure_platform_int(labels)
uniques = uniques.to_array()
if sort and len(uniques) > 0:
sorter = uniques.argsort()
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
labels = reverse_indexer.take(labels)
np.putmask(labels, mask, -1)
uniques = uniques.take(sorter)
if is_datetime:
uniques = uniques.view('M8[ns]')
return labels, uniques
def value_counts(values, sort=True, ascending=False):
"""
Compute a histogram of the counts of non-null values
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series
from collections import defaultdict
values = np.asarray(values)
if com.is_integer_dtype(values.dtype):
values = com._ensure_int64(values)
keys, counts = lib.value_count_int64(values)
result = Series(counts, index=keys)
else:
counter = defaultdict(lambda: 0)
values = values[com.notnull(values)]
for value in values:
counter[value] += 1
result = Series(counter)
if sort:
result.sort()
if not ascending:
result = result[::-1]
return result
def rank(values, axis=0, method='average', na_option='keep',
ascending=True):
"""
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option)
return ranks
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
a : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
-lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = com.isnull(x)
    x = x[~mask]  # keep non-null values; boolean masks need ~ rather than unary minus
values = np.sort(x)
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if (idx % 1 == 0):
score = values[idx]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if np.isscalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return _algos.arrmap_float64(q, _get_score)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_data_algo(values, func_map):
if com.is_float_dtype(values):
f = func_map['float64']
values = com._ensure_float64(values)
elif com.is_datetime64_dtype(values):
f = func_map['int64']
values = values.view('i8')
elif com.is_integer_dtype(values):
f = func_map['int64']
values = com._ensure_int64(values)
else:
f = func_map['generic']
values = com._ensure_object(values)
return f, values
def group_position(*args):
"""
Get group position
"""
from collections import defaultdict
table = defaultdict(int)
result = []
for tup in zip(*args):
result.append(table[tup])
table[tup] += 1
return result
_rank1d_functions = {
'float64': lib.rank_1d_float64,
'int64': lib.rank_1d_int64,
'generic': lib.rank_1d_generic
}
_rank2d_functions = {
'float64': lib.rank_2d_float64,
'int64': lib.rank_2d_int64,
'generic': lib.rank_2d_generic
}
_hashtables = {
'float64': (lib.Float64HashTable, lib.Float64Vector),
'int64': (lib.Int64HashTable, lib.Int64Vector),
'generic': (lib.PyObjectHashTable, lib.ObjectVector)
}
| """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
import numpy as np
import pandas.core.common as com
import pandas.lib as lib
import pandas._algos as _algos
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
Returns
-------
match : ndarray of integers
"""
values = com._asarray_tuplesafe(values)
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype='O')
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
return _hashtable_algo(f, values.dtype)
def unique(values):
"""
Compute unique values (not necessarily sorted) efficiently from input array
of values
Parameters
----------
values : array-like
Returns
-------
uniques
"""
values = com._asarray_tuplesafe(values)
f = lambda htype, caster: _unique_generic(values, htype, caster)
return _hashtable_algo(f, values.dtype)
# def count(values, uniques=None):
# f = lambda htype, caster: _count_generic(values, htype, caster)
# if uniques is not None:
# raise NotImplementedError
# else:
# return _hashtable_algo(f, values.dtype)
def _hashtable_algo(f, dtype):
"""
f(HashTable, type_caster) -> result
"""
if com.is_float_dtype(dtype):
return f(lib.Float64HashTable, com._ensure_float64)
elif com.is_integer_dtype(dtype):
return f(lib.Int64HashTable, com._ensure_int64)
else:
return f(lib.PyObjectHashTable, com._ensure_object)
def _count_generic(values, table_type, type_caster):
from pandas.core.series import Series
values = type_caster(values)
table = table_type(min(len(values), 1000000))
uniques, labels = table.factorize(values)
return Series(counts, index=uniques)
def _match_generic(values, index, table_type, type_caster):
values = type_caster(values)
index = type_caster(index)
table = table_type(min(len(index), 1000000))
table.map_locations(index)
return table.lookup(values)
def _unique_generic(values, table_type, type_caster):
values = type_caster(values)
table = table_type(min(len(values), 1000000))
uniques = table.unique(values)
return type_caster(uniques)
def factorize(values, sort=False, order=None, na_sentinel=-1):
"""
Encode input values as an enumerated type or categorical variable
Parameters
----------
values : sequence
sort :
order :
Returns
-------
"""
values = np.asarray(values)
is_datetime = com.is_datetime64_dtype(values)
(hash_klass, vec_klass), values = _get_data_algo(values, _hashtables)
table = hash_klass(len(values))
uniques = vec_klass()
labels = table.get_labels(values, uniques, 0, na_sentinel)
labels = com._ensure_platform_int(labels)
uniques = uniques.to_array()
if sort and len(uniques) > 0:
sorter = uniques.argsort()
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
labels = reverse_indexer.take(labels)
np.putmask(labels, mask, -1)
uniques = uniques.take(sorter)
if is_datetime:
uniques = uniques.view('M8[ns]')
return labels, uniques
def value_counts(values, sort=True, ascending=False):
"""
Compute a histogram of the counts of non-null values
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series
from collections import defaultdict
values = np.asarray(values)
if com.is_integer_dtype(values.dtype):
values = com._ensure_int64(values)
keys, counts = lib.value_count_int64(values)
result = Series(counts, index=keys)
else:
counter = defaultdict(lambda: 0)
values = values[com.notnull(values)]
for value in values:
counter[value] += 1
result = Series(counter)
if sort:
result.sort()
if not ascending:
result = result[::-1]
return result
def rank(values, axis=0, method='average', na_option='keep',
ascending=True):
"""
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option)
return ranks
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
a : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
-lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = com.isnull(x)
x = x[-mask]
values = np.sort(x)
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if (idx % 1 == 0):
score = values[idx]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if np.isscalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return _algos.arrmap_float64(q, _get_score)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_data_algo(values, func_map):
if com.is_float_dtype(values):
f = func_map['float64']
values = com._ensure_float64(values)
elif com.is_datetime64_dtype(values):
f = func_map['int64']
values = values.view('i8')
elif com.is_integer_dtype(values):
f = func_map['int64']
values = com._ensure_int64(values)
else:
f = func_map['generic']
values = com._ensure_object(values)
return f, values
def group_position(*args):
"""
Get group position
"""
from collections import defaultdict
table = defaultdict(int)
result = []
for tup in zip(*args):
result.append(table[tup])
table[tup] += 1
return result
_rank1d_functions = {
'float64': lib.rank_1d_float64,
'int64': lib.rank_1d_int64,
'generic': lib.rank_1d_generic
}
_rank2d_functions = {
'float64': lib.rank_2d_float64,
'int64': lib.rank_2d_int64,
'generic': lib.rank_2d_generic
}
_hashtables = {
'float64': (lib.Float64HashTable, lib.Float64Vector),
'int64': (lib.Int64HashTable, lib.Int64Vector),
'generic': (lib.PyObjectHashTable, lib.ObjectVector)
} | en | 0.497672 | Generic data algorithms. This module is experimental at the moment and not intended for public consumption Compute locations of to_match into values Parameters ---------- to_match : array-like values to find positions of values : array-like Unique set of values na_sentinel : int, default -1 Value to mark "not found" Examples -------- Returns ------- match : ndarray of integers Compute unique values (not necessarily sorted) efficiently from input array of values Parameters ---------- values : array-like Returns ------- uniques # def count(values, uniques=None): # f = lambda htype, caster: _count_generic(values, htype, caster) # if uniques is not None: # raise NotImplementedError # else: # return _hashtable_algo(f, values.dtype) f(HashTable, type_caster) -> result Encode input values as an enumerated type or categorical variable Parameters ---------- values : sequence sort : order : Returns ------- Compute a histogram of the counts of non-null values Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order Returns ------- value_counts : Series Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only, if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- a : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. -lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 Returns the point at the given fraction between a and b, where 'fraction' must be between 0 and 1. Get group position | 2.931167 | 3 |
Solutions/p0017.py | JCMarcoG/project_euler | 0 | 6630968 | # Solution to Problem 0015
def solution():
#dictionary to store the values
dic = {n:0 for n in range(0,1001)}
#initial values manually
dic[0] = 0 #''
dic[1] = 3 #'one'
dic[2] = 3 #'two'
dic[3] = 5 #'three'
dic[4] = 4 #'four'
dic[5] = 4 #'five'
dic[6] = 3 #'six'
dic[7] = 5 #'seven'
dic[8] = 5 #'eight'
dic[9] = 4 #'nine'
dic[10] = 3 #'ten'
dic[11] = 6 #'eleven'
dic[12] = 6 #'twelve'
dic[13] = 8 #'thirteen'
dic[14] = 8 #'fourteen'
dic[15] = 7 #'fifteen'
dic[16] = 7 #'sixteen'
dic[17] = 9 #'seventeen'
dic[18] = 8 #'eighteen'
dic[19] = 8 #'nineteen'
dic[20] = 6 #'twenty'
dic[30] = 6 #'thirty'
dic[40] = 5 #'forty'
dic[50] = 5 #'fifty'
dic[60] = 5 #'sixty'
dic[70] = 7 #'seventy'
dic[80] = 6 #'eighty'
dic[90] = 6 #'ninety'
#for loop to generate the values for 21-99 as we have already entered the values under 20 manually
for i in range(21,100):
tens = int(i/10)*10
ones = i - tens
dic[i] = dic[tens]+dic[ones]
#for loop to generate values for 100-999
for i in range(100,1000):
hundreds = int(i/100)
tens_ones = i - hundreds*100
#if the value of tens and ones place is 0 just use 'hundred' instead of 'and hundred'
if tens_ones == 0:
dic[i] = dic[hundreds] + 7#'hundred'
else:
#10 refers - 'and hundred'
dic[i] = dic[hundreds] +10+dic[tens_ones]
dic[1000] = 11 #'one thousand'
#return solution
return sum(dic.values())
if __name__ == "__main__":
print(solution()) | # Solution to Problem 0015
def solution():
#dictionary to store the values
dic = {n:0 for n in range(0,1001)}
#initial values manually
dic[0] = 0 #''
dic[1] = 3 #'one'
dic[2] = 3 #'two'
dic[3] = 5 #'three'
dic[4] = 4 #'four'
dic[5] = 4 #'five'
dic[6] = 3 #'six'
dic[7] = 5 #'seven'
dic[8] = 5 #'eight'
dic[9] = 4 #'nine'
dic[10] = 3 #'ten'
dic[11] = 6 #'eleven'
dic[12] = 6 #'twelve'
dic[13] = 8 #'thirteen'
dic[14] = 8 #'fourteen'
dic[15] = 7 #'fifteen'
dic[16] = 7 #'sixteen'
dic[17] = 9 #'seventeen'
dic[18] = 8 #'eighteen'
dic[19] = 8 #'nineteen'
dic[20] = 6 #'twenty'
dic[30] = 6 #'thirty'
dic[40] = 5 #'forty'
dic[50] = 5 #'fifty'
dic[60] = 5 #'sixty'
dic[70] = 7 #'seventy'
dic[80] = 6 #'eighty'
dic[90] = 6 #'ninety'
#for loop to generate the values for 21-99 as we have already entered the values under 20 manually
for i in range(21,100):
tens = int(i/10)*10
ones = i - tens
dic[i] = dic[tens]+dic[ones]
#for loop to generate values for 100-999
for i in range(100,1000):
hundreds = int(i/100)
tens_ones = i - hundreds*100
#if the value of tens and ones place is 0 just use 'hundred' instead of 'and hundred'
if tens_ones == 0:
dic[i] = dic[hundreds] + 7#'hundred'
else:
#10 refers - 'and hundred'
dic[i] = dic[hundreds] +10+dic[tens_ones]
dic[1000] = 11 #'one thousand'
#return solution
return sum(dic.values())
if __name__ == "__main__":
print(solution()) | en | 0.342679 | # Solution to Problem 0015 #dictionary to store the values #initial values manually #'' #'one' #'two' #'three' #'four' #'five' #'six' #'seven' #'eight' #'nine' #'ten' #'eleven' #'twelve' #'thirteen' #'fourteen' #'fifteen' #'sixteen' #'seventeen' #'eighteen' #'nineteen' #'twenty' #'thirty' #'forty' #'fifty' #'sixty' #'seventy' #'eighty' #'ninety' #for loop to generate the values for 21-99 as we have already entered the values under 20 manually #for loop to generate values for 100-999 #if the value of tens and ones place is 0 just use 'hundred' instead of 'and hundred' #'hundred' #10 refers - 'and hundred' #'one thousand' #return solution | 3.098328 | 3 |
pdia/responseReconstruction/parseBQResponses.py | yangjiang001/pdia-1 | 0 | 6630969 |
# coding: utf-8
# # Reconstructing 2017 SQ Responses
#
# ```
# <NAME>
# 2017-07-14
# ```
#
import sys
import pandas as pd
from pdia.responseReconstruction.extractBQChoice import parseBQChoice
from pdia.responseReconstruction.extractBQMC import parseBQMC
from pdia.responseReconstruction.extractBQNumeric import parseBQNumeric
def parseStrSQResponses(df,
config=None,
label="ItemTypeCode",
outputCol = "Answer"):
"""Parse the SQ response data, extract the responses from the JSON data
:param df: the input data frame
:type df: Pandas data frame
:param label: optional, name of the column indicating the item type, which determines how to parse.
:type label: string
:param config: optional configuation object; default to None
:type config: object or None
:returns: df with Response.PartId, Response.Index, value
:rtype: Pandas data frame
"""
assert (isinstance(df, pd.DataFrame))
assert (label in df.columns)
if config is None:
config = {
"handlers": {
"BQNumeric": parseBQNumeric,
"BQChoices": parseBQChoice,
"BQMCSS": parseBQMC,
"BQMCMS": parseBQMC
}
}
# check to see if there are events not handled
#print config["handlers"]
#print "Events in the data frame: {}".format(df[label].unique().tolist())
#print "Events to be handled: {}".format(config["handlers"].keys())
if len(set(df[label].unique().tolist())-set(config["handlers"].keys()))>0:
print("Not all item types are handled!\n{}"\
.format(set(df[label].unique().tolist())-set(config["handlers"].keys())))
# now let's revert the config, to get `parser:[list of labels]`
funcMap = {}
for k, v in config["handlers"].items():
funcMap[v] = funcMap.get(v, []) + [k]
# add a output
# we now loop through all funcMap elements and do the conversion
for parser, eventList in funcMap.items():
idx = df.loc[:, label].isin(eventList)
df.loc[idx, outputCol] = df.loc[idx, "Response"].apply(parser)
return df
if __name__ == '__main__':
if len(sys.argv)<2:
print("Usage: python {} csvFileName.csv".format(sys.argv[0]))
exit()
dataFileName = sys.argv[1]
df = pd.read_csv(dataFileName, sep="\t", header=None,
names=["ItemResponseId","SubjectName","Grade","BookletNumber",
"BlockCode","AccessionNumber","ItemTypeCode","IsAnswered",
"IsSkipped","Response"])
res = parseStrSQResponses(df)
# looking for duplicated responses
res.loc[res.duplicated([ 'BookletNumber', 'AccessionNumber'], keep=False)]\
.sort_values([ 'BookletNumber', 'AccessionNumber'])\
.to_csv(dataFileName.replace(".csv", "")+'_DuplicatedResponses.csv')
dfByAccNum = res.drop_duplicates([ 'BookletNumber', 'AccessionNumber'])\
.pivot(columns='AccessionNumber', index="BookletNumber", values="Answer")
# saving to a bunch of csv files
res.to_csv(dataFileName.replace(".csv", "")+'_Responses.csv')
dfByAccNum.to_csv(dataFileName.replace(".csv", "")+'_Responses_byAccNum.csv') |
# coding: utf-8
# # Reconstructing 2017 SQ Responses
#
# ```
# <NAME>
# 2017-07-14
# ```
#
import sys
import pandas as pd
from pdia.responseReconstruction.extractBQChoice import parseBQChoice
from pdia.responseReconstruction.extractBQMC import parseBQMC
from pdia.responseReconstruction.extractBQNumeric import parseBQNumeric
def parseStrSQResponses(df,
config=None,
label="ItemTypeCode",
outputCol = "Answer"):
"""Parse the SQ response data, extract the responses from the JSON data
:param df: the input data frame
:type df: Pandas data frame
:param label: optional, name of the column indicating the item type, which determines how to parse.
:type label: string
:param config: optional configuation object; default to None
:type config: object or None
:returns: df with Response.PartId, Response.Index, value
:rtype: Pandas data frame
"""
assert (isinstance(df, pd.DataFrame))
assert (label in df.columns)
if config is None:
config = {
"handlers": {
"BQNumeric": parseBQNumeric,
"BQChoices": parseBQChoice,
"BQMCSS": parseBQMC,
"BQMCMS": parseBQMC
}
}
# check to see if there are events not handled
#print config["handlers"]
#print "Events in the data frame: {}".format(df[label].unique().tolist())
#print "Events to be handled: {}".format(config["handlers"].keys())
if len(set(df[label].unique().tolist())-set(config["handlers"].keys()))>0:
print("Not all item types are handled!\n{}"\
.format(set(df[label].unique().tolist())-set(config["handlers"].keys())))
# now let's revert the config, to get `parser:[list of labels]`
funcMap = {}
for k, v in config["handlers"].items():
funcMap[v] = funcMap.get(v, []) + [k]
# add a output
# we now loop through all funcMap elements and do the conversion
for parser, eventList in funcMap.items():
idx = df.loc[:, label].isin(eventList)
df.loc[idx, outputCol] = df.loc[idx, "Response"].apply(parser)
return df
if __name__ == '__main__':
if len(sys.argv)<2:
print("Usage: python {} csvFileName.csv".format(sys.argv[0]))
exit()
dataFileName = sys.argv[1]
df = pd.read_csv(dataFileName, sep="\t", header=None,
names=["ItemResponseId","SubjectName","Grade","BookletNumber",
"BlockCode","AccessionNumber","ItemTypeCode","IsAnswered",
"IsSkipped","Response"])
res = parseStrSQResponses(df)
# looking for duplicated responses
res.loc[res.duplicated([ 'BookletNumber', 'AccessionNumber'], keep=False)]\
.sort_values([ 'BookletNumber', 'AccessionNumber'])\
.to_csv(dataFileName.replace(".csv", "")+'_DuplicatedResponses.csv')
dfByAccNum = res.drop_duplicates([ 'BookletNumber', 'AccessionNumber'])\
.pivot(columns='AccessionNumber', index="BookletNumber", values="Answer")
# saving to a bunch of csv files
res.to_csv(dataFileName.replace(".csv", "")+'_Responses.csv')
dfByAccNum.to_csv(dataFileName.replace(".csv", "")+'_Responses_byAccNum.csv') | en | 0.547753 | # coding: utf-8 # # Reconstructing 2017 SQ Responses # # ``` # <NAME> # 2017-07-14 # ``` # Parse the SQ response data, extract the responses from the JSON data :param df: the input data frame :type df: Pandas data frame :param label: optional, name of the column indicating the item type, which determines how to parse. :type label: string :param config: optional configuation object; default to None :type config: object or None :returns: df with Response.PartId, Response.Index, value :rtype: Pandas data frame # check to see if there are events not handled #print config["handlers"] #print "Events in the data frame: {}".format(df[label].unique().tolist()) #print "Events to be handled: {}".format(config["handlers"].keys()) # now let's revert the config, to get `parser:[list of labels]` # add a output # we now loop through all funcMap elements and do the conversion # looking for duplicated responses # saving to a bunch of csv files | 2.83704 | 3 |
var/spack/repos/builtin/packages/r-sandwich/package.py | xiki-tempula/spack | 9 | 6630970 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSandwich(RPackage):
"""Model-robust standard error estimators for cross-sectional, time series,
and longitudinal data."""
homepage = "https://cloud.r-project.org/package=sandwich"
url = "https://cloud.r-project.org/src/contrib/sandwich_2.3-4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/sandwich"
version('2.5-1', sha256='dbef6f4d12b83e166f9a2508b7c732b04493641685d6758d29f3609e564166d6')
version('2.5-0', sha256='6cc144af20739eb23e5539010d3833d7c7fc53cbca2addb583ab933167c11399')
version('2.3-4', sha256='2052f7e3d19a05c372f422c5480f1058a4107e420cd038a9bd7240c4f0746d4d')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-zoo', type=('build', 'run'))
| # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSandwich(RPackage):
"""Model-robust standard error estimators for cross-sectional, time series,
and longitudinal data."""
homepage = "https://cloud.r-project.org/package=sandwich"
url = "https://cloud.r-project.org/src/contrib/sandwich_2.3-4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/sandwich"
version('2.5-1', sha256='dbef6f4d12b83e166f9a2508b7c732b04493641685d6758d29f3609e564166d6')
version('2.5-0', sha256='6cc144af20739eb23e5539010d3833d7c7fc53cbca2addb583ab933167c11399')
version('2.3-4', sha256='2052f7e3d19a05c372f422c5480f1058a4107e420cd038a9bd7240c4f0746d4d')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-zoo', type=('build', 'run'))
| en | 0.624971 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Model-robust standard error estimators for cross-sectional, time series, and longitudinal data. | 1.326958 | 1 |
app/bot/conversations/query.py | DramatikMan/mlhl-01-python-bot | 0 | 6630971 | <reponame>DramatikMan/mlhl-01-python-bot<gh_stars>0
import sqlite3
from collections.abc import Iterable
from io import BytesIO
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from telegram import (
Update,
ReplyKeyboardMarkup,
ForceReply,
ReplyKeyboardRemove,
InputMediaPhoto
)
from telegram.ext import CommandHandler, MessageHandler, Filters
from app.db import DB_URI
from . import BaseHandler
from ..types import CCT, DataRecord
class QueryHandler(BaseHandler):
CHOOSING, FILTERING, PROMPTING_OUTPUT, PROMPTING_PREDICTION = range(4)
def __init__(self) -> None:
super().__init__(
entry_points=[CommandHandler('query', self.handle_query_command)],
states={
self.CHOOSING: [
MessageHandler(
Filters.regex(f'^({"|".join(self.columns.keys())})$'),
self.handle_choosing
),
CommandHandler('charts', self.handle_charts_command)
],
self.FILTERING: [MessageHandler(
Filters.text & ~Filters.command,
self.handle_filtering
)],
self.PROMPTING_OUTPUT: [MessageHandler(
Filters.regex('^(output|continue)$'),
self.handle_output_prompt
)],
self.PROMPTING_PREDICTION: [MessageHandler(
Filters.regex('^(YES|NO)$'),
self.handle_prediction_prompt
)]
},
fallbacks=[CommandHandler('cancel', self.cancel)],
)
def get_not_yet_filtered_params(self, context: CCT) -> list[str]:
return [
key for key in self.columns.keys()
if key not in context.user_data['filters']
]
def handle_query_command(self, update: Update, context: CCT) -> int:
if 'filters' not in context.user_data:
context.user_data['filters'] = {}
params: list[str] = self.get_not_yet_filtered_params(context)
descriptions: str = self.get_descriptions_string(params)
update.message.reply_text(
'We are in query mode. Choose parameter to filter deals by:\n\n'
f'{descriptions}',
reply_markup=ReplyKeyboardMarkup(
[params[i:i + 3] for i in range(0, len(params), 3)],
one_time_keyboard=True
)
)
return self.CHOOSING
def handle_choosing(self, update: Update, context: CCT) -> int:
param: str = update.message.text
context.user_data['param'] = param
update.message.reply_text(
f'Now enter the target value for parameter: {param}.',
reply_markup=ForceReply()
)
return self.FILTERING
def handle_filtering(self, update: Update, context: CCT) -> int:
value: str = update.message.text
context.user_data['filters'] |= {context.user_data['param']: value}
WHERE_SQL = 'WHERE ' + ' AND '.join(
f'{key} = {value}'
for key, value in context.user_data['filters'].items()
)
with sqlite3.connect(DB_URI) as conn:
count, avg_price = conn.cursor().execute(f'''
SELECT
count(price_doc)
, avg(price_doc)
FROM data
{WHERE_SQL}
''').fetchone()
if count == 0:
update.message.reply_text(
'No records met the current filtering conditions.\n\n'
'Would you like to get a modeled prediction of the price '
'for the current filter (excluding NaN variables)?',
reply_markup=ReplyKeyboardMarkup(
[['YES', 'NO']],
one_time_keyboard=True
)
)
return self.PROMPTING_PREDICTION
elif count == 1:
with sqlite3.connect(DB_URI) as conn:
result: DataRecord = conn.cursor().execute(f'''
SELECT *
FROM data
{WHERE_SQL}
''').fetchone()
single_record: str = '\n'.join((
f'{key} = {value}'
for key, value in zip(self.columns.keys(), result)
))
update.message.reply_text(
'Found a single matching record.\n\n'
f'{single_record}\n\n'
'Exiting query mode.',
reply_markup=ReplyKeyboardRemove()
)
context.user_data['filters'] = {}
return self.END
elif count <= 10:
update.message.reply_text(
f'Average price = {avg_price:.2f}.\n\n'
f'{count} records met the current filtering conditions.\n\n'
'Would you like to output these records '
'or to continue filtering?',
reply_markup=ReplyKeyboardMarkup(
[['output', 'continue']],
one_time_keyboard=True
)
)
return self.PROMPTING_OUTPUT
params: list[str] = self.get_not_yet_filtered_params(context)
descriptions: str = self.get_descriptions_string(params)
update.message.reply_text(
f'Average price = {avg_price:.2f}.\n\n'
f'{count} records met the current filtering conditions.\n\n'
'Choose another parameter to narrow down the current selection '
'or type /cancel to quit query mode.\n\n'
+ (
'You can also type /charts to get visualization of how the '
'price depends on each of the not yet filtered parameters '
'(excluding NaNs).\n\n'
if count <= 1000 else ''
)
+ f'{descriptions}',
reply_markup=ReplyKeyboardMarkup(
[params[i:i + 3] for i in range(0, len(params), 3)],
one_time_keyboard=True
)
)
return self.CHOOSING
def handle_output_prompt(self, update: Update, context: CCT) -> int:
value: str = update.message.text
if value == 'output':
WHERE_SQL = 'WHERE ' + ' AND '.join(
f'{key} = {value}' for key, value
in context.user_data['filters'].items()
)
with sqlite3.connect(DB_URI) as conn:
result: Iterable[DataRecord] = conn.cursor().execute(f'''
SELECT *
FROM data
{WHERE_SQL}
''')
multiple_records: str = '\n'.join((
f'{i}: {value}'
for i, value in enumerate(result, 1)
))
update.message.reply_text(
f'{multiple_records}\n\n'
'Exiting query mode.',
reply_markup=ReplyKeyboardRemove()
)
context.user_data['filters'] = {}
return self.END
elif value == 'continue':
params: list[str] = self.get_not_yet_filtered_params(context)
descriptions: str = self.get_descriptions_string(params)
update.message.reply_text(
'Choose another parameter to narrow down the current '
'selection or type /cancel to quit query mode.\n\n'
f'{descriptions}',
reply_markup=ReplyKeyboardMarkup(
[params[i:i + 3] for i in range(0, len(params), 3)],
one_time_keyboard=True
)
)
return self.CHOOSING
return self.END
def get_chart_images(self, context: CCT) -> list[InputMediaPhoto]:
params: list[str] = self.get_not_yet_filtered_params(context)
VARS_SQL = ', '.join(params)
WHERE_SQL = 'WHERE ' + ' AND '.join(
f'{key} = {value}'
for key, value in context.user_data['filters'].items()
)
with sqlite3.connect(DB_URI) as conn:
df: pd.DataFrame = pd.read_sql_query(
sql=f'SELECT {VARS_SQL}, price_doc FROM data {WHERE_SQL}',
con=conn
)
label_size = 25
plt.rcParams.update({
'axes.labelsize': label_size,
'xtick.labelsize': label_size,
'ytick.labelsize': label_size,
'figure.figsize': (15, 15)
})
images: list[InputMediaPhoto] = []
for param in (
param for param in params
if param not in ('product_type', 'sub_area')
):
plt.clf()
plt.xlabel(self.columns[param])
plt.ylabel('sale price')
plt.hexbin(
x=df[param],
y=df['price_doc'].apply(lambda x: x / (10 ** 6)),
gridsize=50,
cmap='coolwarm'
)
image_io = BytesIO()
plt.savefig(image_io)
images.append(InputMediaPhoto(image_io.getvalue()))
return images
def handle_charts_command(self, update: Update, context: CCT) -> int:
update.message.reply_text(
'Building charts...',
reply_markup=ReplyKeyboardRemove()
)
images: list[InputMediaPhoto] = self.get_chart_images(context)
update.message.reply_media_group(media=images) # type: ignore
context.user_data['filters'] = {}
return self.END
def get_prediction(self, context: CCT) -> tuple[float, float]:
params = {
key: value for key, value in context.user_data['filters'].items()
if key not in ('product_type', 'sub_area')
}
with sqlite3.connect(DB_URI) as conn:
df: pd.DataFrame = pd.read_sql_query(
sql=f'''
SELECT {', '.join(params)}, price_doc
FROM data
''',
con=conn
)
X = df[[*params]]
y = df['price_doc'] / (10 ** 6)
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.33,
random_state=42
)
model = LinearRegression()
model.fit(X=X_train, y=y_train)
return (
float(model.score(X=X_test, y=y_test)),
float(
model.coef_ @ [*map(float, params.values())] + model.intercept_
)
)
def handle_prediction_prompt(self, update: Update, context: CCT) -> int:
value: str = update.message.text
if value == 'NO':
update.message.reply_text(
'Exiting query mode.',
reply_markup=ReplyKeyboardRemove()
)
return self.END
elif value == 'YES':
R_squared, prediction = self.get_prediction(context)
update.message.reply_text(
f'Predicted price = {prediction:.6f} M.'
'\n'
f'R-squared for test subset = {R_squared:.2f}.'
'\n\nExiting query mode.'
)
context.user_data['filters'] = {}
return self.END
| import sqlite3
from collections.abc import Iterable
from io import BytesIO
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from telegram import (
Update,
ReplyKeyboardMarkup,
ForceReply,
ReplyKeyboardRemove,
InputMediaPhoto
)
from telegram.ext import CommandHandler, MessageHandler, Filters
from app.db import DB_URI
from . import BaseHandler
from ..types import CCT, DataRecord
class QueryHandler(BaseHandler):
CHOOSING, FILTERING, PROMPTING_OUTPUT, PROMPTING_PREDICTION = range(4)
def __init__(self) -> None:
super().__init__(
entry_points=[CommandHandler('query', self.handle_query_command)],
states={
self.CHOOSING: [
MessageHandler(
Filters.regex(f'^({"|".join(self.columns.keys())})$'),
self.handle_choosing
),
CommandHandler('charts', self.handle_charts_command)
],
self.FILTERING: [MessageHandler(
Filters.text & ~Filters.command,
self.handle_filtering
)],
self.PROMPTING_OUTPUT: [MessageHandler(
Filters.regex('^(output|continue)$'),
self.handle_output_prompt
)],
self.PROMPTING_PREDICTION: [MessageHandler(
Filters.regex('^(YES|NO)$'),
self.handle_prediction_prompt
)]
},
fallbacks=[CommandHandler('cancel', self.cancel)],
)
def get_not_yet_filtered_params(self, context: CCT) -> list[str]:
return [
key for key in self.columns.keys()
if key not in context.user_data['filters']
]
def handle_query_command(self, update: Update, context: CCT) -> int:
if 'filters' not in context.user_data:
context.user_data['filters'] = {}
params: list[str] = self.get_not_yet_filtered_params(context)
descriptions: str = self.get_descriptions_string(params)
update.message.reply_text(
'We are in query mode. Choose parameter to filter deals by:\n\n'
f'{descriptions}',
reply_markup=ReplyKeyboardMarkup(
[params[i:i + 3] for i in range(0, len(params), 3)],
one_time_keyboard=True
)
)
return self.CHOOSING
def handle_choosing(self, update: Update, context: CCT) -> int:
param: str = update.message.text
context.user_data['param'] = param
update.message.reply_text(
f'Now enter the target value for parameter: {param}.',
reply_markup=ForceReply()
)
return self.FILTERING
def handle_filtering(self, update: Update, context: CCT) -> int:
value: str = update.message.text
context.user_data['filters'] |= {context.user_data['param']: value}
WHERE_SQL = 'WHERE ' + ' AND '.join(
f'{key} = {value}'
for key, value in context.user_data['filters'].items()
)
with sqlite3.connect(DB_URI) as conn:
count, avg_price = conn.cursor().execute(f'''
SELECT
count(price_doc)
, avg(price_doc)
FROM data
{WHERE_SQL}
''').fetchone()
if count == 0:
update.message.reply_text(
'No records met the current filtering conditions.\n\n'
'Would you like to get a modeled prediction of the price '
'for the current filter (excluding NaN variables)?',
reply_markup=ReplyKeyboardMarkup(
[['YES', 'NO']],
one_time_keyboard=True
)
)
return self.PROMPTING_PREDICTION
elif count == 1:
with sqlite3.connect(DB_URI) as conn:
result: DataRecord = conn.cursor().execute(f'''
SELECT *
FROM data
{WHERE_SQL}
''').fetchone()
single_record: str = '\n'.join((
f'{key} = {value}'
for key, value in zip(self.columns.keys(), result)
))
update.message.reply_text(
'Found a single matching record.\n\n'
f'{single_record}\n\n'
'Exiting query mode.',
reply_markup=ReplyKeyboardRemove()
)
context.user_data['filters'] = {}
return self.END
elif count <= 10:
update.message.reply_text(
f'Average price = {avg_price:.2f}.\n\n'
f'{count} records met the current filtering conditions.\n\n'
'Would you like to output these records '
'or to continue filtering?',
reply_markup=ReplyKeyboardMarkup(
[['output', 'continue']],
one_time_keyboard=True
)
)
return self.PROMPTING_OUTPUT
params: list[str] = self.get_not_yet_filtered_params(context)
descriptions: str = self.get_descriptions_string(params)
update.message.reply_text(
f'Average price = {avg_price:.2f}.\n\n'
f'{count} records met the current filtering conditions.\n\n'
'Choose another parameter to narrow down the current selection '
'or type /cancel to quit query mode.\n\n'
+ (
'You can also type /charts to get visualization of how the '
'price depends on each of the not yet filtered parameters '
'(excluding NaNs).\n\n'
if count <= 1000 else ''
)
+ f'{descriptions}',
reply_markup=ReplyKeyboardMarkup(
[params[i:i + 3] for i in range(0, len(params), 3)],
one_time_keyboard=True
)
)
return self.CHOOSING
def handle_output_prompt(self, update: Update, context: CCT) -> int:
value: str = update.message.text
if value == 'output':
WHERE_SQL = 'WHERE ' + ' AND '.join(
f'{key} = {value}' for key, value
in context.user_data['filters'].items()
)
with sqlite3.connect(DB_URI) as conn:
result: Iterable[DataRecord] = conn.cursor().execute(f'''
SELECT *
FROM data
{WHERE_SQL}
''')
multiple_records: str = '\n'.join((
f'{i}: {value}'
for i, value in enumerate(result, 1)
))
update.message.reply_text(
f'{multiple_records}\n\n'
'Exiting query mode.',
reply_markup=ReplyKeyboardRemove()
)
context.user_data['filters'] = {}
return self.END
elif value == 'continue':
params: list[str] = self.get_not_yet_filtered_params(context)
descriptions: str = self.get_descriptions_string(params)
update.message.reply_text(
'Choose another parameter to narrow down the current '
'selection or type /cancel to quit query mode.\n\n'
f'{descriptions}',
reply_markup=ReplyKeyboardMarkup(
[params[i:i + 3] for i in range(0, len(params), 3)],
one_time_keyboard=True
)
)
return self.CHOOSING
return self.END
def get_chart_images(self, context: CCT) -> list[InputMediaPhoto]:
params: list[str] = self.get_not_yet_filtered_params(context)
VARS_SQL = ', '.join(params)
WHERE_SQL = 'WHERE ' + ' AND '.join(
f'{key} = {value}'
for key, value in context.user_data['filters'].items()
)
with sqlite3.connect(DB_URI) as conn:
df: pd.DataFrame = pd.read_sql_query(
sql=f'SELECT {VARS_SQL}, price_doc FROM data {WHERE_SQL}',
con=conn
)
label_size = 25
plt.rcParams.update({
'axes.labelsize': label_size,
'xtick.labelsize': label_size,
'ytick.labelsize': label_size,
'figure.figsize': (15, 15)
})
images: list[InputMediaPhoto] = []
for param in (
param for param in params
if param not in ('product_type', 'sub_area')
):
plt.clf()
plt.xlabel(self.columns[param])
plt.ylabel('sale price')
plt.hexbin(
x=df[param],
y=df['price_doc'].apply(lambda x: x / (10 ** 6)),
gridsize=50,
cmap='coolwarm'
)
image_io = BytesIO()
plt.savefig(image_io)
images.append(InputMediaPhoto(image_io.getvalue()))
return images
def handle_charts_command(self, update: Update, context: CCT) -> int:
update.message.reply_text(
'Building charts...',
reply_markup=ReplyKeyboardRemove()
)
images: list[InputMediaPhoto] = self.get_chart_images(context)
update.message.reply_media_group(media=images) # type: ignore
context.user_data['filters'] = {}
return self.END
def get_prediction(self, context: CCT) -> tuple[float, float]:
params = {
key: value for key, value in context.user_data['filters'].items()
if key not in ('product_type', 'sub_area')
}
with sqlite3.connect(DB_URI) as conn:
df: pd.DataFrame = pd.read_sql_query(
sql=f'''
SELECT {', '.join(params)}, price_doc
FROM data
''',
con=conn
)
X = df[[*params]]
y = df['price_doc'] / (10 ** 6)
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.33,
random_state=42
)
model = LinearRegression()
model.fit(X=X_train, y=y_train)
return (
float(model.score(X=X_test, y=y_test)),
float(
model.coef_ @ [*map(float, params.values())] + model.intercept_
)
)
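    # The dot product above reproduces model.predict for the single filtered row; an
    # equivalent call would be (sketch, column order must match X):
    #
    #     row = pd.DataFrame([[*map(float, params.values())]], columns=[*params])
    #     prediction = float(model.predict(row)[0])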
def handle_prediction_prompt(self, update: Update, context: CCT) -> int:
value: str = update.message.text
if value == 'NO':
update.message.reply_text(
'Exiting query mode.',
reply_markup=ReplyKeyboardRemove()
)
return self.END
elif value == 'YES':
R_squared, prediction = self.get_prediction(context)
update.message.reply_text(
f'Predicted price = {prediction:.6f} M.'
'\n'
f'R-squared for test subset = {R_squared:.2f}.'
'\n\nExiting query mode.'
)
context.user_data['filters'] = {}
return self.END | en | 0.14806 | SELECT count(price_doc) , avg(price_doc) FROM data {WHERE_SQL} SELECT * FROM data {WHERE_SQL} SELECT * FROM data {WHERE_SQL} # type: ignore SELECT {', '.join(params)}, price_doc FROM data | 1.993867 | 2 |
AStarSearch/student_code.py | jingr1/SelfDrivingCar | 0 | 6630972 | import math
def dist_between(start,end):
return math.sqrt(pow((start[0]-end[0]),2)+pow((start[1]-end[1]),2))
def get_best_f_score(input_set, scoredict):
    # Return the member of input_set with the lowest f-score (linear scan).
    return min(input_set, key=lambda idx: scoredict[idx])
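# For larger maps a heap keeps frontier selection at O(log n) instead of the linear
# scan above (sketch; stale heap entries would simply be skipped when popped):
#
#     import heapq
#     frontier_heap = [(f_score[start], start)]
#     _, current = heapq.heappop(frontier_heap)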
def reconstruct_path(start_node,came_from, current_node):
p = [current_node]
while current_node != start_node:
current_node = came_from[current_node]
p.append(current_node)
return p[::-1]
def shortest_path(M,start,goal):
print("shortest path called")
intersections = M.intersections
roads = M.roads
frontierset = set([start])
explorededset = set()
came_from = {}
g_score = {}
h_score = {}
f_score = {}
g_score[start] = 0
h_score[start] = dist_between(intersections[start],intersections[goal])
f_score[start] = g_score[start] + h_score[start]
while frontierset:
currentintersection = get_best_f_score(frontierset,f_score)
frontierset.remove(currentintersection)
explorededset.add(currentintersection)
neighborsets = set(roads[currentintersection])
if currentintersection == goal:
return reconstruct_path(start,came_from, goal)
else:
for neighbor in neighborsets:
if neighbor not in explorededset:
tentative_g_score = g_score[currentintersection] + dist_between(intersections[currentintersection],intersections[neighbor])
if neighbor not in frontierset:
frontierset.add(neighbor)
h_score[neighbor] = dist_between(intersections[neighbor],intersections[goal])
tentative_is_better = True
elif (tentative_g_score < g_score[neighbor]):
tentative_is_better = True
else:
tentative_is_better = False
if tentative_is_better == True:
came_from[neighbor] = currentintersection
g_score[neighbor] = tentative_g_score
f_score[neighbor] = g_score[neighbor] + h_score[neighbor]
    print('can not find the shortest path')
|
release.py | SHSharkar/geolocationapi | 0 | 6630973 | <reponame>SHSharkar/geolocationapi
import io
import os
import shutil
import tarfile
import requests
GEOIP2_DB_URL = (
"https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=iZmGL4IowR8JrRsv&suffix=tar.gz"
)
r = requests.get(GEOIP2_DB_URL)
tar = tarfile.open(mode="r:gz", fileobj=io.BytesIO(r.content))
for member in tar.getmembers():
if member.name.endswith("GeoLite2-Country.mmdb"):
member.name = os.path.basename(member.name)
tar.extract(member, path="data")
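# A slightly more defensive variant would fail fast on a bad download and close the
# archive explicitly (sketch, not part of the original script):
#
#     r.raise_for_status()
#     with tarfile.open(mode="r:gz", fileobj=io.BytesIO(r.content)) as tar:
#         ...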
| import io
import os
import shutil
import tarfile
import requests
GEOIP2_DB_URL = (
"https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=iZmGL4IowR8JrRsv&suffix=tar.gz"
)
r = requests.get(GEOIP2_DB_URL)
tar = tarfile.open(mode="r:gz", fileobj=io.BytesIO(r.content))
for member in tar.getmembers():
if member.name.endswith("GeoLite2-Country.mmdb"):
member.name = os.path.basename(member.name)
tar.extract(member, path="data") | none | 1 | 2.770785 | 3 |
|
xreshaper/datasets.py | NCAR/xarray-pyreshaper | 0 | 6630974 | <reponame>NCAR/xarray-pyreshaper
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import os
import numpy as np
import pandas as pd
import xarray as xr
def create_data_array(time, lat, lon, name):
"""Generate some random xarray dataarray"""
data_array = xr.DataArray(
np.random.randn(len([time]), len(lat), len(lon)),
coords={'time': [time], 'lat': lat, 'lon': lon},
dims=('time', 'lat', 'lon'),
name=name,
)
return data_array
def generate_fake_data(time, suffix, output_dir=None):
"""Create xarray 'time-slice' dataset and save it to disk"""
# generate latitude and longitude values
lat = np.linspace(start=-90, stop=90, num=180, dtype='int')
lon = np.linspace(start=-180, stop=180, num=360, dtype='int')
# Create some variables
sst = create_data_array(time, lat, lon, name='sst')
prec = create_data_array(time, lat, lon, name='prec')
pressure = create_data_array(time, lat, lon, name='pressure')
# Create some meta data variables. These variables are the same for all
# time slices
meta = xr.DataArray(
np.arange(len(lat) * len(lon)).reshape(len(lat), len(lon)),
coords={'lat': lat, 'lon': lon},
dims=('lat', 'lon'),
name='meta_var',
)
nlat = xr.DataArray(lat / 2, coords={'lat': lat}, dims=('lat'))
nlon = xr.DataArray(lon / 2, coords={'lon': lon}, dims=('lon'))
dset = xr.Dataset(
{
'sst': sst,
'pressure': pressure,
'prec': prec,
'meta_var': meta,
'nlat': nlat,
'nlon': nlon,
}
)
# Add some global attributes to our dataset
dset.attrs['created on'] = '2010-10-10'
dset.attrs['created by'] = 'foo'
dset.attrs['experiment_name'] = 'bar'
path = f'{output_dir}/tslice{str(suffix)}.nc'
dset.to_netcdf(path, engine='netcdf4', mode='w')
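# The slices written above can be recombined later with xarray (sketch):
#
#     ds = xr.open_mfdataset(f'{output_dir}/tslice*.nc', combine='by_coords')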
def make_netcdf_data(start_date='2000-01-01', freq='1M', periods=24, output_dir=None):
if not output_dir:
home = os.environ.get('HOME')
output_dir = f'{home}/.xreshaper/data'
os.makedirs(output_dir, exist_ok=True)
    times = pd.date_range(start=start_date, freq=freq, periods=periods)
for index, time in enumerate(times):
generate_fake_data(time, index, output_dir)
print(f'******** The generated data location is : {output_dir} ************')
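# Example invocation (sketch): writes 24 monthly time slices under ~/.xreshaper/data.
#
#     make_netcdf_data(start_date='2000-01-01', freq='1M', periods=24)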
| #!/usr/bin/env python
from __future__ import absolute_import, print_function
import os
import numpy as np
import pandas as pd
import xarray as xr
def create_data_array(time, lat, lon, name):
"""Generate some random xarray dataarray"""
data_array = xr.DataArray(
np.random.randn(len([time]), len(lat), len(lon)),
coords={'time': [time], 'lat': lat, 'lon': lon},
dims=('time', 'lat', 'lon'),
name=name,
)
return data_array
def generate_fake_data(time, suffix, output_dir=None):
"""Create xarray 'time-slice' dataset and save it to disk"""
# generate latitude and longitude values
lat = np.linspace(start=-90, stop=90, num=180, dtype='int')
lon = np.linspace(start=-180, stop=180, num=360, dtype='int')
# Create some variables
sst = create_data_array(time, lat, lon, name='sst')
prec = create_data_array(time, lat, lon, name='prec')
pressure = create_data_array(time, lat, lon, name='pressure')
# Create some meta data variables. These variables are the same for all
# time slices
meta = xr.DataArray(
np.arange(len(lat) * len(lon)).reshape(len(lat), len(lon)),
coords={'lat': lat, 'lon': lon},
dims=('lat', 'lon'),
name='meta_var',
)
nlat = xr.DataArray(lat / 2, coords={'lat': lat}, dims=('lat'))
nlon = xr.DataArray(lon / 2, coords={'lon': lon}, dims=('lon'))
dset = xr.Dataset(
{
'sst': sst,
'pressure': pressure,
'prec': prec,
'meta_var': meta,
'nlat': nlat,
'nlon': nlon,
}
)
# Add some global attributes to our dataset
dset.attrs['created on'] = '2010-10-10'
dset.attrs['created by'] = 'foo'
dset.attrs['experiment_name'] = 'bar'
path = f'{output_dir}/tslice{str(suffix)}.nc'
dset.to_netcdf(path, engine='netcdf4', mode='w')
def make_netcdf_data(start_date='2000-01-01', freq='1M', periods=24, output_dir=None):
if not output_dir:
home = os.environ.get('HOME')
output_dir = f'{home}/.xreshaper/data'
os.makedirs(output_dir, exist_ok=True)
    times = pd.date_range(start=start_date, freq=freq, periods=periods)
for index, time in enumerate(times):
generate_fake_data(time, index, output_dir)
print(f'******** The generated data location is : {output_dir} ************') | en | 0.3797 | #!/usr/bin/env python Generate some random xarray dataarray Create xarray 'time-slice' dataset and save it to disk # generate latitude and longitude values # Create some variables # Create some meta data variables. These variables are the same for all # time slices # Add some global attributes to our dataset | 3.261089 | 3 |
commands/textchannelname.py | MehmetSalihK/AutoVoiceChannels | 0 | 6630975 | import discord
import utils
import functions as func
from commands.base import Cmd
help_text = [
[
("Usage:", "<PREFIX><COMMAND> `NEW NAME`"),
("Description:",
"Modifiez le nom des canaux de texte privés temporaires créés pour chaque conversation vocale si `textchannels` est activé."
"\nLa valeur par défaut est `voice context`."),
("Example:", "<PREFIX><COMMAND> typing/tts/bot commands"),
]
]
async def execute(ctx, params):
params_str = ' '.join(params)
guild = ctx['guild']
settings = ctx['settings']
author = ctx['message'].author
new_word = params_str.replace('\n', ' ') # Can't have newlines in channel name.
new_word = utils.strip_quotes(new_word)
previous_word = ("text" if 'text_channel_name' not in settings else
func.esc_md(settings['text_channel_name']))
if not new_word:
return False, ("You need to define a new name, e.g. `{}textchannelname links` to make "
"**links** shown instead of **{}**.".format(ctx['print_prefix'], previous_word))
settings['text_channel_name'] = new_word
utils.set_serv_settings(guild, settings)
e_new_word = func.esc_md(new_word)
await func.server_log(
guild,
"💬 {} (`{}`) définissez le nom \"text\" du serveur sur **{}**".format(
func.user_hash(author), author.id, e_new_word
), 2, settings)
for p, pv in settings['auto_channels'].items():
for s, sv in pv['secondaries'].items():
if 'tc' in sv:
tc = guild.get_channel(sv['tc'])
try:
await tc.edit(name=utils.nice_cname(new_word))
except discord.errors.Forbidden:
pass
return True, ("Done! From now on I'll use **{}** instead of **{}**.".format(e_new_word, previous_word))
command = Cmd(
execute=execute,
help_text=help_text,
params_required=1,
gold_required=True,
admin_required=True,
)
| import discord
import utils
import functions as func
from commands.base import Cmd
help_text = [
[
("Usage:", "<PREFIX><COMMAND> `NEW NAME`"),
("Description:",
"Modifiez le nom des canaux de texte privés temporaires créés pour chaque conversation vocale si `textchannels` est activé."
"\nLa valeur par défaut est `voice context`."),
("Example:", "<PREFIX><COMMAND> typing/tts/bot commands"),
]
]
async def execute(ctx, params):
params_str = ' '.join(params)
guild = ctx['guild']
settings = ctx['settings']
author = ctx['message'].author
new_word = params_str.replace('\n', ' ') # Can't have newlines in channel name.
new_word = utils.strip_quotes(new_word)
previous_word = ("text" if 'text_channel_name' not in settings else
func.esc_md(settings['text_channel_name']))
if not new_word:
return False, ("You need to define a new name, e.g. `{}textchannelname links` to make "
"**links** shown instead of **{}**.".format(ctx['print_prefix'], previous_word))
settings['text_channel_name'] = new_word
utils.set_serv_settings(guild, settings)
e_new_word = func.esc_md(new_word)
await func.server_log(
guild,
"💬 {} (`{}`) définissez le nom \"text\" du serveur sur **{}**".format(
func.user_hash(author), author.id, e_new_word
), 2, settings)
for p, pv in settings['auto_channels'].items():
for s, sv in pv['secondaries'].items():
if 'tc' in sv:
tc = guild.get_channel(sv['tc'])
try:
await tc.edit(name=utils.nice_cname(new_word))
except discord.errors.Forbidden:
pass
return True, ("Done! From now on I'll use **{}** instead of **{}**.".format(e_new_word, previous_word))
command = Cmd(
execute=execute,
help_text=help_text,
params_required=1,
gold_required=True,
admin_required=True,
)
| en | 0.898794 | # Can't have newlines in channel name. | 2.588737 | 3 |
src/bxcommon/services/http_service.py | thabaptiser/bxcommon | 0 | 6630976 | <gh_stars>0
import json
from ssl import SSLContext
from typing import Optional, Dict, Any, Union, List
import status
from urllib3 import Retry, HTTPResponse
from urllib3.exceptions import HTTPError, MaxRetryError
from urllib3.poolmanager import PoolManager
from urllib3.util import parse_url
from bxcommon import constants
from bxutils import log_messages
from bxutils import logging
from bxutils.encoding import json_encoder
# recursive types are not supported: https://github.com/python/typing/issues/182
JT = Union[Dict[str, Any], List[Any]]
logger = logging.get_logger(__name__)
_url = constants.SDN_ROOT_URL
_ssl_context: Optional[SSLContext] = None
METHODS_WHITELIST = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE", "POST", "PATCH"]
)
def set_root_url(sdn_url: str, ssl_context: Optional[SSLContext] = None):
# pylint: disable=global-statement
global _url
_url = sdn_url
update_http_ssl_context(ssl_context)
def update_http_ssl_context(ssl_context: Optional[SSLContext] = None):
# pylint: disable=global-statement
global _ssl_context
_ssl_context = ssl_context
def post_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request("POST", endpoint, body=json_encoder.to_json(payload),
headers=constants.HTTP_HEADERS)
def patch_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request("PATCH", endpoint, body=json_encoder.to_json(payload),
headers=constants.HTTP_HEADERS)
def delete_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request("DELETE", endpoint, body=json_encoder.to_json(payload),
headers=constants.HTTP_HEADERS)
def get_json(endpoint: str) -> Optional[JT]:
return _http_request("GET", endpoint,
headers=constants.HTTP_HEADERS)
def get_json_with_payload(endpoint: str, payload=None) -> Optional[JT]:
return _http_request("GET", endpoint, body=json_encoder.to_json(payload),
headers=constants.HTTP_HEADERS)
def build_url(endpoint: str) -> str:
if not endpoint or not isinstance(endpoint, str):
raise ValueError("Missing or invalid URL")
return _url + endpoint
def raise_for_status(res: HTTPResponse) -> None:
if status.is_client_error(res.status) or status.is_server_error(res.status):
raise HTTPError(f"{res.status}:{res.reason}")
def _http_request(method: str, endpoint: str, **kwargs) -> Optional[JT]:
url = build_url(endpoint)
parsed_url = parse_url(url)
pm_args = {
"num_pools": constants.HTTP_POOL_MANAGER_COUNT,
"host": parsed_url.host,
"port": parsed_url.port,
"retries": Retry(
connect=constants.HTTP_REQUEST_RETRIES_COUNT,
read=constants.HTTP_REQUEST_RETRIES_COUNT,
redirect=constants.HTTP_REQUEST_RETRIES_COUNT,
backoff_factor=constants.HTTP_REQUEST_BACKOFF_FACTOR,
method_whitelist=METHODS_WHITELIST
),
"ssl_context": _ssl_context,
}
if _ssl_context is not None and url.startswith("https"):
pm_args["assert_hostname"] = False
http_pool_manager: PoolManager = PoolManager(**pm_args)
try:
logger.trace("HTTP {0} to {1}", method, url)
response = http_pool_manager.request(
method=method,
url=parsed_url.url,
timeout=constants.HTTP_REQUEST_TIMEOUT,
**kwargs
)
raise_for_status(response)
except MaxRetryError as e:
logger.info("{} to {} failed due to: {}.", method, url, e)
return None
except Exception as e: # pylint: disable=broad-except
logger.error(log_messages.HTTP_REQUEST_RETURNED_ERROR, method, url, e)
return None
return json.loads(response.data)
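# Example usage (sketch; the URL is illustrative): configure the SDN root once, then
# issue JSON requests through the helpers above.
#
#     set_root_url("https://sdn.example.com")
#     nodes = get_json("/nodes")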
sgtpy/gammamie_mixtures/ares.py | MatKie/SGTPy | 12 | 6630977 | <reponame>MatKie/SGTPy<gh_stars>10-100
from __future__ import division, print_function, absolute_import
import numpy as np
from .a1sB_monomer import a1sB_eval, da1sB_dxhi00_eval, d2a1sB_dxhi00_eval
from .a1sB_monomer import d3a1sB_dxhi00_eval
from .a1sB_monomer import da1sB_dx_eval, da1sB_dx_dxhi00_eval
from .a1sB_monomer import da1sB_dx_dxhi00_dxxhi_eval
from .a1sB_monomer import da1sB_dx_d2xhi00_dxxhi_eval
from .ahs_monomer import ahs, dahs_dxhi00, d2ahs_dxhi00
from .ahs_monomer import dahs_dx, dahs_dxxhi
from .a3_monomer import a3, da3_dxhi00, d2a3_dxhi00
from .a3_monomer import da3_dx, da3_dxxhi
from .a2_monomer import a2, da2_dxhi00, d2a2_dxhi00
from .a2_monomer import da2_dx, da2_dxxhi
from .a1_monomer import a1, da1_dxhi00, d2a1_dxhi00
from .a1_monomer import da1_dx, da1_dxxhi
from .a2new_chain import da2new_dxhi00, d2a2new_dxhi00, d3a2new_dxhi00
from .a2new_chain import da2new_dx_dxhi00, da2new_dxxhi_dxhi00
from .gdHS_chain import gdHS, dgdHS_dxhi00, d2gdHS_dxhi00
from .gdHS_chain import dgdHS_dx, dgdHS_dxxhi
from .gammac_chain import gammac, dgammac_dxhi00, d2gammac_dxhi00
from .gammac_chain import dgammac_dx, dgammac_dxxhi
from .g1sigma_chain import g1sigma, dg1sigma_dxhi00, d2g1sigma_dxhi00
from .g1sigma_chain import dg1sigma_dx, dg1sigma_dxxhi
from .g2mca_chain import g2mca, dg2mca_dxhi00, d2g2mca_dxhi00
from .g2mca_chain import dg2mca_dx, dg2mca_dxxhi
from .lngmie_chain import lngmie, dlngmie_dxhi00, d2lngmie_dxhi00
from .lngmie_chain import dlngmie_dx, dlngmie_dxxhi
from .monomer_aux import dkHS_dxhi00, d2kHS_dxhi00, d3kHS_dxhi00
from .monomer_aux import dkHS_dx_dxhi00, d2kHS_dx_dxhi00
from .association_aux import Xass_solver, CIJ_matrix
from .association_aux import dXass_drho, d2Xass_drho, dXass_dx
from .association_aux import Iab, dIab_drho, d2Iab_drho, dIab
# Eq. (14) Paper 2014
def xhi_eval(xhi00, xs_k, xs_m, d_kk03):
dxhi_dxhi00 = xs_m * np.matmul(xs_k, d_kk03)
xhi = xhi00 * dxhi_dxhi00
return xhi, dxhi_dxhi00
def dxhi_dx_eval(xhi00, xs_k, xs_m, d_kk03, dxk_dx_aux):
dxhi_dxhi00 = xs_m * np.matmul(xs_k, d_kk03)
xhi = xhi00 * dxhi_dxhi00
dxhi_dx = (xhi00*dxk_dx_aux@d_kk03).T
return xhi, dxhi_dxhi00, dxhi_dx
# Eq (22) Paper 2014
def xhix_eval(xhi00, xs_k, xs_m, d_kl3):
sum1 = np.matmul(np.matmul(xs_k, d_kl3), xs_k)
dxhix_dxhi00 = xs_m * sum1
xhix = xhi00 * dxhix_dxhi00
return xhix, dxhix_dxhi00
def dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, d_kl3):
aux1 = xs_k * d_kl3
aux2 = np.dot(xs_k, aux1)
aux3 = aux2.sum()
dxhix_dxhi00 = xs_m * aux3
xhix = xhi00 * dxhix_dxhi00
suma1 = 2*np.sum(dxsk_dx@aux1, axis=1)
dxhix_dx_dxhi00 = (zs_m * aux3 + xs_m * suma1)
dxhix_dx = xhi00 * dxhix_dx_dxhi00
return xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00
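# The helpers above evaluate the packing fractions of Eqs. (14) and (22) and their
# xhi00 / composition derivatives; ares and its derivative variants below call them as
# (sketch):
#
#     xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
#     xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)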
def ares(self, x, rho, temp_aux, Xass0=None):
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
x_k = x[self.groups_index]
xs_ki = x_k*Sk*vki*vk
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3]])
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
a1kl, a2kl = a1sB_eval(xhi00, xhix, xhix_vec[0], xs_m, I_lambdaskl,
J_lambdaskl, ccteskl, a1vdwkl, a1vdw_ctekl)
# zero order pertubation
aHS = ahs(xhi)
# first order pertubation
suma1_monomer = Ckl * np.sum(a1kl * x0_a1, axis=0)
a1m = a1(xs_k, suma1_monomer)
# second order pertubation
khs, dkhs = dkHS_dxhi00(xhix, dxhix_dxhi00)
suma2_monomer = Ckl2 * np.sum(a2kl * x0_a2, axis=0)
a2m = a2(xs_k, khs, xhixm, suma2_monomer, eps_kl, f1, f2, f3)
# third order pertubaton
a3m = a3(xs_k, xhixm, eps_kl, f4, f5, f6)
am = xs_m * (aHS + beta * a1m + beta**2 * a2m + beta**3 * a3m)
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
da1ii, da2ii = da1sB_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m, I_lambdasii,
J_lambdasii, cctesii, a1vdwii,
a1vdw_cteii, dxhix_dxhi00)
# g hard sphere
gHS = gdHS(x0i_matrix, xhix)
# gamma_c
gc = gammac(xhixm, alphaii, tetha)
# g1sigma
da1_chain = Cii * np.sum(da1ii[1] * x0_a1ii, axis=0)
suma_g1 = Cii * np.sum(da1ii[0] * x0_g1ii, axis=0)
g1s = g1sigma(xhi00, xs_m, da1_chain, suma_g1, a1vdw_cteii)
# g2sigma
dsuma2_chain = Cii2 * np.sum(da2ii * x0_a2ii, axis=1)
da2new = da2new_dxhi00(khs, dkhs, dsuma2_chain, eps_ii)
suma_g2 = Cii2 * np.sum(da2ii[0] * x0_g2ii, axis=0)
g2m = g2mca(xhi00, khs, xs_m, da2new, suma_g2, eps_ii, a1vdw_cteii)
g2s = (1 + gc) * g2m
lng = lngmie(gHS, g1s, g2s, beps_ii, beps_ii2)
ach = - lng@(x * (self.zs_m - 1.))
ares = am + ach
if self.asso_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
sigma_kl3 = self.sigma_kl3
sigma_x3 = np.matmul(np.matmul(sigma_kl3, xs_k), xs_k)
rho_ad = rho * xs_m * sigma_x3
Iijklab = np.zeros([self.nc, self.nc])
Iab(rho_ad, T_ad, Iijklab)
diagasso = self.diagasso
# vki_asso = self.vki[self.group_asso_index]
vki_asso = self.vki_asso
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
ares += np.dot(self.S * xjvk, (np.log(Xass) - Xass/2 + 1/2))
else:
Xass = Xass0
return ares, Xass
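# ares returns the residual Helmholtz energy (in kT units) together with the updated
# association fractions. Typical call (sketch; temp_aux is assumed to come from the
# mixture's temperature_aux helper):
#
#     temp_aux = eos.temperature_aux(T)
#     a_res, Xass = ares(eos, x, rho, temp_aux)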
def dares_drho(self, x, rho, temp_aux, Xass0=None):
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
x_k = x[self.groups_index]
xs_ki = x_k*Sk*vki*vk
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3],
[0., 2., 6*xhix, 12*xhix2]])
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
da1kl, da2kl = da1sB_dxhi00_eval(xhi00, xhix, xhix_vec[:2], xs_m,
I_lambdaskl, J_lambdaskl, ccteskl,
a1vdwkl, a1vdw_ctekl, dxhix_dxhi00)
# zero order pertubation
daHS = dahs_dxhi00(xhi, dxhi_dxhi00)
# first order pertubation
suma1_monomer = Ckl * np.sum(da1kl * x0_a1, axis=1)
da1m = da1_dxhi00(xs_k, suma1_monomer)
# second order pertubation
khs, dkhs, d2khs = d2kHS_dxhi00(xhix, dxhix_dxhi00)
suma2_monomer = Ckl2 * np.sum(da2kl * x0_a2, axis=1)
da2m = da2_dxhi00(xs_k, khs, dkhs, xhixm, dxhixm_dxhi00, suma2_monomer,
eps_kl, f1, f2, f3)
# third order pertubaton
da3m = da3_dxhi00(xs_k, xhixm, dxhixm_dxhi00, eps_kl, f4, f5, f6)
damono = xs_m * (daHS + beta * da1m + beta**2 * da2m + beta**3 * da3m)
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
d2a1ii, d2a2ii = d2a1sB_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00)
# g hard sphere
dgHS = dgdHS_dxhi00(x0i_matrix, xhix, dxhix_dxhi00)
# gamma_c
dgc = dgammac_dxhi00(xhixm, dxhixm_dxhi00, alphaii, tetha)
# g1sigma
d2a1_chain = Cii * np.sum(d2a1ii[1:] * x0_a1ii, axis=1)
dsuma_g1 = Cii * np.sum(d2a1ii[:2] * x0_g1ii, axis=1)
dg1s = dg1sigma_dxhi00(xhi00, xs_m, d2a1_chain, dsuma_g1, a1vdw_cteii)
# g2sigma
d2suma2_chain = Cii2 * np.sum(d2a2ii * x0_a2ii, axis=1)
d2a2new = d2a2new_dxhi00(khs, dkhs, d2khs, d2suma2_chain, eps_ii)
dsuma_g2 = Cii2 * np.sum(d2a2ii[:2] * x0_g2ii, axis=1)
dg2m = dg2mca_dxhi00(xhi00, khs, dkhs, xs_m, d2a2new, dsuma_g2, eps_ii,
a1vdw_cteii)
dg2s = dg2m * (1 + dgc[0])
dg2s[1] += dg2m[0] * dgc[1]
dlng = dlngmie_dxhi00(dgHS, dg1s, dg2s, beps_ii, beps_ii2)
dachain = - dlng@(x * (self.zs_m - 1.))
ares = damono + dachain
ares *= self.dxhi00_1
if self.asso_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
sigma_kl3 = self.sigma_kl3
sigma_x3 = np.matmul(np.matmul(sigma_kl3, xs_k), xs_k)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
Iijklab = np.zeros([self.nc, self.nc])
dIijklab_drho = np.zeros([self.nc, self.nc])
dIab_drho(rho_ad, T_ad, drho_ad, Iijklab, dIijklab_drho)
diagasso = self.diagasso
# vki_asso = self.vki[self.group_asso_index]
vki_asso = self.vki_asso
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_drho = self.kAB_kl * Fklab
dDijklab_drho[self.indexABij] *= dIijklab_drho[self.indexAB_id]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXass = dXass_drho(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_drho, CIJ)
ares[0] += np.dot(self.S * xjvk, (np.log(Xass) - Xass/2 + 1/2))
ares[1] += np.dot(self.S * xjvk, (1/Xass - 1/2) * dXass)
else:
Xass = Xass0
return ares, Xass
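# dares_drho mirrors ares but also returns the first density derivative; the xhi00
# derivatives are rescaled to rho derivatives through self.dxhi00_1.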
def d2ares_drho(self, x, rho, temp_aux, Xass0=None):
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
x_k = x[self.groups_index]
xs_ki = x_k*Sk*vki*vk
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3.*xhix2, 4.*xhix3],
[0., 2., 6*xhix, 12.*xhix2],
[0., 0., 6., 24.*xhix]])
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
d2a1kl, d2a2kl = d2a1sB_dxhi00_eval(xhi00, xhix, xhix_vec[:3], xs_m,
I_lambdaskl, J_lambdaskl, ccteskl,
a1vdwkl, a1vdw_ctekl, dxhix_dxhi00)
# zero order pertubation
d2aHS = d2ahs_dxhi00(xhi, dxhi_dxhi00)
# first order pertubation
suma1_monomer = Ckl * np.sum(d2a1kl * x0_a1, axis=1)
d2a1m = d2a1_dxhi00(xs_k, suma1_monomer)
# second order pertubation
khs, dkhs, d2khs, d3khs = d3kHS_dxhi00(xhix, dxhix_dxhi00)
suma2_monomer = Ckl2 * np.sum(d2a2kl * x0_a2, axis=1)
d2a2m = d2a2_dxhi00(xs_k, khs, dkhs, d2khs, xhixm, dxhixm_dxhi00,
suma2_monomer, eps_kl, f1, f2, f3)
# third order pertubaton
d2a3m = d2a3_dxhi00(xs_k, xhixm, dxhixm_dxhi00, eps_kl, f4, f5, f6)
d2amono = xs_m * (d2aHS + beta * d2a1m + beta**2 * d2a2m + beta**3 * d2a3m)
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
d3a1ii, d3a2ii = d3a1sB_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00)
# g hard sphere
d2gHS = d2gdHS_dxhi00(x0i_matrix, xhix, dxhix_dxhi00)
# gamma_c
d2gc = d2gammac_dxhi00(xhixm, dxhixm_dxhi00, alphaii, tetha)
# g1sigma
d3a1_chain = Cii * np.sum(d3a1ii[1:] * x0_a1ii, axis=1)
d2suma_g1 = Cii * np.sum(d3a1ii[:3] * x0_g1ii, axis=1)
d2g1s = d2g1sigma_dxhi00(xhi00, xs_m, d3a1_chain, d2suma_g1, a1vdw_cteii)
# g2sigma
d3suma2_chain = Cii2 * np.sum(d3a2ii * x0_a2ii, axis=1)
d3a2new = d3a2new_dxhi00(khs, dkhs, d2khs, d3khs, d3suma2_chain, eps_ii)
d2suma_g2 = Cii2 * np.sum(d3a2ii[:3] * x0_g2ii, axis=1)
d2g2m = d2g2mca_dxhi00(xhi00, khs, dkhs, d2khs, xs_m, d3a2new, d2suma_g2,
eps_ii, a1vdw_cteii)
d2g2s = d2g2m * (1. + d2gc[0])
d2g2s[1] += d2g2m[0] * d2gc[1]
d2g2s[2] += 2. * d2g2m[1] * d2gc[1] + d2g2m[0] * d2gc[2]
d2lng = d2lngmie_dxhi00(d2gHS, d2g1s, d2g2s, beps_ii, beps_ii2)
d2achain = - d2lng@(x * (self.zs_m - 1.))
ares = d2amono + d2achain
ares *= self.dxhi00_2
if self.asso_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
sigma_kl3 = self.sigma_kl3
sigma_x3 = np.matmul(np.matmul(sigma_kl3, xs_k), xs_k)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
Iijklab = np.zeros([self.nc, self.nc])
dIijklab_drho = np.zeros([self.nc, self.nc])
d2Iijklab_drho = np.zeros([self.nc, self.nc])
d2Iab_drho(rho_ad, T_ad, drho_ad, Iijklab, dIijklab_drho,
d2Iijklab_drho)
diagasso = self.diagasso
# vki_asso = self.vki[self.group_asso_index]
vki_asso = self.vki_asso
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_drho = self.kAB_kl * Fklab
dDijklab_drho[self.indexABij] *= dIijklab_drho[self.indexAB_id]
d2Dijklab_drho = self.kAB_kl * Fklab
d2Dijklab_drho[self.indexABij] *= d2Iijklab_drho[self.indexAB_id]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXass = dXass_drho(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_drho, CIJ)
d2Xass = d2Xass_drho(rho, xjvk, Xass, dXass, DIJ, Dijklab,
dDijklab_drho, d2Dijklab_drho, CIJ)
aux0 = self.S * xjvk
aux1 = np.log(Xass) - Xass/2 + 1/2
aux2 = 1/Xass - 1/2
ares[0] += np.dot(aux0, aux1)
ares[1] += np.dot(aux0, aux2 * dXass)
ares[2] += np.dot(aux0, -(dXass/Xass)**2+d2Xass*aux2)
else:
Xass = Xass0
return ares, Xass
def dares_dx(self, x, rho, temp_aux, Xass0=None):
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
dxkdx = self.dxkdx
zs_m = self.zs_m
x_k = x[self.groups_index]
aux_Skvksvki = Sk*vki*vk
xs_ki = x_k*aux_Skvksvki
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
dxk_dx_aux = aux_Skvksvki * dxkdx
dxsk_dx = dxk_dx_aux * xs_m
dxsk_dx -= np.outer(zs_m, xs_ki)
dxsk_dx /= xs_m**2
out = dxhi_dx_eval(xhi00, xs_k, xs_m, d_kk03, dxk_dx_aux)
xhi, dxhi_dxhi00, dxhi_dx = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, d_kl3)
xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00 = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, sigma_kl3)
xhixm, dxhixm_dxhi00, dxhixm_dx, dxhixm_dx_dxhi00 = out
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3],
[0., 2, 6*xhix, 12*xhix2]])
khs, dkhs, dkhsx, dkhsxxhi = dkHS_dx_dxhi00(xhix, dxhix_dxhi00, dxhix_dx,
dxhix_dx_dxhi00)
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
out = da1sB_dx_eval(xhi00, xhix, xhix_vec[:2], xs_m, zs_m, I_lambdaskl,
J_lambdaskl, ccteskl, a1vdwkl, a1vdw_ctekl, dxhix_dx)
a1kl, a2kl, da1x_kl, da2x_kl = out
# zero order pertubation
aHS, daHSx = dahs_dx(xhi, dxhi_dx)
# first order pertubation
suma1_monomer = Ckl * np.sum(a1kl * x0_a1, axis=0)
suma1x_monomer = Ckl * (da1x_kl[0]*x0_a1[0] + da1x_kl[1]*x0_a1[1])
a1m, da1mx = da1_dx(xs_k, dxsk_dx, suma1_monomer, suma1x_monomer)
# second order pertubation
suma2_monomer = Ckl2 * np.sum(a2kl * x0_a2, axis=0)
suma2x_monomer = da2x_kl[0]*x0_a2[0] + da2x_kl[1]*x0_a2[1]
suma2x_monomer += da2x_kl[2]*x0_a2[2]
suma2x_monomer *= Ckl2
a2m, da2mx = da2_dx(xs_k, dxsk_dx, khs, dkhsx, xhixm, dxhixm_dx,
suma2_monomer, suma2x_monomer, eps_kl, f1, f2, f3)
# third order pertubation
a3m, da3mx = da3_dx(xs_k, dxsk_dx, xhixm, dxhixm_dx, eps_kl, f4, f5, f6)
beta2 = beta**2
beta3 = beta2*beta
am = aHS + beta * a1m + beta2 * a2m + beta3 * a3m
damx = daHSx + beta * da1mx + beta2 * da2mx + beta3 * da3mx
amono = xs_m * am
damonox = self.zs_m * am + xs_m * damx
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
out = da1sB_dx_dxhi00_dxxhi_eval(xhi00, xhix, xhix_vec, xs_m, zs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00,
dxhix_dx, dxhix_dx_dxhi00)
da1ii, da2ii, da1x_ii, da2x_ii, da1_xxhi00_ii, da2_xxhi00_ii = out
# g hard sphere
ghs, dghsx = dgdHS_dx(x0i_matrix, xhix, dxhix_dx)
# g1sigma
da1_chain = Cii * np.sum(da1ii[1] * x0_a1ii, axis=0)
da1x_chain = Cii*(da1_xxhi00_ii[0]*x0_a1ii[0]+da1_xxhi00_ii[1]*x0_a1ii[1])
suma_g1 = Cii * np.sum(da1ii[0] * x0_g1ii, axis=0)
suma_g1x = Cii*(da1x_ii[0] * x0_g1ii[0] + da1x_ii[1] * x0_g1ii[1])
g1s, dg1sx = dg1sigma_dx(xhi00, xs_m, zs_m, da1_chain, da1x_chain, suma_g1,
suma_g1x, a1vdw_cteii)
# gamma_c
gc, dgcx = dgammac_dx(xhixm, dxhixm_dx, alphaii, tetha)
# g2sigma
suma_g2 = Cii2 * np.sum(da2ii[0] * x0_g2ii, axis=0)
suma_g2x = da2x_ii[0]*x0_g2ii[0] + da2x_ii[1]*x0_g2ii[1]
suma_g2x += da2x_ii[2]*x0_g2ii[2]
suma_g2x *= Cii2
dsuma2_chain = Cii2 * np.sum(da2ii * x0_a2ii, axis=1)
dsuma2x_chain = da2x_ii[0] * x0_a2ii[0] + da2x_ii[1] * x0_a2ii[1]
dsuma2x_chain += da2x_ii[2] * x0_a2ii[2]
dsuma2x_chain *= Cii2
dsuma2xxhi_chain = da2_xxhi00_ii[0] * x0_a2ii[0]
dsuma2xxhi_chain += da2_xxhi00_ii[1] * x0_a2ii[1]
dsuma2xxhi_chain += da2_xxhi00_ii[2] * x0_a2ii[2]
dsuma2xxhi_chain *= Cii2
da2new, da2newx = da2new_dx_dxhi00(khs, dkhs, dkhsx, dkhsxxhi,
dsuma2_chain, dsuma2x_chain,
dsuma2xxhi_chain, eps_ii)
g2m, dg2mx = dg2mca_dx(xhi00, khs, dkhsx, xs_m, zs_m, da2new, da2newx,
suma_g2, suma_g2x, eps_ii, a1vdw_cteii)
g2s = g2m * (1 + gc)
dg2sx = dgcx*g2m + (1+gc)*dg2mx
lng, dlngx = dlngmie_dx(ghs, g1s, g2s, dghsx, dg1sx, dg2sx, beps_ii,
beps_ii2)
zs_m1 = (zs_m - 1.)
xzs_m1 = x*zs_m1
achain = - lng@xzs_m1
dachainx = - dlngx@xzs_m1 - lng * zs_m1
ares = amono + achain
daresx = damonox + dachainx
if self.asso_bool:
nc = self.nc
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# beta = temp_aux[0]
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
aux1 = xs_k * sigma_kl3
aux2 = np.dot(xs_k, aux1)
sigma_x3 = np.sum(aux2)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
suma1 = 2*np.sum(dxsk_dx@aux1, axis=1)
drhoad_dx = rho * (zs_m * sigma_x3 + xs_m * suma1)
Iijklab = np.zeros([nc, nc])
dIijklab = np.zeros([nc, nc])
dIab(rho_ad, T_ad, Iijklab, dIijklab)
dIijklab_dx = np.multiply.outer(drhoad_dx, dIijklab)
diagasso = self.diagasso
vki_asso = self.vki[self.group_asso_index]
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
dxjasso_dx = self.dxjasso_dx
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_dx = np.stack(nc*[self.kAB_kl * Fklab])
dDijklab_dx[:, self.indexABij[0], self.indexABij[1]] *= dIijklab_dx[:, self.indexAB_id[0], self.indexAB_id[1]]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXassx = dXass_dx(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_dx,
dxjasso_dx, CIJ)
aux1 = np.log(Xass) - Xass/2 + 1/2
aux2 = 1/Xass - 1/2
ares += np.dot(self.S*xjvk, aux1)
daresx += (dxjasso_dx * aux1 + dXassx * xjvk * aux2)@self.S
else:
Xass = Xass0
return ares, daresx, Xass
def dares_dx_drho(self, x, rho, temp_aux, Xass0=None):
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
dxkdx = self.dxkdx
zs_m = self.zs_m
x_k = x[self.groups_index]
aux_Skvksvki = Sk*vki*vk
xs_ki = x_k*aux_Skvksvki
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
dxk_dx_aux = aux_Skvksvki * dxkdx
dxsk_dx = dxk_dx_aux * xs_m
dxsk_dx -= np.outer(zs_m, xs_ki)
dxsk_dx /= xs_m**2
out = dxhi_dx_eval(xhi00, xs_k, xs_m, d_kk03, dxk_dx_aux)
xhi, dxhi_dxhi00, dxhi_dx = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, d_kl3)
xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00 = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, sigma_kl3)
xhixm, dxhixm_dxhi00, dxhixm_dx, dxhixm_dx_dxhi00 = out
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3],
[0., 2, 6*xhix, 12*xhix2]])
out = d2kHS_dx_dxhi00(xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00)
khs, dkhs, d2khs, dkhsx, dkhsxxhi = out
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
out = da1sB_dx_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m, zs_m,
I_lambdaskl, J_lambdaskl, ccteskl, a1vdwkl,
a1vdw_ctekl, dxhix_dxhi00, dxhix_dx)
da1kl, da2kl, da1x_kl, da2x_kl = out
# zero order pertubation
aHS, daHSx = dahs_dxxhi(xhi, dxhi_dxhi00, dxhi_dx)
# first order pertubation
suma1_monomer = Ckl * np.sum(da1kl * x0_a1, axis=1)
suma1x_monomer = Ckl * (da1x_kl[0]*x0_a1[0] + da1x_kl[1]*x0_a1[1])
a1m, da1mx = da1_dxxhi(xs_k, dxsk_dx, suma1_monomer, suma1x_monomer)
# second order pertubation
suma2_monomer = Ckl2 * np.sum(da2kl * x0_a2, axis=1)
suma2x_monomer = da2x_kl[0]*x0_a2[0] + da2x_kl[1]*x0_a2[1]
suma2x_monomer += da2x_kl[2]*x0_a2[2]
suma2x_monomer *= Ckl2
a2m, da2mx = da2_dxxhi(xs_k, dxsk_dx, khs, dkhs, dkhsx, xhixm,
dxhixm_dxhi00, dxhixm_dx, suma2_monomer,
suma2x_monomer, eps_kl, f1, f2, f3)
# third order pertubation
a3m, da3mx = da3_dxxhi(xs_k, dxsk_dx, xhixm, dxhixm_dxhi00, dxhixm_dx,
eps_kl, f4, f5, f6)
beta2 = beta**2
beta3 = beta2*beta
am = aHS + beta * a1m + beta2 * a2m + beta3 * a3m
damx = daHSx + beta * da1mx + beta2 * da2mx + beta3 * da3mx
amono = xs_m * am
damonox = self.zs_m * am[0] + xs_m * damx
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
out = da1sB_dx_d2xhi00_dxxhi_eval(xhi00, xhix, xhix_vec, xs_m, zs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00,
dxhix_dx, dxhix_dx_dxhi00)
d2a1ii, d2a2ii, da1x_ii, da2x_ii, da1_xxhi00_ii, da2_xxhi00_ii = out
# g hard sphere
ghs, dghsx = dgdHS_dxxhi(x0i_matrix, xhix, dxhix_dxhi00, dxhix_dx)
# g1sigma
d2a1_chain = Cii * np.sum(d2a1ii[1:] * x0_a1ii, axis=1)
# da1_chain = Cii * np.sum(da1ii[1] * x0_a1ii, axis=0)
da1x_chain = Cii*(da1_xxhi00_ii[0]*x0_a1ii[0]+da1_xxhi00_ii[1]*x0_a1ii[1])
dsuma_g1 = Cii * np.sum(d2a1ii[:2] * x0_g1ii, axis=1)
# suma_g1 = Cii * np.sum(da1ii[0] * x0_g1ii, axis=0)
suma_g1x = Cii*(da1x_ii[0] * x0_g1ii[0] + da1x_ii[1] * x0_g1ii[1])
g1s, dg1sx = dg1sigma_dxxhi(xhi00, xs_m, zs_m, d2a1_chain, da1x_chain,
dsuma_g1, suma_g1x, a1vdw_cteii)
# gamma_c
gc, dgcx = dgammac_dxxhi(xhixm, dxhixm_dxhi00, dxhixm_dx, alphaii, tetha)
# g2sigma
dsuma_g2 = Cii2 * np.sum(d2a2ii[:2] * x0_g2ii, axis=1)
suma_g2x = da2x_ii[0]*x0_g2ii[0] + da2x_ii[1]*x0_g2ii[1]
suma_g2x += da2x_ii[2]*x0_g2ii[2]
suma_g2x *= Cii2
dsuma2x_chain = da2x_ii[0] * x0_a2ii[0] + da2x_ii[1] * x0_a2ii[1]
dsuma2x_chain += da2x_ii[2] * x0_a2ii[2]
dsuma2x_chain *= Cii2
dsuma2xxhi_chain = da2_xxhi00_ii[0] * x0_a2ii[0]
dsuma2xxhi_chain += da2_xxhi00_ii[1] * x0_a2ii[1]
dsuma2xxhi_chain += da2_xxhi00_ii[2] * x0_a2ii[2]
dsuma2xxhi_chain *= Cii2
d2suma2_chain = Cii2 * np.sum(d2a2ii * x0_a2ii, axis=1)
*d2a2new, da2newx = da2new_dxxhi_dxhi00(khs, dkhs, d2khs, dkhsx, dkhsxxhi,
d2suma2_chain, dsuma2x_chain,
dsuma2xxhi_chain, eps_ii)
g2m, dg2mx = dg2mca_dxxhi(xhi00, khs, dkhs, dkhsx, xs_m, zs_m, d2a2new,
da2newx, dsuma_g2, suma_g2x, eps_ii, a1vdw_cteii)
g2s = g2m * (1 + gc[0])
g2s[1] += g2m[0] * gc[1]
dg2sx = dgcx*g2m[0] + (1 + gc[0])*dg2mx
lng, dlngx = dlngmie_dxxhi(ghs, g1s, g2s, dghsx, dg1sx, dg2sx, beps_ii,
beps_ii2)
zs_m1 = (zs_m - 1.)
xzs_m1 = x*zs_m1
achain = - lng@xzs_m1
dachainx = - dlngx@xzs_m1 - lng[0] * zs_m1
ares = amono + achain
ares *= self.dxhi00_1
daresx = damonox + dachainx
if self.asso_bool:
nc = self.nc
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# beta = temp_aux[0]
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
aux1 = xs_k * sigma_kl3
aux2 = np.dot(xs_k, aux1)
sigma_x3 = np.sum(aux2)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
suma1 = 2*np.sum(dxsk_dx@aux1, axis=1)
drhoad_dx = rho * (zs_m * sigma_x3 + xs_m * suma1)
Iijklab = np.zeros([nc, nc])
dIijklab = np.zeros([nc, nc])
dIab(rho_ad, T_ad, Iijklab, dIijklab)
dIijklab_dx = np.multiply.outer(drhoad_dx, dIijklab)
dIijklab_drho = dIijklab*drho_ad
diagasso = self.diagasso
vki_asso = self.vki[self.group_asso_index]
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
dxjasso_dx = self.dxjasso_dx
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_drho = self.kAB_kl * Fklab
dDijklab_drho[self.indexABij] *= dIijklab_drho[self.indexAB_id]
dDijklab_dx = np.stack(nc*[self.kAB_kl * Fklab])
dDijklab_dx[:, self.indexABij[0], self.indexABij[1]] *= dIijklab_dx[:, self.indexAB_id[0], self.indexAB_id[1]]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXass = dXass_drho(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_drho, CIJ)
dXassx = dXass_dx(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_dx,
dxjasso_dx, CIJ)
aux1 = np.log(Xass) - Xass/2 + 1/2
aux2 = 1/Xass - 1/2
ares[0] += np.dot(self.S*xjvk, aux1)
ares[1] += np.dot(self.S*xjvk, aux2 * dXass)
daresx += (dxjasso_dx * aux1 + dXassx * xjvk * aux2)@self.S
else:
Xass = Xass0
return ares, daresx, Xass
| from __future__ import division, print_function, absolute_import
import numpy as np
from .a1sB_monomer import a1sB_eval, da1sB_dxhi00_eval, d2a1sB_dxhi00_eval
from .a1sB_monomer import d3a1sB_dxhi00_eval
from .a1sB_monomer import da1sB_dx_eval, da1sB_dx_dxhi00_eval
from .a1sB_monomer import da1sB_dx_dxhi00_dxxhi_eval
from .a1sB_monomer import da1sB_dx_d2xhi00_dxxhi_eval
from .ahs_monomer import ahs, dahs_dxhi00, d2ahs_dxhi00
from .ahs_monomer import dahs_dx, dahs_dxxhi
from .a3_monomer import a3, da3_dxhi00, d2a3_dxhi00
from .a3_monomer import da3_dx, da3_dxxhi
from .a2_monomer import a2, da2_dxhi00, d2a2_dxhi00
from .a2_monomer import da2_dx, da2_dxxhi
from .a1_monomer import a1, da1_dxhi00, d2a1_dxhi00
from .a1_monomer import da1_dx, da1_dxxhi
from .a2new_chain import da2new_dxhi00, d2a2new_dxhi00, d3a2new_dxhi00
from .a2new_chain import da2new_dx_dxhi00, da2new_dxxhi_dxhi00
from .gdHS_chain import gdHS, dgdHS_dxhi00, d2gdHS_dxhi00
from .gdHS_chain import dgdHS_dx, dgdHS_dxxhi
from .gammac_chain import gammac, dgammac_dxhi00, d2gammac_dxhi00
from .gammac_chain import dgammac_dx, dgammac_dxxhi
from .g1sigma_chain import g1sigma, dg1sigma_dxhi00, d2g1sigma_dxhi00
from .g1sigma_chain import dg1sigma_dx, dg1sigma_dxxhi
from .g2mca_chain import g2mca, dg2mca_dxhi00, d2g2mca_dxhi00
from .g2mca_chain import dg2mca_dx, dg2mca_dxxhi
from .lngmie_chain import lngmie, dlngmie_dxhi00, d2lngmie_dxhi00
from .lngmie_chain import dlngmie_dx, dlngmie_dxxhi
from .monomer_aux import dkHS_dxhi00, d2kHS_dxhi00, d3kHS_dxhi00
from .monomer_aux import dkHS_dx_dxhi00, d2kHS_dx_dxhi00
from .association_aux import Xass_solver, CIJ_matrix
from .association_aux import dXass_drho, d2Xass_drho, dXass_dx
from .association_aux import Iab, dIab_drho, d2Iab_drho, dIab
# Eq. (14) Paper 2014
def xhi_eval(xhi00, xs_k, xs_m, d_kk03):
dxhi_dxhi00 = xs_m * np.matmul(xs_k, d_kk03)
xhi = xhi00 * dxhi_dxhi00
return xhi, dxhi_dxhi00
def dxhi_dx_eval(xhi00, xs_k, xs_m, d_kk03, dxk_dx_aux):
dxhi_dxhi00 = xs_m * np.matmul(xs_k, d_kk03)
xhi = xhi00 * dxhi_dxhi00
dxhi_dx = (xhi00*dxk_dx_aux@d_kk03).T
return xhi, dxhi_dxhi00, dxhi_dx
# Eq (22) Paper 2014
def xhix_eval(xhi00, xs_k, xs_m, d_kl3):
sum1 = np.matmul(np.matmul(xs_k, d_kl3), xs_k)
dxhix_dxhi00 = xs_m * sum1
xhix = xhi00 * dxhix_dxhi00
return xhix, dxhix_dxhi00
def dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, d_kl3):
aux1 = xs_k * d_kl3
aux2 = np.dot(xs_k, aux1)
aux3 = aux2.sum()
dxhix_dxhi00 = xs_m * aux3
xhix = xhi00 * dxhix_dxhi00
suma1 = 2*np.sum(dxsk_dx@aux1, axis=1)
dxhix_dx_dxhi00 = (zs_m * aux3 + xs_m * suma1)
dxhix_dx = xhi00 * dxhix_dx_dxhi00
return xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00
def ares(self, x, rho, temp_aux, Xass0=None):
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
x_k = x[self.groups_index]
xs_ki = x_k*Sk*vki*vk
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3]])
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
a1kl, a2kl = a1sB_eval(xhi00, xhix, xhix_vec[0], xs_m, I_lambdaskl,
J_lambdaskl, ccteskl, a1vdwkl, a1vdw_ctekl)
# zero order pertubation
aHS = ahs(xhi)
# first order pertubation
suma1_monomer = Ckl * np.sum(a1kl * x0_a1, axis=0)
a1m = a1(xs_k, suma1_monomer)
# second order pertubation
khs, dkhs = dkHS_dxhi00(xhix, dxhix_dxhi00)
suma2_monomer = Ckl2 * np.sum(a2kl * x0_a2, axis=0)
a2m = a2(xs_k, khs, xhixm, suma2_monomer, eps_kl, f1, f2, f3)
# third order pertubaton
a3m = a3(xs_k, xhixm, eps_kl, f4, f5, f6)
am = xs_m * (aHS + beta * a1m + beta**2 * a2m + beta**3 * a3m)
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
da1ii, da2ii = da1sB_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m, I_lambdasii,
J_lambdasii, cctesii, a1vdwii,
a1vdw_cteii, dxhix_dxhi00)
# g hard sphere
gHS = gdHS(x0i_matrix, xhix)
# gamma_c
gc = gammac(xhixm, alphaii, tetha)
# g1sigma
da1_chain = Cii * np.sum(da1ii[1] * x0_a1ii, axis=0)
suma_g1 = Cii * np.sum(da1ii[0] * x0_g1ii, axis=0)
g1s = g1sigma(xhi00, xs_m, da1_chain, suma_g1, a1vdw_cteii)
# g2sigma
dsuma2_chain = Cii2 * np.sum(da2ii * x0_a2ii, axis=1)
da2new = da2new_dxhi00(khs, dkhs, dsuma2_chain, eps_ii)
suma_g2 = Cii2 * np.sum(da2ii[0] * x0_g2ii, axis=0)
g2m = g2mca(xhi00, khs, xs_m, da2new, suma_g2, eps_ii, a1vdw_cteii)
g2s = (1 + gc) * g2m
lng = lngmie(gHS, g1s, g2s, beps_ii, beps_ii2)
ach = - lng@(x * (self.zs_m - 1.))
ares = am + ach
if self.asso_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
sigma_kl3 = self.sigma_kl3
sigma_x3 = np.matmul(np.matmul(sigma_kl3, xs_k), xs_k)
rho_ad = rho * xs_m * sigma_x3
Iijklab = np.zeros([self.nc, self.nc])
Iab(rho_ad, T_ad, Iijklab)
diagasso = self.diagasso
# vki_asso = self.vki[self.group_asso_index]
vki_asso = self.vki_asso
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
ares += np.dot(self.S * xjvk, (np.log(Xass) - Xass/2 + 1/2))
else:
Xass = Xass0
return ares, Xass
def dares_drho(self, x, rho, temp_aux, Xass0=None):
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
x_k = x[self.groups_index]
xs_ki = x_k*Sk*vki*vk
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3],
[0., 2., 6*xhix, 12*xhix2]])
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
da1kl, da2kl = da1sB_dxhi00_eval(xhi00, xhix, xhix_vec[:2], xs_m,
I_lambdaskl, J_lambdaskl, ccteskl,
a1vdwkl, a1vdw_ctekl, dxhix_dxhi00)
# zero order pertubation
daHS = dahs_dxhi00(xhi, dxhi_dxhi00)
# first order pertubation
suma1_monomer = Ckl * np.sum(da1kl * x0_a1, axis=1)
da1m = da1_dxhi00(xs_k, suma1_monomer)
# second order pertubation
khs, dkhs, d2khs = d2kHS_dxhi00(xhix, dxhix_dxhi00)
suma2_monomer = Ckl2 * np.sum(da2kl * x0_a2, axis=1)
da2m = da2_dxhi00(xs_k, khs, dkhs, xhixm, dxhixm_dxhi00, suma2_monomer,
eps_kl, f1, f2, f3)
# third order pertubaton
da3m = da3_dxhi00(xs_k, xhixm, dxhixm_dxhi00, eps_kl, f4, f5, f6)
damono = xs_m * (daHS + beta * da1m + beta**2 * da2m + beta**3 * da3m)
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
d2a1ii, d2a2ii = d2a1sB_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00)
# g hard sphere
dgHS = dgdHS_dxhi00(x0i_matrix, xhix, dxhix_dxhi00)
# gamma_c
dgc = dgammac_dxhi00(xhixm, dxhixm_dxhi00, alphaii, tetha)
# g1sigma
d2a1_chain = Cii * np.sum(d2a1ii[1:] * x0_a1ii, axis=1)
dsuma_g1 = Cii * np.sum(d2a1ii[:2] * x0_g1ii, axis=1)
dg1s = dg1sigma_dxhi00(xhi00, xs_m, d2a1_chain, dsuma_g1, a1vdw_cteii)
# g2sigma
d2suma2_chain = Cii2 * np.sum(d2a2ii * x0_a2ii, axis=1)
d2a2new = d2a2new_dxhi00(khs, dkhs, d2khs, d2suma2_chain, eps_ii)
dsuma_g2 = Cii2 * np.sum(d2a2ii[:2] * x0_g2ii, axis=1)
dg2m = dg2mca_dxhi00(xhi00, khs, dkhs, xs_m, d2a2new, dsuma_g2, eps_ii,
a1vdw_cteii)
dg2s = dg2m * (1 + dgc[0])
dg2s[1] += dg2m[0] * dgc[1]
dlng = dlngmie_dxhi00(dgHS, dg1s, dg2s, beps_ii, beps_ii2)
dachain = - dlng@(x * (self.zs_m - 1.))
ares = damono + dachain
ares *= self.dxhi00_1
if self.asso_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
sigma_kl3 = self.sigma_kl3
sigma_x3 = np.matmul(np.matmul(sigma_kl3, xs_k), xs_k)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
Iijklab = np.zeros([self.nc, self.nc])
dIijklab_drho = np.zeros([self.nc, self.nc])
dIab_drho(rho_ad, T_ad, drho_ad, Iijklab, dIijklab_drho)
diagasso = self.diagasso
# vki_asso = self.vki[self.group_asso_index]
vki_asso = self.vki_asso
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_drho = self.kAB_kl * Fklab
dDijklab_drho[self.indexABij] *= dIijklab_drho[self.indexAB_id]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXass = dXass_drho(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_drho, CIJ)
ares[0] += np.dot(self.S * xjvk, (np.log(Xass) - Xass/2 + 1/2))
ares[1] += np.dot(self.S * xjvk, (1/Xass - 1/2) * dXass)
else:
Xass = Xass0
return ares, Xass
def d2ares_drho(self, x, rho, temp_aux, Xass0=None):
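        """
        Residual Helmholtz free energy of the mixture and its first and
        second density derivatives, assembled below from the monomer, chain
        and (optional) association contributions.
        Returns (ares, Xass), where ares stacks [a, da/drho, d2a/drho2].
        """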
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
x_k = x[self.groups_index]
xs_ki = x_k*Sk*vki*vk
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
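        # rows: xhix**n and its 1st/2nd/3rd derivatives w.r.t. xhix (n = 1..4)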
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3.*xhix2, 4.*xhix3],
[0., 2., 6*xhix, 12.*xhix2],
[0., 0., 6., 24.*xhix]])
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
xhi, dxhi_dxhi00 = xhi_eval(xhi00, xs_k, xs_m, d_kk03)
xhix, dxhix_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, d_kl3)
xhixm, dxhixm_dxhi00 = xhix_eval(xhi00, xs_k, xs_m, sigma_kl3)
d2a1kl, d2a2kl = d2a1sB_dxhi00_eval(xhi00, xhix, xhix_vec[:3], xs_m,
I_lambdaskl, J_lambdaskl, ccteskl,
a1vdwkl, a1vdw_ctekl, dxhix_dxhi00)
        # zero order perturbation
d2aHS = d2ahs_dxhi00(xhi, dxhi_dxhi00)
        # first order perturbation
suma1_monomer = Ckl * np.sum(d2a1kl * x0_a1, axis=1)
d2a1m = d2a1_dxhi00(xs_k, suma1_monomer)
        # second order perturbation
khs, dkhs, d2khs, d3khs = d3kHS_dxhi00(xhix, dxhix_dxhi00)
suma2_monomer = Ckl2 * np.sum(d2a2kl * x0_a2, axis=1)
d2a2m = d2a2_dxhi00(xs_k, khs, dkhs, d2khs, xhixm, dxhixm_dxhi00,
suma2_monomer, eps_kl, f1, f2, f3)
        # third order perturbation
d2a3m = d2a3_dxhi00(xs_k, xhixm, dxhixm_dxhi00, eps_kl, f4, f5, f6)
d2amono = xs_m * (d2aHS + beta * d2a1m + beta**2 * d2a2m + beta**3 * d2a3m)
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
d3a1ii, d3a2ii = d3a1sB_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00)
# g hard sphere
d2gHS = d2gdHS_dxhi00(x0i_matrix, xhix, dxhix_dxhi00)
# gamma_c
d2gc = d2gammac_dxhi00(xhixm, dxhixm_dxhi00, alphaii, tetha)
# g1sigma
d3a1_chain = Cii * np.sum(d3a1ii[1:] * x0_a1ii, axis=1)
d2suma_g1 = Cii * np.sum(d3a1ii[:3] * x0_g1ii, axis=1)
d2g1s = d2g1sigma_dxhi00(xhi00, xs_m, d3a1_chain, d2suma_g1, a1vdw_cteii)
# g2sigma
d3suma2_chain = Cii2 * np.sum(d3a2ii * x0_a2ii, axis=1)
d3a2new = d3a2new_dxhi00(khs, dkhs, d2khs, d3khs, d3suma2_chain, eps_ii)
d2suma_g2 = Cii2 * np.sum(d3a2ii[:3] * x0_g2ii, axis=1)
d2g2m = d2g2mca_dxhi00(xhi00, khs, dkhs, d2khs, xs_m, d3a2new, d2suma_g2,
eps_ii, a1vdw_cteii)
d2g2s = d2g2m * (1. + d2gc[0])
d2g2s[1] += d2g2m[0] * d2gc[1]
d2g2s[2] += 2. * d2g2m[1] * d2gc[1] + d2g2m[0] * d2gc[2]
d2lng = d2lngmie_dxhi00(d2gHS, d2g1s, d2g2s, beps_ii, beps_ii2)
d2achain = - d2lng@(x * (self.zs_m - 1.))
ares = d2amono + d2achain
ares *= self.dxhi00_2
if self.asso_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
sigma_kl3 = self.sigma_kl3
sigma_x3 = np.matmul(np.matmul(sigma_kl3, xs_k), xs_k)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
Iijklab = np.zeros([self.nc, self.nc])
dIijklab_drho = np.zeros([self.nc, self.nc])
d2Iijklab_drho = np.zeros([self.nc, self.nc])
d2Iab_drho(rho_ad, T_ad, drho_ad, Iijklab, dIijklab_drho,
d2Iijklab_drho)
diagasso = self.diagasso
# vki_asso = self.vki[self.group_asso_index]
vki_asso = self.vki_asso
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_drho = self.kAB_kl * Fklab
dDijklab_drho[self.indexABij] *= dIijklab_drho[self.indexAB_id]
d2Dijklab_drho = self.kAB_kl * Fklab
d2Dijklab_drho[self.indexABij] *= d2Iijklab_drho[self.indexAB_id]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXass = dXass_drho(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_drho, CIJ)
d2Xass = d2Xass_drho(rho, xjvk, Xass, dXass, DIJ, Dijklab,
dDijklab_drho, d2Dijklab_drho, CIJ)
aux0 = self.S * xjvk
aux1 = np.log(Xass) - Xass/2 + 1/2
aux2 = 1/Xass - 1/2
ares[0] += np.dot(aux0, aux1)
ares[1] += np.dot(aux0, aux2 * dXass)
ares[2] += np.dot(aux0, -(dXass/Xass)**2+d2Xass*aux2)
else:
Xass = Xass0
return ares, Xass
def dares_dx(self, x, rho, temp_aux, Xass0=None):
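        """
        Residual Helmholtz free energy of the mixture and its composition
        derivatives, assembled below from the monomer, chain and (optional)
        association contributions.
        Returns (ares, daresx, Xass).
        """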
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
dxkdx = self.dxkdx
zs_m = self.zs_m
x_k = x[self.groups_index]
aux_Skvksvki = Sk*vki*vk
xs_ki = x_k*aux_Skvksvki
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
dxk_dx_aux = aux_Skvksvki * dxkdx
dxsk_dx = dxk_dx_aux * xs_m
dxsk_dx -= np.outer(zs_m, xs_ki)
dxsk_dx /= xs_m**2
out = dxhi_dx_eval(xhi00, xs_k, xs_m, d_kk03, dxk_dx_aux)
xhi, dxhi_dxhi00, dxhi_dx = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, d_kl3)
xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00 = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, sigma_kl3)
xhixm, dxhixm_dxhi00, dxhixm_dx, dxhixm_dx_dxhi00 = out
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3],
[0., 2, 6*xhix, 12*xhix2]])
khs, dkhs, dkhsx, dkhsxxhi = dkHS_dx_dxhi00(xhix, dxhix_dxhi00, dxhix_dx,
dxhix_dx_dxhi00)
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
out = da1sB_dx_eval(xhi00, xhix, xhix_vec[:2], xs_m, zs_m, I_lambdaskl,
J_lambdaskl, ccteskl, a1vdwkl, a1vdw_ctekl, dxhix_dx)
a1kl, a2kl, da1x_kl, da2x_kl = out
        # zero order perturbation
aHS, daHSx = dahs_dx(xhi, dxhi_dx)
        # first order perturbation
suma1_monomer = Ckl * np.sum(a1kl * x0_a1, axis=0)
suma1x_monomer = Ckl * (da1x_kl[0]*x0_a1[0] + da1x_kl[1]*x0_a1[1])
a1m, da1mx = da1_dx(xs_k, dxsk_dx, suma1_monomer, suma1x_monomer)
        # second order perturbation
suma2_monomer = Ckl2 * np.sum(a2kl * x0_a2, axis=0)
suma2x_monomer = da2x_kl[0]*x0_a2[0] + da2x_kl[1]*x0_a2[1]
suma2x_monomer += da2x_kl[2]*x0_a2[2]
suma2x_monomer *= Ckl2
a2m, da2mx = da2_dx(xs_k, dxsk_dx, khs, dkhsx, xhixm, dxhixm_dx,
suma2_monomer, suma2x_monomer, eps_kl, f1, f2, f3)
        # third order perturbation
a3m, da3mx = da3_dx(xs_k, dxsk_dx, xhixm, dxhixm_dx, eps_kl, f4, f5, f6)
beta2 = beta**2
beta3 = beta2*beta
am = aHS + beta * a1m + beta2 * a2m + beta3 * a3m
damx = daHSx + beta * da1mx + beta2 * da2mx + beta3 * da3mx
amono = xs_m * am
damonox = self.zs_m * am + xs_m * damx
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
out = da1sB_dx_dxhi00_dxxhi_eval(xhi00, xhix, xhix_vec, xs_m, zs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00,
dxhix_dx, dxhix_dx_dxhi00)
da1ii, da2ii, da1x_ii, da2x_ii, da1_xxhi00_ii, da2_xxhi00_ii = out
# g hard sphere
ghs, dghsx = dgdHS_dx(x0i_matrix, xhix, dxhix_dx)
# g1sigma
da1_chain = Cii * np.sum(da1ii[1] * x0_a1ii, axis=0)
da1x_chain = Cii*(da1_xxhi00_ii[0]*x0_a1ii[0]+da1_xxhi00_ii[1]*x0_a1ii[1])
suma_g1 = Cii * np.sum(da1ii[0] * x0_g1ii, axis=0)
suma_g1x = Cii*(da1x_ii[0] * x0_g1ii[0] + da1x_ii[1] * x0_g1ii[1])
g1s, dg1sx = dg1sigma_dx(xhi00, xs_m, zs_m, da1_chain, da1x_chain, suma_g1,
suma_g1x, a1vdw_cteii)
# gamma_c
gc, dgcx = dgammac_dx(xhixm, dxhixm_dx, alphaii, tetha)
# g2sigma
suma_g2 = Cii2 * np.sum(da2ii[0] * x0_g2ii, axis=0)
suma_g2x = da2x_ii[0]*x0_g2ii[0] + da2x_ii[1]*x0_g2ii[1]
suma_g2x += da2x_ii[2]*x0_g2ii[2]
suma_g2x *= Cii2
dsuma2_chain = Cii2 * np.sum(da2ii * x0_a2ii, axis=1)
dsuma2x_chain = da2x_ii[0] * x0_a2ii[0] + da2x_ii[1] * x0_a2ii[1]
dsuma2x_chain += da2x_ii[2] * x0_a2ii[2]
dsuma2x_chain *= Cii2
dsuma2xxhi_chain = da2_xxhi00_ii[0] * x0_a2ii[0]
dsuma2xxhi_chain += da2_xxhi00_ii[1] * x0_a2ii[1]
dsuma2xxhi_chain += da2_xxhi00_ii[2] * x0_a2ii[2]
dsuma2xxhi_chain *= Cii2
da2new, da2newx = da2new_dx_dxhi00(khs, dkhs, dkhsx, dkhsxxhi,
dsuma2_chain, dsuma2x_chain,
dsuma2xxhi_chain, eps_ii)
g2m, dg2mx = dg2mca_dx(xhi00, khs, dkhsx, xs_m, zs_m, da2new, da2newx,
suma_g2, suma_g2x, eps_ii, a1vdw_cteii)
g2s = g2m * (1 + gc)
dg2sx = dgcx*g2m + (1+gc)*dg2mx
lng, dlngx = dlngmie_dx(ghs, g1s, g2s, dghsx, dg1sx, dg2sx, beps_ii,
beps_ii2)
zs_m1 = (zs_m - 1.)
xzs_m1 = x*zs_m1
achain = - lng@xzs_m1
dachainx = - dlngx@xzs_m1 - lng * zs_m1
ares = amono + achain
daresx = damonox + dachainx
if self.asso_bool:
nc = self.nc
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# beta = temp_aux[0]
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
aux1 = xs_k * sigma_kl3
aux2 = np.dot(xs_k, aux1)
sigma_x3 = np.sum(aux2)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
suma1 = 2*np.sum(dxsk_dx@aux1, axis=1)
drhoad_dx = rho * (zs_m * sigma_x3 + xs_m * suma1)
Iijklab = np.zeros([nc, nc])
dIijklab = np.zeros([nc, nc])
dIab(rho_ad, T_ad, Iijklab, dIijklab)
dIijklab_dx = np.multiply.outer(drhoad_dx, dIijklab)
diagasso = self.diagasso
vki_asso = self.vki[self.group_asso_index]
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
dxjasso_dx = self.dxjasso_dx
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_dx = np.stack(nc*[self.kAB_kl * Fklab])
dDijklab_dx[:, self.indexABij[0], self.indexABij[1]] *= dIijklab_dx[:, self.indexAB_id[0], self.indexAB_id[1]]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXassx = dXass_dx(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_dx,
dxjasso_dx, CIJ)
aux1 = np.log(Xass) - Xass/2 + 1/2
aux2 = 1/Xass - 1/2
ares += np.dot(self.S*xjvk, aux1)
daresx += (dxjasso_dx * aux1 + dXassx * xjvk * aux2)@self.S
else:
Xass = Xass0
return ares, daresx, Xass
def dares_dx_drho(self, x, rho, temp_aux, Xass0=None):
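        """
        Residual Helmholtz free energy of the mixture, its first density
        derivative and its composition derivatives.
        Returns (ares, daresx, Xass), where ares stacks [a, da/drho].
        """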
beta, beta2, beta3, d_kk, d_kl, d_kl3, d_kk03, x0_kl = temp_aux[:8]
a1vdw_ctekl, a1vdwkl, x0_a1, x0_a2, I_lambdaskl = temp_aux[8:13]
J_lambdaskl, d_ii, d_ii3, x0_ii, a1vdw_cteii, a1vdwii = temp_aux[13:19]
tetha, x0_a1ii, x0_a2ii, x0_g1ii, x0_g2ii, I_lambdasii = temp_aux[19:25]
J_lambdasii, x0i_matrix, beps_ii, beps_ii2 = temp_aux[25:29]
dxhi00_drho = self.dxhi00_drho
xhi00 = rho*dxhi00_drho
sigma_kl3 = self.sigma_kl3
Sk = self.Sk
vki = self.vki
vk = self.vk
dxkdx = self.dxkdx
zs_m = self.zs_m
x_k = x[self.groups_index]
aux_Skvksvki = Sk*vki*vk
xs_ki = x_k*aux_Skvksvki
xs_m = np.sum(xs_ki)
xs_k = xs_ki / xs_m
dxk_dx_aux = aux_Skvksvki * dxkdx
dxsk_dx = dxk_dx_aux * xs_m
dxsk_dx -= np.outer(zs_m, xs_ki)
dxsk_dx /= xs_m**2
out = dxhi_dx_eval(xhi00, xs_k, xs_m, d_kk03, dxk_dx_aux)
xhi, dxhi_dxhi00, dxhi_dx = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, d_kl3)
xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00 = out
out = dxhix_dx_eval(xhi00, xs_k, dxsk_dx, xs_m, zs_m, sigma_kl3)
xhixm, dxhixm_dxhi00, dxhixm_dx, dxhixm_dx_dxhi00 = out
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_vec = np.array([[xhix, xhix2, xhix3, xhix4],
[1., 2 * xhix, 3*xhix2, 4*xhix3],
[0., 2, 6*xhix, 12*xhix2]])
out = d2kHS_dx_dxhi00(xhix, dxhix_dxhi00, dxhix_dx, dxhix_dx_dxhi00)
khs, dkhs, d2khs, dkhsx, dkhsxxhi = out
# monomer contribution calculation
Ckl = self.Ckl
Ckl2 = self.Ckl2
eps_kl = self.eps_kl
f1, f2, f3 = self.f1, self.f2, self.f3
f4, f5, f6 = self.f4, self.f5, self.f6
# lambdaskl = self.lambdaskl
ccteskl = self.ccteskl
out = da1sB_dx_dxhi00_eval(xhi00, xhix, xhix_vec, xs_m, zs_m,
I_lambdaskl, J_lambdaskl, ccteskl, a1vdwkl,
a1vdw_ctekl, dxhix_dxhi00, dxhix_dx)
da1kl, da2kl, da1x_kl, da2x_kl = out
        # zero order perturbation
aHS, daHSx = dahs_dxxhi(xhi, dxhi_dxhi00, dxhi_dx)
        # first order perturbation
suma1_monomer = Ckl * np.sum(da1kl * x0_a1, axis=1)
suma1x_monomer = Ckl * (da1x_kl[0]*x0_a1[0] + da1x_kl[1]*x0_a1[1])
a1m, da1mx = da1_dxxhi(xs_k, dxsk_dx, suma1_monomer, suma1x_monomer)
        # second order perturbation
suma2_monomer = Ckl2 * np.sum(da2kl * x0_a2, axis=1)
suma2x_monomer = da2x_kl[0]*x0_a2[0] + da2x_kl[1]*x0_a2[1]
suma2x_monomer += da2x_kl[2]*x0_a2[2]
suma2x_monomer *= Ckl2
a2m, da2mx = da2_dxxhi(xs_k, dxsk_dx, khs, dkhs, dkhsx, xhixm,
dxhixm_dxhi00, dxhixm_dx, suma2_monomer,
suma2x_monomer, eps_kl, f1, f2, f3)
        # third order perturbation
a3m, da3mx = da3_dxxhi(xs_k, dxsk_dx, xhixm, dxhixm_dxhi00, dxhixm_dx,
eps_kl, f4, f5, f6)
beta2 = beta**2
beta3 = beta2*beta
am = aHS + beta * a1m + beta2 * a2m + beta3 * a3m
damx = daHSx + beta * da1mx + beta2 * da2mx + beta3 * da3mx
amono = xs_m * am
damonox = self.zs_m * am[0] + xs_m * damx
# chain contribution calculation
# lambdasii = self.lambdasii
cctesii = self.cctesii
alphaii = self.alphaii
eps_ii = self.eps_ii
Cii = self.Cii
Cii2 = self.Cii2
out = da1sB_dx_d2xhi00_dxxhi_eval(xhi00, xhix, xhix_vec, xs_m, zs_m,
I_lambdasii, J_lambdasii, cctesii,
a1vdwii, a1vdw_cteii, dxhix_dxhi00,
dxhix_dx, dxhix_dx_dxhi00)
d2a1ii, d2a2ii, da1x_ii, da2x_ii, da1_xxhi00_ii, da2_xxhi00_ii = out
# g hard sphere
ghs, dghsx = dgdHS_dxxhi(x0i_matrix, xhix, dxhix_dxhi00, dxhix_dx)
# g1sigma
d2a1_chain = Cii * np.sum(d2a1ii[1:] * x0_a1ii, axis=1)
# da1_chain = Cii * np.sum(da1ii[1] * x0_a1ii, axis=0)
da1x_chain = Cii*(da1_xxhi00_ii[0]*x0_a1ii[0]+da1_xxhi00_ii[1]*x0_a1ii[1])
dsuma_g1 = Cii * np.sum(d2a1ii[:2] * x0_g1ii, axis=1)
# suma_g1 = Cii * np.sum(da1ii[0] * x0_g1ii, axis=0)
suma_g1x = Cii*(da1x_ii[0] * x0_g1ii[0] + da1x_ii[1] * x0_g1ii[1])
g1s, dg1sx = dg1sigma_dxxhi(xhi00, xs_m, zs_m, d2a1_chain, da1x_chain,
dsuma_g1, suma_g1x, a1vdw_cteii)
# gamma_c
gc, dgcx = dgammac_dxxhi(xhixm, dxhixm_dxhi00, dxhixm_dx, alphaii, tetha)
# g2sigma
dsuma_g2 = Cii2 * np.sum(d2a2ii[:2] * x0_g2ii, axis=1)
suma_g2x = da2x_ii[0]*x0_g2ii[0] + da2x_ii[1]*x0_g2ii[1]
suma_g2x += da2x_ii[2]*x0_g2ii[2]
suma_g2x *= Cii2
dsuma2x_chain = da2x_ii[0] * x0_a2ii[0] + da2x_ii[1] * x0_a2ii[1]
dsuma2x_chain += da2x_ii[2] * x0_a2ii[2]
dsuma2x_chain *= Cii2
dsuma2xxhi_chain = da2_xxhi00_ii[0] * x0_a2ii[0]
dsuma2xxhi_chain += da2_xxhi00_ii[1] * x0_a2ii[1]
dsuma2xxhi_chain += da2_xxhi00_ii[2] * x0_a2ii[2]
dsuma2xxhi_chain *= Cii2
d2suma2_chain = Cii2 * np.sum(d2a2ii * x0_a2ii, axis=1)
*d2a2new, da2newx = da2new_dxxhi_dxhi00(khs, dkhs, d2khs, dkhsx, dkhsxxhi,
d2suma2_chain, dsuma2x_chain,
dsuma2xxhi_chain, eps_ii)
g2m, dg2mx = dg2mca_dxxhi(xhi00, khs, dkhs, dkhsx, xs_m, zs_m, d2a2new,
da2newx, dsuma_g2, suma_g2x, eps_ii, a1vdw_cteii)
g2s = g2m * (1 + gc[0])
g2s[1] += g2m[0] * gc[1]
dg2sx = dgcx*g2m[0] + (1 + gc[0])*dg2mx
lng, dlngx = dlngmie_dxxhi(ghs, g1s, g2s, dghsx, dg1sx, dg2sx, beps_ii,
beps_ii2)
zs_m1 = (zs_m - 1.)
xzs_m1 = x*zs_m1
achain = - lng@xzs_m1
dachainx = - dlngx@xzs_m1 - lng[0] * zs_m1
ares = amono + achain
ares *= self.dxhi00_1
daresx = damonox + dachainx
if self.asso_bool:
nc = self.nc
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
# beta = temp_aux[0]
# T_ad = 1/(self.eps_ij*beta)
T_ad = temp_aux[29]
aux1 = xs_k * sigma_kl3
aux2 = np.dot(xs_k, aux1)
sigma_x3 = np.sum(aux2)
drho_ad = xs_m * sigma_x3
rho_ad = rho * drho_ad
suma1 = 2*np.sum(dxsk_dx@aux1, axis=1)
drhoad_dx = rho * (zs_m * sigma_x3 + xs_m * suma1)
Iijklab = np.zeros([nc, nc])
dIijklab = np.zeros([nc, nc])
dIab(rho_ad, T_ad, Iijklab, dIijklab)
dIijklab_dx = np.multiply.outer(drhoad_dx, dIijklab)
dIijklab_drho = dIijklab*drho_ad
diagasso = self.diagasso
vki_asso = self.vki[self.group_asso_index]
DIJ = self.DIJ
xj_asso = x[self.molecule_id_index_sites]
xjvk = xj_asso*vki_asso
dxjasso_dx = self.dxjasso_dx
# Fklab = np.exp(self.epsAB_kl * beta) - 1
Fklab = temp_aux[30]
Dijklab = self.kAB_kl * Fklab
Dijklab[self.indexABij] *= Iijklab[self.indexAB_id]
dDijklab_drho = self.kAB_kl * Fklab
dDijklab_drho[self.indexABij] *= dIijklab_drho[self.indexAB_id]
dDijklab_dx = np.stack(nc*[self.kAB_kl * Fklab])
dDijklab_dx[:, self.indexABij[0], self.indexABij[1]] *= dIijklab_dx[:, self.indexAB_id[0], self.indexAB_id[1]]
Xass = Xass_solver(rho, xjvk, DIJ, Dijklab, diagasso, Xass)
CIJ = CIJ_matrix(rho, xjvk, Xass, DIJ, Dijklab, diagasso)
dXass = dXass_drho(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_drho, CIJ)
dXassx = dXass_dx(rho, xjvk, Xass, DIJ, Dijklab, dDijklab_dx,
dxjasso_dx, CIJ)
aux1 = np.log(Xass) - Xass/2 + 1/2
aux2 = 1/Xass - 1/2
ares[0] += np.dot(self.S*xjvk, aux1)
ares[1] += np.dot(self.S*xjvk, aux2 * dXass)
daresx += (dxjasso_dx * aux1 + dXassx * xjvk * aux2)@self.S
else:
Xass = Xass0
return ares, daresx, Xass | en | 0.428406 | # Eq. (14) Paper 2014 # Eq (22) Paper 2014 # monomer contribution calculation # lambdaskl = self.lambdaskl # zero order pertubation # first order pertubation # second order pertubation # third order pertubaton # chain contribution calculation # lambdasii = self.lambdasii # g hard sphere # gamma_c # g1sigma # g2sigma # T_ad = 1/(self.eps_ij*beta) # vki_asso = self.vki[self.group_asso_index] # Fklab = np.exp(self.epsAB_kl * beta) - 1 # monomer contribution calculation # lambdaskl = self.lambdaskl # zero order pertubation # first order pertubation # second order pertubation # third order pertubaton # chain contribution calculation # lambdasii = self.lambdasii # g hard sphere # gamma_c # g1sigma # g2sigma # T_ad = 1/(self.eps_ij*beta) # vki_asso = self.vki[self.group_asso_index] # Fklab = np.exp(self.epsAB_kl * beta) - 1 # monomer contribution calculation # lambdaskl = self.lambdaskl # zero order pertubation # first order pertubation # second order pertubation # third order pertubaton # chain contribution calculation # lambdasii = self.lambdasii # g hard sphere # gamma_c # g1sigma # g2sigma # T_ad = 1/(self.eps_ij*beta) # vki_asso = self.vki[self.group_asso_index] # Fklab = np.exp(self.epsAB_kl * beta) - 1 # monomer contribution calculation # lambdaskl = self.lambdaskl # zero order pertubation # first order pertubation # second order pertubation # third order pertubation # chain contribution calculation # lambdasii = self.lambdasii # g hard sphere # g1sigma # gamma_c # g2sigma # beta = temp_aux[0] # T_ad = 1/(self.eps_ij*beta) # Fklab = np.exp(self.epsAB_kl * beta) - 1 # monomer contribution calculation # lambdaskl = self.lambdaskl # zero order pertubation # first order pertubation # second order pertubation # third order pertubation # chain contribution calculation # lambdasii = self.lambdasii # g hard sphere # g1sigma # da1_chain = Cii * np.sum(da1ii[1] * x0_a1ii, axis=0) # suma_g1 = Cii * np.sum(da1ii[0] * x0_g1ii, axis=0) # gamma_c # g2sigma # beta = temp_aux[0] # T_ad = 1/(self.eps_ij*beta) # Fklab = np.exp(self.epsAB_kl * beta) - 1 | 1.293054 | 1 |
panel/models/katex.py | vaishali-verma-19/panel | 0 | 6630978 | """
Defines a custom KaTeX bokeh model to render text using KaTeX.
"""
from bokeh.models import Markup
class KaTeX(Markup):
"""
A bokeh model that renders text using KaTeX.
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js",
"https://cdn.jsdelivr.net/npm/[email protected]/dist/contrib/auto-render.min.js"]
__js_require__ = {'paths': {'katex': 'https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min',
'autoLoad': 'https://cdn.jsdelivr.net/npm/[email protected]/dist/contrib/auto-render.min'},
'exports': {'katex': 'katex', 'autoLoad': 'renderMathInElement'}}
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
| """
Defines a custom KaTeX bokeh model to render text using KaTeX.
"""
from bokeh.models import Markup
class KaTeX(Markup):
"""
A bokeh model that renders text using KaTeX.
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js",
"https://cdn.jsdelivr.net/npm/[email protected]/dist/contrib/auto-render.min.js"]
__js_require__ = {'paths': {'katex': 'https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min',
'autoLoad': 'https://cdn.jsdelivr.net/npm/[email protected]/dist/contrib/auto-render.min'},
'exports': {'katex': 'katex', 'autoLoad': 'renderMathInElement'}}
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
| en | 0.469744 | Defines a custom KaTeX bokeh model to render text using KaTeX. A bokeh model that renders text using KaTeX. | 2.703484 | 3 |
cmz/cms_news/migrations/0006_newstranslation_body.py | inmagik/cmz | 1 | 6630979 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-23 20:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms_news', '0005_remove_newstranslation_body'),
]
operations = [
migrations.AddField(
model_name='newstranslation',
name='body',
field=models.TextField(default=''),
preserve_default=False,
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-23 20:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms_news', '0005_remove_newstranslation_body'),
]
operations = [
migrations.AddField(
model_name='newstranslation',
name='body',
field=models.TextField(default=''),
preserve_default=False,
),
]
| en | 0.800454 | # -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-09-23 20:47 | 1.519499 | 2 |
cfn_pyplates/utils.py | JSainsburyPLC/cfn-pyplates | 0 | 6630980 | <reponame>JSainsburyPLC/cfn-pyplates
# Copyright (c) 2013 ReThought Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
"""
These utilities are ReThought additions that provide additional functionality
Useful to us, they may be quite specific in cases.
"""
import re
from cfn_pyplates.functions import join
from jinja2 import Template
# Match strings of the form {'XXX': 'YYY'} e.g.
# {'Ref': 'AWS::Region'}
CFN_FN_RE = r"{'[^{^}.]*'}"
FN_MATCH = re.compile(r"({})".format(CFN_FN_RE))
# As above, but match only if this comprises the entire string
STRICT_MATCH = re.compile(r"^{}$".format(CFN_FN_RE))
def _selective_eval(s):
"""
Takes supplied string and if it matches STRICT_MATCH, it is returned
evaled so as to be a Python structure (dict), otherwise it is returned
as is.
This is to be used exclusively by templated_read to render correctly
the CloudFormation functions that it finds in the rendered output.
There are no doubt edge-cases on which this does the wrong thing!
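    Illustrative examples (based on STRICT_MATCH above):
        _selective_eval("{'Ref': 'AWS::Region'}") -> {'Ref': 'AWS::Region'}
        _selective_eval("plain text") -> 'plain text'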
"""
if STRICT_MATCH.match(s) is None:
return s
return eval(s)
def templated_read(file_handle, context={}):
"""
This function reads content from a file handle and processes as a template
The Jinja2 templating engine is used, and the supplied context is provided.
Once Jinja template processed, the document is split to extract
CFN functions, e.g. Ref and Fn::Join etc, and the whole lot is
returned Fn::Joined together (using the cfn_pyplates `join` function)
ready to place in a UserData argument.
This process is required in order that the Cloudformation functions are
not embedded in strings where they would not be correctly evaluated
at the time the template is processed by Cloudformation.
Args:
file_handle: any file-like object
context: a dictionary of keys to use in the template
Example
-------
File template:
# snippet of script...
$CFN_ROOT/cfn-init -s {{ stack_id }} -r {{ resource_name }} \
--region {{ aws_region }} || error_exit 'Failed to run cfn-init'
In the PyPlates code:
...
'UserData':
templated_read(
open('my_template_script.sh', 'rt'),
{'resource_name': 'MyWebServer',
'stack_id': ref('AWS::StackId'),
'aws_region': ref('AWS::Region')
}),
...
After processing, in the Cloudformation template:
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"$CFN_ROOT/cfn-init -s ",
{
"Ref": "AWS::StackId"
},
" -r MyWebServer --region ",
{
"Ref": "AWS::Region"
},
" || error_exit 'Failed to run cfn-init'"
]
]
}
},
"""
template = Template(file_handle.read())
rendered = template.render(**context)
tokens = FN_MATCH.split(rendered)
return join("", *[_selective_eval(s) for s in tokens])
| # Copyright (c) 2013 ReThought Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
"""
These utilities are ReThought additions that provide additional functionality
Useful to us, they may be quite specific in cases.
"""
import re
from cfn_pyplates.functions import join
from jinja2 import Template
# Match strings of the form {'XXX': 'YYY'} e.g.
# {'Ref': 'AWS::Region'}
CFN_FN_RE = r"{'[^{^}.]*'}"
FN_MATCH = re.compile(r"({})".format(CFN_FN_RE))
# As above, but match only if this comprises the entire string
STRICT_MATCH = re.compile(r"^{}$".format(CFN_FN_RE))
def _selective_eval(s):
"""
Takes supplied string and if it matches STRICT_MATCH, it is returned
evaled so as to be a Python structure (dict), otherwise it is returned
as is.
This is to be used exclusively by templated_read to render correctly
the CloudFormation functions that it finds in the rendered output.
There are no doubt edge-cases on which this does the wrong thing!
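    Illustrative examples (based on STRICT_MATCH above):
        _selective_eval("{'Ref': 'AWS::Region'}") -> {'Ref': 'AWS::Region'}
        _selective_eval("plain text") -> 'plain text'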
"""
if STRICT_MATCH.match(s) is None:
return s
return eval(s)
def templated_read(file_handle, context={}):
"""
This function reads content from a file handle and processes as a template
The Jinja2 templating engine is used, and the supplied context is provided.
Once Jinja template processed, the document is split to extract
CFN functions, e.g. Ref and Fn::Join etc, and the whole lot is
returned Fn::Joined together (using the cfn_pyplates `join` function)
ready to place in a UserData argument.
This process is required in order that the Cloudformation functions are
not embedded in strings where they would not be correctly evaluated
at the time the template is processed by Cloudformation.
Args:
file_handle: any file-like object
context: a dictionary of keys to use in the template
Example
-------
File template:
# snippet of script...
$CFN_ROOT/cfn-init -s {{ stack_id }} -r {{ resource_name }} \
--region {{ aws_region }} || error_exit 'Failed to run cfn-init'
In the PyPlates code:
...
'UserData':
templated_read(
open('my_template_script.sh', 'rt'),
{'resource_name': 'MyWebServer',
'stack_id': ref('AWS::StackId'),
'aws_region': ref('AWS::Region')
}),
...
After processing, in the Cloudformation template:
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"$CFN_ROOT/cfn-init -s ",
{
"Ref": "AWS::StackId"
},
" -r MyWebServer --region ",
{
"Ref": "AWS::Region"
},
" || error_exit 'Failed to run cfn-init'"
]
]
}
},
"""
template = Template(file_handle.read())
rendered = template.render(**context)
tokens = FN_MATCH.split(rendered)
return join("", *[_selective_eval(s) for s in tokens]) | en | 0.802119 | # Copyright (c) 2013 ReThought Ltd # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. These utilities are ReThought additions that provide additional functionality Useful to us, they may be quite specific in cases. # Match strings of the form {'XXX': 'YYY'} e.g. # {'Ref': 'AWS::Region'} # As above, but match only if this comprises the entire string Takes supplied string and if it matches STRICT_MATCH, it is returned evaled so as to be a Python structure (dict), otherwise it is returned as is. This is to be used exclusively by templated_read to render correctly the CloudFormation functions that it finds in the rendered output. There are no doubt edge-cases on which this does the wrong thing! This function reads content from a file handle and processes as a template The Jinja2 templating engine is used, and the supplied context is provided. Once Jinja template processed, the document is split to extract CFN functions, e.g. Ref and Fn::Join etc, and the whole lot is returned Fn::Joined together (using the cfn_pyplates `join` function) ready to place in a UserData argument. This process is required in order that the Cloudformation functions are not embedded in strings where they would not be correctly evaluated at the time the template is processed by Cloudformation. Args: file_handle: any file-like object context: a dictionary of keys to use in the template Example ------- File template: # snippet of script... $CFN_ROOT/cfn-init -s {{ stack_id }} -r {{ resource_name }} \ --region {{ aws_region }} || error_exit 'Failed to run cfn-init' In the PyPlates code: ... 'UserData': templated_read( open('my_template_script.sh', 'rt'), {'resource_name': 'MyWebServer', 'stack_id': ref('AWS::StackId'), 'aws_region': ref('AWS::Region') }), ... After processing, in the Cloudformation template: "UserData": { "Fn::Base64": { "Fn::Join": [ "", [ "$CFN_ROOT/cfn-init -s ", { "Ref": "AWS::StackId" }, " -r MyWebServer --region ", { "Ref": "AWS::Region" }, " || error_exit 'Failed to run cfn-init'" ] ] } }, | 2.271383 | 2 |
tests/test_command_parser.py | lazToum/redis-py | 0 | 6630981 | <gh_stars>0
import pytest
from redis.commands import CommandsParser
from .conftest import skip_if_server_version_lt
class TestCommandsParser:
def test_init_commands(self, r):
commands_parser = CommandsParser(r)
assert commands_parser.commands is not None
assert "get" in commands_parser.commands
def test_get_keys_predetermined_key_location(self, r):
commands_parser = CommandsParser(r)
args1 = ["GET", "foo"]
args2 = ["OBJECT", "encoding", "foo"]
args3 = ["MGET", "foo", "bar", "foobar"]
assert commands_parser.get_keys(r, *args1) == ["foo"]
assert commands_parser.get_keys(r, *args2) == ["foo"]
assert commands_parser.get_keys(r, *args3) == ["foo", "bar", "foobar"]
@pytest.mark.filterwarnings("ignore:ResponseError")
def test_get_moveable_keys(self, r):
commands_parser = CommandsParser(r)
args1 = [
"EVAL",
"return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}",
2,
"key1",
"key2",
"first",
"second",
]
args2 = ["XREAD", "COUNT", 2, b"STREAMS", "mystream", "writers", 0, 0]
args3 = ["ZUNIONSTORE", "out", 2, "zset1", "zset2", "WEIGHTS", 2, 3]
args4 = ["GEORADIUS", "Sicily", 15, 37, 200, "km", "WITHCOORD", b"STORE", "out"]
args5 = ["MEMORY USAGE", "foo"]
args6 = [
"MIGRATE",
"192.168.1.34",
6379,
"",
0,
5000,
b"KEYS",
"key1",
"key2",
"key3",
]
args7 = ["MIGRATE", "192.168.1.34", 6379, "key1", 0, 5000]
args8 = ["STRALGO", "LCS", "STRINGS", "string_a", "string_b"]
args9 = ["STRALGO", "LCS", "KEYS", "key1", "key2"]
        assert sorted(commands_parser.get_keys(r, *args1)) == sorted(["key1", "key2"])
        assert sorted(commands_parser.get_keys(r, *args2)) == sorted(
            ["mystream", "writers"]
        )
        assert sorted(commands_parser.get_keys(r, *args3)) == sorted(
            ["out", "zset1", "zset2"]
        )
        assert sorted(commands_parser.get_keys(r, *args4)) == sorted(["Sicily", "out"])
        assert sorted(commands_parser.get_keys(r, *args5)) == sorted(["foo"])
        assert sorted(commands_parser.get_keys(r, *args6)) == sorted(
            ["key1", "key2", "key3"]
        )
        assert sorted(commands_parser.get_keys(r, *args7)) == sorted(["key1"])
        assert commands_parser.get_keys(r, *args8) is None
        assert sorted(commands_parser.get_keys(r, *args9)) == sorted(["key1", "key2"])
# A bug in redis<7.0 causes this to fail: https://github.com/redis/redis/issues/9493
@skip_if_server_version_lt("7.0.0")
def test_get_eval_keys_with_0_keys(self, r):
commands_parser = CommandsParser(r)
args = [
"EVAL",
"return {ARGV[1],ARGV[2]}",
0,
"key1",
"key2",
]
assert commands_parser.get_keys(r, *args) == []
def test_get_pubsub_keys(self, r):
commands_parser = CommandsParser(r)
args1 = ["PUBLISH", "foo", "bar"]
args2 = ["PUBSUB NUMSUB", "foo1", "foo2", "foo3"]
args3 = ["PUBSUB channels", "*"]
args4 = ["SUBSCRIBE", "foo1", "foo2", "foo3"]
assert commands_parser.get_keys(r, *args1) == ["foo"]
assert commands_parser.get_keys(r, *args2) == ["foo1", "foo2", "foo3"]
assert commands_parser.get_keys(r, *args3) == ["*"]
assert commands_parser.get_keys(r, *args4) == ["foo1", "foo2", "foo3"]
| import pytest
from redis.commands import CommandsParser
from .conftest import skip_if_server_version_lt
class TestCommandsParser:
def test_init_commands(self, r):
commands_parser = CommandsParser(r)
assert commands_parser.commands is not None
assert "get" in commands_parser.commands
def test_get_keys_predetermined_key_location(self, r):
commands_parser = CommandsParser(r)
args1 = ["GET", "foo"]
args2 = ["OBJECT", "encoding", "foo"]
args3 = ["MGET", "foo", "bar", "foobar"]
assert commands_parser.get_keys(r, *args1) == ["foo"]
assert commands_parser.get_keys(r, *args2) == ["foo"]
assert commands_parser.get_keys(r, *args3) == ["foo", "bar", "foobar"]
@pytest.mark.filterwarnings("ignore:ResponseError")
def test_get_moveable_keys(self, r):
commands_parser = CommandsParser(r)
args1 = [
"EVAL",
"return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}",
2,
"key1",
"key2",
"first",
"second",
]
args2 = ["XREAD", "COUNT", 2, b"STREAMS", "mystream", "writers", 0, 0]
args3 = ["ZUNIONSTORE", "out", 2, "zset1", "zset2", "WEIGHTS", 2, 3]
args4 = ["GEORADIUS", "Sicily", 15, 37, 200, "km", "WITHCOORD", b"STORE", "out"]
args5 = ["MEMORY USAGE", "foo"]
args6 = [
"MIGRATE",
"192.168.1.34",
6379,
"",
0,
5000,
b"KEYS",
"key1",
"key2",
"key3",
]
args7 = ["MIGRATE", "192.168.1.34", 6379, "key1", 0, 5000]
args8 = ["STRALGO", "LCS", "STRINGS", "string_a", "string_b"]
args9 = ["STRALGO", "LCS", "KEYS", "key1", "key2"]
        assert sorted(commands_parser.get_keys(r, *args1)) == sorted(["key1", "key2"])
        assert sorted(commands_parser.get_keys(r, *args2)) == sorted(
            ["mystream", "writers"]
        )
        assert sorted(commands_parser.get_keys(r, *args3)) == sorted(
            ["out", "zset1", "zset2"]
        )
        assert sorted(commands_parser.get_keys(r, *args4)) == sorted(["Sicily", "out"])
        assert sorted(commands_parser.get_keys(r, *args5)) == sorted(["foo"])
        assert sorted(commands_parser.get_keys(r, *args6)) == sorted(
            ["key1", "key2", "key3"]
        )
        assert sorted(commands_parser.get_keys(r, *args7)) == sorted(["key1"])
        assert commands_parser.get_keys(r, *args8) is None
        assert sorted(commands_parser.get_keys(r, *args9)) == sorted(["key1", "key2"])
# A bug in redis<7.0 causes this to fail: https://github.com/redis/redis/issues/9493
@skip_if_server_version_lt("7.0.0")
def test_get_eval_keys_with_0_keys(self, r):
commands_parser = CommandsParser(r)
args = [
"EVAL",
"return {ARGV[1],ARGV[2]}",
0,
"key1",
"key2",
]
assert commands_parser.get_keys(r, *args) == []
def test_get_pubsub_keys(self, r):
commands_parser = CommandsParser(r)
args1 = ["PUBLISH", "foo", "bar"]
args2 = ["PUBSUB NUMSUB", "foo1", "foo2", "foo3"]
args3 = ["PUBSUB channels", "*"]
args4 = ["SUBSCRIBE", "foo1", "foo2", "foo3"]
assert commands_parser.get_keys(r, *args1) == ["foo"]
assert commands_parser.get_keys(r, *args2) == ["foo1", "foo2", "foo3"]
assert commands_parser.get_keys(r, *args3) == ["*"]
assert commands_parser.get_keys(r, *args4) == ["foo1", "foo2", "foo3"] | en | 0.764123 | # A bug in redis<7.0 causes this to fail: https://github.com/redis/redis/issues/9493 | 2.380957 | 2 |
tests/test_models/test_place.py | calypsobronte/AirBnB_clone | 0 | 6630982 | #!/usr/bin/python3
"""Test State"""
import unittest
import pep8
from models.place import Place
from models.user import User
from models.city import City
from models.amenity import Amenity
class Testplace(unittest.TestCase):
""" Test Place """
def test_pep8_conformance_place(self):
"""Test that we conform to PEP8."""
pep8style = pep8.StyleGuide(quiet=True)
result = pep8style.check_files(['models/place.py'])
self.assertEqual(result.total_errors, 0, "Found code style errors.")
def test_place(self):
"""
Test attributes of Class Place
"""
my_amenity = Amenity()
my_city = City()
my_user = User()
my_place = Place()
my_place.city_id = my_city.id
my_place.user_id = my_user.id
my_place.name = 'Coworking'
my_place.description = 'description'
my_place.number_rooms = 4
my_place.number_bathrooms = 2
my_place.max_guest = 4
my_place.price_by_night = 200
my_place.latitude = 25.0342808
my_place.longitude = -77.3962784
my_place.amenity_ids = str(my_amenity.id)
self.assertEqual(my_place.city_id, my_city.id)
self.assertEqual(my_place.user_id, my_user.id)
self.assertEqual(my_place.name, 'Coworking')
self.assertEqual(my_place.description, 'description')
self.assertEqual(my_place.number_rooms, 4)
        self.assertIsInstance(my_place.number_rooms, int)
        self.assertEqual(my_place.number_bathrooms, 2)
        self.assertIsInstance(my_place.number_bathrooms, int)
        self.assertEqual(my_place.max_guest, 4)
        self.assertIsInstance(my_place.max_guest, int)
        self.assertEqual(my_place.price_by_night, 200)
        self.assertIsInstance(my_place.price_by_night, int)
        self.assertEqual(my_place.latitude, 25.0342808)
        self.assertIsInstance(my_place.latitude, float)
        self.assertEqual(my_place.longitude, -77.3962784)
        self.assertIsInstance(my_place.longitude, float)
        self.assertEqual(my_place.amenity_ids, str(my_amenity.id))
        self.assertIsInstance(my_place.amenity_ids, str)
| #!/usr/bin/python3
"""Test State"""
import unittest
import pep8
from models.place import Place
from models.user import User
from models.city import City
from models.amenity import Amenity
class Testplace(unittest.TestCase):
""" Test Place """
def test_pep8_conformance_place(self):
"""Test that we conform to PEP8."""
pep8style = pep8.StyleGuide(quiet=True)
result = pep8style.check_files(['models/place.py'])
self.assertEqual(result.total_errors, 0, "Found code style errors.")
def test_place(self):
"""
Test attributes of Class Place
"""
my_amenity = Amenity()
my_city = City()
my_user = User()
my_place = Place()
my_place.city_id = my_city.id
my_place.user_id = my_user.id
my_place.name = 'Coworking'
my_place.description = 'description'
my_place.number_rooms = 4
my_place.number_bathrooms = 2
my_place.max_guest = 4
my_place.price_by_night = 200
my_place.latitude = 25.0342808
my_place.longitude = -77.3962784
my_place.amenity_ids = str(my_amenity.id)
self.assertEqual(my_place.city_id, my_city.id)
self.assertEqual(my_place.user_id, my_user.id)
self.assertEqual(my_place.name, 'Coworking')
self.assertEqual(my_place.description, 'description')
self.assertEqual(my_place.number_rooms, 4)
        self.assertIsInstance(my_place.number_rooms, int)
        self.assertEqual(my_place.number_bathrooms, 2)
        self.assertIsInstance(my_place.number_bathrooms, int)
        self.assertEqual(my_place.max_guest, 4)
        self.assertIsInstance(my_place.max_guest, int)
        self.assertEqual(my_place.price_by_night, 200)
        self.assertIsInstance(my_place.price_by_night, int)
        self.assertEqual(my_place.latitude, 25.0342808)
        self.assertIsInstance(my_place.latitude, float)
        self.assertEqual(my_place.longitude, -77.3962784)
        self.assertIsInstance(my_place.longitude, float)
        self.assertEqual(my_place.amenity_ids, str(my_amenity.id))
        self.assertIsInstance(my_place.amenity_ids, str)
| en | 0.685444 | #!/usr/bin/python3 Test State Test Place Test that we conform to PEP8. Test attributes of Class Place | 3.318442 | 3 |
appengine/chrome_infra_packages/cipd/api.py | eunchong/infra | 0 | 6630983 | <filename>appengine/chrome_infra_packages/cipd/api.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cloud Endpoints API for Package Repository service."""
import functools
import logging
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from components import auth
from components import utils
from . import acl
from . import client
from . import impl
# This is used by endpoints indirectly.
package = 'cipd'
################################################################################
## Messages used by other messages.
class Status(messages.Enum):
"""Response status code, shared by all responses."""
# Operation finished successfully (generic "success" response).
SUCCESS = 1
# The package instance was successfully registered.
REGISTERED = 2
  # The package instance was already registered (not an error).
ALREADY_REGISTERED = 3
# Some uncategorized non-transient error happened.
ERROR = 4
# No such package.
PACKAGE_NOT_FOUND = 5
# Package itself is known, but requested instance_id isn't registered.
INSTANCE_NOT_FOUND = 6
# Need to upload package data before registering the package.
UPLOAD_FIRST = 7
# Client binary is not available, the call should be retried later.
NOT_EXTRACTED_YET = 8
# Some asynchronous package processing failed.
PROCESSING_FAILED = 9
# Asynchronous package processing is still running.
PROCESSING_NOT_FINISHED_YET = 10
# More than one instance matches criteria in resolveVersion.
AMBIGUOUS_VERSION = 11
class Package(messages.Message):
"""Information about some registered package."""
package_name = messages.StringField(1, required=True)
registered_by = messages.StringField(2, required=True)
registered_ts = messages.IntegerField(3, required=True)
def package_to_proto(entity):
"""Package entity -> Package proto message."""
return Package(
package_name=entity.package_name,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class PackageInstance(messages.Message):
"""Information about some registered package instance."""
package_name = messages.StringField(1, required=True)
instance_id = messages.StringField(2, required=True)
registered_by = messages.StringField(3, required=True)
registered_ts = messages.IntegerField(4, required=True)
def instance_to_proto(entity):
"""PackageInstance entity -> PackageInstance proto message."""
return PackageInstance(
package_name=entity.package_name,
instance_id=entity.instance_id,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class InstanceTag(messages.Message):
"""Some single package instance tag."""
tag = messages.StringField(1, required=True)
registered_by = messages.StringField(2, required=True)
registered_ts = messages.IntegerField(3, required=True)
def tag_to_proto(entity):
"""InstanceTag entity -> InstanceTag proto message."""
return InstanceTag(
tag=entity.tag,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class PackageRef(messages.Message):
"""Information about some ref belonging to a package."""
ref = messages.StringField(1, required=True)
instance_id = messages.StringField(2, required=True)
modified_by = messages.StringField(3, required=True)
modified_ts = messages.IntegerField(4, required=True)
def package_ref_to_proto(entity):
"""PackageRef entity -> PackageRef proto message."""
return PackageRef(
ref=entity.ref,
instance_id=entity.instance_id,
modified_by=entity.modified_by.to_bytes(),
modified_ts=utils.datetime_to_timestamp(entity.modified_ts))
class PackageACL(messages.Message):
"""Access control list for some package path and all parent paths."""
class ElementaryACL(messages.Message):
"""Single per role, per package path ACL."""
package_path = messages.StringField(1, required=True)
role = messages.StringField(2, required=True)
principals = messages.StringField(3, repeated=True)
modified_by = messages.StringField(4, required=True)
modified_ts = messages.IntegerField(5, required=True)
# List of ACLs split by package path and role. No ordering.
acls = messages.MessageField(ElementaryACL, 1, repeated=True)
def package_acls_to_proto(per_role_acls):
"""Dict {role -> list of PackageACL entities} -> PackageACL message."""
acls = []
for role, package_acl_entities in per_role_acls.iteritems():
for e in package_acl_entities:
principals = []
principals.extend(u.to_bytes() for u in e.users)
principals.extend('group:%s' % g for g in e.groups)
acls.append(PackageACL.ElementaryACL(
package_path=e.package_path,
role=role,
principals=principals,
modified_by=e.modified_by.to_bytes(),
modified_ts=utils.datetime_to_timestamp(e.modified_ts),
))
return PackageACL(acls=acls)
class RoleChange(messages.Message):
"""Describes a single modification to ACL."""
class Action(messages.Enum):
GRANT = 1
REVOKE = 2
# Action to perform.
action = messages.EnumField(Action, 1, required=True)
# Role to modify ('OWNER', 'WRITER', 'READER', ...).
role = messages.StringField(2, required=True)
# Principal ('user:...' or 'group:...') to grant or revoke a role for.
principal = messages.StringField(3, required=True)
def role_change_from_proto(proto, package_path):
"""RoleChange proto message -> acl.RoleChange object.
Raises ValueError on format errors.
"""
if not acl.is_valid_role(proto.role):
raise ValueError('Invalid role %s' % proto.role)
user = None
group = None
if proto.principal.startswith('group:'):
group = proto.principal[len('group:'):]
if not auth.is_valid_group_name(group):
raise ValueError('Invalid group name: "%s"' % group)
else:
    # Raises ValueError if proto.principal is malformed, e.g. not 'user:...'.
user = auth.Identity.from_bytes(proto.principal)
return acl.RoleChange(
package_path=package_path,
revoke=(proto.action != RoleChange.Action.GRANT),
role=proto.role,
user=user,
group=group)
class Processor(messages.Message):
"""Status of some package instance processor."""
class Status(messages.Enum):
PENDING = 1
SUCCESS = 2
FAILURE = 3
# Name of the processor, defines what it does.
name = messages.StringField(1, required=True)
# Status of the processing.
status = messages.EnumField(Status, 2, required=True)
def processors_protos(instance):
"""Given PackageInstance entity returns a list of Processor messages."""
def procs_to_msg(procs, status):
    return [Processor(name=name, status=status) for name in procs]
processors = []
processors += procs_to_msg(
instance.processors_pending,
Processor.Status.PENDING)
processors += procs_to_msg(
instance.processors_success,
Processor.Status.SUCCESS)
processors += procs_to_msg(
instance.processors_failure,
Processor.Status.FAILURE)
return processors
################################################################################
class FetchPackageResponse(messages.Message):
"""Results of fetchPackage call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, information about the package.
package = messages.MessageField(Package, 3, required=False)
refs = messages.MessageField(PackageRef, 4, repeated=True)
################################################################################
class ListPackagesResponse(messages.Message):
"""Results of listPackage call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, names of the packages and names of directories.
packages = messages.StringField(3, repeated=True)
directories = messages.StringField(4, repeated=True)
################################################################################
class DeletePackageResponse(messages.Message):
"""Results of deletePackage call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class FetchInstanceResponse(messages.Message):
"""Results of fetchInstance call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, information about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For SUCCESS, a signed url to fetch the package instance file from.
fetch_url = messages.StringField(4, required=False)
# For SUCCESS, list of processors applied to the instance.
processors = messages.MessageField(Processor, 5, repeated=True)
################################################################################
class RegisterInstanceResponse(messages.Message):
"""Results of registerInstance call.
upload_session_id and upload_url (if present) can be used with CAS service
(finishUpload call in particular).
  Callers are expected to execute the following protocol:
1. Attempt to register a package instance by calling registerInstance(...).
2. On UPLOAD_FIRST response, upload package data and finalize the upload by
using upload_session_id and upload_url and calling cas.finishUpload.
3. Once upload is finalized, call registerInstance(...) again.
"""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For REGISTERED or ALREADY_REGISTERED, info about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For UPLOAD_FIRST status, a unique identifier of the upload operation.
upload_session_id = messages.StringField(4, required=False)
# For UPLOAD_FIRST status, URL to PUT file to via resumable upload protocol.
upload_url = messages.StringField(5, required=False)
################################################################################
class SetRefRequest(messages.Message):
"""Body of setRef call."""
# ID of the package instance to point the ref too.
instance_id = messages.StringField(1, required=True)
class SetRefResponse(messages.Message):
"""Results of setRef call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about the ref.
ref = messages.MessageField(PackageRef, 3, required=False)
class FetchRefsResponse(messages.Message):
"""Results of fetchRefs call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about fetches refs.
refs = messages.MessageField(PackageRef, 3, repeated=True)
################################################################################
class FetchTagsResponse(messages.Message):
"""Results of fetchTags call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about found tags.
tags = messages.MessageField(InstanceTag, 3, repeated=True)
class AttachTagsRequest(messages.Message):
"""Body of attachTags call."""
tags = messages.StringField(1, repeated=True)
class AttachTagsResponse(messages.Message):
"""Results of attachTag call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about attached tags.
tags = messages.MessageField(InstanceTag, 3, repeated=True)
class DetachTagsResponse(messages.Message):
"""Results of detachTags call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class SearchResponse(messages.Message):
"""Results of searchInstances call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, list of instances found.
instances = messages.MessageField(PackageInstance, 3, repeated=True)
class ResolveVersionResponse(messages.Message):
"""Results of resolveVersion call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, concrete existing instance ID.
instance_id = messages.StringField(3, required=False)
################################################################################
class FetchACLResponse(messages.Message):
"""Results of fetchACL call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, list of ACLs split by package path and role.
acls = messages.MessageField(PackageACL, 3, required=False)
################################################################################
class ModifyACLRequest(messages.Message):
"""Body of modifyACL call."""
changes = messages.MessageField(RoleChange, 1, repeated=True)
class ModifyACLResponse(messages.Message):
"""Results of modifyACL call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class FetchClientBinaryResponse(messages.Message):
"""Results of fetchClientBinary call."""
class ClientBinary(messages.Message):
# SHA1 hex digest of the extracted binary, for verification on the client.
sha1 = messages.StringField(1, required=True)
# Size of the binary file, just for information.
size = messages.IntegerField(2, required=True)
# A signed url to fetch the binary file from.
fetch_url = messages.StringField(3, required=True)
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS or NOT_EXTRACTED_YET, information about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For SUCCESS, information about the client binary.
client_binary = messages.MessageField(ClientBinary, 4, required=False)
################################################################################
class Error(Exception):
status = Status.ERROR
class PackageNotFoundError(Error):
status = Status.PACKAGE_NOT_FOUND
class InstanceNotFoundError(Error):
status = Status.INSTANCE_NOT_FOUND
class ProcessingFailedError(Error):
status = Status.PROCESSING_FAILED
class ProcessingNotFinishedYetError(Error):
status = Status.PROCESSING_NOT_FINISHED_YET
class ValidationError(Error):
# TODO(vadimsh): Use VALIDATION_ERROR. It changes JSON protocol.
status = Status.ERROR
def validate_package_name(package_name):
if not impl.is_valid_package_path(package_name):
raise ValidationError('Invalid package name')
return package_name
def validate_package_path(package_path):
if not impl.is_valid_package_path(package_path):
raise ValidationError('Invalid package path')
return package_path
def validate_package_ref(ref):
if not impl.is_valid_package_ref(ref):
raise ValidationError('Invalid package ref name')
return ref
def validate_package_ref_list(refs):
if not refs: # pragma: no cover
raise ValidationError('Ref list is empty')
return [validate_package_ref(ref) for ref in refs]
def validate_instance_id(instance_id):
if not impl.is_valid_instance_id(instance_id):
raise ValidationError('Invalid package instance ID')
return instance_id
def validate_instance_tag(tag):
if not impl.is_valid_instance_tag(tag):
raise ValidationError('Invalid tag "%s"' % tag)
return tag
def validate_instance_tag_list(tags):
if not tags:
raise ValidationError('Tag list is empty')
return [validate_instance_tag(tag) for tag in tags]
def validate_instance_version(version):
if not impl.is_valid_instance_version(version):
raise ValidationError('Not a valid instance ID or tag: "%s"' % version)
return version
def endpoints_method(request_message, response_message, **kwargs):
"""Wrapper around Endpoint methods to simplify error handling.
Catches Error exceptions and converts them to error responses. Assumes
response_message has fields 'status' and 'error_message'.
"""
assert hasattr(response_message, 'status')
assert hasattr(response_message, 'error_message')
def decorator(f):
@auth.endpoints_method(request_message, response_message, **kwargs)
@functools.wraps(f)
def wrapper(*args):
try:
response = f(*args)
if response.status is None:
response.status = Status.SUCCESS
return response
except Error as e:
return response_message(
status=e.status,
error_message=e.message if e.message else None)
except auth.Error as e:
caller = auth.get_current_identity().to_bytes()
logging.warning('%s (%s): %s', e.__class__.__name__, caller, e)
raise
return wrapper
return decorator
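# Illustrative usage sketch (not part of the original file): a handler wrapped
# with endpoints_method can simply raise one of the Error subclasses defined
# above, and the wrapper converts it into an error response message.
# `ExampleResponse` and `example` are hypothetical names used only here.
#
#   @endpoints_method(message_types.VoidMessage, ExampleResponse,
#                     http_method='GET', path='example', name='example')
#   def example(self, request):
#     raise PackageNotFoundError()
#     # -> ExampleResponse(status=Status.PACKAGE_NOT_FOUND)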
################################################################################
@auth.endpoints_api(
name='repo',
version='v1',
title='CIPD Package Repository API')
class PackageRepositoryApi(remote.Service):
"""Package Repository API."""
# Cached value of 'service' property.
_service = None
@property
def service(self):
"""Returns configured impl.RepoService."""
if self._service is None:
self._service = impl.get_repo_service()
if self._service is None or not self._service.is_fetch_configured():
raise endpoints.InternalServerErrorException(
'Service is not configured')
return self._service
def get_instance(self, package_name, instance_id):
"""Grabs PackageInstance or raises appropriate *NotFoundError."""
instance = self.service.get_instance(package_name, instance_id)
if instance is None:
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
raise InstanceNotFoundError()
return instance
def verify_instance_exists(self, package_name, instance_id):
"""Raises appropriate *NotFoundError if instance is missing."""
self.get_instance(package_name, instance_id)
def verify_instance_is_ready(self, package_name, instance_id):
"""Raises appropriate error if instance doesn't exist or not ready yet.
Instance is ready when all processors successfully finished.
"""
instance = self.get_instance(package_name, instance_id)
if instance.processors_failure:
raise ProcessingFailedError(
'Failed processors: %s' % ', '.join(instance.processors_failure))
if instance.processors_pending:
raise ProcessingNotFinishedYetError(
'Pending processors: %s' % ', '.join(instance.processors_pending))
### Package methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
with_refs=messages.BooleanField(2, required=False)),
FetchPackageResponse,
http_method='GET',
path='package',
name='fetchPackage')
@auth.public # ACL check is inside
def fetch_package(self, request):
"""Returns information about a package."""
package_name = validate_package_name(request.package_name)
caller = auth.get_current_identity()
if not acl.can_fetch_package(package_name, caller):
raise auth.AuthorizationError()
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
refs = []
if request.with_refs:
refs = self.service.query_package_refs(package_name)
return FetchPackageResponse(
package=package_to_proto(pkg),
refs=[package_ref_to_proto(r) for r in refs])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
path=messages.StringField(1, required=False),
recursive=messages.BooleanField(2, required=False)),
ListPackagesResponse,
http_method='GET',
path='package/search',
name='listPackages')
@auth.public # ACL check is inside
def list_packages(self, request):
"""Returns packages in the given directory and possibly subdirectories."""
path = request.path or ''
recursive = request.recursive or False
pkgs, dirs = self.service.list_packages(path, recursive)
caller = auth.get_current_identity()
visible_pkgs = [p for p in pkgs if acl.can_fetch_package(p, caller)]
visible_dirs = [d for d in dirs if acl.can_fetch_package(d, caller)]
return ListPackagesResponse(packages=visible_pkgs, directories=visible_dirs)
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True)),
DeletePackageResponse,
http_method='DELETE',
path='package',
name='deletePackage')
@auth.public # ACL check is inside
def delete_package(self, request):
"""Deletes a package along with all its instances."""
package_name = validate_package_name(request.package_name)
caller = auth.get_current_identity()
if not acl.can_delete_package(package_name, caller):
raise auth.AuthorizationError()
deleted = self.service.delete_package(package_name)
if not deleted:
raise PackageNotFoundError()
return DeletePackageResponse()
### PackageInstance methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
FetchInstanceResponse,
http_method='GET',
path='instance',
name='fetchInstance')
@auth.public # ACL check is inside
def fetch_instance(self, request):
"""Returns signed URL that can be used to fetch a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
instance = self.get_instance(package_name, instance_id)
return FetchInstanceResponse(
instance=instance_to_proto(instance),
fetch_url=self.service.generate_fetch_url(instance),
processors=processors_protos(instance))
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
RegisterInstanceResponse,
path='instance',
http_method='POST',
name='registerInstance')
@auth.public # ACL check is inside
def register_instance(self, request):
"""Registers a new package instance in the repository."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_register_instance(package_name, caller):
raise auth.AuthorizationError()
instance = self.service.get_instance(package_name, instance_id)
if instance is not None:
return RegisterInstanceResponse(
status=Status.ALREADY_REGISTERED,
instance=instance_to_proto(instance))
# Need to upload to CAS first? Open an upload session. Caller must use
# CASServiceApi to finish the upload and then call registerInstance again.
if not self.service.is_instance_file_uploaded(package_name, instance_id):
upload_url, upload_session_id = self.service.create_upload_session(
package_name, instance_id, caller)
return RegisterInstanceResponse(
status=Status.UPLOAD_FIRST,
upload_session_id=upload_session_id,
upload_url=upload_url)
# Package data is in the store. Make an entity.
instance, registered = self.service.register_instance(
package_name=package_name,
instance_id=instance_id,
caller=caller,
now=utils.utcnow())
return RegisterInstanceResponse(
status=Status.REGISTERED if registered else Status.ALREADY_REGISTERED,
instance=instance_to_proto(instance))
### Refs methods.
@endpoints_method(
endpoints.ResourceContainer(
SetRefRequest,
package_name=messages.StringField(1, required=True),
ref=messages.StringField(2, required=True)),
SetRefResponse,
path='ref',
http_method='POST',
name='setRef')
@auth.public # ACL check is inside
def set_ref(self, request):
"""Creates a ref or moves an existing one."""
package_name = validate_package_name(request.package_name)
ref = validate_package_ref(request.ref)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_move_ref(package_name, ref, caller):
raise auth.AuthorizationError('Not authorized to move "%s"' % ref)
self.verify_instance_is_ready(package_name, instance_id)
ref_entity = self.service.set_package_ref(
package_name=package_name,
ref=ref,
instance_id=instance_id,
caller=caller,
now=utils.utcnow())
return SetRefResponse(ref=package_ref_to_proto(ref_entity))
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
ref=messages.StringField(3, repeated=True)),
FetchRefsResponse,
path='ref',
http_method='GET',
name='fetchRefs')
@auth.public # ACL check is inside
def fetch_refs(self, request):
"""Lists package instance refs (newest first)."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
refs = validate_package_ref_list(request.ref) if request.ref else None
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
self.verify_instance_exists(package_name, instance_id)
if not refs:
# Fetch all.
output = self.service.query_instance_refs(package_name, instance_id)
else:
# Fetch selected refs, pick ones pointing to the instance.
output = [
r
for r in self.service.get_package_refs(package_name, refs).itervalues()
if r and r.instance_id == instance_id
]
output.sort(key=lambda r: r.modified_ts, reverse=True)
return FetchRefsResponse(refs=[package_ref_to_proto(ref) for ref in output])
### Tags methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
tag=messages.StringField(3, repeated=True)),
FetchTagsResponse,
path='tags',
http_method='GET',
name='fetchTags')
@auth.public # ACL check is inside
def fetch_tags(self, request):
"""Lists package instance tags (newest first)."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tag) if request.tag else None
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
self.verify_instance_exists(package_name, instance_id)
if not tags:
# Fetch all.
attached = self.service.query_tags(package_name, instance_id)
else:
      # Fetch only the selected tags; essentially an "is tagged by?" check.
found = self.service.get_tags(package_name, instance_id, tags)
attached = [found[tag] for tag in tags if found[tag]]
attached.sort(key=lambda t: t.registered_ts, reverse=True)
return FetchTagsResponse(tags=[tag_to_proto(tag) for tag in attached])
@endpoints_method(
endpoints.ResourceContainer(
AttachTagsRequest,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
AttachTagsResponse,
path='tags',
http_method='POST',
name='attachTags')
@auth.public # ACL check is inside
def attach_tags(self, request):
"""Attaches a set of tags to a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tags)
caller = auth.get_current_identity()
for tag in tags:
if not acl.can_attach_tag(package_name, tag, caller):
raise auth.AuthorizationError('Not authorized to attach "%s"' % tag)
self.verify_instance_is_ready(package_name, instance_id)
attached = self.service.attach_tags(
package_name=package_name,
instance_id=instance_id,
tags=tags,
caller=caller,
now=utils.utcnow())
return AttachTagsResponse(tags=[tag_to_proto(attached[t]) for t in tags])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
tag=messages.StringField(3, repeated=True)),
DetachTagsResponse,
path='tags',
http_method='DELETE',
name='detachTags')
@auth.public # ACL check is inside
def detach_tags(self, request):
"""Removes given tags from a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tag)
caller = auth.get_current_identity()
for tag in tags:
if not acl.can_detach_tag(package_name, tag, caller):
raise auth.AuthorizationError('Not authorized to detach "%s"' % tag)
self.verify_instance_exists(package_name, instance_id)
self.service.detach_tags(
package_name=package_name,
instance_id=instance_id,
tags=tags)
return DetachTagsResponse()
### Search methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
tag=messages.StringField(1, required=True),
package_name=messages.StringField(2, required=False)),
SearchResponse,
path='instance/search',
http_method='GET',
name='searchInstances')
@auth.public # ACL check is inside
def search_instances(self, request):
"""Returns package instances with given tag (in no particular order)."""
tag = validate_instance_tag(request.tag)
if request.package_name:
package_name = validate_package_name(request.package_name)
else:
package_name = None
caller = auth.get_current_identity()
callback = None
if package_name:
# If search is limited to one package, check its ACL only once.
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
else:
# Filter out packages not allowed by ACL.
acl_cache = {}
def check_readable(package_name, _instance_id):
if package_name not in acl_cache:
acl_cache[package_name] = acl.can_fetch_instance(package_name, caller)
return acl_cache[package_name]
callback = check_readable
found = self.service.search_by_tag(tag, package_name, callback)
return SearchResponse(instances=[instance_to_proto(i) for i in found])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
version=messages.StringField(2, required=True)),
ResolveVersionResponse,
path='instance/resolve',
http_method='GET',
name='resolveVersion')
@auth.public # ACL check is inside
def resolve_version(self, request):
"""Returns instance ID of an existing instance given a ref or a tag."""
package_name = validate_package_name(request.package_name)
version = validate_instance_version(request.version)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
ids = self.service.resolve_version(package_name, version, limit=2)
if not ids:
raise InstanceNotFoundError()
if len(ids) > 1:
return ResolveVersionResponse(
status=Status.AMBIGUOUS_VERSION,
error_message='More than one instance has tag "%s" set' % version)
return ResolveVersionResponse(instance_id=ids[0])
### ACL methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_path=messages.StringField(1, required=True)),
FetchACLResponse,
http_method='GET',
path='acl',
name='fetchACL')
@auth.public # ACL check is inside
def fetch_acl(self, request):
"""Returns access control list for a given package path."""
package_path = validate_package_path(request.package_path)
caller = auth.get_current_identity()
if not acl.can_fetch_acl(package_path, caller):
raise auth.AuthorizationError()
return FetchACLResponse(
acls=package_acls_to_proto({
role: acl.get_package_acls(package_path, role)
for role in acl.ROLES
}))
@endpoints_method(
endpoints.ResourceContainer(
ModifyACLRequest,
package_path=messages.StringField(1, required=True)),
ModifyACLResponse,
http_method='POST',
path='acl',
name='modifyACL')
@auth.public # ACL check is inside
def modify_acl(self, request):
"""Changes access control list for a given package path."""
package_path = validate_package_path(request.package_path)
try:
changes = [
role_change_from_proto(msg, package_path)
for msg in request.changes
]
except ValueError as exc:
raise ValidationError('Invalid role change request: %s' % exc)
caller = auth.get_current_identity()
if not acl.can_modify_acl(package_path, caller):
raise auth.AuthorizationError()
# Apply changes. Do not catch ValueError. Validation above should be
# sufficient. If it is not, HTTP 500 and an uncaught exception in logs is
# exactly what is needed.
acl.modify_roles(changes, caller, utils.utcnow())
return ModifyACLResponse()
### ClientBinary methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
FetchClientBinaryResponse,
http_method='GET',
path='client',
name='fetchClientBinary')
@auth.public # ACL check is inside
def fetch_client_binary(self, request):
"""Returns signed URL that can be used to fetch CIPD client binary."""
package_name = validate_package_name(request.package_name)
if not client.is_cipd_client_package(package_name):
raise ValidationError('Not a CIPD client package')
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
# Grab the location of the extracted binary.
instance = self.get_instance(package_name, instance_id)
client_info, error_message = self.service.get_client_binary_info(instance)
if error_message:
raise Error(error_message)
if client_info is None:
return FetchClientBinaryResponse(
status=Status.NOT_EXTRACTED_YET,
instance=instance_to_proto(instance))
return FetchClientBinaryResponse(
instance=instance_to_proto(instance),
client_binary=FetchClientBinaryResponse.ClientBinary(
sha1=client_info.sha1,
size=client_info.size,
fetch_url=client_info.fetch_url))
| <filename>appengine/chrome_infra_packages/cipd/api.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cloud Endpoints API for Package Repository service."""
import functools
import logging
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from components import auth
from components import utils
from . import acl
from . import client
from . import impl
# This is used by endpoints indirectly.
package = 'cipd'
################################################################################
## Messages used by other messages.
class Status(messages.Enum):
"""Response status code, shared by all responses."""
# Operation finished successfully (generic "success" response).
SUCCESS = 1
# The package instance was successfully registered.
REGISTERED = 2
  # The package instance was already registered (not an error).
ALREADY_REGISTERED = 3
# Some uncategorized non-transient error happened.
ERROR = 4
# No such package.
PACKAGE_NOT_FOUND = 5
# Package itself is known, but requested instance_id isn't registered.
INSTANCE_NOT_FOUND = 6
# Need to upload package data before registering the package.
UPLOAD_FIRST = 7
# Client binary is not available, the call should be retried later.
NOT_EXTRACTED_YET = 8
# Some asynchronous package processing failed.
PROCESSING_FAILED = 9
# Asynchronous package processing is still running.
PROCESSING_NOT_FINISHED_YET = 10
# More than one instance matches criteria in resolveVersion.
AMBIGUOUS_VERSION = 11
class Package(messages.Message):
"""Information about some registered package."""
package_name = messages.StringField(1, required=True)
registered_by = messages.StringField(2, required=True)
registered_ts = messages.IntegerField(3, required=True)
def package_to_proto(entity):
"""Package entity -> Package proto message."""
return Package(
package_name=entity.package_name,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class PackageInstance(messages.Message):
"""Information about some registered package instance."""
package_name = messages.StringField(1, required=True)
instance_id = messages.StringField(2, required=True)
registered_by = messages.StringField(3, required=True)
registered_ts = messages.IntegerField(4, required=True)
def instance_to_proto(entity):
"""PackageInstance entity -> PackageInstance proto message."""
return PackageInstance(
package_name=entity.package_name,
instance_id=entity.instance_id,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class InstanceTag(messages.Message):
"""Some single package instance tag."""
tag = messages.StringField(1, required=True)
registered_by = messages.StringField(2, required=True)
registered_ts = messages.IntegerField(3, required=True)
def tag_to_proto(entity):
"""InstanceTag entity -> InstanceTag proto message."""
return InstanceTag(
tag=entity.tag,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class PackageRef(messages.Message):
"""Information about some ref belonging to a package."""
ref = messages.StringField(1, required=True)
instance_id = messages.StringField(2, required=True)
modified_by = messages.StringField(3, required=True)
modified_ts = messages.IntegerField(4, required=True)
def package_ref_to_proto(entity):
"""PackageRef entity -> PackageRef proto message."""
return PackageRef(
ref=entity.ref,
instance_id=entity.instance_id,
modified_by=entity.modified_by.to_bytes(),
modified_ts=utils.datetime_to_timestamp(entity.modified_ts))
class PackageACL(messages.Message):
"""Access control list for some package path and all parent paths."""
class ElementaryACL(messages.Message):
"""Single per role, per package path ACL."""
package_path = messages.StringField(1, required=True)
role = messages.StringField(2, required=True)
principals = messages.StringField(3, repeated=True)
modified_by = messages.StringField(4, required=True)
modified_ts = messages.IntegerField(5, required=True)
# List of ACLs split by package path and role. No ordering.
acls = messages.MessageField(ElementaryACL, 1, repeated=True)
def package_acls_to_proto(per_role_acls):
"""Dict {role -> list of PackageACL entities} -> PackageACL message."""
acls = []
for role, package_acl_entities in per_role_acls.iteritems():
for e in package_acl_entities:
principals = []
principals.extend(u.to_bytes() for u in e.users)
principals.extend('group:%s' % g for g in e.groups)
acls.append(PackageACL.ElementaryACL(
package_path=e.package_path,
role=role,
principals=principals,
modified_by=e.modified_by.to_bytes(),
modified_ts=utils.datetime_to_timestamp(e.modified_ts),
))
return PackageACL(acls=acls)
class RoleChange(messages.Message):
"""Describes a single modification to ACL."""
class Action(messages.Enum):
GRANT = 1
REVOKE = 2
# Action to perform.
action = messages.EnumField(Action, 1, required=True)
# Role to modify ('OWNER', 'WRITER', 'READER', ...).
role = messages.StringField(2, required=True)
# Principal ('user:...' or 'group:...') to grant or revoke a role for.
principal = messages.StringField(3, required=True)
def role_change_from_proto(proto, package_path):
"""RoleChange proto message -> acl.RoleChange object.
Raises ValueError on format errors.
"""
if not acl.is_valid_role(proto.role):
raise ValueError('Invalid role %s' % proto.role)
user = None
group = None
if proto.principal.startswith('group:'):
group = proto.principal[len('group:'):]
if not auth.is_valid_group_name(group):
raise ValueError('Invalid group name: "%s"' % group)
else:
    # Raises ValueError if proto.principal has invalid format, e.g. not 'user:...'.
user = auth.Identity.from_bytes(proto.principal)
return acl.RoleChange(
package_path=package_path,
revoke=(proto.action != RoleChange.Action.GRANT),
role=proto.role,
user=user,
group=group)
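# Illustrative examples (assumptions, not taken from this file): principals are
# accepted either as 'group:<name>' references or as identity strings parsed by
# auth.Identity.from_bytes. The group name and e-mail below are made up.
#
#   role_change_from_proto(
#       RoleChange(action=RoleChange.Action.GRANT, role='READER',
#                  principal='group:cipd-readers'), 'some/package/path')
#   role_change_from_proto(
#       RoleChange(action=RoleChange.Action.REVOKE, role='OWNER',
#                  principal='user:someone@example.com'), 'some/package/path')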
class Processor(messages.Message):
"""Status of some package instance processor."""
class Status(messages.Enum):
PENDING = 1
SUCCESS = 2
FAILURE = 3
# Name of the processor, defines what it does.
name = messages.StringField(1, required=True)
# Status of the processing.
status = messages.EnumField(Status, 2, required=True)
def processors_protos(instance):
"""Given PackageInstance entity returns a list of Processor messages."""
def procs_to_msg(procs, status):
    return [Processor(name=name, status=status) for name in procs]
processors = []
processors += procs_to_msg(
instance.processors_pending,
Processor.Status.PENDING)
processors += procs_to_msg(
instance.processors_success,
Processor.Status.SUCCESS)
processors += procs_to_msg(
instance.processors_failure,
Processor.Status.FAILURE)
return processors
################################################################################
class FetchPackageResponse(messages.Message):
"""Results of fetchPackage call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, information about the package.
package = messages.MessageField(Package, 3, required=False)
refs = messages.MessageField(PackageRef, 4, repeated=True)
################################################################################
class ListPackagesResponse(messages.Message):
"""Results of listPackage call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, names of the packages and names of directories.
packages = messages.StringField(3, repeated=True)
directories = messages.StringField(4, repeated=True)
################################################################################
class DeletePackageResponse(messages.Message):
"""Results of deletePackage call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class FetchInstanceResponse(messages.Message):
"""Results of fetchInstance call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, information about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For SUCCESS, a signed url to fetch the package instance file from.
fetch_url = messages.StringField(4, required=False)
# For SUCCESS, list of processors applied to the instance.
processors = messages.MessageField(Processor, 5, repeated=True)
################################################################################
class RegisterInstanceResponse(messages.Message):
"""Results of registerInstance call.
  upload_session_id and upload_url (if present) can be used with the CAS service
  (finishUpload call in particular).
  Callers are expected to execute the following protocol:
1. Attempt to register a package instance by calling registerInstance(...).
2. On UPLOAD_FIRST response, upload package data and finalize the upload by
using upload_session_id and upload_url and calling cas.finishUpload.
3. Once upload is finalized, call registerInstance(...) again.
"""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For REGISTERED or ALREADY_REGISTERED, info about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For UPLOAD_FIRST status, a unique identifier of the upload operation.
upload_session_id = messages.StringField(4, required=False)
# For UPLOAD_FIRST status, URL to PUT file to via resumable upload protocol.
upload_url = messages.StringField(5, required=False)
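# Illustrative client-side sketch (not part of the original file) of the upload
# protocol described in the docstring above; `call_register_instance` and
# `upload_via_cas` are hypothetical helpers wrapping the repo and CAS APIs.
#
#   resp = call_register_instance(package_name, instance_id)
#   if resp.status == Status.UPLOAD_FIRST:
#     upload_via_cas(resp.upload_url, resp.upload_session_id, package_data)
#     resp = call_register_instance(package_name, instance_id)
#   assert resp.status in (Status.REGISTERED, Status.ALREADY_REGISTERED)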
################################################################################
class SetRefRequest(messages.Message):
"""Body of setRef call."""
  # ID of the package instance to point the ref to.
instance_id = messages.StringField(1, required=True)
class SetRefResponse(messages.Message):
"""Results of setRef call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about the ref.
ref = messages.MessageField(PackageRef, 3, required=False)
class FetchRefsResponse(messages.Message):
"""Results of fetchRefs call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
  # For SUCCESS status, details about fetched refs.
refs = messages.MessageField(PackageRef, 3, repeated=True)
################################################################################
class FetchTagsResponse(messages.Message):
"""Results of fetchTags call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about found tags.
tags = messages.MessageField(InstanceTag, 3, repeated=True)
class AttachTagsRequest(messages.Message):
"""Body of attachTags call."""
tags = messages.StringField(1, repeated=True)
class AttachTagsResponse(messages.Message):
"""Results of attachTag call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about attached tags.
tags = messages.MessageField(InstanceTag, 3, repeated=True)
class DetachTagsResponse(messages.Message):
"""Results of detachTags call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class SearchResponse(messages.Message):
"""Results of searchInstances call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, list of instances found.
instances = messages.MessageField(PackageInstance, 3, repeated=True)
class ResolveVersionResponse(messages.Message):
"""Results of resolveVersion call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, concrete existing instance ID.
instance_id = messages.StringField(3, required=False)
################################################################################
class FetchACLResponse(messages.Message):
"""Results of fetchACL call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, list of ACLs split by package path and role.
acls = messages.MessageField(PackageACL, 3, required=False)
################################################################################
class ModifyACLRequest(messages.Message):
"""Body of modifyACL call."""
changes = messages.MessageField(RoleChange, 1, repeated=True)
class ModifyACLResponse(messages.Message):
"""Results of modifyACL call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class FetchClientBinaryResponse(messages.Message):
"""Results of fetchClientBinary call."""
class ClientBinary(messages.Message):
# SHA1 hex digest of the extracted binary, for verification on the client.
sha1 = messages.StringField(1, required=True)
# Size of the binary file, just for information.
size = messages.IntegerField(2, required=True)
# A signed url to fetch the binary file from.
fetch_url = messages.StringField(3, required=True)
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS or NOT_EXTRACTED_YET, information about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For SUCCESS, information about the client binary.
client_binary = messages.MessageField(ClientBinary, 4, required=False)
################################################################################
class Error(Exception):
status = Status.ERROR
class PackageNotFoundError(Error):
status = Status.PACKAGE_NOT_FOUND
class InstanceNotFoundError(Error):
status = Status.INSTANCE_NOT_FOUND
class ProcessingFailedError(Error):
status = Status.PROCESSING_FAILED
class ProcessingNotFinishedYetError(Error):
status = Status.PROCESSING_NOT_FINISHED_YET
class ValidationError(Error):
# TODO(vadimsh): Use VALIDATION_ERROR. It changes JSON protocol.
status = Status.ERROR
def validate_package_name(package_name):
if not impl.is_valid_package_path(package_name):
raise ValidationError('Invalid package name')
return package_name
def validate_package_path(package_path):
if not impl.is_valid_package_path(package_path):
raise ValidationError('Invalid package path')
return package_path
def validate_package_ref(ref):
if not impl.is_valid_package_ref(ref):
raise ValidationError('Invalid package ref name')
return ref
def validate_package_ref_list(refs):
if not refs: # pragma: no cover
raise ValidationError('Ref list is empty')
return [validate_package_ref(ref) for ref in refs]
def validate_instance_id(instance_id):
if not impl.is_valid_instance_id(instance_id):
raise ValidationError('Invalid package instance ID')
return instance_id
def validate_instance_tag(tag):
if not impl.is_valid_instance_tag(tag):
raise ValidationError('Invalid tag "%s"' % tag)
return tag
def validate_instance_tag_list(tags):
if not tags:
raise ValidationError('Tag list is empty')
return [validate_instance_tag(tag) for tag in tags]
def validate_instance_version(version):
if not impl.is_valid_instance_version(version):
raise ValidationError('Not a valid instance ID or tag: "%s"' % version)
return version
def endpoints_method(request_message, response_message, **kwargs):
"""Wrapper around Endpoint methods to simplify error handling.
Catches Error exceptions and converts them to error responses. Assumes
response_message has fields 'status' and 'error_message'.
"""
assert hasattr(response_message, 'status')
assert hasattr(response_message, 'error_message')
def decorator(f):
@auth.endpoints_method(request_message, response_message, **kwargs)
@functools.wraps(f)
def wrapper(*args):
try:
response = f(*args)
if response.status is None:
response.status = Status.SUCCESS
return response
except Error as e:
return response_message(
status=e.status,
error_message=e.message if e.message else None)
except auth.Error as e:
caller = auth.get_current_identity().to_bytes()
logging.warning('%s (%s): %s', e.__class__.__name__, caller, e)
raise
return wrapper
return decorator
################################################################################
@auth.endpoints_api(
name='repo',
version='v1',
title='CIPD Package Repository API')
class PackageRepositoryApi(remote.Service):
"""Package Repository API."""
# Cached value of 'service' property.
_service = None
@property
def service(self):
"""Returns configured impl.RepoService."""
if self._service is None:
self._service = impl.get_repo_service()
if self._service is None or not self._service.is_fetch_configured():
raise endpoints.InternalServerErrorException(
'Service is not configured')
return self._service
def get_instance(self, package_name, instance_id):
"""Grabs PackageInstance or raises appropriate *NotFoundError."""
instance = self.service.get_instance(package_name, instance_id)
if instance is None:
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
raise InstanceNotFoundError()
return instance
def verify_instance_exists(self, package_name, instance_id):
"""Raises appropriate *NotFoundError if instance is missing."""
self.get_instance(package_name, instance_id)
def verify_instance_is_ready(self, package_name, instance_id):
"""Raises appropriate error if instance doesn't exist or not ready yet.
Instance is ready when all processors successfully finished.
"""
instance = self.get_instance(package_name, instance_id)
if instance.processors_failure:
raise ProcessingFailedError(
'Failed processors: %s' % ', '.join(instance.processors_failure))
if instance.processors_pending:
raise ProcessingNotFinishedYetError(
'Pending processors: %s' % ', '.join(instance.processors_pending))
### Package methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
with_refs=messages.BooleanField(2, required=False)),
FetchPackageResponse,
http_method='GET',
path='package',
name='fetchPackage')
@auth.public # ACL check is inside
def fetch_package(self, request):
"""Returns information about a package."""
package_name = validate_package_name(request.package_name)
caller = auth.get_current_identity()
if not acl.can_fetch_package(package_name, caller):
raise auth.AuthorizationError()
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
refs = []
if request.with_refs:
refs = self.service.query_package_refs(package_name)
return FetchPackageResponse(
package=package_to_proto(pkg),
refs=[package_ref_to_proto(r) for r in refs])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
path=messages.StringField(1, required=False),
recursive=messages.BooleanField(2, required=False)),
ListPackagesResponse,
http_method='GET',
path='package/search',
name='listPackages')
@auth.public # ACL check is inside
def list_packages(self, request):
"""Returns packages in the given directory and possibly subdirectories."""
path = request.path or ''
recursive = request.recursive or False
pkgs, dirs = self.service.list_packages(path, recursive)
caller = auth.get_current_identity()
visible_pkgs = [p for p in pkgs if acl.can_fetch_package(p, caller)]
visible_dirs = [d for d in dirs if acl.can_fetch_package(d, caller)]
return ListPackagesResponse(packages=visible_pkgs, directories=visible_dirs)
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True)),
DeletePackageResponse,
http_method='DELETE',
path='package',
name='deletePackage')
@auth.public # ACL check is inside
def delete_package(self, request):
"""Deletes a package along with all its instances."""
package_name = validate_package_name(request.package_name)
caller = auth.get_current_identity()
if not acl.can_delete_package(package_name, caller):
raise auth.AuthorizationError()
deleted = self.service.delete_package(package_name)
if not deleted:
raise PackageNotFoundError()
return DeletePackageResponse()
### PackageInstance methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
FetchInstanceResponse,
http_method='GET',
path='instance',
name='fetchInstance')
@auth.public # ACL check is inside
def fetch_instance(self, request):
"""Returns signed URL that can be used to fetch a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
instance = self.get_instance(package_name, instance_id)
return FetchInstanceResponse(
instance=instance_to_proto(instance),
fetch_url=self.service.generate_fetch_url(instance),
processors=processors_protos(instance))
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
RegisterInstanceResponse,
path='instance',
http_method='POST',
name='registerInstance')
@auth.public # ACL check is inside
def register_instance(self, request):
"""Registers a new package instance in the repository."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_register_instance(package_name, caller):
raise auth.AuthorizationError()
instance = self.service.get_instance(package_name, instance_id)
if instance is not None:
return RegisterInstanceResponse(
status=Status.ALREADY_REGISTERED,
instance=instance_to_proto(instance))
# Need to upload to CAS first? Open an upload session. Caller must use
# CASServiceApi to finish the upload and then call registerInstance again.
if not self.service.is_instance_file_uploaded(package_name, instance_id):
upload_url, upload_session_id = self.service.create_upload_session(
package_name, instance_id, caller)
return RegisterInstanceResponse(
status=Status.UPLOAD_FIRST,
upload_session_id=upload_session_id,
upload_url=upload_url)
# Package data is in the store. Make an entity.
instance, registered = self.service.register_instance(
package_name=package_name,
instance_id=instance_id,
caller=caller,
now=utils.utcnow())
return RegisterInstanceResponse(
status=Status.REGISTERED if registered else Status.ALREADY_REGISTERED,
instance=instance_to_proto(instance))
### Refs methods.
@endpoints_method(
endpoints.ResourceContainer(
SetRefRequest,
package_name=messages.StringField(1, required=True),
ref=messages.StringField(2, required=True)),
SetRefResponse,
path='ref',
http_method='POST',
name='setRef')
@auth.public # ACL check is inside
def set_ref(self, request):
"""Creates a ref or moves an existing one."""
package_name = validate_package_name(request.package_name)
ref = validate_package_ref(request.ref)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_move_ref(package_name, ref, caller):
raise auth.AuthorizationError('Not authorized to move "%s"' % ref)
self.verify_instance_is_ready(package_name, instance_id)
ref_entity = self.service.set_package_ref(
package_name=package_name,
ref=ref,
instance_id=instance_id,
caller=caller,
now=utils.utcnow())
return SetRefResponse(ref=package_ref_to_proto(ref_entity))
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
ref=messages.StringField(3, repeated=True)),
FetchRefsResponse,
path='ref',
http_method='GET',
name='fetchRefs')
@auth.public # ACL check is inside
def fetch_refs(self, request):
"""Lists package instance refs (newest first)."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
refs = validate_package_ref_list(request.ref) if request.ref else None
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
self.verify_instance_exists(package_name, instance_id)
if not refs:
# Fetch all.
output = self.service.query_instance_refs(package_name, instance_id)
else:
# Fetch selected refs, pick ones pointing to the instance.
output = [
r
for r in self.service.get_package_refs(package_name, refs).itervalues()
if r and r.instance_id == instance_id
]
output.sort(key=lambda r: r.modified_ts, reverse=True)
return FetchRefsResponse(refs=[package_ref_to_proto(ref) for ref in output])
### Tags methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
tag=messages.StringField(3, repeated=True)),
FetchTagsResponse,
path='tags',
http_method='GET',
name='fetchTags')
@auth.public # ACL check is inside
def fetch_tags(self, request):
"""Lists package instance tags (newest first)."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tag) if request.tag else None
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
self.verify_instance_exists(package_name, instance_id)
if not tags:
# Fetch all.
attached = self.service.query_tags(package_name, instance_id)
else:
      # Fetch only the selected tags; essentially an "is tagged by?" check.
found = self.service.get_tags(package_name, instance_id, tags)
attached = [found[tag] for tag in tags if found[tag]]
attached.sort(key=lambda t: t.registered_ts, reverse=True)
return FetchTagsResponse(tags=[tag_to_proto(tag) for tag in attached])
@endpoints_method(
endpoints.ResourceContainer(
AttachTagsRequest,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
AttachTagsResponse,
path='tags',
http_method='POST',
name='attachTags')
@auth.public # ACL check is inside
def attach_tags(self, request):
"""Attaches a set of tags to a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tags)
caller = auth.get_current_identity()
for tag in tags:
if not acl.can_attach_tag(package_name, tag, caller):
raise auth.AuthorizationError('Not authorized to attach "%s"' % tag)
self.verify_instance_is_ready(package_name, instance_id)
attached = self.service.attach_tags(
package_name=package_name,
instance_id=instance_id,
tags=tags,
caller=caller,
now=utils.utcnow())
return AttachTagsResponse(tags=[tag_to_proto(attached[t]) for t in tags])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
tag=messages.StringField(3, repeated=True)),
DetachTagsResponse,
path='tags',
http_method='DELETE',
name='detachTags')
@auth.public # ACL check is inside
def detach_tags(self, request):
"""Removes given tags from a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tag)
caller = auth.get_current_identity()
for tag in tags:
if not acl.can_detach_tag(package_name, tag, caller):
raise auth.AuthorizationError('Not authorized to detach "%s"' % tag)
self.verify_instance_exists(package_name, instance_id)
self.service.detach_tags(
package_name=package_name,
instance_id=instance_id,
tags=tags)
return DetachTagsResponse()
### Search methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
tag=messages.StringField(1, required=True),
package_name=messages.StringField(2, required=False)),
SearchResponse,
path='instance/search',
http_method='GET',
name='searchInstances')
@auth.public # ACL check is inside
def search_instances(self, request):
"""Returns package instances with given tag (in no particular order)."""
tag = validate_instance_tag(request.tag)
if request.package_name:
package_name = validate_package_name(request.package_name)
else:
package_name = None
caller = auth.get_current_identity()
callback = None
if package_name:
# If search is limited to one package, check its ACL only once.
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
else:
# Filter out packages not allowed by ACL.
acl_cache = {}
def check_readable(package_name, _instance_id):
if package_name not in acl_cache:
acl_cache[package_name] = acl.can_fetch_instance(package_name, caller)
return acl_cache[package_name]
callback = check_readable
found = self.service.search_by_tag(tag, package_name, callback)
return SearchResponse(instances=[instance_to_proto(i) for i in found])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
version=messages.StringField(2, required=True)),
ResolveVersionResponse,
path='instance/resolve',
http_method='GET',
name='resolveVersion')
@auth.public # ACL check is inside
def resolve_version(self, request):
"""Returns instance ID of an existing instance given a ref or a tag."""
package_name = validate_package_name(request.package_name)
version = validate_instance_version(request.version)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
ids = self.service.resolve_version(package_name, version, limit=2)
if not ids:
raise InstanceNotFoundError()
if len(ids) > 1:
return ResolveVersionResponse(
status=Status.AMBIGUOUS_VERSION,
error_message='More than one instance has tag "%s" set' % version)
return ResolveVersionResponse(instance_id=ids[0])
### ACL methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_path=messages.StringField(1, required=True)),
FetchACLResponse,
http_method='GET',
path='acl',
name='fetchACL')
@auth.public # ACL check is inside
def fetch_acl(self, request):
"""Returns access control list for a given package path."""
package_path = validate_package_path(request.package_path)
caller = auth.get_current_identity()
if not acl.can_fetch_acl(package_path, caller):
raise auth.AuthorizationError()
return FetchACLResponse(
acls=package_acls_to_proto({
role: acl.get_package_acls(package_path, role)
for role in acl.ROLES
}))
@endpoints_method(
endpoints.ResourceContainer(
ModifyACLRequest,
package_path=messages.StringField(1, required=True)),
ModifyACLResponse,
http_method='POST',
path='acl',
name='modifyACL')
@auth.public # ACL check is inside
def modify_acl(self, request):
"""Changes access control list for a given package path."""
package_path = validate_package_path(request.package_path)
try:
changes = [
role_change_from_proto(msg, package_path)
for msg in request.changes
]
except ValueError as exc:
raise ValidationError('Invalid role change request: %s' % exc)
caller = auth.get_current_identity()
if not acl.can_modify_acl(package_path, caller):
raise auth.AuthorizationError()
# Apply changes. Do not catch ValueError. Validation above should be
# sufficient. If it is not, HTTP 500 and an uncaught exception in logs is
# exactly what is needed.
acl.modify_roles(changes, caller, utils.utcnow())
return ModifyACLResponse()
### ClientBinary methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
FetchClientBinaryResponse,
http_method='GET',
path='client',
name='fetchClientBinary')
@auth.public # ACL check is inside
def fetch_client_binary(self, request):
"""Returns signed URL that can be used to fetch CIPD client binary."""
package_name = validate_package_name(request.package_name)
if not client.is_cipd_client_package(package_name):
raise ValidationError('Not a CIPD client package')
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
# Grab the location of the extracted binary.
instance = self.get_instance(package_name, instance_id)
client_info, error_message = self.service.get_client_binary_info(instance)
if error_message:
raise Error(error_message)
if client_info is None:
return FetchClientBinaryResponse(
status=Status.NOT_EXTRACTED_YET,
instance=instance_to_proto(instance))
return FetchClientBinaryResponse(
instance=instance_to_proto(instance),
client_binary=FetchClientBinaryResponse.ClientBinary(
sha1=client_info.sha1,
size=client_info.size,
fetch_url=client_info.fetch_url))
| en | 0.645697 | # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Cloud Endpoints API for Package Repository service. # This is used by endpoints indirectly. ################################################################################ ## Messages used by other messages. Response status code, shared by all responses. # Operation finished successfully (generic "success" response). # The package instance was successfully registered. # The package instance was already registered (not a error). # Some uncategorized non-transient error happened. # No such package. # Package itself is known, but requested instance_id isn't registered. # Need to upload package data before registering the package. # Client binary is not available, the call should be retried later. # Some asynchronous package processing failed. # Asynchronous package processing is still running. # More than one instance matches criteria in resolveVersion. Information about some registered package. Package entity -> Package proto message. Information about some registered package instance. PackageInstance entity -> PackageInstance proto message. Some single package instance tag. InstanceTag entity -> InstanceTag proto message. Information about some ref belonging to a package. PackageRef entity -> PackageRef proto message. Access control list for some package path and all parent paths. Single per role, per package path ACL. # List of ACLs split by package path and role. No ordering. Dict {role -> list of PackageACL entities} -> PackageACL message. Describes a single modification to ACL. # Action to perform. # Role to modify ('OWNER', 'WRITER', 'READER', ...). # Principal ('user:...' or 'group:...') to grant or revoke a role for. RoleChange proto message -> acl.RoleChange object. Raises ValueError on format errors. # Raises ValueError if proto.user has invalid format, e.g. not 'user:...'. Status of some package instance processor. # Name of the processor, defines what it does. # Status of the processing. Given PackageInstance entity returns a list of Processor messages. ################################################################################ Results of fetchPackage call. # For SUCCESS, information about the package. ################################################################################ Results of listPackage call. # For SUCCESS, names of the packages and names of directories. ################################################################################ Results of deletePackage call. ################################################################################ Results of fetchInstance call. # For SUCCESS, information about the package instance. # For SUCCESS, a signed url to fetch the package instance file from. # For SUCCESS, list of processors applied to the instance. ################################################################################ Results of registerInstance call. upload_session_id and upload_url (if present) can be used with CAS service (finishUpload call in particular). Callers are expected to execute following protocol: 1. Attempt to register a package instance by calling registerInstance(...). 2. On UPLOAD_FIRST response, upload package data and finalize the upload by using upload_session_id and upload_url and calling cas.finishUpload. 3. Once upload is finalized, call registerInstance(...) again. # For REGISTERED or ALREADY_REGISTERED, info about the package instance. 
# For UPLOAD_FIRST status, a unique identifier of the upload operation. # For UPLOAD_FIRST status, URL to PUT file to via resumable upload protocol. ################################################################################ Body of setRef call. # ID of the package instance to point the ref too. Results of setRef call. # For SUCCESS status, details about the ref. Results of fetchRefs call. # For SUCCESS status, details about fetches refs. ################################################################################ Results of fetchTags call. # For SUCCESS status, details about found tags. Body of attachTags call. Results of attachTag call. # For SUCCESS status, details about attached tags. Results of detachTags call. ################################################################################ Results of searchInstances call. # For SUCCESS, list of instances found. Results of resolveVersion call. # For SUCCESS, concrete existing instance ID. ################################################################################ Results of fetchACL call. # For SUCCESS status, list of ACLs split by package path and role. ################################################################################ Body of modifyACL call. Results of modifyACL call. ################################################################################ Results of fetchClientBinary call. # SHA1 hex digest of the extracted binary, for verification on the client. # Size of the binary file, just for information. # A signed url to fetch the binary file from. # For SUCCESS or NOT_EXTRACTED_YET, information about the package instance. # For SUCCESS, information about the client binary. ################################################################################ # TODO(vadimsh): Use VALIDATION_ERROR. It changes JSON protocol. # pragma: no cover Wrapper around Endpoint methods to simplify error handling. Catches Error exceptions and converts them to error responses. Assumes response_message has fields 'status' and 'error_message'. ################################################################################ Package Repository API. # Cached value of 'service' property. Returns configured impl.RepoService. Grabs PackageInstance or raises appropriate *NotFoundError. Raises appropriate *NotFoundError if instance is missing. Raises appropriate error if instance doesn't exist or not ready yet. Instance is ready when all processors successfully finished. ### Package methods. # ACL check is inside Returns information about a package. # ACL check is inside Returns packages in the given directory and possibly subdirectories. # ACL check is inside Deletes a package along with all its instances. ### PackageInstance methods. # ACL check is inside Returns signed URL that can be used to fetch a package instance. # ACL check is inside Registers a new package instance in the repository. # Need to upload to CAS first? Open an upload session. Caller must use # CASServiceApi to finish the upload and then call registerInstance again. # Package data is in the store. Make an entity. ### Refs methods. # ACL check is inside Creates a ref or moves an existing one. # ACL check is inside Lists package instance refs (newest first). # Fetch all. # Fetch selected refs, pick ones pointing to the instance. ### Tags methods. # ACL check is inside Lists package instance tags (newest first). # Fetch all. # Fetch selected only. "Is tagged by?" check essentially. # ACL check is inside Attaches a set of tags to a package instance. 
# ACL check is inside Removes given tags from a package instance. ### Search methods. # ACL check is inside Returns package instances with given tag (in no particular order). # If search is limited to one package, check its ACL only once. # Filter out packages not allowed by ACL. # ACL check is inside Returns instance ID of an existing instance given a ref or a tag. ### ACL methods. # ACL check is inside Returns access control list for a given package path. # ACL check is inside Changes access control list for a given package path. # Apply changes. Do not catch ValueError. Validation above should be # sufficient. If it is not, HTTP 500 and an uncaught exception in logs is # exactly what is needed. ### ClientBinary methods. # ACL check is inside Returns signed URL that can be used to fetch CIPD client binary. # Grab the location of the extracted binary. | 1.958108 | 2 |
var/spack/repos/builtin/packages/tclap/package.py | HaochengLIU/spack | 2 | 6630984 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tclap(AutotoolsPackage):
"""Templatized C++ Command Line Parser"""
homepage = "http://tclap.sourceforge.net"
url = "https://downloads.sourceforge.net/project/tclap/tclap-1.2.2.tar.gz"
version('1.2.2', '6f35665814dca292eceda007d7e13bcb')
version('1.2.1', 'eb0521d029bf3b1cc0dcaa7e42abf82a')
| # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tclap(AutotoolsPackage):
"""Templatized C++ Command Line Parser"""
homepage = "http://tclap.sourceforge.net"
url = "https://downloads.sourceforge.net/project/tclap/tclap-1.2.2.tar.gz"
version('1.2.2', '6f35665814dca292eceda007d7e13bcb')
version('1.2.1', 'eb0521d029bf3b1cc0dcaa7e42abf82a')
| en | 0.616952 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Templatized C++ Command Line Parser | 1.181775 | 1 |
backend/apps/events/migrations/0001_initial.py | dominikbullo/SportAgenda | 0 | 6630985 | <gh_stars>0
# Generated by Django 3.1.2 on 2020-10-27 20:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('U8', 'Superbaby'), ('U10', 'Mladší predžiaci'), ('U12', 'Starší predžiaci'), ('U14', 'Mladší žiaci'), ('U16', 'Starší žiaci'), ('U18', 'Juniory'), ('U21', 'Dospelý')], max_length=3)),
('year_from', models.IntegerField(choices=[(1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020)], default=2018, verbose_name='Year from')),
('year_until', models.IntegerField(choices=[(1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020)], default=2020, verbose_name='Year until')),
],
options={
'verbose_name_plural': 'Categories',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('SKI_TRAINING', 'Ski Training'), ('ATHLETIC_TRAINING', 'Athletic Training'), ('SKI_RACE', 'Ski Race'), ('SKI_CAMP', 'Ski Camp'), ('VIDEO_ANALYZE', 'Video Analyze'), ('MEETING', 'Meeting')], max_length=50)),
('canceled', models.BooleanField(default=False)),
('send_email', models.BooleanField(default=False)),
('start', models.DateTimeField()),
('end', models.DateTimeField(blank=True)),
('additional_info', models.CharField(blank=True, max_length=150)),
('category', models.ManyToManyField(to='events.Category')),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
),
migrations.CreateModel(
name='Season',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.CharField(max_length=9, unique=True)),
('current', models.BooleanField(default=False)),
('start_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='SkiRace',
fields=[
('event_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='events.event')),
('skis_type', models.CharField(choices=[('ALL', 'All'), ('SL', 'Slalom'), ('GS', 'Giant Slalom')], default='ALL', max_length=3)),
('temperature', models.IntegerField(blank=True, null=True)),
('propositionURL', models.URLField(blank=True, null=True)),
('hotel_price', models.CharField(blank=True, max_length=50, null=True)),
('book_hotel_from', models.DateTimeField(blank=True, null=True)),
('book_hotel_to', models.DateTimeField(blank=True, null=True)),
],
options={
'abstract': False,
},
bases=('events.event',),
),
migrations.CreateModel(
name='SkiTraining',
fields=[
('event_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='events.event')),
('skis_type', models.CharField(choices=[('ALL', 'All'), ('SL', 'Slalom'), ('GS', 'Giant Slalom')], default='ALL', max_length=3)),
('temperature', models.IntegerField(blank=True, null=True)),
('gates', models.CharField(blank=True, max_length=50, null=True)),
('number_of_runs', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'abstract': False,
},
bases=('events.event',),
),
migrations.CreateModel(
name='RaceOrganizer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('shorthand', models.CharField(max_length=15)),
('website', models.URLField(blank=True, null=True)),
('club', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'unique_together': {('name', 'club')},
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('ski_slope', models.CharField(blank=True, max_length=50, null=True)),
('additional_info', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'unique_together': {('name', 'ski_slope')},
},
),
migrations.AddField(
model_name='event',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='events.location'),
),
]
| # Generated by Django 3.1.2 on 2020-10-27 20:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('U8', 'Superbaby'), ('U10', 'Mladší predžiaci'), ('U12', 'Starší predžiaci'), ('U14', 'Mladší žiaci'), ('U16', 'Starší žiaci'), ('U18', 'Juniory'), ('U21', 'Dospelý')], max_length=3)),
('year_from', models.IntegerField(choices=[(1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020)], default=2018, verbose_name='Year from')),
('year_until', models.IntegerField(choices=[(1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020)], default=2020, verbose_name='Year until')),
],
options={
'verbose_name_plural': 'Categories',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('SKI_TRAINING', 'Ski Training'), ('ATHLETIC_TRAINING', 'Athletic Training'), ('SKI_RACE', 'Ski Race'), ('SKI_CAMP', 'Ski Camp'), ('VIDEO_ANALYZE', 'Video Analyze'), ('MEETING', 'Meeting')], max_length=50)),
('canceled', models.BooleanField(default=False)),
('send_email', models.BooleanField(default=False)),
('start', models.DateTimeField()),
('end', models.DateTimeField(blank=True)),
('additional_info', models.CharField(blank=True, max_length=150)),
('category', models.ManyToManyField(to='events.Category')),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
),
migrations.CreateModel(
name='Season',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.CharField(max_length=9, unique=True)),
('current', models.BooleanField(default=False)),
('start_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='SkiRace',
fields=[
('event_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='events.event')),
('skis_type', models.CharField(choices=[('ALL', 'All'), ('SL', 'Slalom'), ('GS', 'Giant Slalom')], default='ALL', max_length=3)),
('temperature', models.IntegerField(blank=True, null=True)),
('propositionURL', models.URLField(blank=True, null=True)),
('hotel_price', models.CharField(blank=True, max_length=50, null=True)),
('book_hotel_from', models.DateTimeField(blank=True, null=True)),
('book_hotel_to', models.DateTimeField(blank=True, null=True)),
],
options={
'abstract': False,
},
bases=('events.event',),
),
migrations.CreateModel(
name='SkiTraining',
fields=[
('event_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='events.event')),
('skis_type', models.CharField(choices=[('ALL', 'All'), ('SL', 'Slalom'), ('GS', 'Giant Slalom')], default='ALL', max_length=3)),
('temperature', models.IntegerField(blank=True, null=True)),
('gates', models.CharField(blank=True, max_length=50, null=True)),
('number_of_runs', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'abstract': False,
},
bases=('events.event',),
),
migrations.CreateModel(
name='RaceOrganizer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('shorthand', models.CharField(max_length=15)),
('website', models.URLField(blank=True, null=True)),
('club', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'unique_together': {('name', 'club')},
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('ski_slope', models.CharField(blank=True, max_length=50, null=True)),
('additional_info', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'unique_together': {('name', 'ski_slope')},
},
),
migrations.AddField(
model_name='event',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='events.location'),
),
] | en | 0.768677 | # Generated by Django 3.1.2 on 2020-10-27 20:14 | 2.063211 | 2 |
ontonotes5_to_json.py | geraltofrivia/ontonotes-5-parsing | 0 | 6630986 | from argparse import ArgumentParser
import codecs
import gc
import json
import os
import random
import traceback
import tarfile
from tempfile import NamedTemporaryFile
from tqdm import tqdm
from ontonotes5.utils import parse_file, parse_splitting, check_onf_name
from ontonotes5.utils import get_language_by_filename
from ontonotes5.utils import get_language_frequencies, get_entity_frequencies
def main():
parser = ArgumentParser()
parser.add_argument(
'-s',
'--src',
dest='source_file', type=str, required=True,
help='The source *.tgz file with gzipped Ontonotes 5 dataset (see '
'https://catalog.ldc.upenn.edu/LDC2013T19).'
)
parser.add_argument(
'-d',
'--dst',
dest='dst_file', type=str, required=True,
help='The destination *.json file with texts and their annotations '
'(named entities, morphology and syntax).'
)
parser.add_argument(
'-i',
'--ids',
dest='train_dev_test_ids', type=str, required=False, default=None,
help='The directory with identifiers list, which is described the '
'Ontonotes 5 splitting by subsets for training, development '
'(validation) and final testing (see '
'http://conll.cemantix.org/2012/download/ids/).'
)
parser.add_argument(
'-r',
'--random',
dest='random_seed', type=int, required=False, default=None,
help='A random seed.'
)
cmd_args = parser.parse_args()
if cmd_args.random_seed is not None:
random.seed(cmd_args.random_seed)
src_file_name = os.path.normpath(cmd_args.source_file)
err_msg = 'File "{0}" does not exist!'.format(src_file_name)
assert os.path.isfile(src_file_name), err_msg
dst_file_name = os.path.normpath(cmd_args.dst_file)
dst_file_dir = os.path.dirname(dst_file_name)
if len(dst_file_dir) > 0:
err_msg = 'Directory "{0}" does not exist!'.format(dst_file_dir)
assert os.path.isdir(dst_file_dir), err_msg
if cmd_args.train_dev_test_ids is None:
ids_dir_name = None
else:
ids_dir_name = os.path.normpath(cmd_args.train_dev_test_ids)
err_msg = 'Directory "{0}" does not exist!'.format(ids_dir_name)
assert os.path.isdir(dst_file_dir), err_msg
data_for_training = {}
data_for_validation = []
data_for_testing = []
if ids_dir_name is None:
splitting = None
else:
splitting = parse_splitting(ids_dir_name)
assert len(set(splitting['train']) & set(splitting['test'])) == 0
assert len(set(splitting['train']) & set(splitting['development'])) == 0
assert len(set(splitting['development']) & set(splitting['test'])) == 0
files_with_errors = []
with tarfile.open(src_file_name, mode='r:*', encoding='utf-8') as tgz_fp:
onf_names = list(map(
lambda it2: it2.name,
filter(
lambda it1: it1.isfile() and it1.name.endswith('.onf') and it1.name.startswith('ontonotes-release-5.0/data/files/data/english/annotations/nw/wsj'),
tgz_fp.getmembers()
)
))
# onf_names = onf_names[:100]
number_of_members = len(onf_names)
err_msg = 'There are no labeled texts with *.onf extension in the ' \
'"{0}"!'.format(src_file_name)
assert number_of_members > 0, err_msg
for cur_name in tqdm(onf_names):
language = get_language_by_filename(cur_name)
tmp_name = None
try:
with NamedTemporaryFile(mode='w', delete=False) as tmp_fp:
tmp_name = tmp_fp.name
binary_stream = tgz_fp.extractfile(cur_name)
if binary_stream is not None:
binary_data = binary_stream.read()
with open(tmp_name, 'wb') as tmp_fp:
tmp_fp.write(binary_data)
del binary_data, binary_stream
try:
parsed, err_msg_2 = parse_file(tmp_name, cur_name)
except ValueError:
traceback.print_exc()
continue
if err_msg_2 != '':
files_with_errors.append((cur_name, err_msg_2))
n = len(parsed)
if n > 0:
for idx in range(n):
parsed[idx]['language'] = language
if splitting is None:
data_for_training[cur_name] = parsed
else:
dst_key = check_onf_name(cur_name, splitting)
if dst_key == 'train':
data_for_training += parsed
elif dst_key == 'development':
data_for_validation += parsed
elif dst_key == 'test':
data_for_testing += parsed
finally:
if tmp_name is not None:
if os.path.isfile(tmp_name):
os.remove(tmp_name)
gc.collect()
with codecs.open(dst_file_name, mode='w', encoding='utf-8') as fp:
# random.shuffle(data_for_training)
res = {'TRAINING': data_for_training}
if splitting is None:
assert len(data_for_validation) == 0
assert len(data_for_testing) == 0
else:
assert len(data_for_validation) > 0
assert len(data_for_testing) > 0
# random.shuffle(data_for_validation)
res['VALIDATION'] = data_for_validation
# random.shuffle(data_for_testing)
res['TESTING'] = data_for_testing
json.dump(res, fp=fp, ensure_ascii=False, indent=4, sort_keys=True)
print('{0} files are processed.'.format(number_of_members))
n_errors = len(files_with_errors)
if n_errors > 0:
print('{0} files from them contain some errors.'.format(n_errors))
print('They are:')
for filename, err_msg in files_with_errors:
print(' file name "{0}"'.format(filename))
print(' error "{0}"'.format(err_msg))
assert len(data_for_training) > 0
if splitting is None:
print('{0} samples are loaded...'.format(len(data_for_training)))
languages_for_training = get_language_frequencies(data_for_training)
print('By languages:')
for lang, freq in languages_for_training:
entity_stat = get_entity_frequencies(data_for_training, lang)
print(' {0}:'.format(lang))
print(' {0} samples;'.format(freq))
print(' {0} entities, among them:'.format(
sum([cur[1] for cur in entity_stat])
))
max_width = max([len(cur[0]) for cur in entity_stat])
for entity_type, entity_freq in entity_stat:
print(' {0:>{1}} {2}'.format(entity_type, max_width,
entity_freq))
else:
for goal in res:
print('===============')
print(' {0}'.format(goal))
print('===============')
print('')
print('{0} samples are loaded...'.format(len(res[goal])))
languages_for_training = get_language_frequencies(res[goal])
print('By languages:')
for lang, freq in languages_for_training:
entity_stat = get_entity_frequencies(res[goal], lang)
print(' {0}:'.format(lang))
print(' {0} samples;'.format(freq))
print(' {0} entities, among them:'.format(
sum([cur[1] for cur in entity_stat])
))
max_width = max([len(cur[0]) for cur in entity_stat])
for entity_type, entity_freq in entity_stat:
print(' {0:>{1}} {2}'.format(entity_type, max_width,
entity_freq))
print('')
if __name__ == '__main__':
main()
| from argparse import ArgumentParser
import codecs
import gc
import json
import os
import random
import traceback
import tarfile
from tempfile import NamedTemporaryFile
from tqdm import tqdm
from ontonotes5.utils import parse_file, parse_splitting, check_onf_name
from ontonotes5.utils import get_language_by_filename
from ontonotes5.utils import get_language_frequencies, get_entity_frequencies
def main():
parser = ArgumentParser()
parser.add_argument(
'-s',
'--src',
dest='source_file', type=str, required=True,
help='The source *.tgz file with gzipped Ontonotes 5 dataset (see '
'https://catalog.ldc.upenn.edu/LDC2013T19).'
)
parser.add_argument(
'-d',
'--dst',
dest='dst_file', type=str, required=True,
help='The destination *.json file with texts and their annotations '
'(named entities, morphology and syntax).'
)
parser.add_argument(
'-i',
'--ids',
dest='train_dev_test_ids', type=str, required=False, default=None,
help='The directory with identifiers list, which is described the '
'Ontonotes 5 splitting by subsets for training, development '
'(validation) and final testing (see '
'http://conll.cemantix.org/2012/download/ids/).'
)
parser.add_argument(
'-r',
'--random',
dest='random_seed', type=int, required=False, default=None,
help='A random seed.'
)
cmd_args = parser.parse_args()
if cmd_args.random_seed is not None:
random.seed(cmd_args.random_seed)
src_file_name = os.path.normpath(cmd_args.source_file)
err_msg = 'File "{0}" does not exist!'.format(src_file_name)
assert os.path.isfile(src_file_name), err_msg
dst_file_name = os.path.normpath(cmd_args.dst_file)
dst_file_dir = os.path.dirname(dst_file_name)
if len(dst_file_dir) > 0:
err_msg = 'Directory "{0}" does not exist!'.format(dst_file_dir)
assert os.path.isdir(dst_file_dir), err_msg
if cmd_args.train_dev_test_ids is None:
ids_dir_name = None
else:
ids_dir_name = os.path.normpath(cmd_args.train_dev_test_ids)
err_msg = 'Directory "{0}" does not exist!'.format(ids_dir_name)
assert os.path.isdir(dst_file_dir), err_msg
data_for_training = {}
data_for_validation = []
data_for_testing = []
if ids_dir_name is None:
splitting = None
else:
splitting = parse_splitting(ids_dir_name)
assert len(set(splitting['train']) & set(splitting['test'])) == 0
assert len(set(splitting['train']) & set(splitting['development'])) == 0
assert len(set(splitting['development']) & set(splitting['test'])) == 0
files_with_errors = []
with tarfile.open(src_file_name, mode='r:*', encoding='utf-8') as tgz_fp:
onf_names = list(map(
lambda it2: it2.name,
filter(
lambda it1: it1.isfile() and it1.name.endswith('.onf') and it1.name.startswith('ontonotes-release-5.0/data/files/data/english/annotations/nw/wsj'),
tgz_fp.getmembers()
)
))
# onf_names = onf_names[:100]
number_of_members = len(onf_names)
err_msg = 'There are no labeled texts with *.onf extension in the ' \
'"{0}"!'.format(src_file_name)
assert number_of_members > 0, err_msg
for cur_name in tqdm(onf_names):
language = get_language_by_filename(cur_name)
tmp_name = None
try:
with NamedTemporaryFile(mode='w', delete=False) as tmp_fp:
tmp_name = tmp_fp.name
binary_stream = tgz_fp.extractfile(cur_name)
if binary_stream is not None:
binary_data = binary_stream.read()
with open(tmp_name, 'wb') as tmp_fp:
tmp_fp.write(binary_data)
del binary_data, binary_stream
try:
parsed, err_msg_2 = parse_file(tmp_name, cur_name)
except ValueError:
traceback.print_exc()
continue
if err_msg_2 != '':
files_with_errors.append((cur_name, err_msg_2))
n = len(parsed)
if n > 0:
for idx in range(n):
parsed[idx]['language'] = language
if splitting is None:
data_for_training[cur_name] = parsed
else:
dst_key = check_onf_name(cur_name, splitting)
if dst_key == 'train':
data_for_training += parsed
elif dst_key == 'development':
data_for_validation += parsed
elif dst_key == 'test':
data_for_testing += parsed
finally:
if tmp_name is not None:
if os.path.isfile(tmp_name):
os.remove(tmp_name)
gc.collect()
with codecs.open(dst_file_name, mode='w', encoding='utf-8') as fp:
# random.shuffle(data_for_training)
res = {'TRAINING': data_for_training}
if splitting is None:
assert len(data_for_validation) == 0
assert len(data_for_testing) == 0
else:
assert len(data_for_validation) > 0
assert len(data_for_testing) > 0
# random.shuffle(data_for_validation)
res['VALIDATION'] = data_for_validation
# random.shuffle(data_for_testing)
res['TESTING'] = data_for_testing
json.dump(res, fp=fp, ensure_ascii=False, indent=4, sort_keys=True)
print('{0} files are processed.'.format(number_of_members))
n_errors = len(files_with_errors)
if n_errors > 0:
print('{0} files from them contain some errors.'.format(n_errors))
print('They are:')
for filename, err_msg in files_with_errors:
print(' file name "{0}"'.format(filename))
print(' error "{0}"'.format(err_msg))
assert len(data_for_training) > 0
if splitting is None:
print('{0} samples are loaded...'.format(len(data_for_training)))
languages_for_training = get_language_frequencies(data_for_training)
print('By languages:')
for lang, freq in languages_for_training:
entity_stat = get_entity_frequencies(data_for_training, lang)
print(' {0}:'.format(lang))
print(' {0} samples;'.format(freq))
print(' {0} entities, among them:'.format(
sum([cur[1] for cur in entity_stat])
))
max_width = max([len(cur[0]) for cur in entity_stat])
for entity_type, entity_freq in entity_stat:
print(' {0:>{1}} {2}'.format(entity_type, max_width,
entity_freq))
else:
for goal in res:
print('===============')
print(' {0}'.format(goal))
print('===============')
print('')
print('{0} samples are loaded...'.format(len(res[goal])))
languages_for_training = get_language_frequencies(res[goal])
print('By languages:')
for lang, freq in languages_for_training:
entity_stat = get_entity_frequencies(res[goal], lang)
print(' {0}:'.format(lang))
print(' {0} samples;'.format(freq))
print(' {0} entities, among them:'.format(
sum([cur[1] for cur in entity_stat])
))
max_width = max([len(cur[0]) for cur in entity_stat])
for entity_type, entity_freq in entity_stat:
print(' {0:>{1}} {2}'.format(entity_type, max_width,
entity_freq))
print('')
if __name__ == '__main__':
main()
| en | 0.450857 | # onf_names = onf_names[:100] # random.shuffle(data_for_training) # random.shuffle(data_for_validation) # random.shuffle(data_for_testing) | 2.198978 | 2 |
upwork/routers/workdays.py | upwork/python-upwork | 150 | 6630987 | # Licensed under the Upwork's API Terms of Use;
# you may not use this file except in compliance with the Terms.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author:: <NAME> (<EMAIL>)
# Copyright:: Copyright 2020(c) Upwork.com
# License:: See LICENSE.txt and TOS - https://developers.upwork.com/api-tos.html
class Api:
""" """
client = None
def __init__(self, client):
self.client = client
def get_by_company(self, company, from_date, till_date, params={}):
"""Get Workdays by Company
Parameters:
:param company:
:param from_date:
:param till_date:
:param params: (Default value = {})
"""
return self.client.get(
"/team/v3/workdays/companies/{0}/{1},{2}".format(
company, from_date, till_date
),
params,
)
def get_by_contract(self, contract, from_date, till_date, params={}):
"""Get Workdays by Contract
Parameters:
:param contract:
:param from_date:
:param till_date:
:param params: (Default value = {})
"""
return self.client.get(
"/team/v3/workdays/contracts/{0}/{1},{2}".format(
contract, from_date, till_date
),
params,
)
| # Licensed under the Upwork's API Terms of Use;
# you may not use this file except in compliance with the Terms.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author:: <NAME> (<EMAIL>)
# Copyright:: Copyright 2020(c) Upwork.com
# License:: See LICENSE.txt and TOS - https://developers.upwork.com/api-tos.html
class Api:
""" """
client = None
def __init__(self, client):
self.client = client
def get_by_company(self, company, from_date, till_date, params={}):
"""Get Workdays by Company
Parameters:
:param company:
:param from_date:
:param till_date:
:param params: (Default value = {})
"""
return self.client.get(
"/team/v3/workdays/companies/{0}/{1},{2}".format(
company, from_date, till_date
),
params,
)
def get_by_contract(self, contract, from_date, till_date, params={}):
"""Get Workdays by Contract
Parameters:
:param contract:
:param from_date:
:param till_date:
:param params: (Default value = {})
"""
return self.client.get(
"/team/v3/workdays/contracts/{0}/{1},{2}".format(
contract, from_date, till_date
),
params,
)
| en | 0.810003 | # Licensed under the Upwork's API Terms of Use; # you may not use this file except in compliance with the Terms. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author:: <NAME> (<EMAIL>) # Copyright:: Copyright 2020(c) Upwork.com # License:: See LICENSE.txt and TOS - https://developers.upwork.com/api-tos.html Get Workdays by Company Parameters: :param company: :param from_date: :param till_date: :param params: (Default value = {}) Get Workdays by Contract Parameters: :param contract: :param from_date: :param till_date: :param params: (Default value = {}) | 2.283705 | 2 |
fragmenstein/victor/_victor_validate.py | matteoferla/Fragmenstein | 41 | 6630988 | from rdkit import Chem
from ._victor_base import _VictorBase
from ..m_rmsd import mRSMD
class _VictorValidate(_VictorBase):
def validate(self, reference_mol: Chem.Mol):
"""
Get how well the results compare.
Alternative, do a docking with victor.dock() (-> Chem.Mol)
:param reference_mol: Crystal structure mol
:return:
"""
try:
# compare with reference mol
return mRSMD.from_other_annotated_mols(reference_mol,
self.hits,
self.monster.positioned_mol).mrmsd
except self.error_to_catch as err:
self.journal.error(f'{err.__class__.__name__}: {err} in validation step.')
pass
return float('nan')
| from rdkit import Chem
from ._victor_base import _VictorBase
from ..m_rmsd import mRSMD
class _VictorValidate(_VictorBase):
def validate(self, reference_mol: Chem.Mol):
"""
Get how well the results compare.
Alternative, do a docking with victor.dock() (-> Chem.Mol)
:param reference_mol: Crystal structure mol
:return:
"""
try:
# compare with reference mol
return mRSMD.from_other_annotated_mols(reference_mol,
self.hits,
self.monster.positioned_mol).mrmsd
except self.error_to_catch as err:
self.journal.error(f'{err.__class__.__name__}: {err} in validation step.')
pass
return float('nan')
| en | 0.600657 | Get how well the results compare. Alternative, do a docking with victor.dock() (-> Chem.Mol) :param reference_mol: Crystal structure mol :return: # compare with reference mol | 2.105664 | 2 |
The_ultimate_cloud_ops-install_kubernetes_with_kops-automate_jobs_with_jenkins/3_Tools/create_ec2_server_instance.py | spinningops/SpinningOps_Courses | 0 | 6630989 | <gh_stars>0
import boto3
ec2 = boto3.resource('ec2', region_name='us-east-1')
# create a new EC2 instance
instances = ec2.create_instances(
ImageId='ami-013f17f36f8b1fefb',
MinCount=1,
MaxCount=1,
InstanceType='t2.micro',
KeyName='SpinningOps_Key',
SecurityGroupIds=[
'<KEY>',
],
InstanceMarketOptions={
'MarketType': 'spot',
'SpotOptions': {
'MaxPrice': '0.0037,
'SpotInstanceType': 'one-time',
'InstanceInterruptionBehavior': 'terminate'
},
}
) | import boto3
ec2 = boto3.resource('ec2', region_name='us-east-1')
# create a new EC2 instance
instances = ec2.create_instances(
ImageId='ami-013f17f36f8b1fefb',
MinCount=1,
MaxCount=1,
InstanceType='t2.micro',
KeyName='SpinningOps_Key',
SecurityGroupIds=[
'<KEY>',
],
InstanceMarketOptions={
'MarketType': 'spot',
'SpotOptions': {
'MaxPrice': '0.0037,
'SpotInstanceType': 'one-time',
'InstanceInterruptionBehavior': 'terminate'
},
}
) | en | 0.251346 | # create a new EC2 instance | 2.242979 | 2 |
src/create_plot.py | thomasreolon/DeepfakeDetection | 2 | 6630990 | import os, pathlib
from videoanalizer import VideoAnalizer
os.chdir(pathlib.Path(__file__).parent.absolute())
vd = VideoAnalizer()
############ Plot videos in a graph
ROOT_DIR = '../test_data/videos'
SAVE_PATH= '../output' # where plots are saved
folders_list=[
# each sublist will have a different color in the plot
['real/ElonMusk/train'], # relative path from ROOT_DIR
['fake/ElonMusk'],
['real/Obama/train'],
['fake/Obama'],
['real/morez'],
['fake/morez'],
]
vd.plot_features(folders_list=folders_list, root_dir=ROOT_DIR, save_path=SAVE_PATH, plot_type='LDA')
| import os, pathlib
from videoanalizer import VideoAnalizer
os.chdir(pathlib.Path(__file__).parent.absolute())
vd = VideoAnalizer()
############ Plot videos in a graph
ROOT_DIR = '../test_data/videos'
SAVE_PATH= '../output' # where plots are saved
folders_list=[
# each sublist will have a different color in the plot
['real/ElonMusk/train'], # relative path from ROOT_DIR
['fake/ElonMusk'],
['real/Obama/train'],
['fake/Obama'],
['real/morez'],
['fake/morez'],
]
vd.plot_features(folders_list=folders_list, root_dir=ROOT_DIR, save_path=SAVE_PATH, plot_type='LDA')
| en | 0.890336 | ############ Plot videos in a graph # where plots are saved # each sublist will have a different color in the plot # relative path from ROOT_DIR | 2.250863 | 2 |
datascience/api/inspect.py | rlmwang/datascience-workspace | 0 | 6630991 | <gh_stars>0
import re
from inspect import Parameter, signature
from .typing import get_args, get_origin
def inspect_inputs(func):
"""
Inspects the signature of a function and returns its
input parameters as a dictionary.
"""
parameters = signature(func).parameters
return {
param.name: {
"dtype": get_type(param.annotation),
"value": None,
"default": get_default(param),
"required": get_required(param),
}
for param in parameters.values()
}
def inspect_output(func):
"""
Inspects the signature of a function and returns its
output parameters as a dictionary.
"""
anno = signature(func).return_annotation
name = getattr(anno, "__name__", None)
if name == "tuple":
args = get_args(anno)
else:
args = (anno,)
return {
f"output {k or ''}".strip(): {
"dtype": get_type(arg),
"value": None,
"default": None,
}
for k, arg in enumerate(args)
}
def get_required(param):
d = getattr(param, "default", Parameter.empty)
return d == Parameter.empty
def get_default(param):
d = getattr(param, "default", None)
d = None if d is Parameter.empty else d
return d
def get_type(anno):
origin = get_origin(anno)
if origin is not None:
return {
"name": camel_case(getattr(origin, "__name__", None)),
"args": tuple(get_type(a) for a in get_args(anno)),
}
name = camel_case(getattr(anno, "__name__", None))
if name is None:
name = str(anno)
elif name == "_empty":
name = None
if name == "ndarray":
return {
"name": "ndarray",
"args": tuple(anno.dtype.name),
}
return name
def camel_case(string):
if string is None:
return None
pattern = re.compile(r"(?<!^)(?=[A-Z])")
return re.sub(pattern, "_", string).lower()
| import re
from inspect import Parameter, signature
from .typing import get_args, get_origin
def inspect_inputs(func):
"""
Inspects the signature of a function and returns its
input parameters as a dictionary.
"""
parameters = signature(func).parameters
return {
param.name: {
"dtype": get_type(param.annotation),
"value": None,
"default": get_default(param),
"required": get_required(param),
}
for param in parameters.values()
}
def inspect_output(func):
"""
Inspects the signature of a function and returns its
output parameters as a dictionary.
"""
anno = signature(func).return_annotation
name = getattr(anno, "__name__", None)
if name == "tuple":
args = get_args(anno)
else:
args = (anno,)
return {
f"output {k or ''}".strip(): {
"dtype": get_type(arg),
"value": None,
"default": None,
}
for k, arg in enumerate(args)
}
def get_required(param):
d = getattr(param, "default", Parameter.empty)
return d == Parameter.empty
def get_default(param):
d = getattr(param, "default", None)
d = None if d is Parameter.empty else d
return d
def get_type(anno):
origin = get_origin(anno)
if origin is not None:
return {
"name": camel_case(getattr(origin, "__name__", None)),
"args": tuple(get_type(a) for a in get_args(anno)),
}
name = camel_case(getattr(anno, "__name__", None))
if name is None:
name = str(anno)
elif name == "_empty":
name = None
if name == "ndarray":
return {
"name": "ndarray",
"args": tuple(anno.dtype.name),
}
return name
def camel_case(string):
if string is None:
return None
pattern = re.compile(r"(?<!^)(?=[A-Z])")
return re.sub(pattern, "_", string).lower() | en | 0.78954 | Inspects the signature of a function and returns its input parameters as a dictionary. Inspects the signature of a function and returns its output parameters as a dictionary. | 2.870885 | 3 |
Analysis/EstimateTRAPPIST1Radius/estRad.py | dflemin3/trappist | 1 | 6630992 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Estimate the distribution of TRAPPIST-1's radius using our stellar mass posterior
distributions and the Delrez et al. (2018) density constraint following the
procedure outlined in Van Grootel et al. (2018).
Script output:
Radius [Rsun] = 0.120295 + 0.001951 - 0.001821
@author: <NAME>, 2019
@email: dflemin3 (at) uw (dot) edu
"""
import sys
import numpy as np
import pandas as pd
from scipy.stats import norm
from trappist import mcmcUtils
import matplotlib as mpl
import matplotlib.pyplot as plt
#Typical plot parameters that make for pretty plots
mpl.rcParams['font.size'] = 12.0
## for Palatino and other serif fonts use:
mpl.rc('font',**{'family':'serif'})
mpl.rc('text', usetex=True)
# CGS constants
MSUN = 1.988435e33 # mass of Sun in grams
RSUN = 6.957e10 # radius of Sun incm
RHOSUN = MSUN / (4./3. * np.pi * RSUN**3) # density of sun in g/cm^3
# Read in evolutionary tracks
chains = mcmcUtils.extractMCMCResults("../../Data/trappist1Fiducial.h5",
verbose=False, applyBurnin=True,
thinChains=True, blobsExist=False)
# Draw num samples
num = int(1.0e5) # Number of samples
# Draw mass samples with replacement in grams
masses = np.random.choice(chains[:,0], size=(num,), replace=True) * MSUN
# Draw density samples in g/cm^3 by approximating constraint as wide gaussian
rhos = norm.rvs(loc=51.1, scale=2.4, size=(num,)) * RHOSUN
# Compute radius via density equation: rho = M/V = M/(4/3 * pi * r^3)
# -> (rho/m * (4/3) * pi)^(1/3) = r, but convert to Rsun
rads = np.power(masses / (rhos * (4./3.) * np.pi), 1./3.) / RSUN
# Visualize final distribution, compute statistics of interest
rad = np.median(rads)
radPlus = np.percentile(rads, 84) - rad
radMinus = rad - np.percentile(rads, 16)
print("Radius [Rsun] = %lf + %lf - %lf" % (rad, radPlus, radMinus))
# Plot histogram
fig, ax = plt.subplots(figsize=(6,5))
# Plot histogram of samples
ax.hist(rads, bins="auto", color="C0", density=True, alpha=0.6);
ax.hist(rads, bins="auto", color="C0", density=True, histtype="step", lw=2.5);
# Overplot med, +/-
ax.axvline(rad, color="k", ls="--", lw=2.5, label="This Work")
ax.axvline(rad + radPlus, color="k", ls="--", lw=2.5)
ax.axvline(rad - radMinus, color="k", ls="--", lw=2.5)
# Overplot Van Grootel et al. (2018) constraints
ax.axvline(0.121, color="C1", ls="--", lw=2.5, label="Van Grootel et al. (2018)")
ax.axvline(0.121 + 0.003, color="C1", ls="--", lw=2.5)
ax.axvline(0.121 - 0.003, color="C1", ls="--", lw=2.5)
ax.set_ylabel("Density")
ax.set_xlabel(r"Radius [$R_{\odot}]$")
ax.legend(loc="best", framealpha=0.8, fontsize=10)
fig.tight_layout()
# Save!
if (sys.argv[1] == 'pdf'):
fig.savefig("estRad.pdf", bbox_inches="tight",
dpi=200)
if (sys.argv[1] == 'png'):
fig.savefig("estRad.png", bbox_inches="tight",
dpi=200)
# Done!
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Estimate the distribution of TRAPPIST-1's radius using our stellar mass posterior
distributions and the Delrez et al. (2018) density constraint following the
procedure outlined in Van Grootel et al. (2018).
Script output:
Radius [Rsun] = 0.120295 + 0.001951 - 0.001821
@author: <NAME>, 2019
@email: dflemin3 (at) uw (dot) edu
"""
import sys
import numpy as np
import pandas as pd
from scipy.stats import norm
from trappist import mcmcUtils
import matplotlib as mpl
import matplotlib.pyplot as plt
#Typical plot parameters that make for pretty plots
mpl.rcParams['font.size'] = 12.0
## for Palatino and other serif fonts use:
mpl.rc('font',**{'family':'serif'})
mpl.rc('text', usetex=True)
# CGS constants
MSUN = 1.988435e33 # mass of Sun in grams
RSUN = 6.957e10 # radius of Sun incm
RHOSUN = MSUN / (4./3. * np.pi * RSUN**3) # density of sun in g/cm^3
# Read in evolutionary tracks
chains = mcmcUtils.extractMCMCResults("../../Data/trappist1Fiducial.h5",
verbose=False, applyBurnin=True,
thinChains=True, blobsExist=False)
# Draw num samples
num = int(1.0e5) # Number of samples
# Draw mass samples with replacement in grams
masses = np.random.choice(chains[:,0], size=(num,), replace=True) * MSUN
# Draw density samples in g/cm^3 by approximating constraint as wide gaussian
rhos = norm.rvs(loc=51.1, scale=2.4, size=(num,)) * RHOSUN
# Compute radius via density equation: rho = M/V = M/(4/3 * pi * r^3)
# -> (rho/m * (4/3) * pi)^(1/3) = r, but convert to Rsun
rads = np.power(masses / (rhos * (4./3.) * np.pi), 1./3.) / RSUN
# Visualize final distribution, compute statistics of interest
rad = np.median(rads)
radPlus = np.percentile(rads, 84) - rad
radMinus = rad - np.percentile(rads, 16)
print("Radius [Rsun] = %lf + %lf - %lf" % (rad, radPlus, radMinus))
# Plot histogram
fig, ax = plt.subplots(figsize=(6,5))
# Plot histogram of samples
ax.hist(rads, bins="auto", color="C0", density=True, alpha=0.6);
ax.hist(rads, bins="auto", color="C0", density=True, histtype="step", lw=2.5);
# Overplot med, +/-
ax.axvline(rad, color="k", ls="--", lw=2.5, label="This Work")
ax.axvline(rad + radPlus, color="k", ls="--", lw=2.5)
ax.axvline(rad - radMinus, color="k", ls="--", lw=2.5)
# Overplot Van Grootel et al. (2018) constraints
ax.axvline(0.121, color="C1", ls="--", lw=2.5, label="Van Grootel et al. (2018)")
ax.axvline(0.121 + 0.003, color="C1", ls="--", lw=2.5)
ax.axvline(0.121 - 0.003, color="C1", ls="--", lw=2.5)
ax.set_ylabel("Density")
ax.set_xlabel(r"Radius [$R_{\odot}]$")
ax.legend(loc="best", framealpha=0.8, fontsize=10)
fig.tight_layout()
# Save!
if (sys.argv[1] == 'pdf'):
fig.savefig("estRad.pdf", bbox_inches="tight",
dpi=200)
if (sys.argv[1] == 'png'):
fig.savefig("estRad.png", bbox_inches="tight",
dpi=200)
# Done!
| en | 0.703606 | #!/usr/bin/env python # -*- coding: utf-8 -*- Estimate the distribution of TRAPPIST-1's radius using our stellar mass posterior distributions and the Delrez et al. (2018) density constraint following the procedure outlined in Van Grootel et al. (2018). Script output: Radius [Rsun] = 0.120295 + 0.001951 - 0.001821 @author: <NAME>, 2019 @email: dflemin3 (at) uw (dot) edu #Typical plot parameters that make for pretty plots ## for Palatino and other serif fonts use: # CGS constants # mass of Sun in grams # radius of Sun incm # density of sun in g/cm^3 # Read in evolutionary tracks # Draw num samples # Number of samples # Draw mass samples with replacement in grams # Draw density samples in g/cm^3 by approximating constraint as wide gaussian # Compute radius via density equation: rho = M/V = M/(4/3 * pi * r^3) # -> (rho/m * (4/3) * pi)^(1/3) = r, but convert to Rsun # Visualize final distribution, compute statistics of interest # Plot histogram # Plot histogram of samples # Overplot med, +/- # Overplot Van Grootel et al. (2018) constraints # Save! # Done! | 2.664402 | 3 |
test/test_room.py | DataDog/camplight | 1 | 6630993 | # -*- coding: utf-8 -*-
import os
import sys
camplight_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.insert(0, camplight_root)
import pytest
from httpretty import HTTPretty
from camplight import Request, Campfire, Room, MessageType, Sound
def campfire_url(path=''):
return 'https://foo.campfirenow.com' + path
def stub_get(path, *args, **kwargs):
HTTPretty.register_uri(HTTPretty.GET, campfire_url(path), *args, **kwargs)
def stub_post(path, *args, **kwargs):
HTTPretty.register_uri(HTTPretty.POST, campfire_url(path), *args, **kwargs)
def stub_put(path, *args, **kwargs):
HTTPretty.register_uri(HTTPretty.PUT, campfire_url(path), *args, **kwargs)
class TestRoom(object):
def setup_class(self):
HTTPretty.enable()
self.request = Request(campfire_url(), 'some_token')
self.campfire = Campfire(self.request)
self.room_id = 27121983
self.room = Room(self.request, self.room_id)
def teardown_class(self):
HTTPretty.disable()
def test_status(self):
stub_get('/room/%s.json' % self.room_id, body="""
{"room": {"name": "Danger", "topic": "No serious discussion"}}""")
room = self.room.status()
assert room['name'] == 'Danger'
assert room['topic'] == 'No serious discussion'
def test_recent(self):
stub_get('/room/%s/recent.json' % self.room_id, body="""
{"messages": [{"body": "Hello World", "type": "TextMessage"}]}""")
messages = self.room.recent()
assert len(messages) == 1
assert messages[0]['body'] == 'Hello World'
assert messages[0]['type'] == MessageType.TEXT
def test_transcript(self):
stub_get('/room/%s/transcript.json' % self.room_id, body="""
{"messages": [{"body": "Hello World", "type": "TextMessage"}]}""")
messages = self.room.transcript()
assert len(messages) == 1
assert messages[0]['body'] == 'Hello World'
assert messages[0]['type'] == MessageType.TEXT
def test_transcript_by_date(self):
date = '2013/08/12'
stub_get('/room/%s/transcript/%s.json' % (self.room_id, date), body="""
{"messages": [{"body": "Hello World", "type": "TextMessage"}]}""")
messages = self.room.transcript(date)
assert len(messages) == 1
assert messages[0]['body'] == 'Hello World'
assert messages[0]['type'] == MessageType.TEXT
def test_uploads(self):
stub_get('/room/%s/uploads.json' % self.room_id, body="""
{"uploads": [{"name": "file.png", "content_type": "image/png"}]}""")
uploads = self.room.uploads()
assert len(uploads) == 1
assert uploads[0]['name'] == 'file.png'
assert uploads[0]['content_type'] == 'image/png'
def test_join(self):
stub_post('/room/%s/join.json' % self.room_id, body='')
assert self.room.join() == None
def test_leave(self):
stub_post('/room/%s/leave.json' % self.room_id, body='')
assert self.room.leave() == None
def test_lock(self):
stub_post('/room/%s/lock.json' % self.room_id, body='')
assert self.room.lock() == None
def test_unlock(self):
stub_post('/room/%s/unlock.json' % self.room_id, body='')
assert self.room.unlock() == None
def test_speak(self):
body = b'{"message": {"body": "Hello World"}}'
stub_post('/room/%s/speak.json' % self.room_id, body=body)
message = self.room.speak('Hello World')
assert message['body'] == 'Hello World'
assert hasattr(message, 'type') == False
assert HTTPretty.last_request.body == body
def test_paste(self):
body = b'{"message": {"body": "Hello World", "type": "PasteMessage"}}'
stub_post('/room/%s/speak.json' % self.room_id, body=body)
message = self.room.paste('Hello World')
assert message['body'] == 'Hello World'
assert message['type'] == MessageType.PASTE
assert HTTPretty.last_request.body == body
def test_play(self):
body = b'{"message": {"body": "yeah", "type": "SoundMessage"}}'
stub_post('/room/%s/speak.json' % self.room_id, body=body)
message = self.room.play(Sound.YEAH)
assert message['body'] == Sound.YEAH
assert message['type'] == MessageType.SOUND
assert HTTPretty.last_request.body == body
def test_set_name(self):
stub_put('/room/%s.json' % self.room_id, body='')
assert self.room.set_name('Danger') == None
assert HTTPretty.last_request.body == b'{"room": {"name": "Danger"}}'
def test_set_topic(self):
stub_put('/room/%s.json' % self.room_id, body='')
assert self.room.set_topic('No serious discussion') == None
assert HTTPretty.last_request.body == \
b'{"room": {"topic": "No serious discussion"}}'
if __name__ == '__main__':
pytest.main(__file__)
| # -*- coding: utf-8 -*-
import os
import sys
camplight_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.insert(0, camplight_root)
import pytest
from httpretty import HTTPretty
from camplight import Request, Campfire, Room, MessageType, Sound
def campfire_url(path=''):
return 'https://foo.campfirenow.com' + path
def stub_get(path, *args, **kwargs):
HTTPretty.register_uri(HTTPretty.GET, campfire_url(path), *args, **kwargs)
def stub_post(path, *args, **kwargs):
HTTPretty.register_uri(HTTPretty.POST, campfire_url(path), *args, **kwargs)
def stub_put(path, *args, **kwargs):
HTTPretty.register_uri(HTTPretty.PUT, campfire_url(path), *args, **kwargs)
class TestRoom(object):
def setup_class(self):
HTTPretty.enable()
self.request = Request(campfire_url(), 'some_token')
self.campfire = Campfire(self.request)
self.room_id = 27121983
self.room = Room(self.request, self.room_id)
def teardown_class(self):
HTTPretty.disable()
def test_status(self):
stub_get('/room/%s.json' % self.room_id, body="""
{"room": {"name": "Danger", "topic": "No serious discussion"}}""")
room = self.room.status()
assert room['name'] == 'Danger'
assert room['topic'] == 'No serious discussion'
def test_recent(self):
stub_get('/room/%s/recent.json' % self.room_id, body="""
{"messages": [{"body": "Hello World", "type": "TextMessage"}]}""")
messages = self.room.recent()
assert len(messages) == 1
assert messages[0]['body'] == 'Hello World'
assert messages[0]['type'] == MessageType.TEXT
def test_transcript(self):
stub_get('/room/%s/transcript.json' % self.room_id, body="""
{"messages": [{"body": "Hello World", "type": "TextMessage"}]}""")
messages = self.room.transcript()
assert len(messages) == 1
assert messages[0]['body'] == 'Hello World'
assert messages[0]['type'] == MessageType.TEXT
def test_transcript_by_date(self):
date = '2013/08/12'
stub_get('/room/%s/transcript/%s.json' % (self.room_id, date), body="""
{"messages": [{"body": "Hello World", "type": "TextMessage"}]}""")
messages = self.room.transcript(date)
assert len(messages) == 1
assert messages[0]['body'] == 'Hello World'
assert messages[0]['type'] == MessageType.TEXT
def test_uploads(self):
stub_get('/room/%s/uploads.json' % self.room_id, body="""
{"uploads": [{"name": "file.png", "content_type": "image/png"}]}""")
uploads = self.room.uploads()
assert len(uploads) == 1
assert uploads[0]['name'] == 'file.png'
assert uploads[0]['content_type'] == 'image/png'
def test_join(self):
stub_post('/room/%s/join.json' % self.room_id, body='')
assert self.room.join() == None
def test_leave(self):
stub_post('/room/%s/leave.json' % self.room_id, body='')
assert self.room.leave() == None
def test_lock(self):
stub_post('/room/%s/lock.json' % self.room_id, body='')
assert self.room.lock() == None
def test_unlock(self):
stub_post('/room/%s/unlock.json' % self.room_id, body='')
assert self.room.unlock() == None
def test_speak(self):
body = b'{"message": {"body": "Hello World"}}'
stub_post('/room/%s/speak.json' % self.room_id, body=body)
message = self.room.speak('Hello World')
assert message['body'] == 'Hello World'
assert hasattr(message, 'type') == False
assert HTTPretty.last_request.body == body
def test_paste(self):
body = b'{"message": {"body": "Hello World", "type": "PasteMessage"}}'
stub_post('/room/%s/speak.json' % self.room_id, body=body)
message = self.room.paste('Hello World')
assert message['body'] == 'Hello World'
assert message['type'] == MessageType.PASTE
assert HTTPretty.last_request.body == body
def test_play(self):
body = b'{"message": {"body": "yeah", "type": "SoundMessage"}}'
stub_post('/room/%s/speak.json' % self.room_id, body=body)
message = self.room.play(Sound.YEAH)
assert message['body'] == Sound.YEAH
assert message['type'] == MessageType.SOUND
assert HTTPretty.last_request.body == body
def test_set_name(self):
stub_put('/room/%s.json' % self.room_id, body='')
assert self.room.set_name('Danger') == None
assert HTTPretty.last_request.body == b'{"room": {"name": "Danger"}}'
def test_set_topic(self):
stub_put('/room/%s.json' % self.room_id, body='')
assert self.room.set_topic('No serious discussion') == None
assert HTTPretty.last_request.body == \
b'{"room": {"topic": "No serious discussion"}}'
if __name__ == '__main__':
pytest.main(__file__)
| en | 0.242787 | # -*- coding: utf-8 -*- {"room": {"name": "Danger", "topic": "No serious discussion"}} {"messages": [{"body": "Hello World", "type": "TextMessage"}]} {"messages": [{"body": "Hello World", "type": "TextMessage"}]} {"messages": [{"body": "Hello World", "type": "TextMessage"}]} {"uploads": [{"name": "file.png", "content_type": "image/png"}]} | 2.173993 | 2 |
project_template/urls/api.py | armstrong/armstrong.templates.tutorial | 0 | 6630994 | """
Contains URL patterns for a basic API using `Tastypie`_.
.. _tastypie: https://github.com/toastdriven/django-tastypie
"""
from django.conf.urls.defaults import patterns, include, url
from apis.api import v1_api
urlpatterns = patterns('',
url(r'^api/', include(v1_api.urls)),
) | """
Contains URL patterns for a basic API using `Tastypie`_.
.. _tastypie: https://github.com/toastdriven/django-tastypie
"""
from django.conf.urls.defaults import patterns, include, url
from apis.api import v1_api
urlpatterns = patterns('',
url(r'^api/', include(v1_api.urls)),
) | en | 0.369242 | Contains URL patterns for a basic API using `Tastypie`_. .. _tastypie: https://github.com/toastdriven/django-tastypie | 1.793145 | 2 |
src/python/director/visualization.py | edrumwri/director | 0 | 6630995 | import director.objectmodel as om
import director.applogic as app
from .shallowCopy import shallowCopy
import director.vtkAll as vtk
from director import filterUtils
from director import transformUtils
from director import callbacks
from director import frameupdater
from director.fieldcontainer import FieldContainer
from PythonQt import QtCore, QtGui
import PythonQt
import numpy as np
import os
import colorsys
import weakref
import itertools
class PolyDataItem(om.ObjectModelItem):
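    """Object model item that renders a vtkPolyData dataset in one or more render views.

    The item wraps a vtkPolyDataMapper/vtkActor pair and exposes the common
    rendering options (solid color, alpha, point size, line width, surface
    mode, scalar coloring and an optional scalar bar) as object model
    properties.

    Illustrative usage (the view and the array name are assumed to exist):

        item = PolyDataItem('my data', polyData, view)
        item.setSolidColor([0.2, 0.6, 1.0])
        item.colorBy('distance', scalarRange=(0.0, 1.0))
    """

    # Default scalar ranges used when coloring by these well-known array names.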
defaultScalarRangeMap = {
# 'intensity' : (400, 4000),
'spindle_angle' : (0, 360),
'azimuth' : (-2.5, 2.5),
'scan_delta' : (0.0, 0.3),
'point distance to plane' : (-0.2, 0.2),
'normal angle to plane' : (0.0, 10.0),
}
def __init__(self, name, polyData, view):
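        """Create the mapper/actor pair for `polyData` and add it to `view` (which may be None)."""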
om.ObjectModelItem.__init__(self, name, om.Icons.Robot)
self.views = []
self.polyData = polyData
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInputData(self.polyData)
self.actor = vtk.vtkActor()
self.actor.SetMapper(self.mapper)
self.shadowActor = None
self.scalarBarWidget = None
self.extraViewRenderers = {}
self.rangeMap = dict(PolyDataItem.defaultScalarRangeMap)
self.addProperty('Color By', 0, attributes=om.PropertyAttributes(enumNames=['Solid Color']))
self.addProperty('Visible', True)
self.addProperty('Alpha', 1.0,
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1, hidden=False))
self.addProperty('Point Size', self.actor.GetProperty().GetPointSize(),
attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=20, singleStep=1, hidden=False))
self.addProperty('Line Width', self.actor.GetProperty().GetLineWidth(),
attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=20, singleStep=1, hidden=False))
self.addProperty('Surface Mode', 0,
attributes=om.PropertyAttributes(enumNames=['Surface', 'Wireframe', 'Surface with edges', 'Points'], hidden=True))
self.addProperty('Color', [1.0, 1.0, 1.0])
self.addProperty('Show Scalar Bar', False)
self._updateSurfaceProperty()
self._updateColorByProperty()
if view is not None:
self.addToView(view)
def _renderAllViews(self):
for view in self.views:
view.render()
def hasDataSet(self, dataSet):
return dataSet == self.polyData
def hasActor(self, actor):
return actor == self.actor
def setPolyData(self, polyData):
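        """Replace the rendered dataset, refresh the 'Color By' choices and re-apply the current coloring."""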
self.polyData = polyData
self.mapper.SetInputData(polyData)
self._updateSurfaceProperty()
self._updateColorByProperty()
self._updateColorBy(retainColorMap=True)
if self.getProperty('Visible'):
self._renderAllViews()
def setRangeMap(self, key, value):
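        """Override the scalar range associated with array `key` for this item."""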
self.rangeMap[key] = value
def getArrayNames(self):
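        """Return the names of the point data arrays available for scalar coloring."""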
pointData = self.polyData.GetPointData()
return [pointData.GetArrayName(i) for i in range(pointData.GetNumberOfArrays())]
def setSolidColor(self, color):
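        """Use a uniform RGB color (components in [0, 1]) and turn off scalar coloring."""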
self.setProperty('Color', [float(c) for c in color])
self.colorBy(None)
def _isPointCloud(self):
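        """Return True when every cell in the dataset is a vertex, i.e. a pure point cloud."""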
return self.polyData.GetNumberOfPoints() and (self.polyData.GetNumberOfCells() == self.polyData.GetNumberOfVerts())
def colorBy(self, arrayName, scalarRange=None, lut=None):
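        """Color the geometry by the named point data array.

        Passing an empty or None `arrayName` disables scalar coloring and
        reverts to the solid color. When no `lut` is supplied, a default
        lookup table is built for the array (optionally using `scalarRange`).

        Illustrative call (the array name is assumed to exist on the data):

            item.colorBy('distance', scalarRange=(0.0, 0.5))
        """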
if not arrayName:
self.mapper.ScalarVisibilityOff()
self.polyData.GetPointData().SetActiveScalars(None)
return
array = self.polyData.GetPointData().GetArray(arrayName)
if not array:
print('colorBy(%s): array not found' % arrayName)
self.mapper.ScalarVisibilityOff()
self.polyData.GetPointData().SetActiveScalars(None)
return
self.polyData.GetPointData().SetActiveScalars(arrayName)
if not lut:
lut = self._getDefaultColorMap(array, scalarRange)
#self.mapper.SetColorModeToMapScalars()
self.mapper.ScalarVisibilityOn()
self.mapper.SetUseLookupTableScalarRange(True)
self.mapper.SetLookupTable(lut)
self.mapper.SetInterpolateScalarsBeforeMapping(not self._isPointCloud())
if self.getProperty('Visible'):
self._renderAllViews()
def getChildFrame(self):
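        """Return the child frame item named '<Name> frame', or None if this item has no frame."""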
frameName = self.getProperty('Name') + ' frame'
return self.findChild(frameName)
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
view.renderer().AddActor(self.actor)
if self.shadowActor:
view.renderer().AddActor(self.shadowActor)
view.render()
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Point Size':
self.actor.GetProperty().SetPointSize(self.getProperty(propertyName))
elif propertyName == 'Line Width':
self.actor.GetProperty().SetLineWidth(self.getProperty(propertyName))
elif propertyName == 'Alpha':
self.actor.GetProperty().SetOpacity(self.getProperty(propertyName))
if self.shadowActor:
self.shadowActor.GetProperty().SetOpacity(self.getProperty(propertyName))
elif propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
if self.shadowActor:
self.shadowActor.SetVisibility(self.getProperty(propertyName))
elif propertyName == 'Surface Mode':
mode = self.properties.getPropertyEnumValue(propertyName)
prop = self.actor.GetProperty()
if mode == 'Surface':
prop.SetRepresentationToSurface()
prop.EdgeVisibilityOff()
            elif mode == 'Wireframe':
prop.SetRepresentationToWireframe()
elif mode == 'Surface with edges':
prop.SetRepresentationToSurface()
prop.EdgeVisibilityOn()
elif mode == 'Points':
prop.SetRepresentationToPoints()
elif propertyName == 'Color':
color = self.getProperty(propertyName)
self.actor.GetProperty().SetColor(color)
elif propertyName == 'Color By':
self._updateColorBy()
elif propertyName == 'Show Scalar Bar':
self._updateScalarBar()
self._renderAllViews()
def setScalarRange(self, rangeMin, rangeMax):
arrayName = self.getPropertyEnumValue('Color By')
if arrayName != 'Solid Color':
lut = self.mapper.GetLookupTable()
self.colorBy(arrayName, scalarRange=(rangeMin, rangeMax))
def _updateSurfaceProperty(self):
hasPolys = self.polyData.GetNumberOfPolys() or self.polyData.GetNumberOfStrips()
hasLines = self.polyData.GetNumberOfLines()
enableSurfaceMode = hasPolys or hasLines
self.properties.setPropertyAttribute('Surface Mode', 'hidden', not enableSurfaceMode)
enableLineWidth = enableSurfaceMode
self.properties.setPropertyAttribute('Line Width', 'hidden', not enableLineWidth)
enablePointSize = True
self.properties.setPropertyAttribute('Point Size', 'hidden', not enablePointSize)
def _updateColorBy(self, retainColorMap=False):
arrayName = self.getPropertyEnumValue('Color By')
if arrayName == 'Solid Color':
self.colorBy(None)
else:
lut = self.mapper.GetLookupTable() if retainColorMap else None
self.colorBy(arrayName, lut=lut)
self._updateScalarBar()
def _updateColorByProperty(self):
enumNames = ['Solid Color'] + self.getArrayNames()
currentValue = self.properties.getProperty('Color By')
if currentValue >= len(enumNames):
self.setProperty('Color By', 0)
self.properties.setPropertyAttribute('Color By', 'enumNames', enumNames)
def _updateScalarBar(self):
barEnabled = self.getProperty('Show Scalar Bar')
colorBy = self.getProperty('Color By')
if barEnabled and colorBy != 0:
self._showScalarBar()
else:
self._hideScalarBar()
def _hideScalarBar(self):
if self.scalarBarWidget:
self.scalarBarWidget.Off()
self.scalarBarWidget.SetInteractor(None)
self.scalarBarWidget = None
self._renderAllViews()
def _showScalarBar(self):
title = self.properties.getPropertyEnumValue('Color By')
view = self.views[0]
lut = self.mapper.GetLookupTable()
self.scalarBarWidget = createScalarBarWidget(view, lut, title)
self._renderAllViews()
def _setScalarBarTextColor(self, color=(0,0,0)):
act = self.scalarBarWidget.GetScalarBarActor()
act.GetTitleTextProperty().SetColor(color)
act.GetLabelTextProperty().SetColor(color)
def _setScalarBarTitle(self, titleText):
act = self.scalarBarWidget.GetScalarBarActor()
act.SetTitle(titleText)
def getCoolToWarmColorMap(self, scalarRange):
f = vtk.vtkDiscretizableColorTransferFunction()
f.DiscretizeOn()
f.SetColorSpaceToDiverging()
f.SetNumberOfValues(256)
f.AddRGBPoint(scalarRange[0], 0.23, 0.299, 0.754)
f.AddRGBPoint(scalarRange[1], 0.706, 0.016, 0.15)
f.Build()
return f
def _getDefaultColorMap(self, array, scalarRange=None, hueRange=None):
name = array.GetName()
blueToRed = (0.667, 0)
redtoBlue = (0, 0.667)
hueMap = {
'Axes' : redtoBlue
}
scalarRange = scalarRange or self.rangeMap.get(name, array.GetRange())
hueRange = hueRange or hueMap.get(name, blueToRed)
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
lut.SetHueRange(hueRange)
lut.SetRange(scalarRange)
lut.Build()
return lut
#return self.getCoolToWarmColorMap(scalarRange)
def shadowOn(self):
if self.shadowActor:
return
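        # Planar projection matrix: maps (x, y, z) to (x - z, y - z, 0), i.e. it
        # flattens the geometry onto z=0 with a shear, giving a cheap fake shadow.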
mat = [[1, 0, -1, 0],
[0, 1, -1, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]]
shadowT = transformUtils.getTransformFromNumpy(mat)
baseTransform = self.actor.GetUserTransform()
if baseTransform:
shadowT.PreMultiply()
shadowT.Concatenate(baseTransform)
self.shadowActor = vtk.vtkActor()
self.shadowActor.SetMapper(self.mapper)
self.shadowActor.SetUserTransform(shadowT)
self.shadowActor.GetProperty().LightingOff()
self.shadowActor.GetProperty().SetColor(0, 0, 0)
for view in self.views:
view.renderer().AddActor(self.shadowActor)
def shadowOff(self):
for view in self.views:
view.renderer().RemoveActor(self.shadowActor)
self.shadowActor = None
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
assert len(self.views) == 0
self._hideScalarBar()
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
if self.shadowActor:
view.renderer().RemoveActor(self.shadowActor)
for renderer in self.extraViewRenderers.get(view, []):
renderer.RemoveActor(self.actor)
view.render()
class Image2DItem(om.ObjectModelItem):
def __init__(self, name, image, view):
om.ObjectModelItem.__init__(self, name, om.Icons.Robot)
self.views = []
self.image = image
defaultWidth = 300
self.actor = vtk.vtkLogoRepresentation()
self.actor.SetImage(image)
self.actor.GetImageProperty().SetOpacity(1.0)
actors = vtk.vtkPropCollection()
self.actor.GetActors2D(actors)
self.texture = actors.GetItemAsObject(0).GetTexture()
self.addProperty('Visible', True)
self.addProperty('Anchor', 1,
attributes=om.PropertyAttributes(enumNames=['Top Left', 'Top Right', 'Bottom Left', 'Bottom Right']))
self.addProperty('Width', defaultWidth,
attributes=om.PropertyAttributes(minimum=0, maximum=9999, singleStep=50))
self.addProperty('Alpha', 1.0,
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1))
#defaultHeight = self._getHeightForWidth(defaultWidth)
#self.addProperty('Height', defaultHeight,
# attributes=om.PropertyAttributes(minimum=0, maximum=9999, singleStep=10))
if view is not None:
self.addToView(view)
def _renderAllViews(self):
for view in self.views:
view.render()
def hasDataSet(self, dataSet):
return dataSet == self.image
def hasActor(self, actor):
return actor == self.actor
def setImage(self, image):
self.image = image
self.actor.SetImage(image)
# also set the image on the texture, otherwise
# the texture input won't update until the next
# render where this actor is visible
self.texture.SetInputData(image)
if self.getProperty('Visible'):
self._renderAllViews()
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
self._updatePositionCoordinates(view)
view.renderer().AddActor(self.actor)
view.render()
def _getHeightForWidth(self, image, width):
w, h, _ = image.GetDimensions()
aspect = w/float(h)
return int(np.round(width / aspect))
def _updatePositionCoordinates(self, view):
width = self.getProperty('Width')
height = self._getHeightForWidth(self.image, width)
pc0 = vtk.vtkCoordinate()
pc1 = self.actor.GetPositionCoordinate()
pc2 = self.actor.GetPosition2Coordinate()
for pc in [pc0, pc1, pc2]:
pc.SetViewport(view.renderer())
pc0.SetReferenceCoordinate(None)
pc0.SetCoordinateSystemToNormalizedDisplay()
pc1.SetReferenceCoordinate(pc0)
pc1.SetCoordinateSystemToDisplay()
anchor = self.getPropertyEnumValue('Anchor')
if anchor == 'Top Left':
pc0.SetValue(0.0, 1.0)
pc1.SetValue(0.0, -height)
elif anchor == 'Top Right':
pc0.SetValue(1.0, 1.0)
pc1.SetValue(-width, -height)
elif anchor == 'Bottom Left':
pc0.SetValue(0.0, 0.0)
pc1.SetValue(0.0, 0.0)
elif anchor == 'Bottom Right':
pc0.SetValue(1.0, 0.0)
pc1.SetValue(-width, 0.0)
pc2.SetCoordinateSystemToDisplay()
pc2.SetReferenceCoordinate(pc1)
pc2.SetValue(width, height)
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Alpha':
self.actor.GetImageProperty().SetOpacity(self.getProperty(propertyName))
elif propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
elif propertyName in ('Width', 'Height', 'Anchor'):
if self.views:
self._updatePositionCoordinates(self.views[0])
self._renderAllViews()
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
assert len(self.views) == 0
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
view.render()
class TextItem(om.ObjectModelItem):
def __init__(self, name, text='', view=None):
om.ObjectModelItem.__init__(self, name)
self.views = []
self.actor = vtk.vtkTextActor()
prop = self.actor.GetTextProperty()
prop.SetFontSize(18)
self.actor.SetPosition(10,10)
self.actor.SetInput(text)
self.addProperty('Visible', True)
self.addProperty('Text', text)
self.addProperty('Position', [10, 10], attributes=om.PropertyAttributes(minimum=0, maximum=3000, singleStep=1))
self.addProperty('Font Size', 18, attributes=om.PropertyAttributes(minimum=6, maximum=128, singleStep=1))
self.addProperty('Bold', False)
self.addProperty('Italic', False)
if view:
self.addToView(view)
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
view.renderer().AddActor(self.actor)
view.render()
def _renderAllViews(self):
for view in self.views:
view.render()
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
view.render()
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
self._renderAllViews()
elif propertyName == 'Text':
view = app.getCurrentRenderView()
self.actor.SetInput(self.getProperty(propertyName))
elif propertyName == 'Position':
pos = self.getProperty(propertyName)
self.actor.SetPosition(pos[0], pos[1])
elif propertyName == 'Font Size':
self.actor.GetTextProperty().SetFontSize(self.getProperty(propertyName))
        elif propertyName == 'Bold':
self.actor.GetTextProperty().SetBold(self.getProperty(propertyName))
elif propertyName == 'Italic':
self.actor.GetTextProperty().SetItalic(self.getProperty(propertyName))
if self.getProperty('Visible'):
self._renderAllViews()
def updateText(text, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
        obj = showText(text, name, **kwargs)
else:
obj.setProperty('Text', text)
return obj
def showText(text, name, fontSize=18, position=(10, 10), parent=None, view=None):
view = view or app.getCurrentRenderView()
assert view
item = TextItem(name, text, view=view)
item.setProperty('Font Size', fontSize)
item.setProperty('Position', list(position))
om.addToObjectModel(item, getParentObj(parent))
return item
def updateImage(image, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
obj = showImage(image, name, **kwargs)
else:
obj.setImage(image)
return obj
def showImage(image, name, anchor='Top Left', parent=None, view=None):
view = view or app.getCurrentRenderView()
assert view
item = Image2DItem(name, image, view=view)
item.setProperty('Anchor', anchor)
om.addToObjectModel(item, getParentObj(parent))
return item
def createAxesPolyData(scale, useTube, tubeWidth=0.002):
axes = vtk.vtkAxes()
axes.SetComputeNormals(0)
axes.SetScaleFactor(scale)
axes.Update()
if useTube:
tube = vtk.vtkTubeFilter()
tube.SetInputConnection(axes.GetOutputPort())
tube.SetRadius(tubeWidth)
tube.SetNumberOfSides(12)
tube.Update()
axes = tube
return shallowCopy(axes.GetOutput())
class FrameItem(PolyDataItem):
def __init__(self, name, transform, view):
PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view)
self.transform = transform
self._blockSignals = False
self.actor.SetUserTransform(transform)
self.widget = vtk.vtkFrameWidget()
self.widget.CreateDefaultRepresentation()
self.widget.EnabledOff()
self.rep = self.widget.GetRepresentation()
self.rep.SetTransform(transform)
self.traceData = None
self._frameSync = None
self.addProperty('Scale', 1.0, attributes=om.PropertyAttributes(decimals=2, minimum=0.01, maximum=100, singleStep=0.1, hidden=False))
self.addProperty('Edit', False)
self.addProperty('Trace', False)
self.addProperty('Tube', False)
self.addProperty('Tube Width', 0.002, attributes=om.PropertyAttributes(decimals=3, minimum=0.001, maximum=10, singleStep=0.01, hidden=True))
self.properties.setPropertyIndex('Edit', 0)
self.properties.setPropertyIndex('Trace', 1)
self.properties.setPropertyIndex('Tube', 2)
self.callbacks.addSignal('FrameModified')
self.onTransformModifiedCallback = None
self.observerTag = self.transform.AddObserver('ModifiedEvent', self.onTransformModified)
self._updateAxesGeometry()
self.setProperty('Color By', 'Axes')
self.setProperty('Icon', om.Icons.Axes)
def connectFrameModified(self, func):
return self.callbacks.connect('FrameModified', func)
def disconnectFrameModified(self, callbackId):
self.callbacks.disconnect(callbackId)
def onTransformModified(self, transform, event):
if not self._blockSignals:
if self.onTransformModifiedCallback:
self.onTransformModifiedCallback(self)
self.callbacks.process('FrameModified', self)
def addToView(self, view):
PolyDataItem.addToView(self, view)
def hasDataSet(self, dataSet):
return dataSet == self.transform
def hasActor(self, actor):
return actor == self.widget.GetRepresentation() or PolyDataItem.hasActor(self, actor)
def copyFrame(self, transform):
self._blockSignals = True
self.transform.SetMatrix(transform.GetMatrix())
self._blockSignals = False
self.transform.Modified()
parent = self.parent()
if (parent and parent.getProperty('Visible')) or self.getProperty('Visible'):
self._renderAllViews()
def getFrameSync(self):
if self._frameSync is None:
self._frameSync = FrameSync()
self._frameSync.addFrame(self)
return self._frameSync
def _updateAxesGeometry(self):
scale = self.getProperty('Scale')
self.rep.SetWorldSize(scale)
self.setPolyData(createAxesPolyData(scale, self.getProperty('Tube'), self.getProperty('Tube Width')))
def _onPropertyChanged(self, propertySet, propertyName):
PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Scale':
scale = self.getProperty(propertyName)
self.rep.SetWorldSize(scale)
self._updateAxesGeometry()
elif propertyName == 'Edit':
view = app.getCurrentRenderView()
if view not in self.views:
view = self.views[0]
self.widget.SetInteractor(view.renderWindow().GetInteractor())
self.widget.SetEnabled(self.getProperty(propertyName))
isEditing = self.getProperty(propertyName)
if isEditing:
frameupdater.registerFrame(self)
elif propertyName == 'Trace':
trace = self.getProperty(propertyName)
if trace and not self.traceData:
self.traceData = FrameTraceVisualizer(self)
elif not trace and self.traceData:
om.removeFromObjectModel(self.traceData.getTraceData())
self.traceData = None
elif propertyName == 'Tube':
self.properties.setPropertyAttribute('Tube Width', 'hidden', not self.getProperty(propertyName))
self._updateAxesGeometry()
def onRemoveFromObjectModel(self):
PolyDataItem.onRemoveFromObjectModel(self)
self.transform.RemoveObserver(self.observerTag)
self.widget.SetInteractor(None)
self.widget.EnabledOff()
for view in self.views:
view.renderer().RemoveActor(self.actor)
view.render()
class FrameTraceVisualizer(object):
def __init__(self, frame):
self.frame = frame
self.traceName = '%s trace' % frame.getProperty('Name')
self.lastPosition = np.array(frame.transform.GetPosition())
frame.connectFrameModified(self.onFrameModified)
def getTraceData(self):
t = self.frame.findChild(self.traceName)
if not t:
pts = vtk.vtkPoints()
pts.SetDataTypeToDouble()
pts.InsertNextPoint(self.lastPosition)
pd = vtk.vtkPolyData()
pd.Allocate(1, 1)
pd.SetPoints(pts)
polyline = vtk.vtkPolyLine()
pd.InsertNextCell(polyline.GetCellType(), polyline.GetPointIds())
idArray = pd.GetLines().GetData()
idArray.InsertNextValue(0)
t = showPolyData(pd, self.traceName, parent=self.frame)
return t
def addPoint(self, point):
traceData = self.getTraceData()
pd = traceData.polyData
pd.GetPoints().InsertNextPoint(point)
numberOfPoints = pd.GetNumberOfPoints()
idArray = pd.GetLines().GetData()
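        # The legacy vtkCellArray layout is [npts, id0, id1, ...]: append the new
        # point id, then bump the point count stored in the first slot.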
idArray.InsertNextValue(numberOfPoints-1)
idArray.SetValue(0, numberOfPoints)
pd.GetPoints().Modified()
traceData._renderAllViews()
def onFrameModified(self, frame):
position = np.array(frame.transform.GetPosition())
if not np.allclose(position, self.lastPosition):
self.lastPosition = position
self.addPoint(position)
class FrameSync(object):
class FrameData(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __init__(self):
self.frames = {}
self._blockCallbacks = False
self._ids = itertools.count()
def addFrame(self, frame, ignoreIncoming=False):
if frame is None:
return
if self._findFrameId(frame) is not None:
return
frameId = next(self._ids)
callbackId = frame.connectFrameModified(self._onFrameModified)
self.frames[frameId] = FrameSync.FrameData(
ref=weakref.ref(frame),
baseTransform=self._computeBaseTransform(frame),
callbackId=callbackId,
ignoreIncoming=ignoreIncoming)
def removeFrame(self, frame):
frameId = self._findFrameId(frame)
if frameId is None:
raise KeyError(frame)
frame.disconnectFrameModified(self.frames[frameId].callbackId)
self._removeFrameId(frameId)
def _computeBaseTransform(self, frame):
currentDelta = None
for frameId, frameData in list(self.frames.items()):
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameData.ref() is frame:
continue
else:
currentDelta = transformUtils.copyFrame(frameData.baseTransform.GetLinearInverse())
currentDelta.Concatenate(transformUtils.copyFrame(frameData.ref().transform))
break
t = transformUtils.copyFrame(frame.transform)
t.PostMultiply()
if currentDelta:
t.Concatenate(currentDelta.GetLinearInverse())
return t
def _removeFrameId(self, frameId):
del self.frames[frameId]
def _findFrameId(self, frame):
for frameId, frameData in list(self.frames.items()):
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameData.ref() is frame:
return frameId
def _moveFrame(self, frameId, modifiedFrameId):
frameData = self.frames[frameId]
modifiedFrameData = self.frames[modifiedFrameId]
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(frameData.baseTransform)
t.Concatenate(modifiedFrameData.baseTransform.GetLinearInverse())
t.Concatenate(modifiedFrameData.ref().transform)
frameData.ref().copyFrame(t)
def _onFrameModified(self, frame):
if self._blockCallbacks:
return
modifiedFrameId = self._findFrameId(frame)
assert modifiedFrameId is not None
#print self, 'onFrameModified:', self.frames[modifiedFrameId].ref().getProperty('Name')
if self.frames[modifiedFrameId].ignoreIncoming:
self.frames[modifiedFrameId].baseTransform = self._computeBaseTransform(frame)
return
self._blockCallbacks = True
for frameId, frameData in list(self.frames.items()):
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameId != modifiedFrameId:
#print ' ', self, 'moving:', self.frames[frameId].ref().getProperty('Name')
self._moveFrame(frameId, modifiedFrameId)
self._blockCallbacks = False
def setCameraToParallelProjection(camera):
viewAngle = np.radians(camera.GetViewAngle())
viewDistance = np.linalg.norm(np.array(camera.GetFocalPoint()) - np.array(camera.GetPosition()))
desiredParallelScale = np.tan(viewAngle * 0.5) * viewDistance
camera.SetParallelScale(desiredParallelScale)
camera.ParallelProjectionOn()
def setCameraToPerspectiveProjection(camera):
parallelScale = camera.GetParallelScale()
viewAngle = np.radians(camera.GetViewAngle())
desiredViewDistance = parallelScale / np.tan(viewAngle * 0.5)
focalPoint = np.array(camera.GetFocalPoint())
desiredCameraPosition = focalPoint + desiredViewDistance * np.array(camera.GetViewPlaneNormal())
camera.SetPosition(desiredCameraPosition)
camera.ParallelProjectionOff()
class ViewOptionsItem(om.ObjectModelItem):
def __init__(self, view):
om.ObjectModelItem.__init__(self, 'view options')
self.view = view
self.addProperty('Camera projection', 0, attributes=om.PropertyAttributes(enumNames=['Perspective', 'Parallel']))
self.addProperty('View angle', view.camera().GetViewAngle(), attributes=om.PropertyAttributes(minimum=2, maximum=180))
self.addProperty('Key light intensity', view.lightKit().GetKeyLightIntensity(), attributes=om.PropertyAttributes(minimum=0, maximum=5, singleStep=0.1, decimals=2))
self.addProperty('Light kit', True)
self.addProperty('Eye dome lighting', False)
self.addProperty('Orientation widget', True)
self.addProperty('Interactive render', True)
self.addProperty('Gradient background', True)
self.addProperty('Background color', view.backgroundRenderer().GetBackground())
self.addProperty('Background color 2', view.backgroundRenderer().GetBackground2())
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName in ('Gradient background', 'Background color', 'Background color 2'):
colors = [self.getProperty('Background color'), self.getProperty('Background color 2')]
if not self.getProperty('Gradient background'):
colors[1] = colors[0]
self.view.renderer().SetBackground(colors[0])
self.view.renderer().SetBackground2(colors[1])
elif propertyName == 'Camera projection':
if self.getPropertyEnumValue(propertyName) == 'Perspective':
setCameraToPerspectiveProjection(self.view.camera())
else:
setCameraToParallelProjection(self.view.camera())
elif propertyName == 'Orientation widget':
if self.getProperty(propertyName):
self.view.orientationMarkerWidget().On()
else:
self.view.orientationMarkerWidget().Off()
elif propertyName == 'View angle':
angle = self.getProperty(propertyName)
self.view.camera().SetViewAngle(angle)
elif propertyName == 'Key light intensity':
intensity = self.getProperty(propertyName)
self.view.lightKit().SetKeyLightIntensity(intensity)
elif propertyName == 'Light kit':
self.view.setLightKitEnabled(self.getProperty(propertyName))
elif propertyName == 'Eye dome lighting':
if self.getProperty(propertyName):
enableEyeDomeLighting(self.view)
else:
disableEyeDomeLighting(self.view)
elif propertyName == 'Interactive render':
if self.getProperty(propertyName):
self.view.renderWindow().GetInteractor().EnableRenderOn()
else:
self.view.renderWindow().GetInteractor().EnableRenderOff()
self.view.render()
def getVisibleActors(view):
actors = view.renderer().GetActors()
return [actors.GetItemAsObject(i) for i in range(actors.GetNumberOfItems())
if actors.GetItemAsObject(i).GetVisibility()]
def computeViewBoundsNoGrid(view, gridObj):
gridObj.actor.SetUseBounds(False)
bounds = view.renderer().ComputeVisiblePropBounds()
gridObj.actor.SetUseBounds(True)
return bounds
def computeViewBoundsSoloGrid(view, gridObj):
actors = getVisibleActors(view)
onlyGridShowing = (len(actors) == 1) and (actors[0] == gridObj.actor)
if onlyGridShowing:
gridObj.actor.SetUseBounds(True)
return view.renderer().ComputeVisiblePropBounds()
else:
return computeViewBoundsNoGrid(view, gridObj)
class GridItem(PolyDataItem):
def __init__(self, name, view=None):
PolyDataItem.__init__(self, name, polyData=vtk.vtkPolyData(), view=view)
self.actor.PickableOff()
self.actor.GetProperty().LightingOff()
self.textActors = []
self.addProperty('Grid Half Width', 100.0, attributes=om.PropertyAttributes(minimum=0.01, maximum=1e6, singleStep=10, decimals=2))
self.addProperty('Major Tick Resolution', 10, attributes=om.PropertyAttributes(minimum=1, maximum=100, singleStep=1))
self.addProperty('Minor Tick Resolution', 2, attributes=om.PropertyAttributes(minimum=1, maximum=100, singleStep=1))
self.addProperty('Major Tick Rings', True)
self.addProperty('Minor Tick Rings', False)
self.addProperty('Show Text', True)
self.addProperty('Text Angle', 0,
attributes=om.PropertyAttributes(minimum=-999, maximum=999, singleStep=5))
self.addProperty('Text Size', 10, attributes=om.PropertyAttributes(minimum=4, maximum=100, singleStep=1))
self.addProperty('Text Color', [1.0, 1.0, 1.0])
self.addProperty('Text Alpha', 1.0,
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1))
self._updateGrid()
self.setProperty('Surface Mode', 'Wireframe')
def _onPropertyChanged(self, propertySet, propertyName):
PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName in ('Grid Half Width', 'Major Tick Resolution',
'Minor Tick Resolution', 'Major Tick Rings', 'Minor Tick Rings'):
self._updateGrid()
if propertyName in ('Visible', 'Show Text', 'Text Color', 'Text Alpha', 'Text Size', 'Text Angle'):
self._updateTextActorProperties()
def _updateGrid(self):
gridHalfWidth = self.getProperty('Grid Half Width')
majorTickSize = gridHalfWidth / self.getProperty('Major Tick Resolution')
minorTickSize = majorTickSize / self.getProperty('Minor Tick Resolution')
majorTickRings = self.getProperty('Major Tick Rings')
minorTickRings = self.getProperty('Minor Tick Rings')
polyData = makeGridPolyData(gridHalfWidth,
majorTickSize, minorTickSize,
majorTickRings, minorTickRings)
self.setPolyData(polyData)
self._buildTextActors()
def _updateTextActorProperties(self):
self._repositionTextActors()
visible = self.getProperty('Visible') and self.getProperty('Show Text')
textAlpha = self.getProperty('Text Alpha')
color = self.getProperty('Text Color')
textSize = self.getProperty('Text Size')
for actor in self.textActors:
prop = actor.GetTextProperty()
actor.SetVisibility(visible)
prop.SetColor(color)
prop.SetFontSize(textSize)
prop.SetOpacity(textAlpha)
def addToView(self, view):
if view in self.views:
return
PolyDataItem.addToView(self, view)
self._addTextActorsToView(view)
def _addTextActorsToView(self, view):
for actor in self.textActors:
view.renderer().AddActor(actor)
def _removeTextActorsFromView(self, view):
for actor in self.textActors:
view.renderer().RemoveActor(actor)
def _clearTextActors(self):
for view in self.views:
self._removeTextActorsFromView(view)
self.textActors = []
def _repositionTextActors(self):
if not self.textActors:
return
angle = np.radians(self.getProperty('Text Angle'))
sinAngle = np.sin(angle)
cosAngle = np.cos(angle)
gridHalfWidth = self.getProperty('Grid Half Width')
majorTickSize = gridHalfWidth / self.getProperty('Major Tick Resolution')
transform = self.actor.GetUserTransform() or vtk.vtkTransform()
for i, actor in enumerate(self.textActors):
distance = i * majorTickSize
prop = actor.GetTextProperty()
coord = actor.GetPositionCoordinate()
coord.SetCoordinateSystemToWorld()
p = transform.TransformPoint((distance*cosAngle, distance*sinAngle, 0.0))
coord.SetValue(p)
def _buildTextActors(self):
self._clearTextActors()
gridHalfWidth = self.getProperty('Grid Half Width')
majorTickSize = gridHalfWidth / self.getProperty('Major Tick Resolution')
suffix = 'm'
for i in range(int(gridHalfWidth / majorTickSize)):
ringDistance = i * majorTickSize
actor = vtk.vtkTextActor()
prop = actor.GetTextProperty()
actor.SetInput('{:.3f}'.format(ringDistance).rstrip('0').rstrip('.') + suffix)
actor.SetPickable(False)
self.textActors.append(actor)
self._updateTextActorProperties()
for view in self.views:
self._addTextActorsToView(view)
def showGrid(view, cellSize=0.5, numberOfCells=25, name='grid', parent='scene', color=[1,1,1], alpha=0.05, gridTransform=None, viewBoundsFunction=None):
gridObj = GridItem(name)
gridHalfWidth = cellSize * numberOfCells
gridObj.setProperty('Grid Half Width', gridHalfWidth)
gridObj.setProperty('Major Tick Resolution', numberOfCells)
gridObj.setProperty('Minor Tick Resolution', 1)
gridObj.setProperty('Show Text', False)
gridObj.setProperty('Major Tick Rings', False)
gridObj.setProperty('Minor Tick Rings', False)
gridObj.setProperty('Alpha', alpha)
gridObj.setProperty('Text Alpha', 0.5)
gridObj.addToView(view)
om.addToObjectModel(gridObj, getParentObj(parent))
gridFrame = addChildFrame(gridObj)
gridFrame.connectFrameModified(lambda x: gridObj._repositionTextActors())
gridFrame.setProperty('Scale', 1.0)
gridObj.viewBoundsFunction = viewBoundsFunction or computeViewBoundsNoGrid
gridObj.emptyBoundsSize = 1.0
def onViewBoundsRequest():
if view not in gridObj.views or not gridObj.getProperty('Visible'):
return
bounds = gridObj.viewBoundsFunction(view, gridObj)
if vtk.vtkMath.AreBoundsInitialized(bounds):
view.addCustomBounds(bounds)
else:
view.addCustomBounds(np.array([-1, 1, -1, 1, -1, 1]) * gridObj.emptyBoundsSize)
view.connect('computeBoundsRequest(ddQVTKWidgetView*)', onViewBoundsRequest)
return gridObj
def makeGridPolyData(gridHalfWidth=100,
majorTickSize=10.0, minorTickSize=1.0,
majorGridRings=True, minorGridRings=False):
majorGrid = vtk.vtkGridSource()
majorGrid.SetSurfaceEnabled(True)
majorGrid.SetArcsEnabled(majorGridRings)
majorGrid.SetGridSize(int(gridHalfWidth/majorTickSize))
majorGrid.SetScale(majorTickSize)
majorGrid.Update()
if minorTickSize != majorTickSize:
minorGrid = vtk.vtkGridSource()
minorGrid.SetSurfaceEnabled(False)
minorGrid.SetArcsEnabled(minorGridRings)
minorGrid.SetScale(minorTickSize)
minorGrid.SetGridSize(int(gridHalfWidth/minorTickSize))
minorGrid.Update()
return filterUtils.appendPolyData([majorGrid.GetOutput(), minorGrid.GetOutput()])
else:
return majorGrid.GetOutput()
def createScalarBarWidget(view, lookupTable, title):
w = vtk.vtkScalarBarWidget()
bar = w.GetScalarBarActor()
bar.SetTitle(title)
bar.SetLookupTable(lookupTable)
w.SetRepositionable(True)
w.SetInteractor(view.renderWindow().GetInteractor())
w.On()
rep = w.GetRepresentation()
rep.SetOrientation(0)
rep.SetPosition(0.77, 0.92)
rep.SetPosition2(0.20, 0.07)
return w
def getParentObj(parent):
if isinstance(parent, str):
return om.getOrCreateContainer(parent)
else:
return parent
def updatePolyData(polyData, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
obj = showPolyData(polyData, name, **kwargs)
else:
obj.setPolyData(polyData)
return obj
def updateFrame(frame, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
obj = showFrame(frame, name, **kwargs)
else:
obj.copyFrame(frame)
return obj
def showFrame(frame, name, view=None, parent='data', scale=0.35, visible=True, alpha=1.0):
view = view or app.getCurrentRenderView()
assert view
item = FrameItem(name, frame, view)
om.addToObjectModel(item, getParentObj(parent))
item.setProperty('Visible', visible)
item.setProperty('Alpha', alpha)
item.setProperty('Scale', scale)
return item
def showPolyData(polyData, name, color=None, colorByName=None, colorByRange=None, alpha=1.0, visible=True, view=None, parent='data', cls=None):
view = view or app.getCurrentRenderView()
assert view
cls = cls or PolyDataItem
item = cls(name, polyData, view)
om.addToObjectModel(item, getParentObj(parent))
item.setProperty('Visible', visible)
item.setProperty('Alpha', alpha)
if colorByName and colorByName not in item.getArrayNames():
print('showPolyData(colorByName=%s): array not found' % colorByName)
colorByName = None
if colorByName:
item.setProperty('Color By', colorByName)
item.colorBy(colorByName, colorByRange)
else:
color = [1.0, 1.0, 1.0] if color is None else color
item.setProperty('Color', [float(c) for c in color])
item.colorBy(None)
return item
def addChildFrame(obj, initialTransform=None):
'''
Adds a child frame to the given PolyDataItem. If initialTransform is given,
the object's polydata is transformed using the inverse of initialTransform
and then a child frame is assigned to the object to maintain its original
position.
'''
if obj.getChildFrame():
return obj.getChildFrame()
if initialTransform:
pd = filterUtils.transformPolyData(obj.polyData, initialTransform.GetLinearInverse())
obj.setPolyData(pd)
t = initialTransform
else:
t = obj.actor.GetUserTransform()
if t is None:
t = vtk.vtkTransform()
t.PostMultiply()
frame = showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False, view=None)
for view in obj.views:
frame.addToView(view)
obj.actor.SetUserTransform(t)
return frame
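# --- Usage sketch (illustrative, not part of the original module) ---
# Shows how addChildFrame attaches an editable frame to a visualized mesh.
# The vtkSphereSource input and the object name are assumptions made for this
# sketch only; it also assumes a running director app so that
# app.getCurrentRenderView() returns a valid view.
def _exampleAddChildFrame(view):
    source = vtk.vtkSphereSource()
    source.Update()
    obj = showPolyData(source.GetOutput(), 'example sphere', view=view)
    frame = addChildFrame(obj)
    # moving the frame now moves the object; enable the interactive widget
    frame.setProperty('Edit', True)
    return obj, frame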
def getRandomColor():
'''
    Return a random color as an (r, g, b) tuple with values between 0.0 and 1.0.
'''
return colorsys.hsv_to_rgb(np.random.rand(), 1.0, 0.9)
def showClusterObjects(clusters, parent):
colors = [ QtCore.Qt.red,
QtCore.Qt.blue,
QtCore.Qt.yellow,
QtCore.Qt.green,
QtCore.Qt.magenta,
QtCore.Qt.cyan,
QtCore.Qt.darkCyan,
QtCore.Qt.darkGreen,
QtCore.Qt.darkMagenta ]
colors = [QtGui.QColor(c) for c in colors]
colors = [(c.red()/255.0, c.green()/255.0, c.blue()/255.0) for c in colors]
objects = []
for i, cluster in enumerate(clusters):
name = 'object %d' % i
color = colors[i % len(colors)]
clusterObj = showPolyData(cluster.mesh, name, color=color, parent=parent, alpha=1.0)
clusterFrame = showFrame(cluster.frame, name + ' frame', scale=0.2, visible=False, parent=clusterObj)
clusterBox = showPolyData(cluster.box, name + ' box', color=color, parent=clusterObj, alpha=0.6, visible=False)
clusterPoints = showPolyData(cluster.points, name + ' points', color=color, parent=clusterObj, visible=False, alpha=1.0)
if hasattr(cluster,'oriented_frame'):
orientedFrame = showFrame(cluster.oriented_frame, name + ' oriented frame', scale=0.2, visible=False, parent=clusterObj)
clusterPoints.setProperty('Point Size', 7)
clusterPoints.colorBy(None)
clusterObj.data = cluster
objects.append(clusterObj)
for obj in [clusterObj, clusterBox, clusterPoints]:
obj.actor.SetUserTransform(cluster.frame)
return objects
captionWidget = None
def hideCaptionWidget():
global captionWidget
if captionWidget is not None:
captionWidget.Off()
captionWidget.Render()
def showCaptionWidget(position, text, view=None):
view = view or app.getCurrentRenderView()
assert view
global captionWidget
if not captionWidget:
rep = vtk.vtkCaptionRepresentation()
rep.SetPosition(0.2, 0.8)
w = vtk.vtkCaptionWidget()
w.SetInteractor(view.renderWindow().GetInteractor())
w.SetRepresentation(rep)
w.On()
captionWidget = w
rep = captionWidget.GetRepresentation()
rep.SetAnchorPosition(position)
rep.GetCaptionActor2D().SetCaption(text)
a = rep.GetCaptionActor2D()
pr = a.GetTextActor().GetTextProperty()
pr.SetJustificationToCentered()
pr.SetVerticalJustificationToCentered()
pr.SetItalic(0)
pr.SetBold(0)
pr.SetShadow(0)
pr.SetFontFamilyToArial()
c2 = rep.GetPosition2Coordinate()
c2.SetCoordinateSystemToDisplay()
c2.SetValue(12*len(text),30)
# disable border
#rep.SetShowBorder(0)
a.SetThreeDimensionalLeader(0)
a.SetLeaderGlyphSize(0.005)
captionWidget.On()
captionWidget.Render()
def getRayFromDisplayPoint(view, displayPoint):
'''
Given a view and an XY display point, returns two XYZ world points which
are the display point at the near/far clipping planes of the view.
'''
worldPt1 = [0,0,0,0]
worldPt2 = [0,0,0,0]
renderer = view.renderer()
vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 0, worldPt1)
vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 1, worldPt2)
worldPt1 = np.array(worldPt1[:3])
worldPt2 = np.array(worldPt2[:3])
return worldPt1, worldPt2
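# --- Usage sketch (illustrative, not part of the original module) ---
# Turns a display point into a normalized world-space ray for ray casting.
# The default display point (200, 150) is an arbitrary assumption for this sketch.
def _exampleDisplayPointRay(view, displayPoint=(200, 150)):
    nearPoint, farPoint = getRayFromDisplayPoint(view, displayPoint)
    rayOrigin = nearPoint
    rayDirection = farPoint - nearPoint
    rayDirection /= np.linalg.norm(rayDirection)
    return rayOrigin, rayDirection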
def pickImage(displayPoint, view, obj=None):
picker = vtk.vtkCellPicker()
if isinstance(obj, str):
obj = om.findObjectByName(obj)
assert obj
if obj:
picker.AddPickList(obj.actor)
picker.PickFromListOn()
picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer())
pickedDataset = picker.GetDataSet()
if obj:
return picker.GetPointIJK()
else:
return pickedDataset, picker.GetPointIJK()
def pickProp(displayPoint, view):
for tolerance in (0.0, 0.005, 0.01):
pickType = 'render' if tolerance == 0.0 else 'cells'
pickData = pickPoint(displayPoint, view, pickType=pickType, tolerance=tolerance)
pickedPoint = pickData.pickedPoint
pickedProp = pickData.pickedProp
pickedDataset = pickData.pickedDataset
if pickedProp is not None:
return pickedPoint, pickedProp, pickedDataset
return None, None, None
def pickPoint(displayPoint, view, obj=None, pickType='points', tolerance=0.01):
"""
:param displayPoint:
:param view:
:param obj:
:param pickType:
:param tolerance:
:return: FieldContainer with fields
pickedPoint
pickedProp
pickedDataset
        pickedNormal - is None if no normal can be computed
pickedCellId - is None unless pickType="cells"
"""
assert pickType in ('points', 'cells', 'render')
view = view or app.getCurrentRenderView()
assert view
if isinstance(obj, str):
obj = om.findObjectByName(obj)
assert obj
wasTexturedBackground = False
if pickType == 'render':
picker = vtk.vtkPropPicker()
wasTexturedBackground = view.renderer().GetTexturedBackground()
view.renderer().TexturedBackgroundOff()
else:
picker = vtk.vtkPointPicker() if pickType == 'points' else vtk.vtkCellPicker()
picker.SetTolerance(tolerance)
if obj is not None:
if isinstance(obj, list):
for o in obj:
picker.AddPickList(o.actor)
obj = None
else:
picker.AddPickList(obj.actor)
picker.PickFromListOn()
picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer())
if wasTexturedBackground:
view.renderer().TexturedBackgroundOn()
pickedProp = picker.GetViewProp()
pickedPoint = np.array(picker.GetPickPosition())
pickedDataset = pickedProp.GetMapper().GetInput() if isinstance(pickedProp, vtk.vtkActor) else None
if pickType == "cells":
pickedCellId = picker.GetCellId()
else:
pickedCellId = None
# populate pickedNormal if possible
pickedNormal = None
if pickType == 'cells':
pickedNormal = np.array(picker.GetPickNormal())
elif pickType == 'points' and pickedDataset:
pointId = picker.GetPointId()
normals = pickedDataset.GetPointData().GetNormals()
if normals:
pickedNormal = np.array(normals.GetTuple3(pointId))
#if pickedDataset and pickType == 'cells':
# print 'point id:', pickedDataset.GetCell(picker.GetCellId()).GetPointIds().GetId(picker.GetSubId())
#if pickType == 'points':
# print 'point id:', picker.GetPointId()
fields = FieldContainer(
pickedPoint=pickedPoint,
pickedProp=pickedProp,
pickedDataset=pickedDataset,
pickedNormal=pickedNormal,
pickedCellId=pickedCellId
)
return fields
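# --- Usage sketch (illustrative, not part of the original module) ---
# Demonstrates reading the FieldContainer returned by pickPoint. The display
# point and the 'cells' pick type are assumptions made for this sketch only.
def _examplePickAtDisplayPoint(view, displayPoint=(200, 150)):
    fields = pickPoint(displayPoint, view, pickType='cells', tolerance=0.01)
    if fields.pickedProp is not None:
        obj = getObjectByProp(fields.pickedProp)
        name = obj.getProperty('Name') if obj else '<unknown object>'
        print('picked %s at %s (cell id %s)' % (name, fields.pickedPoint, fields.pickedCellId))
    return fields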
def mapMousePosition(widget, mouseEvent):
mousePosition = mouseEvent.pos()
return mousePosition.x(), widget.height - mousePosition.y()
def getObjectByDataSet(dataSet):
if not dataSet:
return None
for obj in om.getObjects():
if obj.hasDataSet(dataSet):
return obj
def getObjectByProp(prop):
if not prop:
return None
for obj in om.getObjects():
if obj.hasActor(prop):
return obj
def findPickedObject(displayPoint, view):
pickedPoint, pickedProp, pickedDataset = pickProp(displayPoint, view)
obj = getObjectByProp(pickedProp) or getObjectByDataSet(pickedDataset)
return obj, pickedPoint
"""
Toggles whether anti-aliasing is enabled or not.
This sets a static variable in the ddQVTKWidgeView
so this controls the setting for all views created in the current
executable. Must be called before constructing a ddQTKWidgetView
Anti-aliasing is enabled by default
"""
def setAntiAliasing(enabled):
PythonQt.dd.ddQVTKWidgetView.setAntiAliasing(enabled)
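# --- Usage sketch (illustrative, not part of the original module) ---
# Anti-aliasing is a static, process-wide setting, so it has to be toggled
# before the first ddQVTKWidgetView is constructed (e.g. at application startup).
def _exampleDisableAntiAliasing():
    setAntiAliasing(False)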
def enableEyeDomeLighting(view):
standardPass = vtk.vtkRenderStepsPass()
edlPass = vtk.vtkEDLShading()
edlPass.SetDelegatePass(standardPass)
view.renderer().SetPass(edlPass)
def disableEyeDomeLighting(view):
view.renderer().SetPass(None)
def showQLabelImage(filename):
'''
    Returns a QLabel displaying the image contents of the given filename.
    Make sure to keep a reference to the returned label; it will be
    destroyed when it goes out of scope.
'''
image = QtGui.QImage(filename)
assert not image.isNull()
imageLabel = QtGui.QLabel()
imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
imageLabel.setScaledContents(True)
imageLabel.resize(imageLabel.pixmap.size())
imageLabel.setWindowTitle(os.path.basename(filename))
imageLabel.show()
return imageLabel
| import director.objectmodel as om
import director.applogic as app
from .shallowCopy import shallowCopy
import director.vtkAll as vtk
from director import filterUtils
from director import transformUtils
from director import callbacks
from director import frameupdater
from director.fieldcontainer import FieldContainer
from PythonQt import QtCore, QtGui
import PythonQt
import numpy as np
import os
import colorsys
import weakref
import itertools
class PolyDataItem(om.ObjectModelItem):
defaultScalarRangeMap = {
# 'intensity' : (400, 4000),
'spindle_angle' : (0, 360),
'azimuth' : (-2.5, 2.5),
'scan_delta' : (0.0, 0.3),
'point distance to plane' : (-0.2, 0.2),
'normal angle to plane' : (0.0, 10.0),
}
def __init__(self, name, polyData, view):
om.ObjectModelItem.__init__(self, name, om.Icons.Robot)
self.views = []
self.polyData = polyData
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInputData(self.polyData)
self.actor = vtk.vtkActor()
self.actor.SetMapper(self.mapper)
self.shadowActor = None
self.scalarBarWidget = None
self.extraViewRenderers = {}
self.rangeMap = dict(PolyDataItem.defaultScalarRangeMap)
self.addProperty('Color By', 0, attributes=om.PropertyAttributes(enumNames=['Solid Color']))
self.addProperty('Visible', True)
self.addProperty('Alpha', 1.0,
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1, hidden=False))
self.addProperty('Point Size', self.actor.GetProperty().GetPointSize(),
attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=20, singleStep=1, hidden=False))
self.addProperty('Line Width', self.actor.GetProperty().GetLineWidth(),
attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=20, singleStep=1, hidden=False))
self.addProperty('Surface Mode', 0,
attributes=om.PropertyAttributes(enumNames=['Surface', 'Wireframe', 'Surface with edges', 'Points'], hidden=True))
self.addProperty('Color', [1.0, 1.0, 1.0])
self.addProperty('Show Scalar Bar', False)
self._updateSurfaceProperty()
self._updateColorByProperty()
if view is not None:
self.addToView(view)
def _renderAllViews(self):
for view in self.views:
view.render()
def hasDataSet(self, dataSet):
return dataSet == self.polyData
def hasActor(self, actor):
return actor == self.actor
def setPolyData(self, polyData):
self.polyData = polyData
self.mapper.SetInputData(polyData)
self._updateSurfaceProperty()
self._updateColorByProperty()
self._updateColorBy(retainColorMap=True)
if self.getProperty('Visible'):
self._renderAllViews()
def setRangeMap(self, key, value):
self.rangeMap[key] = value
def getArrayNames(self):
pointData = self.polyData.GetPointData()
return [pointData.GetArrayName(i) for i in range(pointData.GetNumberOfArrays())]
def setSolidColor(self, color):
self.setProperty('Color', [float(c) for c in color])
self.colorBy(None)
def _isPointCloud(self):
return self.polyData.GetNumberOfPoints() and (self.polyData.GetNumberOfCells() == self.polyData.GetNumberOfVerts())
def colorBy(self, arrayName, scalarRange=None, lut=None):
if not arrayName:
self.mapper.ScalarVisibilityOff()
self.polyData.GetPointData().SetActiveScalars(None)
return
array = self.polyData.GetPointData().GetArray(arrayName)
if not array:
print('colorBy(%s): array not found' % arrayName)
self.mapper.ScalarVisibilityOff()
self.polyData.GetPointData().SetActiveScalars(None)
return
self.polyData.GetPointData().SetActiveScalars(arrayName)
if not lut:
lut = self._getDefaultColorMap(array, scalarRange)
#self.mapper.SetColorModeToMapScalars()
self.mapper.ScalarVisibilityOn()
self.mapper.SetUseLookupTableScalarRange(True)
self.mapper.SetLookupTable(lut)
self.mapper.SetInterpolateScalarsBeforeMapping(not self._isPointCloud())
if self.getProperty('Visible'):
self._renderAllViews()
def getChildFrame(self):
frameName = self.getProperty('Name') + ' frame'
return self.findChild(frameName)
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
view.renderer().AddActor(self.actor)
if self.shadowActor:
view.renderer().AddActor(self.shadowActor)
view.render()
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Point Size':
self.actor.GetProperty().SetPointSize(self.getProperty(propertyName))
elif propertyName == 'Line Width':
self.actor.GetProperty().SetLineWidth(self.getProperty(propertyName))
elif propertyName == 'Alpha':
self.actor.GetProperty().SetOpacity(self.getProperty(propertyName))
if self.shadowActor:
self.shadowActor.GetProperty().SetOpacity(self.getProperty(propertyName))
elif propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
if self.shadowActor:
self.shadowActor.SetVisibility(self.getProperty(propertyName))
elif propertyName == 'Surface Mode':
mode = self.properties.getPropertyEnumValue(propertyName)
prop = self.actor.GetProperty()
if mode == 'Surface':
prop.SetRepresentationToSurface()
prop.EdgeVisibilityOff()
            elif mode == 'Wireframe':
prop.SetRepresentationToWireframe()
elif mode == 'Surface with edges':
prop.SetRepresentationToSurface()
prop.EdgeVisibilityOn()
elif mode == 'Points':
prop.SetRepresentationToPoints()
elif propertyName == 'Color':
color = self.getProperty(propertyName)
self.actor.GetProperty().SetColor(color)
elif propertyName == 'Color By':
self._updateColorBy()
elif propertyName == 'Show Scalar Bar':
self._updateScalarBar()
self._renderAllViews()
def setScalarRange(self, rangeMin, rangeMax):
arrayName = self.getPropertyEnumValue('Color By')
if arrayName != 'Solid Color':
lut = self.mapper.GetLookupTable()
self.colorBy(arrayName, scalarRange=(rangeMin, rangeMax))
def _updateSurfaceProperty(self):
hasPolys = self.polyData.GetNumberOfPolys() or self.polyData.GetNumberOfStrips()
hasLines = self.polyData.GetNumberOfLines()
enableSurfaceMode = hasPolys or hasLines
self.properties.setPropertyAttribute('Surface Mode', 'hidden', not enableSurfaceMode)
enableLineWidth = enableSurfaceMode
self.properties.setPropertyAttribute('Line Width', 'hidden', not enableLineWidth)
enablePointSize = True
self.properties.setPropertyAttribute('Point Size', 'hidden', not enablePointSize)
def _updateColorBy(self, retainColorMap=False):
arrayName = self.getPropertyEnumValue('Color By')
if arrayName == 'Solid Color':
self.colorBy(None)
else:
lut = self.mapper.GetLookupTable() if retainColorMap else None
self.colorBy(arrayName, lut=lut)
self._updateScalarBar()
def _updateColorByProperty(self):
enumNames = ['Solid Color'] + self.getArrayNames()
currentValue = self.properties.getProperty('Color By')
if currentValue >= len(enumNames):
self.setProperty('Color By', 0)
self.properties.setPropertyAttribute('Color By', 'enumNames', enumNames)
def _updateScalarBar(self):
barEnabled = self.getProperty('Show Scalar Bar')
colorBy = self.getProperty('Color By')
if barEnabled and colorBy != 0:
self._showScalarBar()
else:
self._hideScalarBar()
def _hideScalarBar(self):
if self.scalarBarWidget:
self.scalarBarWidget.Off()
self.scalarBarWidget.SetInteractor(None)
self.scalarBarWidget = None
self._renderAllViews()
def _showScalarBar(self):
title = self.properties.getPropertyEnumValue('Color By')
view = self.views[0]
lut = self.mapper.GetLookupTable()
self.scalarBarWidget = createScalarBarWidget(view, lut, title)
self._renderAllViews()
def _setScalarBarTextColor(self, color=(0,0,0)):
act = self.scalarBarWidget.GetScalarBarActor()
act.GetTitleTextProperty().SetColor(color)
act.GetLabelTextProperty().SetColor(color)
def _setScalarBarTitle(self, titleText):
act = self.scalarBarWidget.GetScalarBarActor()
act.SetTitle(titleText)
def getCoolToWarmColorMap(self, scalarRange):
f = vtk.vtkDiscretizableColorTransferFunction()
f.DiscretizeOn()
f.SetColorSpaceToDiverging()
f.SetNumberOfValues(256)
f.AddRGBPoint(scalarRange[0], 0.23, 0.299, 0.754)
f.AddRGBPoint(scalarRange[1], 0.706, 0.016, 0.15)
f.Build()
return f
def _getDefaultColorMap(self, array, scalarRange=None, hueRange=None):
name = array.GetName()
blueToRed = (0.667, 0)
redtoBlue = (0, 0.667)
hueMap = {
'Axes' : redtoBlue
}
scalarRange = scalarRange or self.rangeMap.get(name, array.GetRange())
hueRange = hueRange or hueMap.get(name, blueToRed)
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
lut.SetHueRange(hueRange)
lut.SetRange(scalarRange)
lut.Build()
return lut
#return self.getCoolToWarmColorMap(scalarRange)
def shadowOn(self):
if self.shadowActor:
return
mat = [[1, 0, -1, 0],
[0, 1, -1, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]]
shadowT = transformUtils.getTransformFromNumpy(mat)
baseTransform = self.actor.GetUserTransform()
if baseTransform:
shadowT.PreMultiply()
shadowT.Concatenate(baseTransform)
self.shadowActor = vtk.vtkActor()
self.shadowActor.SetMapper(self.mapper)
self.shadowActor.SetUserTransform(shadowT)
self.shadowActor.GetProperty().LightingOff()
self.shadowActor.GetProperty().SetColor(0, 0, 0)
for view in self.views:
view.renderer().AddActor(self.shadowActor)
def shadowOff(self):
for view in self.views:
view.renderer().RemoveActor(self.shadowActor)
self.shadowActor = None
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
assert len(self.views) == 0
self._hideScalarBar()
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
if self.shadowActor:
view.renderer().RemoveActor(self.shadowActor)
for renderer in self.extraViewRenderers.get(view, []):
renderer.RemoveActor(self.actor)
view.render()
class Image2DItem(om.ObjectModelItem):
def __init__(self, name, image, view):
om.ObjectModelItem.__init__(self, name, om.Icons.Robot)
self.views = []
self.image = image
defaultWidth = 300
self.actor = vtk.vtkLogoRepresentation()
self.actor.SetImage(image)
self.actor.GetImageProperty().SetOpacity(1.0)
actors = vtk.vtkPropCollection()
self.actor.GetActors2D(actors)
self.texture = actors.GetItemAsObject(0).GetTexture()
self.addProperty('Visible', True)
self.addProperty('Anchor', 1,
attributes=om.PropertyAttributes(enumNames=['Top Left', 'Top Right', 'Bottom Left', 'Bottom Right']))
self.addProperty('Width', defaultWidth,
attributes=om.PropertyAttributes(minimum=0, maximum=9999, singleStep=50))
self.addProperty('Alpha', 1.0,
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1))
#defaultHeight = self._getHeightForWidth(defaultWidth)
#self.addProperty('Height', defaultHeight,
# attributes=om.PropertyAttributes(minimum=0, maximum=9999, singleStep=10))
if view is not None:
self.addToView(view)
def _renderAllViews(self):
for view in self.views:
view.render()
def hasDataSet(self, dataSet):
return dataSet == self.image
def hasActor(self, actor):
return actor == self.actor
def setImage(self, image):
self.image = image
self.actor.SetImage(image)
# also set the image on the texture, otherwise
# the texture input won't update until the next
# render where this actor is visible
self.texture.SetInputData(image)
if self.getProperty('Visible'):
self._renderAllViews()
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
self._updatePositionCoordinates(view)
view.renderer().AddActor(self.actor)
view.render()
def _getHeightForWidth(self, image, width):
w, h, _ = image.GetDimensions()
aspect = w/float(h)
return int(np.round(width / aspect))
def _updatePositionCoordinates(self, view):
width = self.getProperty('Width')
height = self._getHeightForWidth(self.image, width)
pc0 = vtk.vtkCoordinate()
pc1 = self.actor.GetPositionCoordinate()
pc2 = self.actor.GetPosition2Coordinate()
for pc in [pc0, pc1, pc2]:
pc.SetViewport(view.renderer())
pc0.SetReferenceCoordinate(None)
pc0.SetCoordinateSystemToNormalizedDisplay()
pc1.SetReferenceCoordinate(pc0)
pc1.SetCoordinateSystemToDisplay()
anchor = self.getPropertyEnumValue('Anchor')
if anchor == 'Top Left':
pc0.SetValue(0.0, 1.0)
pc1.SetValue(0.0, -height)
elif anchor == 'Top Right':
pc0.SetValue(1.0, 1.0)
pc1.SetValue(-width, -height)
elif anchor == 'Bottom Left':
pc0.SetValue(0.0, 0.0)
pc1.SetValue(0.0, 0.0)
elif anchor == 'Bottom Right':
pc0.SetValue(1.0, 0.0)
pc1.SetValue(-width, 0.0)
pc2.SetCoordinateSystemToDisplay()
pc2.SetReferenceCoordinate(pc1)
pc2.SetValue(width, height)
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Alpha':
self.actor.GetImageProperty().SetOpacity(self.getProperty(propertyName))
elif propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
elif propertyName in ('Width', 'Height', 'Anchor'):
if self.views:
self._updatePositionCoordinates(self.views[0])
self._renderAllViews()
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
assert len(self.views) == 0
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
view.render()
class TextItem(om.ObjectModelItem):
def __init__(self, name, text='', view=None):
om.ObjectModelItem.__init__(self, name)
self.views = []
self.actor = vtk.vtkTextActor()
prop = self.actor.GetTextProperty()
prop.SetFontSize(18)
self.actor.SetPosition(10,10)
self.actor.SetInput(text)
self.addProperty('Visible', True)
self.addProperty('Text', text)
self.addProperty('Position', [10, 10], attributes=om.PropertyAttributes(minimum=0, maximum=3000, singleStep=1))
self.addProperty('Font Size', 18, attributes=om.PropertyAttributes(minimum=6, maximum=128, singleStep=1))
self.addProperty('Bold', False)
self.addProperty('Italic', False)
if view:
self.addToView(view)
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
view.renderer().AddActor(self.actor)
view.render()
def _renderAllViews(self):
for view in self.views:
view.render()
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
view.render()
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
self._renderAllViews()
elif propertyName == 'Text':
view = app.getCurrentRenderView()
self.actor.SetInput(self.getProperty(propertyName))
elif propertyName == 'Position':
pos = self.getProperty(propertyName)
self.actor.SetPosition(pos[0], pos[1])
elif propertyName == 'Font Size':
self.actor.GetTextProperty().SetFontSize(self.getProperty(propertyName))
        elif propertyName == 'Bold':
self.actor.GetTextProperty().SetBold(self.getProperty(propertyName))
elif propertyName == 'Italic':
self.actor.GetTextProperty().SetItalic(self.getProperty(propertyName))
if self.getProperty('Visible'):
self._renderAllViews()
def updateText(text, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
        obj = showText(text, name, **kwargs)
else:
obj.setProperty('Text', text)
return obj
def showText(text, name, fontSize=18, position=(10, 10), parent=None, view=None):
view = view or app.getCurrentRenderView()
assert view
item = TextItem(name, text, view=view)
item.setProperty('Font Size', fontSize)
item.setProperty('Position', list(position))
om.addToObjectModel(item, getParentObj(parent))
return item
def updateImage(image, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
obj = showImage(image, name, **kwargs)
else:
obj.setImage(image)
return obj
def showImage(image, name, anchor='Top Left', parent=None, view=None):
view = view or app.getCurrentRenderView()
assert view
item = Image2DItem(name, image, view=view)
item.setProperty('Anchor', anchor)
om.addToObjectModel(item, getParentObj(parent))
return item
def createAxesPolyData(scale, useTube, tubeWidth=0.002):
axes = vtk.vtkAxes()
axes.SetComputeNormals(0)
axes.SetScaleFactor(scale)
axes.Update()
if useTube:
tube = vtk.vtkTubeFilter()
tube.SetInputConnection(axes.GetOutputPort())
tube.SetRadius(tubeWidth)
tube.SetNumberOfSides(12)
tube.Update()
axes = tube
return shallowCopy(axes.GetOutput())
class FrameItem(PolyDataItem):
def __init__(self, name, transform, view):
PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view)
self.transform = transform
self._blockSignals = False
self.actor.SetUserTransform(transform)
self.widget = vtk.vtkFrameWidget()
self.widget.CreateDefaultRepresentation()
self.widget.EnabledOff()
self.rep = self.widget.GetRepresentation()
self.rep.SetTransform(transform)
self.traceData = None
self._frameSync = None
self.addProperty('Scale', 1.0, attributes=om.PropertyAttributes(decimals=2, minimum=0.01, maximum=100, singleStep=0.1, hidden=False))
self.addProperty('Edit', False)
self.addProperty('Trace', False)
self.addProperty('Tube', False)
self.addProperty('Tube Width', 0.002, attributes=om.PropertyAttributes(decimals=3, minimum=0.001, maximum=10, singleStep=0.01, hidden=True))
self.properties.setPropertyIndex('Edit', 0)
self.properties.setPropertyIndex('Trace', 1)
self.properties.setPropertyIndex('Tube', 2)
self.callbacks.addSignal('FrameModified')
self.onTransformModifiedCallback = None
self.observerTag = self.transform.AddObserver('ModifiedEvent', self.onTransformModified)
self._updateAxesGeometry()
self.setProperty('Color By', 'Axes')
self.setProperty('Icon', om.Icons.Axes)
def connectFrameModified(self, func):
return self.callbacks.connect('FrameModified', func)
def disconnectFrameModified(self, callbackId):
self.callbacks.disconnect(callbackId)
def onTransformModified(self, transform, event):
if not self._blockSignals:
if self.onTransformModifiedCallback:
self.onTransformModifiedCallback(self)
self.callbacks.process('FrameModified', self)
def addToView(self, view):
PolyDataItem.addToView(self, view)
def hasDataSet(self, dataSet):
return dataSet == self.transform
def hasActor(self, actor):
return actor == self.widget.GetRepresentation() or PolyDataItem.hasActor(self, actor)
def copyFrame(self, transform):
self._blockSignals = True
self.transform.SetMatrix(transform.GetMatrix())
self._blockSignals = False
self.transform.Modified()
parent = self.parent()
if (parent and parent.getProperty('Visible')) or self.getProperty('Visible'):
self._renderAllViews()
def getFrameSync(self):
if self._frameSync is None:
self._frameSync = FrameSync()
self._frameSync.addFrame(self)
return self._frameSync
def _updateAxesGeometry(self):
scale = self.getProperty('Scale')
self.rep.SetWorldSize(scale)
self.setPolyData(createAxesPolyData(scale, self.getProperty('Tube'), self.getProperty('Tube Width')))
def _onPropertyChanged(self, propertySet, propertyName):
PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Scale':
scale = self.getProperty(propertyName)
self.rep.SetWorldSize(scale)
self._updateAxesGeometry()
elif propertyName == 'Edit':
view = app.getCurrentRenderView()
if view not in self.views:
view = self.views[0]
self.widget.SetInteractor(view.renderWindow().GetInteractor())
self.widget.SetEnabled(self.getProperty(propertyName))
isEditing = self.getProperty(propertyName)
if isEditing:
frameupdater.registerFrame(self)
elif propertyName == 'Trace':
trace = self.getProperty(propertyName)
if trace and not self.traceData:
self.traceData = FrameTraceVisualizer(self)
elif not trace and self.traceData:
om.removeFromObjectModel(self.traceData.getTraceData())
self.traceData = None
elif propertyName == 'Tube':
self.properties.setPropertyAttribute('Tube Width', 'hidden', not self.getProperty(propertyName))
self._updateAxesGeometry()
def onRemoveFromObjectModel(self):
PolyDataItem.onRemoveFromObjectModel(self)
self.transform.RemoveObserver(self.observerTag)
self.widget.SetInteractor(None)
self.widget.EnabledOff()
for view in self.views:
view.renderer().RemoveActor(self.actor)
view.render()
class FrameTraceVisualizer(object):
def __init__(self, frame):
self.frame = frame
self.traceName = '%s trace' % frame.getProperty('Name')
self.lastPosition = np.array(frame.transform.GetPosition())
frame.connectFrameModified(self.onFrameModified)
def getTraceData(self):
t = self.frame.findChild(self.traceName)
if not t:
pts = vtk.vtkPoints()
pts.SetDataTypeToDouble()
pts.InsertNextPoint(self.lastPosition)
pd = vtk.vtkPolyData()
pd.Allocate(1, 1)
pd.SetPoints(pts)
polyline = vtk.vtkPolyLine()
pd.InsertNextCell(polyline.GetCellType(), polyline.GetPointIds())
idArray = pd.GetLines().GetData()
idArray.InsertNextValue(0)
t = showPolyData(pd, self.traceName, parent=self.frame)
return t
def addPoint(self, point):
traceData = self.getTraceData()
pd = traceData.polyData
pd.GetPoints().InsertNextPoint(point)
numberOfPoints = pd.GetNumberOfPoints()
idArray = pd.GetLines().GetData()
idArray.InsertNextValue(numberOfPoints-1)
idArray.SetValue(0, numberOfPoints)
pd.GetPoints().Modified()
traceData._renderAllViews()
def onFrameModified(self, frame):
position = np.array(frame.transform.GetPosition())
if not np.allclose(position, self.lastPosition):
self.lastPosition = position
self.addPoint(position)
class FrameSync(object):
class FrameData(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __init__(self):
self.frames = {}
self._blockCallbacks = False
self._ids = itertools.count()
def addFrame(self, frame, ignoreIncoming=False):
if frame is None:
return
if self._findFrameId(frame) is not None:
return
frameId = next(self._ids)
callbackId = frame.connectFrameModified(self._onFrameModified)
self.frames[frameId] = FrameSync.FrameData(
ref=weakref.ref(frame),
baseTransform=self._computeBaseTransform(frame),
callbackId=callbackId,
ignoreIncoming=ignoreIncoming)
def removeFrame(self, frame):
frameId = self._findFrameId(frame)
if frameId is None:
raise KeyError(frame)
frame.disconnectFrameModified(self.frames[frameId].callbackId)
self._removeFrameId(frameId)
def _computeBaseTransform(self, frame):
currentDelta = None
for frameId, frameData in list(self.frames.items()):
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameData.ref() is frame:
continue
else:
currentDelta = transformUtils.copyFrame(frameData.baseTransform.GetLinearInverse())
currentDelta.Concatenate(transformUtils.copyFrame(frameData.ref().transform))
break
t = transformUtils.copyFrame(frame.transform)
t.PostMultiply()
if currentDelta:
t.Concatenate(currentDelta.GetLinearInverse())
return t
def _removeFrameId(self, frameId):
del self.frames[frameId]
def _findFrameId(self, frame):
for frameId, frameData in list(self.frames.items()):
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameData.ref() is frame:
return frameId
def _moveFrame(self, frameId, modifiedFrameId):
frameData = self.frames[frameId]
modifiedFrameData = self.frames[modifiedFrameId]
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(frameData.baseTransform)
t.Concatenate(modifiedFrameData.baseTransform.GetLinearInverse())
t.Concatenate(modifiedFrameData.ref().transform)
frameData.ref().copyFrame(t)
def _onFrameModified(self, frame):
if self._blockCallbacks:
return
modifiedFrameId = self._findFrameId(frame)
assert modifiedFrameId is not None
#print self, 'onFrameModified:', self.frames[modifiedFrameId].ref().getProperty('Name')
if self.frames[modifiedFrameId].ignoreIncoming:
self.frames[modifiedFrameId].baseTransform = self._computeBaseTransform(frame)
return
self._blockCallbacks = True
for frameId, frameData in list(self.frames.items()):
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameId != modifiedFrameId:
#print ' ', self, 'moving:', self.frames[frameId].ref().getProperty('Name')
self._moveFrame(frameId, modifiedFrameId)
self._blockCallbacks = False
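# Example (illustrative sketch): keeping two frames rigidly coupled.  After both
# frames are registered with the same FrameSync, editing either one moves the other
# so their relative offset is preserved.  A current render view is assumed.
def _exampleFrameSync():
    t1 = vtk.vtkTransform()
    t2 = vtk.vtkTransform()
    t2.Translate(1.0, 0.0, 0.0)
    frameA = showFrame(t1, 'frame A', scale=0.3)
    frameB = showFrame(t2, 'frame B', scale=0.3)
    frameSync = frameA.getFrameSync()
    frameSync.addFrame(frameB)
    # moving frame A now drags frame B along, keeping the 1.0 offset between them
    t1.Translate(0.0, 0.5, 0.0)
    t1.Modified()
    return frameSync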
def setCameraToParallelProjection(camera):
viewAngle = np.radians(camera.GetViewAngle())
viewDistance = np.linalg.norm(np.array(camera.GetFocalPoint()) - np.array(camera.GetPosition()))
desiredParallelScale = np.tan(viewAngle * 0.5) * viewDistance
camera.SetParallelScale(desiredParallelScale)
camera.ParallelProjectionOn()
def setCameraToPerspectiveProjection(camera):
parallelScale = camera.GetParallelScale()
viewAngle = np.radians(camera.GetViewAngle())
desiredViewDistance = parallelScale / np.tan(viewAngle * 0.5)
focalPoint = np.array(camera.GetFocalPoint())
desiredCameraPosition = focalPoint + desiredViewDistance * np.array(camera.GetViewPlaneNormal())
camera.SetPosition(desiredCameraPosition)
camera.ParallelProjectionOff()
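# Worked example (sketch): for a 30 degree view angle and a camera 4.0 units from its
# focal point, the parallel scale that preserves apparent size at the focal plane is
# tan(15 deg) * 4.0 ~= 1.07; switching back solves the same relation for the distance.
def _exampleProjectionToggle():
    camera = vtk.vtkCamera()
    camera.SetViewAngle(30.0)
    camera.SetPosition(0.0, 0.0, 4.0)
    camera.SetFocalPoint(0.0, 0.0, 0.0)
    setCameraToParallelProjection(camera)     # camera.GetParallelScale() ~= 1.07
    setCameraToPerspectiveProjection(camera)  # restores an equivalent perspective view
    return camera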
class ViewOptionsItem(om.ObjectModelItem):
def __init__(self, view):
om.ObjectModelItem.__init__(self, 'view options')
self.view = view
self.addProperty('Camera projection', 0, attributes=om.PropertyAttributes(enumNames=['Perspective', 'Parallel']))
self.addProperty('View angle', view.camera().GetViewAngle(), attributes=om.PropertyAttributes(minimum=2, maximum=180))
self.addProperty('Key light intensity', view.lightKit().GetKeyLightIntensity(), attributes=om.PropertyAttributes(minimum=0, maximum=5, singleStep=0.1, decimals=2))
self.addProperty('Light kit', True)
self.addProperty('Eye dome lighting', False)
self.addProperty('Orientation widget', True)
self.addProperty('Interactive render', True)
self.addProperty('Gradient background', True)
self.addProperty('Background color', view.backgroundRenderer().GetBackground())
self.addProperty('Background color 2', view.backgroundRenderer().GetBackground2())
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName in ('Gradient background', 'Background color', 'Background color 2'):
colors = [self.getProperty('Background color'), self.getProperty('Background color 2')]
if not self.getProperty('Gradient background'):
colors[1] = colors[0]
self.view.renderer().SetBackground(colors[0])
self.view.renderer().SetBackground2(colors[1])
elif propertyName == 'Camera projection':
if self.getPropertyEnumValue(propertyName) == 'Perspective':
setCameraToPerspectiveProjection(self.view.camera())
else:
setCameraToParallelProjection(self.view.camera())
elif propertyName == 'Orientation widget':
if self.getProperty(propertyName):
self.view.orientationMarkerWidget().On()
else:
self.view.orientationMarkerWidget().Off()
elif propertyName == 'View angle':
angle = self.getProperty(propertyName)
self.view.camera().SetViewAngle(angle)
elif propertyName == 'Key light intensity':
intensity = self.getProperty(propertyName)
self.view.lightKit().SetKeyLightIntensity(intensity)
elif propertyName == 'Light kit':
self.view.setLightKitEnabled(self.getProperty(propertyName))
elif propertyName == 'Eye dome lighting':
if self.getProperty(propertyName):
enableEyeDomeLighting(self.view)
else:
disableEyeDomeLighting(self.view)
elif propertyName == 'Interactive render':
if self.getProperty(propertyName):
self.view.renderWindow().GetInteractor().EnableRenderOn()
else:
self.view.renderWindow().GetInteractor().EnableRenderOff()
self.view.render()
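# Example (illustrative sketch): exposing the per-view options in the object model and
# driving a couple of them programmatically.  Enum properties accept their display
# names, as elsewhere in this module; a director render view is assumed.
def _exampleViewOptions():
    view = app.getCurrentRenderView()
    options = ViewOptionsItem(view)
    om.addToObjectModel(options)
    options.setProperty('Camera projection', 'Parallel')
    options.setProperty('Gradient background', False)
    return options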
def getVisibleActors(view):
actors = view.renderer().GetActors()
return [actors.GetItemAsObject(i) for i in range(actors.GetNumberOfItems())
if actors.GetItemAsObject(i).GetVisibility()]
def computeViewBoundsNoGrid(view, gridObj):
gridObj.actor.SetUseBounds(False)
bounds = view.renderer().ComputeVisiblePropBounds()
gridObj.actor.SetUseBounds(True)
return bounds
def computeViewBoundsSoloGrid(view, gridObj):
actors = getVisibleActors(view)
onlyGridShowing = (len(actors) == 1) and (actors[0] == gridObj.actor)
if onlyGridShowing:
gridObj.actor.SetUseBounds(True)
return view.renderer().ComputeVisiblePropBounds()
else:
return computeViewBoundsNoGrid(view, gridObj)
class GridItem(PolyDataItem):
def __init__(self, name, view=None):
PolyDataItem.__init__(self, name, polyData=vtk.vtkPolyData(), view=view)
self.actor.PickableOff()
self.actor.GetProperty().LightingOff()
self.textActors = []
self.addProperty('Grid Half Width', 100.0, attributes=om.PropertyAttributes(minimum=0.01, maximum=1e6, singleStep=10, decimals=2))
self.addProperty('Major Tick Resolution', 10, attributes=om.PropertyAttributes(minimum=1, maximum=100, singleStep=1))
self.addProperty('Minor Tick Resolution', 2, attributes=om.PropertyAttributes(minimum=1, maximum=100, singleStep=1))
self.addProperty('Major Tick Rings', True)
self.addProperty('Minor Tick Rings', False)
self.addProperty('Show Text', True)
self.addProperty('Text Angle', 0,
attributes=om.PropertyAttributes(minimum=-999, maximum=999, singleStep=5))
self.addProperty('Text Size', 10, attributes=om.PropertyAttributes(minimum=4, maximum=100, singleStep=1))
self.addProperty('Text Color', [1.0, 1.0, 1.0])
self.addProperty('Text Alpha', 1.0,
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1))
self._updateGrid()
self.setProperty('Surface Mode', 'Wireframe')
def _onPropertyChanged(self, propertySet, propertyName):
PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName in ('Grid Half Width', 'Major Tick Resolution',
'Minor Tick Resolution', 'Major Tick Rings', 'Minor Tick Rings'):
self._updateGrid()
if propertyName in ('Visible', 'Show Text', 'Text Color', 'Text Alpha', 'Text Size', 'Text Angle'):
self._updateTextActorProperties()
def _updateGrid(self):
gridHalfWidth = self.getProperty('Grid Half Width')
majorTickSize = gridHalfWidth / self.getProperty('Major Tick Resolution')
minorTickSize = majorTickSize / self.getProperty('Minor Tick Resolution')
majorTickRings = self.getProperty('Major Tick Rings')
minorTickRings = self.getProperty('Minor Tick Rings')
polyData = makeGridPolyData(gridHalfWidth,
majorTickSize, minorTickSize,
majorTickRings, minorTickRings)
self.setPolyData(polyData)
self._buildTextActors()
def _updateTextActorProperties(self):
self._repositionTextActors()
visible = self.getProperty('Visible') and self.getProperty('Show Text')
textAlpha = self.getProperty('Text Alpha')
color = self.getProperty('Text Color')
textSize = self.getProperty('Text Size')
for actor in self.textActors:
prop = actor.GetTextProperty()
actor.SetVisibility(visible)
prop.SetColor(color)
prop.SetFontSize(textSize)
prop.SetOpacity(textAlpha)
def addToView(self, view):
if view in self.views:
return
PolyDataItem.addToView(self, view)
self._addTextActorsToView(view)
def _addTextActorsToView(self, view):
for actor in self.textActors:
view.renderer().AddActor(actor)
def _removeTextActorsFromView(self, view):
for actor in self.textActors:
view.renderer().RemoveActor(actor)
def _clearTextActors(self):
for view in self.views:
self._removeTextActorsFromView(view)
self.textActors = []
def _repositionTextActors(self):
if not self.textActors:
return
angle = np.radians(self.getProperty('Text Angle'))
sinAngle = np.sin(angle)
cosAngle = np.cos(angle)
gridHalfWidth = self.getProperty('Grid Half Width')
majorTickSize = gridHalfWidth / self.getProperty('Major Tick Resolution')
transform = self.actor.GetUserTransform() or vtk.vtkTransform()
for i, actor in enumerate(self.textActors):
distance = i * majorTickSize
actor = self.textActors[i]
prop = actor.GetTextProperty()
coord = actor.GetPositionCoordinate()
coord.SetCoordinateSystemToWorld()
p = transform.TransformPoint((distance*cosAngle, distance*sinAngle, 0.0))
coord.SetValue(p)
def _buildTextActors(self):
self._clearTextActors()
gridHalfWidth = self.getProperty('Grid Half Width')
majorTickSize = gridHalfWidth / self.getProperty('Major Tick Resolution')
suffix = 'm'
for i in range(int(gridHalfWidth / majorTickSize)):
ringDistance = i * majorTickSize
actor = vtk.vtkTextActor()
prop = actor.GetTextProperty()
actor.SetInput('{:.3f}'.format(ringDistance).rstrip('0').rstrip('.') + suffix)
actor.SetPickable(False)
self.textActors.append(actor)
self._updateTextActorProperties()
for view in self.views:
self._addTextActorsToView(view)
def showGrid(view, cellSize=0.5, numberOfCells=25, name='grid', parent='scene', color=[1,1,1], alpha=0.05, gridTransform=None, viewBoundsFunction=None):
gridObj = GridItem(name)
gridHalfWidth = cellSize * numberOfCells
gridObj.setProperty('Grid Half Width', gridHalfWidth)
gridObj.setProperty('Major Tick Resolution', numberOfCells)
gridObj.setProperty('Minor Tick Resolution', 1)
gridObj.setProperty('Show Text', False)
gridObj.setProperty('Major Tick Rings', False)
gridObj.setProperty('Minor Tick Rings', False)
gridObj.setProperty('Alpha', alpha)
gridObj.setProperty('Text Alpha', 0.5)
gridObj.addToView(view)
om.addToObjectModel(gridObj, getParentObj(parent))
gridFrame = addChildFrame(gridObj)
gridFrame.connectFrameModified(lambda x: gridObj._repositionTextActors())
gridFrame.setProperty('Scale', 1.0)
gridObj.viewBoundsFunction = viewBoundsFunction or computeViewBoundsNoGrid
gridObj.emptyBoundsSize = 1.0
def onViewBoundsRequest():
if view not in gridObj.views or not gridObj.getProperty('Visible'):
return
bounds = gridObj.viewBoundsFunction(view, gridObj)
if vtk.vtkMath.AreBoundsInitialized(bounds):
view.addCustomBounds(bounds)
else:
view.addCustomBounds(np.array([-1, 1, -1, 1, -1, 1]) * gridObj.emptyBoundsSize)
view.connect('computeBoundsRequest(ddQVTKWidgetView*)', onViewBoundsRequest)
return gridObj
def makeGridPolyData(gridHalfWidth=100,
majorTickSize=10.0, minorTickSize=1.0,
majorGridRings=True, minorGridRings=False):
majorGrid = vtk.vtkGridSource()
majorGrid.SetSurfaceEnabled(True)
majorGrid.SetArcsEnabled(majorGridRings)
majorGrid.SetGridSize(int(gridHalfWidth/majorTickSize))
majorGrid.SetScale(majorTickSize)
majorGrid.Update()
if minorTickSize != majorTickSize:
minorGrid = vtk.vtkGridSource()
minorGrid.SetSurfaceEnabled(False)
minorGrid.SetArcsEnabled(minorGridRings)
minorGrid.SetScale(minorTickSize)
minorGrid.SetGridSize(int(gridHalfWidth/minorTickSize))
minorGrid.Update()
return filterUtils.appendPolyData([majorGrid.GetOutput(), minorGrid.GetOutput()])
else:
return majorGrid.GetOutput()
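# Example (illustrative sketch): a 10 unit half-width measurement grid with 1 unit
# major ticks and distance labels.  computeViewBoundsSoloGrid keeps the grid out of
# the reset-camera bounds unless it is the only thing visible.
def _exampleMeasurementGrid():
    view = app.getCurrentRenderView()
    grid = showGrid(view, cellSize=1.0, numberOfCells=10, alpha=0.1,
                    viewBoundsFunction=computeViewBoundsSoloGrid)
    grid.setProperty('Show Text', True)
    return grid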
def createScalarBarWidget(view, lookupTable, title):
w = vtk.vtkScalarBarWidget()
bar = w.GetScalarBarActor()
bar.SetTitle(title)
bar.SetLookupTable(lookupTable)
w.SetRepositionable(True)
w.SetInteractor(view.renderWindow().GetInteractor())
w.On()
rep = w.GetRepresentation()
rep.SetOrientation(0)
rep.SetPosition(0.77, 0.92)
rep.SetPosition2(0.20, 0.07)
return w
def getParentObj(parent):
if isinstance(parent, str):
return om.getOrCreateContainer(parent)
else:
return parent
def updatePolyData(polyData, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
obj = showPolyData(polyData, name, **kwargs)
else:
obj.setPolyData(polyData)
return obj
def updateFrame(frame, name, **kwargs):
obj = om.findObjectByName(name, parent=getParentObj(kwargs.get('parent')))
if obj is None:
obj = showFrame(frame, name, **kwargs)
else:
obj.copyFrame(frame)
return obj
def showFrame(frame, name, view=None, parent='data', scale=0.35, visible=True, alpha=1.0):
view = view or app.getCurrentRenderView()
assert view
item = FrameItem(name, frame, view)
om.addToObjectModel(item, getParentObj(parent))
item.setProperty('Visible', visible)
item.setProperty('Alpha', alpha)
item.setProperty('Scale', scale)
return item
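# Example (illustrative sketch): publishing a pose under a stable name so repeated
# updates reuse the same FrameItem instead of accumulating new ones.
def _exampleUpdateFrame():
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.RotateZ(45.0)
    t.Translate(0.5, 0.0, 1.0)
    frame = updateFrame(t, 'target pose', scale=0.5)
    # a later call with the same name copies the new transform into the existing item
    t2 = vtk.vtkTransform()
    t2.Translate(0.6, 0.0, 1.0)
    updateFrame(t2, 'target pose')
    return frame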
def showPolyData(polyData, name, color=None, colorByName=None, colorByRange=None, alpha=1.0, visible=True, view=None, parent='data', cls=None):
view = view or app.getCurrentRenderView()
assert view
cls = cls or PolyDataItem
item = cls(name, polyData, view)
om.addToObjectModel(item, getParentObj(parent))
item.setProperty('Visible', visible)
item.setProperty('Alpha', alpha)
if colorByName and colorByName not in item.getArrayNames():
print('showPolyData(colorByName=%s): array not found' % colorByName)
colorByName = None
if colorByName:
item.setProperty('Color By', colorByName)
item.colorBy(colorByName, colorByRange)
else:
color = [1.0, 1.0, 1.0] if color is None else color
item.setProperty('Color', [float(c) for c in color])
item.colorBy(None)
return item
def addChildFrame(obj, initialTransform=None):
'''
Adds a child frame to the given PolyDataItem. If initialTransform is given,
the object's polydata is transformed using the inverse of initialTransform
and then a child frame is assigned to the object to maintain its original
position.
'''
if obj.getChildFrame():
return obj.getChildFrame()
if initialTransform:
pd = filterUtils.transformPolyData(obj.polyData, initialTransform.GetLinearInverse())
obj.setPolyData(pd)
t = initialTransform
else:
t = obj.actor.GetUserTransform()
if t is None:
t = vtk.vtkTransform()
t.PostMultiply()
frame = showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False, view=None)
for view in obj.views:
frame.addToView(view)
obj.actor.SetUserTransform(t)
return frame
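# Example (illustrative sketch): attaching an editable child frame to a polydata
# object so it can be repositioned interactively.  Any vtkPolyData works; a cube
# source keeps the sketch self-contained.
def _exampleChildFrame():
    cube = vtk.vtkCubeSource()
    cube.Update()
    obj = showPolyData(cube.GetOutput(), 'example cube', color=[0.2, 0.6, 1.0])
    frame = addChildFrame(obj)
    frame.setProperty('Edit', True)  # enables the interactive frame widget
    return obj, frame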
def getRandomColor():
'''
Return a random color as a list of RGB values between 0.0 and 1.0.
'''
return colorsys.hsv_to_rgb(np.random.rand(), 1.0, 0.9)
def showClusterObjects(clusters, parent):
colors = [ QtCore.Qt.red,
QtCore.Qt.blue,
QtCore.Qt.yellow,
QtCore.Qt.green,
QtCore.Qt.magenta,
QtCore.Qt.cyan,
QtCore.Qt.darkCyan,
QtCore.Qt.darkGreen,
QtCore.Qt.darkMagenta ]
colors = [QtGui.QColor(c) for c in colors]
colors = [(c.red()/255.0, c.green()/255.0, c.blue()/255.0) for c in colors]
objects = []
for i, cluster in enumerate(clusters):
name = 'object %d' % i
color = colors[i % len(colors)]
clusterObj = showPolyData(cluster.mesh, name, color=color, parent=parent, alpha=1.0)
clusterFrame = showFrame(cluster.frame, name + ' frame', scale=0.2, visible=False, parent=clusterObj)
clusterBox = showPolyData(cluster.box, name + ' box', color=color, parent=clusterObj, alpha=0.6, visible=False)
clusterPoints = showPolyData(cluster.points, name + ' points', color=color, parent=clusterObj, visible=False, alpha=1.0)
if hasattr(cluster,'oriented_frame'):
orientedFrame = showFrame(cluster.oriented_frame, name + ' oriented frame', scale=0.2, visible=False, parent=clusterObj)
clusterPoints.setProperty('Point Size', 7)
clusterPoints.colorBy(None)
clusterObj.data = cluster
objects.append(clusterObj)
for obj in [clusterObj, clusterBox, clusterPoints]:
obj.actor.SetUserTransform(cluster.frame)
return objects
captionWidget = None
def hideCaptionWidget():
global captionWidget
if captionWidget is not None:
captionWidget.Off()
captionWidget.Render()
def showCaptionWidget(position, text, view=None):
view = view or app.getCurrentRenderView()
assert view
global captionWidget
if not captionWidget:
rep = vtk.vtkCaptionRepresentation()
rep.SetPosition(0.2, 0.8)
w = vtk.vtkCaptionWidget()
w.SetInteractor(view.renderWindow().GetInteractor())
w.SetRepresentation(rep)
w.On()
captionWidget = w
rep = captionWidget.GetRepresentation()
rep.SetAnchorPosition(position)
rep.GetCaptionActor2D().SetCaption(text)
a = rep.GetCaptionActor2D()
pr = a.GetTextActor().GetTextProperty()
pr.SetJustificationToCentered()
pr.SetVerticalJustificationToCentered()
pr.SetItalic(0)
pr.SetBold(0)
pr.SetShadow(0)
pr.SetFontFamilyToArial()
c2 = rep.GetPosition2Coordinate()
c2.SetCoordinateSystemToDisplay()
c2.SetValue(12*len(text),30)
# disable border
#rep.SetShowBorder(0)
a.SetThreeDimensionalLeader(0)
a.SetLeaderGlyphSize(0.005)
captionWidget.On()
captionWidget.Render()
def getRayFromDisplayPoint(view, displayPoint):
'''
Given a view and an XY display point, returns two XYZ world points which
are the display point at the near/far clipping planes of the view.
'''
worldPt1 = [0,0,0,0]
worldPt2 = [0,0,0,0]
renderer = view.renderer()
vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 0, worldPt1)
vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 1, worldPt2)
worldPt1 = np.array(worldPt1[:3])
worldPt2 = np.array(worldPt2[:3])
return worldPt1, worldPt2
def pickImage(displayPoint, view, obj=None):
picker = vtk.vtkCellPicker()
if isinstance(obj, str):
obj = om.findObjectByName(obj)
assert obj
if obj:
picker.AddPickList(obj.actor)
picker.PickFromListOn()
picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer())
pickedDataset = picker.GetDataSet()
if obj:
return picker.GetPointIJK()
else:
return pickedDataset, picker.GetPointIJK()
def pickProp(displayPoint, view):
for tolerance in (0.0, 0.005, 0.01):
pickType = 'render' if tolerance == 0.0 else 'cells'
pickData = pickPoint(displayPoint, view, pickType=pickType, tolerance=tolerance)
pickedPoint = pickData.pickedPoint
pickedProp = pickData.pickedProp
pickedDataset = pickData.pickedDataset
if pickedProp is not None:
return pickedPoint, pickedProp, pickedDataset
return None, None, None
def pickPoint(displayPoint, view, obj=None, pickType='points', tolerance=0.01):
"""
:param displayPoint: (x, y) pixel coordinate in the view
:param view: render view to pick in; defaults to the current render view
:param obj: optional object, list of objects, or object name to restrict the pick to
:param pickType: one of 'points', 'cells' or 'render'
:param tolerance: pick tolerance passed to the point/cell picker
:return: FieldContainer with fields
pickedPoint
pickedProp
pickedDataset
pickedNormal - is None if no normal can be computed
pickedCellId - is None unless pickType="cells"
"""
assert pickType in ('points', 'cells', 'render')
view = view or app.getCurrentRenderView()
assert view
if isinstance(obj, str):
obj = om.findObjectByName(obj)
assert obj
wasTexturedBackground = False
if pickType == 'render':
picker = vtk.vtkPropPicker()
wasTexturedBackground = view.renderer().GetTexturedBackground()
view.renderer().TexturedBackgroundOff()
else:
picker = vtk.vtkPointPicker() if pickType == 'points' else vtk.vtkCellPicker()
picker.SetTolerance(tolerance)
if obj is not None:
if isinstance(obj, list):
for o in obj:
picker.AddPickList(o.actor)
obj = None
else:
picker.AddPickList(obj.actor)
picker.PickFromListOn()
picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer())
if wasTexturedBackground:
view.renderer().TexturedBackgroundOn()
pickedProp = picker.GetViewProp()
pickedPoint = np.array(picker.GetPickPosition())
pickedDataset = pickedProp.GetMapper().GetInput() if isinstance(pickedProp, vtk.vtkActor) else None
if pickType == "cells":
pickedCellId = picker.GetCellId()
else:
pickedCellId = None
# populate pickedNormal if possible
pickedNormal = None
if pickType == 'cells':
pickedNormal = np.array(picker.GetPickNormal())
elif pickType == 'points' and pickedDataset:
pointId = picker.GetPointId()
normals = pickedDataset.GetPointData().GetNormals()
if normals:
pickedNormal = np.array(normals.GetTuple3(pointId))
#if pickedDataset and pickType == 'cells':
# print 'point id:', pickedDataset.GetCell(picker.GetCellId()).GetPointIds().GetId(picker.GetSubId())
#if pickType == 'points':
# print 'point id:', picker.GetPointId()
fields = FieldContainer(
pickedPoint=pickedPoint,
pickedProp=pickedProp,
pickedDataset=pickedDataset,
pickedNormal=pickedNormal,
pickedCellId=pickedCellId
)
return fields
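# Example (illustrative sketch): picking the object under a pixel coordinate.  The
# display point normally comes from a Qt mouse event via mapMousePosition; a fixed
# coordinate is used here for brevity and a current render view is assumed.
def _examplePickUnderCursor(displayPoint=(200, 150)):
    view = app.getCurrentRenderView()
    fields = pickPoint(displayPoint, view, pickType='cells', tolerance=0.005)
    if fields.pickedDataset is not None:
        obj = getObjectByDataSet(fields.pickedDataset)
        if obj is not None:
            print('picked %s at %s' % (obj.getProperty('Name'), fields.pickedPoint))
    return fields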
def mapMousePosition(widget, mouseEvent):
mousePosition = mouseEvent.pos()
return mousePosition.x(), widget.height - mousePosition.y()
def getObjectByDataSet(dataSet):
if not dataSet:
return None
for obj in om.getObjects():
if obj.hasDataSet(dataSet):
return obj
def getObjectByProp(prop):
if not prop:
return None
for obj in om.getObjects():
if obj.hasActor(prop):
return obj
def findPickedObject(displayPoint, view):
pickedPoint, pickedProp, pickedDataset = pickProp(displayPoint, view)
obj = getObjectByProp(pickedProp) or getObjectByDataSet(pickedDataset)
return obj, pickedPoint
"""
Toggles whether anti-aliasing is enabled or not.
This sets a static variable in the ddQVTKWidgetView class,
so it controls the setting for all views created in the current
executable. It must be called before constructing a ddQVTKWidgetView.
Anti-aliasing is enabled by default.
"""
def setAntiAliasing(enabled):
PythonQt.dd.ddQVTKWidgetView.setAntiAliasing(enabled)
def enableEyeDomeLighting(view):
standardPass = vtk.vtkRenderStepsPass()
edlPass = vtk.vtkEDLShading()
edlPass.SetDelegatePass(standardPass)
view.renderer().SetPass(edlPass)
def disableEyeDomeLighting(view):
view.renderer().SetPass(None)
def showQLabelImage(filename):
'''
Returns a QLabel displaying the image contents of given filename.
Make sure to assign the label, it will destruct when it goes out
of scope.
'''
image = QtGui.QImage(filename)
assert not image.isNull()
imageLabel = QtGui.QLabel()
imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
imageLabel.setScaledContents(True)
imageLabel.resize(imageLabel.pixmap.size())
imageLabel.setWindowTitle(os.path.basename(filename))
imageLabel.show()
return imageLabel
SubGNN/test.py | rmwu/SubGNN | 107 | 6630996 | import sys
sys.path.insert(0, '..') # add config to path
import config
import train as tr
import os
import json
import random
import numpy as np
import argparse
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def parse_arguments():
parser = argparse.ArgumentParser(description="Run SubGNN")
parser.add_argument('-task', type=str, default=None, help='Task name (e.g. hpo_metab)')
parser.add_argument('-tb_name', type=str, default="sg", help='Base Model Name for Tensorboard Log')
parser.add_argument('-restoreModelPath', type=str, default=None, help='Parent directory of model, hparams, kwargs')
parser.add_argument("-max_epochs", type=int, default=200, help="Max number of epochs to train")
parser.add_argument("-random_seeds", action="store_true", help="Use randomly generated seeds instead of the fixed seeds 0-9")
parser.add_argument('-tb_dir', default="tensorboard_test", type=str)
parser.add_argument('-no_train', action="store_true")
args = parser.parse_args()
return args
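# Example invocations (illustrative; task names and model directories depend on how
# the SubGNN data and config files are set up locally):
#
#   python test.py -task hpo_metab -tb_name sg_hpo_metab -max_epochs 200 -random_seeds
#
#   # re-evaluate already trained checkpoints without retraining
#   python test.py -task hpo_metab -tb_name sg_hpo_metab -no_train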
def main(args_script):
args_to_function = {
"task" : args_script.task,
"tb_name" : args_script.tb_name,
"restoreModelPath" : args_script.restoreModelPath,
"max_epochs" : args_script.max_epochs,
"tb_dir" : args_script.tb_dir,
## Defaults
"checkpoint_k": 1,
"no_checkpointing" : False, #0 and True or 1 and False
"tb_logging": True,
"runTest" : False,
"no_save" : False,
"print_train_times" : False,
"monitor_metric":'val_micro_f1',
"opt_n_trials":None,
"debug_mode":False,
"subset_data":False,
"restoreModelName":None,
"noTrain":False,
"log_path":None
}
args = Namespace(**args_to_function)
# dict to keep track of results
exp_results = {
"test_acc_mean":0, "test_acc_sd":0,"test_micro_f1_mean":0,"test_micro_f1_sd":0,
"test_auroc_mean":0, "test_auroc_sd":0,
"test_acc" : [], "test_micro_f1": [], "test_auroc" : [],
"call" : args_to_function
}
# for each seed, train a new model
for seed in range(10):
print(f"Running Round {seed+1}")
# either use a random seed from 0 to 1000000 or use the default random seeds 0-9
args.seed = random.randint(0, 1000000) if args_script.random_seeds else seed
print('Seed used: ', args.seed)
args.tb_dir = os.path.join(config.PROJECT_ROOT, args.tb_dir)
args.tb_version = f"version_{seed}"
if not args_script.no_train: #train the model from scratch
args.noTrain = False
args.runTest = True
test_results = tr.train_model(args)
else: #read in the model - NOTE that this doesn't differentiate .ckpt files if multiple are saved
model_path = os.path.join(config.PROJECT_ROOT,args.tb_dir, args.tb_name, args.tb_version)
for file in os.listdir(model_path):
if file.endswith(".ckpt") and file.startswith("epoch"):
outpath = file
args.noTrain = True
args.no_save = True
args.restoreModelPath = model_path
args.restoreModelName = outpath
test_results = tr.train_model(args)
# keep track of test results for each random seed run
exp_results['test_micro_f1'].append(float(test_results['test_micro_f1']))
exp_results['test_acc'].append(float(test_results['test_acc']))
exp_results['test_auroc'].append(float(test_results['test_auroc']))
exp_results["test_acc_mean"] = np.mean(exp_results['test_acc'])
exp_results["test_acc_sd"] = np.std(exp_results['test_acc'])
exp_results["test_micro_f1_mean"] = np.mean(exp_results['test_micro_f1'])
exp_results["test_micro_f1_sd"] = np.std(exp_results['test_micro_f1'])
exp_results["test_auroc_mean"] = np.mean(exp_results['test_auroc'])
exp_results["test_auroc_sd"] = np.std(exp_results['test_auroc'])
print("OVERALL RESULTS:") # across all random seeds
print(exp_results)
# write results for all runs to file
exp_results_file = open(os.path.join(config.PROJECT_ROOT, args.tb_dir, args.tb_name, "experiment_results.json"),"w")
exp_results_file.write(json.dumps(exp_results, indent=4))
exp_results_file.close()
if __name__ == "__main__":
args = parse_arguments()
main(args)
flaskr/liff/models.py | kohei25/rakumeshi | 2 | 6630997 | <filename>flaskr/liff/models.py<gh_stars>1-10
# from ast import keyword
# from flaskr import db
# from flaskr.linebot.models import User
# class UserFeature(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# user_id = db.Column(db.ForeignKey(User.id), nullable=False)
# sex = db.Column(db.Integer)
# age = db.Column(db.Integer)
# genre = db.Column(db.Integer)
# budget = db.Column(db.Integer)
# created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.current_timestamp())
# user = db.relationship(User, lazy='joined', backref='userfeatures')
# class Keyword(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# user_id = db.Column(db.ForeignKey(User.id), nullable=False)
# keyword = db.Column(db.String(100))
# created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.current_timestamp())
# user = db.relationship(User, lazy='joined', backref='keywords')
django_config_gen/management/commands/print_settings.py | brillgen/django-config-gen | 1 | 6630998 | <reponame>brillgen/django-config-gen
# -*- coding: utf-8 -*-
#Copyright (C) 2010, 2011 <NAME>
#
#Licensed under a BSD 3-Clause License. See LICENSE file.
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from .. import patch_settings
import json
import copy
import logging
logger = logging.getLogger(__name__)
class NullHandler(logging.Handler):
def emit(self, record):
pass
patch_settings()
class Command(BaseCommand):
help = 'Prints out settings serialized as JSON.'
def handle(self, **options):
#remove logging statements from output
l = logging.getLogger('')
for h in l.handlers:
l.removeHandler(h)
l.addHandler(NullHandler())
d = {}
s_d = settings._wrapped.__dict__
for key in settings._wrapped.__dict__:
val = s_d[key]
logger.debug('%s: %s' % (key, val))
try:
#if settings has something like "import django.conf.global_settings as DEFAULT_SETTINGS"
#in it, then json encoding will throw an error. Copying makes
#sure modules don't get included.
d[key] = copy.copy(val)
except Exception as e:
logger.error(e)
print(json.dumps(d, indent=4, sort_keys=True))
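# Example usage (illustrative): once installed in an app's management/commands/
# directory, the command is invoked by its module name, e.g.:
#
#   python manage.py print_settings > settings_dump.json
#
# (the module name 'print_settings' is assumed here)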
renku/service/config.py | almutlue/renku-python | 0 | 6630999 | # -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku service config."""
import os
import tempfile
from pathlib import Path
import pkg_resources
GIT_ACCESS_DENIED_ERROR_CODE = -32000
GIT_UNKNOWN_ERROR_CODE = -32001
RENKU_EXCEPTION_ERROR_CODE = -32100
REDIS_EXCEPTION_ERROR_CODE = -32200
INVALID_HEADERS_ERROR_CODE = -32601
INVALID_PARAMS_ERROR_CODE = -32602
INTERNAL_FAILURE_ERROR_CODE = -32603
HTTP_SERVER_ERROR = -32000
SERVICE_NAME = "Renku Service"
OPENAPI_VERSION = "3.0.3"
API_VERSION = "v1"
PROJECT_CLONE_NO_DEPTH = -1
PROJECT_CLONE_DEPTH_DEFAULT = int(os.getenv("PROJECT_CLONE_DEPTH_DEFAULT", 1))
TEMPLATE_CLONE_DEPTH_DEFAULT = int(os.getenv("TEMPLATE_CLONE_DEPTH_DEFAULT", 0))
CACHE_DIR = os.getenv("CACHE_DIR", os.path.realpath(tempfile.TemporaryDirectory().name))
CACHE_UPLOADS_PATH = Path(CACHE_DIR) / Path("uploads")
CACHE_UPLOADS_PATH.mkdir(parents=True, exist_ok=True)
CACHE_PROJECTS_PATH = Path(CACHE_DIR) / Path("projects")
CACHE_PROJECTS_PATH.mkdir(parents=True, exist_ok=True)
TAR_ARCHIVE_CONTENT_TYPE = "application/x-tar"
ZIP_ARCHIVE_CONTENT_TYPE = "application/zip"
GZ_ARCHIVE_CONTENT_TYPE = "application/x-gzip"
SUPPORTED_ARCHIVES = [
TAR_ARCHIVE_CONTENT_TYPE,
ZIP_ARCHIVE_CONTENT_TYPE,
GZ_ARCHIVE_CONTENT_TYPE,
]
# the path prefix on the service
SERVICE_PREFIX = os.getenv("CORE_SERVICE_PREFIX", "/")
# the reverse proxy prefix
SERVICE_API_BASE_PATH = os.getenv("CORE_SERVICE_API_BASE_PATH", "/")
# path to the swagger spec
API_SPEC_URL = SERVICE_PREFIX.lstrip("/") + "/spec.json"
LOGGER_CONFIG_FILE = Path(pkg_resources.resource_filename("renku", "service/logging.yaml"))
pysagec/models.py | migonzalvar/pysagec | 3 | 6631000 | from urllib.parse import urlsplit, parse_qs
from .base import Model, Nested, String
def key_or_none(qs, key):
iterable = qs.get(key, [None])
return iterable[0]
class AuthInfo(Model):
root_tag = 'mrw:AuthInfo'
franchise_code = String('mrw:CodigoFranquicia')
subscriber_code = String('mrw:CodigoAbonado')
departament_code = String('mrw:CodigoDepartamento', ignore_if_none=True)
username = String('mrw:UserName')
password = String('<PASSWORD>')
@classmethod
def from_url(cls, url):
url = urlsplit(url)
qs = parse_qs(url.query)
return cls(
username=url.username,
password=<PASSWORD>,
franchise_code=key_or_none(qs, 'franchise'),
subscriber_code=key_or_none(qs, 'subscriber'),
departament_code=key_or_none(qs, 'department')
)
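# Example (illustrative sketch): building the auth block from a DSN-style URL.  All
# credentials and codes below are made-up placeholder values.
def _example_auth_from_url():
    url = 'mrw://someuser:[email protected]/?franchise=12345&subscriber=67890&department=1'
    return AuthInfo.from_url(url)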
class Address(Model):
root_tag = 'mrw:Direccion'
street_type = String('mrw:CodigoTipoVia')
street_name = String('mrw:Via')
street_number = String('mrw:Numero')
remaining_details = String('mrw:Resto')
postal_code = String('mrw:CodigoPostal')
city = String('mrw:Poblacion')
class PickupInfo(Model):
root_tag = 'mrw:DatosEntrega'
pickup_address = Nested('mrw:Direccion', Address, unwrap=True)
vat_number = String('mrw:Nif')
recipient_name = String('mrw:Nombre')
recipient_phone_number = String('mrw:Telefono')
contact_phone_number = String('mrw:Contacto')
contact_name = String('mrw:ALaAtencionDe')
comments = String('mrw:Observaciones')
class Package(Model):
root_tag = 'mrw:BultoRequest'
height = String('mrw:Alto')
length = String('mrw:Largo')
width = String('mrw:Ancho')
dimension = String('mrw:Dimension')
reference = String('mrw:Referencia')
weight = String('mrw:Peso')
class ServiceInfo(Model):
root_tag = 'mrw:DatosServicio'
date = String('mrw:Fecha')
customer_reference = String('mrw:Referencia')
franchise_delivery = String('mrw:EnFranquicia', default='N')
service_code = String('mrw:CodigoServicio')
packages = Nested('mrw:Bultos', Package, many=True)
number_of_packages = String('mrw:NumeroBultos')
weight = String('mrw:Peso')
delivery_on_saturday = String('mrw:EntregaSabado', default='N')
delivery_before_830 = String('mrw:Entrega830', default='N')
delivery_after_time = String('mrw:EntregaPartirDe')
management = String('mrw:Gestion', default='N')
return_back = String('mrw:Retorno', default='N')
immediate_confirmation = String('mrw:ConfirmacionInmediata', default='N')
reimbursement = String('mrw:Reembolso', default='N')
class GetLabel(Model):
root_tag = None
shipping_number = String('mrw:NumeroEnvio')
delimiter = String('mrw:SeparadorNumerosEnvio', ignore_if_none=True)
date_range_start = String('mrw:FechaInicioEnvio', ignore_if_none=True)
date_range_end = String('mrw:FechaFinEnvio', ignore_if_none=True)
label_type = String('mrw:TipoEtiquetaEnvio', default='0')
top_margin = String('mrw:ReportTopMargin', default='1100')
left_margin = String('mrw:ReportLeftMargin', default=650)
class SendResponseResult(Model):
root_tag = None
message = String('{http://www.mrw.es/}Mensaje')
shipping_number = String('{http://www.mrw.es/}NumeroEnvio')
request_number = String('{http://www.mrw.es/}NumeroSolicitud')
status = String('{http://www.mrw.es/}Estado')
url = String('{http://www.mrw.es/}Url')
class SendResponse(Model):
root_tag = '{http://www.mrw.es/}TransmEnvioResponse'
result = Nested(
'{http://www.mrw.es/}TransmEnvioResult',
SendResponseResult,
unwrap=True
)
class LabelResponseResult(Model):
root_tag = None
file = String('{http://www.mrw.es/}EtiquetaFile')
message = String('{http://www.mrw.es/}Mensaje')
status = String('{http://www.mrw.es/}Estado')
class LabelResponse(Model):
root_tag = '{http://www.mrw.es/}GetEtiquetaEnvioResponse'
result = Nested(
'{http://www.mrw.es/}GetEtiquetaEnvioResult',
LabelResponseResult,
unwrap=True
)
scripts/models/k_fold_model.py | daniele21/DL_soccer_prediction_v2 | 0 | 6631001 | <reponame>daniele21/DL_soccer_prediction_v2
from torch.utils.data import DataLoader
import numpy as np
import torch
from tqdm import tqdm
from copy import deepcopy
from torch.multiprocessing import Process, set_start_method
from scripts.constants.configs import HOME, AWAY
from scripts.models.base import Base_Model
from scripts.models.model_utils import get_device_from_name
from scripts.utils.loading import load_model
from core.logger.logging import logger
from core.file_manager.saving import save_model, save_json
class K_fold_model():
def __init__(self, network, params, dataloader):
self.name = params['name']
self.seed = params['seed']
self.device = get_device_from_name(params['device'])
self.n_folds = len(dataloader['train'])
trainloaders = [DataLoader(d,
batch_size=d.batch_size,
shuffle=False) for d in dataloader['train']]
evalloaders = [DataLoader(d,
batch_size=d.batch_size,
shuffle=False) for d in dataloader['eval']]
self.dataloaders = [{'train':trainloader, 'eval':evalloader}
for trainloader, evalloader in zip(trainloaders, evalloaders)]
self.models = [Base_Model(deepcopy(network), params, self.dataloaders[i]) for i in range(self.n_folds)]
for i, model in enumerate(self.models):
model.save_dir += f'fold_{i}/'
self.save_dir = params['save_dir'] if 'save_dir' in list(params.keys()) else None
# REPRODUCIBILITY
np.random.seed(self.seed)
torch.manual_seed(self.seed)
# Dataset size
last_train_event = trainloaders[-1].dataset.last_n_event()
last_eval_event = evalloaders[-1].dataset.last_n_event()
print(f'> Last Training Index: {last_train_event}')
print(f'> Last Evaluation Index: {last_eval_event}')
def train(self, epochs, patience=None):
try:
set_start_method('spawn')
except RuntimeError:
pass
for model in tqdm(self.models, desc='> Folds '):
p = Process(target=model.train, args=(epochs, patience))
p.start()
p.join()
updated_models = []
for model in self.models:
ckp_model = f'{model.save_dir}{model.name}.pth'
updated_models.append(load_model(ckp_model))
self.models = updated_models
if(self.save_dir is not None):
filepath = f'{self.save_dir}{self.name}.pth'
save_model(self, filepath)
losses, mean_loss = self.get_losses()
model_loss = {'losses':losses,
'mean_loss':mean_loss}
filepath = f'{self.save_dir}losses.json'
save_json(model_loss, filepath)
return
def predict(self, testloader, field=None):
"""
Inference with all models, in a dict
Args:
testloader: dataloader containing the test data
field: type of match [HOME / AWAY]
Returns:
preds: dict{ KEY: model number
VALUE: list of predictions}
"""
model_name = str(field).lower()
assert field == HOME or field == AWAY, 'ERROR - model predict: WRONG model name. Give "home" or "away"'
preds = {}
for i, model in enumerate(self.models):
if (model_name == HOME):
# logger.info('> Calling Home Network')
field_net = model.model.home_network
elif (model_name == AWAY):
# logger.info('> Calling Away Network')
field_net = model.model.away_network
else:
raise ValueError('Model - predict: Wrong model name')
model_preds = []
with torch.no_grad():
for x in testloader:
x = torch.Tensor(x).to(self.device)
out = field_net(x)
out = out.squeeze()
model_preds.append(out.item())
preds[i] = model_preds
        return preds  # predictions from every fold, keyed by model index (as documented)
def get_losses(self):
losses = {'train':[],
'eval':[]}
for model in self.models:
losses['train'].append(model.losses['train'][-1])
losses['eval'].append(model.losses['eval'][-1])
mean_loss = {'train':np.mean(losses['train']),
'eval':np.mean(losses['eval'])}
return losses, mean_loss
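# Illustrative usage (names below are placeholders; the real network, params dict and
# fold datasets are built elsewhere in this project):
#
#   kfold = K_fold_model(network, params, {'train': train_datasets, 'eval': eval_datasets})
#   kfold.train(epochs=50, patience=5)
#   home_preds = kfold.predict(testloader, field=HOME)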
| from torch.utils.data import DataLoader
import numpy as np
import torch
from tqdm import tqdm
from copy import deepcopy
from torch.multiprocessing import Process, set_start_method
from scripts.constants.configs import HOME, AWAY
from scripts.models.base import Base_Model
from scripts.models.model_utils import get_device_from_name
from scripts.utils.loading import load_model
from core.logger.logging import logger
from core.file_manager.saving import save_model, save_json
class K_fold_model():
def __init__(self, network, params, dataloader):
self.name = params['name']
self.seed = params['seed']
self.device = get_device_from_name(params['device'])
self.n_folds = len(dataloader['train'])
trainloaders = [DataLoader(d,
batch_size=d.batch_size,
shuffle=False) for d in dataloader['train']]
evalloaders = [DataLoader(d,
batch_size=d.batch_size,
shuffle=False) for d in dataloader['eval']]
self.dataloaders = [{'train':trainloader, 'eval':evalloader}
for trainloader, evalloader in zip(trainloaders, evalloaders)]
self.models = [Base_Model(deepcopy(network), params, self.dataloaders[i]) for i in range(self.n_folds)]
for i, model in enumerate(self.models):
model.save_dir += f'fold_{i}/'
self.save_dir = params['save_dir'] if 'save_dir' in list(params.keys()) else None
# REPRODUCIBILITY
np.random.seed(self.seed)
torch.manual_seed(self.seed)
# Dataset size
last_train_event = trainloaders[-1].dataset.last_n_event()
last_eval_event = evalloaders[-1].dataset.last_n_event()
print(f'> Last Training Index: {last_train_event}')
print(f'> Last Evaluation Index: {last_eval_event}')
def train(self, epochs, patience=None):
try:
set_start_method('spawn')
except RuntimeError:
pass
for model in tqdm(self.models, desc='> Folds '):
p = Process(target=model.train, args=(epochs, patience))
p.start()
p.join()
updated_models = []
for model in self.models:
ckp_model = f'{model.save_dir}{model.name}.pth'
updated_models.append(load_model(ckp_model))
self.models = updated_models
if(self.save_dir is not None):
filepath = f'{self.save_dir}{self.name}.pth'
save_model(self, filepath)
losses, mean_loss = self.get_losses()
model_loss = {'losses':losses,
'mean_loss':mean_loss}
filepath = f'{self.save_dir}losses.json'
save_json(model_loss, filepath)
return
def predict(self, testloader, field=None):
"""
Inference with all models, in a dict
Args:
testloader: dataloader containing the test data
field: type of match [HOME / AWAY]
Returns:
preds: dict{ KEY: model number
VALUE: list of predictions}
"""
model_name = str(field).lower()
assert field == HOME or field == AWAY, 'ERROR - model predict: WRONG model name. Give "home" or "away"'
preds = {}
for i, model in enumerate(self.models):
if (model_name == HOME):
# logger.info('> Calling Home Network')
field_net = model.model.home_network
elif (model_name == AWAY):
# logger.info('> Calling Away Network')
field_net = model.model.away_network
else:
raise ValueError('Model - predict: Wrong model name')
model_preds = []
with torch.no_grad():
for x in testloader:
x = torch.Tensor(x).to(self.device)
out = field_net(x)
out = out.squeeze()
model_preds.append(out.item())
preds[i] = model_preds
return preds[i]
def get_losses(self):
losses = {'train':[],
'eval':[]}
for model in self.models:
losses['train'].append(model.losses['train'][-1])
losses['eval'].append(model.losses['eval'][-1])
mean_loss = {'train':np.mean(losses['train']),
'eval':np.mean(losses['eval'])}
return losses, mean_loss | en | 0.411117 | # REPRODUCIBILITY # Dataset size Inference with all models, in a dict Args: testloader: dataloader containing the test data field: type of match [HOME / AWAY] Returns: preds: dict{ KEY: model number VALUE: list of predictions} # logger.info('> Calling Home Network') # logger.info('> Calling Away Network') | 2.113641 | 2 |
kaldi_recipes/local/make_train_dev_test_splits.py | skesiraju/indic-kws | 0 | 6631002 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author : <NAME>
# e-mail : kesiraju[AT]fit[DOT]vutbr[DOT]cz
# Date created : 03 Jun 2021
# Last modified : 03 Jun 2021
"""
Create train/dev/test splits for a Kaldi-style data directory based on utterance
durations. Durations are read from utt2dur when it exists; otherwise they are
computed from each recording listed in wav.scp using the sox (soxi) command.
"""
import os
import sys
import argparse
from random import shuffle
import subprocess
import numpy as np
def get_uid2dur_mapping(data_dir, set_name, uid2dur):
""" Get utterance ID to duration (sec) mapping """
utt2dur_f = os.path.join(data_dir, f"{set_name}/utt2dur")
wavscp_f = os.path.join(data_dir, f"{set_name}/wav.scp")
if os.path.exists(utt2dur_f):
with open(utt2dur_f, "r", encoding="utf-8") as fpr:
            for lno, line in enumerate(fpr, 1):  # lno is reported in the error message below
parts = line.strip().split()
if len(parts) != 2:
print(
"Each line should have two columns. Found:",
parts,
"at line",
lno,
file=sys.stderr,
)
sys.exit()
uid2dur[parts[0]] = float(parts[1])
elif os.path.exists(wavscp_f):
with open(wavscp_f, "r", encoding="utf-8") as fpr:
for line in fpr:
parts = line.strip().split()
res = subprocess.run(["soxi", "-D", parts[1]], capture_output=True)
uid2dur[parts[0]] = float(res.stdout)
return uid2dur
def load_key_value_from_text(fname, id2text, full_line=True):
with open(fname, "r", encoding="utf-8") as fpr:
for line in fpr:
parts = line.strip().split(" ", 1)
if parts[0] not in id2text:
if full_line:
id2text[parts[0]] = line.strip()
else:
id2text[parts[0]] = parts[1].strip()
else:
print("Duplicate ID:", parts[0])
sys.exit()
return id2text
def save_subset(in_files, out_ids, out_file):
id2text = {}
for in_file in in_files:
id2text = load_key_value_from_text(in_file, id2text, True)
with open(out_file, "w", encoding="utf-8") as fpw:
for uid in sorted(out_ids):
fpw.write(id2text[uid].strip() + "\n")
print(out_file, "saved.")
def get_utt2uid_mapping(text_f, utt2uid):
if not os.path.exists(text_f):
print("get_utt2uid_mapping:", text_f, "FILE NOT FOUND.")
sys.exit()
lno = 0
with open(text_f, "r", encoding="utf-8") as fpr:
for line in fpr:
lno += 1
uid, text = line.strip().split(" ", 1)
if text in utt2uid:
utt2uid[text].append(uid)
else:
utt2uid[text] = [uid]
return utt2uid
def main():
""" main method """
args = parse_arguments()
utt2uid = {} # utterance text to utterance ID mapping
uid2dur = {} # utterance ID to duration mapping
for set_name in ["train", "test"]:
if args.set_name != "both":
if args.set_name != set_name:
continue
print("processing", set_name, "set ..")
text_f = os.path.join(args.data_dir, f"{set_name}/text")
utt2uid = get_utt2uid_mapping(text_f, utt2uid)
uid2dur = get_uid2dur_mapping(args.data_dir, f"{set_name}", uid2dur)
uid2text = {}
for text, uids in utt2uid.items():
for uid in uids:
uid2text[uid] = text
print(
"# utt2uid:",
len(utt2uid),
" | # uid2text:",
len(uid2text),
"| # uid2dur:",
len(uid2dur),
)
cntbin2uids = (
{}
) # nested dict {bin_1: {utt_11: [uid_11]..}, bin_2: {utt_22: [uid_221, uid_222], ...}, ...}
utt2avgdur = {}
avg_uniq_dur = 0.0
for utt, uids in utt2uid.items():
n_uids = len(uids)
sub_dict = {}
if n_uids in cntbin2uids:
sub_dict = cntbin2uids[n_uids]
sub_dict[utt] = uids
cntbin2uids[n_uids] = sub_dict
utt_avg_dur = 0.0
for uid in uids:
utt_avg_dur += uid2dur[uid]
utt_avg_dur /= len(uids)
utt2avgdur[utt] = utt_avg_dur
avg_uniq_dur += utt_avg_dur
n_utts = 0
for i in cntbin2uids:
n_utts += i * len(cntbin2uids[i])
print("# utts:", n_utts)
total_dur = 0.0
for uid, dur in uid2dur.items():
total_dur += dur
print("total dur: {:.2f} hrs".format(total_dur / 3600))
print("uniq utt dur: {:.2f} hrs".format(avg_uniq_dur / 3600))
desired_total_dur = 5 * 3600.0
desired_uniq_dur = avg_uniq_dur * 0.15
print(
"desired uniq utt dur for each dev and test sets: {:.2f} min".format(
desired_uniq_dur / 60.0
)
)
bin_sizes = []
for i in range(500):
if i not in cntbin2uids:
bin_sizes.append(0)
else:
bin_sizes.append(len(cntbin2uids[i]))
selected_utts = {"dev": set(), "test": set()}
selected_uids = {"dev": [], "test": []}
selected_set = set()
percent = args.percent
for set_name in ["dev", "test"]:
obt_dur = 0.0
cntbin_thresh = 1
flag = False
while obt_dur < desired_uniq_dur:
for i in range(500):
if i not in cntbin2uids:
continue
sub_dict = cntbin2uids[i]
max_utts_per_bin = int(len(sub_dict) * percent)
j = 0
for utt in sub_dict:
if utt in selected_set:
continue
obt_dur += utt2avgdur[utt]
selected_utts[set_name].add(utt)
selected_set.add(utt)
j += 1
if obt_dur > desired_uniq_dur:
flag = True
break
if j > max_utts_per_bin:
print(
"{:2d} {:4d} {:6.2f}/{:6.2f}".format(
i,
len(selected_utts[set_name]),
obt_dur,
desired_uniq_dur,
)
)
break
if flag:
break
set_dur = 0.
set_uids = []
for utt in selected_utts[set_name]:
for uid in utt2uid[utt]:
set_dur += uid2dur[uid]
set_uids.append(uid)
selected_uids[set_name] = sorted(set_uids)
print(set_name, "dur: {:.2f}".format(set_dur/3600.))
if args.set_name == 'train':
break
print('utts in dev + test:', len(selected_set))
all_uids = set(list(uid2dur.keys()))
train_set = all_uids - (set(selected_uids['dev']) | set(selected_uids['test']))
train_uids = sorted(list(train_set))
print(len(all_uids), len(train_uids), len(selected_uids['dev']), len(selected_uids['test']))
os.makedirs(args.out_dir, exist_ok=True)
dev_dur = 0.0
for uid in selected_uids['dev']:
dev_dur += uid2dur[uid]
print("Dev dur: {:.1f}".format(dev_dur / 3600))
if os.path.exists(os.path.join(args.out_dir, "/train/text")):
print("Files present in", args.out_dir)
sys.exit()
else:
with open(
os.path.join(args.out_dir, "train.ids"), "w", encoding="utf-8"
) as fpw:
for uid in train_uids:
fpw.write(uid + "\n")
with open(os.path.join(args.out_dir, "dev.ids"), "w", encoding="utf-8") as fpw:
for uid in selected_uids['dev']:
fpw.write(uid + "\n")
if selected_uids['test']:
with open(os.path.join(args.out_dir, "test.ids"), "w", encoding="utf-8") as fpw:
for uid in selected_uids['test']:
fpw.write(uid + "\n")
uids = {"train": train_uids, "dev": selected_uids['dev'], "test": selected_uids['test']}
for set_name in ["train", "dev", "test"]:
os.makedirs(args.out_dir + "/" + set_name, exist_ok=True)
for base in ["text", "utt2spk", "wav.scp"]:
main_f = [args.data_dir + f"train/{base}", args.data_dir + f"test/{base}"]
out_f = args.out_dir + f"/{set_name}/{base}"
if uids[set_name]:
save_subset(main_f, uids[set_name], out_f)
def parse_arguments():
""" parse command line arguments """
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("data_dir", help="path to data dir")
parser.add_argument("out_dir", help="path to out dir to save new splits")
parser.add_argument("-percent", type=float, default=0.15, help="percentage of dev and test")
parser.add_argument(
"-set_name", choices=["train", "test", "both"], default="both", type=str
)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
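# Example invocation (paths are placeholders):
#   python make_train_dev_test_splits.py <data_dir> <out_dir> -percent 0.15 -set_name both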
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author : <NAME>
# e-mail : kesiraju[AT]fit[DOT]vutbr[DOT]cz
# Date created : 03 Jun 2021
# Last modified : 03 Jun 2021
"""
Create train/dev/test splits for a Kaldi-style data directory based on utterance
durations. Durations are read from utt2dur when it exists; otherwise they are
computed from each recording listed in wav.scp using the sox (soxi) command.
"""
import os
import sys
import argparse
from random import shuffle
import subprocess
import numpy as np
def get_uid2dur_mapping(data_dir, set_name, uid2dur):
""" Get utterance ID to duration (sec) mapping """
utt2dur_f = os.path.join(data_dir, f"{set_name}/utt2dur")
wavscp_f = os.path.join(data_dir, f"{set_name}/wav.scp")
if os.path.exists(utt2dur_f):
with open(utt2dur_f, "r", encoding="utf-8") as fpr:
            for lno, line in enumerate(fpr, 1):  # lno is reported in the error message below
parts = line.strip().split()
if len(parts) != 2:
print(
"Each line should have two columns. Found:",
parts,
"at line",
lno,
file=sys.stderr,
)
sys.exit()
uid2dur[parts[0]] = float(parts[1])
elif os.path.exists(wavscp_f):
with open(wavscp_f, "r", encoding="utf-8") as fpr:
for line in fpr:
parts = line.strip().split()
res = subprocess.run(["soxi", "-D", parts[1]], capture_output=True)
uid2dur[parts[0]] = float(res.stdout)
return uid2dur
def load_key_value_from_text(fname, id2text, full_line=True):
with open(fname, "r", encoding="utf-8") as fpr:
for line in fpr:
parts = line.strip().split(" ", 1)
if parts[0] not in id2text:
if full_line:
id2text[parts[0]] = line.strip()
else:
id2text[parts[0]] = parts[1].strip()
else:
print("Duplicate ID:", parts[0])
sys.exit()
return id2text
def save_subset(in_files, out_ids, out_file):
id2text = {}
for in_file in in_files:
id2text = load_key_value_from_text(in_file, id2text, True)
with open(out_file, "w", encoding="utf-8") as fpw:
for uid in sorted(out_ids):
fpw.write(id2text[uid].strip() + "\n")
print(out_file, "saved.")
def get_utt2uid_mapping(text_f, utt2uid):
if not os.path.exists(text_f):
print("get_utt2uid_mapping:", text_f, "FILE NOT FOUND.")
sys.exit()
lno = 0
with open(text_f, "r", encoding="utf-8") as fpr:
for line in fpr:
lno += 1
uid, text = line.strip().split(" ", 1)
if text in utt2uid:
utt2uid[text].append(uid)
else:
utt2uid[text] = [uid]
return utt2uid
def main():
""" main method """
args = parse_arguments()
utt2uid = {} # utterance text to utterance ID mapping
uid2dur = {} # utterance ID to duration mapping
for set_name in ["train", "test"]:
if args.set_name != "both":
if args.set_name != set_name:
continue
print("processing", set_name, "set ..")
text_f = os.path.join(args.data_dir, f"{set_name}/text")
utt2uid = get_utt2uid_mapping(text_f, utt2uid)
uid2dur = get_uid2dur_mapping(args.data_dir, f"{set_name}", uid2dur)
uid2text = {}
for text, uids in utt2uid.items():
for uid in uids:
uid2text[uid] = text
print(
"# utt2uid:",
len(utt2uid),
" | # uid2text:",
len(uid2text),
"| # uid2dur:",
len(uid2dur),
)
cntbin2uids = (
{}
) # nested dict {bin_1: {utt_11: [uid_11]..}, bin_2: {utt_22: [uid_221, uid_222], ...}, ...}
utt2avgdur = {}
avg_uniq_dur = 0.0
for utt, uids in utt2uid.items():
n_uids = len(uids)
sub_dict = {}
if n_uids in cntbin2uids:
sub_dict = cntbin2uids[n_uids]
sub_dict[utt] = uids
cntbin2uids[n_uids] = sub_dict
utt_avg_dur = 0.0
for uid in uids:
utt_avg_dur += uid2dur[uid]
utt_avg_dur /= len(uids)
utt2avgdur[utt] = utt_avg_dur
avg_uniq_dur += utt_avg_dur
n_utts = 0
for i in cntbin2uids:
n_utts += i * len(cntbin2uids[i])
print("# utts:", n_utts)
total_dur = 0.0
for uid, dur in uid2dur.items():
total_dur += dur
print("total dur: {:.2f} hrs".format(total_dur / 3600))
print("uniq utt dur: {:.2f} hrs".format(avg_uniq_dur / 3600))
desired_total_dur = 5 * 3600.0
desired_uniq_dur = avg_uniq_dur * 0.15
print(
"desired uniq utt dur for each dev and test sets: {:.2f} min".format(
desired_uniq_dur / 60.0
)
)
bin_sizes = []
for i in range(500):
if i not in cntbin2uids:
bin_sizes.append(0)
else:
bin_sizes.append(len(cntbin2uids[i]))
selected_utts = {"dev": set(), "test": set()}
selected_uids = {"dev": [], "test": []}
selected_set = set()
percent = args.percent
for set_name in ["dev", "test"]:
obt_dur = 0.0
cntbin_thresh = 1
flag = False
while obt_dur < desired_uniq_dur:
for i in range(500):
if i not in cntbin2uids:
continue
sub_dict = cntbin2uids[i]
max_utts_per_bin = int(len(sub_dict) * percent)
j = 0
for utt in sub_dict:
if utt in selected_set:
continue
obt_dur += utt2avgdur[utt]
selected_utts[set_name].add(utt)
selected_set.add(utt)
j += 1
if obt_dur > desired_uniq_dur:
flag = True
break
if j > max_utts_per_bin:
print(
"{:2d} {:4d} {:6.2f}/{:6.2f}".format(
i,
len(selected_utts[set_name]),
obt_dur,
desired_uniq_dur,
)
)
break
if flag:
break
set_dur = 0.
set_uids = []
for utt in selected_utts[set_name]:
for uid in utt2uid[utt]:
set_dur += uid2dur[uid]
set_uids.append(uid)
selected_uids[set_name] = sorted(set_uids)
print(set_name, "dur: {:.2f}".format(set_dur/3600.))
if args.set_name == 'train':
break
print('utts in dev + test:', len(selected_set))
all_uids = set(list(uid2dur.keys()))
train_set = all_uids - (set(selected_uids['dev']) | set(selected_uids['test']))
train_uids = sorted(list(train_set))
print(len(all_uids), len(train_uids), len(selected_uids['dev']), len(selected_uids['test']))
os.makedirs(args.out_dir, exist_ok=True)
dev_dur = 0.0
for uid in selected_uids['dev']:
dev_dur += uid2dur[uid]
print("Dev dur: {:.1f}".format(dev_dur / 3600))
if os.path.exists(os.path.join(args.out_dir, "/train/text")):
print("Files present in", args.out_dir)
sys.exit()
else:
with open(
os.path.join(args.out_dir, "train.ids"), "w", encoding="utf-8"
) as fpw:
for uid in train_uids:
fpw.write(uid + "\n")
with open(os.path.join(args.out_dir, "dev.ids"), "w", encoding="utf-8") as fpw:
for uid in selected_uids['dev']:
fpw.write(uid + "\n")
if selected_uids['test']:
with open(os.path.join(args.out_dir, "test.ids"), "w", encoding="utf-8") as fpw:
for uid in selected_uids['test']:
fpw.write(uid + "\n")
uids = {"train": train_uids, "dev": selected_uids['dev'], "test": selected_uids['test']}
for set_name in ["train", "dev", "test"]:
os.makedirs(args.out_dir + "/" + set_name, exist_ok=True)
for base in ["text", "utt2spk", "wav.scp"]:
main_f = [args.data_dir + f"train/{base}", args.data_dir + f"test/{base}"]
out_f = args.out_dir + f"/{set_name}/{base}"
if uids[set_name]:
save_subset(main_f, uids[set_name], out_f)
def parse_arguments():
""" parse command line arguments """
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("data_dir", help="path to data dir")
parser.add_argument("out_dir", help="path to out dir to save new splits")
parser.add_argument("-percent", type=float, default=0.15, help="percentage of dev and test")
parser.add_argument(
"-set_name", choices=["train", "test", "both"], default="both", type=str
)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| en | 0.708219 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # author : <NAME> # e-mail : kesiraju[AT]fit[DOT]vutbr[DOT]cz # Date created : 03 Jun 2021 # Last modified : 03 Jun 2021 Get total duration on utterances. If input is utt2dur, the calculation is straightforward. If the input is wav.scp then will use sox command to get the duration of each recording. Get utterance ID to duration (sec) mapping main method # utterance text to utterance ID mapping # utterance ID to duration mapping # uid2text:", # uid2dur:", # nested dict {bin_1: {utt_11: [uid_11]..}, bin_2: {utt_22: [uid_221, uid_222], ...}, ...} parse command line arguments | 2.698177 | 3 |
algorithms/implementation/angry_professor.py | avenet/hackerrank | 0 | 6631003 | cases = int(input())
for _ in range(cases):
n, k = map(
int,
input().split()
)
arrivals = map(int, input().split())
early_comers = len([x for x in arrivals if x <= 0])
if early_comers >= k:
print('NO')
else:
print('YES')
| cases = int(input())
for _ in range(cases):
n, k = map(
int,
input().split()
)
arrivals = map(int, input().split())
early_comers = len([x for x in arrivals if x <= 0])
if early_comers >= k:
print('NO')
else:
print('YES')
| none | 1 | 3.106714 | 3 |
|
Week 5 - 03.03.2021 DAA Lab/ActivitySelection_Day5.py | abhisheks008/Design-and-Analysis-Algorithm-Lab-4th-Semester | 4 | 6631004 | # Author : <NAME>
# Q2. Activity Selection problem using Python 3
# Design analysis and Algorithm Problems
# difficulty : medium
# score : 10
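# Greedy activity selection: assumes the activities are given sorted by finish time,
# then counts how many mutually compatible activities can be scheduled.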
def printMaxActivities(start , finish):
n = len(start)
z = 1
i = 0
for j in range(1,n):
if start[j] >= finish[i]:
z = z + 1
i = j
print (z)
# Author : <NAME>
n = int(input())
start = list(map(int, input().strip().split()))
finish = list(map(int, input().strip().split()))
printMaxActivities(start , finish)
| # Author : <NAME>
# Q2. Activity Selection problem using Python 3
# Design analysis and Algorithm Problems
# difficulty : medium
# score : 10
def printMaxActivities(start , finish):
n = len(start)
z = 1
i = 0
for j in range(1,n):
if start[j] >= finish[i]:
z = z + 1
i = j
print (z)
# Author : <NAME>
n = int(input())
start = list(map(int, input().strip().split()))
finish = list(map(int, input().strip().split()))
printMaxActivities(start , finish)
| en | 0.666973 | # Author : <NAME> # Q2. Activity Selection problem using Python 3 # Design analysis and Algorithm Problems # difficulty : medium # score : 10 # Author : <NAME> | 3.504249 | 4 |
lib/datasets/imagenet.py | j40903272/bottom-up-attention-py3 | 0 | 6631005 | from __future__ import print_function
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import range
import datasets
import datasets.imagenet
import os, sys
from datasets.imdb import imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import pickle
import subprocess
class imagenet(imdb):
def __init__(self, image_set, devkit_path, data_path):
imdb.__init__(self, image_set)
self._image_set = image_set
self._devkit_path = devkit_path
self._data_path = data_path
synsets_image = sio.loadmat(os.path.join(self._devkit_path, 'data', 'meta_det.mat'))
synsets_video = sio.loadmat(os.path.join(self._devkit_path, 'data', 'meta_vid.mat'))
self._classes_image = ('__background__',)
self._wnid_image = (0,)
self._classes = ('__background__',)
self._wnid = (0,)
for i in range(200):
self._classes_image = self._classes_image + (synsets_image['synsets'][0][i][2][0],)
self._wnid_image = self._wnid_image + (synsets_image['synsets'][0][i][1][0],)
for i in range(30):
self._classes = self._classes + (synsets_video['synsets'][0][i][2][0],)
self._wnid = self._wnid + (synsets_video['synsets'][0][i][1][0],)
self._wnid_to_ind_image = dict(list(zip(self._wnid_image, range(201))))
self._class_to_ind_image = dict(list(zip(self._classes_image, range(201))))
self._wnid_to_ind = dict(list(zip(self._wnid, range(31))))
self._class_to_ind = dict(list(zip(self._classes, range(31))))
#check for valid intersection between video and image classes
self._valid_image_flag = [0]*201
for i in range(1,201):
if self._wnid_image[i] in self._wnid_to_ind:
self._valid_image_flag[i] = 1
self._image_ext = ['.JPEG']
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
# Specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'top_k' : 2000}
assert os.path.exists(self._devkit_path), 'Devkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), 'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'Data', self._image_set, index + self._image_ext[0])
assert os.path.exists(image_path), 'path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._data_path + /ImageSets/val.txt
if self._image_set == 'train':
image_set_file = os.path.join(self._data_path, 'ImageSets', 'trainr.txt')
image_index = []
if os.path.exists(image_set_file):
f = open(image_set_file, 'r')
data = f.read().split()
for lines in data:
if lines != '':
image_index.append(lines)
f.close()
return image_index
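            # Otherwise build the list: sample 2000 frame identifiers per video class
            # (classes 1-30), cycling through a shuffled list when a class has fewer.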
for i in range(1,31):
print(i)
image_set_file = os.path.join(self._data_path, 'ImageSets', 'train_' + str(i) + '.txt')
with open(image_set_file) as f:
tmp_index = [x.strip() for x in f.readlines()]
vtmp_index = []
for line in tmp_index:
image_list = os.popen('ls ' + self._data_path + '/Data/train/' + line + '/*.JPEG').read().split()
tmp_list = []
for imgs in image_list:
tmp_list.append(imgs[:-5])
vtmp_index = vtmp_index + tmp_list
num_lines = len(vtmp_index)
ids = np.random.permutation(num_lines)
count = 0
while count < 2000:
image_index.append(vtmp_index[ids[count % num_lines]])
count = count + 1
for i in range(1,201):
if self._valid_image_flag[i] == 1:
image_set_file = os.path.join(self._data_path, 'ImageSets', 'train_pos_' + str(i) + '.txt')
with open(image_set_file) as f:
tmp_index = [x.strip() for x in f.readlines()]
num_lines = len(tmp_index)
ids = np.random.permutation(num_lines)
count = 0
while count < 2000:
image_index.append(tmp_index[ids[count % num_lines]])
count = count + 1
image_set_file = os.path.join(self._data_path, 'ImageSets', 'trainr.txt')
f = open(image_set_file, 'w')
for lines in image_index:
f.write(lines + '\n')
f.close()
else:
image_set_file = os.path.join(self._data_path, 'ImageSets', 'val.txt')
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_imagenet_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _load_imagenet_annotation(self, index):
"""
Load image and bounding boxes info from txt files of imagenet.
"""
filename = os.path.join(self._data_path, 'Annotations', self._image_set, index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
x1 = float(get_data_from_tag(obj, 'xmin'))
y1 = float(get_data_from_tag(obj, 'ymin'))
x2 = float(get_data_from_tag(obj, 'xmax'))
y2 = float(get_data_from_tag(obj, 'ymax'))
cls = self._wnid_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False}
if __name__ == '__main__':
d = datasets.imagenet('val', '')
res = d.roidb
from IPython import embed; embed()
| from __future__ import print_function
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import range
import datasets
import datasets.imagenet
import os, sys
from datasets.imdb import imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import pickle
import subprocess
class imagenet(imdb):
def __init__(self, image_set, devkit_path, data_path):
imdb.__init__(self, image_set)
self._image_set = image_set
self._devkit_path = devkit_path
self._data_path = data_path
synsets_image = sio.loadmat(os.path.join(self._devkit_path, 'data', 'meta_det.mat'))
synsets_video = sio.loadmat(os.path.join(self._devkit_path, 'data', 'meta_vid.mat'))
self._classes_image = ('__background__',)
self._wnid_image = (0,)
self._classes = ('__background__',)
self._wnid = (0,)
for i in range(200):
self._classes_image = self._classes_image + (synsets_image['synsets'][0][i][2][0],)
self._wnid_image = self._wnid_image + (synsets_image['synsets'][0][i][1][0],)
for i in range(30):
self._classes = self._classes + (synsets_video['synsets'][0][i][2][0],)
self._wnid = self._wnid + (synsets_video['synsets'][0][i][1][0],)
self._wnid_to_ind_image = dict(list(zip(self._wnid_image, range(201))))
self._class_to_ind_image = dict(list(zip(self._classes_image, range(201))))
self._wnid_to_ind = dict(list(zip(self._wnid, range(31))))
self._class_to_ind = dict(list(zip(self._classes, range(31))))
#check for valid intersection between video and image classes
self._valid_image_flag = [0]*201
for i in range(1,201):
if self._wnid_image[i] in self._wnid_to_ind:
self._valid_image_flag[i] = 1
self._image_ext = ['.JPEG']
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
# Specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'top_k' : 2000}
assert os.path.exists(self._devkit_path), 'Devkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), 'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'Data', self._image_set, index + self._image_ext[0])
assert os.path.exists(image_path), 'path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._data_path + /ImageSets/val.txt
if self._image_set == 'train':
image_set_file = os.path.join(self._data_path, 'ImageSets', 'trainr.txt')
image_index = []
if os.path.exists(image_set_file):
f = open(image_set_file, 'r')
data = f.read().split()
for lines in data:
if lines != '':
image_index.append(lines)
f.close()
return image_index
for i in range(1,31):
print(i)
image_set_file = os.path.join(self._data_path, 'ImageSets', 'train_' + str(i) + '.txt')
with open(image_set_file) as f:
tmp_index = [x.strip() for x in f.readlines()]
vtmp_index = []
for line in tmp_index:
image_list = os.popen('ls ' + self._data_path + '/Data/train/' + line + '/*.JPEG').read().split()
tmp_list = []
for imgs in image_list:
tmp_list.append(imgs[:-5])
vtmp_index = vtmp_index + tmp_list
num_lines = len(vtmp_index)
ids = np.random.permutation(num_lines)
count = 0
while count < 2000:
image_index.append(vtmp_index[ids[count % num_lines]])
count = count + 1
for i in range(1,201):
if self._valid_image_flag[i] == 1:
image_set_file = os.path.join(self._data_path, 'ImageSets', 'train_pos_' + str(i) + '.txt')
with open(image_set_file) as f:
tmp_index = [x.strip() for x in f.readlines()]
num_lines = len(tmp_index)
ids = np.random.permutation(num_lines)
count = 0
while count < 2000:
image_index.append(tmp_index[ids[count % num_lines]])
count = count + 1
image_set_file = os.path.join(self._data_path, 'ImageSets', 'trainr.txt')
f = open(image_set_file, 'w')
for lines in image_index:
f.write(lines + '\n')
f.close()
else:
image_set_file = os.path.join(self._data_path, 'ImageSets', 'val.txt')
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_imagenet_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _load_imagenet_annotation(self, index):
"""
Load image and bounding boxes info from txt files of imagenet.
"""
filename = os.path.join(self._data_path, 'Annotations', self._image_set, index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
x1 = float(get_data_from_tag(obj, 'xmin'))
y1 = float(get_data_from_tag(obj, 'ymin'))
x2 = float(get_data_from_tag(obj, 'xmax'))
y2 = float(get_data_from_tag(obj, 'ymax'))
cls = self._wnid_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False}
if __name__ == '__main__':
d = datasets.imagenet('val', '')
res = d.roidb
from IPython import embed; embed()
| en | 0.677775 | # -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by <NAME> # -------------------------------------------------------- #check for valid intersection between video and image classes # Default to roidb handler # Specific config options Return the absolute path to image i in the image sequence. Construct an image path from the image's "index" identifier. Load the indexes listed in this dataset's image set file. # Example path to image set file: # self._data_path + /ImageSets/val.txt Return the database of ground-truth regions of interest. This function loads/saves from/to a cache file to speed up future calls. Load image and bounding boxes info from txt files of imagenet. # print 'Loading: {}'.format(filename) # Load object bounding boxes into a data frame. | 2.031024 | 2 |
models/ernie.py | biubiubiiu/SpamClassification | 0 | 6631006 | <filename>models/ernie.py
from torch import nn
from pytorch_pretrained import BertModel
class Ernie(nn.Module):
def __init__(self, config):
super(Ernie, self).__init__()
self.ernie = BertModel.from_pretrained('pretrained/ernie')
for param in self.ernie.parameters():
param.requires_grad = True
self.fc = nn.Linear(config['hidden_size'], config['num_classes'])
def forward(self, x):
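        # x is the batch tuple from the data pipeline: index 0 holds the token ids and
        # index 2 the attention mask (index 1 is not used here).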
context = x[0]
mask = x[2]
_, pooled = self.ernie(context, attention_mask=mask, output_all_encoded_layers=False)
out = self.fc(pooled)
return out
| <filename>models/ernie.py
from torch import nn
from pytorch_pretrained import BertModel
class Ernie(nn.Module):
def __init__(self, config):
super(Ernie, self).__init__()
self.ernie = BertModel.from_pretrained('pretrained/ernie')
for param in self.ernie.parameters():
param.requires_grad = True
self.fc = nn.Linear(config['hidden_size'], config['num_classes'])
def forward(self, x):
context = x[0]
mask = x[2]
_, pooled = self.ernie(context, attention_mask=mask, output_all_encoded_layers=False)
out = self.fc(pooled)
return out
| none | 1 | 2.674528 | 3 |
|
setup.py | LeoXing1996/GeNeVA | 1 | 6631007 | <reponame>LeoXing1996/GeNeVA
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from setuptools import setup
from setuptools import find_packages
setup(
name='GeNeVA',
version='1.0',
url='http://github.com/Maluuba/GeNeVA',
author='Microsoft Research',
description='Code to train and evaluate the GeNeVA-GAN model and the object detector and localizer for GeNeVA metrics',
# packages=['geneva'],
packages=find_packages(),
extras_require=dict(
dev=['pytest', 'pytest-flake8', 'flake8<3.6', 'flaky'],
),
)
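# Development extras can be installed with: pip install -e ".[dev]"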
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from setuptools import setup
from setuptools import find_packages
setup(
name='GeNeVA',
version='1.0',
url='http://github.com/Maluuba/GeNeVA',
author='Microsoft Research',
description='Code to train and evaluate the GeNeVA-GAN model and the object detector and localizer for GeNeVA metrics',
# packages=['geneva'],
packages=find_packages(),
extras_require=dict(
dev=['pytest', 'pytest-flake8', 'flake8<3.6', 'flaky'],
),
) | en | 0.827842 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # packages=['geneva'], | 0.936702 | 1 |
src/gtk/toga_gtk/widgets/numberinput.py | jrwdunham/toga | 0 | 6631008 | <gh_stars>0
from gi.repository import Gtk
from toga.interface import NumberInput as NumberInputInterface
from .base import WidgetMixin
class NumberInput(WidgetMixin, NumberInputInterface):
def __init__(self, id=None, style=None, min_value=0, max_value=100, step=1,
**ex):
super().__init__(id=id, style=style, min_value=min_value,
max_value=max_value, step=step, **ex)
self._create()
def create(self):
adjustment = Gtk.Adjustment(0, self._min_value, self._max_value,
self._step, 10, 0)
self._impl = Gtk.SpinButton()
self._impl.set_adjustment(adjustment)
self._impl.set_numeric(True)
self._impl._interface = self
self.rehint()
def _get_value(self):
return self._impl.get_value()
def _set_value(self, value):
self._impl.set_value(value)
def rehint(self):
self.style.min_width = 120
self.style.height = 32
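# Typical construction goes through the toga interface layer, e.g.
#   toga.NumberInput(min_value=0, max_value=10, step=1)
# (keyword names as accepted by __init__ above).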
| from gi.repository import Gtk
from toga.interface import NumberInput as NumberInputInterface
from .base import WidgetMixin
class NumberInput(WidgetMixin, NumberInputInterface):
def __init__(self, id=None, style=None, min_value=0, max_value=100, step=1,
**ex):
super().__init__(id=id, style=style, min_value=min_value,
max_value=max_value, step=step, **ex)
self._create()
def create(self):
adjustment = Gtk.Adjustment(0, self._min_value, self._max_value,
self._step, 10, 0)
self._impl = Gtk.SpinButton()
self._impl.set_adjustment(adjustment)
self._impl.set_numeric(True)
self._impl._interface = self
self.rehint()
def _get_value(self):
return self._impl.get_value()
def _set_value(self, value):
self._impl.set_value(value)
def rehint(self):
self.style.min_width = 120
self.style.height = 32 | none | 1 | 2.33493 | 2 |
|
voyager/resources/fireballresource.py | marwynnsomridhivej/voyager | 1 | 6631009 | import datetime
from asyncio.events import AbstractEventLoop
from typing import Generator, List, Union
from ..exceptions import VoyagerException
from .base import BaseResource
__all__ = [
'FireballResource',
]
class FireballRecord(object):
__slots__ = [
'_fc',
'_date',
'_lat',
'_lon',
'_lat_dir',
'_lon_dir',
'_alt',
'_vel',
'_energy',
'_impact_e',
'_vx',
'_vy',
'_vz',
]
_FIELDS = [
'date',
'lat',
'lon',
'lat-dir',
'lon-dir',
'alt',
'vel',
'energy',
'impact-e',
'vx',
'vy',
'vz',
]
_cache = {}
    def __init__(self, data: List[str], fields: List[str]) -> None:
        self._fc = self._FIELDS.copy()
        # work on a per-instance copy so creating a record never mutates the class-level list
        remaining = list(self._fc)
        for field, value in zip(fields, data):
            setattr(self, f"_{field.replace('-', '_')}", value)
            remaining.remove(field)
        for unset in remaining:
            setattr(self, f"_{unset.replace('-', '_')}", None)
    def __len__(self) -> int:
        # number of fields that were actually populated for this record
        return sum(
            1 for field in self._fc
            if getattr(self, f"_{field.replace('-', '_')}") is not None
        )
@property
def date(self) -> Union[str, None]:
return self._date
@property
def datetime(self) -> Union[datetime.datetime, None]:
if not self._date:
return None
return datetime.datetime.strptime(self._date, "%Y-%m-%d %H:%M:%S")
@property
def lat(self) -> Union[float, None]:
if not self._lat:
return None
return float(self._lat)
@property
def latitude(self) -> Union[float, None]:
return self.lat
@property
def lon(self) -> Union[float, None]:
if not self._lon:
return None
return float(self._lon)
@property
def longitude(self) -> Union[float, None]:
return self.lon
@property
def lat_dir(self) -> Union[str, None]:
return self._lat_dir
@property
def latitude_dir(self) -> Union[str, None]:
return self.lat_dir
@property
def lon_dir(self) -> Union[str, None]:
return self._lon_dir
@property
def longitude_dir(self) -> Union[str, None]:
return self.lon_dir
@property
def alt(self) -> Union[float, None]:
if not self._alt:
return None
return float(self._alt)
@property
def altitude(self) -> Union[float, None]:
return self.alt
@property
def vel(self) -> Union[float, None]:
if not self._vel:
return None
return float(self._vel)
@property
def velocity(self) -> Union[float, None]:
return self.vel
@property
def energy(self) -> Union[float, None]:
if not self._energy:
return None
return float(self._energy)
@property
def impact_e(self) -> Union[float, None]:
if not self._impact_e:
return None
return float(self._impact_e)
@property
def impact_energy(self) -> Union[float, None]:
return self.impact_e
@property
def vx(self) -> Union[float, None]:
if not self._vx:
return None
return float(self._vx)
@property
def velocity_x(self) -> Union[float, None]:
return self.vx
@property
def vy(self) -> Union[float, None]:
if not self._vy:
return None
return float(self._vy)
@property
def velocity_y(self) -> Union[float, None]:
return self.vy
@property
def vz(self) -> Union[float, None]:
if not self._vz:
return None
        return float(self._vz)
@property
def velocity_z(self) -> Union[float, None]:
return self.vz
def _process_dict(self) -> dict:
return {field: getattr(self, f"_{field.replace('-', '_')}") for field in self._fc}
@property
def to_dict(self) -> dict:
if self not in self._cache:
self._cache[self] = self._process_dict()
return self._cache[self]
@classmethod
def from_dict(cls, data: dict) -> "FireballRecord":
if not all((key in cls._FIELDS for key in data)):
raise VoyagerException("Malformed input. Invalid key(s) supplied")
return cls([value for value in data.values()], [key for key in data])
class FireballResource(BaseResource):
__slots__ = [
'_signature',
'_count',
'_fields',
'_data',
]
_cache = {}
def __init__(self, data: dict,
loop: AbstractEventLoop = None) -> None:
super(FireballResource, self).__init__(data, loop=loop)
self._signature = data.get("signature")
self._count = data.get("count")
self._fields = data.get("fields")
self._data = data
def __len__(self) -> int:
return self.count
def __iter__(self):
return self
def __next__(self):
for fb in self.data:
yield fb
@property
def signature(self) -> str:
return self._signature
@property
def source(self) -> str:
return self._signature.get("source")
@property
def version(self) -> str:
return self._signature.get("version")
@property
def count(self) -> int:
return int(self._count)
@property
def fields(self) -> List[str]:
return self._fields
def _process_fb_data(self) -> Union[Generator[FireballRecord, None, None], FireballRecord, None]:
if not (fb := self._data.get("data")):
return None
elif len(fb) != 1:
for values in fb:
yield FireballRecord(values, self._fields)
else:
return FireballRecord(fb[0], self._fields)
@property
def data(self) -> Union[Generator[FireballRecord, None, None], FireballRecord, None]:
if self not in self._cache:
self._cache[self] = self._process_fb_data()
return self._cache[self]
@property
def to_dict(self) -> dict:
return self._data
@classmethod
def from_dict(cls, data: dict,
loop: AbstractEventLoop = None) -> "FireballResource":
return cls(data, loop=loop)
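# Illustrative usage (the dict would typically come from the JPL fireball API response,
# fetched by an HTTP client elsewhere in this package):
#
#   resource = FireballResource.from_dict(payload)
#   for record in resource.data:
#       print(record.datetime, record.energy, record.lat, record.lon)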
| import datetime
from asyncio.events import AbstractEventLoop
from typing import Generator, List, Union
from ..exceptions import VoyagerException
from .base import BaseResource
__all__ = [
'FireballResource',
]
class FireballRecord(object):
__slots__ = [
'_fc',
'_date',
'_lat',
'_lon',
'_lat_dir',
'_lon_dir',
'_alt',
'_vel',
'_energy',
'_impact_e',
'_vx',
'_vy',
'_vz',
]
_FIELDS = [
'date',
'lat',
'lon',
'lat-dir',
'lon-dir',
'alt',
'vel',
'energy',
'impact-e',
'vx',
'vy',
'vz',
]
_cache = {}
    def __init__(self, data: List[str], fields: List[str]) -> None:
        self._fc = self._FIELDS.copy()
        # work on a per-instance copy so creating a record never mutates the class-level list
        remaining = list(self._fc)
        for field, value in zip(fields, data):
            setattr(self, f"_{field.replace('-', '_')}", value)
            remaining.remove(field)
        for unset in remaining:
            setattr(self, f"_{unset.replace('-', '_')}", None)
    def __len__(self) -> int:
        # number of fields that were actually populated for this record
        return sum(
            1 for field in self._fc
            if getattr(self, f"_{field.replace('-', '_')}") is not None
        )
@property
def date(self) -> Union[str, None]:
return self._date
@property
def datetime(self) -> Union[datetime.datetime, None]:
if not self._date:
return None
return datetime.datetime.strptime(self._date, "%Y-%m-%d %H:%M:%S")
@property
def lat(self) -> Union[float, None]:
if not self._lat:
return None
return float(self._lat)
@property
def latitude(self) -> Union[float, None]:
return self.lat
@property
def lon(self) -> Union[float, None]:
if not self._lon:
return None
return float(self._lon)
@property
def longitude(self) -> Union[float, None]:
return self.lon
@property
def lat_dir(self) -> Union[str, None]:
return self._lat_dir
@property
def latitude_dir(self) -> Union[str, None]:
return self.lat_dir
@property
def lon_dir(self) -> Union[str, None]:
return self._lon_dir
@property
def longitude_dir(self) -> Union[str, None]:
return self.lon_dir
@property
def alt(self) -> Union[float, None]:
if not self._alt:
return None
return float(self._alt)
@property
def altitude(self) -> Union[float, None]:
return self.alt
@property
def vel(self) -> Union[float, None]:
if not self._vel:
return None
return float(self._vel)
@property
def velocity(self) -> Union[float, None]:
return self.vel
@property
def energy(self) -> Union[float, None]:
if not self._energy:
return None
return float(self._energy)
@property
def impact_e(self) -> Union[float, None]:
if not self._impact_e:
return None
return float(self._impact_e)
@property
def impact_energy(self) -> Union[float, None]:
return self.impact_e
@property
def vx(self) -> Union[float, None]:
if not self._vx:
return None
return float(self._vx)
@property
def velocity_x(self) -> Union[float, None]:
return self.vx
@property
def vy(self) -> Union[float, None]:
if not self._vy:
return None
return float(self._vy)
@property
def velocity_y(self) -> Union[float, None]:
return self.vy
@property
def vz(self) -> Union[float, None]:
if not self._vz:
return None
        return float(self._vz)
@property
def velocity_z(self) -> Union[float, None]:
return self.vz
def _process_dict(self) -> dict:
return {field: getattr(self, f"_{field.replace('-', '_')}") for field in self._fc}
@property
def to_dict(self) -> dict:
if self not in self._cache:
self._cache[self] = self._process_dict()
return self._cache[self]
@classmethod
def from_dict(cls, data: dict) -> "FireballRecord":
if not all((key in cls._FIELDS for key in data)):
raise VoyagerException("Malformed input. Invalid key(s) supplied")
return cls([value for value in data.values()], [key for key in data])
class FireballResource(BaseResource):
__slots__ = [
'_signature',
'_count',
'_fields',
'_data',
]
_cache = {}
def __init__(self, data: dict,
loop: AbstractEventLoop = None) -> None:
super(FireballResource, self).__init__(data, loop=loop)
self._signature = data.get("signature")
self._count = data.get("count")
self._fields = data.get("fields")
self._data = data
def __len__(self) -> int:
return self.count
def __iter__(self):
return self
def __next__(self):
for fb in self.data:
yield fb
@property
def signature(self) -> str:
return self._signature
@property
def source(self) -> str:
return self._signature.get("source")
@property
def version(self) -> str:
return self._signature.get("version")
@property
def count(self) -> int:
return int(self._count)
@property
def fields(self) -> List[str]:
return self._fields
def _process_fb_data(self) -> Union[Generator[FireballRecord, None, None], FireballRecord, None]:
if not (fb := self._data.get("data")):
return None
elif len(fb) != 1:
for values in fb:
yield FireballRecord(values, self._fields)
else:
return FireballRecord(fb[0], self._fields)
@property
def data(self) -> Union[Generator[FireballRecord, None, None], FireballRecord, None]:
if self not in self._cache:
self._cache[self] = self._process_fb_data()
return self._cache[self]
@property
def to_dict(self) -> dict:
return self._data
@classmethod
def from_dict(cls, data: dict,
loop: AbstractEventLoop = None) -> "FireballResource":
return cls(data, loop=loop)
| none | 1 | 2.066879 | 2 |
|
neps/minerador.py | matheusdomis/OBI | 2 | 6631010 | <gh_stars>1-10
nm = [int(x) for x in input().split()]
v = [float(x) for x in input().split()]
g = [float(x) for x in input().split()]
lmax = [0,0]
lmin = [float('inf'),0]
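# For each level i (reported 1-based), the value is sum(g[:i+1]) * v[i] * nm[1];
# track the maximum and minimum over all nm[0] levels.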
for i in range(nm[0]):
l = sum(g[:i+1]) * v[i] * nm[1]
if l > lmax[0]:
lmax[0] = l
lmax[1] = i+1
if l < lmin[0]:
lmin[0] = l
lmin[1] = i+1
print("%d %.2f"%(lmax[1],lmax[0]))
print("%d %.2f"%(lmin[1],lmin[0]))
| nm = [int(x) for x in input().split()]
v = [float(x) for x in input().split()]
g = [float(x) for x in input().split()]
lmax = [0,0]
lmin = [float('inf'),0]
for i in range(nm[0]):
l = sum(g[:i+1]) * v[i] * nm[1]
if l > lmax[0]:
lmax[0] = l
lmax[1] = i+1
if l < lmin[0]:
lmin[0] = l
lmin[1] = i+1
print("%d %.2f"%(lmax[1],lmax[0]))
print("%d %.2f"%(lmin[1],lmin[0])) | none | 1 | 2.951248 | 3 |
|
tools/train_net.py | dylan-campbell/Motionformer | 153 | 6631011 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Train a video classification model."""
import numpy as np
import pickle
import pprint
from timm.data import Mixup
import torch
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
from slowfast.config.defaults import get_cfg
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.models import build_model
from slowfast.utils.meters import TrainMeter, ValMeter, EPICTrainMeter, EPICValMeter
from slowfast.utils.multigrid import MultigridSchedule
from timm.utils import NativeScaler
logger = logging.get_logger(__name__)
def train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg,
writer=None, loss_scaler=None, loss_fun=None, mixup_fn=None
):
"""
Perform the video training for one epoch.
Args:
train_loader (loader): video training loader.
model (model): the video model to train.
optimizer (optim): the optimizer to perform optimization on the model's
parameters.
train_meter (TrainMeter): training meters to log the training performance.
cur_epoch (int): current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
to writer Tensorboard log.
"""
# Enable train mode.
model.train()
train_meter.iter_tic()
data_size = len(train_loader)
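    # Each batch is (inputs, labels, index, meta); inputs may be a list of tensors
    # (e.g. one tensor per pathway).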
for cur_iter, (inputs, labels, index, meta) in enumerate(train_loader):
# Transfer the data to the current GPU device.
if cfg.NUM_GPUS:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
if not isinstance(val[i], (str,)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
if mixup_fn is not None:
labels = labels.cuda()
inputs, labels = mixup_fn(inputs[0], labels)
inputs = [inputs]
# Update the learning rate.
lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
optim.set_lr(optimizer, lr)
train_meter.data_toc()
with torch.cuda.amp.autocast(enabled=cfg.SOLVER.USE_MIXED_PRECISION):
preds = model(inputs)
if mixup_fn is None:
if isinstance(labels, (dict,)):
labels = {k: v.cuda() for k, v in labels.items()}
else:
labels = labels.cuda()
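        # Global iteration index (batches seen across all epochs); the same expression is
        # reused as the Tensorboard global_step in the writer calls below.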
global_step = data_size * cur_epoch + cur_iter
if isinstance(labels, (dict,)) and cfg.TRAIN.DATASET == "Epickitchens":
# Compute the loss.
loss_verb = loss_fun(preds[0], labels['verb'])
loss_noun = loss_fun(preds[1], labels['noun'])
loss = 0.5 * (loss_verb + loss_noun)
else:
loss = loss_fun(preds, labels)
            # Check for NaN loss.
misc.check_nan_losses(loss)
# Perform the backward pass.
optimizer.zero_grad()
if cfg.SOLVER.USE_MIXED_PRECISION: # Mixed Precision Training
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=cfg.SOLVER.CLIP_GRAD,
parameters=model.parameters(), create_graph=is_second_order)
else:
loss.backward()
# Update the parameters.
optimizer.step()
if cfg.DETECTION.ENABLE:
if cfg.NUM_GPUS > 1:
loss = du.all_reduce([loss])[0]
loss = loss.item()
# Update and log stats.
train_meter.update_stats(None, None, None, loss, lr)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Train/loss": loss, "Train/lr": lr},
global_step=data_size * cur_epoch + cur_iter,
)
else:
top1_err, top5_err = None, None
if isinstance(labels, (dict,)) and cfg.TRAIN.DATASET == "Epickitchens":
# Compute the verb accuracies.
verb_top1_acc, verb_top5_acc = metrics.topk_accuracies(
preds[0], labels['verb'], (1, 5))
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss_verb, verb_top1_acc, verb_top5_acc = du.all_reduce(
[loss_verb, verb_top1_acc, verb_top5_acc]
)
# Copy the stats from GPU to CPU (sync point).
loss_verb, verb_top1_acc, verb_top5_acc = (
loss_verb.item(),
verb_top1_acc.item(),
verb_top5_acc.item(),
)
# Compute the noun accuracies.
noun_top1_acc, noun_top5_acc = metrics.topk_accuracies(
preds[1], labels['noun'], (1, 5))
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss_noun, noun_top1_acc, noun_top5_acc = du.all_reduce(
[loss_noun, noun_top1_acc, noun_top5_acc]
)
# Copy the stats from GPU to CPU (sync point).
loss_noun, noun_top1_acc, noun_top5_acc = (
loss_noun.item(),
noun_top1_acc.item(),
noun_top5_acc.item(),
)
# Compute the action accuracies.
action_top1_acc, action_top5_acc = metrics.multitask_topk_accuracies(
(preds[0], preds[1]),
(labels['verb'], labels['noun']),
(1, 5))
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss, action_top1_acc, action_top5_acc = du.all_reduce(
[loss, action_top1_acc, action_top5_acc]
)
# Copy the stats from GPU to CPU (sync point).
loss, action_top1_acc, action_top5_acc = (
loss.item(),
action_top1_acc.item(),
action_top5_acc.item(),
)
# Update and log stats.
train_meter.update_stats(
(verb_top1_acc, noun_top1_acc, action_top1_acc),
(verb_top5_acc, noun_top5_acc, action_top5_acc),
(loss_verb, loss_noun, loss),
lr, inputs[0].size(0) * cfg.NUM_GPUS
)
else:
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point).
loss, top1_err, top5_err = (
loss.item(),
top1_err.item(),
top5_err.item(),
)
# Update and log stats.
train_meter.update_stats(
top1_err,
top5_err,
loss,
lr,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
),
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{
"Train/loss": loss,
"Train/lr": lr,
},
global_step=data_size * cur_epoch + cur_iter,
)
if isinstance(labels, (dict,)) and cfg.TRAIN.DATASET == "Epickitchens":
writer.add_scalars(
{
"Train/verb_top1_acc": verb_top1_acc,
"Train/verb_top5_acc": verb_top5_acc,
"Train/noun_top1_acc": noun_top1_acc,
"Train/noun_top5_acc": noun_top5_acc,
"Train/action_top1_acc": action_top1_acc,
"Train/action_top5_acc": action_top5_acc,
},
global_step=data_size * cur_epoch + cur_iter,
)
else:
writer.add_scalars(
{
"Train/Top1_err": top1_err if top1_err is not None else 0.0,
"Train/Top5_err": top5_err if top5_err is not None else 0.0,
},
global_step=data_size * cur_epoch + cur_iter,
)
train_meter.iter_toc() # measure allreduce for this meter
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats.
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
@torch.no_grad()
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer=None):
"""
Evaluate the model on the val set.
Args:
val_loader (loader): data loader to provide validation data.
model (model): model to evaluate the performance.
val_meter (ValMeter): meter instance to record and calculate the metrics.
cur_epoch (int): number of the current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
            to write Tensorboard logs.
"""
# Evaluation mode enabled. The running stats would not be updated.
model.eval()
val_meter.iter_tic()
for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
if isinstance(labels, (dict,)):
labels = {k: v.cuda() for k, v in labels.items()}
else:
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
if not isinstance(val[i], (str,)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
val_meter.data_toc()
with torch.cuda.amp.autocast(enabled=cfg.SOLVER.USE_MIXED_PRECISION):
preds = model(inputs)
if isinstance(labels, (dict,)) and cfg.TRAIN.DATASET == "Epickitchens":
# Compute the verb accuracies.
verb_top1_acc, verb_top5_acc = metrics.topk_accuracies(
preds[0], labels['verb'], (1, 5))
# Combine the errors across the GPUs.
if cfg.NUM_GPUS > 1:
verb_top1_acc, verb_top5_acc = du.all_reduce(
[verb_top1_acc, verb_top5_acc])
# Copy the errors from GPU to CPU (sync point).
verb_top1_acc, verb_top5_acc = verb_top1_acc.item(), verb_top5_acc.item()
# Compute the noun accuracies.
noun_top1_acc, noun_top5_acc = metrics.topk_accuracies(
preds[1], labels['noun'], (1, 5))
# Combine the errors across the GPUs.
if cfg.NUM_GPUS > 1:
noun_top1_acc, noun_top5_acc = du.all_reduce(
[noun_top1_acc, noun_top5_acc])
# Copy the errors from GPU to CPU (sync point).
noun_top1_acc, noun_top5_acc = noun_top1_acc.item(), noun_top5_acc.item()
# Compute the action accuracies.
action_top1_acc, action_top5_acc = metrics.multitask_topk_accuracies(
(preds[0], preds[1]),
(labels['verb'], labels['noun']),
(1, 5))
# Combine the errors across the GPUs.
if cfg.NUM_GPUS > 1:
action_top1_acc, action_top5_acc = du.all_reduce([action_top1_acc, action_top5_acc])
# Copy the errors from GPU to CPU (sync point).
action_top1_acc, action_top5_acc = action_top1_acc.item(), action_top5_acc.item()
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(
(verb_top1_acc, noun_top1_acc, action_top1_acc),
(verb_top5_acc, noun_top5_acc, action_top5_acc),
inputs[0].size(0) * cfg.NUM_GPUS
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{
"Val/verb_top1_acc": verb_top1_acc,
"Val/verb_top5_acc": verb_top5_acc,
"Val/noun_top1_acc": noun_top1_acc,
"Val/noun_top5_acc": noun_top5_acc,
"Val/action_top1_acc": action_top1_acc,
"Val/action_top5_acc": action_top5_acc,
},
global_step=len(val_loader) * cur_epoch + cur_iter,
)
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
# Combine the errors across the GPUs.
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point).
top1_err, top5_err = top1_err.item(), top5_err.item()
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(
top1_err,
top5_err,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
),
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
global_step=len(val_loader) * cur_epoch + cur_iter,
)
val_meter.update_predictions(preds, labels)
val_meter.log_iter_stats(cur_epoch, cur_iter)
val_meter.iter_tic()
# Log epoch stats.
val_meter.log_epoch_stats(cur_epoch)
# write to tensorboard format if available.
if writer is not None:
all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
all_labels = [
label.clone().detach() for label in val_meter.all_labels
]
if cfg.NUM_GPUS:
all_preds = [pred.cpu() for pred in all_preds]
all_labels = [label.cpu() for label in all_labels]
writer.plot_eval(
preds=all_preds, labels=all_labels, global_step=cur_epoch
)
val_meter.reset()
def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
"""
    Update the stats in BN layers by calculating the precise stats.
Args:
loader (loader): data loader to provide training data.
model (model): model to update the bn stats.
num_iters (int): number of iterations to compute and update the bn stats.
use_gpu (bool): whether to use GPU or not.
"""
def _gen_loader():
for inputs, *_ in loader:
if use_gpu:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
yield inputs
# Update the bn stats.
update_bn_stats(model, _gen_loader(), num_iters)
def build_trainer(cfg):
"""
Build training model and its associated tools, including optimizer,
dataloaders and meters.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
Returns:
model (nn.Module): training model.
optimizer (Optimizer): optimizer.
train_loader (DataLoader): training data loader.
        val_loader (DataLoader): validation data loader.
precise_bn_loader (DataLoader): training data loader for computing
precise BN.
train_meter (TrainMeter): tool for measuring training stats.
val_meter (ValMeter): tool for measuring validation stats.
"""
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO and cfg.DATA.INPUT_TYPE == 'rgb':
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = loader.construct_loader(
cfg, "train", is_precise_bn=True
)
# Create meters.
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
return (
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
)
def train(cfg):
"""
Train a video model for many epochs on train set and evaluate it on val set.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Set up environment.
du.init_distributed_training(cfg)
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Init multigrid.
multigrid = None
if cfg.MULTIGRID.LONG_CYCLE or cfg.MULTIGRID.SHORT_CYCLE:
multigrid = MultigridSchedule()
cfg = multigrid.init_multigrid(cfg)
if cfg.MULTIGRID.LONG_CYCLE:
cfg, _ = multigrid.update_long_cycle(cfg, cur_epoch=0)
# Print config.
logger.info("Train with config:")
logger.info(pprint.pformat(cfg))
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Mixed Precision Training Scaler
if cfg.SOLVER.USE_MIXED_PRECISION:
loss_scaler = NativeScaler()
else:
loss_scaler = None
# Load a checkpoint to resume training if applicable.
start_epoch = cu.load_train_checkpoint(
cfg, model, optimizer, loss_scaler=loss_scaler)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = (
loader.construct_loader(cfg, "train", is_precise_bn=True)
if cfg.BN.USE_PRECISE_STATS
else None
)
# Create meters.
if cfg.TRAIN.DATASET == 'Epickitchens':
train_meter = EPICTrainMeter(len(train_loader), cfg)
val_meter = EPICValMeter(len(val_loader), cfg)
else:
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
# set up writer for logging to Tensorboard format.
if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
cfg.NUM_GPUS * cfg.NUM_SHARDS
):
writer = tb.TensorboardWriter(cfg)
else:
writer = None
# Perform the training loop.
logger.info("Start epoch: {}".format(start_epoch + 1))
mixup_fn = None
mixup_active = cfg.MIXUP.MIXUP_ALPHA > 0 or cfg.MIXUP.CUTMIX_ALPHA > 0 or cfg.MIXUP.CUTMIX_MINMAX is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=cfg.MIXUP.MIXUP_ALPHA,
cutmix_alpha=cfg.MIXUP.CUTMIX_ALPHA,
cutmix_minmax=cfg.MIXUP.CUTMIX_MINMAX,
prob=cfg.MIXUP.MIXUP_PROB,
switch_prob=cfg.MIXUP.MIXUP_SWITCH_PROB,
mode=cfg.MIXUP.MIXUP_MODE,
label_smoothing=cfg.SOLVER.SMOOTHING,
num_classes=cfg.MODEL.NUM_CLASSES
)
# Explicitly declare reduction to mean.
if cfg.MIXUP.MIXUP_ALPHA > 0.:
# smoothing is handled with mixup label transform
loss_fun = losses.get_loss_func("soft_target_cross_entropy")()
elif cfg.SOLVER.SMOOTHING > 0.0:
loss_fun = losses.get_loss_func("label_smoothing_cross_entropy")(
smoothing=cfg.SOLVER.SMOOTHING)
else:
loss_fun = losses.get_loss_func(cfg.MODEL.LOSS_FUNC)(reduction="mean")
for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
if cfg.MULTIGRID.LONG_CYCLE:
cfg, changed = multigrid.update_long_cycle(cfg, cur_epoch)
if changed:
(
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
) = build_trainer(cfg)
# Load checkpoint.
if cu.has_checkpoint(cfg.OUTPUT_DIR):
last_checkpoint = cu.get_last_checkpoint(cfg.OUTPUT_DIR)
assert "{:05d}.pyth".format(cur_epoch) in last_checkpoint
else:
last_checkpoint = cfg.TRAIN.CHECKPOINT_FILE_PATH
logger.info("Load from {}".format(last_checkpoint))
cu.load_checkpoint(
last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer
)
# Shuffle the dataset.
loader.shuffle_dataset(train_loader, cur_epoch)
# Train for one epoch.
train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer,
loss_scaler=loss_scaler, loss_fun=loss_fun, mixup_fn=mixup_fn)
is_checkp_epoch = cu.is_checkpoint_epoch(
cfg,
cur_epoch,
None if multigrid is None else multigrid.schedule,
)
is_eval_epoch = misc.is_eval_epoch(
cfg, cur_epoch, None if multigrid is None else multigrid.schedule
)
# Compute precise BN stats.
if (
(is_checkp_epoch or is_eval_epoch)
and cfg.BN.USE_PRECISE_STATS
and len(get_bn_modules(model)) > 0
):
calculate_and_update_precise_bn(
precise_bn_loader,
model,
min(cfg.BN.NUM_BATCHES_PRECISE, len(precise_bn_loader)),
cfg.NUM_GPUS > 0,
)
_ = misc.aggregate_sub_bn_stats(model)
# Save a checkpoint.
if is_checkp_epoch:
cu.save_checkpoint(cfg.OUTPUT_DIR, model, optimizer, cur_epoch, cfg,
loss_scaler=loss_scaler)
# Evaluate the model on validation set.
if is_eval_epoch:
eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer)
if writer is not None:
writer.close()
| en | 0.779146 | #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. Train a video classification model. Perform the video training for one epoch. Args: train_loader (loader): video training loader. model (model): the video model to train. optimizer (optim): the optimizer to perform optimization on the model's parameters. train_meter (TrainMeter): training meters to log the training performance. cur_epoch (int): current epoch of training. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py writer (TensorboardWriter, optional): TensorboardWriter object to writer Tensorboard log. # Enable train mode. # Transfer the data to the current GPU device. # Update the learning rate. # Compute the loss. # check Nan Loss. # Perform the backward pass. # Mixed Precision Training # Update the parameters. # Update and log stats. # write to tensorboard format if available. # Compute the verb accuracies. # Gather all the predictions across all the devices. # Copy the stats from GPU to CPU (sync point). # Compute the noun accuracies. # Gather all the predictions across all the devices. # Copy the stats from GPU to CPU (sync point). # Compute the action accuracies. # Gather all the predictions across all the devices. # Copy the stats from GPU to CPU (sync point). # Update and log stats. # Gather all the predictions across all the devices. # Copy the stats from GPU to CPU (sync point). # Update and log stats. # write to tensorboard format if available. # measure allreduce for this meter # Log epoch stats. Evaluate the model on the val set. Args: val_loader (loader): data loader to provide validation data. model (model): model to evaluate the performance. val_meter (ValMeter): meter instance to record and calculate the metrics. cur_epoch (int): number of the current epoch of training. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py writer (TensorboardWriter, optional): TensorboardWriter object to writer Tensorboard log. # Evaluation mode enabled. The running stats would not be updated. # Transferthe data to the current GPU device. # Compute the verb accuracies. # Combine the errors across the GPUs. # Copy the errors from GPU to CPU (sync point). # Compute the noun accuracies. # Combine the errors across the GPUs. # Copy the errors from GPU to CPU (sync point). # Compute the action accuracies. # Combine the errors across the GPUs. # Copy the errors from GPU to CPU (sync point). # Update and log stats. # write to tensorboard format if available. # Compute the errors. # Combine the errors across the GPUs. # Copy the errors from GPU to CPU (sync point). # Update and log stats. # write to tensorboard format if available. # Log epoch stats. # write to tensorboard format if available. Update the stats in bn layers by calculate the precise stats. Args: loader (loader): data loader to provide training data. model (model): model to update the bn stats. num_iters (int): number of iterations to compute and update the bn stats. use_gpu (bool): whether to use GPU or not. # Update the bn stats. Build training model and its associated tools, including optimizer, dataloaders and meters. Args: cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py Returns: model (nn.Module): training model. optimizer (Optimizer): optimizer. train_loader (DataLoader): training data loader. val_loader (DataLoader): validatoin data loader. precise_bn_loader (DataLoader): training data loader for computing precise BN. 
train_meter (TrainMeter): tool for measuring training stats. val_meter (ValMeter): tool for measuring validation stats. # Build the video model and print model statistics. # Construct the optimizer. # Create the video train and val loaders. # Create meters. Train a video model for many epochs on train set and evaluate it on val set. Args: cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py # Set up environment. # Set random seed from configs. # Setup logging format. # Init multigrid. # Print config. # Build the video model and print model statistics. # Construct the optimizer. # Mixed Precision Training Scaler # Load a checkpoint to resume training if applicable. # Create the video train and val loaders. # Create meters. # set up writer for logging to Tensorboard format. # Perform the training loop. # Explicitly declare reduction to mean. # smoothing is handled with mixup label transform # Load checkpoint. # Shuffle the dataset. # Train for one epoch. # Compute precise BN stats. # Save a checkpoint. # Evaluate the model on validation set. | 2.282494 | 2 |
MyExtenstion.extension/Gaochao.tab/Gaochao.panel/Structure_Create.pulldown/Beam_From_CAD.pushbutton/script(bake).py | gaochaowyq/MyPyRevitExtentision | 0 | 6631012 | # -*- coding: utf-8 -*-
__doc__="根据导入的CAD绘制结构梁"
import sys
import os
from collections import namedtuple
from Autodesk.Revit.DB.Architecture import Room
import rpw
from rpw import doc, uidoc, DB, UI, db, ui
from rpw.ui.forms import FlexForm, Label, ComboBox, TextBox, TextBox,Separator, Button,SelectFromList
import json
from MyLib import Helper
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
Picked= uidoc.Selection.PickObject(UI.Selection.ObjectType.Element)
PickedElementId=Picked.ElementId
Picked_Selection=db.Element.from_id(PickedElementId)
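# The picked element should be the imported CAD (DWG) instance; its curves,
# layer names and colours are extracted from its geometry further below.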
# User input section
Framing_types = rpw.db.Collector(of_category='OST_StructuralFraming', is_type=True).elements
Framing_type_options = {t.FamilyName+DB.Element.Name.GetValue(t): t for t in Framing_types}
Level_type=db.Collector(of_category='Levels', is_type=False).elements
Level_type_options = {DB.Element.Name.GetValue(t): t for t in Level_type}
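# Map display names to the actual framing-type/level elements so the FlexForm
# dropdowns can return the selected Revit objects directly.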
components = [
Label('输入图层名称'),
TextBox('图层名称', Text="S-STEL-BEAM"),
Label('构件名称'),
ComboBox('FamilyName', Framing_type_options),
Label('标高'),
ComboBox('Level', Level_type_options),
Label('偏移标高'),
TextBox('Offset', Text="-300"),
Button('确定')
]
form = FlexForm('结构', components)
form.show()
Value=form.values
LayerName=Value['图层名称']
FamilyName=Value['FamilyName']
Level=Value['Level']
Offset=Helper.MmToFeet(float(Value['Offset']))
#
def Draw_LinesfromPoints(Points):
pass
def Old_ConvertRevitCurves(xcrv):
    # Legacy converter kept for reference; superseded by _ConvertRevitCurves below.
    if str(xcrv.GetType()) != "Autodesk.Revit.DB.PolyLine":
        rtn = xcrv
    else:
        pt = []
        for abc in xcrv.GetCoordinates():
            pt.append(abc)
        lines = []
        for i in range(0, len(pt) - 1):
            lines.append(DB.Line.CreateBound(pt[i], pt[i + 1]))
        rtn = lines
    return rtn
def _ConvertRevitCurves(xcrv):
if str(xcrv.GetType()) != "Autodesk.Revit.DB.PolyLine":
rtn=xcrv
elif str(xcrv.GetType())=="Autodesk.Revit.DB.PolyLine":
lines=[]
points=xcrv.GetCoordinates()
for i in range(0,len(points)-1):
            try:
                newline = DB.Line.CreateBound(points[i], points[i + 1])
                lines.append(newline)
            except:
                # Skip degenerate segments that Revit cannot turn into bound lines.
                pass
rtn=lines
else:
rtn=xcrv
return rtn
DOC =doc
DWG =Picked_Selection.unwrap()
CRV = []
CRX = []
LAY = []
CLR = []
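# Walk the geometry of the picked import instance: convert polylines into line
# segments and record each curve together with its layer name and line colour.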
for abc in DWG.get_Geometry(DB.Options()):
for crv in abc.GetInstanceGeometry():
#print(crv.GetType())
lay = DOC.GetElement(crv.GraphicsStyleId).GraphicsStyleCategory.Name
ccc = DOC.GetElement(crv.GraphicsStyleId).GraphicsStyleCategory.LineColor
CRX.append(_ConvertRevitCurves(crv))
CRV.append(crv)
LAY.append(lay)
CLR.append(ccc.Green)
OUT = [CRV, CRX, LAY, CLR]
LayedSelection=[]
for c,l in zip(CRX,LAY):
if l==LayerName:
LayedSelection.append(c)
testLine=LayedSelection
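# Create one structural framing (beam) instance per curve inside a single
# transaction; the start/end level offsets come from the form input above.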
@rpw.db.Transaction.ensure('CreateBeam')
def CreateBeam(Curves,FamilySymbol,Level,StructureType):
for i in Curves:
c=doc.Create.NewFamilyInstance(i,FamilySymbol,Level,StructureType)
WrpedElement=db.Element(c)
WrpedElement.parameters['Start Level Offset']=Offset
WrpedElement.parameters['End Level Offset']=Offset
print(WrpedElement)
Curve=Helper.List_Flat(testLine)
StructuralType=DB.Structure.StructuralType.Beam
c=CreateBeam(Curve,FamilyName,Level,StructuralType)
print(c)
print("绘制完成")
| zh | 0.124791 | # -*- coding: utf-8 -*- #信息输入部分 # #print(abc) #for i in range(0,len(pt)-1): # lines.append(DB.Line.CreateBound(pt[i],pt[1+1])); #rtn=lines #print(crv.GetType()) | 2.040957 | 2 |
fortnitepy/errors.py | Jawschamp/fortnitepy | 0 | 6631013 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 Terbau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class FortniteException(Exception):
"""Base exception for fortnitepy.
This could in theory be caught to handle all exceptions thrown by this library.
"""
pass
class AuthException(FortniteException):
"""This exception is raised when auth fails."""
pass
class EventError(FortniteException):
"""This exception is raised when something regarding events fails."""
pass
class XMPPError(FortniteException):
"""This exception is raised when something regarding the XMPP service fails."""
pass
class PartyError(FortniteException):
"""This exception is raised when something regarding parties fails."""
pass
class PartyPermissionError(FortniteException):
"""This exception is raised when you dont have permission to do something in a party
or a party you are trying to join is private.
"""
pass
class HTTPException(FortniteException):
"""This exception is raised when an error is received by Fortnite services.
Attributes
----------
response: :class:`aiohttp.ClientResponse`
The response from the HTTP request.
text: :class:`str`
The error message.
status: :class:`int`
The status code of the HTTP request.
raw: Union[:class:`str`, :class:`dict`]
The raw message/data received from Fortnite services.
message: :class:`str`
The raw error message received from Fortnite services.
message_code: :class:`str`
The raw error message code received from Fortnite services.
message_vars: List[:class:`str`]
List containing arguments passed to the message.
code: :class:`int`
The error code received from Fortnite services.
originating_service: :class:`str`
The originating service this error was received from.
intent: :class:`str`
The prod this error was received from.
"""
def __init__(self, response, message):
self.response = response
self.status = response.status
self.raw = message
_err = message if isinstance(message, dict) else {}
self.message = _err.get('errorMessage')
self.message_code = _err.get('errorCode')
self.message_vars = _err.get('messageVars')
self.code = _err.get('numericErrorCode')
self.originating_service = _err.get('originatingService')
self.intent = _err.get('intent')
self.text = 'Code: "{0}" - {1}'.format(
self.message_code,
self.message
)
super().__init__(self.text)
| en | 0.843 | # -*- coding: utf-8 -*- MIT License Copyright (c) 2019 Terbau Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Base exception for fortnitepy. This could in theory be caught to handle all exceptions thrown by this library. This exception is raised when auth fails. This exception is raised when something regarding events fails. This exception is raised when something regarding the XMPP service fails. This exception is raised when something regarding parties fails. This exception is raised when you dont have permission to do something in a party or a party you are trying to join is private. This exception is raised when an error is received by Fortnite services. Attributes ---------- response: :class:`aiohttp.ClientResponse` The response from the HTTP request. text: :class:`str` The error message. status: :class:`int` The status code of the HTTP request. raw: Union[:class:`str`, :class:`dict`] The raw message/data received from Fortnite services. message: :class:`str` The raw error message received from Fortnite services. message_code: :class:`str` The raw error message code received from Fortnite services. message_vars: List[:class:`str`] List containing arguments passed to the message. code: :class:`int` The error code received from Fortnite services. originating_service: :class:`str` The originating service this error was received from. intent: :class:`str` The prod this error was received from. | 2.235627 | 2 |
highway_env/envs/merge_out_origin.py | jasonplato/High_SimulationPlatform | 0 | 6631014 |
from __future__ import division, print_function, absolute_import
import numpy as np
from highway_env import utils
from highway_env.envs.abstract import AbstractEnv
from highway_env.road.lane import LineType, StraightLane, SineLane, LanesConcatenation
from highway_env.road.road import Road
from highway_env.vehicle.control import ControlledVehicle, MDPVehicle, CarSim, FreeControl
from highway_env.vehicle.behavior import IDMVehicle
from highway_env.vehicle.dynamics import Obstacle
import time
import random
def mobil(self, lane_index, mandatory):
"""
action_explain = ['left acc', 'left same', 'left dec', 'same acc', 'same same', 'same dec', 'right acc',
'right same', 'right dec']
MOBIL lane change model: Minimizing Overall Braking Induced by a Lane change
The vehicle should change lane only if:
- after changing it (and/or following vehicles) can accelerate more;
- it doesn't impose an unsafe braking on its new following vehicle.
:param lane_index: the candidate lane for the change
:param mandatory: if the lane change is mandatory
:return: whether the lane change should be performed
"""
def acceleration(ego_vehicle, front_vehicle=None):
"""
Compute an acceleration command with the Intelligent Driver Model.
The acceleration is chosen so as to:
- reach a target velocity;
- maintain a minimum safety distance (and safety time) w.r.t the front vehicle.
:param ego_vehicle: the vehicle whose desired acceleration is to be computed. It does not have to be an
IDM vehicle, which is why this method is a class method. This allows an IDM vehicle to
                            reason about other vehicles' behaviors even though they may not be IDMs.
:param front_vehicle: the vehicle preceding the ego-vehicle
:return: the acceleration command for the ego-vehicle [m/s2]
"""
COMFORT_ACC_MAX = 3.0
COMFORT_ACC_MIN = -5.0
TIME_WANTED = 1.5
DISTANCE_WANTED = 10
DELTA = 4.0
def not_zero(x):
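            # Clamp values away from zero so the IDM terms never divide by zero.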
EPSILON = 0.01
if abs(x) > EPSILON:
return x
elif x > 0:
return EPSILON
else:
return -EPSILON
def desired_gap(ego_vehicle, front_vehicle):
d0 = DISTANCE_WANTED + ego_vehicle.LENGTH / 2 + front_vehicle.LENGTH / 2
tau = TIME_WANTED
ab = -COMFORT_ACC_MAX * COMFORT_ACC_MIN
dv = ego_vehicle.velocity - front_vehicle.velocity
d_star = d0 + ego_vehicle.velocity * tau + ego_vehicle.velocity * dv / (2 * np.sqrt(ab))
return d_star
if not ego_vehicle:
return 0
acceleration = COMFORT_ACC_MAX * (
1 - np.power(ego_vehicle.velocity / not_zero(ego_vehicle.target_velocity), DELTA))
if front_vehicle:
d = ego_vehicle.lane_distance_to(front_vehicle)
acceleration -= COMFORT_ACC_MAX * np.power(
desired_gap(ego_vehicle, front_vehicle) / not_zero(d), 2)
return acceleration
LANE_CHANGE_MAX_BRAKING_IMPOSED = 1.0
LANE_CHANGE_MIN_ACC_GAIN = 0.1
POLITENESS = 0.
# Is the maneuver unsafe for the new following vehicle?
new_preceding, new_following = self.road.neighbour_vehicles(self, self.road.lanes[lane_index])
# todo: added mandatory part
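    # Safety check for a mandatory lane change: require a gap of at least 5 m to the
    # new leader/follower and no positive time-to-collision shorter than 3 s.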
preceding_vehicle_ok = True
if new_preceding:
relative_x = new_preceding.position[0] - self.position[0]
relative_v = self.velocity - new_preceding.velocity
if relative_x < 5:
preceding_vehicle_ok = False
if relative_v == 0.0:
pass
else:
t = relative_x / relative_v
if 0 < t < 3:
preceding_vehicle_ok = False
following_vehicle_ok = True
if new_following:
relative_x = self.position[0] - new_following.position[0]
relative_v = new_following.velocity - self.velocity
if relative_x < 5:
following_vehicle_ok = False
if relative_v == 0.0:
pass
else:
t = relative_x / relative_v
if 0 < t < 3:
following_vehicle_ok = False
if mandatory:
if preceding_vehicle_ok and following_vehicle_ok:
return True
else:
return False
# todo: part finish
new_following_a = acceleration(ego_vehicle=new_following, front_vehicle=new_preceding)
new_following_pred_a = acceleration(ego_vehicle=new_following, front_vehicle=self)
if new_following_pred_a < -LANE_CHANGE_MAX_BRAKING_IMPOSED:
return False
# Is there an advantage for me and/or my followers to change lane?
old_preceding, old_following = self.road.neighbour_vehicles(self)
self_a = acceleration(ego_vehicle=self, front_vehicle=old_preceding)
self_pred_a = acceleration(ego_vehicle=self, front_vehicle=new_preceding)
old_following_a = acceleration(ego_vehicle=old_following, front_vehicle=self)
old_following_pred_a = acceleration(ego_vehicle=old_following, front_vehicle=old_preceding)
jerk = self_pred_a - self_a + POLITENESS * (
new_following_pred_a - new_following_a + old_following_pred_a - old_following_a)
if jerk < LANE_CHANGE_MIN_ACC_GAIN:
return False
# All clear, let's go!
return True
def global_mobil(env, action):
"""
:param env: environment
:param action: action_explain = ['left acc', 'left same', 'left dec', 'same acc', 'same same', 'same dec',
'right acc', 'right same', 'right dec']
"""
vehicle = env.vehicle
mandatory = False
lane_index = vehicle.lane_index
if action in [0, 1, 2]:
lane_index -= 1
mandatory = True
if lane_index >= 0 and env.road.lanes[lane_index].is_reachable_from(vehicle.position):
print('mandatory to left: {}'.format(mobil(vehicle, lane_index, mandatory)))
elif action in [6, 7, 8]:
lane_index += 1
mandatory = True
if lane_index < len(env.road.lanes) and env.road.lanes[lane_index].is_reachable_from(vehicle.position):
print('mandatory to right: {}'.format(mobil(vehicle, lane_index, mandatory)))
else:
lane_offsets = [i for i in [-1, 1] if 0 <= vehicle.lane_index + i < len(env.road.lanes)]
for lane_offset in lane_offsets:
# Is the candidate lane close enough?
if not env.road.lanes[vehicle.lane_index + lane_offset].is_reachable_from(vehicle.position):
continue
# Does the MOBIL model recommend a lane change?
            if mobil(vehicle, vehicle.lane_index + lane_offset, mandatory):
                print("non-mandatory to {}: True!".format(lane_offset))
            else:
                print("non-mandatory to {}: False!".format(lane_offset))
# todo
# --------------------------------------------
# todo
class MergeEnvOut(AbstractEnv):
"""
A highway merge negotiation environment.
    The ego-vehicle is driving on a highway and approaching a merge, with some vehicles incoming on the access ramp.
It is rewarded for maintaining a high velocity and avoiding collisions, but also making room for merging
vehicles.
"""
COLLISION_REWARD = -1
RIGHT_LANE_REWARD = 0.1
HIGH_VELOCITY_REWARD = 0.2
MERGING_VELOCITY_REWARD = -0.5
LANE_CHANGE_REWARD = -0.05
DEFAULT_CONFIG = {"other_vehicles_type": "highway_env.vehicle.behavior.IDMVehicle"}
def __init__(self):
super(MergeEnvOut, self).__init__()
self.switch = False
self.other_vehicles_mandatory = False
self.config = self.DEFAULT_CONFIG.copy()
# self.make_road()
self.make()
# self.double_merge()
self.make_vehicles(self.other_vehicles_mandatory)
self.success_cnt = 0
def configure(self, config):
self.config.update(config)
def _observation(self):
return super(MergeEnvOut, self)._observation()
def _reward(self, action):
"""
The vehicle is rewarded for driving with high velocity on lanes to the right and avoiding collisions, but
an additional altruistic penalty is also suffered if any vehicle on the merging lane has a low velocity.
:param action: the action performed
:return: the reward of the state-action transition
"""
action_reward = {0: self.LANE_CHANGE_REWARD,
1: 0,
2: self.LANE_CHANGE_REWARD,
3: 0,
4: 0}
reward = self.COLLISION_REWARD * self.vehicle.crashed \
+ self.RIGHT_LANE_REWARD * self.vehicle.lane_index / (len(self.road.lanes) - 2) \
+ self.HIGH_VELOCITY_REWARD * self.vehicle.velocity_index / (self.vehicle.SPEED_COUNT - 1)
# Altruistic penalty
for vehicle in self.road.vehicles:
if vehicle.lane_index == len(self.road.lanes)-1 and isinstance(vehicle, ControlledVehicle):
reward += self.MERGING_VELOCITY_REWARD * \
(vehicle.target_velocity - vehicle.velocity) / vehicle.target_velocity
return reward + action_reward[action]
def ego_vehicle_switch(self):
self.switch = not self.switch
def _is_terminal(self):
"""
The episode is over when a collision occurs or when the access ramp has been passed.
"""
if self.vehicle.position[0] > 500:
if self.vehicle.lane_index == 3:
self.success_cnt += 0.5
return self.vehicle.crashed or self.vehicle.position[0] > 500
def reset(self):
# self.make_road()
self.make()
self.make_vehicles(self.other_vehicles_mandatory)
return self._observation()
def make_straight(self):
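        # Straight four-lane road; the outermost lane (index 3) is only defined on a
        # limited stretch and ends in forbidden sine/straight segments (an exit ramp),
        # with an Obstacle appended at its start.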
lm10 = StraightLane(np.array([0, 0]), 0, 4.0, [LineType.CONTINUOUS_LINE, LineType.STRIPED], bounds=[0, 500])
l1 = LanesConcatenation([lm10])
lm20 = StraightLane(l1.position(0, 4), 0, 4.0, [LineType.STRIPED, LineType.STRIPED], bounds=[0, 500])
l2 = LanesConcatenation([lm20])
# lm30 = StraightLane(l2.position(0,4), 0, 4.0, [LineType.STRIPED, LineType.STRIPED],bounds=[0,100])
# lm31 = StraightLane(lm30.position(0,0), 0, 4.0, [LineType.STRIPED, LineType.STRIPED],bounds=[0,500])
# l3 = LanesConcatenation([lm30,lm31])
lm30 = StraightLane(l2.position(0, 4), 0, 4.0, [LineType.STRIPED, LineType.STRIPED], bounds=[0, 500])
l3 = LanesConcatenation([lm30])
amplitude = 4.5
lm40 = StraightLane(l3.position(0, 4), 0, 4.0, [LineType.STRIPED, LineType.CONTINUOUS_LINE], bounds=[200, 400])
lm41 = SineLane(lm40.position(400, amplitude), 0, 4.0, -amplitude, 2 * np.pi / (2 * 50), np.pi / 2,
[LineType.CONTINUOUS, LineType.CONTINUOUS], bounds=[0, 50], forbidden=True)
lm42 = StraightLane(lm41.position(50, 0), 0, 4.0, [LineType.CONTINUOUS_LINE, LineType.CONTINUOUS_LINE],
bounds=[0, 50],
forbidden=True)
l4 = LanesConcatenation([lm40, lm41, lm42])
road = Road([l1, l2, l3, l4])
# road = Road([ l3])
# road = Road([lm0,lm2])
        # TODO: decide how to handle the Obstacle entry in lane.vehicles
obstacle = Obstacle(road, lm40.position(0, 0))
road.vehicles.append(obstacle)
road.lanes[3].vehicles.append(obstacle)
self.road = road
def make_sin(self):
# amplitude = 4.5
amplitude = 9.0
lm10 = StraightLane(np.array([0, 0]), 0, 5.0, [LineType.CONTINUOUS_LINE, LineType.STRIPED], bounds=[0, 400])
lm11 = SineLane(lm10.position(400, amplitude), 0, 5.0, -amplitude, 2 * np.pi / (2 * 50), np.pi / 2,
[LineType.CONTINUOUS, LineType.STRIPED], bounds=[0, 250])
lm12 = StraightLane(lm11.position(250, 0), 0, 5.0, [LineType.CONTINUOUS_LINE, LineType.STRIPED], bounds=[0, 50])
l1 = LanesConcatenation([lm10, lm11, lm12])
lm20 = StraightLane(lm10.position(0, 5), 0, 5.0, [LineType.STRIPED, LineType.STRIPED], bounds=[0, 400])
lm21 = SineLane(lm20.position(400, amplitude), 0, 5.0, -amplitude, 2 * np.pi / (2 * 50), np.pi / 2,
[LineType.STRIPED, LineType.STRIPED], bounds=[0, 250])
lm22 = StraightLane(lm21.position(250, 0), 0, 5.0, [LineType.STRIPED, LineType.STRIPED], bounds=[0, 50])
l2 = LanesConcatenation([lm20, lm21, lm22])
lm30 = StraightLane(lm20.position(0, 5), 0, 5.0, [LineType.STRIPED, LineType.STRIPED], bounds=[0, 400])
lm31 = SineLane(lm30.position(400, amplitude), 0, 5.0, -amplitude, 2 * np.pi / (2 * 50), np.pi / 2,
[LineType.STRIPED, LineType.STRIPED], bounds=[0, 250])
lm32 = StraightLane(lm31.position(250, 0), 0, 5.0, [LineType.STRIPED, LineType.STRIPED], bounds=[0, 50])
l3 = LanesConcatenation([lm30, lm31, lm32])
lm40 = StraightLane(lm30.position(0, 5), 0, 5.0, [LineType.STRIPED, LineType.CONTINUOUS_LINE], bounds=[0, 400])
lm41 = SineLane(lm40.position(400, amplitude), 0, 5.0, -amplitude, 2 * np.pi / (2 * 50), np.pi / 2,
[LineType.STRIPED, LineType.CONTINUOUS], bounds=[0, 250])
lm42 = StraightLane(lm41.position(250, 0), 0, 5.0, [LineType.STRIPED, LineType.CONTINUOUS_LINE],
bounds=[0, 50],)
l4 = LanesConcatenation([lm40, lm41, lm42])
road = Road([l1, l2, l3, l4])
# road = Road([ l3])
# road = Road([lm0,lm2])
        # TODO: decide how to handle the Obstacle entry in lane.vehicles
obstacle = Obstacle(road, lm40.position(0, 0))
road.vehicles.append(obstacle)
road.lanes[3].vehicles.append(obstacle)
self.road = road
def make(self):
self.make_straight()
# self.make_sin()
def make_vehicles(self, other_vehicles_mandatory=False):
"""
Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.
:param other_vehicles_mandatory: if the lane changing maneuvers of other vehicles are mandatory
:return: None
"""
max_l = 500
road = self.road
other_vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
car_number_each_lane = 15
# reset_position_range = (30, 40)
reset_position_range = (20, 30)
# reset_lane = random.choice(road.lanes)
reset_lane = road.lanes[0]
for l in road.lanes[:3]:
cars_on_lane = car_number_each_lane
reset_position = None
if l is reset_lane:
cars_on_lane += 1
reset_position = random.choice(range(5, 6))
# reset_position = 2
for i in range(cars_on_lane):
if i == reset_position:
if self.switch:
ego_vehicle = MDPVehicle(road, l.position((i+1) * np.random.randint(*reset_position_range), 0),
velocity=20, max_length=max_l)
else:
ego_vehicle = IDMVehicle(road, l.position((i + 1) * np.random.randint(*reset_position_range), 0),
velocity=20, max_length=max_l)
ego_vehicle.destination = 1
ego_vehicle.id = 0
road.vehicles.append(ego_vehicle)
self.vehicle = ego_vehicle
l.vehicles.append(ego_vehicle)
else:
car = other_vehicles_type(road, l.position((i+1) * np.random.randint(*reset_position_range), 0),
velocity=np.random.randint(18, 25), dst=3, max_length=max_l)
if other_vehicles_mandatory:
car.destination = 1
road.vehicles.append(car)
l.vehicles.append(car)
for l in [road.lanes[3]]:
cars_on_lane = car_number_each_lane
reset_position = None
if l is reset_lane:
cars_on_lane += 1
reset_position = random.choice(range(5, 6))
# reset_position = 2
for i in range(cars_on_lane):
if i < 8:
continue
if i == reset_position:
# ego_vehicle = MDPVehicle(road, l.position((i+1) * np.random.randint(*reset_position_range), 0),
# velocity=20, max_length=max_l)
ego_vehicle = IDMVehicle(road, l.position((i + 1) * np.random.randint(*reset_position_range), 0),
velocity=20, max_length=max_l)
ego_vehicle.destination = 1
ego_vehicle.id = 0
road.vehicles.append(ego_vehicle)
self.vehicle = ego_vehicle
l.vehicles.append(ego_vehicle)
else:
car = other_vehicles_type(road, l.position((i+1) * np.random.randint(*reset_position_range), 0),
velocity=np.random.randint(18, 25), dst=3, max_length=max_l)
if other_vehicles_mandatory:
car.destination = 1
road.vehicles.append(car)
l.vehicles.append(car)
for lane in road.lanes:
lane.vehicles = sorted(lane.vehicles, key=lambda x: lane.local_coordinates(x.position)[0])
for i, v in enumerate(lane.vehicles):
v.vehicle_index_in_line = i
# for l in road.lanes[3:]:
# cars_on_lane = car_number_each_lane
# reset_position = None
# if l is reset_lane:
# cars_on_lane+=1
# reset_position = random.choice(range(1,car_number_each_lane))
# for i in range(cars_on_lane):
# if i == reset_position:
# ego_vehicle = ControlledVehicle(road, l.position((i+1) * np.random.randint(*reset_position_range), 0), velocity=20,max_length=max_l)
# road.vehicles.append(ego_vehicle)
# self.vehicle = ego_vehicle
# else:
# road.vehicles.append(other_vehicles_type(road, l.position((i+1) * np.random.randint(*reset_position_range), 0), velocity=np.random.randint(18,25),dst=2,rever=True,max_length=max_l))
if __name__ == '__main__':
pass
Time.py | DefJia/Auto_Reservation_System_BE | 15 | 6631015 |
import datetime
import time
import requests
from configparser import ConfigParser
import ast
class Time:
def __init__(self):
self.cfg = ConfigParser()
self.cfg.read('.config.ini', encoding='utf8')
def wait_until(self, type):
"""
        Note: the time referred to here is the remote server's time, not the local clock.
:return: 0 -> time is up
"""
pass
def time_control(self, type):
"""
:param type: 0 -> pre_book, 1 -> pick
:return:
"""
tmp = 'book' if type == 0 else 'pick'
target_time = self.cfg.get('Time', tmp + '_time').split(':')
hour = int(target_time[0])
minute = int(target_time[1])
prepare_seconds = self.cfg.getint('Time', 'advanced_second_to_prepare')
interval_seconds = self.cfg.getint('Time', 'interval_second_to_calibrate')
start_seconds = self.cfg.getint('Time', 'advanced_second_to_book')
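        # Two-stage wait: poll the local clock until it is within `prepare_seconds` of
        # the target time, then poll the server clock until the remaining gap is at
        # most `start_seconds`.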
while True:
if self.cal_seconds(0, (hour, minute), prepare_seconds):
                # Local time is close enough; start checking against the server time
while not self.cal_seconds(1, (hour, minute), start_seconds):
time.sleep(interval_seconds)
return 0
else:
                # Local time not yet within range; keep waiting
time.sleep(interval_seconds)
def cal_seconds(self, time_type, target_time, target_delta_seconds):
"""
:param time_type: 0 -> local time, 1 -> server time
:param target_time: target time tuple -> (hour, minute)
:param target_delta_seconds: target delta seconds
:return: true or false
"""
target_seconds = (target_time[0] * 60 + target_time[1]) * 60
current_time = datetime.datetime.now() if time_type == 0 else self.get_server_time()
current_hour = current_time.hour
current_minute = current_time.minute
current_second = current_time.second
current_seconds = current_hour * 3600 + current_minute * 60 + current_second
current_delta_second = target_seconds - current_seconds
        # print('mode %d, time delta %d' % (time_type, current_delta_second))
return True if 0 <= current_delta_second <= target_delta_seconds else False
@staticmethod
def get_server_time():
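        # Read the HTTP `Date` response header from the seat-reservation host and
        # convert it from GMT to the server's local time (UTC+8).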
host = 'http://seat.lib.bit.edu.cn'
r = requests.get(host)
dic = ast.literal_eval(str(r.headers))
t = datetime.datetime.strptime(dic['Date'], "%a, %d %b %Y %H:%M:%S GMT") + datetime.timedelta(hours=8)
return t
if __name__ == '__main__':
res = Time()
# r = res.time_control(0)
r = res.get_server_time()
print(r)
tensorflow/python/kernel_tests/linalg_grad_test.py | devsangwoo/tensor | 1 | 6631016 |
<<<<<<< HEAD
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = '_'.join(['test', op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError('Test %s defined more than once' % test_name)
setattr(test, test_name, fn)
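# Test methods are generated dynamically: _AddTest attaches one method per
# (op, dtype, shape) combination to the TestCase classes in the __main__ block below.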
class ShapeTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testBatchGradientUnknownSize(self):
with self.cached_session():
batch_size = constant_op.constant(3)
matrix_size = constant_op.constant(4)
batch_identity = array_ops.tile(
array_ops.expand_dims(
array_ops.diag(array_ops.ones([matrix_size])), 0),
[batch_size, 1, 1])
determinants = linalg_ops.matrix_determinant(batch_identity)
reduced = math_ops.reduce_sum(determinants)
sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self):
with self.session(use_gpu=True):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
if functor_.__name__ == 'matrix_square_root':
# Square the input matrix to ensure that its matrix square root exists
a = math_ops.matmul(a, a)
a_np = self.evaluate(a)
b = functor_(a, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else 0.05
theoretical, numerical = gradient_checker.compute_gradient(
a,
a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=a_np,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
dtype_,
shape_,
float32_tol_fudge=1.0,
**kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self):
# TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable
# GPU test for matrix_solve.
use_gpu = False if functor_ == linalg_ops.matrix_solve else True
with self.session(use_gpu=use_gpu):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
b_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
b = constant_op.constant(b_np)
c = functor_(a, b, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05
# The gradients for a and b may be of very different magnitudes,
# so to not get spurious failures we test them separately.
for factor, factor_init in [a, a_np], [b, b_np]:
theoretical, numerical = gradient_checker.compute_gradient(
factor,
factor.get_shape().as_list(),
c,
c.get_shape().as_list(),
x_init_value=factor_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == '__main__':
# Tests for gradients of binary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
for adjoint in False, True:
shape = extra + (size, size)
name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
str(adjoint))
_AddTest(MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))
for lower in True, False:
name = '%s_low_%s' % (name, lower)
if (name == 'float32_10_10_adj_False_low_True') and \
test_lib.is_built_with_rocm():
# Skip this one particular subtest on the ROCm platform
# It will fail because of 1 element in 10,000 mismatch,
              # and the mismatch is minor (tolerance is 0.20, mismatch is 0.22)
              # TODO(rocm): investigate cause of mismatch and fix
continue
_AddTest(MatrixBinaryFunctorGradientTest,
'MatrixTriangularSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_triangular_solve,
dtype,
shape,
float32_tol_fudge=4.0,
adjoint=adjoint,
lower=lower))
# Tests for gradients of unary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
=======
"""Tests for tensorflow.ops.linalg_grad."""
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests import gradient_checker as gc
class MatrixInverseGradientTest(tf.test.TestCase):
pass # Filled in below
def _GetMatrixInverseGradientTest(dtype, shape):
def Test(self):
with self.test_session():
np.random.seed(1)
m = np.random.uniform(low=1.0, high=100.0, size=np.prod(shape)).reshape(
shape).astype(dtype)
a = tf.constant(m)
epsilon = np.finfo(dtype).eps
# Optimal stepsize for central difference is O(epsilon^{1/3}).
delta = epsilon ** (1.0 / 3.0)
tol = 1e-3
if len(shape) == 2:
ainv = tf.matrix_inverse(a)
else:
ainv = tf.batch_matrix_inverse(a)
theoretical, numerical = gc.ComputeGradient(a, shape, ainv, shape,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == "__main__":
# TODO(rmlarsen,irving): Reenable float32 once tolerances are fixed
# The test used to loop over (np.float, np.double), both of which are float64.
for dtype in np.float64,:
for size in 2, 3, 5, 10:
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
shape = extra + (size, size)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
<<<<<<< HEAD
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
dtype, shape))
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixExponentialGradient',
name,
_GetMatrixUnaryFunctorGradientTest(
linalg_impl.matrix_exponential, dtype, shape))
_AddTest(
MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
dtype, shape))
_AddTest(
MatrixUnaryFunctorGradientTest, 'LogMatrixDeterminantGradient',
name,
_GetMatrixUnaryFunctorGradientTest(
lambda x: linalg_ops.log_matrix_determinant(x)[1],
dtype, shape))
# The numerical Jacobian is consistently invalid for these four shapes
# because the matrix square root of the perturbed input doesn't exist
if shape in {(2, 5, 5), (3, 5, 5), (3, 10, 10), (3, 2, 5, 5)}:
# Alternative shape that consistently produces a valid numerical Jacobian
shape = extra + (size + 1, size + 1)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
_AddTest(
MatrixUnaryFunctorGradientTest, 'MatrixSquareRootGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_square_root,
dtype, shape))
# Tests for gradients of matrix_solve_ls
for dtype in np.float32, np.float64:
for rows in 2, 5, 10:
for cols in 2, 5, 10:
for l2_regularization in 1e-6, 0.001, 1.0:
shape = (rows, cols)
name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
l2_regularization)
float32_tol_fudge = 5.1 if l2_regularization == 1e-6 else 4.0
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixSolveLsGradient',
name,
# pylint: disable=long-lambda,g-long-lambda
_GetMatrixBinaryFunctorGradientTest(
(lambda a, b, l=l2_regularization:
linalg_ops.matrix_solve_ls(a, b, l)),
dtype,
shape,
float32_tol_fudge))
test_lib.main()
=======
setattr(MatrixInverseGradientTest, 'testMatrixInverseGradient_' + name,
_GetMatrixInverseGradientTest(dtype, shape))
tf.test.main()
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
py_privatekonomi/core/db.py | nilsFK/py-privatekonomi | 2 | 6631017 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sqlalchemy
from sqlalchemy import __version__
from sqlalchemy import create_engine
import sqlalchemy.engine.url as url
from py_privatekonomi.utilities import common
from py_privatekonomi.utilities.common import (singleton, is_dict, is_Struct, as_obj, as_dict)
@singleton
class DB(object):
def connect(self, db_config):
if not is_dict(db_config) and not is_Struct(db_config):
raise Exception("db_config must be either dict or common.Struct: %s" % (repr(db_config)))
if is_Struct(db_config):
db_config = as_dict(db_config)
query = None
if 'charset' in db_config:
if db_config['charset'] == 'utf-8': # SQLAlchemy won't accept 'utf-8'...
db_config['charset'] = 'utf8'
query = {
'charset' : db_config['charset']
}
if 'encoding' not in db_config:
db_config['encoding'] = 'utf-8'
db_config = as_obj(db_config)
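        # Build the SQLAlchemy connection URL from the normalized config and open a
        # single engine/connection, cached on this singleton instance.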
engine_url = url.URL(
drivername=db_config.engine,
host=db_config.host,
port=db_config.port,
username=db_config.username,
password=<PASSWORD>,
database=db_config.database,
query=query
)
self.__engine = create_engine(engine_url, encoding=db_config.encoding)
self.__connection = self.__engine.connect()
self.__config = db_config
self.__connected = True
def getEngine(self):
return self.__engine
def getConnection(self):
return self.__connection
def getConfig(self, config = None):
if config is not None:
return getattr(self.__config, config)
else:
return self.__config
def hasConfig(self, config_name):
return hasattr(self.__config, config_name)
def isConnected(self):
try:
is_connected = self.__connected
except AttributeError:
return False
return is_connected
if __name__ == '__main__':
db = DB()
print((sqlalchemy.__version__))
db.connect()
db.getEngine()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sqlalchemy
from sqlalchemy import __version__
from sqlalchemy import create_engine
import sqlalchemy.engine.url as url
from py_privatekonomi.utilities import common
from py_privatekonomi.utilities.common import (singleton, is_dict, is_Struct, as_obj, as_dict)
@singleton
class DB(object):
def connect(self, db_config):
if not is_dict(db_config) and not is_Struct(db_config):
raise Exception("db_config must be either dict or common.Struct: %s" % (repr(db_config)))
if is_Struct(db_config):
db_config = as_dict(db_config)
query = None
if 'charset' in db_config:
if db_config['charset'] == 'utf-8': # SQLAlchemy won't accept 'utf-8'...
db_config['charset'] = 'utf8'
query = {
'charset' : db_config['charset']
}
if 'encoding' not in db_config:
db_config['encoding'] = 'utf-8'
db_config = as_obj(db_config)
engine_url = url.URL(
drivername=db_config.engine,
host=db_config.host,
port=db_config.port,
username=db_config.username,
password=<PASSWORD>,
database=db_config.database,
query=query
)
self.__engine = create_engine(engine_url, encoding=db_config.encoding)
self.__connection = self.__engine.connect()
self.__config = db_config
self.__connected = True
def getEngine(self):
return self.__engine
def getConnection(self):
return self.__connection
def getConfig(self, config = None):
if config is not None:
return getattr(self.__config, config)
else:
return self.__config
def hasConfig(self, config_name):
return hasattr(self.__config, config_name)
def isConnected(self):
try:
is_connected = self.__connected
except AttributeError:
return False
return is_connected
if __name__ == '__main__':
db = DB()
print((sqlalchemy.__version__))
db.connect()
db.getEngine()
| en | 0.688447 | #!/usr/bin/env python # -*- coding: utf-8 -*- # SQLAlchemy won't accept 'utf-8'... | 2.4143 | 2 |
vyper/parser/external_call.py | t4n6a1ka/vyper | 0 | 6631018 | from vyper import ast
from vyper.exceptions import (
ConstancyViolationException,
FunctionDeclarationException,
StructureException,
TypeMismatchException,
VariableDeclarationException,
)
from vyper.parser.lll_node import (
LLLnode,
)
from vyper.parser.parser_utils import (
getpos,
pack_arguments,
unwrap_location,
)
from vyper.types import (
BaseType,
ByteArrayLike,
ListType,
TupleLike,
get_size_of_type,
)
def external_contract_call(node,
context,
contract_name,
contract_address,
pos,
value=None,
gas=None):
from vyper.parser.expr import (
Expr,
)
if value is None:
value = 0
if gas is None:
gas = 'gas'
if not contract_name:
raise StructureException(
f'Invalid external contract call "{node.func.attr}".',
node
)
if contract_name not in context.sigs:
raise VariableDeclarationException(
f'Contract "{contract_name}" not declared yet',
node
)
if contract_address.value == "address":
raise StructureException(
f"External calls to self are not permitted.", node
)
method_name = node.func.attr
if method_name not in context.sigs[contract_name]:
raise FunctionDeclarationException(
(
"Function not declared yet: %s (reminder: "
"function must be declared in the correct contract)"
" The available methods are: %s"
) % (method_name, ",".join(context.sigs[contract_name].keys())),
node.func
)
sig = context.sigs[contract_name][method_name]
inargs, inargsize, _ = pack_arguments(
sig,
[Expr(arg, context).lll_node for arg in node.args],
context,
node.func,
)
output_placeholder, output_size, returner = get_external_contract_call_output(sig, context)
sub = [
'seq',
['assert', ['extcodesize', contract_address]],
['assert', ['ne', 'address', contract_address]],
]
if context.is_constant() and not sig.const:
raise ConstancyViolationException(
"May not call non-constant function '%s' within %s." % (
method_name,
context.pp_constancy(),
) +
" For asserting the result of modifiable contract calls, try assert_modifiable.",
node
)
if context.is_constant() or sig.const:
sub.append([
'assert',
[
'staticcall',
gas, contract_address, inargs, inargsize, output_placeholder, output_size,
]
])
else:
sub.append([
'assert',
[
'call',
gas, contract_address, value, inargs, inargsize, output_placeholder, output_size,
]
])
sub.extend(returner)
o = LLLnode.from_list(sub, typ=sig.output_type, location='memory', pos=getpos(node))
return o
def get_external_contract_call_output(sig, context):
if not sig.output_type:
return 0, 0, []
output_placeholder = context.new_placeholder(typ=sig.output_type)
output_size = get_size_of_type(sig.output_type) * 32
if isinstance(sig.output_type, BaseType):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ByteArrayLike):
returner = [0, output_placeholder + 32]
elif isinstance(sig.output_type, TupleLike):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ListType):
returner = [0, output_placeholder]
else:
raise TypeMismatchException("Invalid output type: %s" % sig.output_type)
return output_placeholder, output_size, returner
def get_external_contract_keywords(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = None, None
for kw in stmt_expr.keywords:
if kw.arg not in ('value', 'gas'):
raise TypeMismatchException(
'Invalid keyword argument, only "gas" and "value" supported.',
stmt_expr,
)
elif kw.arg == 'gas':
gas = Expr.parse_value_expr(kw.value, context)
elif kw.arg == 'value':
value = Expr.parse_value_expr(kw.value, context)
return value, gas
def make_external_call(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = get_external_contract_keywords(stmt_expr, context)
if isinstance(stmt_expr.func, ast.Attribute) and isinstance(stmt_expr.func.value, ast.Call):
contract_name = stmt_expr.func.value.func.id
contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif isinstance(stmt_expr.func.value, ast.Attribute) and stmt_expr.func.value.attr in context.sigs: # noqa: E501
contract_name = stmt_expr.func.value.attr
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif isinstance(stmt_expr.func.value, ast.Attribute) and stmt_expr.func.value.attr in context.globals: # noqa: E501
contract_name = context.globals[stmt_expr.func.value.attr].typ.unit
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
else:
raise StructureException("Unsupported operator.", stmt_expr)
| from vyper import ast
from vyper.exceptions import (
ConstancyViolationException,
FunctionDeclarationException,
StructureException,
TypeMismatchException,
VariableDeclarationException,
)
from vyper.parser.lll_node import (
LLLnode,
)
from vyper.parser.parser_utils import (
getpos,
pack_arguments,
unwrap_location,
)
from vyper.types import (
BaseType,
ByteArrayLike,
ListType,
TupleLike,
get_size_of_type,
)
def external_contract_call(node,
context,
contract_name,
contract_address,
pos,
value=None,
gas=None):
from vyper.parser.expr import (
Expr,
)
if value is None:
value = 0
if gas is None:
gas = 'gas'
if not contract_name:
raise StructureException(
f'Invalid external contract call "{node.func.attr}".',
node
)
if contract_name not in context.sigs:
raise VariableDeclarationException(
f'Contract "{contract_name}" not declared yet',
node
)
if contract_address.value == "address":
raise StructureException(
f"External calls to self are not permitted.", node
)
method_name = node.func.attr
if method_name not in context.sigs[contract_name]:
raise FunctionDeclarationException(
(
"Function not declared yet: %s (reminder: "
"function must be declared in the correct contract)"
" The available methods are: %s"
) % (method_name, ",".join(context.sigs[contract_name].keys())),
node.func
)
sig = context.sigs[contract_name][method_name]
inargs, inargsize, _ = pack_arguments(
sig,
[Expr(arg, context).lll_node for arg in node.args],
context,
node.func,
)
output_placeholder, output_size, returner = get_external_contract_call_output(sig, context)
sub = [
'seq',
['assert', ['extcodesize', contract_address]],
['assert', ['ne', 'address', contract_address]],
]
if context.is_constant() and not sig.const:
raise ConstancyViolationException(
"May not call non-constant function '%s' within %s." % (
method_name,
context.pp_constancy(),
) +
" For asserting the result of modifiable contract calls, try assert_modifiable.",
node
)
if context.is_constant() or sig.const:
sub.append([
'assert',
[
'staticcall',
gas, contract_address, inargs, inargsize, output_placeholder, output_size,
]
])
else:
sub.append([
'assert',
[
'call',
gas, contract_address, value, inargs, inargsize, output_placeholder, output_size,
]
])
sub.extend(returner)
o = LLLnode.from_list(sub, typ=sig.output_type, location='memory', pos=getpos(node))
return o
def get_external_contract_call_output(sig, context):
if not sig.output_type:
return 0, 0, []
output_placeholder = context.new_placeholder(typ=sig.output_type)
output_size = get_size_of_type(sig.output_type) * 32
if isinstance(sig.output_type, BaseType):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ByteArrayLike):
returner = [0, output_placeholder + 32]
elif isinstance(sig.output_type, TupleLike):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ListType):
returner = [0, output_placeholder]
else:
raise TypeMismatchException("Invalid output type: %s" % sig.output_type)
return output_placeholder, output_size, returner
def get_external_contract_keywords(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = None, None
for kw in stmt_expr.keywords:
if kw.arg not in ('value', 'gas'):
raise TypeMismatchException(
'Invalid keyword argument, only "gas" and "value" supported.',
stmt_expr,
)
elif kw.arg == 'gas':
gas = Expr.parse_value_expr(kw.value, context)
elif kw.arg == 'value':
value = Expr.parse_value_expr(kw.value, context)
return value, gas
def make_external_call(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = get_external_contract_keywords(stmt_expr, context)
if isinstance(stmt_expr.func, ast.Attribute) and isinstance(stmt_expr.func.value, ast.Call):
contract_name = stmt_expr.func.value.func.id
contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif isinstance(stmt_expr.func.value, ast.Attribute) and stmt_expr.func.value.attr in context.sigs: # noqa: E501
contract_name = stmt_expr.func.value.attr
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif isinstance(stmt_expr.func.value, ast.Attribute) and stmt_expr.func.value.attr in context.globals: # noqa: E501
contract_name = context.globals[stmt_expr.func.value.attr].typ.unit
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
else:
raise StructureException("Unsupported operator.", stmt_expr)
| it | 0.364061 | # noqa: E501 # noqa: E501 | 2.394392 | 2 |
Chapter14/Scripts/cartoframes_test.py | monocilindro/Mastering-Geospatial-Analysis-with-Python | 64 | 6631019 | <filename>Chapter14/Scripts/cartoframes_test.py
import cartoframes
APIKEY = "<KEY>"
# `base_url`s are of the form `http://{username}.carto.com/` for most users
cc = cartoframes.CartoContext(base_url='https://lokiintelligent.carto.com/',
api_key=APIKEY)
# read a table from your CARTO account to a DataFrame
df = cc.read('arenas_nba')
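# Possible follow-up (illustrative; assumes this cartoframes version's API):
# the DataFrame pulled above could be written back to CARTO with, e.g.,
#   cc.write(df, 'arenas_nba_copy', overwrite=True)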
| <filename>Chapter14/Scripts/cartoframes_test.py
import cartoframes
APIKEY = "<KEY>"
# `base_url`s are of the form `http://{username}.carto.com/` for most users
cc = cartoframes.CartoContext(base_url='https://lokiintelligent.carto.com/',
api_key=APIKEY)
# read a table from your CARTO account to a DataFrame
df = cc.read('arenas_nba')
| en | 0.768435 | # `base_url`s are of the form `http://{username}.carto.com/` for most users # read a table from your CARTO account to a DataFrame | 2.808148 | 3 |
Chapter08/rdd/rddtranform1.py | MichaelRW/Python-for-Geeks | 31 | 6631020 | #rddtransform1.py: rdd transformation function
#please ignore next 2 statements if running directly in PySpark shell
import time
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[*]")\
.appName("RDD Test app")\
.getOrCreate()
rdd1 = spark.sparkContext.textFile('sample.txt')
#print(rdd1.getNumPartitions())
rdd2 = rdd1.map(lambda lines: lines.lower())
rdd3 = rdd1.map(lambda lines: lines.upper())
print(rdd2.collect())
print(rdd3.collect())
time.sleep(60) | #rddtransform1.py: rdd transformation function
#please ignore next 2 statements if running directly in PySpark shell
import time
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[*]")\
.appName("RDD Test app")\
.getOrCreate()
rdd1 = spark.sparkContext.textFile('sample.txt')
#print(rdd1.getNumPartitions())
rdd2 = rdd1.map(lambda lines: lines.lower())
rdd3 = rdd1.map(lambda lines: lines.upper())
print(rdd2.collect())
print(rdd3.collect())
time.sleep(60) | en | 0.234749 | #rddtransform1.py: rdd tranformation function #please ignore next 2 statements if running directly in PySpark shell #print(rdd1.getNumPartitions()) | 2.713923 | 3 |
mayan/apps/document_states/tests/test_models.py | marumadang/mayan-edms | 0 | 6631021 | <gh_stars>0
from __future__ import unicode_literals
from django.test import override_settings
from common.tests import BaseTestCase
from common.tests.mixins import UserMixin
from documents.models import DocumentType
from documents.tests import TEST_SMALL_DOCUMENT_PATH, TEST_DOCUMENT_TYPE_LABEL
from document_indexing.models import Index, IndexInstanceNode
from ..models import Workflow
from .literals import (
TEST_INDEX_LABEL, TEST_INDEX_TEMPLATE_METADATA_EXPRESSION,
TEST_WORKFLOW_INTERNAL_NAME, TEST_WORKFLOW_INITIAL_STATE_LABEL,
TEST_WORKFLOW_INITIAL_STATE_COMPLETION, TEST_WORKFLOW_LABEL,
TEST_WORKFLOW_STATE_LABEL, TEST_WORKFLOW_STATE_COMPLETION,
TEST_WORKFLOW_TRANSITION_LABEL
)
@override_settings(OCR_AUTO_OCR=False)
class DocumentStateIndexingTestCase(UserMixin, BaseTestCase):
def tearDown(self):
self.document_type.delete()
super(DocumentStateIndexingTestCase, self).tearDown()
def _create_document_type(self):
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE_LABEL
)
def _create_document(self):
with open(TEST_SMALL_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
def _create_workflow(self):
self.workflow = Workflow.objects.create(
label=TEST_WORKFLOW_LABEL,
internal_name=TEST_WORKFLOW_INTERNAL_NAME
)
self.workflow.document_types.add(self.document_type)
def _create_workflow_states(self):
self._create_workflow()
self.workflow_state_1 = self.workflow.states.create(
completion=TEST_WORKFLOW_INITIAL_STATE_COMPLETION,
initial=True, label=TEST_WORKFLOW_INITIAL_STATE_LABEL
)
self.workflow_state_2 = self.workflow.states.create(
completion=TEST_WORKFLOW_STATE_COMPLETION,
label=TEST_WORKFLOW_STATE_LABEL
)
def _create_workflow_transition(self):
self._create_workflow_states()
self.workflow_transition = self.workflow.transitions.create(
label=TEST_WORKFLOW_TRANSITION_LABEL,
origin_state=self.workflow_state_1,
destination_state=self.workflow_state_2,
)
def _create_index(self):
# Create empty index
index = Index.objects.create(label=TEST_INDEX_LABEL)
# Add our document type to the new index
index.document_types.add(self.document_type)
# Create simple index template
root = index.template_root
index.node_templates.create(
parent=root, expression=TEST_INDEX_TEMPLATE_METADATA_EXPRESSION,
link_documents=True
)
def test_workflow_indexing_initial_state(self):
self._create_document_type()
self._create_workflow_transition()
self._create_index()
self._create_document()
self.assertEqual(
list(
IndexInstanceNode.objects.values_list('value', flat=True)
), ['', TEST_WORKFLOW_INITIAL_STATE_LABEL]
)
def test_workflow_indexing_transition(self):
self._create_document_type()
self._create_workflow_transition()
self._create_index()
self._create_document()
self.document.workflows.first().do_transition(
transition=self.workflow_transition,
user=self.admin_user
)
self.assertEqual(
list(
IndexInstanceNode.objects.values_list('value', flat=True)
), ['', TEST_WORKFLOW_STATE_LABEL]
)
def test_workflow_indexing_document_delete(self):
self._create_document_type()
self._create_workflow_transition()
self._create_index()
self._create_document()
self.document.workflows.first().do_transition(
transition=self.workflow_transition,
user=self.admin_user
)
self.document.delete(to_trash=False)
self.assertEqual(
list(
IndexInstanceNode.objects.values_list('value', flat=True)
), ['']
)
| from __future__ import unicode_literals
from django.test import override_settings
from common.tests import BaseTestCase
from common.tests.mixins import UserMixin
from documents.models import DocumentType
from documents.tests import TEST_SMALL_DOCUMENT_PATH, TEST_DOCUMENT_TYPE_LABEL
from document_indexing.models import Index, IndexInstanceNode
from ..models import Workflow
from .literals import (
TEST_INDEX_LABEL, TEST_INDEX_TEMPLATE_METADATA_EXPRESSION,
TEST_WORKFLOW_INTERNAL_NAME, TEST_WORKFLOW_INITIAL_STATE_LABEL,
TEST_WORKFLOW_INITIAL_STATE_COMPLETION, TEST_WORKFLOW_LABEL,
TEST_WORKFLOW_STATE_LABEL, TEST_WORKFLOW_STATE_COMPLETION,
TEST_WORKFLOW_TRANSITION_LABEL
)
@override_settings(OCR_AUTO_OCR=False)
class DocumentStateIndexingTestCase(UserMixin, BaseTestCase):
def tearDown(self):
self.document_type.delete()
super(DocumentStateIndexingTestCase, self).tearDown()
def _create_document_type(self):
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE_LABEL
)
def _create_document(self):
with open(TEST_SMALL_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
def _create_workflow(self):
self.workflow = Workflow.objects.create(
label=TEST_WORKFLOW_LABEL,
internal_name=TEST_WORKFLOW_INTERNAL_NAME
)
self.workflow.document_types.add(self.document_type)
def _create_workflow_states(self):
self._create_workflow()
self.workflow_state_1 = self.workflow.states.create(
completion=TEST_WORKFLOW_INITIAL_STATE_COMPLETION,
initial=True, label=TEST_WORKFLOW_INITIAL_STATE_LABEL
)
self.workflow_state_2 = self.workflow.states.create(
completion=TEST_WORKFLOW_STATE_COMPLETION,
label=TEST_WORKFLOW_STATE_LABEL
)
def _create_workflow_transition(self):
self._create_workflow_states()
self.workflow_transition = self.workflow.transitions.create(
label=TEST_WORKFLOW_TRANSITION_LABEL,
origin_state=self.workflow_state_1,
destination_state=self.workflow_state_2,
)
def _create_index(self):
# Create empty index
index = Index.objects.create(label=TEST_INDEX_LABEL)
# Add our document type to the new index
index.document_types.add(self.document_type)
# Create simple index template
root = index.template_root
index.node_templates.create(
parent=root, expression=TEST_INDEX_TEMPLATE_METADATA_EXPRESSION,
link_documents=True
)
def test_workflow_indexing_initial_state(self):
self._create_document_type()
self._create_workflow_transition()
self._create_index()
self._create_document()
self.assertEqual(
list(
IndexInstanceNode.objects.values_list('value', flat=True)
), ['', TEST_WORKFLOW_INITIAL_STATE_LABEL]
)
def test_workflow_indexing_transition(self):
self._create_document_type()
self._create_workflow_transition()
self._create_index()
self._create_document()
self.document.workflows.first().do_transition(
transition=self.workflow_transition,
user=self.admin_user
)
self.assertEqual(
list(
IndexInstanceNode.objects.values_list('value', flat=True)
), ['', TEST_WORKFLOW_STATE_LABEL]
)
def test_workflow_indexing_document_delete(self):
self._create_document_type()
self._create_workflow_transition()
self._create_index()
self._create_document()
self.document.workflows.first().do_transition(
transition=self.workflow_transition,
user=self.admin_user
)
self.document.delete(to_trash=False)
self.assertEqual(
list(
IndexInstanceNode.objects.values_list('value', flat=True)
), ['']
) | en | 0.358598 | # Create empty index # Add our document type to the new index # Create simple index template | 1.87433 | 2 |
wham.py | robeme/whampy | 2 | 6631022 | <gh_stars>1-10
"""AUTHOR: efortin
DATE: 16/05/2018 16:06
VERSION: 1.1
This is a Python3 executable script that performs the WHAM analysis
of a set of umbrella sampling simulations, using various methods.
"""
# IMPORTS
import os
import re
import sys
import time
import warnings
import numpy as np
import wham.simdata as sim
from wham.init import Logger, update_progress, parse_command
from wham.setup import startup, read_data
from wham.minim import minimization, calc_free
from wham.errors import mc_error_analysis, split_analysis, consistency_tests
from wham.prints import print_results, print_consistency
from matplotlib import pyplot as plt
from scipy import optimize, constants
from operator import attrgetter
# DECLARATION OF GLOBAL VARIABLES
# PROGRAM STARTUP (COMMAND LINE PARSING)
start_time = time.time()
np.seterr(all='ignore')
metafile, outfile = parse_command(sys.argv)
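# Example invocation (illustrative; the exact arguments accepted are
# defined by wham.init.parse_command):
#   python wham.py metadata.dat results.out
# where the first argument is the metadata file listing the umbrella
# sampling windows and the second is the output file for the results.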
print("Using {0} as metadata file".format(metafile))
windows, init_time = startup(metafile)
windows, data, read_time = read_data(windows)
g, min_time = minimization(windows, data)
data[:,2], data[:,3], bin_min = calc_free(g, windows, data)
if sim.num_mc_runs:
P_std, A_std, G_std, mc_time = mc_error_analysis(windows, data)
else:
P_std, A_std, G_std, split_time = split_analysis(windows, data)
phi, eta, tests_time = consistency_tests(windows, data)
print_results(outfile, data, A_std, P_std)
print_consistency(outfile, windows, G_std, phi, eta)
total_time = time.time() - start_time
print("WHAM calculation complete")
print("--- Runtime: %s seconds ---" % total_time)
| """AUTHOR: efortin
DATE: 16/05/2018 16:06
VERSION: 1.1
This is a Python3 executable script that performs the WHAM analysis
of a set of umbrella sampling simulations, using various methods.
"""
# IMPORTS
import os
import re
import sys
import time
import warnings
import numpy as np
import wham.simdata as sim
from wham.init import Logger, update_progress, parse_command
from wham.setup import startup, read_data
from wham.minim import minimization, calc_free
from wham.errors import mc_error_analysis, split_analysis, consistency_tests
from wham.prints import print_results, print_consistency
from matplotlib import pyplot as plt
from scipy import optimize, constants
from operator import attrgetter
# DECLARATION OF GLOBAL VARIABLES
# PROGRAM STARTUP (COMMAND LINE PARSING)
start_time = time.time()
np.seterr(all='ignore')
metafile, outfile = parse_command(sys.argv)
print("Using {0} as metadata file".format(metafile))
windows, init_time = startup(metafile)
windows, data, read_time = read_data(windows)
g, min_time = minimization(windows, data)
data[:,2], data[:,3], bin_min = calc_free(g, windows, data)
if sim.num_mc_runs:
P_std, A_std, G_std, mc_time = mc_error_analysis(windows, data)
else:
P_std, A_std, G_std, split_time = split_analysis(windows, data)
phi, eta, tests_time = consistency_tests(windows, data)
print_results(outfile, data, A_std, P_std)
print_consistency(outfile, windows, G_std, phi, eta)
total_time = time.time() - start_time
print("WHAM calculation complete")
print("--- Runtime: %s seconds ---" % total_time) | en | 0.748167 | AUTHOR: efortin DATE: 16/05/2018 16:06 VERSION: 1.1 This is a Python3 executable script that performs the WHAM analysis of a set of umbrella sampling simulations, using various methods. # IMPORTS # DECLARATION OF GLOBAL VARIABLES # PROGRAM STARTUP (COMMAND LINE PARSING) | 2.37208 | 2 |
Algorithm/Easy/1-500/100Remove Duplicates from Sorted Array.py | MartinYan623/Lint-Code | 0 | 6631023 | <gh_stars>0
class Solution:
"""
    @param: nums: An integer array
@return: An integer
"""
def removeDuplicates(self, nums):
# write your code here
length = len(nums)
nowlength = length
for i in range(length - 1):
while i < nowlength - 1 and nums[i] == nums[i + 1]:
del nums[i + 1]
nowlength -= 1
ans = len(nums)
return ans
| class Solution:
"""
    @param: nums: An integer array
@return: An integer
"""
def removeDuplicates(self, nums):
# write your code here
length = len(nums)
nowlength = length
for i in range(length - 1):
while i < nowlength - 1 and nums[i] == nums[i + 1]:
del nums[i + 1]
nowlength -= 1
ans = len(nums)
return ans | en | 0.207302 | @param: nums: An ineger array @return: An integer # write your code here | 3.404665 | 3 |
dollar_lambda/args.py | ethanabrooks/pymonad | 1 | 6631024 | """
Defines the :py:class:`Args <dollar_lambda.args.Args>` dataclass and associated functions.
"""
from __future__ import annotations
import dataclasses
import typing
from dataclasses import MISSING, Field, dataclass, fields
from typing import Any, Callable, Iterator, Optional, Union, get_args
from dollar_lambda.data_structures import KeyValue, Output, Sequence
from dollar_lambda.parsers import Parser, defaults, flag, nonpositional, option
def field(
help: Optional[str] = None,
metadata: Optional[dict] = None,
parser: Optional[Parser[Output]] = None,
**kwargs,
) -> Field:
"""
This is a thin wrapper around :external:py:func:`dataclasses.field`.
Parameters
----------
help : str
An optional help string for the argument.
metadata : str
Identical to the `metadata` argument for :external:py:func:`dataclasses.field`.
type : Optional[type | Callable[[str], Any]]
A function that takes a string and returns a value just like the ``type`` argument for
:external:py:meth:`argparse.ArgumentParser.add_argument`.
Returns
-------
A :external:py:class:`dataclasses.Field` object that can be used in place of a default argument as described in the :external:py:class:`dataclasses.Field` documentation.
"""
if metadata is None:
metadata = {}
if parser is not None:
metadata.update(parser=parser)
if help is not None:
metadata.update(help=help)
return dataclasses.field(metadata=metadata, **kwargs)
@dataclass
class _ArgsField:
name: str
default: Any
help: Optional[str] = None
type: Callable[[str], Any] = str
@staticmethod
def parse(field: Field) -> Union["_ArgsField", Parser[Output]]:
if "help" in field.metadata:
help_ = field.metadata["help"]
else:
help_ = None
if "parser" in field.metadata:
parser = field.metadata["parser"]
assert isinstance(parser, Parser), parser
if field.default is MISSING:
return parser
else:
return parser | defaults(**{field.name: field.default})
return _ArgsField(
name=field.name, default=field.default, help=help_, type=field.type
)
@staticmethod
def parser(
*fields: Union["_ArgsField", Parser[Output]],
flip_bools: bool,
repeated: Optional[Parser[Output]],
replace_underscores: bool,
) -> Parser[Output]:
"""
>>> from dollar_lambda import Args
>>> from dataclasses import dataclass
...
>>> @dataclass
... class MyArgs(Args):
... x: Optional[int]
... y: Optional[int] = None
...
>>> MyArgs.parse_args("-x", "1", "-y", "2")
{'x': 1, 'y': 2}
>>> MyArgs.parse_args("-x", "1")
{'x': 1, 'y': None}
>>> MyArgs.parse_args("-y", "2")
usage: -x X -y Y
y: (default: None)
Expected '-x'. Got '-y'
>>> MyArgs.parse_args()
usage: -x X -y Y
y: (default: None)
The following arguments are required: -x
"""
def get_parsers() -> Iterator[Parser[Output]]:
for field in fields:
if isinstance(field, Parser):
yield field
continue
_type = field.type
type_args = get_args(_type)
try:
_type, none = type_args
assert none == type(None)
except (ValueError, AssertionError):
pass
string: Optional[str] = None
if _type == bool:
if field.default is True and flip_bools:
string = f"--no-{field.name}"
yield flag(
default=field.default,
dest=field.name,
help=field.help,
replace_underscores=replace_underscores,
string=string,
)
else:
yield option(
default=field.default,
dest=field.name,
flag=string,
help=field.help,
replace_underscores=replace_underscores,
type=_type,
)
return nonpositional(*get_parsers(), repeated=repeated)
@dataclass
class Args:
"""
:py:class:`Args` is sugar for the :py:func:`nonpositional <dollar_lambda.parsers.nonpositional>` function and
removes much of the boilerplate from defining parsers with many arguments.
>>> from dataclasses import dataclass
>>> from dollar_lambda import Args
>>> @dataclass
... class MyArgs(Args):
... verbose: bool
... count: int
>>> MyArgs.parse_args("--verbose", "--count", "1")
{'verbose': True, 'count': 1}
``MyArgs`` will accept these arguments in any order:
>>> MyArgs.parse_args("--count", "1", "--verbose")
{'count': 1, 'verbose': True}
Note that when the default value of an argument is ``True``, :py:class:`Args` will, by default
add ``--no-`` to the front of the flag (while still assigning the value to the original key):
>>> @dataclass
... class MyArgs(Args):
... tests: bool = True
>>> MyArgs.parse_args("--no-tests")
{'tests': False}
>>> MyArgs.parse_args()
{'tests': True}
To suppress this behavior, set ``flip_bools=False``:
>>> MyArgs.parse_args("--tests", flip_bools=False)
{'tests': False}
By using the :py:meth:`Args.parser` method, :py:class:`Args` can take advantage of all the same
combinators as other parsers:
>>> from dollar_lambda import argument
>>> p = MyArgs.parser()
>>> p1 = p >> argument("a")
>>> p1.parse_args("--no-tests", "hello")
{'tests': False, 'a': 'hello'}
To supply other metadata, like ``help`` text or custom parsers, use :py:func:`field`:
>>> from dollar_lambda import field, option
>>> @dataclass
... class MyArgs(Args):
... x: int = field(default=0, help="a number")
... y: int = field(
... default=1,
... parser=option("y", type=lambda s: int(s) + 1, help="a number to increment"),
... )
>>> MyArgs.parse_args("-h")
usage: -x X -y Y
x: a number (default: 0)
y: a number to increment
This supplies defaults for ``y`` when omitted:
>>> MyArgs.parse_args("-x", "10")
{'x': 10, 'y': 1}
It also applies the custom type to ``y`` when ``"-y"`` is given
>>> MyArgs.parse_args()
{'y': 1, 'x': 0}
"""
@classmethod
def parser(
cls,
flip_bools: bool = True,
repeated: Optional[Parser[Output]] = None,
replace_underscores: bool = True,
) -> Parser[Output]:
"""
Returns a parser for the dataclass.
Converts each field to a parser (:py:func:`option <dollar_lambda.parsers.option>` or
:py:func:`flag <dollar_lambda.parsers.flag>` depending on its type). Combines these parsers using
:py:func:`nonpositional <dollar_lambda.parsers.nonpositional>`.
Parameters
----------
flip_bools: bool
Whether to add ``--no-<argument>`` before arguments that default to ``True``.
replace_underscores: bool
If true, underscores in argument names are replaced with dashes.
Examples
--------
>>> @dataclass
... class MyArgs(Args):
... tests: bool = True
Note the leading ``--no-``:
>>> MyArgs.parse_args("--no-tests")
{'tests': False}
>>> MyArgs.parse_args()
{'tests': True}
To suppress this behavior, set ``flip_bools=False``:
>>> MyArgs.parse_args("--tests", flip_bools=False)
{'tests': False}
"""
def get_fields():
types = typing.get_type_hints(cls) # see https://peps.python.org/pep-0563/
for field in fields(cls):
field.type = types.get(field.name, str)
yield _ArgsField.parse(field)
return _ArgsField.parser(
*get_fields(),
flip_bools=flip_bools,
repeated=repeated,
replace_underscores=replace_underscores,
)
@classmethod
def parse_args(
cls,
*args,
flip_bools: bool = True,
repeated: Optional[Parser[Output]] = None,
) -> Optional[typing.Dict[str, Any]]:
"""
Parses the arguments and returns a dictionary of the parsed values.
"""
return (
cls.parser(flip_bools=flip_bools, repeated=repeated)
>> Parser[Output[Sequence[KeyValue[Any]]]].done()
).parse_args(*args)
| """
Defines the :py:class:`Args <dollar_lambda.args.Args>` dataclass and associated functions.
"""
from __future__ import annotations
import dataclasses
import typing
from dataclasses import MISSING, Field, dataclass, fields
from typing import Any, Callable, Iterator, Optional, Union, get_args
from dollar_lambda.data_structures import KeyValue, Output, Sequence
from dollar_lambda.parsers import Parser, defaults, flag, nonpositional, option
def field(
help: Optional[str] = None,
metadata: Optional[dict] = None,
parser: Optional[Parser[Output]] = None,
**kwargs,
) -> Field:
"""
This is a thin wrapper around :external:py:func:`dataclasses.field`.
Parameters
----------
help : str
An optional help string for the argument.
metadata : str
Identical to the `metadata` argument for :external:py:func:`dataclasses.field`.
type : Optional[type | Callable[[str], Any]]
A function that takes a string and returns a value just like the ``type`` argument for
:external:py:meth:`argparse.ArgumentParser.add_argument`.
Returns
-------
A :external:py:class:`dataclasses.Field` object that can be used in place of a default argument as described in the :external:py:class:`dataclasses.Field` documentation.
"""
if metadata is None:
metadata = {}
if parser is not None:
metadata.update(parser=parser)
if help is not None:
metadata.update(help=help)
return dataclasses.field(metadata=metadata, **kwargs)
@dataclass
class _ArgsField:
name: str
default: Any
help: Optional[str] = None
type: Callable[[str], Any] = str
@staticmethod
def parse(field: Field) -> Union["_ArgsField", Parser[Output]]:
if "help" in field.metadata:
help_ = field.metadata["help"]
else:
help_ = None
if "parser" in field.metadata:
parser = field.metadata["parser"]
assert isinstance(parser, Parser), parser
if field.default is MISSING:
return parser
else:
return parser | defaults(**{field.name: field.default})
return _ArgsField(
name=field.name, default=field.default, help=help_, type=field.type
)
@staticmethod
def parser(
*fields: Union["_ArgsField", Parser[Output]],
flip_bools: bool,
repeated: Optional[Parser[Output]],
replace_underscores: bool,
) -> Parser[Output]:
"""
>>> from dollar_lambda import Args
>>> from dataclasses import dataclass
...
>>> @dataclass
... class MyArgs(Args):
... x: Optional[int]
... y: Optional[int] = None
...
>>> MyArgs.parse_args("-x", "1", "-y", "2")
{'x': 1, 'y': 2}
>>> MyArgs.parse_args("-x", "1")
{'x': 1, 'y': None}
>>> MyArgs.parse_args("-y", "2")
usage: -x X -y Y
y: (default: None)
Expected '-x'. Got '-y'
>>> MyArgs.parse_args()
usage: -x X -y Y
y: (default: None)
The following arguments are required: -x
"""
def get_parsers() -> Iterator[Parser[Output]]:
for field in fields:
if isinstance(field, Parser):
yield field
continue
_type = field.type
type_args = get_args(_type)
try:
_type, none = type_args
assert none == type(None)
except (ValueError, AssertionError):
pass
string: Optional[str] = None
if _type == bool:
if field.default is True and flip_bools:
string = f"--no-{field.name}"
yield flag(
default=field.default,
dest=field.name,
help=field.help,
replace_underscores=replace_underscores,
string=string,
)
else:
yield option(
default=field.default,
dest=field.name,
flag=string,
help=field.help,
replace_underscores=replace_underscores,
type=_type,
)
return nonpositional(*get_parsers(), repeated=repeated)
@dataclass
class Args:
"""
:py:class:`Args` is sugar for the :py:func:`nonpositional <dollar_lambda.parsers.nonpositional>` function and
removes much of the boilerplate from defining parsers with many arguments.
>>> from dataclasses import dataclass
>>> from dollar_lambda import Args
>>> @dataclass
... class MyArgs(Args):
... verbose: bool
... count: int
>>> MyArgs.parse_args("--verbose", "--count", "1")
{'verbose': True, 'count': 1}
``MyArgs`` will accept these arguments in any order:
>>> MyArgs.parse_args("--count", "1", "--verbose")
{'count': 1, 'verbose': True}
Note that when the default value of an argument is ``True``, :py:class:`Args` will, by default
add ``--no-`` to the front of the flag (while still assigning the value to the original key):
>>> @dataclass
... class MyArgs(Args):
... tests: bool = True
>>> MyArgs.parse_args("--no-tests")
{'tests': False}
>>> MyArgs.parse_args()
{'tests': True}
To suppress this behavior, set ``flip_bools=False``:
>>> MyArgs.parse_args("--tests", flip_bools=False)
{'tests': False}
By using the :py:meth:`Args.parser` method, :py:class:`Args` can take advantage of all the same
combinators as other parsers:
>>> from dollar_lambda import argument
>>> p = MyArgs.parser()
>>> p1 = p >> argument("a")
>>> p1.parse_args("--no-tests", "hello")
{'tests': False, 'a': 'hello'}
To supply other metadata, like ``help`` text or custom parsers, use :py:func:`field`:
>>> from dollar_lambda import field, option
>>> @dataclass
... class MyArgs(Args):
... x: int = field(default=0, help="a number")
... y: int = field(
... default=1,
... parser=option("y", type=lambda s: int(s) + 1, help="a number to increment"),
... )
>>> MyArgs.parse_args("-h")
usage: -x X -y Y
x: a number (default: 0)
y: a number to increment
This supplies defaults for ``y`` when omitted:
>>> MyArgs.parse_args("-x", "10")
{'x': 10, 'y': 1}
It also applies the custom type to ``y`` when ``"-y"`` is given
>>> MyArgs.parse_args()
{'y': 1, 'x': 0}
"""
@classmethod
def parser(
cls,
flip_bools: bool = True,
repeated: Optional[Parser[Output]] = None,
replace_underscores: bool = True,
) -> Parser[Output]:
"""
Returns a parser for the dataclass.
Converts each field to a parser (:py:func:`option <dollar_lambda.parsers.option>` or
:py:func:`flag <dollar_lambda.parsers.flag>` depending on its type). Combines these parsers using
:py:func:`nonpositional <dollar_lambda.parsers.nonpositional>`.
Parameters
----------
flip_bools: bool
Whether to add ``--no-<argument>`` before arguments that default to ``True``.
replace_underscores: bool
If true, underscores in argument names are replaced with dashes.
Examples
--------
>>> @dataclass
... class MyArgs(Args):
... tests: bool = True
Note the leading ``--no-``:
>>> MyArgs.parse_args("--no-tests")
{'tests': False}
>>> MyArgs.parse_args()
{'tests': True}
To suppress this behavior, set ``flip_bools=False``:
>>> MyArgs.parse_args("--tests", flip_bools=False)
{'tests': False}
"""
def get_fields():
types = typing.get_type_hints(cls) # see https://peps.python.org/pep-0563/
for field in fields(cls):
field.type = types.get(field.name, str)
yield _ArgsField.parse(field)
return _ArgsField.parser(
*get_fields(),
flip_bools=flip_bools,
repeated=repeated,
replace_underscores=replace_underscores,
)
@classmethod
def parse_args(
cls,
*args,
flip_bools: bool = True,
repeated: Optional[Parser[Output]] = None,
) -> Optional[typing.Dict[str, Any]]:
"""
Parses the arguments and returns a dictionary of the parsed values.
"""
return (
cls.parser(flip_bools=flip_bools, repeated=repeated)
>> Parser[Output[Sequence[KeyValue[Any]]]].done()
).parse_args(*args)
| en | 0.294345 | Defines the :py:class:`Args <dollar_lambda.args.Args>` dataclass and associated functions. This is a thin wrapper around :external:py:func:`dataclasses.field`. Parameters ---------- help : str An optional help string for the argument. metadata : str Identical to the `metadata` argument for :external:py:func:`dataclasses.field`. type : Optional[type | Callable[[str], Any]] A function that takes a string and returns a value just like the ``type`` argument for :external:py:meth:`argparse.ArgumentParser.add_argument`. Returns ------- A :external:py:class:`dataclasses.Field` object that can be used in place of a default argument as described in the :external:py:class:`dataclasses.Field` documentation. >>> from dollar_lambda import Args >>> from dataclasses import dataclass ... >>> @dataclass ... class MyArgs(Args): ... x: Optional[int] ... y: Optional[int] = None ... >>> MyArgs.parse_args("-x", "1", "-y", "2") {'x': 1, 'y': 2} >>> MyArgs.parse_args("-x", "1") {'x': 1, 'y': None} >>> MyArgs.parse_args("-y", "2") usage: -x X -y Y y: (default: None) Expected '-x'. Got '-y' >>> MyArgs.parse_args() usage: -x X -y Y y: (default: None) The following arguments are required: -x :py:class:`Args` is sugar for the :py:func:`nonpositional <dollar_lambda.parsers.nonpositional>` function and removes much of the boilerplate from defining parsers with many arguments. >>> from dataclasses import dataclass >>> from dollar_lambda import Args >>> @dataclass ... class MyArgs(Args): ... verbose: bool ... count: int >>> MyArgs.parse_args("--verbose", "--count", "1") {'verbose': True, 'count': 1} ``MyArgs`` will accept these arguments in any order: >>> MyArgs.parse_args("--count", "1", "--verbose") {'count': 1, 'verbose': True} Note that when the default value of an argument is ``True``, :py:class:`Args` will, by default add ``--no-`` to the front of the flag (while still assigning the value to the original key): >>> @dataclass ... class MyArgs(Args): ... tests: bool = True >>> MyArgs.parse_args("--no-tests") {'tests': False} >>> MyArgs.parse_args() {'tests': True} To suppress this behavior, set ``flip_bools=False``: >>> MyArgs.parse_args("--tests", flip_bools=False) {'tests': False} By using the :py:meth:`Args.parser` method, :py:class:`Args` can take advantage of all the same combinators as other parsers: >>> from dollar_lambda import argument >>> p = MyArgs.parser() >>> p1 = p >> argument("a") >>> p1.parse_args("--no-tests", "hello") {'tests': False, 'a': 'hello'} To supply other metadata, like ``help`` text or custom parsers, use :py:func:`field`: >>> from dollar_lambda import field, option >>> @dataclass ... class MyArgs(Args): ... x: int = field(default=0, help="a number") ... y: int = field( ... default=1, ... parser=option("y", type=lambda s: int(s) + 1, help="a number to increment"), ... ) >>> MyArgs.parse_args("-h") usage: -x X -y Y x: a number (default: 0) y: a number to increment This supplies defaults for ``y`` when omitted: >>> MyArgs.parse_args("-x", "10") {'x': 10, 'y': 1} It also applies the custom type to ``y`` when ``"-y"`` is given >>> MyArgs.parse_args() {'y': 1, 'x': 0} Returns a parser for the dataclass. Converts each field to a parser (:py:func:`option <dollar_lambda.parsers.option>` or :py:func:`flag <dollar_lambda.parsers.flag>` depending on its type). Combines these parsers using :py:func:`nonpositional <dollar_lambda.parsers.nonpositional>`. Parameters ---------- flip_bools: bool Whether to add ``--no-<argument>`` before arguments that default to ``True``. 
replace_underscores: bool If true, underscores in argument names are replaced with dashes. Examples -------- >>> @dataclass ... class MyArgs(Args): ... tests: bool = True Note the leading ``--no-``: >>> MyArgs.parse_args("--no-tests") {'tests': False} >>> MyArgs.parse_args() {'tests': True} To suppress this behavior, set ``flip_bools=False``: >>> MyArgs.parse_args("--tests", flip_bools=False) {'tests': False} # see https://peps.python.org/pep-0563/ Parses the arguments and returns a dictionary of the parsed values. | 2.724111 | 3 |
deployment/custom_resources/custom-resource-py/lib/medialive.py | mlnrt/live-streaming-with-automated-multi-language-subtitling | 0 | 6631025 | <reponame>mlnrt/live-streaming-with-automated-multi-language-subtitling<filename>deployment/custom_resources/custom-resource-py/lib/medialive.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not #
# use this file except in compliance with the License. A copy of the #
# License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, #
# express or implied. See the License for the specific language governing #
# permissions and limitations under the License. #
##############################################################################
import json
from urllib.parse import urlparse
import boto3
import time
medialive = boto3.client('medialive')
ssm = boto3.client('ssm')
responseData = {}
def create_push_input(config):
sg = medialive.create_input_security_group(
WhitelistRules=[
{
'Cidr': config['Cidr']
}
]
)
#Feature/xxxx RTMP Requires Stream names for each input Destination.
if config['Type'] == 'RTMP_PUSH':
Destination = [
{
'StreamName': config['StreamName']+'/primary'
},
{
'StreamName': config['StreamName']+'/secondary'
}
]
else:
Destination = []
response = medialive.create_input(
InputSecurityGroups=[
sg['SecurityGroup']['Id'],
],
Name = config['StreamName'],
Destinations= Destination,
Type=config['Type']
)
responseData['Id'] = response['Input']['Id']
responseData['EndPoint1'] = response['Input']['Destinations'][0]['Url']
responseData['EndPoint2'] = response['Input']['Destinations'][1]['Url']
print('RESPONSE::{}'.format(responseData))
return responseData
def create_pull_input(config):
Name = config['StreamName']
Sources = [
{
'Url': config['PriUrl']
},
{
'Url': config['PriUrl']
}
]
Type = config['Type']
# store input u/p in SSM
if config['PriUser']:
Sources[0]['Username'] = config['PriUser']
#Sources[0]['Username'] = config['PriUser']
ssm.put_parameter(
Name = config['PriUser'],
Description = 'Live Stream solution Primary input credentials',
Type = 'string',
Value = config['PriPass']
)
# store input u/p in SSM
if config['SecUser']:
Sources[1]['Username'] = config['SecUser']
#Sources[1]['Username'] = config['SecUser']
ssm.put_parameter(
Name = config['PriUser'],
Description = 'Live Stream solution Primary input credentials',
Type = 'string',
Value = config['PriPass']
)
response = medialive.create_input(
Name = Name,
Type = Type,
Sources = Sources
)
responseData['Id'] = response['Input']['Id']
responseData['EndPoint1'] = 'Push InputType only'
responseData['EndPoint2'] = 'Push InputType only'
print('RESPONSE::{}'.format(responseData))
return responseData
def create_channel(config):
# set InputSpecification based on the input resolution:
if config['Resolution'] == '1080':
res = 'HD'
bitrate = 'MAX_20_MBPS'
profile = './encoding-profiles/medialive-1080p.json'
elif config['Resolution'] == '720':
res = 'HD'
bitrate = 'MAX_10_MBPS'
profile = './encoding-profiles/medialive-720p.json'
else:
res = 'SD'
bitrate = 'MAX_10_MBPS'
profile = './encoding-profiles/medialive-540p.json'
#hotfix/V52152945 loop only supported in HLS_PULL
if config['Type'] == 'URL_PULL':
settings = {
'SourceEndBehavior': 'LOOP'
}
else:
settings = {}
with open(profile) as encoding:
EncoderSettings = json.load(encoding)
response = medialive.create_channel(
InputSpecification = {
'Codec': config['Codec'],
'Resolution':res,
'MaximumBitrate':bitrate
},
InputAttachments = [{
'InputId': config['InputId'],
'InputSettings': settings
}],
Destinations = [{
'Id': "destination1",
'Settings': [
{
'PasswordParam': config['<PASSWORD>'],
'Url': config['MediaPackagePriUrl'],
'Username': config['MediaPackagePriUser']
},
{
'PasswordParam': config['<PASSWORD>'],
'Url': config['MediaPackageSecUrl'],
'Username': config['MediaPackageSecUser']
}
]
}],
Name = config['Name'],
RoleArn = config['Role'],
EncoderSettings = EncoderSettings,
)
responseData['ChannelId'] = response['Channel']['Id']
print('RESPONSE::{}'.format(responseData))
return responseData
def delete_channel(ChannelId):
medialive.stop_channel(
ChannelId = ChannelId
)
response = medialive.delete_channel(
ChannelId = ChannelId
)
InputId = response['InputAttachments'][0]['InputId']
# wait for channel delete so that the input state is detached:
while True:
state = medialive.describe_input(
InputId=InputId
)
if state['State'] == 'DETACHED':
break
else:
time.sleep(3)
medialive.delete_input(
InputId = InputId
)
return
| #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not #
# use this file except in compliance with the License. A copy of the #
# License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, #
# express or implied. See the License for the specific language governing #
# permissions and limitations under the License. #
##############################################################################
import json
from urllib.parse import urlparse
import boto3
import time
medialive = boto3.client('medialive')
ssm = boto3.client('ssm')
responseData = {}
def create_push_input(config):
sg = medialive.create_input_security_group(
WhitelistRules=[
{
'Cidr': config['Cidr']
}
]
)
#Feature/xxxx RTMP Requires Stream names for each input Destination.
if config['Type'] == 'RTMP_PUSH':
Destination = [
{
'StreamName': config['StreamName']+'/primary'
},
{
'StreamName': config['StreamName']+'/secondary'
}
]
else:
Destination = []
response = medialive.create_input(
InputSecurityGroups=[
sg['SecurityGroup']['Id'],
],
Name = config['StreamName'],
Destinations= Destination,
Type=config['Type']
)
responseData['Id'] = response['Input']['Id']
responseData['EndPoint1'] = response['Input']['Destinations'][0]['Url']
responseData['EndPoint2'] = response['Input']['Destinations'][1]['Url']
print('RESPONSE::{}'.format(responseData))
return responseData
def create_pull_input(config):
Name = config['StreamName']
Sources = [
{
'Url': config['PriUrl']
},
{
'Url': config['PriUrl']
}
]
Type = config['Type']
# store input u/p in SSM
if config['PriUser']:
Sources[0]['Username'] = config['PriUser']
#Sources[0]['Username'] = config['PriUser']
ssm.put_parameter(
Name = config['PriUser'],
Description = 'Live Stream solution Primary input credentials',
Type = 'string',
Value = config['PriPass']
)
# store input u/p in SSM
if config['SecUser']:
Sources[1]['Username'] = config['SecUser']
#Sources[1]['Username'] = config['SecUser']
ssm.put_parameter(
Name = config['PriUser'],
Description = 'Live Stream solution Primary input credentials',
Type = 'string',
Value = config['PriPass']
)
response = medialive.create_input(
Name = Name,
Type = Type,
Sources = Sources
)
responseData['Id'] = response['Input']['Id']
responseData['EndPoint1'] = 'Push InputType only'
responseData['EndPoint2'] = 'Push InputType only'
print('RESPONSE::{}'.format(responseData))
return responseData
def create_channel(config):
# set InputSpecification based on the input resolution:
if config['Resolution'] == '1080':
res = 'HD'
bitrate = 'MAX_20_MBPS'
profile = './encoding-profiles/medialive-1080p.json'
elif config['Resolution'] == '720':
res = 'HD'
bitrate = 'MAX_10_MBPS'
profile = './encoding-profiles/medialive-720p.json'
else:
res = 'SD'
bitrate = 'MAX_10_MBPS'
profile = './encoding-profiles/medialive-540p.json'
#hotfix/V52152945 loop only supported in HLS_PULL
if config['Type'] == 'URL_PULL':
settings = {
'SourceEndBehavior': 'LOOP'
}
else:
settings = {}
with open(profile) as encoding:
EncoderSettings = json.load(encoding)
response = medialive.create_channel(
InputSpecification = {
'Codec': config['Codec'],
'Resolution':res,
'MaximumBitrate':bitrate
},
InputAttachments = [{
'InputId': config['InputId'],
'InputSettings': settings
}],
Destinations = [{
'Id': "destination1",
'Settings': [
{
'PasswordParam': config['<PASSWORD>'],
'Url': config['MediaPackagePriUrl'],
'Username': config['MediaPackagePriUser']
},
{
'PasswordParam': config['<PASSWORD>'],
'Url': config['MediaPackageSecUrl'],
'Username': config['MediaPackageSecUser']
}
]
}],
Name = config['Name'],
RoleArn = config['Role'],
EncoderSettings = EncoderSettings,
)
responseData['ChannelId'] = response['Channel']['Id']
print('RESPONSE::{}'.format(responseData))
return responseData
def delete_channel(ChannelId):
medialive.stop_channel(
ChannelId = ChannelId
)
response = medialive.delete_channel(
ChannelId = ChannelId
)
InputId = response['InputAttachments'][0]['InputId']
# wait for channel delete so that the input state is detached:
while True:
state = medialive.describe_input(
InputId=InputId
)
if state['State'] == 'DETACHED':
break
else:
time.sleep(3)
medialive.delete_input(
InputId = InputId
)
return | en | 0.725801 | #!/usr/bin/python # -*- coding: utf-8 -*- ############################################################################## # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Amazon Software License (the "License"). You may not # # use this file except in compliance with the License. A copy of the # # License is located at # # # # http://aws.amazon.com/asl/ # # # # or in the "license" file accompanying this file. This file is distributed # # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # # express or implied. See the License for the specific language governing # # permissions and limitations under the License. # ############################################################################## #Feature/xxxx RTMP Requires Stream names for each input Destination. # store input u/p in SSM #Sources[0]['Username'] = config['PriUser'] # store input u/p in SSM #Sources[1]['Username'] = config['SecUser'] # set InputSpecification based on the input resolution: #hotfix/V52152945 loop only supported in HLS_PULL # wait for channel delete so that the input state is detached: | 1.924643 | 2 |
pwnable.kr-write-up/random/random.py | IdanBanani/Pwnable.kr-CTF-Writeups | 0 | 6631026 | import os
random_value = 0x6b8b4567
xor_result = 0xdeadbeef
key = random_value ^ xor_result
print "key is:", key
os.system("echo '" + str(key) + "' | ./random")
| import os
random_value = 0x6b8b4567
xor_result = 0xdeadbeef
key = random_value ^ xor_result
print "key is:", key
os.system("echo '" + str(key) + "' | ./random")
| none | 1 | 2.405367 | 2 |
|
tcfl/pos.py | d-scott-phillips/tcf | 0 | 6631027 | <gh_stars>0
#! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
# FIXME:
#
# - command line method to discover installed capabilities; print
# each's __doc__
"""
This module provides tools to image devices with a Provisioning OS.
The general operation mode for this is instructing the device to boot
the :term:`Provisioning OS <POS>`; at this point, the test script (or
via the *tcf* client line) can interact with the POS over the serial
console.
Then the device can be partitioned, formatted, etc with general Linux
command line. As well, we can provide an :mod:`rsync server
<ttbl.rsync>` to provide OS images that can be flashed
Booting to POS can be accomplished:
- by network boot and root over NFS
- by a special boot device pre-configured to always boot POS
- any other
Server side modules used actively by this system:
- DHCP server :mod:`ttbl.dhcp`: provides dynamic IP address
assignment; it can be configured so a pre-configured IP address is
always assigned to a target and will provide also PXE/TFTP boot
services to boot into POS mode (working in conjunction with a HTTP,
TFTP and NFS servers).
- rsync server :mod:`ttbl.rsync`: provides access to images to rsync
into partitions (which is way faster than some other imaging methods
when done over a 1Gbps link).
- port redirector :mod:`ttbl.socat`: not strictly needed for POS, but
useful to redirect ports out of the :term:`NUT` to the greater
  Internet. This comes in handy if, as part of the testing, external
  software has to be installed or external services accessed.
Note installation in the server side is needed, as described in
:ref:`POS setup <pos_setup>`.
"""
import inspect
import operator
import os
import random
import re
import traceback
import distutils.version
import Levenshtein
import tc
import tl
from . import msgid_c
def image_spec_to_tuple(i):
distro = ""
spin = ""
version = ""
pl = ""
arch = ""
il = i.split(":")
if len(il) > 0:
distro = il[0]
if len(il) > 1:
spin = il[1]
if len(il) > 2:
version = il[2]
if len(il) > 3:
pl = il[3]
if len(il) > 4:
arch = il[4]
return distro, spin, version, pl, arch
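# Example (illustrative): an image specification is a colon-separated
# string of up to five fields, distro:spin:version:pl:arch, with missing
# trailing fields defaulting to empty strings.  For instance,
#   image_spec_to_tuple("clear:live:25550")
# returns ('clear', 'live', '25550', '', ''), and
#   image_spec_to_tuple("fedora:cloud-base-x86-64:28::x86_64")
# returns ('fedora', 'cloud-base-x86-64', '28', '', 'x86_64').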
def image_list_from_rsync_output(output):
imagel = []
# drwxrwxr-x 4,096 2018/10/19 00:41:04 .
# drwxr-xr-x 4,096 2018/10/11 06:24:44 clear:live:25550
# dr-xr-xr-x 4,096 2018/04/24 23:10:02 fedora:cloud-base-x86-64:28
# drwxr-xr-x 4,096 2018/10/11 20:52:34 rtk::114
# ...
# so we parse for 5 fields, take last
for line in output.splitlines():
tokens = line.split(None, 5)
if len(tokens) != 5:
continue
image = tokens[4]
if not ':' in image:
continue
imagel.append(image_spec_to_tuple(image))
return imagel
def image_select_best(image, available_images, target):
arch_default = target.bsp_model
image_spec = image_spec_to_tuple(image)
arch = image_spec[4]
if arch == "":
arch = arch_default
if arch == None or arch == "":
image_spec2 = list(image_spec)
image_spec2[4] = "ARCHITECTURE"
raise tc.blocked_e(
"no architecture specified (image %s), neither it could not be "
"guessed from the target's BSP model (%s); try specifying the "
"image as %s"
% (image, target.bsp_model, ":".join(image_spec2)))
target.report_info("POS: goal image spec: %s" % list(image_spec), dlevel = 2)
for available_image in available_images:
target.report_info("POS: available images: %s" % list(available_image),
dlevel = 2)
# filter which images have arch or no arch spec
available_images = filter(lambda x: x[4] == arch, available_images)
if not available_images:
raise tc.blocked_e(
"can't find image for architecture %s "
"in list of available image" % arch,
dict(images_available = \
"\n".join([ ":".join(i) for i in available_images ]))
)
for available_image in available_images:
target.report_info("POS: available images (filtered arch %s): %s"
% (arch, list(available_image)), dlevel = 2)
# filter first based on the distro (first field)
distro = image_spec[0]
if distro == "":
distro_images = available_images
else:
distro_images = filter(lambda x: x[0] == distro, available_images)
for available_image in distro_images:
target.report_info("POS: available images (filtered distro %s): %s"
% (distro, list(available_image)), dlevel = 2)
# now filter based on the distro spin; if none, well, pick one at random
spin = image_spec[1]
if spin == "":
spin_images = distro_images
else:
spin_images = filter(lambda x: x[1] == spin, distro_images)
if not spin_images:
raise tc.blocked_e(
"can't find match for image %s on available images" % image,
dict(images_available =
"\n".join([ ":".join(i) for i in available_images ]))
)
for available_image in spin_images:
target.report_info("POS: available images (filtered spin %s): %s"
% (spin, list(available_image)), dlevel = 2)
# now filter based on version -- rules change here -- if there is
# no version specified, pick what seems to be the most recent
# (highest)
version = image_spec[2]
if version == "":
versions = sorted([
(distutils.version.LooseVersion(i[2]) if i[2] != ""
else distutils.version.LooseVersion('0'))
for i in spin_images
])
version = versions[-1]
else:
version = distutils.version.LooseVersion(version)
version_images = filter(
lambda x: (
distutils.version.LooseVersion(x[2] if x[2] != "" else '0')
== version
),
spin_images)
if not version_images:
raise tc.blocked_e(
"can't find image match for version %s "
"in list of available images" % version,
dict(images_available =
"\n".join([ ":".join(i) for i in version_images ]))
)
for available_image in version_images:
target.report_info("POS: available images (filtered version %s): %s"
                           % (version, list(available_image)), dlevel = 2)
# now filter based on subversion -- rules change here -- if there is
# no subversion specified, pick what seems to be the most recent
# (highest)
subversion = image_spec[3]
if subversion == "":
subversions = sorted([
(distutils.version.LooseVersion(i[3]) if i[3] != ""
else distutils.version.LooseVersion('0'))
for i in version_images
])
subversion = subversions[-1]
else:
subversion = distutils.version.LooseVersion(subversion)
subversion_images = filter(
lambda x: (
distutils.version.LooseVersion(x[3] if x[3] != "" else '0')
== subversion
),
version_images)
if not subversion_images:
raise tc.blocked_e(
"can't find image match for sub-version %s "
"in list of available images" % subversion,
dict(images_available =
"\n".join([ ":".join(i) for i in subversion_images ]))
)
for available_image in subversion_images:
target.report_info("POS: available images (filtered subversion %s): %s"
                           % (subversion, list(available_image)), dlevel = 2)
# we might have multiple image choices if distro or live image
# weren't specified, so pick one
return random.choice(subversion_images)
# FIXME: what I don't like about this is that we have no info on the
# interconnect -- this must require it?
def target_power_cycle_to_pos_pxe(target):
target.report_info("POS: setting target to PXE boot Provisioning OS")
target.property_set("pos_mode", "pxe")
target.power.cycle()
# Now setup the local boot loader to boot off that
target.property_set("pos_mode", "local")
# FIXME: what I don't like about this is that we have no info on the
# interconnect -- this must require it?
def target_power_cycle_to_normal_pxe(target):
target.report_info("Setting target not to PXE boot Provisioning OS")
target.property_set("pos_mode", "local")
target.power.cycle()
def mk_persistent_tcf_d(target, subdirs = None):
if subdirs == None:
dirs = [ '/mnt/persistent.tcf.d' ]
else:
dirs = [ '/mnt/persistent.tcf.d/' + subdir for subdir in subdirs ]
    # just create / recreate all the dirs
target.shell.run('mkdir -p ' + " ".join(dirs))
# Ensure there is a README -- this is slow, so don't do it if
# already there
output = target.shell.run(
'test -f /mnt/persistent.tcf.d/README || echo N""O' ,
output = True)
if 'NO' in output:
target.shell.run("""\
cat <<EOF > /mnt/persistent.tcf.d/README
This directory has been created by TCF's Provisioning OS to store files to
be provisioned in the root file system.
When flashing a new image to this partition, the contents in this tree
will not be removed/replaced. It is then faster to rsync things in
from the client machine.
EOF""")
def deploy_linux_kernel(ic, target, _kws):
"""Deploy a linux kernel tree in the local machine to the target's
root filesystem
This is normally given to :func:`target.pos.deploy_image
<tcfl.pos.extension.deploy_image>` as:
>>> target.kw_set("pos_deploy_linux_kernel", SOMELOCALLOCATION)
>>> target.pos.deploy_image(ic, IMAGENAME,
>>> extra_deploy_fns = [ tcfl.pos.deploy_linux_kernel ])
as it expects ``kws['pos_deploy_linux_kernel']`` which points to a
local directory in the form::
- boot/*
- lib/modules/KVER/*
all those will be rsynced to the target's persistent root area
(for speed) and from there to the root filesystem's /boot and
/lib/modules. Anything else in the ``/boot/`` and
``/lib/modules/`` directories will be replaced with what comes
from the *kernel tree*.
**Low level details**
When the target's image has been flashed in place,
:func:`tcfl.pos.deploy_image <tcfl.pos.extension.deploy_image>` is
asked to call this function.
The client will rsync the tree from the local machine to the
persistent space using :meth:`target.pos.rsync <extension.rsync>`,
which also caches it in a persistent area to speed up multiple
transfers.
"""
    if 'pos_deploy_linux_kernel_tree' not in _kws:
target.report_info("not deploying linux kernel because "
"*pos_deploy_linux_kernel_tree* keyword "
"has not been set for the target", dlevel = 2)
return
target.report_info("rsyncing boot image to target")
target.pos.rsync("%(pos_deploy_linux_kernel_tree)s/boot" % target.kws,
"/boot")
target.report_info("rsyncing lib/modules to target")
target.pos.rsync("%(pos_deploy_linux_kernel_tree)s/lib/modules"
% target.kws,
"/lib/modules")
target.testcase._targets_active()
target.report_pass("linux kernel transferred")
#:
#: Functions to boot a target into POS
#:
#: Different target drivers can be loaded and will add members to
#: these dictionaries to extend the abilities of the core system to
#: put targets in Provisioning OS mode.
#:
#: This then allows a single test script to work with multiple target
#: types without having to worry about details.
capability_fns = dict(
#: Function to call to power cycle the target and have it boot the
#: Provisioning OS.
#:
#: This shall be a one shot thing; the following power cycle shall
#: boot the target normally
#:
#: Arguments:
#: - tcfl.tc.target_c target: target to boot in POS mode
boot_to_pos = dict(),
#: Function to call to power cycle the target and have it boot the
#: installed OS (not the Provisioning OS).
#:
#: Arguments:
#: - tcfl.tc.target_c target: target to boot in normal mode
boot_to_normal = dict(),
#: Function to call to configure the boot loader once the system
    #: has been provisioned.
#:
#: Arguments:
#: - tcfl.tc.target_c target: target who's boot has to be configured
#: - str root_part_dev: root device
#: - str image: image specification
boot_config = dict(),
#: Function to call to fix the boot loader from a system that
#: might have booted, we have something like a login prompt on the
#: serial console
#:
#: Arguments:
#: - tcfl.tc.target_c target: target who's boot has to be configured
boot_config_fix = dict(),
#: Function to use to partition the target's storage
#:
#: Will be called when the target has a property *pos_repartition*
    #: set or when the system thinks the partition table is trashed
#: and needs reinitialization.
#:
#: Arguments:
#: - tcfl.tc.target_c target: target who's storage we are
#: partitioning
#: - str boot_dev: device used to boot
#:
#: returns: nothing, but sets target.root_part_dev, where the rootfs is
#:
mount_fs = dict(),
#: Post-deploy functions to run
extra_deploy = dict(),
)
_pos_capable_defaults = dict(
# backwards compat
boot_to_pos = 'pxe',
boot_to_normal = 'pxe',
boot_config = 'uefi',
mount_fs = 'multiroot',
partition = 'default',
)
def capability_register(capability, value, fns):
assert capability in capability_fns.keys(), \
"capability %s is not one of: %s" \
% (capability, " ".join(capability_fns.keys()))
assert isinstance(value, basestring), \
"capability value must be a string, got %s" % type(value).__name__
assert callable(fns) \
or (
isinstance(fns, list)
and all([ callable(i) for i in fns ])
), \
"fns %s is not a callable or list of callables" % fns
capability_fns.setdefault(capability, {})[value] = fns
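# Example registration (hypothetical driver, for illustration only): a
# configuration module adding, say, a serial-console based way of booting
# to POS would register it as
#
#   tcfl.pos.capability_register('boot_to_pos', 'serial',
#                                target_power_cycle_to_pos_serial)
#
# mirroring the real registrations at the bottom of this file.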
class extension(tc.target_extension_c):
"""
Extension to :py:class:`tcfl.tc.target_c` to handle Provisioning
OS capabilities.
"""
def __init__(self, target):
if 'pos_capable' not in target.rt:
raise self.unneeded
tc.target_extension_c.__init__(self, target)
pos_capable = target.kws['pos_capable']
if isinstance(pos_capable, bool):
if pos_capable == False:
raise tc.blocked_e("target is not POS capable",
dict(target = target))
target.report_info("WARNING! target's pos_capable is still old "
"style, update your config--taking "
"defaults")
self.capabilities = _pos_capable_defaults
elif isinstance(pos_capable, dict):
self.capabilities = pos_capable
else:
raise tc.blocked_e("Target's 'pos_capable' target is "
"not a dictionary of POS capabilities",
dict(target = self.target))
self.umount_list = [ '/mnt' ]
def _boot_dev_guess(self, boot_dev):
target = self.target
# What is our boot device?
if boot_dev:
assert isinstance(boot_dev, basestring), \
'boot_dev must be a string'
target.report_info("POS: boot device %s (from arguments)"
% boot_dev, dlevel = 3)
else:
boot_dev = target.kws.get('pos_boot_dev', None)
if boot_dev == None:
raise tc.blocked_e(
"Can't guess boot_dev (no `pos_boot_dev` tag available)",
{ 'target': target } )
target.report_info("POS: boot device %s (from pos_boot_dev tag)"
% boot_dev)
boot_dev = "/dev/" + boot_dev
        # HACK: /dev/[hs]d* name partitions as /dev/[hs]dN, whereas mmc and
# friends add /dev/mmcWHATEVERpN. Seriously...
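        # eg (illustration): partition 3 of /dev/sda is /dev/sda3 (empty
        # prefix), while partition 3 of /dev/mmcblk0 is /dev/mmcblk0p3
        # (prefix "p"); p_prefix lets callers build partition names either way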
if boot_dev.startswith("/dev/hd") \
or boot_dev.startswith("/dev/sd") \
or boot_dev.startswith("/dev/vd"):
target.kw_set('p_prefix', "")
else:
target.kw_set('p_prefix', "p")
return boot_dev
# FIXME: make this return fn and a description saying
# "capability %s/%s @ %s.%s()" so we can use it to feed to messages such as
# "rebooting into Provisioning OS [0/3] with capability %s/%s @ %s.%s()"
def cap_fn_get(self, capability, default = None):
"""
Return a target's POS capability.
:param str capability: name of the capability, as defined in
the target's tag :ref:`*pos_capable* <pos_capable>`.
:param str default: (optional) default to use if not
specified; DO NOT USE! WILL BE DEPRECATED!
"""
if capability not in capability_fns:
raise tc.blocked_e("Unknown POS capability '%s'; maybe "
"needs to be configured in "
"tcfl.pos.capability_fns?" %
capability, dict(target = self.target))
if capability not in self.capabilities:
self.target.report_info("WARNING! target's pos_capable "
"doesn't list '%s'; defaulting to '%s'"
% (capability, default))
capability_value = self.capabilities.get(capability, default)
if capability_value == None: # this means not needed/supported
self.target.report_info(
"POS: capability %s resolves to no-action" % capability)
return None
if capability_value not in capability_fns[capability]:
raise tc.blocked_e(
"target defines '%s' method for '%s' that is unknown to "
"the Provisioning OS library; maybe configuration for it "
"is not loaded?" % (capability_value, capability),
attachments = dict(target = self.target,
capability = capability,
value = capability_value)
)
capability_fn = capability_fns[capability][capability_value]
modname = capability_fn.__module__
self.target.report_info(
"POS: capability %s/%s by %s.%s" % (
capability, capability_value,
inspect.getsourcefile(capability_fn), capability_fn.__name__))
return capability_fn
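    # eg (illustration): with the default pos_capable capabilities,
    # cap_fn_get('boot_to_pos') resolves the 'pxe' value to
    # target_power_cycle_to_pos_pxe, as registered at the bottom of this file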
_regex_waiting_for_login = re.compile(r".*\blogin:\s*$")
def _unexpected_console_output_try_fix(self, output, target):
# so when trying to boot POS we got unexpected console output;
# let's see what can we do about it.
if output == None:
# nah, can't do much
return
# looks like a login prompt? Maybe we can login and munge
# things around
if self._regex_waiting_for_login.search(output):
boot_config_fix_fn = target.pos.cap_fn_get('boot_config_fix',
'uefi')
if boot_config_fix_fn:
target.report_info("POS: got an unexpected login "
"prompt, will try to fix the "
"boot configuration")
boot_config_fix_fn(target)
else:
target.report_error(
"POS: seems we got a login prompt that is not POS, "
"but I don't know how to fix it; target does not "
"declare capability `boot_config_fix`",
attachments = dict(output = output))
def boot_to_pos(self, pos_prompt = None,
# plenty to boot to an nfsroot, hopefully
timeout = 60,
boot_to_pos_fn = None):
target = self.target
if boot_to_pos_fn == None:
# None specified, let's take from the target config
boot_to_pos_fn = self.cap_fn_get('boot_to_pos', 'pxe')
for tries in range(3):
target.report_info("POS: rebooting into Provisioning OS [%d/3]"
% tries)
boot_to_pos_fn(target)
# Sequence for TCF-live based on Fedora
if pos_prompt:
target.shell.linux_shell_prompt_regex = pos_prompt
try:
target.shell.up(timeout = timeout)
except tc.error_e as e:
outputf = e.attachments_get().get('console output', None)
if outputf:
output = open(outputf.name).read()
if output == None or output == "" or output == "\x00":
target.report_error("POS: no console output, retrying")
continue
# sometimes the BIOS has been set to boot local directly,
# so we might as well retry
target.report_error("POS: unexpected console output, retrying")
self._unexpected_console_output_try_fix(output, target)
continue
target.report_info("POS: got Provisioning OS shell")
break
else:
raise tc.blocked_e(
"POS: tried too many times to boot, without signs of life",
{ "console output": target.console.read(), 'target': target })
def boot_normal(self, boot_to_normal_fn = None):
"""
        Power cycle the target (if needed) and boot to the normal OS (vs
booting to the Provisioning OS).
"""
target = self.target
if boot_to_normal_fn == None:
# None specified, let's take from the target config
boot_to_normal_fn = self.cap_fn_get('boot_to_normal')
boot_to_normal_fn(target)
def mount_fs(self, image, boot_dev):
"""Mount the target's filesystems in /mnt
When completed, this function has (maybe)
formatted/reformatted and mounted all of the target's
filesystems starting in /mnt.
For example, if the final system would have filesystems */boot*,
*/* and */home*, this function would mount them on:
- / on /mnt/
- /boot on /mnt/boot
- /home on /mnt/home
        This allows :meth:`deploy_image` to rsync content into the
final system.
:param str image: name of the image we are going to deploy in
this target
:param str boot_dev: device name the system will use to boot
"""
assert isinstance(image, basestring)
assert isinstance(boot_dev, basestring)
self.target.shell.run("lsblk")
mount_fs_fn = self.cap_fn_get("mount_fs")
return mount_fs_fn(self.target, image, boot_dev)
def rsyncd_start(self, ic):
"""
Start an *rsync* server on a target running Provisioning OS
This can be used to receive deployment files from any location
needed to execute later in the target. The server is attached to
        the ``/mnt`` directory and the target is supposed to mount the
destination filesystems there.
This is usually called automatically for the user by the likes of
:func:`deploy_image` and others.
It will create a tunnel from the server to the target's port where
the rsync daemon is listening. A client can then connect to the
server's port to stream data over the rsync protocol. The server
address and port will be stored in the *target*'s keywords
*rsync_port* and *rsync_server* and thus can be accessed with:
>>> print target.kws['rsync_server'], target.kws['rsync_port']
:param tcfl.tc.target_c ic: interconnect (network) to which
the target is connected.
"""
target = self.target
target.shell.run("""\
cat > /tmp/rsync.conf <<EOF
[rootfs]
use chroot = true
path = /mnt/
read only = false
timeout = 60
uid = root
gid = root
EOF""")
        # start rsync in the background and save its PID to a file, as rsync
        # creates no PID file itself and we might not have killall in the POS
target.shell.run(
"rsync --port 3000 --daemon --no-detach --config /tmp/rsync.conf &"
"echo $! > /tmp/rsync.pid")
# Tell the tunneling interface which IP address we want to use
target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
target.kw_set('rsync_port', target.tunnel.add(3000))
target.kw_set('rsync_server', target.rtb.parsed_url.hostname)
def rsync(self, src = None, dst = None,
persistent_name = None,
persistent_dir = '/persistent.tcf.d'):
"""
rsync data from the local machine to a target
The local machine is the machine executing the test script (where
*tcf run* was called).
This function will first rsync data to a location in the target
        (persistent storage ``/persistent.tcf.d``) that will not be
        overridden when flashing images. Then it will rsync it from there
to the final location.
This allows the content to be cached in between testcase execution
        that reimages the target. Thus, on the first run, the whole source
        tree is transferred to the persistent area, but subsequent runs
        will already find it there even if the OS image has been
reflashed (as the reflashing will not touch the persistent
area). Of course this assumes the previous executions didn't wipe
the persistent area or the whole disk was not corrupted.
This function can be used, for example, when wanting to deploy
extra data to the target when using :func:`deploy_image`:
>>> @tcfl.tc.interconnect("ipv4_addr")
>>> @tcfl.tc.target("pos_capable")
>>> class _test(tcfl.tc.tc_c)
>>> ...
>>>
>>> @staticmethod
>>> def _deploy_mygittree(_ic, target, _kws):
>>> tcfl.pos.rsync(os.path.expanduser("~/somegittree.git"),
>>> dst = '/opt/somegittree.git')
>>>
>>> def deploy(self, ic, target):
>>> ic.power.on()
>>> target.pos.deploy_image(
>>> ic, "fedora::29",
>>> extra_deploy_fns = [ self._deploy_mygittree ])
>>>
>>> ...
In this example, the user has a cloned git tree in
``~/somegittree.git`` that has to be flashed to the target into
``/opt/somegittree.git`` after ensuring the root file system is
flashed with *Fedora 29*. :func:`deploy_image` will start the rsync
server and then call *_deploy_mygittree()* which will use
:meth:`target.pos.rsync <rsync>` to rsync from the user's
machine to the target's persistent location (in
``/mnt/persistent.tcf.d/somegittree.git``) and from there to the
final location of ``/mnt/opt/somegittree.git``. When the system
boots it will be of course in ``/opt/somegittree.git``
Because :meth:`target.pos.rsyncd_start <rsyncd_start>`
        has been called already, we now have these keywords available
        that tell us where to connect:
>>> target.kws['rsync_server']
>>> target.kws['rsync_port']
as setup by calling :meth:`target.pos.rsyncd_start
<rsyncd_start>` on the target. Functions such as
:meth:`target.pos.deploy_image <deploy_image>` do this for
you.
:param str src: (optional) source tree/file in the local machine
to be copied to the target's persistent area. If not specified,
nothing is copied to the persistent area.
:param str dst: (optional) destination tree/file in the target
machine; if specified, the file is copied from the persistent
area to the final destination. If not specified,
nothing is copied from the persistent area to the final
destination.
:param str persistent_name: (optional) name for the file/tree in
the persistent area; defaults to the basename of the source file
specification.
:param str persistent_dir: (optional) name for the persistent
area in the target, defaults to `/persistent.tcf.d`.
"""
target = self.target
target.shell.run("mkdir -p /mnt/%s" % persistent_dir)
# upload the directory to the persistent area
if persistent_name == None:
assert src != None, \
"no `src` parameter is given, `persistent_name` must " \
"then be specified"
persistent_name = os.path.basename(src)
if src != None:
target.report_info(
"rsyncing %s to target's persistent area /mnt%s/%s"
% (src, persistent_dir, persistent_name))
target.shcmd_local(
                # don't be verbose, it makes things too slow and times out when
# sending a lot of files
"time rsync -HaAX --numeric-ids --delete"
" --port %%(rsync_port)s "
" %s/. %%(rsync_server)s::rootfs/%s/%s"
% (src, persistent_dir, persistent_name))
target.testcase._targets_active()
if dst != None:
# There is a final destination specified, so now, in the
# target, make a copy from the persistent area to the final
# destination
parent_dirs = os.path.dirname(dst)
if parent_dirs != '':
target.shell.run("mkdir -p /mnt/%s" % parent_dirs)
target.shell.run(
                # don't be verbose, it makes things too slow and times out when
# sending a lot of files
"time rsync -HaAX --delete /mnt/%s/%s/. /mnt/%s"
% (persistent_dir, persistent_name, dst))
def rsync_np(self, src, dst, option_delete = False):
"""rsync data from the local machine to a target
The local machine is the machine executing the test script (where
*tcf run* was called).
Unlike :meth:`rsync`, this function will rsync data straight
from the local machine to the target's final destination, but
        without using the persistent storage ``/persistent.tcf.d``.
This function can be used, for example, to flash a whole
        distribution to the target--however, because that would be
very slow, :meth:`deploy_image` is used to transfer a distro
as a seed from the server (faster) and then from the local
machine, just whatever changed (eg: some changes being tested
in some package):
>>> @tcfl.tc.interconnect("ipv4_addr")
>>> @tcfl.tc.target("pos_capable")
>>> class _test(tcfl.tc.tc_c)
>>> ...
>>>
>>> def deploy_tree(_ic, target, _kws):
>>> target.pos.rsync_np("/SOME/DIR/my-fedora-29", "/")
>>>
>>> def deploy(self, ic, target):
>>> ic.power.on()
>>> target.pos.deploy_image(
>>> ic, "fedora::29",
>>> extra_deploy_fns = [ self.deploy_tree ])
>>>
>>> ...
In this example, the target will be flashed to whatever fedora
29 is available in the server and then
``/SOME/DIR/my-fedora-29`` will be rsynced on top.
        :param str src: source tree/file in the local machine to be
          copied straight to the target's final destination (the
          persistent area is not used).
        :param str dst: destination tree/file in the target
          machine the source will be copied to.
:param bool option_delete: (optional) Add the ``--delete``
option to delete anything in the target that is not present
in the source (%(default)s).
"""
target = self.target
target.shell.run("mkdir -p /%s # create dest for rsync_np" % dst)
if option_delete:
_delete = "--delete"
else:
_delete = ""
        # don't be verbose, it makes things too slow and times out when
# sending a lot of files
cmdline = \
"time sudo rsync -HaAX --numeric-ids %s" \
" --inplace" \
" --exclude=persistent.tcf.d --exclude='persistent.tcf.d/*'" \
" --port %%(rsync_port)s %s/. %%(rsync_server)s::rootfs/%s/." \
% (_delete, src, dst)
target.report_info(
"POS: rsyncing %s to target's /mnt/%s" % (src, dst), dlevel = -1,
attachments = dict(cmdline = cmdline))
output = target.shcmd_local(cmdline)
target.testcase._targets_active()
target.report_info(
"rsynced %s to target's /%s"
% (src, dst),
attachments = dict(cmdline = cmdline, output = output))
def rsyncd_stop(self):
"""
Stop an *rsync* server on a target running Provisioning OS
A server was started with :meth:`target.pos.rsyncd_start
<rsyncd_start>`; kill it gracefully.
"""
target = self.target
# Use sh syntax rather than bash's $(</tmp/rsync.pid) to avoid
        # surprises if the shell changes; ideally we'd use killall, but we
# don't know if it is installed in the POS
target.shell.run("kill -9 `cat /tmp/rsync.pid`")
        # remove the tunnel we created to the rsync server and the
# keywords to access it
target.tunnel.remove(int(target.kws['rsync_port']))
target.kw_unset('rsync_port')
target.kw_unset('rsync_server')
def deploy_image(self, ic, image,
boot_dev = None, root_part_dev = None,
partitioning_fn = None,
extra_deploy_fns = None,
# mkfs has to have -F to avoid it asking questions
mkfs_cmd = "mkfs.ext4 -Fj %(root_part_dev)s",
pos_prompt = None,
# plenty to boot to an nfsroot, hopefully
timeout = 60,
# When flushing to USB drives, it can be slow
timeout_sync = 240,
target_power_cycle_to_pos = None,
boot_config = None):
"""Deploy an image to a target using the Provisioning OS
:param tcfl.tc.tc_c ic: interconnect off which we are booting the
Provisioning OS and to which ``target`` is connected.
:param str image: name of an image available in an rsync server
specified in the interconnect's ``pos_rsync_server`` tag. Each
image is specified as ``IMAGE:SPIN:VERSION:SUBVERSION:ARCH``, e.g:
- fedora:workstation:28::x86_64
- clear:live:25550::x86_64
- yocto:core-image-minimal:2.5.1::x86
Note that you can specify a partial image name and the closest
match to it will be selected. From the previous example, asking
for *fedora* would auto select *fedora:workstation:28::x86_64*
assuming the target supports the *x86_64* target.
:param str boot_dev: (optional) which is the boot device to use,
where the boot loader needs to be installed in a boot
partition. e.g.: ``sda`` for */dev/sda* or ``mmcblk01`` for
*/dev/mmcblk01*.
Defaults to the value of the ``pos_boot_dev`` tag.
:param str root_part_dev: (optional) which is the device to use
for the root partition. e.g: ``mmcblk0p4`` for
*/dev/mmcblk0p4* or ``hda5`` for */dev/hda5*.
If not specified, the system will pick up one from all the
different root partitions that are available, trying to select
          the one whose current content is most similar to what we are
          installing, to minimize the install time.
:param extra_deploy_fns: list of functions to call after the
image has been deployed. e.g.:
>>> def deploy_linux_kernel(ic, target, kws, kernel_file = None):
>>> ...
the function will be passed keywords which contain values found
out during this execution
:returns str: name of the image that was deployed (in case it was
guessed)
FIXME:
- increase in property bd.stats.client.sos_boot_failures and
bd.stats.client.sos_boot_count (to get a baseline)
- tag bd.stats.last_reset to DATE
Note: you might want the interconnect power cycled
"""
assert isinstance(ic, tc.target_c), \
"ic must be an instance of tc.target_c, but found %s" \
% type(ic).__name__
assert isinstance(image, basestring)
target = self.target
testcase = target.testcase
boot_dev = self._boot_dev_guess(boot_dev)
with msgid_c("POS"):
self.boot_to_pos(pos_prompt = pos_prompt, timeout = timeout,
boot_to_pos_fn = target_power_cycle_to_pos)
testcase.targets_active()
kws = dict(
rsync_server = ic.kws['pos_rsync_server'],
image = image,
boot_dev = boot_dev,
)
kws.update(target.kws)
original_timeout = testcase.tls.expecter.timeout
try:
testcase.tls.expecter.timeout = 800
# List the available images and decide if we have the
# one we are asked to install, autocomplete missing
# fields and get us a good match if there is any.
image_list_output = target.shell.run(
"rsync %(rsync_server)s/" % kws, output = True)
images_available = image_list_from_rsync_output(
image_list_output)
image_final_tuple = image_select_best(image, images_available,
target)
image_final = ":".join(image_final_tuple)
kws['image'] = image_final
testcase.targets_active()
root_part_dev = self.mount_fs(image_final, boot_dev)
kws['root_part_dev'] = root_part_dev
target.report_info("POS: rsyncing %(image)s from "
"%(rsync_server)s to /mnt" % kws,
dlevel = -1)
target.shell.run(
"time rsync -HaAX --numeric-ids --delete --inplace"
" --exclude=/persistent.tcf.d"
" --exclude='/persistent.tcf.d/*'"
" %(rsync_server)s/%(image)s/. /mnt/." % kws)
target.report_info("POS: rsynced %(image)s from "
"%(rsync_server)s to /mnt" % kws)
# did the user provide an extra function to deploy stuff?
_extra_deploy_fns = []
more = self.cap_fn_get('extra_deploy')
if more:
_extra_deploy_fns += more
if extra_deploy_fns:
_extra_deploy_fns += extra_deploy_fns
if _extra_deploy_fns:
self.rsyncd_start(ic)
for extra_deploy_fn in _extra_deploy_fns:
target.report_info("POS: running extra deploy fn %s"
% extra_deploy_fn, dlevel = 2)
testcase.targets_active()
extra_deploy_fn(ic, target, kws)
self.rsyncd_stop()
# Configure the bootloader: by hand with shell
# commands, so it is easy to reproduce by a user
# typing them
testcase.targets_active()
target.report_info("POS: configuring bootloader")
boot_config_fn = target.pos.cap_fn_get('boot_config', 'uefi')
if boot_config_fn:
# maybe something, maybe nothing
boot_config_fn(target, boot_dev, image_final)
testcase.tls.expecter.timeout = timeout_sync
except Exception as e:
target.report_info(
"BUG? exception %s: %s %s" %
(type(e).__name__, e, traceback.format_exc()))
raise
finally:
testcase.tls.expecter.timeout = original_timeout
# FIXME: document
# sync, kill any processes left over in /mnt, unmount it
# don't fail if this fails, as it'd trigger another exception
# and hide whatever happened that make us fail. Just make a
# good hearted attempt at cleaning up
target.shell.run(
"sync; "
"which lsof"
" && kill -9 `lsof -Fp /home | sed -n '/^p/{s/^p//;p}'`; "
"cd /; "
"for device in %s; do umount -l $device || true; done"
% " ".join(reversed(target.pos.umount_list)))
target.report_info("POS: deployed %(image)s" % kws)
return kws['image']
def image_seed_match(lp, goal):
"""
    Given a dict mapping partitions to seed image specs and a goal spec,
    return the partition whose seed is most similar to the goal
>>> lp = {
>>> 'part1': 'clear:live:25550::x86-64',
>>> 'part2': 'fedora:workstation:28::x86',
>>> 'part3': 'rtk::91',
>>> 'part4': 'rtk::90',
>>> 'part5': 'rtk::114',
>>> }
    >>> image_seed_match(lp, "rtk::112")
>>> ('part5', 0.933333333333, 'rtk::114')
"""
goall = image_spec_to_tuple(str(goal))
scores = {}
for part_name, seed in lp.iteritems():
score = 0
seedl = image_spec_to_tuple(str(seed))
if seedl[0] == goall[0]:
# At least we want a distribution match for it to be
# considered
scores[part_name] = Levenshtein.seqratio(goall, seedl)
else:
scores[part_name] = 0
if scores:
selected, score = max(scores.iteritems(), key = operator.itemgetter(1))
return selected, score, lp[selected]
return None, 0, None
def deploy_tree(_ic, target, _kws):
"""
Rsync a local tree to the target after imaging
This is normally given to :func:`target.pos.deploy_image
<tcfl.pos.extension.deploy_image>` as:
>>> target.kw_set("pos_deploy_linux_kernel", SOMELOCALLOCATION)
>>> target.pos.deploy_image(ic, IMAGENAME,
>>> extra_deploy_fns = [ tcfl.pos.deploy_linux_kernel ])
"""
source_tree = getattr(target, "deploy_tree_src", None)
if source_tree == None:
target.report_info("not deploying local tree because "
"*target.deploy_tree_src* is missing or None ",
dlevel = 2)
return
target.report_info("rsyncing tree %s -> target:/" % source_tree,
dlevel = 1)
target.testcase._targets_active()
target.pos.rsync_np(source_tree, "/", option_delete = True)
target.testcase._targets_active()
target.report_pass("rsynced tree %s -> target:/" % source_tree)
import pos_multiroot # pylint: disable = wrong-import-order,wrong-import-position,relative-import
import pos_uefi # pylint: disable = wrong-import-order,wrong-import-position,relative-import
capability_register('mount_fs', 'multiroot', pos_multiroot.mount_fs)
capability_register('boot_to_pos', 'pxe', target_power_cycle_to_pos_pxe)
capability_register('boot_to_normal', 'pxe', target_power_cycle_to_normal_pxe)
capability_register('boot_config', 'uefi', pos_uefi.boot_config_multiroot)
capability_register('boot_config_fix', 'uefi', pos_uefi.boot_config_fix)
| #! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
# FIXME:
#
# - command line method to discover installed capabilities; print
# each's __doc__
"""
This module provides tools to image devices with a Provisioning OS.
The general operation mode for this is instructing the device to boot
the :term:`Provisioning OS <POS>`; at this point, the test script (or
via the *tcf* client line) can interact with the POS over the serial
console.
Then the device can be partitioned, formatted, etc with general Linux
command line. As well, we can provide an :mod:`rsync server
<ttbl.rsync>` to serve OS images that can be flashed.
Booting to POS can be accomplished:
- by network boot and root over NFS
- by a special boot device pre-configured to always boot POS
- any other
Server side modules used actively by this system:
- DHCP server :mod:`ttbl.dhcp`: provides dynamic IP address
assignment; it can be configured so a pre-configured IP address is
always assigned to a target and will provide also PXE/TFTP boot
services to boot into POS mode (working in conjunction with a HTTP,
TFTP and NFS servers).
- rsync server :mod:`ttbl.rsync`: provides access to images to rsync
into partitions (which is way faster than some other imaging methods
when done over a 1Gbps link).
- port redirector :mod:`ttbl.socat`: not strictly needed for POS, but
useful to redirect ports out of the :term:`NUT` to the greater
Internet. This comes in handy if, as part of the testing, external
software has to be installed or external services accessed.
Note installation in the server side is needed, as described in
:ref:`POS setup <pos_setup>`.
"""
import inspect
import operator
import os
import random
import re
import traceback
import distutils.version
import Levenshtein
import tc
import tl
from . import msgid_c
def image_spec_to_tuple(i):
distro = ""
spin = ""
version = ""
pl = ""
arch = ""
il = i.split(":")
if len(il) > 0:
distro = il[0]
if len(il) > 1:
spin = il[1]
if len(il) > 2:
version = il[2]
if len(il) > 3:
pl = il[3]
if len(il) > 4:
arch = il[4]
return distro, spin, version, pl, arch
def image_list_from_rsync_output(output):
imagel = []
# drwxrwxr-x 4,096 2018/10/19 00:41:04 .
# drwxr-xr-x 4,096 2018/10/11 06:24:44 clear:live:25550
# dr-xr-xr-x 4,096 2018/04/24 23:10:02 fedora:cloud-base-x86-64:28
# drwxr-xr-x 4,096 2018/10/11 20:52:34 rtk::114
# ...
# so we parse for 5 fields, take last
for line in output.splitlines():
tokens = line.split(None, 5)
if len(tokens) != 5:
continue
image = tokens[4]
if not ':' in image:
continue
imagel.append(image_spec_to_tuple(image))
return imagel
def image_select_best(image, available_images, target):
arch_default = target.bsp_model
image_spec = image_spec_to_tuple(image)
arch = image_spec[4]
if arch == "":
arch = arch_default
if arch == None or arch == "":
image_spec2 = list(image_spec)
image_spec2[4] = "ARCHITECTURE"
raise tc.blocked_e(
"no architecture specified (image %s), neither it could not be "
"guessed from the target's BSP model (%s); try specifying the "
"image as %s"
% (image, target.bsp_model, ":".join(image_spec2)))
target.report_info("POS: goal image spec: %s" % list(image_spec), dlevel = 2)
for available_image in available_images:
target.report_info("POS: available images: %s" % list(available_image),
dlevel = 2)
# filter which images have arch or no arch spec
available_images = filter(lambda x: x[4] == arch, available_images)
if not available_images:
raise tc.blocked_e(
"can't find image for architecture %s "
"in list of available image" % arch,
dict(images_available = \
"\n".join([ ":".join(i) for i in available_images ]))
)
for available_image in available_images:
target.report_info("POS: available images (filtered arch %s): %s"
% (arch, list(available_image)), dlevel = 2)
# filter first based on the distro (first field)
distro = image_spec[0]
if distro == "":
distro_images = available_images
else:
distro_images = filter(lambda x: x[0] == distro, available_images)
for available_image in distro_images:
target.report_info("POS: available images (filtered distro %s): %s"
% (distro, list(available_image)), dlevel = 2)
# now filter based on the distro spin; if none, well, pick one at random
spin = image_spec[1]
if spin == "":
spin_images = distro_images
else:
spin_images = filter(lambda x: x[1] == spin, distro_images)
if not spin_images:
raise tc.blocked_e(
"can't find match for image %s on available images" % image,
dict(images_available =
"\n".join([ ":".join(i) for i in available_images ]))
)
for available_image in spin_images:
target.report_info("POS: available images (filtered spin %s): %s"
% (spin, list(available_image)), dlevel = 2)
# now filter based on version -- rules change here -- if there is
# no version specified, pick what seems to be the most recent
# (highest)
version = image_spec[2]
if version == "":
versions = sorted([
(distutils.version.LooseVersion(i[2]) if i[2] != ""
else distutils.version.LooseVersion('0'))
for i in spin_images
])
version = versions[-1]
else:
version = distutils.version.LooseVersion(version)
version_images = filter(
lambda x: (
distutils.version.LooseVersion(x[2] if x[2] != "" else '0')
== version
),
spin_images)
if not version_images:
raise tc.blocked_e(
"can't find image match for version %s "
"in list of available images" % version,
dict(images_available =
"\n".join([ ":".join(i) for i in version_images ]))
)
for available_image in version_images:
target.report_info("POS: available images (filtered version %s): %s"
                           % (version, list(available_image)), dlevel = 2)
# now filter based on subversion -- rules change here -- if there is
# no subversion specified, pick what seems to be the most recent
# (highest)
subversion = image_spec[3]
if subversion == "":
subversions = sorted([
(distutils.version.LooseVersion(i[3]) if i[3] != ""
else distutils.version.LooseVersion('0'))
for i in version_images
])
subversion = subversions[-1]
else:
subversion = distutils.version.LooseVersion(subversion)
subversion_images = filter(
lambda x: (
distutils.version.LooseVersion(x[3] if x[3] != "" else '0')
== subversion
),
version_images)
if not subversion_images:
raise tc.blocked_e(
"can't find image match for sub-version %s "
"in list of available images" % subversion,
dict(images_available =
"\n".join([ ":".join(i) for i in subversion_images ]))
)
for available_image in subversion_images:
target.report_info("POS: available images (filtered subversion %s): %s"
                           % (subversion, list(available_image)), dlevel = 2)
# we might have multiple image choices if distro or live image
# weren't specified, so pick one
return random.choice(subversion_images)
# FIXME: what I don't like about this is that we have no info on the
# interconnect -- this must require it?
def target_power_cycle_to_pos_pxe(target):
target.report_info("POS: setting target to PXE boot Provisioning OS")
target.property_set("pos_mode", "pxe")
target.power.cycle()
# Now setup the local boot loader to boot off that
target.property_set("pos_mode", "local")
# FIXME: what I don't like about this is that we have no info on the
# interconnect -- this must require it?
def target_power_cycle_to_normal_pxe(target):
target.report_info("Setting target not to PXE boot Provisioning OS")
target.property_set("pos_mode", "local")
target.power.cycle()
def mk_persistent_tcf_d(target, subdirs = None):
if subdirs == None:
dirs = [ '/mnt/persistent.tcf.d' ]
else:
dirs = [ '/mnt/persistent.tcf.d/' + subdir for subdir in subdirs ]
    # just create / recreate all the dirs
target.shell.run('mkdir -p ' + " ".join(dirs))
# Ensure there is a README -- this is slow, so don't do it if
# already there
output = target.shell.run(
'test -f /mnt/persistent.tcf.d/README || echo N""O' ,
output = True)
if 'NO' in output:
target.shell.run("""\
cat <<EOF > /mnt/persistent.tcf.d/README
This directory has been created by TCF's Provisioning OS to store files to
be provisioned in the root file system.
When flashing a new image to this partition, the contents in this tree
will not be removed/replaced. It is then faster to rsync things in
from the client machine.
EOF""")
def deploy_linux_kernel(ic, target, _kws):
"""Deploy a linux kernel tree in the local machine to the target's
root filesystem
This is normally given to :func:`target.pos.deploy_image
<tcfl.pos.extension.deploy_image>` as:
>>> target.kw_set("pos_deploy_linux_kernel", SOMELOCALLOCATION)
>>> target.pos.deploy_image(ic, IMAGENAME,
>>> extra_deploy_fns = [ tcfl.pos.deploy_linux_kernel ])
as it expects ``kws['pos_deploy_linux_kernel']`` which points to a
local directory in the form::
- boot/*
- lib/modules/KVER/*
all those will be rsynced to the target's persistent root area
(for speed) and from there to the root filesystem's /boot and
/lib/modules. Anything else in the ``/boot/`` and
``/lib/modules/`` directories will be replaced with what comes
from the *kernel tree*.
**Low level details**
When the target's image has been flashed in place,
:func:`tcfl.pos.deploy_image <tcfl.pos.extension.deploy_image>` is
asked to call this function.
The client will rsync the tree from the local machine to the
persistent space using :meth:`target.pos.rsync <extension.rsync>`,
which also caches it in a persistent area to speed up multiple
transfers.
"""
    if 'pos_deploy_linux_kernel_tree' not in _kws:
target.report_info("not deploying linux kernel because "
"*pos_deploy_linux_kernel_tree* keyword "
"has not been set for the target", dlevel = 2)
return
target.report_info("rsyncing boot image to target")
target.pos.rsync("%(pos_deploy_linux_kernel_tree)s/boot" % target.kws,
"/boot")
target.report_info("rsyncing lib/modules to target")
target.pos.rsync("%(pos_deploy_linux_kernel_tree)s/lib/modules"
% target.kws,
"/lib/modules")
target.testcase._targets_active()
target.report_pass("linux kernel transferred")
#:
#: Functions to boot a target into POS
#:
#: Different target drivers can be loaded and will add members to
#: these dictionaries to extend the abilities of the core system to
#: put targets in Provisioning OS mode.
#:
#: This then allows a single test script to work with multiple target
#: types without having to worry about details.
capability_fns = dict(
#: Function to call to power cycle the target and have it boot the
#: Provisioning OS.
#:
#: This shall be a one shot thing; the following power cycle shall
#: boot the target normally
#:
#: Arguments:
#: - tcfl.tc.target_c target: target to boot in POS mode
boot_to_pos = dict(),
#: Function to call to power cycle the target and have it boot the
#: installed OS (not the Provisioning OS).
#:
#: Arguments:
#: - tcfl.tc.target_c target: target to boot in normal mode
boot_to_normal = dict(),
#: Function to call to configure the boot loader once the system
    #: has been provisioned.
#:
#: Arguments:
#: - tcfl.tc.target_c target: target who's boot has to be configured
#: - str root_part_dev: root device
#: - str image: image specification
boot_config = dict(),
#: Function to call to fix the boot loader from a system that
#: might have booted, we have something like a login prompt on the
#: serial console
#:
#: Arguments:
#: - tcfl.tc.target_c target: target who's boot has to be configured
boot_config_fix = dict(),
#: Function to use to partition the target's storage
#:
#: Will be called when the target has a property *pos_repartition*
    #: set or when the system thinks the partition table is trashed
#: and needs reinitialization.
#:
#: Arguments:
#: - tcfl.tc.target_c target: target who's storage we are
#: partitioning
#: - str boot_dev: device used to boot
#:
#: returns: nothing, but sets target.root_part_dev, where the rootfs is
#:
mount_fs = dict(),
#: Post-deploy functions to run
extra_deploy = dict(),
)
_pos_capable_defaults = dict(
# backwards compat
boot_to_pos = 'pxe',
boot_to_normal = 'pxe',
boot_config = 'uefi',
mount_fs = 'multiroot',
partition = 'default',
)
def capability_register(capability, value, fns):
assert capability in capability_fns.keys(), \
"capability %s is not one of: %s" \
% (capability, " ".join(capability_fns.keys()))
assert isinstance(value, basestring), \
"capability value must be a string, got %s" % type(value).__name__
assert callable(fns) \
or (
isinstance(fns, list)
and all([ callable(i) for i in fns ])
), \
"fns %s is not a callable or list of callables" % fns
capability_fns.setdefault(capability, {})[value] = fns
class extension(tc.target_extension_c):
"""
Extension to :py:class:`tcfl.tc.target_c` to handle Provisioning
OS capabilities.
"""
def __init__(self, target):
if 'pos_capable' not in target.rt:
raise self.unneeded
tc.target_extension_c.__init__(self, target)
pos_capable = target.kws['pos_capable']
if isinstance(pos_capable, bool):
if pos_capable == False:
raise tc.blocked_e("target is not POS capable",
dict(target = target))
target.report_info("WARNING! target's pos_capable is still old "
"style, update your config--taking "
"defaults")
self.capabilities = _pos_capable_defaults
elif isinstance(pos_capable, dict):
self.capabilities = pos_capable
else:
raise tc.blocked_e("Target's 'pos_capable' target is "
"not a dictionary of POS capabilities",
dict(target = self.target))
self.umount_list = [ '/mnt' ]
def _boot_dev_guess(self, boot_dev):
target = self.target
# What is our boot device?
if boot_dev:
assert isinstance(boot_dev, basestring), \
'boot_dev must be a string'
target.report_info("POS: boot device %s (from arguments)"
% boot_dev, dlevel = 3)
else:
boot_dev = target.kws.get('pos_boot_dev', None)
if boot_dev == None:
raise tc.blocked_e(
"Can't guess boot_dev (no `pos_boot_dev` tag available)",
{ 'target': target } )
target.report_info("POS: boot device %s (from pos_boot_dev tag)"
% boot_dev)
boot_dev = "/dev/" + boot_dev
        # HACK: /dev/[hs]d* name partitions as /dev/[hs]dN, whereas mmc and
# friends add /dev/mmcWHATEVERpN. Seriously...
if boot_dev.startswith("/dev/hd") \
or boot_dev.startswith("/dev/sd") \
or boot_dev.startswith("/dev/vd"):
target.kw_set('p_prefix', "")
else:
target.kw_set('p_prefix', "p")
return boot_dev
# FIXME: make this return fn and a description saying
# "capability %s/%s @ %s.%s()" so we can use it to feed to messages such as
# "rebooting into Provisioning OS [0/3] with capability %s/%s @ %s.%s()"
def cap_fn_get(self, capability, default = None):
"""
Return a target's POS capability.
:param str capability: name of the capability, as defined in
the target's tag :ref:`*pos_capable* <pos_capable>`.
:param str default: (optional) default to use if not
specified; DO NOT USE! WILL BE DEPRECATED!
"""
if capability not in capability_fns:
raise tc.blocked_e("Unknown POS capability '%s'; maybe "
"needs to be configured in "
"tcfl.pos.capability_fns?" %
capability, dict(target = self.target))
if capability not in self.capabilities:
self.target.report_info("WARNING! target's pos_capable "
"doesn't list '%s'; defaulting to '%s'"
% (capability, default))
capability_value = self.capabilities.get(capability, default)
if capability_value == None: # this means not needed/supported
self.target.report_info(
"POS: capability %s resolves to no-action" % capability)
return None
if capability_value not in capability_fns[capability]:
raise tc.blocked_e(
"target defines '%s' method for '%s' that is unknown to "
"the Provisioning OS library; maybe configuration for it "
"is not loaded?" % (capability_value, capability),
attachments = dict(target = self.target,
capability = capability,
value = capability_value)
)
capability_fn = capability_fns[capability][capability_value]
modname = capability_fn.__module__
self.target.report_info(
"POS: capability %s/%s by %s.%s" % (
capability, capability_value,
inspect.getsourcefile(capability_fn), capability_fn.__name__))
return capability_fn
_regex_waiting_for_login = re.compile(r".*\blogin:\s*$")
def _unexpected_console_output_try_fix(self, output, target):
# so when trying to boot POS we got unexpected console output;
# let's see what can we do about it.
if output == None:
# nah, can't do much
return
# looks like a login prompt? Maybe we can login and munge
# things around
if self._regex_waiting_for_login.search(output):
boot_config_fix_fn = target.pos.cap_fn_get('boot_config_fix',
'uefi')
if boot_config_fix_fn:
target.report_info("POS: got an unexpected login "
"prompt, will try to fix the "
"boot configuration")
boot_config_fix_fn(target)
else:
target.report_error(
"POS: seems we got a login prompt that is not POS, "
"but I don't know how to fix it; target does not "
"declare capability `boot_config_fix`",
attachments = dict(output = output))
def boot_to_pos(self, pos_prompt = None,
# plenty to boot to an nfsroot, hopefully
timeout = 60,
boot_to_pos_fn = None):
target = self.target
if boot_to_pos_fn == None:
# None specified, let's take from the target config
boot_to_pos_fn = self.cap_fn_get('boot_to_pos', 'pxe')
for tries in range(3):
target.report_info("POS: rebooting into Provisioning OS [%d/3]"
% tries)
boot_to_pos_fn(target)
# Sequence for TCF-live based on Fedora
if pos_prompt:
target.shell.linux_shell_prompt_regex = pos_prompt
try:
target.shell.up(timeout = timeout)
except tc.error_e as e:
outputf = e.attachments_get().get('console output', None)
if outputf:
output = open(outputf.name).read()
if output == None or output == "" or output == "\x00":
target.report_error("POS: no console output, retrying")
continue
# sometimes the BIOS has been set to boot local directly,
# so we might as well retry
target.report_error("POS: unexpected console output, retrying")
self._unexpected_console_output_try_fix(output, target)
continue
target.report_info("POS: got Provisioning OS shell")
break
else:
raise tc.blocked_e(
"POS: tried too many times to boot, without signs of life",
{ "console output": target.console.read(), 'target': target })
def boot_normal(self, boot_to_normal_fn = None):
"""
        Power cycle the target (if needed) and boot to the normal OS (vs
booting to the Provisioning OS).
"""
target = self.target
if boot_to_normal_fn == None:
# None specified, let's take from the target config
boot_to_normal_fn = self.cap_fn_get('boot_to_normal')
boot_to_normal_fn(target)
def mount_fs(self, image, boot_dev):
"""Mount the target's filesystems in /mnt
When completed, this function has (maybe)
formatted/reformatted and mounted all of the target's
filesystems starting in /mnt.
For example, if the final system would have filesystems */boot*,
*/* and */home*, this function would mount them on:
- / on /mnt/
- /boot on /mnt/boot
- /home on /mnt/home
        This allows :meth:`deploy_image` to rsync content into the
final system.
:param str image: name of the image we are going to deploy in
this target
:param str boot_dev: device name the system will use to boot
"""
assert isinstance(image, basestring)
assert isinstance(boot_dev, basestring)
self.target.shell.run("lsblk")
mount_fs_fn = self.cap_fn_get("mount_fs")
return mount_fs_fn(self.target, image, boot_dev)
def rsyncd_start(self, ic):
"""
Start an *rsync* server on a target running Provisioning OS
This can be used to receive deployment files from any location
needed to execute later in the target. The server is attached to
        the ``/mnt`` directory and the target is supposed to mount the
destination filesystems there.
This is usually called automatically for the user by the likes of
:func:`deploy_image` and others.
It will create a tunnel from the server to the target's port where
the rsync daemon is listening. A client can then connect to the
server's port to stream data over the rsync protocol. The server
address and port will be stored in the *target*'s keywords
*rsync_port* and *rsync_server* and thus can be accessed with:
>>> print target.kws['rsync_server'], target.kws['rsync_port']
:param tcfl.tc.target_c ic: interconnect (network) to which
the target is connected.
"""
target = self.target
target.shell.run("""\
cat > /tmp/rsync.conf <<EOF
[rootfs]
use chroot = true
path = /mnt/
read only = false
timeout = 60
uid = root
gid = root
EOF""")
        # start rsync in the background and save its PID to a file, as rsync
        # creates no PID file itself and we might not have killall in the POS
target.shell.run(
"rsync --port 3000 --daemon --no-detach --config /tmp/rsync.conf &"
"echo $! > /tmp/rsync.pid")
# Tell the tunneling interface which IP address we want to use
target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
target.kw_set('rsync_port', target.tunnel.add(3000))
target.kw_set('rsync_server', target.rtb.parsed_url.hostname)
def rsync(self, src = None, dst = None,
persistent_name = None,
persistent_dir = '/persistent.tcf.d'):
"""
rsync data from the local machine to a target
The local machine is the machine executing the test script (where
*tcf run* was called).
This function will first rsync data to a location in the target
        (persistent storage ``/persistent.tcf.d``) that will not be
        overridden when flashing images. Then it will rsync it from there
to the final location.
This allows the content to be cached in between testcase execution
        that reimages the target. Thus, on the first run, the whole source
        tree is transferred to the persistent area, but subsequent runs
        will already find it there even if the OS image has been
reflashed (as the reflashing will not touch the persistent
area). Of course this assumes the previous executions didn't wipe
the persistent area or the whole disk was not corrupted.
This function can be used, for example, when wanting to deploy
extra data to the target when using :func:`deploy_image`:
>>> @tcfl.tc.interconnect("ipv4_addr")
>>> @tcfl.tc.target("pos_capable")
>>> class _test(tcfl.tc.tc_c)
>>> ...
>>>
>>> @staticmethod
>>> def _deploy_mygittree(_ic, target, _kws):
>>> tcfl.pos.rsync(os.path.expanduser("~/somegittree.git"),
>>> dst = '/opt/somegittree.git')
>>>
>>> def deploy(self, ic, target):
>>> ic.power.on()
>>> target.pos.deploy_image(
>>> ic, "fedora::29",
>>> extra_deploy_fns = [ self._deploy_mygittree ])
>>>
>>> ...
In this example, the user has a cloned git tree in
``~/somegittree.git`` that has to be flashed to the target into
``/opt/somegittree.git`` after ensuring the root file system is
flashed with *Fedora 29*. :func:`deploy_image` will start the rsync
server and then call *_deploy_mygittree()* which will use
:meth:`target.pos.rsync <rsync>` to rsync from the user's
machine to the target's persistent location (in
``/mnt/persistent.tcf.d/somegittree.git``) and from there to the
final location of ``/mnt/opt/somegittree.git``. When the system
boots it will be of course in ``/opt/somegittree.git``
Because :meth:`target.pos.rsyncd_start <rsyncd_start>`
        has been called already, we now have these keywords available
        that tell us where to connect:
>>> target.kws['rsync_server']
>>> target.kws['rsync_port']
as setup by calling :meth:`target.pos.rsyncd_start
<rsyncd_start>` on the target. Functions such as
:meth:`target.pos.deploy_image <deploy_image>` do this for
you.
:param str src: (optional) source tree/file in the local machine
to be copied to the target's persistent area. If not specified,
nothing is copied to the persistent area.
:param str dst: (optional) destination tree/file in the target
machine; if specified, the file is copied from the persistent
area to the final destination. If not specified,
nothing is copied from the persistent area to the final
destination.
:param str persistent_name: (optional) name for the file/tree in
the persistent area; defaults to the basename of the source file
specification.
:param str persistent_dir: (optional) name for the persistent
area in the target, defaults to `/persistent.tcf.d`.
"""
target = self.target
target.shell.run("mkdir -p /mnt/%s" % persistent_dir)
# upload the directory to the persistent area
if persistent_name == None:
assert src != None, \
"no `src` parameter is given, `persistent_name` must " \
"then be specified"
persistent_name = os.path.basename(src)
if src != None:
target.report_info(
"rsyncing %s to target's persistent area /mnt%s/%s"
% (src, persistent_dir, persistent_name))
target.shcmd_local(
                # don't be verbose, makes it too slow and times out when
# sending a lot of files
"time rsync -HaAX --numeric-ids --delete"
" --port %%(rsync_port)s "
" %s/. %%(rsync_server)s::rootfs/%s/%s"
% (src, persistent_dir, persistent_name))
target.testcase._targets_active()
if dst != None:
# There is a final destination specified, so now, in the
# target, make a copy from the persistent area to the final
# destination
parent_dirs = os.path.dirname(dst)
if parent_dirs != '':
target.shell.run("mkdir -p /mnt/%s" % parent_dirs)
target.shell.run(
                # don't be verbose, makes it too slow and times out when
# sending a lot of files
"time rsync -HaAX --delete /mnt/%s/%s/. /mnt/%s"
% (persistent_dir, persistent_name, dst))
def rsync_np(self, src, dst, option_delete = False):
"""rsync data from the local machine to a target
The local machine is the machine executing the test script (where
*tcf run* was called).
Unlike :meth:`rsync`, this function will rsync data straight
from the local machine to the target's final destination, but
        without using the persistent storage ``/persistent.tcf.d``.
This function can be used, for example, to flash a whole
        distribution to the target--however, because that would be
very slow, :meth:`deploy_image` is used to transfer a distro
as a seed from the server (faster) and then from the local
machine, just whatever changed (eg: some changes being tested
in some package):
>>> @tcfl.tc.interconnect("ipv4_addr")
>>> @tcfl.tc.target("pos_capable")
>>> class _test(tcfl.tc.tc_c)
>>> ...
>>>
>>> def deploy_tree(_ic, target, _kws):
>>> target.pos.rsync_np("/SOME/DIR/my-fedora-29", "/")
>>>
>>> def deploy(self, ic, target):
>>> ic.power.on()
>>> target.pos.deploy_image(
>>> ic, "fedora::29",
>>> extra_deploy_fns = [ self.deploy_tree ])
>>>
>>> ...
In this example, the target will be flashed to whatever fedora
29 is available in the server and then
``/SOME/DIR/my-fedora-29`` will be rsynced on top.
        :param str src: source tree/file in the local machine to be
        copied to the target.
        :param str dst: destination tree/file in the target machine;
        the content is rsynced straight to this location, without
        going through the persistent area.
:param bool option_delete: (optional) Add the ``--delete``
option to delete anything in the target that is not present
in the source (%(default)s).
"""
target = self.target
target.shell.run("mkdir -p /%s # create dest for rsync_np" % dst)
if option_delete:
_delete = "--delete"
else:
_delete = ""
        # don't be verbose, makes it too slow and times out when
# sending a lot of files
cmdline = \
"time sudo rsync -HaAX --numeric-ids %s" \
" --inplace" \
" --exclude=persistent.tcf.d --exclude='persistent.tcf.d/*'" \
" --port %%(rsync_port)s %s/. %%(rsync_server)s::rootfs/%s/." \
% (_delete, src, dst)
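        # 'rootfs' is the rsync module exported by rsyncd_start(), rooted at the
        # target's /mnt, so dst ends up relative to the deployed root filesystem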
target.report_info(
"POS: rsyncing %s to target's /mnt/%s" % (src, dst), dlevel = -1,
attachments = dict(cmdline = cmdline))
output = target.shcmd_local(cmdline)
target.testcase._targets_active()
target.report_info(
"rsynced %s to target's /%s"
% (src, dst),
attachments = dict(cmdline = cmdline, output = output))
def rsyncd_stop(self):
"""
Stop an *rsync* server on a target running Provisioning OS
A server was started with :meth:`target.pos.rsyncd_start
<rsyncd_start>`; kill it gracefully.
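        >>> target.pos.rsyncd_stop()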
"""
target = self.target
# Use sh syntax rather than bash's $(</tmp/rsync.pid) to avoid
        # surprises if the shell changes; ideally we'd use killall, but we
# don't know if it is installed in the POS
target.shell.run("kill -9 `cat /tmp/rsync.pid`")
        # remove the tunnel we created to the rsync server and the
# keywords to access it
target.tunnel.remove(int(target.kws['rsync_port']))
target.kw_unset('rsync_port')
target.kw_unset('rsync_server')
def deploy_image(self, ic, image,
boot_dev = None, root_part_dev = None,
partitioning_fn = None,
extra_deploy_fns = None,
# mkfs has to have -F to avoid it asking questions
mkfs_cmd = "mkfs.ext4 -Fj %(root_part_dev)s",
pos_prompt = None,
# plenty to boot to an nfsroot, hopefully
timeout = 60,
# When flushing to USB drives, it can be slow
timeout_sync = 240,
target_power_cycle_to_pos = None,
boot_config = None):
"""Deploy an image to a target using the Provisioning OS
:param tcfl.tc.tc_c ic: interconnect off which we are booting the
Provisioning OS and to which ``target`` is connected.
:param str image: name of an image available in an rsync server
specified in the interconnect's ``pos_rsync_server`` tag. Each
image is specified as ``IMAGE:SPIN:VERSION:SUBVERSION:ARCH``, e.g:
- fedora:workstation:28::x86_64
- clear:live:25550::x86_64
- yocto:core-image-minimal:2.5.1::x86
Note that you can specify a partial image name and the closest
match to it will be selected. From the previous example, asking
        for *fedora* would auto-select *fedora:workstation:28::x86_64*,
        assuming the target supports the *x86_64* architecture.
:param str boot_dev: (optional) which is the boot device to use,
where the boot loader needs to be installed in a boot
partition. e.g.: ``sda`` for */dev/sda* or ``mmcblk01`` for
*/dev/mmcblk01*.
Defaults to the value of the ``pos_boot_dev`` tag.
:param str root_part_dev: (optional) which is the device to use
for the root partition. e.g: ``mmcblk0p4`` for
*/dev/mmcblk0p4* or ``hda5`` for */dev/hda5*.
        If not specified, the system will pick one from all the
        available root partitions, trying to select the one that is
        most similar to what we are installing, to minimize the
        install time.
:param extra_deploy_fns: list of functions to call after the
image has been deployed. e.g.:
>>> def deploy_linux_kernel(ic, target, kws, kernel_file = None):
>>> ...
the function will be passed keywords which contain values found
out during this execution
:returns str: name of the image that was deployed (in case it was
guessed)
FIXME:
- increase in property bd.stats.client.sos_boot_failures and
bd.stats.client.sos_boot_count (to get a baseline)
- tag bd.stats.last_reset to DATE
Note: you might want the interconnect power cycled
"""
assert isinstance(ic, tc.target_c), \
"ic must be an instance of tc.target_c, but found %s" \
% type(ic).__name__
assert isinstance(image, basestring)
target = self.target
testcase = target.testcase
boot_dev = self._boot_dev_guess(boot_dev)
with msgid_c("POS"):
self.boot_to_pos(pos_prompt = pos_prompt, timeout = timeout,
boot_to_pos_fn = target_power_cycle_to_pos)
testcase.targets_active()
kws = dict(
rsync_server = ic.kws['pos_rsync_server'],
image = image,
boot_dev = boot_dev,
)
kws.update(target.kws)
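            # this dictionary feeds the %(...)s expansions used in the report
            # messages and shell commands below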
original_timeout = testcase.tls.expecter.timeout
try:
testcase.tls.expecter.timeout = 800
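                # listing and rsyncing the image can take a long time; give the
                # expect loop a generous timeout while we do it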
# List the available images and decide if we have the
# one we are asked to install, autocomplete missing
# fields and get us a good match if there is any.
image_list_output = target.shell.run(
"rsync %(rsync_server)s/" % kws, output = True)
images_available = image_list_from_rsync_output(
image_list_output)
image_final_tuple = image_select_best(image, images_available,
target)
image_final = ":".join(image_final_tuple)
kws['image'] = image_final
testcase.targets_active()
root_part_dev = self.mount_fs(image_final, boot_dev)
kws['root_part_dev'] = root_part_dev
target.report_info("POS: rsyncing %(image)s from "
"%(rsync_server)s to /mnt" % kws,
dlevel = -1)
target.shell.run(
"time rsync -HaAX --numeric-ids --delete --inplace"
" --exclude=/persistent.tcf.d"
" --exclude='/persistent.tcf.d/*'"
" %(rsync_server)s/%(image)s/. /mnt/." % kws)
target.report_info("POS: rsynced %(image)s from "
"%(rsync_server)s to /mnt" % kws)
# did the user provide an extra function to deploy stuff?
_extra_deploy_fns = []
more = self.cap_fn_get('extra_deploy')
if more:
_extra_deploy_fns += more
if extra_deploy_fns:
_extra_deploy_fns += extra_deploy_fns
if _extra_deploy_fns:
self.rsyncd_start(ic)
for extra_deploy_fn in _extra_deploy_fns:
target.report_info("POS: running extra deploy fn %s"
% extra_deploy_fn, dlevel = 2)
testcase.targets_active()
extra_deploy_fn(ic, target, kws)
self.rsyncd_stop()
# Configure the bootloader: by hand with shell
# commands, so it is easy to reproduce by a user
# typing them
testcase.targets_active()
target.report_info("POS: configuring bootloader")
boot_config_fn = target.pos.cap_fn_get('boot_config', 'uefi')
if boot_config_fn:
# maybe something, maybe nothing
boot_config_fn(target, boot_dev, image_final)
testcase.tls.expecter.timeout = timeout_sync
except Exception as e:
target.report_info(
"BUG? exception %s: %s %s" %
(type(e).__name__, e, traceback.format_exc()))
raise
finally:
testcase.tls.expecter.timeout = original_timeout
# FIXME: document
# sync, kill any processes left over in /mnt, unmount it
# don't fail if this fails, as it'd trigger another exception
            # and hide whatever happened that made us fail. Just make a
            # good-hearted attempt at cleaning up
target.shell.run(
"sync; "
"which lsof"
" && kill -9 `lsof -Fp /home | sed -n '/^p/{s/^p//;p}'`; "
"cd /; "
"for device in %s; do umount -l $device || true; done"
% " ".join(reversed(target.pos.umount_list)))
target.report_info("POS: deployed %(image)s" % kws)
return kws['image']
def image_seed_match(lp, goal):
"""
    Given a dict of image/seed specifications and a goal spec, return the most similar one
>>> lp = {
>>> 'part1': 'clear:live:25550::x86-64',
>>> 'part2': 'fedora:workstation:28::x86',
>>> 'part3': 'rtk::91',
>>> 'part4': 'rtk::90',
>>> 'part5': 'rtk::114',
>>> }
    >>> image_seed_match(lp, "rtk::112")
>>> ('part5', 0.933333333333, 'rtk::114')
"""
goall = image_spec_to_tuple(str(goal))
scores = {}
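    # score every seed against the goal; only seeds from the same distribution
    # (first field) are considered, ranked by Levenshtein sequence similarity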
for part_name, seed in lp.iteritems():
score = 0
seedl = image_spec_to_tuple(str(seed))
if seedl[0] == goall[0]:
# At least we want a distribution match for it to be
# considered
scores[part_name] = Levenshtein.seqratio(goall, seedl)
else:
scores[part_name] = 0
if scores:
selected, score = max(scores.iteritems(), key = operator.itemgetter(1))
return selected, score, lp[selected]
return None, 0, None
def deploy_tree(_ic, target, _kws):
"""
Rsync a local tree to the target after imaging
This is normally given to :func:`target.pos.deploy_image
<tcfl.pos.extension.deploy_image>` as:
>>> target.kw_set("pos_deploy_linux_kernel", SOMELOCALLOCATION)
>>> target.pos.deploy_image(ic, IMAGENAME,
>>> extra_deploy_fns = [ tcfl.pos.deploy_linux_kernel ])
"""
source_tree = getattr(target, "deploy_tree_src", None)
if source_tree == None:
target.report_info("not deploying local tree because "
"*target.deploy_tree_src* is missing or None ",
dlevel = 2)
return
target.report_info("rsyncing tree %s -> target:/" % source_tree,
dlevel = 1)
target.testcase._targets_active()
target.pos.rsync_np(source_tree, "/", option_delete = True)
target.testcase._targets_active()
target.report_pass("rsynced tree %s -> target:/" % source_tree)
import pos_multiroot # pylint: disable = wrong-import-order,wrong-import-position,relative-import
import pos_uefi # pylint: disable = wrong-import-order,wrong-import-position,relative-import
capability_register('mount_fs', 'multiroot', pos_multiroot.mount_fs)
capability_register('boot_to_pos', 'pxe', target_power_cycle_to_pos_pxe)
capability_register('boot_to_normal', 'pxe', target_power_cycle_to_normal_pxe)
capability_register('boot_config', 'uefi', pos_uefi.boot_config_multiroot)
capability_register('boot_config_fix', 'uefi', pos_uefi.boot_config_fix) | en | 0.822179 | #! /usr/bin/python2 # # Copyright (c) 2017 Intel Corporation # # SPDX-License-Identifier: Apache-2.0 # # # FIXME: # # - command line method to discover installed capabiltiies; print # each's __doc__ This module provides tools to image devices with a Provisioning OS. The general operation mode for this is instructing the device to boot the :term:`Provisioning OS <POS>`; at this point, the test script (or via the *tcf* client line) can interact with the POS over the serial console. Then the device can be partitioned, formatted, etc with general Linux command line. As well, we can provide an :mod:`rsync server <ttbl.rsync>` to provide OS images that can be flashed Booting to POS can be accomplished: - by network boot and root over NFS - by a special boot device pre-configured to always boot POS - any other Server side modules used actively by this system: - DHCP server :mod:`ttbl.dhcp`: provides dynamic IP address assignment; it can be configured so a pre-configured IP address is always assigned to a target and will provide also PXE/TFTP boot services to boot into POS mode (working in conjunction with a HTTP, TFTP and NFS servers). - rsync server :mod:`ttbl.rsync`: provides access to images to rsync into partitions (which is way faster than some other imaging methods when done over a 1Gbps link). - port redirector :mod:`ttbl.socat`: not strictly needed for POS, but useful to redirect ports out of the :term:`NUT` to the greater Internet. This comes handy if as part of the testing external software has to be installed or external services acccessed. Note installation in the server side is needed, as described in :ref:`POS setup <pos_setup>`. # drwxrwxr-x 4,096 2018/10/19 00:41:04 . # drwxr-xr-x 4,096 2018/10/11 06:24:44 clear:live:25550 # dr-xr-xr-x 4,096 2018/04/24 23:10:02 fedora:cloud-base-x86-64:28 # drwxr-xr-x 4,096 2018/10/11 20:52:34 rtk::114 # ... # so we parse for 5 fields, take last # filter which images have arch or no arch spec # filter first based on the distro (first field) # now filter based on the distro spin; if none, well, pick one at random # now filter based on version -- rules change here -- if there is # no version specified, pick what seems to be the most recent # (highest) # now filter based on subversion -- rules change here -- if there is # no subversion specified, pick what seems to be the most recent # (highest) # we might have multiple image choices if distro or live image # weren't specified, so pick one # FIXME: what I don't like about this is that we have no info on the # interconnect -- this must require it? # Now setup the local boot loader to boot off that # FIXME: what I don't like about this is that we have no info on the # interconnect -- this must require it? # just create / recreate all the thirs # Ensure there is a README -- this is slow, so don't do it if # already there \ cat <<EOF > /mnt/persistent.tcf.d/README This directory has been created by TCF's Provisioning OS to store files to be provisioned in the root file system. When flashing a new image to this partition, the contents in this tree will not be removed/replaced. It is then faster to rsync things in from the client machine. 
EOF Deploy a linux kernel tree in the local machine to the target's root filesystem This is normally given to :func:`target.pos.deploy_image <tcfl.pos.extension.deploy_image>` as: >>> target.kw_set("pos_deploy_linux_kernel", SOMELOCALLOCATION) >>> target.pos.deploy_image(ic, IMAGENAME, >>> extra_deploy_fns = [ tcfl.pos.deploy_linux_kernel ]) as it expects ``kws['pos_deploy_linux_kernel']`` which points to a local directory in the form:: - boot/* - lib/modules/KVER/* all those will be rsynced to the target's persistent root area (for speed) and from there to the root filesystem's /boot and /lib/modules. Anything else in the ``/boot/`` and ``/lib/modules/`` directories will be replaced with what comes from the *kernel tree*. **Low level details** When the target's image has been flashed in place, :func:`tcfl.pos.deploy_image <tcfl.pos.extension.deploy_image>` is asked to call this function. The client will rsync the tree from the local machine to the persistent space using :meth:`target.pos.rsync <extension.rsync>`, which also caches it in a persistent area to speed up multiple transfers. #: #: Functions to boot a target into POS #: #: Different target drivers can be loaded and will add members to #: these dictionaries to extend the abilities of the core system to #: put targets in Provisioning OS mode. #: #: This then allows a single test script to work with multiple target #: types without having to worry about details. #: Function to call to power cycle the target and have it boot the #: Provisioning OS. #: #: This shall be a one shot thing; the following power cycle shall #: boot the target normally #: #: Arguments: #: - tcfl.tc.target_c target: target to boot in POS mode #: Function to call to power cycle the target and have it boot the #: installed OS (not the Provisioning OS). #: #: Arguments: #: - tcfl.tc.target_c target: target to boot in normal mode #: Function to call to configure the boot loader once the system #: has been provisoned. #: #: Arguments: #: - tcfl.tc.target_c target: target who's boot has to be configured #: - str root_part_dev: root device #: - str image: image specification #: Function to call to fix the boot loader from a system that #: might have booted, we have something like a login prompt on the #: serial console #: #: Arguments: #: - tcfl.tc.target_c target: target who's boot has to be configured #: Function to use to partition the target's storage #: #: Will be called when the target has a property *pos_repartition* #: set or when the system things the partition table is trashed #: and needs reinitialization. #: #: Arguments: #: - tcfl.tc.target_c target: target who's storage we are #: partitioning #: - str boot_dev: device used to boot #: #: returns: nothing, but sets target.root_part_dev, where the rootfs is #: #: Post-deploy functions to run # backwards compat Extension to :py:class:`tcfl.tc.target_c` to handle Provisioning OS capabilities. # What is our boot device? # HACK: /dev/[hs]d* do partitions as /dev/[hs]dN, where as mmc and # friends add /dev/mmcWHATEVERpN. Seriously... # FIXME: make this return fn and a description saying # "capability %s/%s @ %s.%s()" so we can use it to feed to messages such as # "rebooting into Provisioning OS [0/3] with capability %s/%s @ %s.%s()" Return a target's POS capability. :param str capability: name of the capability, as defined in the target's tag :ref:`*pos_capable* <pos_capable>`. :param str default: (optional) default to use if not specified; DO NOT USE! WILL BE DEPRECATED! 
# this means not needed/supported # so when trying to boot POS we got unexpected console output; # let's see what can we do about it. # nah, can't do much # looks like a login prompt? Maybe we can login and munge # things around # plenty to boot to an nfsroot, hopefully # None specified, let's take from the target config # Sequence for TCF-live based on Fedora # sometimes the BIOS has been set to boot local directly, # so we might as well retry Power cycle the target (if neeed) and boot to normal OS (vs booting to the Provisioning OS). # None specified, let's take from the target config Mount the target's filesystems in /mnt When completed, this function has (maybe) formatted/reformatted and mounted all of the target's filesystems starting in /mnt. For example, if the final system would have filesystems */boot*, */* and */home*, this function would mount them on: - / on /mnt/ - /boot on /mnt/boot - /home on /mnt/home This allows :meth:`deploy_image` to rysnc content into the final system. :param str image: name of the image we are going to deploy in this target :param str boot_dev: device name the system will use to boot Start an *rsync* server on a target running Provisioning OS This can be used to receive deployment files from any location needed to execute later in the target. The server is attached to the ``/mnt`` directory and the target is upposed to mount the destination filesystems there. This is usually called automatically for the user by the likes of :func:`deploy_image` and others. It will create a tunnel from the server to the target's port where the rsync daemon is listening. A client can then connect to the server's port to stream data over the rsync protocol. The server address and port will be stored in the *target*'s keywords *rsync_port* and *rsync_server* and thus can be accessed with: >>> print target.kws['rsync_server'], target.kws['rsync_port'] :param tcfl.tc.target_c ic: interconnect (network) to which the target is connected. \ cat > /tmp/rsync.conf <<EOF [rootfs] use chroot = true path = /mnt/ read only = false timeout = 60 uid = root gid = root EOF # start rsync in the background, save it's PID file as rsync makes # no pids and we might not have killall in the POS # Tell the tunneling interface which IP address we want to use rsync data from the local machine to a target The local machine is the machine executing the test script (where *tcf run* was called). This function will first rsync data to a location in the target (persistent storage ``/persistent.tcd.d``) that will not be overriden when flashing images. Then it will rsync it from there to the final location. This allows the content to be cached in between testcase execution that reimages the target. Thus, the first run, the whole source tree is transferred to the persistent area, but subsequent runs will already find it there even when if the OS image has been reflashed (as the reflashing will not touch the persistent area). Of course this assumes the previous executions didn't wipe the persistent area or the whole disk was not corrupted. This function can be used, for example, when wanting to deploy extra data to the target when using :func:`deploy_image`: >>> @tcfl.tc.interconnect("ipv4_addr") >>> @tcfl.tc.target("pos_capable") >>> class _test(tcfl.tc.tc_c) >>> ... 
>>> >>> @staticmethod >>> def _deploy_mygittree(_ic, target, _kws): >>> tcfl.pos.rsync(os.path.expanduser("~/somegittree.git"), >>> dst = '/opt/somegittree.git') >>> >>> def deploy(self, ic, target): >>> ic.power.on() >>> target.pos.deploy_image( >>> ic, "fedora::29", >>> extra_deploy_fns = [ self._deploy_mygittree ]) >>> >>> ... In this example, the user has a cloned git tree in ``~/somegittree.git`` that has to be flashed to the target into ``/opt/somegittree.git`` after ensuring the root file system is flashed with *Fedora 29*. :func:`deploy_image` will start the rsync server and then call *_deploy_mygittree()* which will use :meth:`target.pos.rsync <rsync>` to rsync from the user's machine to the target's persistent location (in ``/mnt/persistent.tcf.d/somegittree.git``) and from there to the final location of ``/mnt/opt/somegittree.git``. When the system boots it will be of course in ``/opt/somegittree.git`` Because :meth:`target.pos.rsyncd_start <rsyncd_start>` has been called already, we have now these keywords available that allows to know where to connect to. >>> target.kws['rsync_server'] >>> target.kws['rsync_port'] as setup by calling :meth:`target.pos.rsyncd_start <rsyncd_start>` on the target. Functions such as :meth:`target.pos.deploy_image <deploy_image>` do this for you. :param str src: (optional) source tree/file in the local machine to be copied to the target's persistent area. If not specified, nothing is copied to the persistent area. :param str dst: (optional) destination tree/file in the target machine; if specified, the file is copied from the persistent area to the final destination. If not specified, nothing is copied from the persistent area to the final destination. :param str persistent_name: (optional) name for the file/tree in the persistent area; defaults to the basename of the source file specification. :param str persistent_dir: (optional) name for the persistent area in the target, defaults to `/persistent.tcf.d`. # upload the directory to the persistent area # don't be verbose, makes it too slow and timesout when # sending a lot of files # There is a final destination specified, so now, in the # target, make a copy from the persistent area to the final # destination # don't be verbose, makes it too slow and timesout when # sending a lot of files rsync data from the local machine to a target The local machine is the machine executing the test script (where *tcf run* was called). Unlike :meth:`rsync`, this function will rsync data straight from the local machine to the target's final destination, but without using the persistent storage ``/persistent.tcd.d``. This function can be used, for example, to flash a whole distribution from the target--however, because that would be very slow, :meth:`deploy_image` is used to transfer a distro as a seed from the server (faster) and then from the local machine, just whatever changed (eg: some changes being tested in some package): >>> @tcfl.tc.interconnect("ipv4_addr") >>> @tcfl.tc.target("pos_capable") >>> class _test(tcfl.tc.tc_c) >>> ... >>> >>> def deploy_tree(_ic, target, _kws): >>> target.pos.rsync_np("/SOME/DIR/my-fedora-29", "/") >>> >>> def deploy(self, ic, target): >>> ic.power.on() >>> target.pos.deploy_image( >>> ic, "fedora::29", >>> extra_deploy_fns = [ self.deploy_tree ]) >>> >>> ... In this example, the target will be flashed to whatever fedora 29 is available in the server and then ``/SOME/DIR/my-fedora-29`` will be rsynced on top. 
:param str src: (optional) source tree/file in the local machine to be copied to the target's persistent area. If not specified, nothing is copied to the persistent area. :param str dst: (optional) destination tree/file in the target machine; if specified, the file is copied from the persistent area to the final destination. If not specified, nothing is copied from the persistent area to the final destination. :param bool option_delete: (optional) Add the ``--delete`` option to delete anything in the target that is not present in the source (%(default)s). # create dest for rsync_np" % dst) # don't be verbose, makes it too slow and timesout when # sending a lot of files Stop an *rsync* server on a target running Provisioning OS A server was started with :meth:`target.pos.rsyncd_start <rsyncd_start>`; kill it gracefully. # Use sh syntax rather than bash's $(</tmp/rsync.pid) to avoid # surprises if the shall changes; ideally we'd use killall, but we # don't know if it is installed in the POS # remove the runnel we created to the rsync server and the # keywords to access it # mkfs has to have -F to avoid it asking questions # plenty to boot to an nfsroot, hopefully # When flushing to USB drives, it can be slow Deploy an image to a target using the Provisioning OS :param tcfl.tc.tc_c ic: interconnect off which we are booting the Provisioning OS and to which ``target`` is connected. :param str image: name of an image available in an rsync server specified in the interconnect's ``pos_rsync_server`` tag. Each image is specified as ``IMAGE:SPIN:VERSION:SUBVERSION:ARCH``, e.g: - fedora:workstation:28::x86_64 - clear:live:25550::x86_64 - yocto:core-image-minimal:2.5.1::x86 Note that you can specify a partial image name and the closest match to it will be selected. From the previous example, asking for *fedora* would auto select *fedora:workstation:28::x86_64* assuming the target supports the *x86_64* target. :param str boot_dev: (optional) which is the boot device to use, where the boot loader needs to be installed in a boot partition. e.g.: ``sda`` for */dev/sda* or ``mmcblk01`` for */dev/mmcblk01*. Defaults to the value of the ``pos_boot_dev`` tag. :param str root_part_dev: (optional) which is the device to use for the root partition. e.g: ``mmcblk0p4`` for */dev/mmcblk0p4* or ``hda5`` for */dev/hda5*. If not specified, the system will pick up one from all the different root partitions that are available, trying to select the one that has the most similar to what we are installing to minimize the install time. :param extra_deploy_fns: list of functions to call after the image has been deployed. e.g.: >>> def deploy_linux_kernel(ic, target, kws, kernel_file = None): >>> ... the function will be passed keywords which contain values found out during this execution :returns str: name of the image that was deployed (in case it was guessed) FIXME: - increase in property bd.stats.client.sos_boot_failures and bd.stats.client.sos_boot_count (to get a baseline) - tag bd.stats.last_reset to DATE Note: you might want the interconnect power cycled # List the available images and decide if we have the # one we are asked to install, autocomplete missing # fields and get us a good match if there is any. # did the user provide an extra function to deploy stuff? 
# Configure the bootloader: by hand with shell # commands, so it is easy to reproduce by a user # typing them # maybe something, maybe nothing # FIXME: document # sync, kill any processes left over in /mnt, unmount it # don't fail if this fails, as it'd trigger another exception # and hide whatever happened that make us fail. Just make a # good hearted attempt at cleaning up Given two image/seed specifications, return the most similar one >>> lp = { >>> 'part1': 'clear:live:25550::x86-64', >>> 'part2': 'fedora:workstation:28::x86', >>> 'part3': 'rtk::91', >>> 'part4': 'rtk::90', >>> 'part5': 'rtk::114', >>> } >>> _seed_match(lp, "rtk::112") >>> ('part5', 0.933333333333, 'rtk::114') # At least we want a distribution match for it to be # considered Rsync a local tree to the target after imaging This is normally given to :func:`target.pos.deploy_image <tcfl.pos.extension.deploy_image>` as: >>> target.kw_set("pos_deploy_linux_kernel", SOMELOCALLOCATION) >>> target.pos.deploy_image(ic, IMAGENAME, >>> extra_deploy_fns = [ tcfl.pos.deploy_linux_kernel ]) # pylint: disable = wrong-import-order,wrong-import-position,relative-import # pylint: disable = wrong-import-order,wrong-import-position,relative-import | 2.40607 | 2 |
test.py | darshanajaint/scene-representation-networks | 349 | 6631028 | import configargparse
import os, time, datetime
import torch
import numpy as np
import dataio
from torch.utils.data import DataLoader
from srns import *
import util
p = configargparse.ArgumentParser()
p.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.')
# Note: in contrast to training, no multi-resolution!
p.add_argument('--img_sidelength', type=int, default=128, required=False,
help='Sidelength of test images.')
p.add_argument('--data_root', required=True, help='Path to directory with training data.')
p.add_argument('--logging_root', type=str, default='./logs',
required=False, help='Path to directory where checkpoints & tensorboard events will be saved.')
p.add_argument('--batch_size', type=int, default=32, help='Batch size.')
p.add_argument('--preload', action='store_true', default=False, help='Whether to preload data to RAM.')
p.add_argument('--max_num_instances', type=int, default=-1,
help='If \'data_root\' has more instances, only the first max_num_instances are used')
p.add_argument('--specific_observation_idcs', type=str, default=None,
help='Only pick a subset of specific observations for each instance.')
p.add_argument('--has_params', action='store_true', default=False,
help='Whether each object instance already comes with its own parameter vector.')
p.add_argument('--save_out_first_n',type=int, default=250, help='Only saves images of first n object instances.')
p.add_argument('--checkpoint_path', default=None, help='Path to trained model.')
# Model options
p.add_argument('--num_instances', type=int, required=True,
help='The number of object instances that the model was trained with.')
p.add_argument('--tracing_steps', type=int, default=10, help='Number of steps of intersection tester.')
p.add_argument('--fit_single_srn', action='store_true', required=False,
help='Only fit a single SRN for a single scene (not a class of SRNs) --> no hypernetwork')
p.add_argument('--use_unet_renderer', action='store_true',
help='Whether to use a DeepVoxels-style unet as rendering network or a per-pixel 1x1 convnet')
p.add_argument('--embedding_size', type=int, default=256,
help='Dimensionality of latent embedding.')
opt = p.parse_args()
device = torch.device('cuda')
def test():
if opt.specific_observation_idcs is not None:
specific_observation_idcs = list(map(int, opt.specific_observation_idcs.split(',')))
else:
specific_observation_idcs = None
dataset = dataio.SceneClassDataset(root_dir=opt.data_root,
max_num_instances=opt.max_num_instances,
specific_observation_idcs=specific_observation_idcs,
max_observations_per_instance=-1,
samples_per_instance=1,
img_sidelength=opt.img_sidelength)
dataset = DataLoader(dataset,
collate_fn=dataset.collate_fn,
batch_size=1,
shuffle=False,
drop_last=False)
model = SRNsModel(num_instances=opt.num_instances,
latent_dim=opt.embedding_size,
has_params=opt.has_params,
fit_single_srn=opt.fit_single_srn,
use_unet_renderer=opt.use_unet_renderer,
tracing_steps=opt.tracing_steps)
assert (opt.checkpoint_path is not None), "Have to pass checkpoint!"
print("Loading model from %s" % opt.checkpoint_path)
util.custom_load(model, path=opt.checkpoint_path, discriminator=None,
overwrite_embeddings=False)
model.eval()
model.cuda()
# directory structure: month_day/
renderings_dir = os.path.join(opt.logging_root, 'renderings')
gt_comparison_dir = os.path.join(opt.logging_root, 'gt_comparisons')
util.cond_mkdir(opt.logging_root)
util.cond_mkdir(gt_comparison_dir)
util.cond_mkdir(renderings_dir)
# Save command-line parameters to log directory.
with open(os.path.join(opt.logging_root, "params.txt"), "w") as out_file:
out_file.write('\n'.join(["%s: %s" % (key, value) for key, value in vars(opt).items()]))
print('Beginning evaluation...')
with torch.no_grad():
instance_idx = 0
idx = 0
psnrs, ssims = list(), list()
for model_input, ground_truth in dataset:
model_outputs = model(model_input)
psnr, ssim = model.get_psnr(model_outputs, ground_truth)
psnrs.extend(psnr)
ssims.extend(ssim)
instance_idcs = model_input['instance_idx']
print("Object instance %d. Running mean PSNR %0.6f SSIM %0.6f" %
(instance_idcs[-1], np.mean(psnrs), np.mean(ssims)))
if instance_idx < opt.save_out_first_n:
output_imgs = model.get_output_img(model_outputs).cpu().numpy()
comparisons = model.get_comparisons(model_input,
model_outputs,
ground_truth)
for i in range(len(output_imgs)):
prev_instance_idx = instance_idx
instance_idx = instance_idcs[i]
if prev_instance_idx != instance_idx:
idx = 0
img_only_path = os.path.join(renderings_dir, "%06d" % instance_idx)
comp_path = os.path.join(gt_comparison_dir, "%06d" % instance_idx)
util.cond_mkdir(img_only_path)
util.cond_mkdir(comp_path)
pred = util.convert_image(output_imgs[i].squeeze())
comp = util.convert_image(comparisons[i].squeeze())
util.write_img(pred, os.path.join(img_only_path, "%06d.png" % idx))
util.write_img(comp, os.path.join(comp_path, "%06d.png" % idx))
idx += 1
with open(os.path.join(opt.logging_root, "results.txt"), "w") as out_file:
out_file.write("%0.6f, %0.6f" % (np.mean(psnrs), np.mean(ssims)))
print("Final mean PSNR %0.6f SSIM %0.6f" % (np.mean(psnrs), np.mean(ssims)))
def main():
test()
if __name__ == '__main__':
main()
| import configargparse
import os, time, datetime
import torch
import numpy as np
import dataio
from torch.utils.data import DataLoader
from srns import *
import util
p = configargparse.ArgumentParser()
p.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.')
# Note: in contrast to training, no multi-resolution!
p.add_argument('--img_sidelength', type=int, default=128, required=False,
help='Sidelength of test images.')
p.add_argument('--data_root', required=True, help='Path to directory with training data.')
p.add_argument('--logging_root', type=str, default='./logs',
required=False, help='Path to directory where checkpoints & tensorboard events will be saved.')
p.add_argument('--batch_size', type=int, default=32, help='Batch size.')
p.add_argument('--preload', action='store_true', default=False, help='Whether to preload data to RAM.')
p.add_argument('--max_num_instances', type=int, default=-1,
help='If \'data_root\' has more instances, only the first max_num_instances are used')
p.add_argument('--specific_observation_idcs', type=str, default=None,
help='Only pick a subset of specific observations for each instance.')
p.add_argument('--has_params', action='store_true', default=False,
help='Whether each object instance already comes with its own parameter vector.')
p.add_argument('--save_out_first_n',type=int, default=250, help='Only saves images of first n object instances.')
p.add_argument('--checkpoint_path', default=None, help='Path to trained model.')
# Model options
p.add_argument('--num_instances', type=int, required=True,
help='The number of object instances that the model was trained with.')
p.add_argument('--tracing_steps', type=int, default=10, help='Number of steps of intersection tester.')
p.add_argument('--fit_single_srn', action='store_true', required=False,
help='Only fit a single SRN for a single scene (not a class of SRNs) --> no hypernetwork')
p.add_argument('--use_unet_renderer', action='store_true',
help='Whether to use a DeepVoxels-style unet as rendering network or a per-pixel 1x1 convnet')
p.add_argument('--embedding_size', type=int, default=256,
help='Dimensionality of latent embedding.')
opt = p.parse_args()
device = torch.device('cuda')
def test():
if opt.specific_observation_idcs is not None:
specific_observation_idcs = list(map(int, opt.specific_observation_idcs.split(',')))
else:
specific_observation_idcs = None
dataset = dataio.SceneClassDataset(root_dir=opt.data_root,
max_num_instances=opt.max_num_instances,
specific_observation_idcs=specific_observation_idcs,
max_observations_per_instance=-1,
samples_per_instance=1,
img_sidelength=opt.img_sidelength)
dataset = DataLoader(dataset,
collate_fn=dataset.collate_fn,
batch_size=1,
shuffle=False,
drop_last=False)
model = SRNsModel(num_instances=opt.num_instances,
latent_dim=opt.embedding_size,
has_params=opt.has_params,
fit_single_srn=opt.fit_single_srn,
use_unet_renderer=opt.use_unet_renderer,
tracing_steps=opt.tracing_steps)
assert (opt.checkpoint_path is not None), "Have to pass checkpoint!"
print("Loading model from %s" % opt.checkpoint_path)
util.custom_load(model, path=opt.checkpoint_path, discriminator=None,
overwrite_embeddings=False)
model.eval()
model.cuda()
# directory structure: month_day/
renderings_dir = os.path.join(opt.logging_root, 'renderings')
gt_comparison_dir = os.path.join(opt.logging_root, 'gt_comparisons')
util.cond_mkdir(opt.logging_root)
util.cond_mkdir(gt_comparison_dir)
util.cond_mkdir(renderings_dir)
# Save command-line parameters to log directory.
with open(os.path.join(opt.logging_root, "params.txt"), "w") as out_file:
out_file.write('\n'.join(["%s: %s" % (key, value) for key, value in vars(opt).items()]))
print('Beginning evaluation...')
with torch.no_grad():
instance_idx = 0
idx = 0
psnrs, ssims = list(), list()
for model_input, ground_truth in dataset:
model_outputs = model(model_input)
psnr, ssim = model.get_psnr(model_outputs, ground_truth)
psnrs.extend(psnr)
ssims.extend(ssim)
instance_idcs = model_input['instance_idx']
print("Object instance %d. Running mean PSNR %0.6f SSIM %0.6f" %
(instance_idcs[-1], np.mean(psnrs), np.mean(ssims)))
if instance_idx < opt.save_out_first_n:
output_imgs = model.get_output_img(model_outputs).cpu().numpy()
comparisons = model.get_comparisons(model_input,
model_outputs,
ground_truth)
for i in range(len(output_imgs)):
prev_instance_idx = instance_idx
instance_idx = instance_idcs[i]
if prev_instance_idx != instance_idx:
idx = 0
img_only_path = os.path.join(renderings_dir, "%06d" % instance_idx)
comp_path = os.path.join(gt_comparison_dir, "%06d" % instance_idx)
util.cond_mkdir(img_only_path)
util.cond_mkdir(comp_path)
pred = util.convert_image(output_imgs[i].squeeze())
comp = util.convert_image(comparisons[i].squeeze())
util.write_img(pred, os.path.join(img_only_path, "%06d.png" % idx))
util.write_img(comp, os.path.join(comp_path, "%06d.png" % idx))
idx += 1
with open(os.path.join(opt.logging_root, "results.txt"), "w") as out_file:
out_file.write("%0.6f, %0.6f" % (np.mean(psnrs), np.mean(ssims)))
print("Final mean PSNR %0.6f SSIM %0.6f" % (np.mean(psnrs), np.mean(ssims)))
def main():
test()
if __name__ == '__main__':
main()
| en | 0.601734 | # Note: in contrast to training, no multi-resolution! # Model options # directory structure: month_day/ # Save command-line parameters to log directory. | 2.086247 | 2 |
baseforms.py | JezzaHehn/pyifs | 0 | 6631029 | <reponame>JezzaHehn/pyifs<filename>baseforms.py
from colour import Color
from math import sqrt
class Transform(object):
def __init__(self, rng):
self.r, self.g, self.b = Color(hsl=(rng.random(), 1, 0.5)).rgb
self.rng = rng
def transform_colour(self, r, g, b):
r = (self.r + r) / 2.0
g = (self.g + g) / 2.0
b = (self.b + b) / 2.0
return r, g, b
def get_name(self):
return self.__class__.__name__
class ComplexTransform(Transform):
def transform(self, px, py):
z = complex(px, py)
z2 = self.f(z)
return z2.real, z2.imag
class MoebiusBase(ComplexTransform):
"""
This applies a random Moebius transform and then its inverse.
"""
def __init__(self, rng, xform):
super(MoebiusBase, self).__init__(rng)
self.coef_a = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_b = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_c = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_d = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.xform = xform
self.transform_colour = self.xform.transform_colour
def get_name(self):
return "Moeb" + self.xform.get_name()
def f(self, z):
# apply pre-Moebius (az+b)/(cz+d)
z = (self.coef_a * z + self.coef_b) / (self.coef_c * z + self.coef_d)
# apply inner transform
z = complex(*self.xform.transform(z.real, z.imag))
# return post-Moebius (dz-b)/(-cz+a), which is inverse of pre-Moebius
return (self.coef_d * z - self.coef_b) / (-self.coef_c * z + self.coef_a)
class SphericalBase(Transform):
"""
Since the spherical transform is its own inverse, it can simply be applied twice.
"""
def __init__(self, rng, xform):
super(SphericalBase, self).__init__(rng)
self.xform = xform
def get_name(self):
return "Spheri" + self.xform.get_name()
def transform(self, px, py):
# first spherical
r2 = sqrt(px**2 + py**2)**2
px, py = px/r2, py/r2
# inner transform
px, py = self.xform.transform(px, py)
# second spherical
r2 = sqrt(px**2 + py**2)**2
return px/r2, py/r2
| from colour import Color
from math import sqrt
class Transform(object):
def __init__(self, rng):
self.r, self.g, self.b = Color(hsl=(rng.random(), 1, 0.5)).rgb
self.rng = rng
def transform_colour(self, r, g, b):
r = (self.r + r) / 2.0
g = (self.g + g) / 2.0
b = (self.b + b) / 2.0
return r, g, b
def get_name(self):
return self.__class__.__name__
class ComplexTransform(Transform):
def transform(self, px, py):
z = complex(px, py)
z2 = self.f(z)
return z2.real, z2.imag
class MoebiusBase(ComplexTransform):
"""
This applies a random Moebius transform and then its inverse.
"""
def __init__(self, rng, xform):
super(MoebiusBase, self).__init__(rng)
self.coef_a = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_b = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_c = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_d = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.xform = xform
self.transform_colour = self.xform.transform_colour
def get_name(self):
return "Moeb" + self.xform.get_name()
def f(self, z):
# apply pre-Moebius (az+b)/(cz+d)
z = (self.coef_a * z + self.coef_b) / (self.coef_c * z + self.coef_d)
# apply inner transform
z = complex(*self.xform.transform(z.real, z.imag))
# return post-Moebius (dz-b)/(-cz+a), which is inverse of pre-Moebius
return (self.coef_d * z - self.coef_b) / (-self.coef_c * z + self.coef_a)
class SphericalBase(Transform):
"""
Since the spherical transform is its own inverse, it can simply be applied twice.
"""
def __init__(self, rng, xform):
super(SphericalBase, self).__init__(rng)
self.xform = xform
def get_name(self):
return "Spheri" + self.xform.get_name()
def transform(self, px, py):
# first spherical
r2 = sqrt(px**2 + py**2)**2
px, py = px/r2, py/r2
# inner transform
px, py = self.xform.transform(px, py)
# second spherical
r2 = sqrt(px**2 + py**2)**2
return px/r2, py/r2 | en | 0.808977 | This applies a random Moebius transform and then its inverse. # apply pre-Moebius (az+b)/(cz+d) # apply inner transform # return post-Moebius (dz-b)/(-cz+a), which is inverse of pre-Moebius Since the spherical transform is its own inverse, it can simply be applied twice. # first spherical # inner transform # second spherical | 3.483839 | 3 |
routes.py | BLM16/URL-Shortener | 0 | 6631030 | from flask import Blueprint, render_template, redirect, url_for, request
from sqlalchemy.sql import text
from sqlalchemy.exc import SQLAlchemyError
import re
from config import engine, db
from models.url import URL
routes = Blueprint("routes", __name__, static_folder = 'static', template_folder = 'templates')
@routes.errorhandler(404)
def PageNotFound(e):
return redirect(url_for('routes.Error', title = "Error: 404 - page not found", msg = e))
@routes.route('/')
def Index():
return render_template("index.html")
@routes.route('/', methods = ['POST'])
def SetURL():
# Get the url
url = request.form['url']
if not url:
return
# Define valid url regex
url_regex = re.compile(
r'^(?:http)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Validate the url
if not url_regex.match(url):
return render_template("index.html", error = True, urlVal = url)
# Connect to the DB
with engine.connect() as con:
# See if there is already a key for the url
try:
sql = text("SELECT * FROM url WHERE url = :url")
res = con.execute(sql, url = url).fetchall()
except SQLAlchemyError as e:
return redirect(url_for('routes.Error', title = 'Error: Unhandled error', msg = type(e)))
except:
return redirect(url_for('routes.Error', title = 'Error: Unhandled error'))
# If there is a key display that link
if len(res) > 0:
return render_template("index.html", short = f'{request.url_root}{res[0].key}')
# Generate a new key
key = URL.GenerateKey()
try:
# Insert the KVP into the database
kvp = URL(key, url)
db.session.add(kvp)
db.session.commit()
except:
return redirect(url_for('routes.Error', title = 'Error: Unhandled error'))
# Display the new link from the key
return render_template("index.html", short = f'{request.url_root}{key}')
@routes.route('/<key>')
def KeyRedir(key):
# Connect to the DB
with engine.connect() as con:
# Get the url associated with the key
sql = text("SELECT url FROM url WHERE key = :key")
url = con.execute(sql, key = key).scalar()
# Redirect to the url for the key
if url:
return redirect(url)
else:
return redirect(url_for('routes.Error', title = "Error: 404 - page not found", msg = f"The key <{key}> does not exist."))
@routes.route('/error')
def Error():
# Get the error parameters
title = request.args.get('title')
msg = request.args.get('msg')
back = request.args.get('back')
return render_template("error.html", title = title, msg = msg, back = back)
| from flask import Blueprint, render_template, redirect, url_for, request
from sqlalchemy.sql import text
from sqlalchemy.exc import SQLAlchemyError
import re
from config import engine, db
from models.url import URL
routes = Blueprint("routes", __name__, static_folder = 'static', template_folder = 'templates')
@routes.errorhandler(404)
def PageNotFound(e):
return redirect(url_for('routes.Error', title = "Error: 404 - page not found", msg = e))
@routes.route('/')
def Index():
return render_template("index.html")
@routes.route('/', methods = ['POST'])
def SetURL():
# Get the url
url = request.form['url']
if not url:
return
# Define valid url regex
url_regex = re.compile(
r'^(?:http)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Validate the url
if not url_regex.match(url):
return render_template("index.html", error = True, urlVal = url)
# Connect to the DB
with engine.connect() as con:
# See if there is already a key for the url
try:
sql = text("SELECT * FROM url WHERE url = :url")
res = con.execute(sql, url = url).fetchall()
except SQLAlchemyError as e:
return redirect(url_for('routes.Error', title = 'Error: Unhandled error', msg = type(e)))
except:
return redirect(url_for('routes.Error', title = 'Error: Unhandled error'))
# If there is a key display that link
if len(res) > 0:
return render_template("index.html", short = f'{request.url_root}{res[0].key}')
# Generate a new key
key = URL.GenerateKey()
try:
# Insert the KVP into the database
kvp = URL(key, url)
db.session.add(kvp)
db.session.commit()
except:
return redirect(url_for('routes.Error', title = 'Error: Unhandled error'))
# Display the new link from the key
return render_template("index.html", short = f'{request.url_root}{key}')
@routes.route('/<key>')
def KeyRedir(key):
# Connect to the DB
with engine.connect() as con:
# Get the url associated with the key
sql = text("SELECT url FROM url WHERE key = :key")
url = con.execute(sql, key = key).scalar()
# Redirect to the url for the key
if url:
return redirect(url)
else:
return redirect(url_for('routes.Error', title = "Error: 404 - page not found", msg = f"The key <{key}> does not exist."))
@routes.route('/error')
def Error():
# Get the error parameters
title = request.args.get('title')
msg = request.args.get('msg')
back = request.args.get('back')
return render_template("error.html", title = title, msg = msg, back = back)
| en | 0.663466 | # Get the url # Define valid url regex # http:// or https:// #domain... # ...or ip # optional port # Validate the url # Connect to the DB # See if there is already a key for the url # If there is a key display that link # Generate a new key # Insert the KVP into the database # Display the new link from the key # Connect to the DB # Get the url associated with the key # Redirect to the url for the key # Get the error parameters | 2.624493 | 3 |
.circleci/scripts/wait_for_server.py | ybt195/determined | 1,729 | 6631031 | <filename>.circleci/scripts/wait_for_server.py<gh_stars>1000+
import argparse
import socket
import time
def wait_for_server(host, port, timeout=5.0):
for _ in range(100):
try:
with socket.create_connection((host, port), timeout=timeout):
return
except OSError:
time.sleep(1)
raise Exception(f"Timed out waiting for the {host}:{port}.")
def main() -> None:
parser = argparse.ArgumentParser(description="Wait for server helper.")
parser.add_argument("host", help="Host")
parser.add_argument("port", help="Port")
args = parser.parse_args()
wait_for_server(args.host, args.port)
if __name__ == "__main__":
main()
| <filename>.circleci/scripts/wait_for_server.py<gh_stars>1000+
import argparse
import socket
import time
def wait_for_server(host, port, timeout=5.0):
for _ in range(100):
try:
with socket.create_connection((host, port), timeout=timeout):
return
except OSError:
time.sleep(1)
raise Exception(f"Timed out waiting for the {host}:{port}.")
def main() -> None:
parser = argparse.ArgumentParser(description="Wait for server helper.")
parser.add_argument("host", help="Host")
parser.add_argument("port", help="Port")
args = parser.parse_args()
wait_for_server(args.host, args.port)
if __name__ == "__main__":
main()
| none | 1 | 3.063749 | 3 |
|
LongestLines.py | TurtleShell/DigitImageClassifier | 0 | 6631032 | <reponame>TurtleShell/DigitImageClassifier
#LongestLines
"""
This file creates a feature that tracks
the longest dark line and the number of
distinct lines in each major direction
"""
import math
from DataFormatFunctions import *
from TraversalHelperFunctions import *
from HelperClasses import *
def getLineLenFromCoords(coords, sqMatrix, direction, traversedCoords):
nextCoords = coords
pixelsTraversed = 0
while (nextCoords.isValid() and nextCoords.isDark(sqMatrix)):
traversedCoords.append(nextCoords)
pixelsTraversed += 1
nextCoords = Coords(nextCoords.x+direction[0], nextCoords.y+direction[1])
return pixelsTraversed
def getLineObjForDir(sqMatrix, direction):
traversedCoords = []
lineLenPointObjList = []
lineLens = []
countedCoords = []
for y in range(IMG_HEIGHT):
for x in range(IMG_WIDTH):
coords = Coords(x, y)
if (coords not in traversedCoords):
lineLen = getLineLenFromCoords(coords, sqMatrix, direction, traversedCoords)
lineLenPointObj = LineLenPointObj(coords, lineLen)
lineLenPointObjList.append(lineLenPointObj)
lineLens.append(lineLen)
maxLen = max(lineLens)
for lineLenPointObj in lineLenPointObjList:
lineLen = lineLenPointObj.length
if (lineLen >= LL_LEN_THRESH):
lineLenCoords = lineLenPointObj.coords
if (coordsDistantFromList(lineLenCoords, countedCoords, LL_COORD_DIST_THRESH)):
countedCoords.append(lineLenCoords)
linesOverThresh = len(countedCoords)
return LineLenDirectionObj(maxLen, linesOverThresh)
def getLongestLinesObject(imgVector):
sqMatrix = imgVectorToSquareMatrix(imgVector)
llList = []
for direction in DIRECTIONS_LIST:
lineObj = getLineObjForDir(sqMatrix, direction)
llList.append(lineObj)
return llList
def setLongestLinesFeature(imgVector, vectori, featureMatrix):
llList = getLongestLinesObject(imgVector)
for i,lineObj in enumerate(llList):
fIndex = i*2
featureMatrix[fIndex,vectori] = lineObj.maxLenVal
featureMatrix[fIndex+1,vectori] = lineObj.linesVal
def createLongestLinesFeatureMatrixFromInput(inputMatrix):
vectors = np.shape(inputMatrix)[1]
featureMatrix = np.zeros((LL_INPUT_SIZE, vectors))
for vectori in range(vectors):
imgVector = inputMatrix[:,vectori]
setLongestLinesFeature(imgVector, vectori, featureMatrix)
return featureMatrix
| #LongestLines
"""
This file creates a feature that tracks
the longest dark line and the number of
distinct lines in each major direction
"""
import math
from DataFormatFunctions import *
from TraversalHelperFunctions import *
from HelperClasses import *
def getLineLenFromCoords(coords, sqMatrix, direction, traversedCoords):
nextCoords = coords
pixelsTraversed = 0
while (nextCoords.isValid() and nextCoords.isDark(sqMatrix)):
traversedCoords.append(nextCoords)
pixelsTraversed += 1
nextCoords = Coords(nextCoords.x+direction[0], nextCoords.y+direction[1])
return pixelsTraversed
def getLineObjForDir(sqMatrix, direction):
traversedCoords = []
lineLenPointObjList = []
lineLens = []
countedCoords = []
for y in range(IMG_HEIGHT):
for x in range(IMG_WIDTH):
coords = Coords(x, y)
if (coords not in traversedCoords):
lineLen = getLineLenFromCoords(coords, sqMatrix, direction, traversedCoords)
lineLenPointObj = LineLenPointObj(coords, lineLen)
lineLenPointObjList.append(lineLenPointObj)
lineLens.append(lineLen)
maxLen = max(lineLens)
for lineLenPointObj in lineLenPointObjList:
lineLen = lineLenPointObj.length
if (lineLen >= LL_LEN_THRESH):
lineLenCoords = lineLenPointObj.coords
if (coordsDistantFromList(lineLenCoords, countedCoords, LL_COORD_DIST_THRESH)):
countedCoords.append(lineLenCoords)
linesOverThresh = len(countedCoords)
return LineLenDirectionObj(maxLen, linesOverThresh)
def getLongestLinesObject(imgVector):
sqMatrix = imgVectorToSquareMatrix(imgVector)
llList = []
for direction in DIRECTIONS_LIST:
lineObj = getLineObjForDir(sqMatrix, direction)
llList.append(lineObj)
return llList
def setLongestLinesFeature(imgVector, vectori, featureMatrix):
llList = getLongestLinesObject(imgVector)
for i,lineObj in enumerate(llList):
fIndex = i*2
featureMatrix[fIndex,vectori] = lineObj.maxLenVal
featureMatrix[fIndex+1,vectori] = lineObj.linesVal
def createLongestLinesFeatureMatrixFromInput(inputMatrix):
vectors = np.shape(inputMatrix)[1]
featureMatrix = np.zeros((LL_INPUT_SIZE, vectors))
for vectori in range(vectors):
imgVector = inputMatrix[:,vectori]
setLongestLinesFeature(imgVector, vectori, featureMatrix)
return featureMatrix | en | 0.902498 | #LongestLines This file creates a feature that tracks
the longest dark line and the number of
distinct lines in each major direction | 3.044856 | 3 |
calculateStatistics.py | dahe-cvl/isvc2020_overscan_detection | 0 | 6631033 | import numpy as np
import cv2
import csv
from itertools import islice
import os
def readSamples(db_path, image_size):
files = []
print(db_path)
# r=root, d=directories, f = files
for r, d, f in os.walk(db_path):
for file in f:
if '.png' in file:
files.append(os.path.join(r, file))
all_samples_r = [];
all_samples_g = [];
all_samples_b = [];
for f in files:
# print(f)
# read images
frame = cv2.imread(f);
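        # convert to greyscale and replicate the single channel back to a
        # 3-channel image so all samples share the same layout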
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
frame_np = np.array(frame);
# resize image
dim = (image_size, image_size);
frame_resized = cv2.resize(frame_np, dim, interpolation=cv2.INTER_AREA);
# print(frame_resized.shape)
# split image
b, g, r = cv2.split(frame_resized);
all_samples_r.append(r);
all_samples_g.append(g);
all_samples_b.append(b);
print("--------------------------------------------------")
print("process frame: " + str(f))
all_samples_r_np = np.array(all_samples_r)
all_samples_g_np = np.array(all_samples_g)
all_samples_b_np = np.array(all_samples_b)
print(all_samples_r_np.shape)
print(all_samples_g_np.shape)
print(all_samples_b_np.shape)
return all_samples_r_np, all_samples_g_np, all_samples_b_np
def checkStatistics(zero_centered_r_np, zero_centered_g_np, zero_centered_b_np, normalized_r_np, normalized_g_np, normalized_b_np):
# calculate zero-centered frames
print(np.mean(zero_centered_r_np))
print(np.mean(zero_centered_g_np))
print(np.mean(zero_centered_b_np))
# calculate standard deviation for each color channel
print(np.std(normalized_r_np))
print(np.std(normalized_g_np))
print(np.std(normalized_b_np))
def calculateSTD(all_samples_r_np, all_samples_g_np, all_samples_b_np ):
print("calculate standard deviation of zero-centered frames ... ")
std_r = np.std(all_samples_r_np);
std_g = np.std(all_samples_g_np);
std_b = np.std(all_samples_b_np);
print(std_r)
print(std_g)
print(std_b)
return std_r, std_g, std_b
def calculateMean(all_samples_r_np, all_samples_g_np, all_samples_b_np):
print("calculate mean value for each color channel ... ")
mean_r = np.mean(all_samples_r_np);
mean_g = np.mean(all_samples_g_np);
mean_b = np.mean(all_samples_b_np);
print(mean_r)
print(mean_g)
print(mean_b)
return mean_r, mean_g, mean_b;
#print("calculate mean image for each color channel ... ")
#mean_r = np.mean(all_samples_r_np, axis=0);
#mean_g = np.mean(all_samples_g_np, axis=0);
#mean_b = np.mean(all_samples_b_np, axis=0);
#print(mean_r.shape)
#print(mean_g.shape)
#print(mean_b.shape)
#print("merge color channels to one mean image ... ")
#mean_frame = cv2.merge((mean_b, mean_g, mean_r));
#print(mean_frame.shape)
#print("save image ... ")
#cv2.imwrite(dst_path + "/mean_frame_" + str(image_size) + ".jpg", mean_frame)
def saveStatistics(dst_path, image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b):
print("save statistics to file ... ")
fp = open(dst_path + "statistics_" + str(image_size) + "x" + str(image_size) + ".txt", 'w');
fp.write("image_size:" + str(image_size) + "\n")
fp.write("mean_r = " + str(mean_r.round(5)) + "\n")
fp.write("mean_g = " + str(mean_g.round(5)) + "\n")
fp.write("mean_b = " + str(mean_b.round(5)) + "\n")
fp.write("std_r = " + str(std_r.round(5)) + "\n")
fp.write("std_g = " + str(std_g.round(5)) + "\n")
fp.write("std_b = " + str(std_b.round(5)) + "\n")
def loadStatistics(statistics_filepath):
print("save statistics to file ... ")
fp = open(statistics_filepath, 'r');
lines = fp.readlines();
print(lines)
image_size = int(lines[0].split(':')[1]);
mean_r = float(lines[1].split(' = ')[1]);
mean_g = float(lines[2].split(' = ')[1]);
mean_b = float(lines[3].split(' = ')[1]);
std_r = float(lines[4].split(' = ')[1]);
std_g = float(lines[5].split(' = ')[1]);
std_b = float(lines[6].split(' = ')[1]);
return image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b;
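# main() has two modes: with ACTIVE_FLAG True it computes the per-channel mean/std of the training
# images and writes them to a statistics file; with ACTIVE_FLAG False it reloads a previously saved
# file and applies zero-centering and normalization as a sanity check.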
def main():
print("prepare keras database");
############################################################################
## CONFIGURATION
############################################################################
db_path = "/caa/Projects02/vhh/private/database_nobackup/VHH_datasets/generated/stc/20191203/db_v7/train/"
dst_path = "/caa/Projects02/vhh/private/database_nobackup/VHH_datasets/generated/stc/20191203/db_v7/"
image_size = 128;
############################################################################
print("get all samples...");
all_samples_r_np, all_samples_g_np, all_samples_b_np = readSamples(db_path, image_size);
ACTIVE_FLAG = True;
if(ACTIVE_FLAG == True):
mean_r, mean_g, mean_b = calculateMean(all_samples_r_np, all_samples_g_np, all_samples_b_np);
std_r, std_g, std_b = calculateSTD(all_samples_r_np, all_samples_g_np, all_samples_b_np);
        # save statistics
saveStatistics(dst_path, image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b);
elif (ACTIVE_FLAG == False):
image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b = loadStatistics(dst_path + "/statistics_" + str(image_size) + "x"+ str(image_size) + ".txt")
# zero-centering
zero_centered_r_np = all_samples_r_np - mean_r;
zero_centered_g_np = all_samples_g_np - mean_g;
zero_centered_b_np = all_samples_b_np - mean_b;
# normalization
normalized_r_np = zero_centered_r_np / std_r;
normalized_g_np = zero_centered_g_np / std_g;
normalized_b_np = zero_centered_b_np / std_b;
checkStatistics(zero_centered_r_np, zero_centered_g_np, zero_centered_b_np, normalized_r_np, normalized_g_np,
normalized_b_np);
# print(np.std(samples_b_tmp));
# print(np.std(samples_g_tmp));
# print(np.std(samples_r_tmp));
if(__name__== "__main__"):
main(); | import numpy as np
import cv2
import csv
from itertools import islice
import os
def readSamples(db_path, image_size):
files = []
print(db_path)
# r=root, d=directories, f = files
for r, d, f in os.walk(db_path):
for file in f:
if '.png' in file:
files.append(os.path.join(r, file))
all_samples_r = [];
all_samples_g = [];
all_samples_b = [];
for f in files:
# print(f)
# read images
frame = cv2.imread(f);
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
frame_np = np.array(frame);
# resize image
dim = (image_size, image_size);
frame_resized = cv2.resize(frame_np, dim, interpolation=cv2.INTER_AREA);
# print(frame_resized.shape)
# split image
b, g, r = cv2.split(frame_resized);
all_samples_r.append(r);
all_samples_g.append(g);
all_samples_b.append(b);
print("--------------------------------------------------")
print("process frame: " + str(f))
all_samples_r_np = np.array(all_samples_r)
all_samples_g_np = np.array(all_samples_g)
all_samples_b_np = np.array(all_samples_b)
print(all_samples_r_np.shape)
print(all_samples_g_np.shape)
print(all_samples_b_np.shape)
return all_samples_r_np, all_samples_g_np, all_samples_b_np
def checkStatistics(zero_centered_r_np, zero_centered_g_np, zero_centered_b_np, normalized_r_np, normalized_g_np, normalized_b_np):
# calculate zero-centered frames
print(np.mean(zero_centered_r_np))
print(np.mean(zero_centered_g_np))
print(np.mean(zero_centered_b_np))
# calculate standard deviation for each color channel
print(np.std(normalized_r_np))
print(np.std(normalized_g_np))
print(np.std(normalized_b_np))
def calculateSTD(all_samples_r_np, all_samples_g_np, all_samples_b_np ):
print("calculate standard deviation of zero-centered frames ... ")
std_r = np.std(all_samples_r_np);
std_g = np.std(all_samples_g_np);
std_b = np.std(all_samples_b_np);
print(std_r)
print(std_g)
print(std_b)
return std_r, std_g, std_b
def calculateMean(all_samples_r_np, all_samples_g_np, all_samples_b_np):
print("calculate mean value for each color channel ... ")
mean_r = np.mean(all_samples_r_np);
mean_g = np.mean(all_samples_g_np);
mean_b = np.mean(all_samples_b_np);
print(mean_r)
print(mean_g)
print(mean_b)
return mean_r, mean_g, mean_b;
#print("calculate mean image for each color channel ... ")
#mean_r = np.mean(all_samples_r_np, axis=0);
#mean_g = np.mean(all_samples_g_np, axis=0);
#mean_b = np.mean(all_samples_b_np, axis=0);
#print(mean_r.shape)
#print(mean_g.shape)
#print(mean_b.shape)
#print("merge color channels to one mean image ... ")
#mean_frame = cv2.merge((mean_b, mean_g, mean_r));
#print(mean_frame.shape)
#print("save image ... ")
#cv2.imwrite(dst_path + "/mean_frame_" + str(image_size) + ".jpg", mean_frame)
def saveStatistics(dst_path, image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b):
print("save statistics to file ... ")
fp = open(dst_path + "statistics_" + str(image_size) + "x" + str(image_size) + ".txt", 'w');
fp.write("image_size:" + str(image_size) + "\n")
fp.write("mean_r = " + str(mean_r.round(5)) + "\n")
fp.write("mean_g = " + str(mean_g.round(5)) + "\n")
fp.write("mean_b = " + str(mean_b.round(5)) + "\n")
fp.write("std_r = " + str(std_r.round(5)) + "\n")
fp.write("std_g = " + str(std_g.round(5)) + "\n")
fp.write("std_b = " + str(std_b.round(5)) + "\n")
def loadStatistics(statistics_filepath):
print("save statistics to file ... ")
fp = open(statistics_filepath, 'r');
lines = fp.readlines();
print(lines)
image_size = int(lines[0].split(':')[1]);
mean_r = float(lines[1].split(' = ')[1]);
mean_g = float(lines[2].split(' = ')[1]);
mean_b = float(lines[3].split(' = ')[1]);
std_r = float(lines[4].split(' = ')[1]);
std_g = float(lines[5].split(' = ')[1]);
std_b = float(lines[6].split(' = ')[1]);
return image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b;
def main():
print("prepare keras database");
############################################################################
## CONFIGURATION
############################################################################
db_path = "/caa/Projects02/vhh/private/database_nobackup/VHH_datasets/generated/stc/20191203/db_v7/train/"
dst_path = "/caa/Projects02/vhh/private/database_nobackup/VHH_datasets/generated/stc/20191203/db_v7/"
image_size = 128;
############################################################################
print("get all samples...");
all_samples_r_np, all_samples_g_np, all_samples_b_np = readSamples(db_path, image_size);
ACTIVE_FLAG = True;
if(ACTIVE_FLAG == True):
mean_r, mean_g, mean_b = calculateMean(all_samples_r_np, all_samples_g_np, all_samples_b_np);
std_r, std_g, std_b = calculateSTD(all_samples_r_np, all_samples_g_np, all_samples_b_np);
        # save statistics
saveStatistics(dst_path, image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b);
elif (ACTIVE_FLAG == False):
image_size, mean_r, mean_g, mean_b, std_r, std_g, std_b = loadStatistics(dst_path + "/statistics_" + str(image_size) + "x"+ str(image_size) + ".txt")
# zero-centering
zero_centered_r_np = all_samples_r_np - mean_r;
zero_centered_g_np = all_samples_g_np - mean_g;
zero_centered_b_np = all_samples_b_np - mean_b;
# normalization
normalized_r_np = zero_centered_r_np / std_r;
normalized_g_np = zero_centered_g_np / std_g;
normalized_b_np = zero_centered_b_np / std_b;
checkStatistics(zero_centered_r_np, zero_centered_g_np, zero_centered_b_np, normalized_r_np, normalized_g_np,
normalized_b_np);
# print(np.std(samples_b_tmp));
# print(np.std(samples_g_tmp));
# print(np.std(samples_r_tmp));
if(__name__== "__main__"):
main(); | en | 0.338932 | # r=root, d=directories, f = files # print(f) # read images # resize image # print(frame_resized.shape) # split image # calculate zero-centered frames # calculate standard deviation for each color channel #print("calculate mean image for each color channel ... ") #mean_r = np.mean(all_samples_r_np, axis=0); #mean_g = np.mean(all_samples_g_np, axis=0); #mean_b = np.mean(all_samples_b_np, axis=0); #print(mean_r.shape) #print(mean_g.shape) #print(mean_b.shape) #print("merge color channels to one mean image ... ") #mean_frame = cv2.merge((mean_b, mean_g, mean_r)); #print(mean_frame.shape) #print("save image ... ") #cv2.imwrite(dst_path + "/mean_frame_" + str(image_size) + ".jpg", mean_frame) ############################################################################ ## CONFIGURATION ############################################################################ ############################################################################ # save statiscits # zero-centering # normalization # print(np.std(samples_b_tmp)); # print(np.std(samples_g_tmp)); # print(np.std(samples_r_tmp)); | 2.551862 | 3 |
Exercises/ejercicio-36.py | shoriwe-upb/TallerEjercicios | 0 | 6631034 | <gh_stars>0
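# Reads three numbers a, b and c and prints "Es mayor" ("it is greater") if a + b > c or
# "Es menor" ("it is smaller") if a + b < c; nothing is printed when a + b equals c.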
def main():
a = float(input("Number a: "))
b = float(input("Number b: "))
c = float(input("Number c: "))
if a + b > c:
print("Es mayor")
elif a + b < c:
print("Es menor")
if __name__ == '__main__':
main()
| def main():
a = float(input("Number a: "))
b = float(input("Number b: "))
c = float(input("Number c: "))
if a + b > c:
print("Es mayor")
elif a + b < c:
print("Es menor")
if __name__ == '__main__':
main() | none | 1 | 3.818494 | 4 |
|
speech.py | dlei/class-transcribe | 0 | 6631035 | #!/usr/bin/python
import sys
import urllib2
import os
import json
import subprocess as sp
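# NOTE: this script is written for Python 2 (urllib2, print statements and the "except X, e" syntax);
# it will not run unmodified under Python 3.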
# url = "https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=en-US"
url = 'https://www.google.com/speech-api/v2/recognize?xjerr=1&client=chromium&lang=en-US'
fileName = str(sys.argv[1])
fileExtension = os.path.splitext(fileName)[1]
converted = False
print fileExtension
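# Convert anything that is not already FLAC to FLAC with pacpl (Perl Audio Converter), since the
# request below declares Content-type audio/x-flac.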
if fileExtension != ".flac":
fnull = open(os.devnull, 'w')
sp.call("pacpl --overwrite -t flac " + fileName, shell = True, stdout = fnull, stderr = fnull)
fnull.close()
fileName = os.path.splitext(fileName)[0] + '.flac'
converted = True
try:
binary_audio = open(fileName, 'rb')
except:
print "Failed to get binary data."
size_of_audio = os.path.getsize(fileName)
if converted:
os.remove(fileName)
request = urllib2.Request(url)
request.add_header('Content-type','audio/x-flac; rate=16000')
request.add_header('Content-length', str(size_of_audio))
request.add_data(binary_audio)
try:
response = urllib2.urlopen(request)
print response
except urllib2.URLError, e:
print "Unable to connect"
except urllib2.HTTPError, e:
print "Oops, bad request"
content = response.read()
data = json.loads(content)
print data["hypotheses"][0]["utterance"]
| #!/usr/bin/python
import sys
import urllib2
import os
import json
import subprocess as sp
# url = "https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=en-US"
url = 'https://www.google.com/speech-api/v2/recognize?xjerr=1&client=chromium&lang=en-US'
fileName = str(sys.argv[1])
fileExtension = os.path.splitext(fileName)[1]
converted = False
print fileExtension
if fileExtension != ".flac":
fnull = open(os.devnull, 'w')
sp.call("pacpl --overwrite -t flac " + fileName, shell = True, stdout = fnull, stderr = fnull)
fnull.close()
fileName = os.path.splitext(fileName)[0] + '.flac'
converted = True
try:
binary_audio = open(fileName, 'rb')
except:
print "Failed to get binary data."
size_of_audio = os.path.getsize(fileName)
if converted:
os.remove(fileName)
request = urllib2.Request(url)
request.add_header('Content-type','audio/x-flac; rate=16000')
request.add_header('Content-length', str(size_of_audio))
request.add_data(binary_audio)
try:
response = urllib2.urlopen(request)
print response
except urllib2.URLError, e:
print "Unable to connect"
except urllib2.HTTPError, e:
print "Oops, bad request"
content = response.read()
data = json.loads(content)
print data["hypotheses"][0]["utterance"] | en | 0.444293 | #!/usr/bin/python # url = "https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=en-US" | 2.98433 | 3 |
tests/lineage/test_lineage.py | ktmud/incubator-airflow | 2 | 6631036 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.lineage import AUTO
from airflow.lineage.entities import File
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestLineage(unittest.TestCase):
def test_lineage(self):
dag = DAG(
dag_id='test_prepare_lineage',
start_date=DEFAULT_DATE
)
f1s = "/tmp/does_not_exist_1-{}"
f2s = "/tmp/does_not_exist_2-{}"
f3s = "/tmp/does_not_exist_3"
file1 = File(f1s.format("{{ execution_date }}"))
file2 = File(f2s.format("{{ execution_date }}"))
file3 = File(f3s)
with dag:
op1 = DummyOperator(task_id='leave1',
inlets=file1,
outlets=[file2, ])
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1',
inlets=AUTO,
outlets=file3)
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3',
inlets=["leave1", "upstream_level_1"])
op1.set_downstream(op3)
op2.set_downstream(op3)
op3.set_downstream(op4)
op4.set_downstream(op5)
dag.clear()
# execution_date is set in the context in order to avoid creating task instances
ctx1 = {"ti": TI(task=op1, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
ctx2 = {"ti": TI(task=op2, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
ctx3 = {"ti": TI(task=op3, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
ctx5 = {"ti": TI(task=op5, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
# prepare with manual inlets and outlets
op1.pre_execute(ctx1)
self.assertEqual(len(op1.inlets), 1)
self.assertEqual(op1.inlets[0].url, f1s.format(DEFAULT_DATE))
self.assertEqual(len(op1.outlets), 1)
self.assertEqual(op1.outlets[0].url, f2s.format(DEFAULT_DATE))
# post process with no backend
op1.post_execute(ctx1)
op2.pre_execute(ctx2)
self.assertEqual(len(op2.inlets), 0)
op2.post_execute(ctx2)
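        # op3 declares inlets=AUTO, so preparing it should collect the outlets of its direct
        # upstream tasks; here that is just file2, produced by op1.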
op3.pre_execute(ctx3)
self.assertEqual(len(op3.inlets), 1)
self.assertEqual(op3.inlets[0].url, f2s.format(DEFAULT_DATE))
self.assertEqual(op3.outlets[0], file3)
op3.post_execute(ctx3)
# skip 4
op5.pre_execute(ctx5)
self.assertEqual(len(op5.inlets), 2)
op5.post_execute(ctx5)
| # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.lineage import AUTO
from airflow.lineage.entities import File
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestLineage(unittest.TestCase):
def test_lineage(self):
dag = DAG(
dag_id='test_prepare_lineage',
start_date=DEFAULT_DATE
)
f1s = "/tmp/does_not_exist_1-{}"
f2s = "/tmp/does_not_exist_2-{}"
f3s = "/tmp/does_not_exist_3"
file1 = File(f1s.format("{{ execution_date }}"))
file2 = File(f2s.format("{{ execution_date }}"))
file3 = File(f3s)
with dag:
op1 = DummyOperator(task_id='leave1',
inlets=file1,
outlets=[file2, ])
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1',
inlets=AUTO,
outlets=file3)
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3',
inlets=["leave1", "upstream_level_1"])
op1.set_downstream(op3)
op2.set_downstream(op3)
op3.set_downstream(op4)
op4.set_downstream(op5)
dag.clear()
# execution_date is set in the context in order to avoid creating task instances
ctx1 = {"ti": TI(task=op1, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
ctx2 = {"ti": TI(task=op2, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
ctx3 = {"ti": TI(task=op3, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
ctx5 = {"ti": TI(task=op5, execution_date=DEFAULT_DATE),
"execution_date": DEFAULT_DATE}
# prepare with manual inlets and outlets
op1.pre_execute(ctx1)
self.assertEqual(len(op1.inlets), 1)
self.assertEqual(op1.inlets[0].url, f1s.format(DEFAULT_DATE))
self.assertEqual(len(op1.outlets), 1)
self.assertEqual(op1.outlets[0].url, f2s.format(DEFAULT_DATE))
# post process with no backend
op1.post_execute(ctx1)
op2.pre_execute(ctx2)
self.assertEqual(len(op2.inlets), 0)
op2.post_execute(ctx2)
op3.pre_execute(ctx3)
self.assertEqual(len(op3.inlets), 1)
self.assertEqual(op3.inlets[0].url, f2s.format(DEFAULT_DATE))
self.assertEqual(op3.outlets[0], file3)
op3.post_execute(ctx3)
# skip 4
op5.pre_execute(ctx5)
self.assertEqual(len(op5.inlets), 2)
op5.post_execute(ctx5)
| en | 0.868302 | # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # execution_date is set in the context in order to avoid creating task instances # prepare with manual inlets and outlets # post process with no backend # skip 4 | 1.912357 | 2 |
tools/maketestgds.py | gdmcbain/gdspy | 239 | 6631037 | ######################################################################
# #
# Copyright 2009 <NAME>. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
import gdspy
import numpy
lib = gdspy.GdsLibrary("TESTLIB", unit=1, precision=1e-7)
### PolygonSet
cell = lib.new_cell("PolygonSet")
p = gdspy.PolygonSet(
[
[(10, 0), (11, 0), (10, 1)],
[(11, 0), (10, 1), (11, 1)],
[(11, 1), (12, 1), (11, 2)],
],
1,
2,
)
cell.add(p)
cell = lib.new_cell("PolygonSet_fillet")
orig = gdspy.PolygonSet(
[
[
(0, 0),
(-1, 0),
(0, -1),
(0.5, -0.5),
(1, 0),
(1, 1),
(4, -1),
(1, 3),
(1, 2),
(0, 1),
],
[(2, -1), (3, -1), (2.5, -2)],
]
)
orig.datatypes = [0, 1]
p = gdspy.copy(orig, 0, 5)
p.layers = [1, 1]
p.fillet(0.3, max_points=0)
cell.add(p)
p = gdspy.copy(orig, 5, 5)
p.layers = [2, 2]
p.fillet([0.3, 0.2, 0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.4, 0.1, 0.2, 0], max_points=0)
cell.add(p)
p = gdspy.copy(orig, 5, 0)
p.layers = [3, 3]
p.fillet(
[[0.1, 0.1, 0.4, 0, 0.4, 0.1, 0.1, 0.4, 0.4, 0.1], [0.2, 0.2, 0.5]], max_points=0
)
cell.add(p)
p = gdspy.copy(orig, 0, 0)
p.layers = [4, 4]
p.fillet([0.8, [10.0, 10.0, 20.0]], max_points=199, precision=1e-6)
cell.add(p)
### FlexPath
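# broken() and pointy() below are custom callbacks for the FlexPath tests: broken is used as a corner
# function (returning either the intersection point or the original corner triplet) and pointy as an
# end function that builds a pointed cap.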
def broken(p0, v0, p1, v1, p2, w):
den = v1[1] * v0[0] - v1[0] * v0[1]
lim = 1e-12 * (v0[0] ** 2 + v0[1] ** 2) * (v1[0] ** 2 + v1[1] ** 2)
if den ** 2 < lim:
u0 = u1 = 0
p = 0.5 * (p0 + p1)
else:
dx = p1[0] - p0[0]
dy = p1[1] - p0[1]
u0 = (v1[1] * dx - v1[0] * dy) / den
u1 = (v0[1] * dx - v0[0] * dy) / den
p = 0.5 * (p0 + v0 * u0 + p1 + v1 * u1)
if u0 <= 0 and u1 >= 0:
return [p]
return [p0, p2, p1]
def pointy(p0, v0, p1, v1):
r = 0.5 * numpy.sqrt(numpy.sum((p0 - p1) ** 2))
v0 /= numpy.sqrt(numpy.sum(v0 ** 2))
v1 /= numpy.sqrt(numpy.sum(v1 ** 2))
return [p0, 0.5 * (p0 + p1) + 0.5 * (v0 - v1) * r, p1]
cell = lib.new_cell("FlexPath1")
fp = gdspy.FlexPath([(0, 0), (1, 1)], 0.1, layer=[1], gdsii_path=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(1, 0), (2, 1)],
0.1,
[-0.1, 0.1],
tolerance=1e-5,
ends=["round", "extended"],
layer=[2, 3],
max_points=6,
)
cell.add(fp)
fp = gdspy.FlexPath(
[(2, 0), (3, 1)], [0.1, 0.2], 0.2, ends=(0.2, 0.1), layer=4, datatype=[1, 1]
)
cell.add(fp)
fp = gdspy.FlexPath(
[(3, 0), (4, 1)],
[0.1, 0.2, 0.1],
[-0.2, 0, 0.2],
ends=[(0.2, 0.1), "smooth", pointy],
datatype=5,
)
cell.add(fp)
cell = lib.new_cell("FlexPath2")
fp = gdspy.FlexPath(
[(0, 0), (0.5, 0), (1, 0), (1, 1), (0, 1), (-1, -2), (-2, 0)],
0.05,
[0, -0.1, 0, 0.1],
corners=["natural", "circular bend", "circular bend", "circular bend"],
ends=["flush", "extended", (0.1, 0.2), "round"],
tolerance=1e-4,
layer=[0, 1, 1, 2],
bend_radius=[0, 0.3, 0.3, 0.2],
max_points=10,
)
cell.add(fp)
cell = lib.new_cell("FlexPath3")
pts = numpy.array(
[
(0, 0),
(0.5, 0),
(1, 0),
(1, 2),
(3, 0),
(2, -1),
(2, -2),
(0, -1),
(1, -2),
(1, -3),
]
)
fp = gdspy.FlexPath(
pts + numpy.array((0, 5)),
[0.1, 0.1, 0.1],
0.15,
layer=[1, 2, 3],
corners=["natural", "miter", "bevel"],
ends=(0.5, 0),
)
cell.add(fp)
fp = gdspy.FlexPath(
pts + numpy.array((5, 0)),
[0.1, 0.1, 0.1],
0.15,
layer=[4, 5, 6],
corners=["round", "smooth", broken],
ends=[pointy, "smooth", (0, 0.5)],
)
cell.add(fp)
cell = lib.new_cell("FlexPath4")
fp = gdspy.FlexPath(
[(0, 0)],
[0.1, 0.2, 0.1],
0.15,
layer=[1, 2, 3],
corners=["natural", "miter", "bevel"],
)
fp.segment((1, 0))
fp.segment((1, 1), 0.1, 0.05)
fp.segment((1, 1), [0.2, 0.1, 0.1], -0.05, True)
fp.segment((-1, 1), 0.2, [-0.2, 0, 0.3], True)
fp.arc(2, 0, 0.5 * numpy.pi)
fp.arc(3, 0.5 * numpy.pi, numpy.pi, 0.1, 0)
fp.arc(1, 0.4 * numpy.pi, -0.4 * numpy.pi, [0.1, 0.2, 0.1], [0.2, 0, -0.2])
fp.turn(1, 0.4 * numpy.pi)
fp.turn(1, "ll", 0.15, 0)
fp.turn(0.5, "r", [0.1, 0.05, 0.1], [0.15, 0, -0.15])
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=20, ends="round", tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=21, ends="extended", tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=22, ends=(0.1, 0.2), tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=23, ends="smooth", tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 6)], 0.8, layer=10, corners="round", ends="round", tolerance=1e-5
)
fp.segment((1, 0), 0.1, relative=True)
fp.segment((0, 1), 0.8, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 6)], 0.8, layer=11, corners="smooth", ends="extended", tolerance=1e-5
)
fp.segment((1, 0), 0.1, relative=True)
fp.segment((0, 1), 0.8, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 6)], 0.8, layer=12, corners="smooth", ends="smooth", tolerance=1e-5
)
fp.segment((1, 0), 0.1, relative=True)
fp.segment((0, 1), 0.8, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 8)], 0.1, layer=13, corners="round", ends="round", tolerance=1e-5
)
fp.segment((1, 0), 0.8, relative=True)
fp.segment((0, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 8)], 0.1, layer=14, corners="smooth", ends=(0.2, 0.2), tolerance=1e-5
)
fp.segment((1, 0), 0.8, relative=True)
fp.segment((0, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 8)], 0.1, layer=15, corners="round", ends="smooth", tolerance=1e-5
)
fp.segment((1, 0), 0.8, relative=True)
fp.segment((0, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(5, 2)], [0.05, 0.1, 0.2], [-0.2, 0, 0.4], layer=[4, 5, 6])
fp.parametric(lambda u: numpy.array((5.5 + 3 * u, 2 + 3 * u ** 2)), relative=False)
fp.segment((0, 1), relative=True)
fp.parametric(
lambda u: numpy.array(
(2 * numpy.cos(0.5 * numpy.pi * u) - 2, 3 * numpy.sin(0.5 * numpy.pi * u))
),
[0.2, 0.1, 0.05],
[-0.3, 0, 0.3],
)
fp.parametric(lambda u: numpy.array((-2 * u, 0)), 0.1, 0.2)
fp.bezier([(-3, 0), (-2, -3), (0, -4), (0, -5)], offset=[-0.2, 0, 0.2])
fp.bezier(
[(5, 0), (1, -1), (1, 5), (3, 2), (5, 2)],
[0.05, 0.1, 0.2],
[-0.2, 0, 0.4],
relative=False,
)
cell.add(fp)
fp = gdspy.FlexPath([(2, -1)], 0.1, layer=7, tolerance=1e-5, max_points=0)
fp.smooth(
[(1, 0), (1, -1), (0, -1)],
angles=[numpy.pi / 3, None, -2 / 3.0 * numpy.pi, None],
cycle=True,
)
cell.add(fp)
fp = gdspy.FlexPath([(2.5, -1.5)], 0.1, layer=8)
fp.smooth(
[(3, -1.5), (4, -2), (5, -1), (6, -2), (7, -1.5), (7.5, -1.5)],
relative=False,
width=0.2,
)
cell.add(fp)
cell = lib.new_cell("FlexPath5")
fp = gdspy.FlexPath([(0, 0)], [2, 1, 1], 5)
fp.segment((15, 20))
fp.scale(0.7)
fp.turn(10, "r")
fp.transform((10, 0), -1.5, 1.5, x_reflection=True)
fp.segment((10, -10), relative=True)
fp.rotate(-0.7)
fp.translate(50, 30)
fp.segment((-10, 0))
cell.add(fp)
### RobustPath
cell = lib.new_cell("RobustPath1")
rp = gdspy.RobustPath((0, 0), 0.1, layer=[1], gdsii_path=True)
rp.segment((1, 1))
cell.add(rp)
rp = gdspy.RobustPath(
(1, 0),
0.1,
[-0.1, 0.1],
tolerance=1e-5,
ends=["round", "extended"],
layer=[2, 3],
max_points=6,
)
rp.segment((2, 1))
cell.add(rp)
rp = gdspy.RobustPath(
(2, 0), [0.1, 0.2], 0.2, ends=(0.2, 0.1), layer=4, datatype=[1, 1]
)
rp.segment((3, 1))
cell.add(rp)
rp = gdspy.RobustPath(
(3, 0),
[0.1, 0.2, 0.1],
[-0.2, 0, 0.2],
ends=[(0.2, 0.1), "smooth", "flush"],
datatype=5,
)
rp.segment((4, 1))
cell.add(rp)
cell = lib.new_cell("RobustPath2")
rp = gdspy.RobustPath((0, 0), [0.1, 0.2, 0.1], 0.15, layer=[1, 2, 3])
rp.segment((1, 0))
rp.segment((1, 1), 0.1, 0.05)
rp.segment((1, 1), [0.2, 0.1, 0.1], -0.05, True)
rp.segment((-1, 1), 0.2, [-0.2, 0, 0.3], True)
rp.arc(2, 0, 0.5 * numpy.pi)
rp.arc(3, 0.7 * numpy.pi, numpy.pi, 0.1, 0)
rp.arc(2, 0.4 * numpy.pi, -0.4 * numpy.pi, [0.1, 0.2, 0.1], [0.2, 0, -0.2])
rp.turn(1, -0.3 * numpy.pi)
rp.turn(1, "rr", 0.15)
rp.turn(0.5, "l", [0.05, 0.1, 0.05], [0.15, 0, -0.15])
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=20, ends="round", tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=21, ends="extended", tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=22, ends=(0.1, 0.2), tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=23, ends="smooth", tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 6), 0.8, layer=10, ends="round", tolerance=1e-5)
rp.segment((1, 0), 0.1, relative=True)
rp.segment((0, 1), 0.8, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 6), 0.8, layer=11, ends="extended", tolerance=1e-5)
rp.segment((1, 0), 0.1, relative=True)
rp.segment((0, 1), 0.8, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 6), 0.8, layer=12, ends="smooth", tolerance=1e-5)
rp.segment((1, 0), 0.1, relative=True)
rp.segment((0, 1), 0.8, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 8), 0.1, layer=13, ends="round", tolerance=1e-5)
rp.segment((1, 0), 0.8, relative=True)
rp.segment((0, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 8), 0.1, layer=14, ends=(0.2, 0.2), tolerance=1e-5)
rp.segment((1, 0), 0.8, relative=True)
rp.segment((0, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 8), 0.1, layer=15, ends="smooth", tolerance=1e-5)
rp.segment((1, 0), 0.8, relative=True)
rp.segment((0, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((5, 2), [0.05, 0.1, 0.2], [-0.2, 0, 0.4], layer=[4, 5, 6])
rp.parametric(lambda u: numpy.array((5.5 + 3 * u, 2 + 3 * u ** 2)), relative=False)
rp.segment((0, 1), relative=True)
rp.parametric(
lambda u: numpy.array(
(2 * numpy.cos(0.5 * numpy.pi * u) - 2, 3 * numpy.sin(0.5 * numpy.pi * u))
),
width=[0.2, 0.1, 0.05],
offset=[-0.3, 0, 0.3],
)
rp.parametric(lambda u: numpy.array((-2 * u, 0)), width=0.1, offset=0.2)
rp.bezier([(-3, 0), (-2, -3), (0, -4), (0, -5)], offset=[-0.2, 0, 0.2])
rp.bezier(
[(4.5, 0), (1, -1), (1, 5), (3, 2), (5, 2)],
width=[0.05, 0.1, 0.2],
offset=[-0.2, 0, 0.4],
relative=False,
)
cell.add(rp)
rp = gdspy.RobustPath((2, -1), 0.1, layer=7, tolerance=1e-4, max_points=0)
rp.smooth(
[(1, 0), (1, -1), (0, -1)],
angles=[numpy.pi / 3, None, -2 / 3.0 * numpy.pi, None],
cycle=True,
)
cell.add(rp)
rp = gdspy.RobustPath((2.5, -1.5), 0.1, layer=8)
rp.smooth(
[(3, -1.5), (4, -2), (5, -1), (6, -2), (7, -1.5), (7.5, -1.5)],
relative=False,
width=0.2,
)
cell.add(rp)
cell = lib.new_cell("RobustPath3")
rp = gdspy.RobustPath((0, 0), 0.1)
rp.parametric(
lambda u: numpy.array((3 * numpy.sin(numpy.pi * u), -3 * numpy.cos(numpy.pi * u))),
relative=False,
)
rp.parametric(
lambda u: numpy.array(
(3.5 - 3 * numpy.cos(numpy.pi * u), -0.5 + 3 * numpy.sin(numpy.pi * u))
),
lambda u: numpy.array((numpy.sin(numpy.pi * u), numpy.cos(numpy.pi * u))),
relative=True,
)
cell.add(rp)
cell = lib.new_cell("RobustPath4")
rp = gdspy.FlexPath([(0, 0)], [2, 1, 1], 5)
rp.segment((15, 20))
rp.scale(0.7)
rp.turn(10, "r")
rp.transform((10, 0), -1.5, 1.5, x_reflection=True)
rp.segment((10, -10), relative=True)
rp.rotate(-0.7)
rp.translate(50, 30)
rp.segment((-10, 0))
cell.add(rp)
### Curve
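# The Hobby* cells exercise gdspy.Curve.i(), the smooth interpolation based on Hobby's algorithm;
# the angles, curl_start/curl_end and t_in/t_out arguments constrain tangent directions, end curl
# and tension at the interpolated points.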
cell = lib.new_cell("Hobby1")
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)])
cell.add(gdspy.Polygon(c.get_points(), layer=1))
c = gdspy.Curve(2, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[numpy.pi / 3, None, None, None])
cell.add(gdspy.Polygon(c.get_points(), layer=3))
c = gdspy.Curve(4, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, None, 2 / 3.0 * numpy.pi])
cell.add(gdspy.Polygon(c.get_points(), layer=5))
c = gdspy.Curve(0, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[numpy.pi / 3, None, None, 3 / 4.0 * numpy.pi])
cell.add(gdspy.Polygon(c.get_points(), layer=7))
c = gdspy.Curve(2, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, numpy.pi / 2, None])
cell.add(gdspy.Polygon(c.get_points(), layer=9))
c = gdspy.Curve(4, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, None])
cell.add(gdspy.Polygon(c.get_points(), layer=11))
c = gdspy.Curve(0, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, -numpy.pi / 2])
cell.add(gdspy.Polygon(c.get_points(), layer=13))
c = gdspy.Curve(2, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, -numpy.pi, -numpy.pi / 2])
cell.add(gdspy.Polygon(c.get_points(), layer=15))
c = gdspy.Curve(4, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[-numpy.pi / 4, 0, numpy.pi / 2, -numpy.pi])
cell.add(gdspy.Polygon(c.get_points(), layer=17))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=2))
c = gdspy.Curve(2, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[numpy.pi / 3, None, None, None], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=4))
c = gdspy.Curve(4, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, None, 2 / 3.0 * numpy.pi], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=6))
c = gdspy.Curve(0, 2, tolerance=1e-3)
c.i(
[(1, 0), (1, 1), (0, 1)],
angles=[numpy.pi / 3, None, None, 3 / 4.0 * numpy.pi],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=8))
c = gdspy.Curve(2, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, numpy.pi / 2, None], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=10))
c = gdspy.Curve(4, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, None], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=12))
c = gdspy.Curve(0, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, -numpy.pi / 2], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=14))
c = gdspy.Curve(2, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, -numpy.pi, -numpy.pi / 2], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=16))
c = gdspy.Curve(4, 4, tolerance=1e-3)
c.i(
[(1, 0), (1, 1), (0, 1)],
angles=[-numpy.pi / 4, 0, numpy.pi / 2, -numpy.pi],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=18))
cell = lib.new_cell("Hobby2")
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)])
cell.add(gdspy.Polygon(c.get_points(), layer=1))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], curl_start=0)
cell.add(gdspy.Polygon(c.get_points(), layer=2))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], curl_end=0)
cell.add(gdspy.Polygon(c.get_points(), layer=3))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], curl_start=0, curl_end=0)
cell.add(gdspy.Polygon(c.get_points(), layer=4))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[numpy.pi / 2, None, None, None, -numpy.pi / 2],
curl_start=0,
curl_end=0,
)
cell.add(gdspy.Polygon(c.get_points(), layer=5))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[None, 0, None, 0, None],
curl_start=0,
curl_end=1,
)
cell.add(gdspy.Polygon(c.get_points(), layer=6))
cell = lib.new_cell("Hobby3")
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)])
cell.add(gdspy.Polygon(c.get_points(), layer=1))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=2)
cell.add(gdspy.Polygon(c.get_points(), layer=2))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_out=2)
cell.add(gdspy.Polygon(c.get_points(), layer=3))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=2, t_out=2)
cell.add(gdspy.Polygon(c.get_points(), layer=4))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=[2, 1, 1, 1, 1], t_out=[1, 1, 1, 1, 2])
cell.add(gdspy.Polygon(c.get_points(), layer=5))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=[1, 1, 2, 1, 1], t_out=[1, 2, 1, 1, 1])
cell.add(gdspy.Polygon(c.get_points(), layer=6))
cell = lib.new_cell("Hobby4")
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=10))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
t_in=[2, 1, 1, 1, 1],
t_out=[1, 1, 1, 1, 2],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=11))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
t_in=[1, 1, 2, 1, 1],
t_out=[1, 2, 1, 1, 1],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=12))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[numpy.pi * 3 / 4.0, None, None, None, -numpy.pi * 3 / 4.0],
t_in=[2, 1, 1, 1, 1],
t_out=[1, 1, 1, 1, 2],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=13))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[numpy.pi * 3 / 4.0, None, None, None, -numpy.pi * 3 / 4.0],
t_in=[1, 1, 1, 1, 1],
t_out=[1, 1, 1, 1, 1],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=14))
### END
lib.write_gds("tests/test.gds")
gdspy.LayoutViewer(lib)
| ######################################################################
# #
# Copyright 2009 <NAME>. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
import gdspy
import numpy
lib = gdspy.GdsLibrary("TESTLIB", unit=1, precision=1e-7)
### PolygonSet
cell = lib.new_cell("PolygonSet")
p = gdspy.PolygonSet(
[
[(10, 0), (11, 0), (10, 1)],
[(11, 0), (10, 1), (11, 1)],
[(11, 1), (12, 1), (11, 2)],
],
1,
2,
)
cell.add(p)
cell = lib.new_cell("PolygonSet_fillet")
orig = gdspy.PolygonSet(
[
[
(0, 0),
(-1, 0),
(0, -1),
(0.5, -0.5),
(1, 0),
(1, 1),
(4, -1),
(1, 3),
(1, 2),
(0, 1),
],
[(2, -1), (3, -1), (2.5, -2)],
]
)
orig.datatypes = [0, 1]
p = gdspy.copy(orig, 0, 5)
p.layers = [1, 1]
p.fillet(0.3, max_points=0)
cell.add(p)
p = gdspy.copy(orig, 5, 5)
p.layers = [2, 2]
p.fillet([0.3, 0.2, 0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.4, 0.1, 0.2, 0], max_points=0)
cell.add(p)
p = gdspy.copy(orig, 5, 0)
p.layers = [3, 3]
p.fillet(
[[0.1, 0.1, 0.4, 0, 0.4, 0.1, 0.1, 0.4, 0.4, 0.1], [0.2, 0.2, 0.5]], max_points=0
)
cell.add(p)
p = gdspy.copy(orig, 0, 0)
p.layers = [4, 4]
p.fillet([0.8, [10.0, 10.0, 20.0]], max_points=199, precision=1e-6)
cell.add(p)
### FlexPath
def broken(p0, v0, p1, v1, p2, w):
den = v1[1] * v0[0] - v1[0] * v0[1]
lim = 1e-12 * (v0[0] ** 2 + v0[1] ** 2) * (v1[0] ** 2 + v1[1] ** 2)
if den ** 2 < lim:
u0 = u1 = 0
p = 0.5 * (p0 + p1)
else:
dx = p1[0] - p0[0]
dy = p1[1] - p0[1]
u0 = (v1[1] * dx - v1[0] * dy) / den
u1 = (v0[1] * dx - v0[0] * dy) / den
p = 0.5 * (p0 + v0 * u0 + p1 + v1 * u1)
if u0 <= 0 and u1 >= 0:
return [p]
return [p0, p2, p1]
def pointy(p0, v0, p1, v1):
r = 0.5 * numpy.sqrt(numpy.sum((p0 - p1) ** 2))
v0 /= numpy.sqrt(numpy.sum(v0 ** 2))
v1 /= numpy.sqrt(numpy.sum(v1 ** 2))
return [p0, 0.5 * (p0 + p1) + 0.5 * (v0 - v1) * r, p1]
cell = lib.new_cell("FlexPath1")
fp = gdspy.FlexPath([(0, 0), (1, 1)], 0.1, layer=[1], gdsii_path=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(1, 0), (2, 1)],
0.1,
[-0.1, 0.1],
tolerance=1e-5,
ends=["round", "extended"],
layer=[2, 3],
max_points=6,
)
cell.add(fp)
fp = gdspy.FlexPath(
[(2, 0), (3, 1)], [0.1, 0.2], 0.2, ends=(0.2, 0.1), layer=4, datatype=[1, 1]
)
cell.add(fp)
fp = gdspy.FlexPath(
[(3, 0), (4, 1)],
[0.1, 0.2, 0.1],
[-0.2, 0, 0.2],
ends=[(0.2, 0.1), "smooth", pointy],
datatype=5,
)
cell.add(fp)
cell = lib.new_cell("FlexPath2")
fp = gdspy.FlexPath(
[(0, 0), (0.5, 0), (1, 0), (1, 1), (0, 1), (-1, -2), (-2, 0)],
0.05,
[0, -0.1, 0, 0.1],
corners=["natural", "circular bend", "circular bend", "circular bend"],
ends=["flush", "extended", (0.1, 0.2), "round"],
tolerance=1e-4,
layer=[0, 1, 1, 2],
bend_radius=[0, 0.3, 0.3, 0.2],
max_points=10,
)
cell.add(fp)
cell = lib.new_cell("FlexPath3")
pts = numpy.array(
[
(0, 0),
(0.5, 0),
(1, 0),
(1, 2),
(3, 0),
(2, -1),
(2, -2),
(0, -1),
(1, -2),
(1, -3),
]
)
fp = gdspy.FlexPath(
pts + numpy.array((0, 5)),
[0.1, 0.1, 0.1],
0.15,
layer=[1, 2, 3],
corners=["natural", "miter", "bevel"],
ends=(0.5, 0),
)
cell.add(fp)
fp = gdspy.FlexPath(
pts + numpy.array((5, 0)),
[0.1, 0.1, 0.1],
0.15,
layer=[4, 5, 6],
corners=["round", "smooth", broken],
ends=[pointy, "smooth", (0, 0.5)],
)
cell.add(fp)
cell = lib.new_cell("FlexPath4")
fp = gdspy.FlexPath(
[(0, 0)],
[0.1, 0.2, 0.1],
0.15,
layer=[1, 2, 3],
corners=["natural", "miter", "bevel"],
)
fp.segment((1, 0))
fp.segment((1, 1), 0.1, 0.05)
fp.segment((1, 1), [0.2, 0.1, 0.1], -0.05, True)
fp.segment((-1, 1), 0.2, [-0.2, 0, 0.3], True)
fp.arc(2, 0, 0.5 * numpy.pi)
fp.arc(3, 0.5 * numpy.pi, numpy.pi, 0.1, 0)
fp.arc(1, 0.4 * numpy.pi, -0.4 * numpy.pi, [0.1, 0.2, 0.1], [0.2, 0, -0.2])
fp.turn(1, 0.4 * numpy.pi)
fp.turn(1, "ll", 0.15, 0)
fp.turn(0.5, "r", [0.1, 0.05, 0.1], [0.15, 0, -0.15])
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=20, ends="round", tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=21, ends="extended", tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=22, ends=(0.1, 0.2), tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(-5, 6)], 0.8, layer=23, ends="smooth", tolerance=1e-4)
fp.segment((1, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 6)], 0.8, layer=10, corners="round", ends="round", tolerance=1e-5
)
fp.segment((1, 0), 0.1, relative=True)
fp.segment((0, 1), 0.8, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 6)], 0.8, layer=11, corners="smooth", ends="extended", tolerance=1e-5
)
fp.segment((1, 0), 0.1, relative=True)
fp.segment((0, 1), 0.8, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 6)], 0.8, layer=12, corners="smooth", ends="smooth", tolerance=1e-5
)
fp.segment((1, 0), 0.1, relative=True)
fp.segment((0, 1), 0.8, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 8)], 0.1, layer=13, corners="round", ends="round", tolerance=1e-5
)
fp.segment((1, 0), 0.8, relative=True)
fp.segment((0, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 8)], 0.1, layer=14, corners="smooth", ends=(0.2, 0.2), tolerance=1e-5
)
fp.segment((1, 0), 0.8, relative=True)
fp.segment((0, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath(
[(-3, 8)], 0.1, layer=15, corners="round", ends="smooth", tolerance=1e-5
)
fp.segment((1, 0), 0.8, relative=True)
fp.segment((0, 1), 0.1, relative=True)
cell.add(fp)
fp = gdspy.FlexPath([(5, 2)], [0.05, 0.1, 0.2], [-0.2, 0, 0.4], layer=[4, 5, 6])
fp.parametric(lambda u: numpy.array((5.5 + 3 * u, 2 + 3 * u ** 2)), relative=False)
fp.segment((0, 1), relative=True)
fp.parametric(
lambda u: numpy.array(
(2 * numpy.cos(0.5 * numpy.pi * u) - 2, 3 * numpy.sin(0.5 * numpy.pi * u))
),
[0.2, 0.1, 0.05],
[-0.3, 0, 0.3],
)
fp.parametric(lambda u: numpy.array((-2 * u, 0)), 0.1, 0.2)
fp.bezier([(-3, 0), (-2, -3), (0, -4), (0, -5)], offset=[-0.2, 0, 0.2])
fp.bezier(
[(5, 0), (1, -1), (1, 5), (3, 2), (5, 2)],
[0.05, 0.1, 0.2],
[-0.2, 0, 0.4],
relative=False,
)
cell.add(fp)
fp = gdspy.FlexPath([(2, -1)], 0.1, layer=7, tolerance=1e-5, max_points=0)
fp.smooth(
[(1, 0), (1, -1), (0, -1)],
angles=[numpy.pi / 3, None, -2 / 3.0 * numpy.pi, None],
cycle=True,
)
cell.add(fp)
fp = gdspy.FlexPath([(2.5, -1.5)], 0.1, layer=8)
fp.smooth(
[(3, -1.5), (4, -2), (5, -1), (6, -2), (7, -1.5), (7.5, -1.5)],
relative=False,
width=0.2,
)
cell.add(fp)
cell = lib.new_cell("FlexPath5")
fp = gdspy.FlexPath([(0, 0)], [2, 1, 1], 5)
fp.segment((15, 20))
fp.scale(0.7)
fp.turn(10, "r")
fp.transform((10, 0), -1.5, 1.5, x_reflection=True)
fp.segment((10, -10), relative=True)
fp.rotate(-0.7)
fp.translate(50, 30)
fp.segment((-10, 0))
cell.add(fp)
### RobustPath
cell = lib.new_cell("RobustPath1")
rp = gdspy.RobustPath((0, 0), 0.1, layer=[1], gdsii_path=True)
rp.segment((1, 1))
cell.add(rp)
rp = gdspy.RobustPath(
(1, 0),
0.1,
[-0.1, 0.1],
tolerance=1e-5,
ends=["round", "extended"],
layer=[2, 3],
max_points=6,
)
rp.segment((2, 1))
cell.add(rp)
rp = gdspy.RobustPath(
(2, 0), [0.1, 0.2], 0.2, ends=(0.2, 0.1), layer=4, datatype=[1, 1]
)
rp.segment((3, 1))
cell.add(rp)
rp = gdspy.RobustPath(
(3, 0),
[0.1, 0.2, 0.1],
[-0.2, 0, 0.2],
ends=[(0.2, 0.1), "smooth", "flush"],
datatype=5,
)
rp.segment((4, 1))
cell.add(rp)
cell = lib.new_cell("RobustPath2")
rp = gdspy.RobustPath((0, 0), [0.1, 0.2, 0.1], 0.15, layer=[1, 2, 3])
rp.segment((1, 0))
rp.segment((1, 1), 0.1, 0.05)
rp.segment((1, 1), [0.2, 0.1, 0.1], -0.05, True)
rp.segment((-1, 1), 0.2, [-0.2, 0, 0.3], True)
rp.arc(2, 0, 0.5 * numpy.pi)
rp.arc(3, 0.7 * numpy.pi, numpy.pi, 0.1, 0)
rp.arc(2, 0.4 * numpy.pi, -0.4 * numpy.pi, [0.1, 0.2, 0.1], [0.2, 0, -0.2])
rp.turn(1, -0.3 * numpy.pi)
rp.turn(1, "rr", 0.15)
rp.turn(0.5, "l", [0.05, 0.1, 0.05], [0.15, 0, -0.15])
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=20, ends="round", tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=21, ends="extended", tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=22, ends=(0.1, 0.2), tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-5, 6), 0.8, layer=23, ends="smooth", tolerance=1e-4)
rp.segment((1, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 6), 0.8, layer=10, ends="round", tolerance=1e-5)
rp.segment((1, 0), 0.1, relative=True)
rp.segment((0, 1), 0.8, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 6), 0.8, layer=11, ends="extended", tolerance=1e-5)
rp.segment((1, 0), 0.1, relative=True)
rp.segment((0, 1), 0.8, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 6), 0.8, layer=12, ends="smooth", tolerance=1e-5)
rp.segment((1, 0), 0.1, relative=True)
rp.segment((0, 1), 0.8, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 8), 0.1, layer=13, ends="round", tolerance=1e-5)
rp.segment((1, 0), 0.8, relative=True)
rp.segment((0, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 8), 0.1, layer=14, ends=(0.2, 0.2), tolerance=1e-5)
rp.segment((1, 0), 0.8, relative=True)
rp.segment((0, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((-3, 8), 0.1, layer=15, ends="smooth", tolerance=1e-5)
rp.segment((1, 0), 0.8, relative=True)
rp.segment((0, 1), 0.1, relative=True)
cell.add(rp)
rp = gdspy.RobustPath((5, 2), [0.05, 0.1, 0.2], [-0.2, 0, 0.4], layer=[4, 5, 6])
rp.parametric(lambda u: numpy.array((5.5 + 3 * u, 2 + 3 * u ** 2)), relative=False)
rp.segment((0, 1), relative=True)
rp.parametric(
lambda u: numpy.array(
(2 * numpy.cos(0.5 * numpy.pi * u) - 2, 3 * numpy.sin(0.5 * numpy.pi * u))
),
width=[0.2, 0.1, 0.05],
offset=[-0.3, 0, 0.3],
)
rp.parametric(lambda u: numpy.array((-2 * u, 0)), width=0.1, offset=0.2)
rp.bezier([(-3, 0), (-2, -3), (0, -4), (0, -5)], offset=[-0.2, 0, 0.2])
rp.bezier(
[(4.5, 0), (1, -1), (1, 5), (3, 2), (5, 2)],
width=[0.05, 0.1, 0.2],
offset=[-0.2, 0, 0.4],
relative=False,
)
cell.add(rp)
rp = gdspy.RobustPath((2, -1), 0.1, layer=7, tolerance=1e-4, max_points=0)
rp.smooth(
[(1, 0), (1, -1), (0, -1)],
angles=[numpy.pi / 3, None, -2 / 3.0 * numpy.pi, None],
cycle=True,
)
cell.add(rp)
rp = gdspy.RobustPath((2.5, -1.5), 0.1, layer=8)
rp.smooth(
[(3, -1.5), (4, -2), (5, -1), (6, -2), (7, -1.5), (7.5, -1.5)],
relative=False,
width=0.2,
)
cell.add(rp)
cell = lib.new_cell("RobustPath3")
rp = gdspy.RobustPath((0, 0), 0.1)
rp.parametric(
lambda u: numpy.array((3 * numpy.sin(numpy.pi * u), -3 * numpy.cos(numpy.pi * u))),
relative=False,
)
rp.parametric(
lambda u: numpy.array(
(3.5 - 3 * numpy.cos(numpy.pi * u), -0.5 + 3 * numpy.sin(numpy.pi * u))
),
lambda u: numpy.array((numpy.sin(numpy.pi * u), numpy.cos(numpy.pi * u))),
relative=True,
)
cell.add(rp)
cell = lib.new_cell("RobustPath4")
rp = gdspy.FlexPath([(0, 0)], [2, 1, 1], 5)
rp.segment((15, 20))
rp.scale(0.7)
rp.turn(10, "r")
rp.transform((10, 0), -1.5, 1.5, x_reflection=True)
rp.segment((10, -10), relative=True)
rp.rotate(-0.7)
rp.translate(50, 30)
rp.segment((-10, 0))
cell.add(rp)
### Curve
cell = lib.new_cell("Hobby1")
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)])
cell.add(gdspy.Polygon(c.get_points(), layer=1))
c = gdspy.Curve(2, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[numpy.pi / 3, None, None, None])
cell.add(gdspy.Polygon(c.get_points(), layer=3))
c = gdspy.Curve(4, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, None, 2 / 3.0 * numpy.pi])
cell.add(gdspy.Polygon(c.get_points(), layer=5))
c = gdspy.Curve(0, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[numpy.pi / 3, None, None, 3 / 4.0 * numpy.pi])
cell.add(gdspy.Polygon(c.get_points(), layer=7))
c = gdspy.Curve(2, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, numpy.pi / 2, None])
cell.add(gdspy.Polygon(c.get_points(), layer=9))
c = gdspy.Curve(4, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, None])
cell.add(gdspy.Polygon(c.get_points(), layer=11))
c = gdspy.Curve(0, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, -numpy.pi / 2])
cell.add(gdspy.Polygon(c.get_points(), layer=13))
c = gdspy.Curve(2, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, -numpy.pi, -numpy.pi / 2])
cell.add(gdspy.Polygon(c.get_points(), layer=15))
c = gdspy.Curve(4, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[-numpy.pi / 4, 0, numpy.pi / 2, -numpy.pi])
cell.add(gdspy.Polygon(c.get_points(), layer=17))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=2))
c = gdspy.Curve(2, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[numpy.pi / 3, None, None, None], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=4))
c = gdspy.Curve(4, 0, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, None, 2 / 3.0 * numpy.pi], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=6))
c = gdspy.Curve(0, 2, tolerance=1e-3)
c.i(
[(1, 0), (1, 1), (0, 1)],
angles=[numpy.pi / 3, None, None, 3 / 4.0 * numpy.pi],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=8))
c = gdspy.Curve(2, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, None, numpy.pi / 2, None], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=10))
c = gdspy.Curve(4, 2, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, None], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=12))
c = gdspy.Curve(0, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, None, -numpy.pi / 2], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=14))
c = gdspy.Curve(2, 4, tolerance=1e-3)
c.i([(1, 0), (1, 1), (0, 1)], angles=[None, 0, -numpy.pi, -numpy.pi / 2], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=16))
c = gdspy.Curve(4, 4, tolerance=1e-3)
c.i(
[(1, 0), (1, 1), (0, 1)],
angles=[-numpy.pi / 4, 0, numpy.pi / 2, -numpy.pi],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=18))
cell = lib.new_cell("Hobby2")
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)])
cell.add(gdspy.Polygon(c.get_points(), layer=1))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], curl_start=0)
cell.add(gdspy.Polygon(c.get_points(), layer=2))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], curl_end=0)
cell.add(gdspy.Polygon(c.get_points(), layer=3))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], curl_start=0, curl_end=0)
cell.add(gdspy.Polygon(c.get_points(), layer=4))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[numpy.pi / 2, None, None, None, -numpy.pi / 2],
curl_start=0,
curl_end=0,
)
cell.add(gdspy.Polygon(c.get_points(), layer=5))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[None, 0, None, 0, None],
curl_start=0,
curl_end=1,
)
cell.add(gdspy.Polygon(c.get_points(), layer=6))
cell = lib.new_cell("Hobby3")
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)])
cell.add(gdspy.Polygon(c.get_points(), layer=1))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=2)
cell.add(gdspy.Polygon(c.get_points(), layer=2))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_out=2)
cell.add(gdspy.Polygon(c.get_points(), layer=3))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=2, t_out=2)
cell.add(gdspy.Polygon(c.get_points(), layer=4))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=[2, 1, 1, 1, 1], t_out=[1, 1, 1, 1, 2])
cell.add(gdspy.Polygon(c.get_points(), layer=5))
c = gdspy.Curve(0, 0, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], t_in=[1, 1, 2, 1, 1], t_out=[1, 2, 1, 1, 1])
cell.add(gdspy.Polygon(c.get_points(), layer=6))
cell = lib.new_cell("Hobby4")
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i([(1, 2), (2, 1), (3, 2), (4, 0)], cycle=True)
cell.add(gdspy.Polygon(c.get_points(), layer=10))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
t_in=[2, 1, 1, 1, 1],
t_out=[1, 1, 1, 1, 2],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=11))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
t_in=[1, 1, 2, 1, 1],
t_out=[1, 2, 1, 1, 1],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=12))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[numpy.pi * 3 / 4.0, None, None, None, -numpy.pi * 3 / 4.0],
t_in=[2, 1, 1, 1, 1],
t_out=[1, 1, 1, 1, 2],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=13))
c = gdspy.Curve(0, 3, tolerance=1e-3)
c.i(
[(1, 2), (2, 1), (3, 2), (4, 0)],
angles=[numpy.pi * 3 / 4.0, None, None, None, -numpy.pi * 3 / 4.0],
t_in=[1, 1, 1, 1, 1],
t_out=[1, 1, 1, 1, 1],
cycle=True,
)
cell.add(gdspy.Polygon(c.get_points(), layer=14))
### END
lib.write_gds("tests/test.gds")
gdspy.LayoutViewer(lib)
| de | 0.338759 | ###################################################################### # # # Copyright 2009 <NAME>. # # This file is part of gdspy, distributed under the terms of the # # Boost Software License - Version 1.0. See the accompanying # # LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> # # # ###################################################################### ### PolygonSet ### FlexPath ### RobustPath ### Curve ### END | 1.934171 | 2 |
ALDS1/12c.py | ToshikiShimizu/AOJ | 0 | 6631038 | import heapq
MAX = 10000
INFTY = 1<<20
WHITE = 0
GRAY = 1
BLACK = 2
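# Dijkstra's single-source shortest paths with a binary heap and lazy deletion of stale entries.
# The file name suggests AOJ problem ALDS1_12_C: input is n, then one line per vertex u with its
# degree k followed by k (neighbor, weight) pairs; output is "u shortest_distance" per vertex.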
def dijkstra(n, adj):
PQ = []
color = [None for i in range(n)]
d = [None for i in range(n)]
for i in range(n):
d[i] = INFTY
color[i] = WHITE
d[0] = 0
heapq.heappush(PQ,(0,0))
color[0] = GRAY
while(len(PQ)>0):
f = heapq.heappop(PQ)
u = f[1]
color[u] = BLACK
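        # heap entries pushed before a shorter distance was found are stale and skipped below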
if (d[u] < f[0]):
continue
for j in range(len(adj[u])):
v = adj[u][j][0]
if color[v]==BLACK:
continue
if (d[v]>d[u]+adj[u][j][1]):
d[v] = d[u] + adj[u][j][1]
heapq.heappush(PQ,(d[v],v))
color[v] = GRAY
for i in range(n):
if d[i] == INFTY:
tmp = -1
else:
tmp = d[i]
print (str(i)+" "+str(tmp))
if __name__=="__main__":
adj = [[] for i in range(MAX)]
n = int(input())
for i in range(n):
ls = list(map(int,input().split()))
u = ls[0]
k = ls[1]
for j in range(k):
adj[u].append([ls[2+2*j],ls[2+2*j+1]])
dijkstra(n, adj)
| import heapq
MAX = 10000
INFTY = 1<<20
WHITE = 0
GRAY = 1
BLACK = 2
def dijkstra(n, adj):
PQ = []
color = [None for i in range(n)]
d = [None for i in range(n)]
for i in range(n):
d[i] = INFTY
color[i] = WHITE
d[0] = 0
heapq.heappush(PQ,(0,0))
color[0] = GRAY
while(len(PQ)>0):
f = heapq.heappop(PQ)
u = f[1]
color[u] = BLACK
if (d[u] < f[0]):
continue
for j in range(len(adj[u])):
v = adj[u][j][0]
if color[v]==BLACK:
continue
if (d[v]>d[u]+adj[u][j][1]):
d[v] = d[u] + adj[u][j][1]
heapq.heappush(PQ,(d[v],v))
color[v] = GRAY
for i in range(n):
if d[i] == INFTY:
tmp = -1
else:
tmp = d[i]
print (str(i)+" "+str(tmp))
if __name__=="__main__":
adj = [[] for i in range(MAX)]
n = int(input())
for i in range(n):
ls = list(map(int,input().split()))
u = ls[0]
k = ls[1]
for j in range(k):
adj[u].append([ls[2+2*j],ls[2+2*j+1]])
dijkstra(n, adj) | none | 1 | 3.303034 | 3 |
|
eveIntel/limboRun.py | Marclass/EveIntel | 0 | 6631039 | <reponame>Marclass/EveIntel
#!/usr/bin/env python
from limbo.limbo import main
import argparse
import sys
def runSlack(token):
parser = argparse.ArgumentParser(description="Run the limbo chatbot for Slack")
parser.add_argument('--test', '-t', dest='test', action='store_true', required=False, help='Enter command line mode to enter a limbo repl')
parser.add_argument('--hook', dest='hook', action='store', default='message',
help='Specify the hook to test. (Defaults to "message")')
parser.add_argument('-c', dest="command", help='run a single command')
parser.add_argument('--database', '-d', dest='database_name', default='D:\\sqlite3\\SlackBotDB\\limbo.sqlite3',
help="Where to store the limbo sqlite database. Defaults to limbo.sqlite")
parser.add_argument('--pluginpath', '-pp', dest='pluginpath', default="C:\\Python27\\Lib\\limbo\\plugins",
help="The path where limbo should look to find its plugins")
#if(token and token!=""):
parser.add_argument('--token','-tk', dest='token', default=token, help="Token to use instead of environ var")
args = parser.parse_args()
main(args)
while(True):
try:
runSlack("")
except Exception as e:
#e = sys.exc_info()[0]
print("Exception: "+str(e))
| #!/usr/bin/env python
from limbo.limbo import main
import argparse
import sys
def runSlack(token):
parser = argparse.ArgumentParser(description="Run the limbo chatbot for Slack")
parser.add_argument('--test', '-t', dest='test', action='store_true', required=False, help='Enter command line mode to enter a limbo repl')
parser.add_argument('--hook', dest='hook', action='store', default='message',
help='Specify the hook to test. (Defaults to "message")')
parser.add_argument('-c', dest="command", help='run a single command')
parser.add_argument('--database', '-d', dest='database_name', default='D:\\sqlite3\\SlackBotDB\\limbo.sqlite3',
help="Where to store the limbo sqlite database. Defaults to limbo.sqlite")
parser.add_argument('--pluginpath', '-pp', dest='pluginpath', default="C:\\Python27\\Lib\\limbo\\plugins",
help="The path where limbo should look to find its plugins")
#if(token and token!=""):
parser.add_argument('--token','-tk', dest='token', default=token, help="Token to use instead of environ var")
args = parser.parse_args()
main(args)
while(True):
try:
runSlack("")
except Exception as e:
#e = sys.exc_info()[0]
print("Exception: "+str(e)) | en | 0.372037 | #!/usr/bin/env python #if(token and token!=""): #e = sys.exc_info()[0] | 2.634441 | 3 |
catalog.py | KtQiu/mini_sql_python | 0 | 6631040 | import six
import sys
import pickle
import os
class col(object):
r'''
class for col information
@param:
col_name: the name of the col
attr: attribution, default is int
Generally, we have 'int', 'char(n)' and 'float'
is_unique: the data is unique or notz
'''
def __init__(self, col_name=None, attr='int', is_unique=0, data=None):
super(col).__init__()
self.col_name = col_name
self.attr = attr
self.is_unique = is_unique
if data == None:
data = []
self.data = data
# self.data = (data == None ? list(): data)
def set_attr(self, attr):
self.attr = attr
def set_is_unique(self, is_unique):
self.is_unique = is_unique
def set_col_name(self, col_name):
self.col_name = col_name
def add_data(self, data):
if data in self.data and self.is_unique == 1:
print(
'Cannot insert a duplicate data when {} is \'unique\''.
format(self.col_name))
return False
else:
self.data.append(data)
print(self.data)
return True
class table(object):
r'''
class for tabel information
@param:
table_name: the name of table
primary_key: primary key, if not exist, None
col_list: a list containing col class (implemented above)
which covers the information of the col
'''
def __init__(self,
table_name=None,
primary_key=None,
col_list=[],
col_index=[]):
# super(table).__init__()
self.table_name = table_name
self.primary_key = primary_key
self.col_index = col_index
self.col_list = col_list
# self.data = data
def __str__(self):
table_str = ''
        for key, val in six.iteritems(vars(self)):
if table_str:
table_str += '\n'
table_str += key + '=' + str(val)
return self.__class__.__name__ + '\n' + table_str
def set_table_name(self, table_name):
self.table_name = table_name
def set_primary_key(self, key):
self.primary_key = key
def add_col(self, _col):
if _col.col_name not in self.col_index:
self.col_index.append(_col.col_name)
self.col_list.append(_col)
else:
print('Column Redundant')
sys.exit(0)
def drop_col(self, _col):
if _col.col_name in self.col_index:
            idx = self.col_index.index(_col.col_name)
            self.col_index.pop(idx)
            self.col_list.pop(idx)
else:
print('cannot drop a col which does not exist')
sys.exit(0)
class Database(object):
def __init__(self, table_names=[], tables={}):
self.table_names = table_names
self.tables = tables
def __getstate__(self):
# print('====================')
return (self.table_names, self.tables)
def __setstate__(self, state):
(self.table_names, self.tables) = state
def save(self):
with open('database/data.pickle', 'wb') as file:
pickle.dump(self, file, -1)
def load(self):
os.makedirs('./database', exist_ok=True)
try:
with open('database/data.pickle', 'rb') as file:
self = pickle.load(file)
except EOFError:
print("EOFERROR")
except FileNotFoundError:
# print('cannnot find the file! plz new a file named data.pickle')
with open('database/data.pickle', 'wb') as file:
pickle.dump(self, file, -1)
pickle.dump(self.__dict__, file, 1)
return self
def add_table(self, _table):
if _table.table_name in self.table_names:
print(
"Cannot have table_names with the same names. RedundancyError")
sys.exit(0)
else:
self.table_names.append(_table.table_name)
self.tables[_table.table_name] = _table
def drop_table(self, _table_name):
try:
i = self.table_names.index(_table_name)
del self.table_names[i]
del self.tables[_table_name]
except ValueError:
print("Not find such table")
sys.exit(0)
# if _table_name not in self.table_names:
# print("Cannot find table: {} in database".format(
# _table.table_name))
# sys.exit(0)
# else:
# del self.table_names[_table.table_name]
# def self.tables[]
| import six
import sys
import pickle
import os
class col(object):
r'''
class for col information
@param:
col_name: the name of the col
attr: attribution, default is int
Generally, we have 'int', 'char(n)' and 'float'
is_unique: the data is unique or notz
'''
def __init__(self, col_name=None, attr='int', is_unique=0, data=None):
super(col).__init__()
self.col_name = col_name
self.attr = attr
self.is_unique = is_unique
if data == None:
data = []
self.data = data
# self.data = (data == None ? list(): data)
def set_attr(self, attr):
self.attr = attr
def set_is_unique(self, is_unique):
self.is_unique = is_unique
def set_col_name(self, col_name):
self.col_name = col_name
def add_data(self, data):
if data in self.data and self.is_unique == 1:
print(
'Cannot insert a duplicate data when {} is \'unique\''.
format(self.col_name))
return False
else:
self.data.append(data)
print(self.data)
return True
class table(object):
r'''
class for tabel information
@param:
table_name: the name of table
primary_key: primary key, if not exist, None
col_list: a list containing col class (implemented above)
which covers the information of the col
'''
def __init__(self,
table_name=None,
primary_key=None,
col_list=[],
col_index=[]):
# super(table).__init__()
self.table_name = table_name
self.primary_key = primary_key
self.col_index = col_index
self.col_list = col_list
# self.data = data
def __str__(self):
table_str = ''
        for key, val in six.iteritems(vars(self)):
if table_str:
table_str += '\n'
table_str += key + '=' + str(val)
return self.__class__.__name__ + '\n' + table_str
def set_table_name(self, table_name):
self.table_name = table_name
def set_primary_key(self, key):
self.primary_key = key
def add_col(self, _col):
if _col.col_name not in self.col_index:
self.col_index.append(_col.col_name)
self.col_list.append(_col)
else:
print('Column Redundant')
sys.exit(0)
def drop_col(self, _col):
if _col.col_name in self.col_index:
            idx = self.col_index.index(_col.col_name)
            self.col_index.pop(idx)
            self.col_list.pop(idx)
else:
print('cannot drop a col which does not exist')
sys.exit(0)
class Database(object):
def __init__(self, table_names=[], tables={}):
self.table_names = table_names
self.tables = tables
def __getstate__(self):
# print('====================')
return (self.table_names, self.tables)
def __setstate__(self, state):
(self.table_names, self.tables) = state
def save(self):
with open('database/data.pickle', 'wb') as file:
pickle.dump(self, file, -1)
def load(self):
os.makedirs('./database', exist_ok=True)
try:
with open('database/data.pickle', 'rb') as file:
self = pickle.load(file)
except EOFError:
print("EOFERROR")
except FileNotFoundError:
# print('cannnot find the file! plz new a file named data.pickle')
with open('database/data.pickle', 'wb') as file:
pickle.dump(self, file, -1)
pickle.dump(self.__dict__, file, 1)
return self
def add_table(self, _table):
if _table.table_name in self.table_names:
print(
"Cannot have table_names with the same names. RedundancyError")
sys.exit(0)
else:
self.table_names.append(_table.table_name)
self.tables[_table.table_name] = _table
def drop_table(self, _table_name):
try:
i = self.table_names.index(_table_name)
del self.table_names[i]
del self.tables[_table_name]
except ValueError:
print("Not find such table")
sys.exit(0)
# if _table_name not in self.table_names:
# print("Cannot find table: {} in database".format(
# _table.table_name))
# sys.exit(0)
# else:
# del self.table_names[_table.table_name]
# def self.tables[]
| en | 0.479712 | class for col information @param: col_name: the name of the col attr: attribution, default is int Generally, we have 'int', 'char(n)' and 'float' is_unique: the data is unique or notz # self.data = (data == None ? list(): data) class for tabel information @param: table_name: the name of table primary_key: primary key, if not exist, None col_list: a list containing col class (implemented above) which covers the information of the col # super(table).__init__() # self.data = data # print('====================') # print('cannnot find the file! plz new a file named data.pickle') # if _table_name not in self.table_names: # print("Cannot find table: {} in database".format( # _table.table_name)) # sys.exit(0) # else: # del self.table_names[_table.table_name] # def self.tables[] | 3.662374 | 4 |
pms/student/migrations/0009_auto_20190406_0044.py | iammeliodas/pms_django | 0 | 6631041 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-05 19:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_auto_20190405_2356'),
]
operations = [
migrations.RemoveField(
model_name='studentdetails',
name='slug',
),
migrations.AddField(
model_name='registerdstudents',
name='slug',
field=models.SlugField(allow_unicode=True, default='a', verbose_name='Slug'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-05 19:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_auto_20190405_2356'),
]
operations = [
migrations.RemoveField(
model_name='studentdetails',
name='slug',
),
migrations.AddField(
model_name='registerdstudents',
name='slug',
field=models.SlugField(allow_unicode=True, default='a', verbose_name='Slug'),
),
] | en | 0.712805 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2019-04-05 19:14 | 1.450859 | 1 |
etl/parsers/etw/Microsoft_Windows_Workplace_Join.py | IMULMUL/etl-parser | 104 | 6631042 | <filename>etl/parsers/etw/Microsoft_Windows_Workplace_Join.py
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Workplace Join
GUID : 76ab12d5-c986-4e60-9d7c-2a092b284cdd
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=100, version=0)
class Microsoft_Windows_Workplace_Join_100_0(Etw):
pattern = Struct(
"ActivityId" / WString,
"JWT" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=101, version=0)
class Microsoft_Windows_Workplace_Join_101_0(Etw):
pattern = Struct(
"ServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=102, version=0)
class Microsoft_Windows_Workplace_Join_102_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ErrorMessage" / WString,
"ServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=103, version=0)
class Microsoft_Windows_Workplace_Join_103_0(Etw):
pattern = Struct(
"HttpStatus" / Int32sl,
"ServiceUri" / WString,
"TraceId" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=104, version=0)
class Microsoft_Windows_Workplace_Join_104_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ZoneUriIsAddedTo" / WString,
"ZoneUriExistsIn" / WString,
"Uri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=200, version=0)
class Microsoft_Windows_Workplace_Join_200_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ActivityId" / WString,
"SoapResponse" / WString,
"ErrorMessage" / WString,
"RegistrationServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=201, version=0)
class Microsoft_Windows_Workplace_Join_201_0(Etw):
pattern = Struct(
"ActivityId" / WString,
"SoapResponse" / WString,
"RegistrationServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=300, version=0)
class Microsoft_Windows_Workplace_Join_300_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ErrorMessage" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=400, version=0)
class Microsoft_Windows_Workplace_Join_400_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ErrorCodeText" / WString,
"ErrorMessage" / WString,
"ErrorData" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=401, version=0)
class Microsoft_Windows_Workplace_Join_401_0(Etw):
pattern = Struct(
"Message" / WString,
"Data" / WString
)
| <filename>etl/parsers/etw/Microsoft_Windows_Workplace_Join.py
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Workplace Join
GUID : 76ab12d5-c986-4e60-9d7c-2a092b284cdd
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=100, version=0)
class Microsoft_Windows_Workplace_Join_100_0(Etw):
pattern = Struct(
"ActivityId" / WString,
"JWT" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=101, version=0)
class Microsoft_Windows_Workplace_Join_101_0(Etw):
pattern = Struct(
"ServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=102, version=0)
class Microsoft_Windows_Workplace_Join_102_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ErrorMessage" / WString,
"ServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=103, version=0)
class Microsoft_Windows_Workplace_Join_103_0(Etw):
pattern = Struct(
"HttpStatus" / Int32sl,
"ServiceUri" / WString,
"TraceId" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=104, version=0)
class Microsoft_Windows_Workplace_Join_104_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ZoneUriIsAddedTo" / WString,
"ZoneUriExistsIn" / WString,
"Uri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=200, version=0)
class Microsoft_Windows_Workplace_Join_200_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ActivityId" / WString,
"SoapResponse" / WString,
"ErrorMessage" / WString,
"RegistrationServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=201, version=0)
class Microsoft_Windows_Workplace_Join_201_0(Etw):
pattern = Struct(
"ActivityId" / WString,
"SoapResponse" / WString,
"RegistrationServiceUri" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=300, version=0)
class Microsoft_Windows_Workplace_Join_300_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ErrorMessage" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=400, version=0)
class Microsoft_Windows_Workplace_Join_400_0(Etw):
pattern = Struct(
"ExitCode" / Int32ul,
"ErrorCodeText" / WString,
"ErrorMessage" / WString,
"ErrorData" / WString
)
@declare(guid=guid("76ab12d5-c986-4e60-9d7c-2a092b284cdd"), event_id=401, version=0)
class Microsoft_Windows_Workplace_Join_401_0(Etw):
pattern = Struct(
"Message" / WString,
"Data" / WString
)
| en | 0.368476 | # -*- coding: utf-8 -*- Microsoft-Windows-Workplace Join GUID : 76ab12d5-c986-4e60-9d7c-2a092b284cdd | 2.129209 | 2 |
sharestats_item_editor/rexam_item_editor/misc.py | essb-mt-section/sharestats-item-editor | 5 | 6631043 | <reponame>essb-mt-section/sharestats-item-editor
import os
import tempfile
import re
def replace_list_element(lst, source_idx, target_idx):
"""replaces an element in a list"""
if source_idx < len(lst) and target_idx<len(
lst):
tmp = lst.pop(source_idx)
return lst[:target_idx] + [tmp] + lst[target_idx:]
else:
return []
def subdict(d, nested_keys=None):
""":return the dict nested hierarchically indicated by nested_keys
or None if key list is incorrect
:param nested_keys list of keys or a single keys
"""
if not isinstance(nested_keys, (tuple, list)):
nested_keys = [nested_keys]
for k in nested_keys:
try:
d = d[k]
except:
return {}
return d
def splitstrip(text, sep):
return list(map(lambda x: x.strip(), text.split(sep)))
def yesno(bool):
if bool:
return "Yes"
else:
return "No"
def get_temp_dir(appname, make_dir=True):
# creates and returns a temp folder
tmpdir = tempfile.gettempdir()
tmpdir = os.path.join(tmpdir, appname)
if make_dir:
try:
os.mkdir(tmpdir)
except:
pass
return tmpdir
class CaseInsensitiveStringList(object):
"""String list that handles string search case insensitive"""
def __init__(self, str_list=()):
self._str_list = list(str_list)
self._str_lower = [x.lower() for x in self._str_list]
def __len__(self):
return len(self._str_list)
def append(self, new_string):
self._str_list.append(new_string)
self._str_lower.append(new_string.lower())
def pop(self, index=-1):
self._str_lower.pop(index)
return self._str_list.pop(index)
def remove(self, element):
"""removes element and returns it, raises exception in not included"""
element = str(element).lower()
idx = self._str_lower.index(element)
self._str_lower.pop(idx)
return self._str_list.pop(idx)
def remove_all(self, element):
element = str(element).lower()
while True:
try:
idx = self._str_lower.index(element)
except:
break
self._str_list.pop(idx)
self._str_lower.pop(idx)
def __contains__(self, item):
return str(item).lower() in self._str_lower
def get(self):
return self._str_list
def remove_all(str_list, element, ignore_cases=False):
"""removes all occurrences of element from string list and ignores
optionally letter cases"""
if ignore_cases:
return [e for e in str_list \
if str(e).lower() != str(element).lower()]
else:
return [e for e in str_list if e != element]
def extract_parameter(txt):
# extract parameter for text line
m = re.match(r"\s*\w+[\[\]\w]+:", txt)
if m is not None:
return {txt[:m.end()-1].strip(): txt[m.end():].strip()}
return None
def iter_list(data):
"""Generates iterator over the data.
If None, iterator over empty list. If data is not a list or a tuple,
iterator over list with one one element [data]
"""
if data is None:
return iter([])
elif isinstance(data, (list, tuple)):
return iter(data)
else:
return iter([data])
| import os
import tempfile
import re
def replace_list_element(lst, source_idx, target_idx):
"""replaces an element in a list"""
if source_idx < len(lst) and target_idx<len(
lst):
tmp = lst.pop(source_idx)
return lst[:target_idx] + [tmp] + lst[target_idx:]
else:
return []
def subdict(d, nested_keys=None):
""":return the dict nested hierarchically indicated by nested_keys
or None if key list is incorrect
:param nested_keys list of keys or a single keys
"""
if not isinstance(nested_keys, (tuple, list)):
nested_keys = [nested_keys]
for k in nested_keys:
try:
d = d[k]
except:
return {}
return d
def splitstrip(text, sep):
return list(map(lambda x: x.strip(), text.split(sep)))
def yesno(bool):
if bool:
return "Yes"
else:
return "No"
def get_temp_dir(appname, make_dir=True):
# creates and returns a temp folder
tmpdir = tempfile.gettempdir()
tmpdir = os.path.join(tmpdir, appname)
if make_dir:
try:
os.mkdir(tmpdir)
except:
pass
return tmpdir
class CaseInsensitiveStringList(object):
"""String list that handles string search case insensitive"""
def __init__(self, str_list=()):
self._str_list = list(str_list)
self._str_lower = [x.lower() for x in self._str_list]
def __len__(self):
return len(self._str_list)
def append(self, new_string):
self._str_list.append(new_string)
self._str_lower.append(new_string.lower())
def pop(self, index=-1):
self._str_lower.pop(index)
return self._str_list.pop(index)
def remove(self, element):
"""removes element and returns it, raises exception in not included"""
element = str(element).lower()
idx = self._str_lower.index(element)
self._str_lower.pop(idx)
return self._str_list.pop(idx)
def remove_all(self, element):
element = str(element).lower()
while True:
try:
idx = self._str_lower.index(element)
except:
break
self._str_list.pop(idx)
self._str_lower.pop(idx)
def __contains__(self, item):
return str(item).lower() in self._str_lower
def get(self):
return self._str_list
def remove_all(str_list, element, ignore_cases=False):
"""removes all occurrences of element from string list and ignores
optionally letter cases"""
if ignore_cases:
return [e for e in str_list \
if str(e).lower() != str(element).lower()]
else:
return [e for e in str_list if e != element]
def extract_parameter(txt):
# extract parameter for text line
m = re.match(r"\s*\w+[\[\]\w]+:", txt)
if m is not None:
return {txt[:m.end()-1].strip(): txt[m.end():].strip()}
return None
def iter_list(data):
"""Generates iterator over the data.
If None, iterator over empty list. If data is not a list or a tuple,
iterator over list with one one element [data]
"""
if data is None:
return iter([])
elif isinstance(data, (list, tuple)):
return iter(data)
else:
return iter([data]) | en | 0.649643 | replaces an element in a list :return the dict nested hierarchically indicated by nested_keys or None if key list is incorrect :param nested_keys list of keys or a single keys # creates and returns a temp folder String list that handles string search case insensitive removes element and returns it, raises exception in not included removes all occurrences of element from string list and ignores optionally letter cases # extract parameter for text line Generates iterator over the data. If None, iterator over empty list. If data is not a list or a tuple, iterator over list with one one element [data] | 3.475411 | 3 |
test/playground.py | dustfine/python-learn | 0 | 6631044 | import os
print(os.cpu_count()) | import os
print(os.cpu_count()) | none | 1 | 1.58478 | 2 |
|
communication/__init__.py | AlexanderPollak/SKA-Compressor-COM | 0 | 6631045 | from connection import com
from connection import sensor
from connection import compressor
from connection import error | from connection import com
from connection import sensor
from connection import compressor
from connection import error | none | 1 | 1.135531 | 1 |
|
isbp/producer1.py | 5GZORRO/sla-breach-predictor | 0 | 6631046 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 14 13:09:20 2021
@author: dlaskaratos
"""
from kafka import KafkaProducer
import json
import pandas as pd
import numpy as np
from datetime import datetime
import time
producer = KafkaProducer(bootstrap_servers = '172.28.3.196:9092')
data = {
"data": {
"eventType": "new_SLA",
"transactionID": "e2e2ecaeec944aa793ff701e667c1908",
"productID": "2",
"resourceID": "250f91b5-a42b-46a5-94cd-419b1f3aa9e0",
"instanceID": "52",
"kafka_ip": "172.28.3.196",
"kafka_port": "9092",
"topic": "isbp-topic"}
}
msg = json.dumps(data)
producer.send('isbp-topic', msg.encode('utf-8'))
producer.flush()
data = {
"data": {
"eventType": "new_SLA",
"transactionID": "e2e2ecaeec944aa793ff701e667c1908",
"productID": "1",
"resourceID": "250f91b5-a42b-46a5-94cd-419b1f3aa9e0",
"instanceID": "52",
"kafka_ip": "172.28.3.196",
"kafka_port": "9092",
"topic": "isbp-topic"}
}
msg = json.dumps(data)
producer.send('isbp-topic', msg.encode('utf-8'))
producer.flush() | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 14 13:09:20 2021
@author: dlaskaratos
"""
from kafka import KafkaProducer
import json
import pandas as pd
import numpy as np
from datetime import datetime
import time
producer = KafkaProducer(bootstrap_servers = '172.28.3.196:9092')
data = {
"data": {
"eventType": "new_SLA",
"transactionID": "e2e2ecaeec944aa793ff701e667c1908",
"productID": "2",
"resourceID": "250f91b5-a42b-46a5-94cd-419b1f3aa9e0",
"instanceID": "52",
"kafka_ip": "172.28.3.196",
"kafka_port": "9092",
"topic": "isbp-topic"}
}
msg = json.dumps(data)
producer.send('isbp-topic', msg.encode('utf-8'))
producer.flush()
data = {
"data": {
"eventType": "new_SLA",
"transactionID": "e2e2ecaeec944aa793ff701e667c1908",
"productID": "1",
"resourceID": "250f91b5-a42b-46a5-94cd-419b1f3aa9e0",
"instanceID": "52",
"kafka_ip": "172.28.3.196",
"kafka_port": "9092",
"topic": "isbp-topic"}
}
msg = json.dumps(data)
producer.send('isbp-topic', msg.encode('utf-8'))
producer.flush() | en | 0.698464 | # -*- coding: utf-8 -*- Created on Thu Oct 14 13:09:20 2021 @author: dlaskaratos | 2.040303 | 2 |
tools/infer_mot.py | violetweir/PaddleDetection | 23 | 6631047 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# add python path of PadleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
sys.path.append(parent_path)
# ignore warning log
import warnings
warnings.filterwarnings('ignore')
import paddle
from paddle.distributed import ParallelEnv
from ppdet.core.workspace import load_config, merge_config
from ppdet.engine import Tracker
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser
from ppdet.utils.logger import setup_logger
logger = setup_logger('train')
def parse_args():
parser = ArgsParser()
parser.add_argument(
'--video_file', type=str, default=None, help='Video name for tracking.')
parser.add_argument(
"--data_type",
type=str,
default='mot',
help='Data type of tracking dataset, should be in ["mot", "kitti"]')
parser.add_argument(
"--det_results_dir",
type=str,
default=None,
help="Directory name for detection results.")
parser.add_argument(
'--output_dir',
type=str,
default='output',
help='Directory name for output tracking results.')
parser.add_argument(
'--save_images',
action='store_true',
help='Save tracking results (image).')
parser.add_argument(
'--save_videos',
action='store_true',
help='Save tracking results (video).')
parser.add_argument(
'--show_image',
action='store_true',
help='Show tracking results (image).')
args = parser.parse_args()
return args
def run(FLAGS, cfg):
# build Tracker
tracker = Tracker(cfg, mode='test')
# load weights
if cfg.architecture in ['DeepSORT']:
if cfg.det_weights != 'None':
tracker.load_weights_sde(cfg.det_weights, cfg.reid_weights)
else:
tracker.load_weights_sde(None, cfg.reid_weights)
else:
tracker.load_weights_jde(cfg.weights)
# inference
tracker.mot_predict(
video_file=FLAGS.video_file,
data_type=FLAGS.data_type,
model_type=cfg.architecture,
output_dir=FLAGS.output_dir,
save_images=FLAGS.save_images,
save_videos=FLAGS.save_videos,
show_image=FLAGS.show_image,
det_results_dir=FLAGS.det_results_dir)
def main():
FLAGS = parse_args()
cfg = load_config(FLAGS.config)
merge_config(FLAGS.opt)
check_config(cfg)
check_gpu(cfg.use_gpu)
check_version()
place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'
place = paddle.set_device(place)
run(FLAGS, cfg)
if __name__ == '__main__':
main()
| # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# add python path of PadleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
sys.path.append(parent_path)
# ignore warning log
import warnings
warnings.filterwarnings('ignore')
import paddle
from paddle.distributed import ParallelEnv
from ppdet.core.workspace import load_config, merge_config
from ppdet.engine import Tracker
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser
from ppdet.utils.logger import setup_logger
logger = setup_logger('train')
def parse_args():
parser = ArgsParser()
parser.add_argument(
'--video_file', type=str, default=None, help='Video name for tracking.')
parser.add_argument(
"--data_type",
type=str,
default='mot',
help='Data type of tracking dataset, should be in ["mot", "kitti"]')
parser.add_argument(
"--det_results_dir",
type=str,
default=None,
help="Directory name for detection results.")
parser.add_argument(
'--output_dir',
type=str,
default='output',
help='Directory name for output tracking results.')
parser.add_argument(
'--save_images',
action='store_true',
help='Save tracking results (image).')
parser.add_argument(
'--save_videos',
action='store_true',
help='Save tracking results (video).')
parser.add_argument(
'--show_image',
action='store_true',
help='Show tracking results (image).')
args = parser.parse_args()
return args
def run(FLAGS, cfg):
# build Tracker
tracker = Tracker(cfg, mode='test')
# load weights
if cfg.architecture in ['DeepSORT']:
if cfg.det_weights != 'None':
tracker.load_weights_sde(cfg.det_weights, cfg.reid_weights)
else:
tracker.load_weights_sde(None, cfg.reid_weights)
else:
tracker.load_weights_jde(cfg.weights)
# inference
tracker.mot_predict(
video_file=FLAGS.video_file,
data_type=FLAGS.data_type,
model_type=cfg.architecture,
output_dir=FLAGS.output_dir,
save_images=FLAGS.save_images,
save_videos=FLAGS.save_videos,
show_image=FLAGS.show_image,
det_results_dir=FLAGS.det_results_dir)
def main():
FLAGS = parse_args()
cfg = load_config(FLAGS.config)
merge_config(FLAGS.opt)
check_config(cfg)
check_gpu(cfg.use_gpu)
check_version()
place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'
place = paddle.set_device(place)
run(FLAGS, cfg)
if __name__ == '__main__':
main()
| en | 0.823256 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # add python path of PadleDetection to sys.path # ignore warning log # build Tracker # load weights # inference | 1.777511 | 2 |
setup.py | MD-Studio/MDStudio_pylie | 1 | 6631048 | # -*- coding: utf-8 -*-
# package: pylie
# file: setup.py
#
# Part of ‘pylie’, providing LIE data modelling routines
# LIEStudio package.
#
# Copyright © 2016 <NAME>, VU University Amsterdam, the Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
distribution_name = 'pylie'
setup(
name=distribution_name,
version=0.2,
description='LIE modelling library of the MDStudio application',
author="""
<NAME> - VU University - Amsterdam
<NAME> - Zefiros Software (www.zefiros.eu)
<NAME> - eScience Center (https://www.esciencecenter.nl/)""",
author_email=['<EMAIL>', '<EMAIL>'],
url='https://github.com/MD-Studio/MDStudio_pylie',
license='Apache Software License 2.0',
keywords='MDStudio LIE statistics modelling',
platforms=['Any'],
packages=find_packages(),
package_data={distribution_name: ['schemas/*', 'schemas/endpoints/*']},
py_modules=[distribution_name],
test_suite="tests",
install_requires=[
'dill', 'numpy', 'pandas', 'statsmodels', 'jsonschema', 'matplotlib',
'scikit-learn', 'openpyxl'],
include_package_data=True,
zip_safe=True,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
],
)
| # -*- coding: utf-8 -*-
# package: pylie
# file: setup.py
#
# Part of ‘pylie’, providing LIE data modelling routines
# LIEStudio package.
#
# Copyright © 2016 <NAME>, VU University Amsterdam, the Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
distribution_name = 'pylie'
setup(
name=distribution_name,
version=0.2,
description='LIE modelling library of the MDStudio application',
author="""
<NAME> - VU University - Amsterdam
<NAME> - Zefiros Software (www.zefiros.eu)
<NAME> - eScience Center (https://www.esciencecenter.nl/)""",
author_email=['<EMAIL>', '<EMAIL>'],
url='https://github.com/MD-Studio/MDStudio_pylie',
license='Apache Software License 2.0',
keywords='MDStudio LIE statistics modelling',
platforms=['Any'],
packages=find_packages(),
package_data={distribution_name: ['schemas/*', 'schemas/endpoints/*']},
py_modules=[distribution_name],
test_suite="tests",
install_requires=[
'dill', 'numpy', 'pandas', 'statsmodels', 'jsonschema', 'matplotlib',
'scikit-learn', 'openpyxl'],
include_package_data=True,
zip_safe=True,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
],
)
| en | 0.775928 | # -*- coding: utf-8 -*- # package: pylie # file: setup.py # # Part of ‘pylie’, providing LIE data modelling routines # LIEStudio package. # # Copyright © 2016 <NAME>, VU University Amsterdam, the Netherlands # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <NAME> - VU University - Amsterdam <NAME> - Zefiros Software (www.zefiros.eu) <NAME> - eScience Center (https://www.esciencecenter.nl/) | 1.334004 | 1 |
01-logica-de-programacao-e-algoritmos/Aula 05/2 Parametros/ex05.py | rafaelbarretomg/Uninter | 0 | 6631049 | # contagem em uma linha so
def contador(fim, inicio=0, passo=1):
for i in range(inicio, fim, passo):
print('{} ' .format(i), end='')
print('\n')
# Programa Principal
contador(20, 10, 2)
contador(12)
| # contagem em uma linha so
def contador(fim, inicio=0, passo=1):
for i in range(inicio, fim, passo):
print('{} ' .format(i), end='')
print('\n')
# Programa Principal
contador(20, 10, 2)
contador(12)
| pt | 0.998064 | # contagem em uma linha so # Programa Principal | 3.731656 | 4 |
flask_video_stream/db.py | andricampagnaro/documentacoes_e_testes | 0 | 6631050 | import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 8000
BUFFER_SIZE = 1024 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
while True:
conn, addr = s.accept()
data = conn.recv(BUFFER_SIZE)
print(f'Connection address: {addr}')
print(f"received data: {data.decode()}")
if data:
f = open('database/video3.mp4', 'rb')
l = f.read(1024)
while(l):
conn.send(l)
l = f.read(1024)
f.close()
conn.close() | import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 8000
BUFFER_SIZE = 1024 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
while True:
conn, addr = s.accept()
data = conn.recv(BUFFER_SIZE)
print(f'Connection address: {addr}')
print(f"received data: {data.decode()}")
if data:
f = open('database/video3.mp4', 'rb')
l = f.read(1024)
while(l):
conn.send(l)
l = f.read(1024)
f.close()
conn.close() | en | 0.957284 | # Normally 1024, but we want fast response | 2.855491 | 3 |