Dataset schema (column order of the rows below):

| Column | Dtype | Lengths / classes |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 5–283 |
| content_id | string | lengths 40–40 |
| detected_licenses | sequence | lengths 0–41 |
| license_type | string | 2 classes |
| repo_name | string | lengths 7–96 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k–662M, nullable (⌀) |
| star_events_count | int64 | 0–35.5k |
| fork_events_count | int64 | 0–20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7–5.88M |
| extension | string | 30 classes |
| content | string | lengths 7–5.88M |
| authors | sequence | lengths 1–1 |
| author | string | lengths 0–73 |
215094489cf5fc83d00a073b7700b0e554846376 | ba9d27ba3e0523d209ad5d6920638619c6b1fd5f | /uucf_iicf.py | 6afe7798813f6027890a3980b0c11ac21eafb5cc | [] | no_license | qxy0731/recommender_system | dacb45ffa311d5dce7637b012088b009206eea94 | c4a66bf8d96bedb8ba27e844ac91c3b2843ae4a0 | refs/heads/master | 2020-03-25T11:13:16.716200 | 2018-08-06T12:13:05 | 2018-08-06T12:13:05 | 143,723,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,631 | py |
import pandas as pd
import numpy as np
# Predict user `id`'s rating for `item` with user-based collaborative filtering:
# take the k users most similar to `id` (Pearson correlation) who have rated the item
# and return the similarity-weighted average of their ratings.
def userBaseRating(id,item,matrix,k):
need_compare = matrix.loc[matrix.index==id]
baseline = np.array(need_compare).astype(np.float64)
if np.std(baseline) == 0.0:
baseline[0][0] = baseline[0][0]+0.001
result_list = []
user_list = []
item_index = item - 1
for i in range(0, len(matrix)):
each_line = np.array([matrix.iloc[i]]).astype(np.float64)
if np.std(each_line) == 0.0:
each_line[0][0] = each_line[0][0]+0.001
if matrix.iloc[i].tolist()[item_index]!=0:
result_list.append([matrix.index[i],np.corrcoef(each_line,baseline)[0,1],matrix.iloc[i].tolist()[item_index]])
result_list = sorted(result_list, key=lambda x : x[1])
result_list.reverse()
result_list = result_list[1:]
if len(result_list)<=k:
for i in range(0,len(result_list)):
user_list.append(result_list[i])
else:
for i in range(0,k):
user_list.append(result_list[i])
curr_sum = 0
curr_count = 0
for i in range(0,len(user_list)):
curr_sum += user_list[i][1]*user_list[i][2]
curr_count += user_list[i][1]
if curr_count==0:
return 0
else:
final_result = curr_sum/curr_count
return final_result
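# Worked example of the weighted average above (illustrative numbers only, not from
# this file): with two neighbours having (similarity 0.9, rating 4) and
# (similarity 0.5, rating 2), the prediction is
# (0.9*4 + 0.5*2) / (0.9 + 0.5) = 4.6 / 1.4 ≈ 3.29.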
# Predict user `id`'s rating for `item` with item-based collaborative filtering:
# transpose the matrix, take the k items most similar to `item` (Pearson correlation)
# that `id` has rated, and return the similarity-weighted average of those ratings.
def itemBaseRating(id,item,matrix,k):
matrix = matrix.T
need_compare = matrix.loc[matrix.index==item]
baseline = np.array(need_compare).astype(np.float64)
if np.std(baseline) == 0.0:
baseline[0][0] = baseline[0][0]+0.001
result_list = []
m_result = []
item_list = []
id_index = id - 1
for i in range(0, len(matrix)):
each_line = np.array([matrix.iloc[i]]).astype(np.float64)
if np.std(each_line) == 0.0:
each_line[0][0] = each_line[0][0]+0.001
if matrix.iloc[i].tolist()[id_index]!=0:
result_list.append([matrix.index[i],np.corrcoef(each_line,baseline)[0,1],matrix.iloc[i].tolist()[id_index]])
result_list = sorted(result_list, key=lambda x : x[1])
result_list.reverse()
result_list = result_list[1:]
if len(result_list)<=k:
for i in range(0,len(result_list)):
item_list.append(result_list[i])
else:
for i in range(0,k):
item_list.append(result_list[i])
curr_sum = 0
curr_count = 0
for i in range(0,len(item_list)):
curr_sum += item_list[i][1]*item_list[i][2]
curr_count += item_list[i][1]
if curr_count==0:
return 0
else:
final_result = curr_sum/curr_count
return final_result
# Return up to five item ids (column labels) with the highest user-based predicted
# ratings among the items user `id` has rated (non-zero entries).
def userbestfive(id,matrix,k):
result = []
baseline = matrix.iloc[matrix.index==id].values.tolist()[0]
for i in range(0,len(baseline)):
if baseline[i]!=0:
rating = userBaseRating(id,matrix.columns[i],matrix,k)
result.append([rating,matrix.columns[i]])
result = sorted(result, key=lambda x : x[0])
result.reverse()
final_result = []
if len(result)<=5:
for i in range(0,len(result)):
final_result.append(result[i][1])
else:
for i in range(0,5):
final_result.append(result[i][1])
return final_result
# Return up to five item ids (column labels) with the highest item-based predicted
# ratings among the items user `id` has rated (non-zero entries).
def itembestfive(id,matrix,k):
result = []
baseline = matrix.iloc[matrix.index==id].values.tolist()[0]
for i in range(0,len(baseline)):
if baseline[i]!=0:
rating = itemBaseRating(id,matrix.columns[i],matrix,k)
result.append([rating,matrix.columns[i]])
result = sorted(result, key=lambda x : x[0])
result.reverse()
final_result = []
if len(result)<=5:
for i in range(0,len(result)):
final_result.append(result[i][1])
else:
for i in range(0,5):
final_result.append(result[i][1])
return final_result
# Compute user-based predictions for every (user, item) pair in test_data;
# returns a dict keyed by the pair.
def wrap_user(test_data,matrix,k=25):
final_result = {}
count = 0
print("-----------nil={}----user_start-----------------".format(k))
for each in test_data:
print("{}/{}".format(count,len(test_data)))
final_result[each] = userBaseRating(each[0],each[1],matrix,k)
count += 1
print("-----------nil={}----user_done-----------------".format(k))
return final_result
# Compute item-based predictions for every (user, item) pair in test_data;
# returns a dict keyed by the pair.
def wrap_item(test_data,matrix,k=25):
final_result = {}
count = 0
print("-----------nil={}----item_start-----------------".format(k))
for each in test_data:
print("{}/{}".format(count,len(test_data)))
final_result[each] = itemBaseRating(each[0],each[1],matrix,k)
count += 1
print("-----------nil={}----item_done-----------------".format(k))
return final_result
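# Usage sketch (added for illustration; not part of the original file). It assumes a
# ratings file named 'ratings.csv' laid out as a user-item matrix: one row per user id,
# one numeric column per item, and 0 where a rating is missing.
if __name__ == '__main__':
    ratings = pd.read_csv('ratings.csv', index_col=0)
    # predicted rating of item 3 for user 1, using the 25 most similar users
    print(userBaseRating(1, 3, ratings, 25))
    # up to five items with the highest user-based predictions for user 1
    print(userbestfive(1, ratings, 25))
    # item-based predictions for a small set of (user, item) test pairs
    print(wrap_item([(1, 3), (2, 7)], ratings))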
| ["[email protected]"] | |
3623a8066acb114f575df08764f00a1ffea08a49 | 9d01ddd9276d3c447e9d1138c68f60eeb39aabf4 | /mysiteenv/bin/autopep8 | 489b02c77469275b3cfad0f94bd8c93f60b8d2d6 | [] | no_license | itsaiub/django-basic-site-sandex | de21da1399bed55ca5593ef1b78328d58f81b0f9 | d6a9bc7133a2a399dadf2a3e5cb7f23bfecaab1c | refs/heads/master | 2020-06-25T13:20:35.634588 | 2019-07-28T17:56:35 | 2019-07-28T17:56:35 | 199,320,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 |
#!/home/a1ubkh4n/Documents/Developmnet/My_Django_Stuff/django-basic-site/mysiteenv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.4.4','console_scripts','autopep8'
__requires__ = 'autopep8==1.4.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('autopep8==1.4.4', 'console_scripts', 'autopep8')()
)
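# Typical invocation of this console script from the virtualenv (the target file name
# below is only an example): `autopep8 --in-place --aggressive path/to/module.py`
# rewrites the file in place to conform to PEP 8.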
| ["[email protected]"] | ||
35928e56fa96d416f5a328d4a776d067d6a29232 | 9d3c175eb62fbefa8af7ad579adf0e577e94d044 | /tradewithfriends/wsgi.py | a8f88caa4a1bc1b5b50a10246b421f437588cc71 | ["Apache-2.0"] | permissive | JonForce/tradewithfriends | 426ddbd317bb67692938a934103dbb31ae890871 | f1b7bf22a14209f6d41a89427f3fce39ba02b816 | refs/heads/main | 2023-03-18T22:57:57.744558 | 2021-03-09T23:36:52 | 2021-03-09T23:36:52 | 346,169,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py |
"""
WSGI config for tradewithfriends project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tradewithfriends.settings')
application = get_wsgi_application()
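# Example of serving this callable with a WSGI server (gunicorn is an assumption here,
# not a dependency declared by this file):
#   gunicorn tradewithfriends.wsgi:application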
| ["[email protected]"] | ||
cef3d665a937b3759dbb5f60cdbc86bf9bd51e73 | 2ce27b05f45cef6ce3ae5c02b8e83e548def2fc6 | /ADVANCE/Modules/Collection Module/OrderedDict( ).py | 10e231ad02867a4b4253c970cd82b37a96558fba | [] | no_license | Ajay2521/Python | 775b7d99736e83e4d0c37302b91d1413dd2c0d3b | a426dd7717de8a5e60e584d208ae7120bb84c1b3 | refs/heads/master | 2022-12-01T17:49:12.672061 | 2020-08-15T14:55:12 | 2020-08-15T14:55:12 | 273,632,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py |
# Let's see about "Collections"
# The collections module is used to store collections of data in container datatypes.
# OrderedDict() returns a dictionary object whose keys maintain the order of insertion.
# NOTE : if an existing key is inserted again, its previous value will be overwritten.
# Here is the program for OrderedDict()
import collections
d = collections.OrderedDict()
d['A'] = 1
d['B'] = 2
d['C'] = 3
d['D'] = 4
d['E'] = 5
print('\nData in "d" is :\n')
print(d)
print('\nkeys in "d" is :\n')
for key in d:
print(key)
print('\nValues in "d" is :\n')
for key in d:
print(d[key])
print('\nKeys and values in "d" is :\n')
for key,value in d.items():
print(key,value)
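# Small addition to illustrate the NOTE above: re-inserting an existing key overwrites
# its value, but the key keeps its original position in the OrderedDict.
d['A'] = 10
print('\nData in "d" after re-inserting key "A" :\n')
print(d)
# prints OrderedDict([('A', 10), ('B', 2), ('C', 3), ('D', 4), ('E', 5)])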
| ["[email protected]"] | |
df3ef3f3ae6a6a5509dd53328ff2d985126a9de4 | cb56395b14bc9951a8587001aac87a8795bf91b8 | /in_depth/migrations/0031_auto_20200207_2032.py | f8b4aa0d1c352a857214913c14a1def6c17d7af2 | [] | no_license | natmey/newamerica-cms | 998b4f09dca8208f3ffdd7c78029fb896e25d38e | bc34d35ee96fcdab90324e8a3fe84e342573c9cd | refs/heads/main | 2023-09-03T20:02:37.412813 | 2021-09-15T17:06:53 | 2021-09-15T17:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,613 | py |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2020-02-07 20:32
from __future__ import unicode_literals
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('in_depth', '0030_auto_20190926_1000'),
]
operations = [
migrations.AlterField(
model_name='indepthprofile',
name='body',
field=wagtail.core.fields.StreamField([('introduction', wagtail.core.blocks.RichTextBlock()), ('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image')), ('video', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('table', wagtail.contrib.table_block.blocks.TableBlock()), ('button', wagtail.core.blocks.StructBlock([('button_text', wagtail.core.blocks.CharBlock(max_length=50, required=True)), ('button_link', wagtail.core.blocks.URLBlock(default='https://www.', required=True)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left-aligned', 'Left'), ('center-aligned', 'Center')]))])), ('iframe', wagtail.core.blocks.StructBlock([('source_url', wagtail.core.blocks.URLBlock(required=True)), ('column_width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the iframe. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('width', wagtail.core.blocks.IntegerBlock(help_text='The iframe will look best if the width is at least as large as the column width. Note that the maximum, in 2018 and earlier, used to be 1050.', required=True)), ('height', wagtail.core.blocks.IntegerBlock(required=True)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')])), ('fallback_image_use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False))])), ('collapsible', wagtail.core.blocks.StructBlock([('hidden_by_default', wagtail.core.blocks.StreamBlock([('introduction', wagtail.core.blocks.RichTextBlock(icon='openquote')), ('heading', wagtail.core.blocks.CharBlock(classname='full title', icon='title', template='blocks/heading.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('inline_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large'), ('width-full', 'Full-width')])), ('use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. 
Should be checked for all figures.', required=False)), ('figure_number', wagtail.core.blocks.CharBlock(max_length=3, required=False)), ('figure_title', wagtail.core.blocks.CharBlock(max_length=100, required=False)), ('open_image_on_click', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='image')), ('video', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('table', wagtail.contrib.table_block.blocks.TableBlock(template='blocks/table.html')), ('button', wagtail.core.blocks.StructBlock([('button_text', wagtail.core.blocks.CharBlock(max_length=50, required=True)), ('button_link', wagtail.core.blocks.URLBlock(default='https://www.', required=True)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left-aligned', 'Left'), ('center-aligned', 'Center')]))])), ('iframe', wagtail.core.blocks.StructBlock([('source_url', wagtail.core.blocks.URLBlock(required=True)), ('column_width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the iframe. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('width', wagtail.core.blocks.IntegerBlock(help_text='The iframe will look best if the width is at least as large as the column width. Note that the maximum, in 2018 and earlier, used to be 1050.', required=True)), ('height', wagtail.core.blocks.IntegerBlock(required=True)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')])), ('fallback_image_use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False))], icon='link')), ('datawrapper', wagtail.core.blocks.StructBlock([('chart_id', wagtail.core.blocks.CharBlock(help_text='The 5 character ID for the chart, e.g. "kT4Qi"', required=True)), ('embed_code', wagtail.core.blocks.TextBlock(help_text='The "Responsive Embed" code provided by Datawrapper', required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the chart. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). 
Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')]))], icon='code')), ('dataviz', wagtail.core.blocks.StructBlock([('container_id', wagtail.core.blocks.CharBlock(required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('subheading', wagtail.core.blocks.RichTextBlock(required=False)), ('max_width', wagtail.core.blocks.IntegerBlock(help_text='for legacy dataviz projects', required=False)), ('show_chart_buttons', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='code')), ('timeline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subheading', wagtail.core.blocks.CharBlock(required=False)), ('default_view', wagtail.core.blocks.ChoiceBlock(choices=[('timeline', 'Timeline'), ('list', 'List')], help_text='Should the default view be a timeline or a list?', required=False)), ('major_timeline_splits', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_eras', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), default='', required=False)), ('event_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('italicize_title', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('category', wagtail.core.blocks.CharBlock(required=False)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))])))], icon='arrows-up-down')), ('google_map', wagtail.core.blocks.StructBlock([('use_page_address', wagtail.core.blocks.BooleanBlock(default=False, help_text='If selected, map will use the address already defined for this page, if applicable. 
For most posts besides events, this should be left unchecked and the form below should be completed.', required=False)), ('street', wagtail.core.blocks.TextBlock(required=False)), ('city', wagtail.core.blocks.TextBlock(default='Washington', required=False)), ('state', wagtail.core.blocks.TextBlock(default='D.C.', required=False)), ('zipcode', wagtail.core.blocks.TextBlock(default='200', required=False))], icon='site')), ('resource_kit', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('resources', wagtail.core.blocks.StreamBlock([('post', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.PageChooserBlock(required=True))], icon='redirect', label='Post')), ('external_resource', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.URLBlock(required=True))], icon='site', label='External resource')), ('attachment', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.documents.blocks.DocumentChooserBlock(required=True))], icon='doc-full', label='Attachment'))]))], icon='folder')), ('people', wagtail.core.blocks.StreamBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('person', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.TextBlock(required=True)), ('title', wagtail.core.blocks.TextBlock(help_text='125 character limit', max_length=125, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('twitter', wagtail.core.blocks.URLBlock(required=False))]))], help_text='Grid of people with short bios that appear on click', icon='group')), ('panels', wagtail.core.blocks.StreamBlock([('panel', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.StreamBlock([('introduction', wagtail.core.blocks.RichTextBlock(icon='openquote')), ('heading', wagtail.core.blocks.CharBlock(classname='full title', icon='title', template='blocks/heading.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('inline_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large'), ('width-full', 'Full-width')])), ('use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. 
Should be checked for all figures.', required=False)), ('figure_number', wagtail.core.blocks.CharBlock(max_length=3, required=False)), ('figure_title', wagtail.core.blocks.CharBlock(max_length=100, required=False)), ('open_image_on_click', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='image')), ('video', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('table', wagtail.contrib.table_block.blocks.TableBlock(template='blocks/table.html')), ('button', wagtail.core.blocks.StructBlock([('button_text', wagtail.core.blocks.CharBlock(max_length=50, required=True)), ('button_link', wagtail.core.blocks.URLBlock(default='https://www.', required=True)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left-aligned', 'Left'), ('center-aligned', 'Center')]))])), ('iframe', wagtail.core.blocks.StructBlock([('source_url', wagtail.core.blocks.URLBlock(required=True)), ('column_width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the iframe. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('width', wagtail.core.blocks.IntegerBlock(help_text='The iframe will look best if the width is at least as large as the column width. Note that the maximum, in 2018 and earlier, used to be 1050.', required=True)), ('height', wagtail.core.blocks.IntegerBlock(required=True)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')])), ('fallback_image_use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False))], icon='link')), ('datawrapper', wagtail.core.blocks.StructBlock([('chart_id', wagtail.core.blocks.CharBlock(help_text='The 5 character ID for the chart, e.g. "kT4Qi"', required=True)), ('embed_code', wagtail.core.blocks.TextBlock(help_text='The "Responsive Embed" code provided by Datawrapper', required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the chart. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). 
Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')]))], icon='code')), ('dataviz', wagtail.core.blocks.StructBlock([('container_id', wagtail.core.blocks.CharBlock(required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('subheading', wagtail.core.blocks.RichTextBlock(required=False)), ('max_width', wagtail.core.blocks.IntegerBlock(help_text='for legacy dataviz projects', required=False)), ('show_chart_buttons', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='code')), ('timeline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subheading', wagtail.core.blocks.CharBlock(required=False)), ('default_view', wagtail.core.blocks.ChoiceBlock(choices=[('timeline', 'Timeline'), ('list', 'List')], help_text='Should the default view be a timeline or a list?', required=False)), ('major_timeline_splits', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_eras', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), default='', required=False)), ('event_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('italicize_title', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('category', wagtail.core.blocks.CharBlock(required=False)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))])))], icon='arrows-up-down')), ('google_map', wagtail.core.blocks.StructBlock([('use_page_address', wagtail.core.blocks.BooleanBlock(default=False, help_text='If selected, map will use the address already defined for this page, if applicable. 
For most posts besides events, this should be left unchecked and the form below should be completed.', required=False)), ('street', wagtail.core.blocks.TextBlock(required=False)), ('city', wagtail.core.blocks.TextBlock(default='Washington', required=False)), ('state', wagtail.core.blocks.TextBlock(default='D.C.', required=False)), ('zipcode', wagtail.core.blocks.TextBlock(default='200', required=False))], icon='site')), ('resource_kit', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('resources', wagtail.core.blocks.StreamBlock([('post', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.PageChooserBlock(required=True))], icon='redirect', label='Post')), ('external_resource', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.URLBlock(required=True))], icon='site', label='External resource')), ('attachment', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.documents.blocks.DocumentChooserBlock(required=True))], icon='doc-full', label='Attachment'))]))], icon='folder'))]))], icon='doc-empty-inverse'))], icon='list-ul')), ('image', wagtail.images.blocks.ImageChooserBlock(help_text='Legacy option. Consider using Inline Image instead.', icon='placeholder', template='blocks/image_block.html'))]))])), ('data_reference', wagtail.core.blocks.StructBlock([('fields_to_display', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('field_name', wagtail.core.blocks.CharBlock(required=True)), ('label', wagtail.core.blocks.CharBlock(required=False)), ('format', wagtail.core.blocks.ChoiceBlock(choices=[('date', 'Date'), ('number', 'Number (with thousands-place comma)'), ('percent', 'Percent'), ('string', 'Plain-text'), ('price', 'Price'), ('rank', 'Rank'), ('markdown', 'Rich-text')])), ('footnote_field', wagtail.core.blocks.CharBlock(required=False))]), help_text='Specify the field where values to display will be found.'))])), ('video_data_reference', wagtail.core.blocks.StructBlock([('field_name', wagtail.core.blocks.CharBlock(required=True)), ('host_site', wagtail.core.blocks.ChoiceBlock(choices=[('youtube', 'Youtube'), ('vimeo', 'Vimeo')]))]))]),
),
migrations.AlterField(
model_name='indepthsection',
name='panels',
field=wagtail.core.fields.StreamField([('panel', wagtail.core.blocks.StructBlock([('panel_title', wagtail.core.blocks.CharBlock(required=True)), ('panel_color_theme', wagtail.core.blocks.ChoiceBlock(choices=[('white', 'White'), ('grey', 'Grey'), ('black', 'Black')])), ('panel_body', wagtail.core.blocks.StreamBlock([('introduction', wagtail.core.blocks.RichTextBlock(icon='openquote')), ('heading', wagtail.core.blocks.CharBlock(classname='full title', icon='title', template='blocks/heading.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('inline_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large'), ('width-full', 'Full-width')])), ('use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False)), ('figure_number', wagtail.core.blocks.CharBlock(max_length=3, required=False)), ('figure_title', wagtail.core.blocks.CharBlock(max_length=100, required=False)), ('open_image_on_click', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='image')), ('video', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('table', wagtail.contrib.table_block.blocks.TableBlock(template='blocks/table.html')), ('button', wagtail.core.blocks.StructBlock([('button_text', wagtail.core.blocks.CharBlock(max_length=50, required=True)), ('button_link', wagtail.core.blocks.URLBlock(default='https://www.', required=True)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left-aligned', 'Left'), ('center-aligned', 'Center')]))])), ('iframe', wagtail.core.blocks.StructBlock([('source_url', wagtail.core.blocks.URLBlock(required=True)), ('column_width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the iframe. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('width', wagtail.core.blocks.IntegerBlock(help_text='The iframe will look best if the width is at least as large as the column width. Note that the maximum, in 2018 and earlier, used to be 1050.', required=True)), ('height', wagtail.core.blocks.IntegerBlock(required=True)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')])), ('fallback_image_use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False))], icon='link')), ('datawrapper', wagtail.core.blocks.StructBlock([('chart_id', wagtail.core.blocks.CharBlock(help_text='The 5 character ID for the chart, e.g. 
"kT4Qi"', required=True)), ('embed_code', wagtail.core.blocks.TextBlock(help_text='The "Responsive Embed" code provided by Datawrapper', required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the chart. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')]))], icon='code')), ('dataviz', wagtail.core.blocks.StructBlock([('container_id', wagtail.core.blocks.CharBlock(required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('subheading', wagtail.core.blocks.RichTextBlock(required=False)), ('max_width', wagtail.core.blocks.IntegerBlock(help_text='for legacy dataviz projects', required=False)), ('show_chart_buttons', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='code')), ('timeline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subheading', wagtail.core.blocks.CharBlock(required=False)), ('default_view', wagtail.core.blocks.ChoiceBlock(choices=[('timeline', 'Timeline'), ('list', 'List')], help_text='Should the default view be a timeline or a list?', required=False)), ('major_timeline_splits', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_eras', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), default='', required=False)), ('event_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('italicize_title', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('category', wagtail.core.blocks.CharBlock(required=False)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), 
('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))])))], icon='arrows-up-down')), ('google_map', wagtail.core.blocks.StructBlock([('use_page_address', wagtail.core.blocks.BooleanBlock(default=False, help_text='If selected, map will use the address already defined for this page, if applicable. For most posts besides events, this should be left unchecked and the form below should be completed.', required=False)), ('street', wagtail.core.blocks.TextBlock(required=False)), ('city', wagtail.core.blocks.TextBlock(default='Washington', required=False)), ('state', wagtail.core.blocks.TextBlock(default='D.C.', required=False)), ('zipcode', wagtail.core.blocks.TextBlock(default='200', required=False))], icon='site')), ('resource_kit', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('resources', wagtail.core.blocks.StreamBlock([('post', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.PageChooserBlock(required=True))], icon='redirect', label='Post')), ('external_resource', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.URLBlock(required=True))], icon='site', label='External resource')), ('attachment', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.documents.blocks.DocumentChooserBlock(required=True))], icon='doc-full', label='Attachment'))]))], icon='folder')), ('people', wagtail.core.blocks.StreamBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('person', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.TextBlock(required=True)), ('title', wagtail.core.blocks.TextBlock(help_text='125 character limit', max_length=125, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('twitter', wagtail.core.blocks.URLBlock(required=False))]))], help_text='Grid of people with short bios that appear on click', icon='group')), ('panels', wagtail.core.blocks.StreamBlock([('panel', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.StreamBlock([('introduction', wagtail.core.blocks.RichTextBlock(icon='openquote')), ('heading', wagtail.core.blocks.CharBlock(classname='full title', icon='title', template='blocks/heading.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('inline_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 
'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large'), ('width-full', 'Full-width')])), ('use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False)), ('figure_number', wagtail.core.blocks.CharBlock(max_length=3, required=False)), ('figure_title', wagtail.core.blocks.CharBlock(max_length=100, required=False)), ('open_image_on_click', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='image')), ('video', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('table', wagtail.contrib.table_block.blocks.TableBlock(template='blocks/table.html')), ('button', wagtail.core.blocks.StructBlock([('button_text', wagtail.core.blocks.CharBlock(max_length=50, required=True)), ('button_link', wagtail.core.blocks.URLBlock(default='https://www.', required=True)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left-aligned', 'Left'), ('center-aligned', 'Center')]))])), ('iframe', wagtail.core.blocks.StructBlock([('source_url', wagtail.core.blocks.URLBlock(required=True)), ('column_width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the iframe. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('width', wagtail.core.blocks.IntegerBlock(help_text='The iframe will look best if the width is at least as large as the column width. Note that the maximum, in 2018 and earlier, used to be 1050.', required=True)), ('height', wagtail.core.blocks.IntegerBlock(required=True)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')])), ('fallback_image_use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False))], icon='link')), ('datawrapper', wagtail.core.blocks.StructBlock([('chart_id', wagtail.core.blocks.CharBlock(help_text='The 5 character ID for the chart, e.g. "kT4Qi"', required=True)), ('embed_code', wagtail.core.blocks.TextBlock(help_text='The "Responsive Embed" code provided by Datawrapper', required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the chart. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). 
Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')]))], icon='code')), ('dataviz', wagtail.core.blocks.StructBlock([('container_id', wagtail.core.blocks.CharBlock(required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('subheading', wagtail.core.blocks.RichTextBlock(required=False)), ('max_width', wagtail.core.blocks.IntegerBlock(help_text='for legacy dataviz projects', required=False)), ('show_chart_buttons', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='code')), ('timeline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subheading', wagtail.core.blocks.CharBlock(required=False)), ('default_view', wagtail.core.blocks.ChoiceBlock(choices=[('timeline', 'Timeline'), ('list', 'List')], help_text='Should the default view be a timeline or a list?', required=False)), ('major_timeline_splits', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_eras', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), default='', required=False)), ('event_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('italicize_title', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('category', wagtail.core.blocks.CharBlock(required=False)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))])))], icon='arrows-up-down')), ('google_map', wagtail.core.blocks.StructBlock([('use_page_address', wagtail.core.blocks.BooleanBlock(default=False, help_text='If selected, map will use the address already defined for this page, if applicable. 
For most posts besides events, this should be left unchecked and the form below should be completed.', required=False)), ('street', wagtail.core.blocks.TextBlock(required=False)), ('city', wagtail.core.blocks.TextBlock(default='Washington', required=False)), ('state', wagtail.core.blocks.TextBlock(default='D.C.', required=False)), ('zipcode', wagtail.core.blocks.TextBlock(default='200', required=False))], icon='site')), ('resource_kit', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('resources', wagtail.core.blocks.StreamBlock([('post', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.PageChooserBlock(required=True))], icon='redirect', label='Post')), ('external_resource', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.URLBlock(required=True))], icon='site', label='External resource')), ('attachment', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.documents.blocks.DocumentChooserBlock(required=True))], icon='doc-full', label='Attachment'))]))], icon='folder'))]))], icon='doc-empty-inverse'))], icon='list-ul')), ('image', wagtail.images.blocks.ImageChooserBlock(help_text='Legacy option. Consider using Inline Image instead.', icon='placeholder', template='blocks/image_block.html')), ('collapsible', wagtail.core.blocks.StructBlock([('hidden_by_default', wagtail.core.blocks.StreamBlock([('introduction', wagtail.core.blocks.RichTextBlock(icon='openquote')), ('heading', wagtail.core.blocks.CharBlock(classname='full title', icon='title', template='blocks/heading.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('inline_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large'), ('width-full', 'Full-width')])), ('use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. 
Should be checked for all figures.', required=False)), ('figure_number', wagtail.core.blocks.CharBlock(max_length=3, required=False)), ('figure_title', wagtail.core.blocks.CharBlock(max_length=100, required=False)), ('open_image_on_click', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='image')), ('video', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('table', wagtail.contrib.table_block.blocks.TableBlock(template='blocks/table.html')), ('button', wagtail.core.blocks.StructBlock([('button_text', wagtail.core.blocks.CharBlock(max_length=50, required=True)), ('button_link', wagtail.core.blocks.URLBlock(default='https://www.', required=True)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left-aligned', 'Left'), ('center-aligned', 'Center')]))])), ('iframe', wagtail.core.blocks.StructBlock([('source_url', wagtail.core.blocks.URLBlock(required=True)), ('column_width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the iframe. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('width', wagtail.core.blocks.IntegerBlock(help_text='The iframe will look best if the width is at least as large as the column width. Note that the maximum, in 2018 and earlier, used to be 1050.', required=True)), ('height', wagtail.core.blocks.IntegerBlock(required=True)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')])), ('fallback_image_use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False))], icon='link')), ('datawrapper', wagtail.core.blocks.StructBlock([('chart_id', wagtail.core.blocks.CharBlock(help_text='The 5 character ID for the chart, e.g. "kT4Qi"', required=True)), ('embed_code', wagtail.core.blocks.TextBlock(help_text='The "Responsive Embed" code provided by Datawrapper', required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the chart. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). 
Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')]))], icon='code')), ('dataviz', wagtail.core.blocks.StructBlock([('container_id', wagtail.core.blocks.CharBlock(required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('subheading', wagtail.core.blocks.RichTextBlock(required=False)), ('max_width', wagtail.core.blocks.IntegerBlock(help_text='for legacy dataviz projects', required=False)), ('show_chart_buttons', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='code')), ('timeline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subheading', wagtail.core.blocks.CharBlock(required=False)), ('default_view', wagtail.core.blocks.ChoiceBlock(choices=[('timeline', 'Timeline'), ('list', 'List')], help_text='Should the default view be a timeline or a list?', required=False)), ('major_timeline_splits', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_eras', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), default='', required=False)), ('event_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('italicize_title', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('category', wagtail.core.blocks.CharBlock(required=False)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))])))], icon='arrows-up-down')), ('google_map', wagtail.core.blocks.StructBlock([('use_page_address', wagtail.core.blocks.BooleanBlock(default=False, help_text='If selected, map will use the address already defined for this page, if applicable. 
For most posts besides events, this should be left unchecked and the form below should be completed.', required=False)), ('street', wagtail.core.blocks.TextBlock(required=False)), ('city', wagtail.core.blocks.TextBlock(default='Washington', required=False)), ('state', wagtail.core.blocks.TextBlock(default='D.C.', required=False)), ('zipcode', wagtail.core.blocks.TextBlock(default='200', required=False))], icon='site')), ('resource_kit', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('resources', wagtail.core.blocks.StreamBlock([('post', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.PageChooserBlock(required=True))], icon='redirect', label='Post')), ('external_resource', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.URLBlock(required=True))], icon='site', label='External resource')), ('attachment', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.documents.blocks.DocumentChooserBlock(required=True))], icon='doc-full', label='Attachment'))]))], icon='folder')), ('people', wagtail.core.blocks.StreamBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('person', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.TextBlock(required=True)), ('title', wagtail.core.blocks.TextBlock(help_text='125 character limit', max_length=125, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('twitter', wagtail.core.blocks.URLBlock(required=False))]))], help_text='Grid of people with short bios that appear on click', icon='group')), ('panels', wagtail.core.blocks.StreamBlock([('panel', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.TextBlock()), ('body', wagtail.core.blocks.StreamBlock([('introduction', wagtail.core.blocks.RichTextBlock(icon='openquote')), ('heading', wagtail.core.blocks.CharBlock(classname='full title', icon='title', template='blocks/heading.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('inline_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large'), ('width-full', 'Full-width')])), ('use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. 
Should be checked for all figures.', required=False)), ('figure_number', wagtail.core.blocks.CharBlock(max_length=3, required=False)), ('figure_title', wagtail.core.blocks.CharBlock(max_length=100, required=False)), ('open_image_on_click', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='image')), ('video', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('table', wagtail.contrib.table_block.blocks.TableBlock(template='blocks/table.html')), ('button', wagtail.core.blocks.StructBlock([('button_text', wagtail.core.blocks.CharBlock(max_length=50, required=True)), ('button_link', wagtail.core.blocks.URLBlock(default='https://www.', required=True)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left-aligned', 'Left'), ('center-aligned', 'Center')]))])), ('iframe', wagtail.core.blocks.StructBlock([('source_url', wagtail.core.blocks.URLBlock(required=True)), ('column_width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the iframe. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('width', wagtail.core.blocks.IntegerBlock(help_text='The iframe will look best if the width is at least as large as the column width. Note that the maximum, in 2018 and earlier, used to be 1050.', required=True)), ('height', wagtail.core.blocks.IntegerBlock(required=True)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')])), ('fallback_image_use_original', wagtail.core.blocks.BooleanBlock(help_text='check if you do not want image compressed. Should be checked for all figures.', required=False))], icon='link')), ('datawrapper', wagtail.core.blocks.StructBlock([('chart_id', wagtail.core.blocks.CharBlock(help_text='The 5 character ID for the chart, e.g. "kT4Qi"', required=True)), ('embed_code', wagtail.core.blocks.TextBlock(help_text='The "Responsive Embed" code provided by Datawrapper', required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], help_text='The maximum width of the chart. Always use "Column Width" for non-report content types (e.g. blog posts, About pages). 
Never use "Full-Width" unless specifically instructed to by your designer.', required=False)), ('fallback_image', wagtail.images.blocks.ImageChooserBlock(help_text='The fallback image will be rendered for the PDF', icon='image', required=False)), ('fallback_image_align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Centered'), ('left', 'Left'), ('right', 'Right')])), ('fallback_image_width', wagtail.core.blocks.ChoiceBlock(choices=[('initial', 'Auto'), ('width-133', 'Medium'), ('width-166', 'Large'), ('width-200', 'X-Large')]))], icon='code')), ('dataviz', wagtail.core.blocks.StructBlock([('container_id', wagtail.core.blocks.CharBlock(required=True)), ('width', wagtail.core.blocks.ChoiceBlock(choices=[('column-width', 'Column Width (max 650px)'), ('width-1200', 'Site Width (max 1200px)'), ('full-width', 'Full Width (max 100%)')], required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('subheading', wagtail.core.blocks.RichTextBlock(required=False)), ('max_width', wagtail.core.blocks.IntegerBlock(help_text='for legacy dataviz projects', required=False)), ('show_chart_buttons', wagtail.core.blocks.BooleanBlock(default=False, required=False))], icon='code')), ('timeline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subheading', wagtail.core.blocks.CharBlock(required=False)), ('default_view', wagtail.core.blocks.ChoiceBlock(choices=[('timeline', 'Timeline'), ('list', 'List')], help_text='Should the default view be a timeline or a list?', required=False)), ('major_timeline_splits', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_eras', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))]), default='', required=False)), ('event_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), default='', required=False)), ('event_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('italicize_title', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('category', wagtail.core.blocks.CharBlock(required=False)), ('start_date', wagtail.core.blocks.DateBlock(required=True)), ('end_date', wagtail.core.blocks.DateBlock(required=False)), ('date_display_type', wagtail.core.blocks.ChoiceBlock(choices=[('year', 'Year'), ('month', 'Month'), ('day', 'Day')], help_text='Controls how specific the date is displayed'))])))], icon='arrows-up-down')), ('google_map', wagtail.core.blocks.StructBlock([('use_page_address', wagtail.core.blocks.BooleanBlock(default=False, help_text='If selected, map will use the address already defined for this page, if applicable. 
For most posts besides events, this should be left unchecked and the form below should be completed.', required=False)), ('street', wagtail.core.blocks.TextBlock(required=False)), ('city', wagtail.core.blocks.TextBlock(default='Washington', required=False)), ('state', wagtail.core.blocks.TextBlock(default='D.C.', required=False)), ('zipcode', wagtail.core.blocks.TextBlock(default='200', required=False))], icon='site')), ('resource_kit', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('description', wagtail.core.blocks.TextBlock(required=False)), ('resources', wagtail.core.blocks.StreamBlock([('post', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.PageChooserBlock(required=True))], icon='redirect', label='Post')), ('external_resource', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.core.blocks.URLBlock(required=True))], icon='site', label='External resource')), ('attachment', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(required=True)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('resource', wagtail.documents.blocks.DocumentChooserBlock(required=True))], icon='doc-full', label='Attachment'))]))], icon='folder'))]))], icon='doc-empty-inverse'))], icon='list-ul')), ('image', wagtail.images.blocks.ImageChooserBlock(help_text='Legacy option. Consider using Inline Image instead.', icon='placeholder', template='blocks/image_block.html'))]))]))]))]))], blank=True, null=True),
),
]
| [
"[email protected]"
] | |
fa791bcfc4fb1d50ab8f8cfe6e6459465c2d68af | 15910b24ccda1c724b96cb2817b91eea9d52754b | /mysite/settings.py | 78934459cf06f898d3ad7815ad223cd360b98891 | [] | no_license | itenjin/my-first-blog | a35c3393d22cae27af3e485f4683294c5c791e0e | 9a024c8ad0f2bb9c869e3bb6c4aa14be92dde840 | refs/heads/master | 2020-12-20T08:10:56.926513 | 2020-01-25T08:20:24 | 2020-01-25T08:20:24 | 236,011,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,201 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bdlg2i@0ug1$-0ot_v@iib(rjhyfhr8-v2f%=@@hb&$o+p)w5r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
4f1290ae2c41f84e42ef101171d6fb7de6d99783 | e1f5ba03cd1b6c5e8d1a346dae31a6c9d3c81f96 | /challenges/next_prime_generator.py | c6d1b8622896744e373fec282aee2082a1f59dbe | [] | no_license | BParesh89/The_Modern_Python3_Bootcamp | 7637ead43e2ca7697b7833b621885cc4e4d1ccff | e9de6a4ba9af8dde83e449758fb1ef41e348f702 | refs/heads/master | 2020-03-24T14:54:31.606805 | 2018-03-23T12:26:20 | 2018-03-23T12:26:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | def next_prime():
num = 2
while True:
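        # num is prime exactly when it has two divisors (1 and itself); the list below collects every i that divides num evenly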
if len([i for i in range(1, num+1) if (num/i) == num//i]) != 2:
num += 1
else:
yield num
num += 1
# testing
primes = next_prime()
assert([next(primes) for i in range(25)] == [2, 3, 5, 7, 11, 13, 17, 19,
23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97])
| [
"[email protected]"
] | |
22be9255832aa7850ab8a6efc46867967c5f805f | 555bc867d36c37c4b554457d34f628226f019bb2 | /scrape.py | bb0ebfe5b533a138fb400a4b6c43a2f158e96fd3 | [] | no_license | StanfordVLSI/scrape-spec | fd13db8aaecdc19bf65c285fe2754d20f8d6e454 | 81f0789bfb860f9d5d86823e8c7559d03172114d | refs/heads/master | 2021-01-18T20:22:33.724905 | 2015-04-07T17:33:11 | 2015-04-07T17:33:11 | 16,095,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | html = urllib.urlopen(url).read()
soup = BeautifulSoup.BeautifulSoup(html)
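# NOTE: as written this snippet assumes Python 2, with `import urllib`, `import BeautifulSoup`
# and a `url` string supplied before these two lines run.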
| [
"[email protected]"
] | |
63be64a3a36f83159883a808df9d45fc1527c8ad | 83cfe7c9556c7fc5a0cda7eb73a213e05d620bb0 | /main.py | 7b2fae585c2966a933a9befb4a9656e519677f42 | [] | no_license | tang1323/JD_Scrapy | c3d2906d49c4ad537de2934dea9fb5b413d8d386 | 5da0f4266ad9329b52672a231459e5c24473b708 | refs/heads/master | 2023-04-02T01:37:00.863567 | 2021-04-14T14:17:15 | 2021-04-14T14:17:15 | 357,930,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | from scrapy.cmdline import execute
import sys
import os
# print(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", "jd_book"])
| [
"[email protected]"
] | |
180837e9d3ec975df3c36cda46aefe8967b14c64 | 24357a12af42349b9b787b47722a738a6694419b | /Learn/Algorithms/Warmup/A Very Big Sum/bigsum.py | 006c05a10f9c2fa21e804a5d960cd7e00319d267 | [
"MIT"
] | permissive | Adriel-M/HackerRank | 1aee2f3501f7db01bf2c7eabc00ff555779a345c | 0f361bb7eaa3d2db3dd96de511c4b7fa02efa8c5 | refs/heads/master | 2021-01-21T10:49:03.610333 | 2017-06-18T03:37:16 | 2017-06-18T03:37:16 | 91,708,001 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # Given code
n = int(input().strip())
arr = [int(arr_temp) for arr_temp in input().strip().split(' ')]
# Start
total = 0  # use a name that does not shadow the built-in sum()
for i in range(n):
    total += arr[i]
print(total)
| [
"[email protected]"
] | |
1ccc25559589a6ae95b9b0f94b76a092ecb8c49c | 409e2694e5ce48d18814948f00993ec3552e3a29 | /soaktest/cratemonitor.py~ | 5f0f4bc43c9768cd9eda1c585fb6fd42f4f3d40c | [] | no_license | AstroSaiko/cratemonitor | c5e958689ec6bb473a6bf5e3b7b39987fd96ea57 | 4f9cfe358de470e5dfa062cc9325a939275f716b | refs/heads/master | 2021-01-11T06:16:48.906702 | 2017-07-20T11:55:45 | 2017-07-20T11:55:45 | 70,058,218 | 0 | 0 | null | 2016-10-05T12:30:14 | 2016-10-05T12:30:14 | null | UTF-8 | Python | false | false | 20,040 | #!/usr/bin/env python
import subprocess
import sys
import os
import signal
import time
#HOSTNAME = sys.argv[1]
HOSTNAME = "mch-e1a04-18"
#Defining every board as a class, hence treating each card as an object with sensor data
#===============
# Start PM Class
#===============
class PM:
"""Power module object"""
def __init__(self, PMIndex):
self.PMIndex = PMIndex #PM index in crate
self.entity = "10.{0}".format(str(96 + self.PMIndex)) #converting PM index to ipmi entity
self.hostname = HOSTNAME
#Initializing empty variables
self.tempA = None
self.tempB = None
self.tempBase = None
self.VIN = None
self.VOutA = None
self.VOutB = None
self.volt12V = None
self.volt3V3 = None
self.currentSum = None
self.flavor = None
#Get data upon instantiation
self.sensorValueList = self.getData()
def setHostname(self, hostname):
self.hostname = hostname
def getData(self):
self.proc = subprocess.Popen(("ipmitool -H {0} -U '' -P '' sdr entity {1}".format(self.hostname, self.entity)).split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)
(self.data, self.err) = self.proc.communicate()
if self.err != '':
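            # ipmitool may print a harmless HPM.x capabilities warning on stderr; only other stderr output is treated as an error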
#if not "Get HPM.x Capabilities request failed, compcode = c9" in err:
if self.err != "Get HPM.x Capabilities request failed, compcode = c9\n":
print self.err
return -1
self.data = self.data.split('\n')
#=========================================#
# This block is for NAT-PM-DC840 type PMs #
#=========================================#
if "NAT-PM-DC840" in self.data[0]:
self.flavor = "NAT-PM-DC840"
if self.data == '':
print "Error or whatever"
else:
for item in self.data:
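                    # readings are picked out of ipmitool's "sdr" output by token position, so the indices below depend on that fixed layout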
#Temperatures
if "TBrick-A" in item:
self.tempA = item.strip().split(" ")[17]
elif "TBrick-B" in item:
self.tempB = item.strip().split(" ")[17]
elif "T-Base" in item:
self.tempBase = item.strip().split(" ")[19]
#Input Voltage
elif "VIN" in item:
self.VIN = item.strip().split(" ")[22]
#Output Voltage
elif "VOUT-A" in item:
self.VOutA = item.strip().split(" ")[19]
elif "VOUT-B" in item:
self.VOutB = item.strip().split(" ")[19]
#12V
elif "12V" in item:
self.volt12V = item.strip().split(" ")[22]
elif "3.3V" in item:
self.volt3V3 = item.strip().split(" ")[21]
                    #Total output current
elif "Current(SUM)" in item:
self.currentSum = item.strip().split(" ")[13]
#==========================================#
# End NAT-PM-DC840 block #
#==========================================#
return [self.tempA, self.tempB, self.tempBase, self.VIN, self.VOutA, self.VOutB, self.volt12V, self.volt3V3, self.currentSum]
def printSensorValues(self):
#self.getData()
if self.flavor == "NAT-PM-DC840":
print ''
print "==============================="
print " Sensor Values for PM{0} ".format(self.PMIndex)
print "==============================="
print ''
print "TBrick-A:", self.tempA, "degC"
print "TBrick-B:", self.tempB, "degC"
print "T-Base:", self.tempBase, "degC"
print "Input Voltage:", self.VIN, "V"
            print "Output Voltage A:", self.VOutA, "V"
print "Output Voltage B:", self.VOutB, "V"
print "12V:", self.volt12V, "V"
print "3.3V:", self.volt3V3, "V"
            print "Total Current:", self.currentSum, "A"
print ""
else:
print "Unknown PM flavor. Check code and PM class"
#=============
# End PM class
#=============
#================
# Start MCH class
#================
class MCH:
"""MCH object"""
def __init__(self, MCHIndex = 1):
self.MCHIndex = MCHIndex
self.entity = "194.{0}".format(str(96 + self.MCHIndex)) #converting MCH index to ipmi entity
        self.hostname = HOSTNAME
#Initializing empty variables
self.flavor = None
self.tempCPU = None
self.tempIO = None
        self.volt1V2 = None
        self.volt1V5 = None
self.volt1V8 = None
self.volt2V5 = None
self.volt3V3 = None
self.volt12V = None
self.current = None
#Get data upon instantiation
self.sensorValueList = self.getData()
def setHostname(self, hostname):
self.hostname = hostname
def getData(self):
self.proc = subprocess.Popen(("ipmitool -H {0} -U admin -P admin sdr entity {1}".format(self.hostname, self.entity)).split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)
(self.data, self.err) = self.proc.communicate()
if self.err != '':
#if not "Get HPM.x Capabilities request failed, compcode = c9" in err:
if self.err != "Get HPM.x Capabilities request failed, compcode = c9\n":
print self.err
return -1
self.data = self.data.split('\n')
#=========================================#
# This block is for NAT-MCH-MCMC type MCH #
#=========================================#
if "NAT-MCH-MCMC" in self.data[0]:
self.flavor = "NAT-MCH-MCMC"
for item in self.data:
if "Temp CPU" in item:
self.tempCPU = item.strip().split(" ")[18]
elif "Temp I/O" in item:
self.tempIO = item.strip().split(" ")[18]
elif "Base 1.2V" in item:
self.volt1V2 = item.strip().split(" ")[17]
elif "Base 1.5V" in item:
self.volt1V5 = item.strip().split(" ")[17]
elif "Base 1.8V" in item:
self.volt1V8 = item.strip().split(" ")[17]
elif "Base 2.5V" in item:
self.volt2V5 = item.strip().split(" ")[17]
elif "Base 3.3V" in item:
self.volt3V3 = item.strip().split(" ")[17]
elif "Base 12V" in item:
self.volt12V = item.strip().split(" ")[18]
elif "Base Current" in item:
self.current = item.strip().split(" ")[14]
#==========================================#
# End NAT-MCH-MCMC block #
#==========================================#
        return [self.tempCPU, self.tempIO, self.volt1V2, self.volt1V5, self.volt1V8, self.volt2V5, self.volt3V3, self.volt12V, self.current]
def printSensorValues(self):
#self.getData()
if self.flavor == "NAT-MCH-MCMC":
print ''
print "==============================="
print " Sensor Values for MCH{0} ".format(self.MCHIndex)
print "==============================="
print ''
print "Temp CPU:", self.tempCPU, "degC"
print "Temp I/O:", self.tempIO, "degC"
print "Base 1.2V:", self.volt1V2, "V"
print "Base 1.5V:", self.volt1V5, "V"
print "Base 1.8V:", self.volt1V8, "V"
print "Base 2.5V:", self.volt2V5, "V"
            print "Base 3.3V:", self.volt3V3, "V"
print "Base 12V:", self.volt12V, "V"
            print "Base Current:", self.current, "A"
print ""
else:
print "Unknown MCH flavor, check code and MCH class"
#==============
# End MCH class
#==============
#================
# Start CU class
#================
class CU:
'''Cooling Unit object'''
def __init__(self, CUIndex):
self.hostname = HOSTNAME
self.CUIndex = CUIndex
self.entity = "30.{0}".format(96 + CUIndex)
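        # CU1 is addressed at IPMB target 0xa8 and CU2 at 0xaa, bridged through 0x82 in getData() below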
if self.CUIndex == 1:
self.target = "0xa8"
else:
self.target = "0xaa"
#Initializing empty variables
self.flavor = None
self.CU3V3 = None
self.CU12V = None
self.CU12V_1 = None
self.LM75Temp = None
self.LM75Temp2 = None
self.fan1 = None
self.fan2 = None
self.fan3 = None
self.fan4 = None
self.fan5 = None
self.fan6 = None
#Get data upon instantiation
self.sensorValueList = self.getData()
def setHostname(self, hostname):
self.hostname = hostname
def checkFlavor(self, flavor):
self._proc = subprocess.Popen(("ipmitool -H {0} -U admin -P admin sdr entity {1}".format(self.hostname, self.entity)).split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)
(self._data, self._err) = self._proc.communicate()
self._data = self._data.split('\n')
if flavor in self._data[0]:
self.flavor = flavor
return True
else:
return False
def getData(self):
self.proc = subprocess.Popen(("ipmitool -H {0} -U '' -P '' -T 0x82 -b 7 -t {1} -B 0 sdr".format(self.hostname, self.target)).split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)
(self.data, self.err) = self.proc.communicate()
if self.err != '':
#if not "Get HPM.x Capabilities request failed, compcode = c9" in err:
if self.err != "Get HPM.x Capabilities request failed, compcode = c9\n":
print self.err
return -1
self.data = self.data.split('\n')
#=====================================================#
# This block is for Schroff uTCA CU type Cooling Unit #
#=====================================================#
if self.checkFlavor("Schroff uTCA CU"):
for item in self.data:
if "+3.3V" in item:
self.CU3V3 = item.strip().split(" ")[13]
elif "+12V " in item:
self.CU12V = item.strip().split(" ")[14]
elif "+12V_1" in item:
self.CU12V_1 = item.strip().split(" ")[12]
elif "LM75 Temp " in item:
self.LM75Temp = item.strip().split(" ")[10]
elif "LM75 Temp2" in item:
self.LM75Temp2 = item.strip().split(" ")[9]
elif "Fan 1" in item:
self.fan1 = item.strip().split(" ")[14]
elif "Fan 2" in item:
self.fan2 = item.strip().split(" ")[14]
elif "Fan 3" in item:
self.fan3 = item.strip().split(" ")[14]
elif "Fan 4" in item:
self.fan4 = item.strip().split(" ")[14]
elif "Fan 5" in item:
self.fan5 = item.strip().split(" ")[14]
elif "Fan 6" in item:
self.fan6 = item.strip().split(" ")[14]
#=====================================================#
# END Schroff uTCA CU type Cooling Unit block #
#=====================================================#
return [self.CU3V3, self.CU12V, self.CU12V_1, self.LM75Temp, self.LM75Temp2, self.fan1, self.fan2, self.fan3, self.fan4, self.fan5, self.fan6]
def printSensorValues(self):
#self.getData()
if self.flavor == "Schroff uTCA CU":
print ''
print "==============================="
print " Sensor Values for CU{0} ".format(self.CUIndex)
print "==============================="
print ''
print "+3.3V:", self.CU3V3, "V"
print "+12V:", self.CU12V, "V"
print "+12V_1:", self.CU12V_1, "V"
print "LM75 Temp:", self.LM75Temp, "degC"
print "LM75 Temp2:", self.LM75Temp2, "degC"
print "Fan 1:", self.fan1, "rpm"
print "Fan 2:", self.fan2, "rpm"
print "Fan 3:", self.fan3, "rpm"
print "Fan 4:", self.fan4, "rpm"
print "Fan 5:", self.fan5, "rpm"
print "Fan 6:", self.fan6, "rpm"
print ""
else:
            print "Unknown CU type, check code and CU class"
#=============
# END CU class
#=============
#################
# Start AMC13 class
#################
class AMC13:
'''AMC13 object'''
def __init__(self):
self.hostname = HOSTNAME
#Initializing empty variables
self.flavor = None
self.T2Temp = None
self.volt12V = None
self.volt3V3 = None
self.volt1V2 = None
#Get data upon instantiation
self.sensorValueList = self.getData()
def setHostname(self, hostname):
self.hostname = hostname
def getData(self):
self.proc = subprocess.Popen(("ipmitool -H {0} -U admin -P admin sdr entity 193.122".format(self.hostname)).split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)
(self.data, self.err) = self.proc.communicate()
if self.err != '':
#if not "Get HPM.x Capabilities request failed, compcode = c9" in err:
if self.err != "Get HPM.x Capabilities request failed, compcode = c9\n":
print self.err
return -1
self.data = self.data.split('\n')
#=====================================================#
# This block is for BU AMC13 type amc13 #
#=====================================================#
if "BU AMC13" in self.data[0]:
self.flavor = "BU AMC13"
for item in self.data:
if "T2 Temp" in item:
self.T2Temp = item.strip().split(" ")[19]
elif "+12V" in item:
self.volt12V = item.strip().split(" ")[21]
elif "+3.3V" in item:
self.volt3V3 = item.strip().split(" ")[20]
elif "+1.2V" in item:
self.volt1V2 = item.strip().split(" ")[20]
#=====================================================#
# END BU AMC13 type block #
#=====================================================#
return [self.T2Temp, self.volt12V, self.volt3V3, self.volt1V2]
def printSensorValues(self):
#self.getData()
if self.flavor == "BU AMC13":
print ''
print "==============================="
print " Sensor Values for AMC13 "
print "==============================="
print ''
print "T2Temp:", self.T2Temp, "degC"
print "+12V:", self.volt12V, "V"
print "+3.3V:", self.volt3V3, "V"
print "+1.2V:", self.volt1V2, "V"
print ''
else:
            print "Unknown AMC13 type, check code and AMC13 class"
if __name__ == "__main__":
    try:
        PM1 = PM(1)
        PM2 = PM(2)
        PM1.printSensorValues()
        PM2.printSensorValues()
        mch = MCH()  # lower-case instance name so it does not shadow the MCH class
        mch.printSensorValues()
        CU1 = CU(1)
        CU2 = CU(2)
        CU1.printSensorValues()
        CU2.printSensorValues()
        amc13 = AMC13()
        amc13.printSensorValues()
        sys.exit(0)
    except Exception:
        # "except Exception" (rather than a bare except) lets the SystemExit raised by sys.exit(0) through
        sys.exit(1)
| [
"[email protected]"
] | ||
883391c1d19e61fda70384595cc39bdfbc529465 | 6bf079887a516552df39ed85aa872dcec8c53dba | /easytime.py | eebf1e5034d8f7e7c6d7e2f2f929390c44e26523 | [] | no_license | Rojetto/tri_me | a6164d141081f200887d1c58790b34ccfa28a9db | 4aad8dcc3f6fae22552dbb15ff25a00d90abb7fd | refs/heads/master | 2022-12-31T16:25:37.457716 | 2020-10-21T18:50:27 | 2020-10-21T18:50:27 | 305,162,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,286 | py | import time
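# Hierarchical tic/toc profiler: Timer.tic(name) starts (or re-uses) a named SubTimer
# nested under the currently running one, Timer.toc() stops it, and Timer.print()
# reports sum/avg/min/max per timer in milliseconds.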
class Timer:
def __init__(self):
self.running_timer = None
self.root_timers = []
def tic(self, message):
if self.running_timer is not None:
subs_to_check = self.running_timer.subtimers
else:
subs_to_check = self.root_timers
timer_to_start = None
for sub in subs_to_check:
if sub.name == message:
timer_to_start = sub
if timer_to_start is None:
timer_to_start = SubTimer(message)
if self.running_timer is not None:
self.running_timer.sub(timer_to_start)
else:
self.root_timers.append(timer_to_start)
timer_to_start.start()
self.running_timer = timer_to_start
def toc(self):
dt = 0
if self.running_timer is not None:
dt = self.running_timer.stop()
self.running_timer = self.running_timer.parent
return dt
def print(self):
sorted_subs = sorted(self.root_timers, key=lambda s: s.total, reverse=True)
for sub in sorted_subs:
sub.print(0)
class SubTimer:
def __init__(self, name):
self.running = False
self.start_time = 0.0
self.name = name
self.min = None
self.max = 0.0
self.total = 0.0
self.n = 0
self.subtimers = []
self.parent = None
def start(self):
self.start_time = time.process_time()
self.running = True
def stop(self):
dt = time.process_time() - self.start_time
self.running = False
self.total += dt
self.n += 1
if self.n == 1:
self.min = dt
else:
self.min = min(self.min, dt)
self.max = max(self.max, dt)
return dt
def sub(self, sub):
sub.parent = self
self.subtimers.append(sub)
def print(self, indentation):
print(f"{'': <{indentation*4}}{self.name: <{70-indentation*4}} | sum {self.total*1000: 8.2f} | avg {self.total/self.n*1000: 5.2f} | min {self.min*1000: 5.2f} | max {self.max*1000: 5.2f}")
sorted_subs = sorted(self.subtimers, key=lambda s: s.total, reverse=True)
for sub in sorted_subs:
sub.print(indentation+1) | [
"[email protected]"
] | |
21ca12886bb240c872416bd1713c2883bb1b1656 | a9fc496e0724866093dbb9cba70a8fdce12b67a9 | /scripts/portal/mihailPortal003.py | 0fc67c291e3067805fe15f8738dbdf7605c1b3c1 | [
"MIT"
] | permissive | ryantpayton/Swordie | b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0 | ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e | refs/heads/master | 2022-12-01T09:46:47.138072 | 2020-03-24T10:32:20 | 2020-03-24T10:32:20 | 253,997,319 | 2 | 0 | MIT | 2022-11-24T08:17:54 | 2020-04-08T05:50:22 | Java | UTF-8 | Python | false | false | 34 | py | sm.warp(913070020, 1)
sm.dispose() | [
"[email protected]"
] | |
ba2060d114859a3af94f75e11cbba8c096d153f0 | a6fe8aeaa30a22b65d98a2bb360b6d761a2e17fc | /venv/lib/python2.7/site-packages/kubernetes/client/models/v1_downward_api_volume_file.py | 35061dd4f030568d5fe0f491969453e1a8e52379 | [
"MIT"
] | permissive | 784134748/kubernetes-install | 54a2a8e83e2f47f2064270649725899282b7b244 | 5df59632c2619632e422948b667fb68eab9ff5be | refs/heads/master | 2022-12-15T13:52:43.486633 | 2019-03-27T13:01:06 | 2019-03-27T13:01:06 | 176,937,818 | 0 | 0 | MIT | 2022-05-25T01:56:18 | 2019-03-21T12:13:41 | Python | UTF-8 | Python | false | false | 6,862 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1DownwardAPIVolumeFile(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'field_ref': 'V1ObjectFieldSelector',
'mode': 'int',
'path': 'str',
'resource_field_ref': 'V1ResourceFieldSelector'
}
attribute_map = {
'field_ref': 'fieldRef',
'mode': 'mode',
'path': 'path',
'resource_field_ref': 'resourceFieldRef'
}
def __init__(self, field_ref=None, mode=None, path=None, resource_field_ref=None):
"""
V1DownwardAPIVolumeFile - a model defined in Swagger
"""
self._field_ref = None
self._mode = None
self._path = None
self._resource_field_ref = None
self.discriminator = None
if field_ref is not None:
self.field_ref = field_ref
if mode is not None:
self.mode = mode
self.path = path
if resource_field_ref is not None:
self.resource_field_ref = resource_field_ref
@property
def field_ref(self):
"""
Gets the field_ref of this V1DownwardAPIVolumeFile.
Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
:return: The field_ref of this V1DownwardAPIVolumeFile.
:rtype: V1ObjectFieldSelector
"""
return self._field_ref
@field_ref.setter
def field_ref(self, field_ref):
"""
Sets the field_ref of this V1DownwardAPIVolumeFile.
Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
:param field_ref: The field_ref of this V1DownwardAPIVolumeFile.
:type: V1ObjectFieldSelector
"""
self._field_ref = field_ref
@property
def mode(self):
"""
Gets the mode of this V1DownwardAPIVolumeFile.
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:return: The mode of this V1DownwardAPIVolumeFile.
:rtype: int
"""
return self._mode
@mode.setter
def mode(self, mode):
"""
Sets the mode of this V1DownwardAPIVolumeFile.
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:param mode: The mode of this V1DownwardAPIVolumeFile.
:type: int
"""
self._mode = mode
@property
def path(self):
"""
Gets the path of this V1DownwardAPIVolumeFile.
Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
:return: The path of this V1DownwardAPIVolumeFile.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this V1DownwardAPIVolumeFile.
Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
:param path: The path of this V1DownwardAPIVolumeFile.
:type: str
"""
if path is None:
raise ValueError("Invalid value for `path`, must not be `None`")
self._path = path
@property
def resource_field_ref(self):
"""
Gets the resource_field_ref of this V1DownwardAPIVolumeFile.
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
:return: The resource_field_ref of this V1DownwardAPIVolumeFile.
:rtype: V1ResourceFieldSelector
"""
return self._resource_field_ref
@resource_field_ref.setter
def resource_field_ref(self, resource_field_ref):
"""
Sets the resource_field_ref of this V1DownwardAPIVolumeFile.
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
:param resource_field_ref: The resource_field_ref of this V1DownwardAPIVolumeFile.
:type: V1ResourceFieldSelector
"""
self._resource_field_ref = resource_field_ref
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1DownwardAPIVolumeFile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
625d50dda4359da865759ed0333751585f4106c7 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/wroscoe_donkey/donkey-master/donkey/calibrate.py | e027b7f0e6b5d7bdeb9d0084d88bf53eef468d31 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 578 | py | from time import sleep
class PCA9685_Controller:
# Init with 60hz frequency by default, good for servos.
def __init__(self, channel, frequency=60):
import Adafruit_PCA9685
# Initialise the PCA9685 using the default address (0x40).
self.pwm = Adafruit_PCA9685.PCA9685()
self.pwm.set_pwm_freq(frequency)
self.channel = channel
def set_pulse(self, pulse):
self.pwm.set_pwm(self.channel, 0, pulse)
if __name__ == '__main__':
c = PCA9685_Controller(0)
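    # step the PWM pulse width up by one tick each second, useful for finding a servo/ESC calibration value by observation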
    for i in range(1, 1000):
c.set_pulse(i)
sleep(1)
| [
"[email protected]"
] | |
6a185135a514f9f2d8fac69e9c918e6114c528dc | b6dc4fd480fa3c345171c386276220789c890540 | /Main.py | 7b06e64497e1ab302070bc079cf7a3092295aa61 | [
"Apache-2.0"
] | permissive | Ranjan2104/Create-Audio-Book-from-pdf | 14c6997964dde73ecc9e9346dafa0b8465715026 | 3f382109e7f3d084e6388e80eaa407461c0dd2d8 | refs/heads/main | 2023-05-09T04:59:11.077269 | 2021-05-24T06:05:08 | 2021-05-24T06:05:08 | 370,242,910 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py |
import pyttsx3
import PyPDF2
pdf_book = open('THE FALLING OF THE LEAVES.pdf','rb')
pdf_reader = PyPDF2.PdfFileReader(pdf_book)
num_pages_counter = pdf_reader.numPages
play = pyttsx3.init()
print('Playing Audio Book from pdf')
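# pyttsx3's say() only queues text; runAndWait() blocks until the queued speech has been spoken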
for num in range(0, num_pages_counter):
page = pdf_reader.getPage(num)
data = page.extractText()
play.say(data)
play.runAndWait()
print('Book is Finished, Thank you!')
## contributed by Amresh Ranjan. | [
"[email protected]"
] | |
3950cb48b92dd278195da63c5cf144d2830992f6 | 085fd3aefa463ba16c1fd73d4ed9cd2b4c2c26df | /python-tkinter-cb/py_coursebook_n.py | 293f55604aed5f75c8e5517e4ebc749ed67d0250 | [] | no_license | Jammyhammy/Jammy-Linux-Unix-Scripts | f2b4e1d7195b00ed60cff61d0afd1f10beff255a | 886255618f0fd0b716bd5aad97a8228a6a02acfc | refs/heads/master | 2021-01-10T16:30:20.512119 | 2015-12-23T23:45:54 | 2015-12-23T23:45:54 | 48,515,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,042 | py | #!/usr/bin/python3
# Adil Khokhar
# CLID: axk9375
# CMPS 499 python assignment designed to work on python 3.2.3
# Import tkinter
from tkinter import *
from tkinter import messagebox
import tkinter.messagebox
import tkinter
# Predefined data of classes and professors.
classes = ['CMPS260-001','CMPS260-002','CMPS261-001','CMPS327-001','CMPS450-001','CMPS499-002']
profs = ['Dr.Radle', 'Dr.Etheridge', 'Dr.Kumar', 'Mr.Ducrest']
courselist = {}
courselist2 = [
['CMPS260-001', 'Dr.Etheridge'],
['CMPS260-001', 'Mr.Ducrest'],
['CMPS261-001', 'Dr.Etheridge'],
]
# Create root window and the frames
root = Tk()
frame = Frame(root)
frame.pack()
frame2 = Frame(root)
frame2.pack( side = BOTTOM )
framelist = Frame(root)
framelist.pack( side = BOTTOM )
frame3 = Frame(root)
frame3.pack( side = BOTTOM )
# Add in entries for user to type in name of professor and course name.
entlabel = Label(frame, text = "Professor & Course Entry")
entlabel.pack()
entlabel.grid(row=1)
profField = Entry(frame, text = "professor", width=30)
profField.insert(0, "ProfName")
profField.pack()
profField.grid(row=2)
courseField = Entry(frame, text = "course", width=30)
courseField.insert(0, "CMPS499-001")
courseField.pack()
courseField.grid(row=3)
# Create and label the listboxes for classes, professors, and assignments
clabel = Label(framelist, text = "Classes Professors Assignments")
clabel.pack(side= TOP)
classListBox = Listbox(framelist)
i = 0
for course in classes:
classListBox.insert(i,course)
i = i + 1
classListBox.pack( side = LEFT )
profListBox = Listbox(framelist)
j = 0
for prof in profs:
    profListBox.insert(j,prof)
j = j + 1
profListBox.pack( side = LEFT )
# Dictionary does not work.
#longest = 20
#pclListBox = Listbox(framelist)
#for key,value in courselist.items():
# entry = '{}: {}'.format(key, (''.join(value)))
# longest = max(longest, len(entry))
# pclListBox.insert(END, entry)
#pclListBox.config(width=longest)
#pclListBox.pack( side = LEFT )
# Just use a list.
pclListBox = Listbox(framelist)
for prof,course in courselist2 :
pclListBox.insert(END, prof, course)
pclListBox.pack( side = LEFT )
# Function definitions for adding, removing, professors and classes, and assignments.
def addProf():
profadd = profField.get()
profListBox.insert(END, (profadd))
profs.append(profadd)
def remProf():
profname = profListBox.get(ACTIVE)
profListBox.delete(ANCHOR)
def addClass():
classadd = courseField.get()
classListBox.insert(END, (classadd))
classes.append(classadd)
def remClass():
classname = classListBox.get(ACTIVE)
classListBox.delete(ANCHOR)
def addEntry():
#An actual assignment manager doesn't work.
#toplevel = Toplevel()
#toplevel.title('Manage Assignments')
#toplevel.focus_set()
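    # an assignment is stored as two consecutive listbox entries: the selected class followed by the selected professor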
pclListBox.insert(END, classListBox.get(ACTIVE), profListBox.get(ACTIVE))
courselist2.append([classListBox.get(ACTIVE), profListBox.get(ACTIVE)])
def remEntry():
pclListBox.delete(ANCHOR)
#Add in buttons for adding/removing professors, classes, and assignments.
paddbutt = Button(frame2, text="Add Professor", width = 30, command = addProf)
paddbutt.pack(side = BOTTOM)
paddbutt.grid(row=1)
caddbutt = Button(frame2, text="Add Class", width = 30, command = addClass)
caddbutt.pack(side = BOTTOM)
caddbutt.grid(row=2)
prembutt = Button(frame2, text="Remove Professor", width = 30, command = remProf)
prembutt.pack(side = BOTTOM)
prembutt.grid(row=3)
crembutt = Button(frame2, text="Remove Class", width = 30, command = remClass)
crembutt.pack(side = BOTTOM)
crembutt.grid(row=4)
pclabutt = Button(frame2, text="Add Assignment", width = 30, command = addEntry)
pclabutt.pack(side = BOTTOM)
pclabutt.grid(row=5)
pclrbutt = Button(frame2, text="Remove Assignment", width = 30, command = remEntry)
pclrbutt.pack(side = BOTTOM)
pclrbutt.grid(row=6)
pcllabel = Label(frame3, text = "For assignments, select a professor and course (the highlighting will disappear but this is fine)")
pcllabel.pack(side= BOTTOM)
pcllabel.grid(row=7)
root.mainloop()
| [
"[email protected]"
] | |
8aee9580ace49c930fcb28d706798f51905c3ab5 | ecacf5a06d2b2ece7dedcd726f90f327b57cdb61 | /Macros/FMX/FMX_Glyph_Table.py | 2577514373258712e85a0707610cd636f183e013 | [
"MIT"
] | permissive | beckonandmuse/freemix-fontlab | ea5fee59c59e1cbec59a5252a4ed943deae52ae3 | 259198115ee91ab15a7c78be182cafcd503f330e | refs/heads/master | 2020-12-28T22:12:20.660270 | 2016-01-25T10:37:18 | 2016-01-25T10:37:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | #FLM: FMX Glyph Table
# http://remix-tools.com/freemix
# (C) Tim Ahrens, 2012
from FL import *
while fl.font != None:
import FMX_glyph_table_copy
import FMX_glyph_table_paste
try:
import os, MacOS
import wx
use_wx = True
except ImportError:
try:
from Carbon.Scrap import GetCurrentScrap, ClearCurrentScrap
use_wx = False
except ImportError:
fl.Message( 'Sorry, this macro is currently only available for Mac OS.' )
break
# get clipboard
if use_wx:
try:
clip = wx.Clipboard()
except:
dummy = wx.App(0)
clip = wx.Clipboard()
text = wx.TextDataObject()
clip.Open()
clip.GetData( text )
clip.Close()
clip_rows = text.GetText().encode( 'utf-8' ).splitlines()
else:
try:
clip_rows = GetCurrentScrap().GetScrapFlavorData( 'utf8' ).splitlines()
except ( TypeError, MacOS.Error ):
try:
clip_rows = GetCurrentScrap().GetScrapFlavorData( 'TEXT' ).splitlines()
except ( TypeError, MacOS.Error ):
clip_rows = ['']
if clip_rows:
clip_columns = clip_rows.pop(0).split('\t')
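		# a header row containing 'base name' means the clipboard holds a previously copied glyph table, so offer to paste it back; otherwise fall through to the copy dialog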
if 'base name' in clip_columns:
dialog = FMX_glyph_table_paste.FMXdialog()
dialog.select_master( FMX_glyph_table_copy.dialog_selected_master )
if dialog.d.Run() == 1:
fl.output = ''
print 'Pasting table\n'
FMX_glyph_table_paste.paste_table( clip_columns, clip_rows, fl.font, dialog.selected_master )
print '\nDone.'
break
# dialog for copy
dialog = FMX_glyph_table_copy.FMXdialog( FMX_glyph_table_copy.dialog_selection )
dialog.select_master( FMX_glyph_table_copy.dialog_selected_master )
if dialog.d.Run() != 2:
FMX_glyph_table_copy.dialog_selection = dialog.selection
FMX_glyph_table_copy.dialog_selected_master = dialog.selected_master
# write to clipboard
if use_wx:
text = wx.TextDataObject( dialog.string )
# clip = Clipboard()
clip.Open()
clip.SetData( text )
clip.Close()
else:
ClearCurrentScrap()
GetCurrentScrap().PutScrapFlavor( 'utf8', 0, dialog.string.encode( 'utf8' ) )
break
| [
"[email protected]"
] | |
60c187b1c9946b736e3099653c70367fc5235f80 | f6457da7fa1852c108e60431913b1064fa554228 | /scrc/dataset_creation/citation_dataset_creator.py | fd28e2e28b617b630c98d76c9670b54022680084 | [] | no_license | schnajos/SwissCourtRulingCorpus | f6b8b9c5780ed846fd542ddf6a98e6518b174f96 | de7ef4e9e75beee55ca4b0362d5e3a96fbe68b2b | refs/heads/main | 2023-08-24T14:37:22.010544 | 2021-10-13T07:13:46 | 2021-10-13T07:13:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,158 | py | import configparser
import inspect
from collections import Counter
from root import ROOT_DIR
from scrc.dataset_creation.dataset_creator import DatasetCreator
from scrc.utils.log_utils import get_logger
import numpy as np
import pandas as pd
from scrc.utils.main_utils import string_contains_one_of_list
from scrc.utils.term_definitions_converter import TermDefinitionsConverter
class CitationDatasetCreator(DatasetCreator):
"""
Creates a dataset with the text as input and the citations as labels
"""
def __init__(self, config: dict):
super().__init__(config)
self.logger = get_logger(__name__)
self.debug = False
self.split_type = "date-stratified"
self.dataset_name = "citation_prediction"
self.num_ruling_citations = 1000 # the 1000 most common ruling citations will be included
def get_dataset(self, feature_col, lang, save_reports):
df = self.get_df(self.get_engine(self.db_scrc), feature_col, 'citations', lang, save_reports)
this_function_name = inspect.currentframe().f_code.co_name
folder = self.create_dir(self.datasets_subdir, this_function_name)
# calculate most common BGE citations
most_common_rulings = self.get_most_common_citations(df, folder, 'rulings')
# list with only the most common laws
most_common_laws = self.get_most_common_citations(df, folder, 'laws')
law_abbr_by_lang = self.get_law_abbr_by_lang()
# IMPORTANT: we need to take care of the fact that the laws are named differently in each language but refer to the same law!
def replace_citations(series, ref_mask_token="<ref>"):
# TODO think about splitting laws and rulings into two separate labels
labels = set()
for law in series.citations['laws']:
citation = law['text']
found_string_in_list = string_contains_one_of_list(citation, list(law_abbr_by_lang[lang].keys()))
if found_string_in_list:
series.text = series.text.replace(citation, ref_mask_token)
labels.add(law_abbr_by_lang['de'][found_string_in_list])
for ruling in series.citations['rulings']:
citation = ruling['text']
if string_contains_one_of_list(citation, most_common_rulings):
series.text = series.text.replace(citation, ref_mask_token)
labels.add(citation)
series['label'] = list(labels)
return series
df = df.apply(replace_citations, axis='columns')
df = df.rename(columns={"text": "text"}) # normalize column names
labels, _ = list(np.unique(np.hstack(df.label), return_index=True))
return df, labels
def get_most_common_citations(self, df, folder, type, plot_n_most_common=10):
"""
Retrieves the most common citations of a given type (rulings/laws).
Additionally plots the plot_n_most_common most common citations
:param df:
:param folder:
:param type:
:return:
"""
valid_types = ['rulings', 'laws']
if type not in valid_types:
raise ValueError(f"Please supply a valid citation type from {valid_types}")
type_citations = []
for citations in df.citations:
for type_citation in citations[type]:
type_citations.append(type_citation['text'])
most_common_with_frequency = Counter(type_citations).most_common(self.num_ruling_citations)
# Plot the 10 most common citations
# remove BGG articles because they are obvious
most_common_interesting = [(k, v) for k, v in most_common_with_frequency if 'BGG' not in k]
ruling_citations = pd.DataFrame.from_records(most_common_interesting[:plot_n_most_common],
columns=['citation', 'frequency'])
ax = ruling_citations.plot.bar(x='citation', y='frequency', rot=90)
ax.get_figure().savefig(folder / f'most_common_{type}_citations.png', bbox_inches="tight")
return list(dict(most_common_with_frequency).keys())
def get_law_abbr_by_lang(self):
term_definitions = TermDefinitionsConverter().extract_term_definitions()
law_abbr_by_lang = {lang: dict() for lang in self.languages}
for definition in term_definitions:
for lang in definition['languages']:
if lang in self.languages:
for entry in definition['languages'][lang]:
if entry['type'] == 'ab': # ab stands for abbreviation
# append the string of the abbreviation as key and the id as value
law_abbr_by_lang[lang][entry['text']] = definition['id']
return law_abbr_by_lang
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read(ROOT_DIR / 'config.ini') # this stops working when the script is called from the src directory!
citation_dataset_creator = CitationDatasetCreator(config)
citation_dataset_creator.create_dataset()
| [
"[email protected]"
] | |
0a6b81d4784680af10eaaef85d0ebdc6e2d629af | 0d995e4e1b8032575ee63a12ea5ea0f53cbd064c | /django_utils/experiments/experiment2/plots/flow_histograms.py | 6ae7955a72cdc62c6ff02ad78b283f68c8b069f9 | [] | no_license | megacell/phi | b4247d3d0b628163a07332ec6a7f7330d77494ad | 77c0360b19a0a9dcf8692c0473e09a3d9b712274 | refs/heads/master | 2021-01-18T15:10:00.415006 | 2015-08-16T03:43:39 | 2015-08-16T03:43:39 | 33,515,243 | 0 | 1 | null | 2015-04-07T01:29:59 | 2015-04-07T01:29:58 | null | UTF-8 | Python | false | false | 1,139 | py | __author__ = 'lei'
from django.db import connection
from django_utils import config
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.io as sio
from collections import defaultdict
def plot_route_flow():
sql = '''select flow_count from experiment2_routes;'''
cursor = connection.cursor()
cursor.execute(sql)
x = [i for i, in cursor]
plt.hist(x,bins=50, range=(1,50))
plt.xlabel("Flow")
plt.ylabel("Number of routes")
plt.show()
class LinkAgg:
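    # accumulates the total flow assigned to each link across all routes that traverse it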
def __init__(self):
self.link_flows = defaultdict(lambda: 0)
def add(self, flow_count, links):
for i in links:
self.link_flows[i] += flow_count
def get_flows(self):
return self.link_flows.values()
def plot_link_flow():
sql = '''select flow_count, r.links from experiment2_routes r;'''
cursor = connection.cursor()
cursor.execute(sql)
la = LinkAgg()
for flow, links in cursor:
la.add(flow, links)
plt.hist(la.get_flows() ,bins=100, range=(0,10000))
plt.xlabel("Flow")
plt.ylabel("Number of links")
plt.show()
plot_route_flow()
plot_link_flow() | [
"[email protected]"
] | |
f2f3041aa7667a569d3d6c52ae2beb148a2044e0 | 65d5b402a50c48a4545782c04b2886f0fbda8f25 | /E02.py | 03fdd5e2ec073864c058b52824492111444e34c2 | [] | no_license | jinxyou/suda-python-programming | 9fa09d8fff47f6c879323fe952b8d0407009616a | 6099eaa86dde134d20eaa6d7cb1aa768ab7a2aba | refs/heads/master | 2023-07-18T11:49:37.425508 | 2021-08-25T15:35:21 | 2021-08-25T15:35:21 | 262,232,810 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | name=input("请输入姓名:")
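# translation: the input prompt above asks "please enter your name"; the print below reports "the name string length is"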
print("姓名字符串长度为",len(name)) | [
"[email protected]"
] | |
6664eb92071a1b4880bc280fc80816896294f906 | 6237a4d717a7055c9b0f1de3204554cbf5069b62 | /Book/migrations/0002_auto_20210318_1333.py | 8a219f62c192ddb48de127bc496bf68f0cf02708 | [] | no_license | sylvia198591/BookProject | 180b9072c13cad5a5f996d60946caab78ae503b1 | eabfb6cfe63e45f7f73c1500bad6aa8d3a1c62fb | refs/heads/master | 2023-03-27T00:25:10.827236 | 2021-03-22T08:43:49 | 2021-03-22T08:43:49 | 350,118,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | # Generated by Django 3.1.7 on 2021-03-18 08:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Book', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='author',
options={},
),
]
| [
"[email protected]"
] | |
2c9a7cab66384539834af9271b2d0eb96a66ad7b | 505ce732deb60c4cb488c32d10937a5faf386dce | /di_website/whatwedo/migrations/0015_auto_20190902_1127.py | e6229e5acdbd22b38171004cb2641e9339fa2ee9 | [] | no_license | davidebukali/di_web_test | cbdbb92b2d54c46771b067a24480e6699f976a15 | a826817a553d035140bb8b6768f3fd2b451199d8 | refs/heads/develop | 2023-02-11T13:21:26.281899 | 2021-01-08T04:37:34 | 2021-01-08T04:37:34 | 319,560,677 | 0 | 0 | null | 2021-01-08T04:37:35 | 2020-12-08T07:30:51 | HTML | UTF-8 | Python | false | false | 11,324 | py | # Generated by Django 2.2.3 on 2019-09-02 11:27
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('whatwedo', '0014_exampletopic_servicespage_servicespagerelatedexample_servicespagerelatednews'),
]
operations = [
migrations.AlterField(
model_name='servicespage',
name='body',
field=wagtail.core.fields.StreamField([('paragraph_block', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', template='blocks/paragraph_block.html')), ('block_quote', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.TextBlock())])), ('button_block', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('link_block', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('credit_name', wagtail.core.blocks.CharBlock(help_text='Name of the image source', required=False)), ('credit_url', wagtail.core.blocks.URLBlock(help_text='URL of the image source', required=False))])), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, template='blocks/embed_block.html'))], blank=True, null=True, verbose_name='Page Body'),
),
migrations.AlterField(
model_name='servicespage',
name='sections',
field=wagtail.core.fields.StreamField([('paragraph_block', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'])), ('center', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('block_quote', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.TextBlock()), ('center', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('banner_block', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, template='blocks/embed_block.html')), ('text', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.TextBlock(template='blocks/banner/text.html')), ('list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.TextBlock()), ('content', wagtail.core.blocks.TextBlock(required=False))], template='blocks/banner/list_item.html'), template='blocks/banner/list.html'))])), ('meta', wagtail.core.blocks.CharBlock(help_text='Anything from a name, location e.t.c - usually to provide credit for the text', required=False)), ('buttons', wagtail.core.blocks.StreamBlock([('button', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('document_box', wagtail.core.blocks.StructBlock([('box_heading', wagtail.core.blocks.CharBlock(icon='title', required=False)), ('documents', wagtail.core.blocks.StreamBlock([('document', wagtail.documents.blocks.DocumentChooserBlock())], required=False)), ('dark_mode', wagtail.core.blocks.BooleanBlock(default=False, help_text='Red on white if unchecked. White on dark grey if checked.', required=False))]))], required=False)), ('media_orientation', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')], required=False)), ('light', wagtail.core.blocks.BooleanBlock(default=False, help_text='Sets the background to a lighter colour', required=False))]))], blank=True, null=True, verbose_name='Sections'),
),
migrations.AlterField(
model_name='whatwedopage',
name='body',
field=wagtail.core.fields.StreamField([('paragraph_block', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', template='blocks/paragraph_block.html')), ('block_quote', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.TextBlock())])), ('button_block', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('link_block', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('credit_name', wagtail.core.blocks.CharBlock(help_text='Name of the image source', required=False)), ('credit_url', wagtail.core.blocks.URLBlock(help_text='URL of the image source', required=False))])), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, template='blocks/embed_block.html'))], blank=True, null=True, verbose_name='Page Body'),
),
migrations.AlterField(
model_name='whatwedopage',
name='sections',
field=wagtail.core.fields.StreamField([('locations_map', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(icon='fa-heading', required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', required=False, template='blocks/paragraph_block.html')), ('button', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('light', wagtail.core.blocks.BooleanBlock(default=False, help_text='Applies a lighter background to the section', required=False))])), ('focus_area', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Our focus areas', icon='fa-heading', required=False)), ('focus_areas', wagtail.core.blocks.ListBlock(wagtail.core.blocks.TextBlock(icon='fa-text'), required=False)), ('button', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('light', wagtail.core.blocks.BooleanBlock(default=False, help_text='Applies a lighter background to the section', required=False))])), ('expertise', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Our expertise', icon='fa-heading', required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', required=False, template='blocks/paragraph_block.html')), ('expertise_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.TextBlock(icon='fa-text')), ('description', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', required=False, template='blocks/paragraph_block.html'))]), required=False)), ('light', wagtail.core.blocks.BooleanBlock(default=False, help_text='Applies a lighter background to the section', required=False))])), ('banner', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, template='blocks/embed_block.html')), ('text', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.TextBlock(template='blocks/banner/text.html')), ('list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.TextBlock()), ('content', wagtail.core.blocks.TextBlock(required=False))], template='blocks/banner/list_item.html'), template='blocks/banner/list.html'))])), ('meta', wagtail.core.blocks.CharBlock(help_text='Anything from a name, location e.t.c - usually to provide credit for the text', required=False)), ('buttons', wagtail.core.blocks.StreamBlock([('button', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('document_box', wagtail.core.blocks.StructBlock([('box_heading', wagtail.core.blocks.CharBlock(icon='title', required=False)), ('documents', 
wagtail.core.blocks.StreamBlock([('document', wagtail.documents.blocks.DocumentChooserBlock())], required=False)), ('dark_mode', wagtail.core.blocks.BooleanBlock(default=False, help_text='Red on white if unchecked. White on dark grey if checked.', required=False))]))], required=False)), ('media_orientation', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')], required=False)), ('light', wagtail.core.blocks.BooleanBlock(default=False, help_text='Sets the background to a lighter colour', required=False))])), ('duo', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(help_text='Section heading', icon='fa-heading', required=False)), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, template='blocks/embed_block.html')), ('side_text', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', required=True, template='blocks/paragraph_block.html')), ('button', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))]))])), ('testimonial', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', template='blocks/paragraph_block.html')), ('cite', wagtail.core.blocks.TextBlock(help_text='The source of the testimonial')), ('image', wagtail.images.blocks.ImageChooserBlock(required=False))]))], blank=True, null=True, verbose_name='Sections'),
),
]
| [
"[email protected]"
] | |
886731935a5dfe268ff4db27827ce72009460777 | 20a6bc61215f407fb2c5a3746e286e73514927be | /AWSManager.py | 5affe9577b36d641c5613f93e3748b288ab2904d | [] | no_license | DijunQuant/EquityCrash | 87d9d5601129275f6a4a3a9ba63bdee71c4485f0 | e2b1765bad8434c4e04eaccec18daaa743da2fb5 | refs/heads/master | 2021-09-07T10:48:04.713846 | 2018-02-21T20:02:39 | 2018-02-21T20:02:39 | 114,173,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | import s3fs
import boto3
# note: list_buckets() lives on the low-level client, not the resource API
s3 = boto3.client('s3')
bucket_name_features='flashcrash_features'
# Call S3 to list current buckets
response = s3.list_buckets()
# Get a list of all bucket names from the response
buckets = [bucket['Name'] for bucket in response['Buckets']]
# Print out the bucket list
print("Bucket List: %s" % buckets)
if bucket_name_features not in buckets:
s3.create_bucket(Bucket=bucket_name_features)
#s3.Object(bucket_name_features, 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))
ec2 = boto3.client('ec2')
response = ec2.describe_instances()
print(response) | [
"[email protected]"
] | |
dc41fe3d3d84e6b6c923df62180017c5ec8d506a | 18e60e3c8dadd6f29f18ebbc23e6f453b5583c00 | /ex2/costFunctionReg.py | 581c5be9e82ee45fb165cc238a0ec789854e6cdf | [
"MIT"
] | permissive | junwon1994/Coursera-ML | c6c8be494aaf82fd87aa1dd92afd5c384c223913 | 91e96c3c14c058cd6d745a4fada1baf40d91458f | refs/heads/master | 2021-04-09T11:24:10.063113 | 2018-04-27T04:48:51 | 2018-04-27T04:48:51 | 125,448,256 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | import numpy as np
from costFunction import costFunction
def costFunctionReg(theta, X, y, lambda_):
"""Computes the cost of using theta as the parameter
for regularized logistic regression and the gradient
of the cost w.r.t. to the parameters.
"""
# Initialize some useful values
m = len(y) # number of training examples
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the cost of a particular choice of theta.
# You should set J to the cost.
# Compute the partial derivatives and set grad to the partial
# derivatives of the cost w.r.t. each parameter in theta
J, grad = costFunction(theta, X, y)
theta = np.r_[0, theta[1:]]
J += lambda_ * sum(theta**2) / (2 * m)
grad += lambda_ * theta / m
# =============================================================
return J, grad
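# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): exercises costFunctionReg on tiny
# made-up data. It assumes the course-provided costFunction(theta, X, y)
# returns the unregularized cost and gradient; shapes and values below are
# invented for demonstration.
if __name__ == '__main__':
    X = np.c_[np.ones(3), [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]  # 3 examples: intercept + 2 features
    y = np.array([1, 0, 1])
    theta = np.zeros(X.shape[1])
    J, grad = costFunctionReg(theta, X, y, lambda_=1.0)
    print('cost:', J)
    print('gradient:', grad)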
| [
"[email protected]"
] | |
0fc4a08b5dfb3374f739c3e8d8f2d14726a326c3 | 7257456218030c7d7c5674a07d9f110794d410bb | /django-project/accounting/urls.py | 3c00fc7c51773393b0c36ae63bc4daaf98148aed | [] | no_license | palazzem/invoicer | d3c8141b6931bb3df871bbc79afdc1984b04987c | 6ed4e8b6413f23854e894cc36fb5725a23f3dd9a | refs/heads/master | 2020-12-24T13:29:06.550844 | 2014-06-07T23:42:39 | 2014-06-07T23:42:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.conf.urls import patterns, include, url
from rest_framework.routers import DefaultRouter
from .customers.apiviews import CustomerViewSet
router = DefaultRouter()
router.register('customers', CustomerViewSet)
urlpatterns = patterns(
'',
url(r'^', include(router.urls)),
)
| [
"[email protected]"
] | |
303ca26d0e68eec756a099ea155af19a2ae664b0 | a5536039f375e28ad2c8fdb153f63feae7a534cf | /find-mersenne-primes | 82216f51626442d222e850b84e996b6496fc0f65 | [
"CC0-1.0"
] | permissive | za3k/short-programs | 70fe9b362e43d24c39bc4912f974d37699d5c690 | c543a64d1a1a8be26d16ab33bcffdea76eabf55c | refs/heads/master | 2023-08-09T23:19:34.689912 | 2023-07-23T16:05:18 | 2023-07-23T16:05:18 | 36,656,691 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | #!/usr/bin/python
def sieve(n):
primes = [2]
    for x in range(3, n):  # start at 3 so the seed prime 2 is not appended twice
for p in primes:
if p**2 >x:
primes.append(x)
break
if x % p == 0:
break
return primes
def is_prime(n):
return not any(n % x == 0 for x in range(2, int(n**0.5)+1))
#return not any(n % x == 0 for x in range(2, n))
def is_mersenne_prime(p):
m = 2**p-1
s = 4
for _ in range(p-2):
s = (s**2 - 2) % m
return s == 0
print(' '.join([str(x) for x in sieve(10000) if is_mersenne_prime(x)]))
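# Worked example (illustrative): Lucas-Lehmer for p = 5, where M5 = 2**5 - 1 = 31.
# Starting from s = 4 the loop runs p - 2 = 3 times:
#   s = (4**2  - 2) % 31 = 14
#   s = (14**2 - 2) % 31 = 8
#   s = (8**2  - 2) % 31 = 0
# s ends at 0, so 31 is a Mersenne prime. Note that p = 2 is never reported,
# since the test above only applies to odd prime exponents.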
| [
"[email protected]"
] | ||
97fe78f09f019d6d1070051a2856d4348a5f139d | 3777658387aa9e78d7c04202d7fd47d59b9e1271 | /fraudAnalysis/Text_analysis/text_cleaning.py | 2bbfacf59b14b5e7a19122740a2821e638eaaaa3 | [] | no_license | jocoder22/PythonDataScience | 709363ada65b6db61ee73c27d8be60587a74f072 | c5a9af42e41a52a7484db0732ac93b5945ade8bb | refs/heads/master | 2022-11-08T17:21:08.548942 | 2022-10-27T03:21:53 | 2022-10-27T03:21:53 | 148,178,242 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | py | #!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import requests
from zipfile import ZipFile
from io import BytesIO
import nltk
from nltk import word_tokenize, wordpunct_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
import string
import re
plt.style.use('ggplot')
sp = '\n\n'
url2 = 'https://assets.datacamp.com/production/repositories/2162/datasets/94f2356652dc9ea8f0654b5e9c29645115b6e77f/chapter_4.zip'
# download all the zip files
response = requests.get(url2)
# unzip the content
zipp = ZipFile(BytesIO(response.content))
# Display file names in the zip file
mylist = [filename for filename in zipp.namelist()]
data = pd.read_csv(zipp.open(mylist[5]))
data2 = data.copy()
print(data2.content.head())
data2['newcontent'] = data2['content'].apply(word_tokenize)
data2['newcontent2'] = data2.apply(
lambda row: wordpunct_tokenize(row['content']), axis=1)
# data2['newcontent2'] = data2[['newcontent2']].rstrip()
excludePunt = set(string.punctuation)
stopword = set(stopwords.words('english'))
stopword.update(("to", "th", "e", "cc", "subject", "http", "from",
"sent", "ect", "u", "fwd", "w", "n", "s", "www", "com"))
wordlemm = WordNetLemmatizer()
wordporter = SnowballStemmer("english")
# Define word cleaning function
def cleantext(text, stop):
text = str(text).rstrip()
stopfree = " ".join([word for word in text.lower().split() if (
(word not in stopword) and (not word.isdigit()))])
puncfree = ''.join(word for word in stopfree if word not in excludePunt)
lemmy = " ".join(wordlemm.lemmatize(word)
for word in puncfree.split())
result = " ".join(wordporter.stem(word) for word in lemmy.split())
return result
text_clean = []
for text in data['clean_content']:
text_clean.append(cleantext(text, stopword).split())
print(text_clean[:2])
data2['cleanedcontent'] = text_clean
print(data2[['content', 'cleanedcontent']].head())
print(data2[['newcontent2', 'newcontent']].head())
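# Minimal sketch (illustrative): cleantext() can also be applied to a single
# ad-hoc string; the exact output depends on the installed NLTK data
# (stopwords/WordNet), so none is asserted here.
sample_sentence = "The suspects were transferring 1500 dollars to offshore accounts!"
print(cleantext(sample_sentence, stopword))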
| [
"[email protected]"
] | |
6a65a43269cda4781b29a8941808b9e47c02de4c | e75affcef5661d4edd0b45f3d74aed9ee06fb37e | /VolumeCalculator.py | 046352bf101d03c005e5453aace266e33cbff21d | [] | no_license | Pyrodox/Volume-Calculator | a9ebb8a32d0d5e67b491c1dd7f3b5571c01c05f0 | 4d1831f9c60b4376d4c16b45626b99d13e1b4e93 | refs/heads/main | 2023-03-08T19:13:24.276901 | 2021-02-26T05:20:52 | 2021-02-26T05:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | from Shift15.Restart_Function import confirm_restart
class RectangularPrism:
def __init__(self, length, width, height):
self.length = length
self.width = width
self.height = height
def volume(self):
volume = self.length * self.width * self.height
print(volume)
class Cylinder:
def __init__(self, r, h):
self.r = r
self.h = h
def volume(self):
volume = 3.14 * (self.r**2) * self.h
print(volume)
class Sphere:
def __init__(self, r):
self.r = r
def volume(self):
volume = 1.33 * 3.14 * (self.r**3)
print(volume)
class Cone:
def __init__(self, r, h):
self.r = r
self.h = h
def volume(self):
volume = .33 * 3.14 * (self.r**2) * self.h
print(volume)
class SquarePyramid:
def __init__(self, length, w, h):
self.length = length
self.w = w
self.h = h
def volume(self):
volume = .33 * self.length * self.w * self.h
print(volume)
def volume_calculations(option1):
val_error = "Please enter your values again."
if option1 == "rectangle":
try:
rlength = float(input("Please type the length: "))
rwidth = float(input("Please type the width: "))
rheight = float(input("Please type the height: "))
r1 = RectangularPrism(rlength, rwidth, rheight)
r1.volume()
except ValueError:
print(val_error)
elif option1 == "cylinder":
try:
cyradius = float(input("Please type the radius: "))
cyheight = float(input("Please type the height: "))
cy1 = Cylinder(cyradius, cyheight)
cy1.volume()
except ValueError:
print(val_error)
elif option1 == "sphere":
try:
sphradius = float(input("Please type the radius: "))
sp1 = Sphere(sphradius)
sp1.volume()
except ValueError:
print(val_error)
elif option1 == "cone":
try:
coradius = float(input("Please type the radius: "))
coheight = float(input("Please type the height: "))
co1 = Cone(coradius, coheight)
co1.volume()
except ValueError:
print(val_error)
elif option1 == "pyramid":
try:
pylength = float(input("Please type the length: "))
            pywidth = float(input("Please type the width: "))
pyheight = float(input("Please type the height: "))
py1 = SquarePyramid(pylength, pywidth, pyheight)
py1.volume()
except ValueError:
print(val_error)
print("This calculator calculates volumes of rectangular prisms, cylinders, cones, square pyramids, and spheres.")
print("Please choose: rectangular prism by typing rectangle, and pyramid for square pyramids. "
"The rest are the same (cone is cone).")
while True:
choice_list = ["rectangle", "cylinder", "cone", "pyramid", "sphere"]
while True:
option = input("Please type which 3D shape you'd like to calculate the volume of: ")
option = option.strip().lower()
if option in choice_list:
break
while option not in choice_list:
print("Invalid input. Please read the instructions and try again")
break
volume_calculations(option)
if confirm_restart() == "yes":
print("The program has reset.")
continue
else:
break
| [
"[email protected]"
] | |
594d250e19d46d8b1250521d5e6ad4698faa04bd | c785e067692d37788c6ff7436b0660f7c8264934 | /Cscapp/migrations/0003_auto_20170718_0130.py | 728213db0f89ab2494e30200c503ea9abda54ecc | [] | no_license | Favouroked/CSC | 35ce8a847c95f8ae8cc188f086fbb5b015942bd2 | cd9e1732bb0e59bc2c73fec271ae23c68753a83c | refs/heads/master | 2021-01-01T17:35:16.779892 | 2017-07-25T06:04:47 | 2017-07-25T06:04:47 | 98,106,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2017-07-18 01:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Cscapp', '0002_auto_20170718_0126'),
]
operations = [
migrations.AlterField(
model_name='textbook',
name='url',
field=models.URLField(null=True),
),
]
| [
"[email protected]"
] | |
eeeea0c38b93019c616bf2de8c2e56b260cc80e3 | eba3e4a3935d6422d1ed85aaf69337f5ba15fc74 | /tg-buildbot-extensions/WebApp/bbwi/start-bbwi.py | 3a51513f330a667a512363aeae70c9bf6ca8eabd | [] | no_license | arianepaola/tg2jython | 2ae74250ca43b021323ef0951a9763712c2eb3d6 | 971b9c3eb8ca941d1797bb4b458f275bdca5a2cb | refs/heads/master | 2021-01-21T12:07:48.815690 | 2009-03-27T02:38:11 | 2009-03-27T02:38:11 | 160,242 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | #!/home/steven/TurboGears/WebApp/webapp/bin/python
# -*- coding: utf-8 -*-
"""Start script for the bbwi TurboGears project.
This script is only needed during development for running from the project
directory. When the project is installed, easy_install will create a
proper start script.
"""
import sys
from bbwi.commands import start, ConfigurationError
if __name__ == "__main__":
try:
start()
except ConfigurationError, exc:
sys.stderr.write(str(exc))
sys.exit(1)
| [
"[email protected]"
] | |
88c24fc29ccc10f15e21e8b6e4fb94ce0c040e4d | 2223af74b56abb99c034cde43628009a6d8aebd9 | /RippleNet/src/main.py | c74d9c305bd89dd2fde2d080e85030149bf525eb | [] | no_license | rollben/KGRecommendation | 155e8b7177fb8c7138fe18638b33b53e138f4eb7 | 9fa903159f469143a3110b7483335c18a992c2f2 | refs/heads/master | 2022-06-29T16:13:05.493594 | 2020-05-10T17:00:01 | 2020-05-10T17:00:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | import argparse
import numpy as np
from data_loader import load_data
from time import time
from train import train
np.random.seed(555)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='movie', help='which dataset to use')
parser.add_argument('--dim', type=int, default=16, help='dimension of entity and relation embeddings')
parser.add_argument('--n_hop', type=int, default=2, help='maximum hops')
parser.add_argument('--kge_weight', type=float, default=0.01, help='weight of the KGE term')
parser.add_argument('--l2_weight', type=float, default=1e-7, help='weight of the l2 regularization term')
parser.add_argument('--lr', type=float, default=0.02, help='learning rate')
parser.add_argument('--batch_size', type=int, default=1480, help='batch size')
parser.add_argument('--n_epoch', type=int, default=10, help='the number of epochs')
parser.add_argument('--n_memory', type=int, default=32, help='size of ripple set for each hop')
parser.add_argument('--item_update_mode', type=str, default='plus_transform',
help='how to update item at the end of each hop')
parser.add_argument('--using_all_hops', type=bool, default=True,
help='whether using outputs of all hops or just the last hop when making prediction')
args = parser.parse_args()
show_loss = False
show_time = True
t = time()
data_info = load_data(args)
train(args, data_info, show_loss)
if show_time:
print('time used: %d s' % (time() - t)) | [
"[email protected]"
] | |
2e38946c5f826e0d1b334a8fa864d3a0e9ab5d50 | 9492fef60cc72579bb56de6463239601ea274ce1 | /Lab2/CrawlingData.py | 789fb101f6dc200fee2db93f5fd5395f8340f44b | [] | no_license | smeissa2019/Python-Work | 5c250e63086741ee9e1ae00f555260afdfc16cc2 | 0c26de21502489aeff1897054c2f7d586e0d9684 | refs/heads/master | 2020-07-16T04:55:40.682791 | 2019-12-03T23:50:41 | 2019-12-03T23:50:41 | 205,723,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | #%%
from random import choice
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
from textblob import TextBlob
import nltk
text = strip_headers(load_etext(60457)).strip()
answerUser = strip_headers(load_etext(1524)).strip()
blob = TextBlob(text)
source= open('C:/Users/sa418774/Desktop/PyWC/Python-Work/Week 6/book.txt', 'w', encoding="utf-16", newline='\n')
source2= open('C:/Users/sa418774/Desktop/PyWC/Python-Work/Week 6/book2.txt', 'w', encoding="utf-16", newline='\n')
source.write(text)
source2.write(answerUser)
source.close()
source2.close()
max = 0
index = 0
for key, sentence in enumerate(blob.sentences):
if(len(sentence.words)> max):
max = len(sentence.words)
index = key
print(max)
z = []
x = choice(text)
answerUser = input(x).strip().lower()
y = 1
while y >= 1:
if answerUser != "lol stop it":
answerUser = input("but Why?").strip().lower()
        def analysis():
            obj = TextBlob(answerUser)
            z = obj.sentiment.polarity
            if z == 0:
                print("I am not sure if you like me or not")
            elif z > 0:  # any positive polarity counts as liking the questions
                print("I'm happy you like my questions so far, here is another one")
            else:
                print("but why are you mad?")
        analysis()
else:
print("Oh ..Okay")
exit()
| [
"[email protected]"
] | |
5d350e9643cc84e27261bf7db92244c305f5da2c | 24fac945c7825c502f9fb2e6df06db26b80b5676 | /blog/models.py | da8693a19118bfee451413073db331c1169e52c4 | [] | no_license | mario220696/my-first-blog | 8f0a9883c4ecde58a8c3483e5ad0823b81a4db7c | 72ec03a579965c984a770f0c4cca0a3eca28aa46 | refs/heads/master | 2020-03-17T11:48:03.954834 | 2018-05-15T19:28:36 | 2018-05-15T19:28:36 | 133,564,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
create_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title | [
"[email protected]"
] | |
7a99dbfd5c743c18645b105510b5d195beabbf1b | dd8daa4488be9e9c245d0c941afa7f1ad6924857 | /mainpage/migrations/0007_alter_article_article_audio.py | 4b676860b89cc491ece4f2fe08c0b1632c19f15d | [] | no_license | 2021-software-training/CQUTieba | 924e7db823eb65562c7a76f2345f65a9a94e96b8 | 59b3debbfa9b1aee267f8423c3a0ac03cdd8d11d | refs/heads/master | 2023-06-18T13:36:07.138091 | 2021-07-17T07:59:37 | 2021-07-17T07:59:37 | 384,048,024 | 2 | 0 | null | 2021-07-12T13:32:04 | 2021-07-08T08:02:18 | Python | UTF-8 | Python | false | false | 405 | py | # Generated by Django 3.2.5 on 2021-07-15 15:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainpage', '0006_remove_comment_comment_audio'),
]
operations = [
migrations.AlterField(
model_name='article',
name='article_audio',
field=models.IntegerField(default=-1),
),
]
| [
"[email protected]"
] | |
d26c22811198cb740c2f0d08c8d2405e5e3b2b21 | be449eb0d57035c5672d11e3db6015be463e2ccf | /new.py | 5bcf74db314af875d31ddd210d472d54683b3310 | [] | no_license | 8181suisho/gitpractice2 | 63b0d3e9749f6907210083fd92af6cfe2088969a | a396fb99c293d922e1c66431d316af87426d97b0 | refs/heads/master | 2020-12-23T13:36:51.194123 | 2020-02-09T20:06:53 | 2020-02-09T20:06:53 | 235,830,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | practice2 = "charenge pull"
print(practice2)
| [
"[email protected]"
] | |
f68284110d8f1e34f8c2d030c2e1f6b25773070c | 60537c95e827a279fdc2cc26b8739a328e8f3b23 | /scripts/install/opt/seagate/cortx/motr/bin/motr_mini_prov.py | da900815323934fcbda105c0f16a4f3c8642923f | [
"Apache-2.0"
] | permissive | xuning97/cortx-motr | aa6f49e77ef4d034d6ff16defcf16a60aa7089dc | 9fe714bb30fa069cbb3bed9c09c09e407623f31f | refs/heads/main | 2023-03-02T10:54:24.653670 | 2021-01-29T14:01:55 | 2021-01-29T14:01:55 | 334,010,300 | 0 | 0 | Apache-2.0 | 2021-01-29T02:03:41 | 2021-01-29T02:03:40 | null | UTF-8 | Python | false | false | 8,735 | py | #!/usr/bin/env python3
#
# Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email [email protected] or [email protected].
#
import sys
import errno
import os
import re
import subprocess
import time
from cortx.utils.conf_store import Conf
MOTR_KERNEL_FILE = "/lib/modules/{kernel_ver}/kernel/fs/motr/m0tr.ko"
MOTR_SYS_FILE = "/etc/sysconfig/motr"
MOTR_CONFIG_SCRIPT = "/opt/seagate/cortx/motr/libexec/motr_cfg.sh"
LNET_CONF_FILE = "/etc/modprobe.d/lnet.conf"
SYS_CLASS_NET_DIR = "/sys/class/net/"
MOTR_SYS_CFG = "/etc/sysconfig/motr"
SLEEP_SECS = 2
TIMEOUT_SECS = 120
class MotrError(Exception):
""" Generic Exception with error code and output """
def __init__(self, rc, message, *args):
self._rc = rc
self._desc = message % (args)
sys.stderr.write("error(%d): %s\n" %(self._rc, self._desc))
def __str__(self):
if self._rc == 0: return self._desc
return "error(%d): %s" %(self._rc, self._desc)
def execute_command(cmd, timeout_secs):
ps = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True)
stdout, stderr = ps.communicate(timeout=timeout_secs);
stdout = str(stdout, 'utf-8')
sys.stdout.write(f"[CMD] {cmd}\n")
sys.stdout.write(f"[OUT]\n{stdout}\n")
sys.stdout.write(f"[RET] {ps.returncode}\n")
if ps.returncode != 0:
raise MotrError(ps.returncode, f"{cmd} command failed")
return stdout, ps.returncode
def get_current_node(self):
cmd = "cat /etc/machine-id"
machine_id = execute_command(cmd, TIMEOUT_SECS)
machine_id = machine_id[0].split('\n')[0]
return Conf.get(self._index, 'cluster>server_nodes')[machine_id]
def restart_services(services):
for service in services:
cmd = "service {} stop".format(service)
execute_command(cmd, TIMEOUT_SECS)
cmd = "service {} start".format(service)
execute_command(cmd, TIMEOUT_SECS)
cmd = "service {} status".format(service)
execute_command(cmd, TIMEOUT_SECS)
def validate_file(file):
if not os.path.exists(file):
raise MotrError(errno.ENOENT, "{} not exist".format(file))
def is_hw_node(self):
node_type = Conf.get(self._index, f'cluster>{self._server_id}')['node_type']
if node_type == "HW":
return True
else:
return False
def validate_motr_rpm(self):
try:
cmd = "uname -r"
cmd_res = execute_command(cmd, TIMEOUT_SECS)
op = cmd_res[0]
kernel_ver = op.replace('\n', '')
kernel_module = f"/lib/modules/{kernel_ver}/kernel/fs/motr/m0tr.ko"
sys.stdout.write(f"[INFO] Checking for {kernel_module}\n")
validate_file(kernel_module)
sys.stdout.write(f"[INFO] Checking for {MOTR_SYS_FILE}\n")
validate_file(MOTR_SYS_FILE)
except MotrError as e:
sys.stderr.write("Validate motr rpm failed\n")
sys.exit(e._rc)
def motr_config(self):
is_hw = is_hw_node(self)
if is_hw:
execute_command(MOTR_CONFIG_SCRIPT, TIMEOUT_SECS)
def configure_net(self):
'''Wrapper function to detect lnet/libfabric transport'''
transport_type = Conf.get(self._index,
f'cluster>{self._server_id}')['network']['data']['transport_type']
if transport_type == "lnet":
configure_lnet_from_conf_store(self)
elif transport_type == "libfabric":
configure_libfabric(self)
else:
sys.stderr.write("[ERR] Unknown data transport type\n")
def configure_lnet_from_conf_store(self):
'''
Get iface and /etc/modprobe.d/lnet.conf params from
conf store. Configure lnet. Start lnet service
'''
iface = Conf.get(self._index,
f'cluster>{self._server_id}')['network']['data']['private_interfaces'][0]
iface_type = Conf.get(self._index,
f'cluster>{self._server_id}')['network']['data']['interface_type']
sys.stdout.write(f"[INFO] {iface_type}=({iface})\n")
sys.stdout.write(f"[INFO] Updating {LNET_CONF_FILE}\n")
with open(LNET_CONF_FILE, "w") as fp:
fp.write(f"options lnet networks={iface_type}({iface}) "
f"config_on_load=1 lnet_peer_discovery_disabled=1\n")
time.sleep(SLEEP_SECS)
restart_services(["lnet"])
def configure_libfabric(self):
pass
def create_lvm(node_name, index, metadata_dev):
index = index + 1
vg_name = f"vg_{node_name}_md{index}"
lv_swap_name = f"lv_main_swap{index}"
lv_md_name = f"lv_raw_md{index}"
try:
validate_file(metadata_dev)
cmd = f"fdisk -l {metadata_dev}"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"wipefs --all --force {metadata_dev}"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"pvcreate {metadata_dev}"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"vgcreate {vg_name} {metadata_dev}"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"vgchange --addtag {node_name} {vg_name}"
execute_command(cmd, TIMEOUT_SECS)
cmd = "vgscan --cache"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"lvcreate -n {lv_swap_name} {vg_name} -l 51%VG"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"lvcreate -n {lv_md_name} {vg_name} -l 100%FREE"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"mkswap -f /dev/{vg_name}/{lv_swap_name}"
execute_command(cmd, TIMEOUT_SECS)
cmd = f"test -e /dev/{vg_name}/{lv_swap_name}"
execute_command(cmd, TIMEOUT_SECS)
cmd = (
f"echo \"/dev/{vg_name}/{lv_swap_name} swap "
f"swap defaults 0 0\" >> /etc/fstab"
)
execute_command(cmd, TIMEOUT_SECS)
except:
pass
def config_lvm(self):
metadata_devices = Conf.get(self._index,
f'cluster>{self._server_id}')['storage']['metadata_devices']
sys.stdout.write(f"[INFO] server_id={self._server_id} "
f" metadata_device={metadata_devices}\n")
cmd = "swapoff -a"
execute_command(cmd, TIMEOUT_SECS)
for device in metadata_devices:
create_lvm(self._server_id, metadata_devices.index(device), device)
cmd = "swapon -a"
execute_command(cmd, TIMEOUT_SECS)
def get_lnet_xface() -> str:
lnet_xface = None
try:
with open(LNET_CONF_FILE, 'r') as f:
# Obtain interface name
for line in f.readlines():
if len(line.strip()) <= 0: continue
tokens = re.split(r'\W+', line)
if len(tokens) > 4:
lnet_xface = tokens[4]
break
except:
pass
if lnet_xface == None:
raise MotrError(errno.EINVAL, "Cant obtain iface details from %s"
, LNET_CONF_FILE)
if lnet_xface not in os.listdir(SYS_CLASS_NET_DIR):
raise MotrError(errno.EINVAL, "Invalid iface %s in lnet.conf"
, lnet_xface)
return lnet_xface
def check_pkgs(src_pkgs, dest_pkgs):
missing_pkgs = []
for src_pkg in src_pkgs:
found = False
for dest_pkg in dest_pkgs:
if src_pkg in dest_pkg:
found = True
break
if not found:
missing_pkgs.append(src_pkg)
if missing_pkgs:
raise MotrError(errno.ENOENT, f'Missing pkgs: {missing_pkgs}')
def test_lnet(self):
search_lnet_pkgs = ["kmod-lustre-client", "lustre-client"]
try:
# Check missing luster packages
cmd = 'rpm -qa | grep lustre'
cmd_res = execute_command(cmd, TIMEOUT_SECS)
temp = cmd_res[0]
lustre_pkgs = list(filter(None, temp.split("\n")))
check_pkgs(search_lnet_pkgs, lustre_pkgs)
lnet_xface = get_lnet_xface()
ip_addr = os.popen(f'ip addr show {lnet_xface}').read()
ip_addr = ip_addr.split("inet ")[1].split("/")[0]
cmd = "ping -c 3 {}".format(ip_addr)
cmd_res = execute_command(cmd, TIMEOUT_SECS)
sys.stdout.write("{}\n".format(cmd_res[0]))
except MotrError as e:
pass
| [
"[email protected]"
] | |
6a7d86e8d99455a983c64c61263ac4c176fece7f | be49b64225e4922b13e1891761408cd8b76b7e1a | /ngs_germline/smk/var/var_varscan.smk | 320b6b068d7ed97b09a6942a35c8dde06918af13 | [] | no_license | PengJia6/BioPipe | c727a62305eb1417db6614127cbfcd79ee0b7ec2 | 430be9e8f0277d46d6da4487d2b99b7ab1cc6130 | refs/heads/master | 2023-02-12T17:04:33.187852 | 2021-01-10T08:57:09 | 2021-01-10T08:57:09 | 226,059,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,830 | smk | # Samtools Calling
# https://samtools.github.io/bcftools/
rule Samtools_Mpileup:
input:
unpack(getHQbamsample),
ref=path_genome,
sindex=path_genome + ".fai"
output:
path_data + "germlineVar/varscan/perSample/{bam_sample}/{bam_sample}.Samtools.mpileup"
params:
extra="",
dp=5
threads: config["threads"]["Samtools_Mpileup"]
log:
path_log + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Samtools.MergeVcf.logs"
benchmark:
path_bm + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Samtools.MergeVcf.tsv"
run:
shell(
"{path_samtools}samtools mpileup -B -f {input.ref} -o {output} {input.bam} "
"2>{log} 1>{log} ")
rule Varscan_Call:
input:
rules.Samtools_Mpileup.output
output:
vcf=path_data + "germlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.raw.vcf.gz"
params:
extra=" --p-value 0.05 ",
threads: config["threads"]["Varscan_Call"]
log:
path_log + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Call.logs"
benchmark:
path_bm + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Call.tsv"
run:
shell("{path_varscan}varscan mpileup2cns {input} {params.extra} --output-vcf 1 | "
"{path_bcftools}bcftools view -Oz -o {output} 1>{log} 2>{log} ")
rule Varscan_Filter:
input:
rules.Varscan_Call.output.vcf
output:
vcf=path_data + "germlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.pass.vcf.gz"
params:
extra="",
threads: config["threads"]["Varscan_Filter"]
log:
path_log + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Filter.logs"
benchmark:
path_bm + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Filter.tsv"
run:
shell("{path_bcftools}bcftools view {params.extra} -Oz -o {output.vcf} {input} 2>{log} 1>{log} ")
rule Varscan_Call_SNV:
input:
rules.Samtools_Mpileup.output
output:
vcf=path_data + "germlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.SNV.raw.vcf.gz"
params:
extra=" --p-value 0.05 ",
threads: config["threads"]["Varscan_Call_SNV"]
log:
path_log + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Call_SNV.logs"
benchmark:
path_bm + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Call_SNV.tsv"
run:
shell("{path_varscan}varscan mpileup2snp {input} {params.extra} --output-vcf 1 | "
"{path_bcftools}bcftools view -Oz -o {output} 1>{log} 2>{log} ")
rule Varscan_Filter_SNV:
input:
rules.Varscan_Call_SNV.output.vcf
output:
vcf=path_data + "germlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.SNV.pass.vcf.gz"
params:
extra="",
threads: config["threads"]["Varscan_Filter_SNV"]
log:
path_log + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Filter_SNV.logs"
benchmark:
path_bm + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Filter_SNV.tsv"
run:
shell("{path_bcftools}bcftools view {params.extra} -Oz -o {output.vcf} {input} 2>{log} 1>{log} ")
rule Varscan_Call_INDEL:
input:
rules.Samtools_Mpileup.output
output:
vcf=path_data + "germlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.INDEL.raw.vcf.gz"
params:
extra=" --p-value 0.05 ",
threads: config["threads"]["Varscan_Call_INDEL"]
log:
path_log + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Call_INDEL.logs"
benchmark:
path_bm + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Call_INDEL.tsv"
run:
shell("{path_varscan}varscan mpileup2indel {input} {params.extra} --output-vcf 1 | "
"{path_bcftools}bcftools view -Oz -o {output} 1>{log} 2>{log} ")
rule Varscan_Filter_INDEL:
input:
rules.Varscan_Call_INDEL.output.vcf
output:
vcf=path_data + "germlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.INDEL.pass.vcf.gz"
params:
extra="",
threads: config["threads"]["Varscan_Filter_INDEL"]
log:
path_log + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Filter_INDEL.logs"
benchmark:
path_bm + "gremlineVar/varscan/perSample/{bam_sample}/{bam_sample}.varscan.Varscan_Filter_INDEL.tsv"
run:
shell("{path_bcftools}bcftools view {params.extra} -Oz -o {output.vcf} {input} 2>{log} 1>{log} ")
| [
"[email protected]"
] | |
23f3e43c270309d1ade32b0682a98185311ac35f | ef60d569adaa0c2e11ef6884713315ff915dc4e4 | /helloagain2.py | a504b7a6cac5081e2dff395927bcbf791c323d11 | [] | no_license | georgiawallace/assignments-hackthehood | 5593d9e8797438d83a9e6902e3332f31039a3fd5 | 2bd006f83a52feba26a49f06e7e95d690febe934 | refs/heads/main | 2023-06-02T14:00:52.152700 | 2021-06-17T18:46:39 | 2021-06-17T18:46:39 | 377,247,248 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | print("hello world")
bool = True
pool = False
if bool:
print("Hello again")
else:
print("guess not!")
grocerie_dict = {"Chicken": "$1.59", "Beef": "$1.99", "Cheese": "$1.00"}
phone_number = {"moms number":"3234852625", "dads number": "3234851030"} | [
"[email protected]"
] | |
65de156e4c96c3dd42682c01939f1b278948f55f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/171/45106/submittedfiles/testes.py | cf1fb2c89698a63423322164069c8b6bb6f82f80 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # -*- coding: utf-8 -*-
import math
#START HERE BELOW
n=int(input('enter the number of people:'))
menor=500
maior=0
for i in range(1,n+1,1):
    nota=float(input('enter the grade:'))
if nota>maior:
maior=nota
if nota<menor:
menor=nota
print(maior)
print(menor) | [
"[email protected]"
] | |
cea8580c65ae268aeb7b6eb50e89b4021099b025 | 894b290b4f4f47b5eb523c23efd7bd6110d91b2f | /54_Rentalshops58/Rentalshops58/Rentalshops58/pipelines.py | fa51a420fad1a95f3d4149152bd30da81ad75e3f | [] | no_license | wliustc/SpiderS | 6650c00616d11239de8c045828bafdc5a299b1ce | 441f309c50d28c1a3917bed19321cd5cbe7c2861 | refs/heads/master | 2020-03-27T06:15:39.495785 | 2018-06-14T07:55:44 | 2018-06-14T07:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
class RentalShops58Pipeline(object):
# def process_item(self, item, spider):
# return item
def __init__(self):
self.file = codecs.open('test.json', 'w', encoding='utf-8')
def process_item(self, item, spider):
line = json.dumps(dict(item), ensure_ascii=False) + "\n"
self.file.write(line)
return item
def spider_closed(self, spider):
self.file.close() | [
"[email protected]"
] | |
fca6e040ae44e3085fde8c66889736e740be905c | ac924e7192a2d38380a903ea908edd659c8d997d | /BoostedDiTauReco/makeFileListTCP106X.py | ac4301f887cf760195952842c57b284d07ec6409 | [] | no_license | jingyucms/BoostedDiTau | e7209d7a9c7e5e4073dfe3a31af58185c013b24a | d28b83e07367452464d01090ab5037261ccc4900 | refs/heads/master | 2023-08-19T00:32:49.528310 | 2021-10-15T21:33:06 | 2021-10-15T21:33:06 | 178,277,147 | 0 | 2 | null | 2023-08-21T14:58:54 | 2019-03-28T20:27:24 | Python | UTF-8 | Python | false | false | 1,230 | py | import sys,string,math,os,glob
import numpy as np
prefix = "root://xrootd.unl.edu/"
filesPerList=50
def checkAndMakeDir(dir):
if not os.path.exists(dir):
os.mkdir(dir)
def clearDir(dir):
for fil in glob.glob(dir+"/*"):
os.remove(fil)
masses = ['30','50']
bins = ['0to100','100to400','400toInf']
for mass in masses:
fileListDir="./filelists/TCP/"+mass+"/"
checkAndMakeDir("./filelists/TCP")
checkAndMakeDir(fileListDir)
clearDir(fileListDir)
for b in bins:
#searchString = '/store/user/nbower/Events/TCP_m_{}_w_1_htj_{}_slc6_amd64_gcc630_MINIAOD/'.format(mass, b)
#fileDir = searchString
searchString = '/store/user/zhangj/events/ALP/UL2017ReMiniAOD/TCP_m_{}_w_1_htj_{}*'.format(mass, b)
fileDir = '/store/user/zhangj/events/ALP/UL2017ReMiniAOD/'
query = 'eos root://cmseos.fnal.gov ls '+searchString
files = os.popen(query).read().split()
for nf in range(1, len(files)+1):
filelistIdx=int((nf-1)/filesPerList)
if nf%filesPerList==1:
out=open(fileListDir+'TCP_m_{}_w_1_htj_{}_{}.txt'.format(mass, b, str(filelistIdx)), 'w')
out.write(prefix+fileDir+files[nf-1]+"\n")
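            # Illustrative note: with filesPerList = 50 the index math above groups
            # files 1-50 into ..._0.txt, 51-100 into ..._1.txt, and so on; e.g.
            # nf = 51 gives filelistIdx = int(50/50) = 1 and 51 % 50 == 1, so a new
            # list file is opened for that chunk.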
| [
"[email protected]"
] | |
dfef2af78351db7d5277646c1d8eca210361e3cf | 514676efb23a98fbce52bca937c7690f37dfa255 | /src/dlc_practical_prologue.py | 9fa7914271c05b1c09f9872804edab75c51e6d03 | [] | no_license | giorgiosav/dl-proj1 | 78f4a9173329c6b5b15d4e46cac3461766ba75a6 | 194e2074cad6bc1a22a33c1e1ddd69091c21e6db | refs/heads/master | 2022-12-27T21:10:32.450643 | 2020-10-08T11:06:47 | 2020-10-08T11:06:47 | 252,936,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | # We deleted all the parsing due to conflict with the test.py main
# Left only the useful part for the project
import torch
from torchvision import datasets
import os
def mnist_to_pairs(nb, input, target):
input = torch.functional.F.avg_pool2d(input, kernel_size=2)
a = torch.randperm(input.size(0))
a = a[:2 * nb].view(nb, 2)
input = torch.cat((input[a[:, 0]], input[a[:, 1]]), 1)
classes = target[a]
target = (classes[:, 0] <= classes[:, 1]).long()
return input, target, classes
######################################################################
def generate_pair_sets(nb):
data_dir = os.environ.get('PYTORCH_DATA_DIR')
if data_dir is None:
data_dir = './data'
train_set = datasets.MNIST(data_dir + '/mnist/', train=True, download=True)
train_input = train_set.data.view(-1, 1, 28, 28).float()
train_target = train_set.targets
test_set = datasets.MNIST(data_dir + '/mnist/', train=False, download=True)
test_input = test_set.data.view(-1, 1, 28, 28).float()
test_target = test_set.targets
return mnist_to_pairs(nb, train_input, train_target) + \
mnist_to_pairs(nb, test_input, test_target)
######################################################################
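# Minimal usage sketch (illustrative): downloads MNIST on first run. With
# nb = 1000 the pairs have shape (1000, 2, 14, 14), the binary targets
# (1000,), and the digit classes (1000, 2), since each 28x28 image is
# average-pooled down to 14x14 before pairing.
if __name__ == '__main__':
    train_input, train_target, train_classes, \
        test_input, test_target, test_classes = generate_pair_sets(1000)
    print(train_input.shape, train_target.shape, train_classes.shape)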
| [
"[email protected]"
] | |
b2e57fbb5272479185936b8f46de858418ed5c6f | 8f1452b97b07212c91deec8b0c6feba7b86c94dc | /server/sks/lib/DHTThread.py | 703980dff706835fead281d82bc1dfc8980b1d36 | [] | no_license | WaterInit/p2p-kademlia | d9d7cb114795b204c7ecb306276cb4862a301955 | b3e0ccc0fa91908e2f765b0478f7a737dbc849fb | refs/heads/master | 2021-03-19T15:32:26.028210 | 2016-02-10T13:14:40 | 2016-02-10T13:14:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | #!/usr/bin/python3
#from django.test import TestCase
import threading
import queue
from .node import node
class DHTThread(threading.Thread):
def __init__(self, ip, port):
print("Init DHTThread")
super(DHTThread, self).__init__()
self.input_q = queue.Queue(1)
self.output_q = queue.Queue(1)
self.ip = ip
self.port = port
self.knoten = node(self.ip, self.port)
print("DHTThread initialized.")
# self.run()
def run(self):
while True:
print("Waiting for requests...")
key = self.input_q.get()
print("Processing request.")
if key[0] == "put": # insert in DHT
print("PUT key with ID " + str(key[1]))
pgp_entry = self.knoten.insert_key(key[1], key[2])
self.output_q.put(pgp_entry)
elif key[0] == "get": # get from DHT
print("GET key with ID " + str(key[1]))
pgp_entry = self.knoten.get_key(key[1])
                if pgp_entry == 0: # key not found
self.output_q.put(None)
else:
self.output_q.put(pgp_entry)
else:
self.output_q.put(None) | [
"[email protected]"
] | |
324f5fb3ebcda6221486b19a9815616f355fd473 | 2ad2a8e49fcfbc36bf4fbde88bbc54191e6cddf1 | /networking-odl/networking_odl/tests/unit/ml2/test_driver.py | e2ceda56c82d31e9c52cad03a7c6491411405f1d | [
"Apache-2.0"
] | permissive | opnfv/fds | 70b7cd819ad9f64e009615dfdda970e4ba31ed02 | 20cef81fd594f10949e151fd5a0a439af0a844e4 | refs/heads/master | 2021-01-12T10:57:33.088576 | 2018-09-18T14:47:07 | 2018-09-18T14:47:07 | 72,769,062 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | # Copyright (c) 2013-2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron.tests.unit.plugins.ml2 import test_plugin
from networking_odl.common import constants as const
from networking_odl.ml2 import mech_driver as driver
class TestODLShim(test_plugin.Ml2PluginV2TestCase):
def setUp(self):
super(TestODLShim, self).setUp()
self.context = context.get_admin_context()
self.plugin = mock.Mock()
self.driver = driver.OpenDaylightMechanismDriver()
self.driver.odl_drv = mock.Mock()
def test_create_network_postcommit(self):
self.driver.create_network_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('create',
const.ODL_NETWORKS,
self.context)
def test_update_network_postcommit(self):
self.driver.update_network_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('update',
const.ODL_NETWORKS,
self.context)
def test_delete_network_postcommit(self):
self.driver.delete_network_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('delete',
const.ODL_NETWORKS,
self.context)
def test_create_subnet_postcommit(self):
self.driver.create_subnet_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('create',
const.ODL_SUBNETS,
self.context)
def test_update_subnet_postcommit(self):
self.driver.update_subnet_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('update',
const.ODL_SUBNETS,
self.context)
def test_delete_subnet_postcommit(self):
self.driver.delete_subnet_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('delete',
const.ODL_SUBNETS,
self.context)
def test_create_port_postcommit(self):
self.driver.create_port_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('create',
const.ODL_PORTS,
self.context)
def test_update_port_postcommit(self):
self.driver.update_port_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('update',
const.ODL_PORTS,
self.context)
def test_delete_port_postcommit(self):
self.driver.delete_port_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('delete',
const.ODL_PORTS,
self.context)
def test_bind_port_delegation(self):
# given front-end with attached back-end
front_end = self.driver
front_end.odl_drv = back_end = mock.MagicMock(
spec=driver.OpenDaylightDriver)
# given PortContext to be forwarded to back-end without using
context = object()
# when binding port
front_end.bind_port(context)
# then port is bound by back-end
back_end.bind_port.assert_called_once_with(context)
| [
"[email protected]"
] | |
09df95570b523b2551725ec260de6aadf05ddfc3 | bef8629b62ef6a8db8f8bdb54e2d8d10be6feb4a | /src/breeder.py | 3eea331334782793c1e773ab8a832ac925526b20 | [] | no_license | firemark/srpp-travel | 5ee8a6b80e935bfa2a36d1505490ae82972fc765 | 94590274356f979d73b36428dcee1591d0c89503 | refs/heads/master | 2021-01-13T01:40:46.086028 | 2013-12-03T23:16:07 | 2013-12-03T23:16:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,532 | py | from random import shuffle, randint
from chromosome import Chromosome
from models import World
from config import mutation_chance_perc, population
class Breeder(object):
chromosome_list = None
chromosome_list_paired = None
    def __init__(self, chromosome_list):
        self.chromosome_list = list(chromosome_list)
        self.chromosome_list_paired = []
    def mutate_chromosomes(self):
        for chromosome in self.chromosome_list:
            random_percent = randint(1, 100)
            if random_percent <= mutation_chance_perc:
                chromosome.do_mutation()
    def sort_chromosomes_by_value(self):
        for chromosome in self.chromosome_list:
            chromosome.do_evaluation()
        self.chromosome_list.sort(key=lambda chromosome: chromosome.value)
    def remove_weak_chromosomes(self):
        self.sort_chromosomes_by_value()
        half_length = len(self.chromosome_list) // 2
        del self.chromosome_list[half_length:]
    def pair_chromosomes(self):
        while len(self.chromosome_list) > 1:
            random_number1 = randint(0, len(self.chromosome_list) - 1)
            first = self.chromosome_list.pop(random_number1)
            random_number2 = randint(0, len(self.chromosome_list) - 1)
            second = self.chromosome_list.pop(random_number2)
            self.chromosome_list_paired.append([first, second])
        # if not an even number of chromosomes, pair the last chromosome with its clone
        if len(self.chromosome_list) == 1:
            last = self.chromosome_list.pop()
            self.chromosome_list_paired.append([last, last])
    def crossover_chromosomes(self):
        for pair in self.chromosome_list_paired:
            chromosome0 = pair[0].do_crossover(pair[1])
            chromosome1 = pair[1].do_crossover(pair[0])
            self.chromosome_list.append(chromosome0)
            self.chromosome_list.append(chromosome1)
        self.chromosome_list_paired = []
    def do_shit(self):
        self.remove_weak_chromosomes()
        self.pair_chromosomes()
        self.crossover_chromosomes()
        self.mutate_chromosomes()
    def get_best_chromosome(self):
        self.sort_chromosomes_by_value()
        return self.chromosome_list[0]
    def feed_breeder(self, world):
        magazine_place = world.get_magazine_place()
        places_list = world.get_places_list()
        places_in_row = world.k
        for number in range(population):
            shuffle(places_list)
            # pass a copy so each chromosome keeps its own ordering of places
            chromosome = Chromosome(list(places_list), magazine_place, places_in_row)
            self.chromosome_list.append(chromosome)
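# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, kept as comments because the World
# constructor lives in models.py and its arguments are not shown here):
#
#     world = World(...)                 # built elsewhere from the problem data
#     breeder = Breeder([])
#     breeder.feed_breeder(world)        # create the initial random population
#     for generation in range(100):      # number of generations is arbitrary
#         breeder.do_shit()              # select, pair, crossover, mutate
#     best = breeder.get_best_chromosome()
#     print(best.value)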
| [
"[email protected]"
] | |
7a7016e9c99a4b6c025f3b6089460b2f5b45ccfc | 4ced88bdafcd597b841173a1cfa750ec6baa81c4 | /Task_1/exercise_three/scripts/data_processing.py | 61195d07706e919ef0af9abfa2d13b631535c56c | [] | no_license | MayarSherif/ARL_workshop_task | be0747af914449d2c07b3f22d38050f1cd90e5a6 | 5b7a32ca704e5b19b44f32d55817cb712b633377 | refs/heads/main | 2023-08-14T23:54:46.423570 | 2021-09-15T22:29:51 | 2021-09-15T22:29:51 | 406,930,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
## Simple talker demo that listens to std_msgs/Strings published
## to the 'chatter' topic
import rospy
from std_msgs.msg import String
def response_to_pub(data):
name, age, height = data.data.split(",")
print(name)
print(age)
print(height)
def personalInfo_pub():
rospy.init_node('data_processing', anonymous=True)
rospy.Subscriber("raw_data", String, response_to_pub)
rospy.spin()
def talker(name, age, height):
rate = rospy.Rate(1) # 1hz
while not rospy.is_shutdown():
        pub.publish("{},{},{}".format(name, age, height))
        rospy.loginfo("published user info: %s, %s, %s", name, age, height)
rate.sleep()
if __name__ == '__main__':
pub = rospy.Publisher('user_info', String, queue_size=10)
personalInfo_pub()
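    # Illustrative note: the subscriber above expects a comma-separated
    # "name,age,height" string on the raw_data topic, which can be exercised
    # from a shell with, e.g.:
    #   rostopic pub -1 /raw_data std_msgs/String "data: 'Alice,30,165'"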
| [
"[email protected]"
] | |
a749d89fd7026d3bdab5ab5831b489359aac321b | 6c3ec03233ac2af56175506baf3fdc382a62ceb2 | /python-leetcode/172-factorial-trailing-zeroes.py | 309583d0fb324ffe34c4f598e1ce018fb1ee21a8 | [] | no_license | scottliu77/leetcode | e9e4a9c56b8fe9bc15c566b27c5fa924bd25fe17 | 2ec7dcb8d7359ae4487767d86ebcecd3da979946 | refs/heads/master | 2021-09-24T11:30:40.023600 | 2018-10-09T03:19:28 | 2018-10-09T03:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | '''
Given an integer n, return the number of trailing zeroes in n!.
Note: Your solution should be in logarithmic time complexity.
Credits:
Special thanks to @ts for adding this problem and creating all test cases.
'''
class Solution(object):
def trailingZeroes(self, n):
"""
:type n: int
:rtype: int
"""
trailing = 0
while n > 0:
            n //= 5  # integer division: the loop sums n//5 + n//25 + n//125 + ..., the factors of 5 in n!
trailing += n
return trailing | [
"[email protected]"
] | |
44174235a9fe80a22805bf5f1cf227c84e48c908 | be1c645dea7f35dea75a63d4984fdfdd31c15f3f | /tasking/users/forms.py | 9f48340d3a327a2e288d7d5d29fba60632088fdb | [
"MIT"
] | permissive | cmanallen/tasking | e46b2c5ec818d320fb59484751a2892e46e47489 | 0a613357148afdb4b55078c58429ec45bb60a436 | refs/heads/master | 2021-01-10T19:38:17.958677 | 2015-01-09T18:58:57 | 2015-01-09T18:58:57 | 22,313,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | from django import forms
from .models import User
class UserRegisterForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
def save(self):
user = super(UserRegisterForm, self).save()
user.set_password(self.cleaned_data['password'])
user.save()
return user
class Meta:
model = User
fields = ('first_name', 'last_name', 'username', 'email', 'password', 'avatar')
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = (
'username',
'email',
'first_name',
'last_name',
'avatar',
)
class UserChangePasswordForm(forms.ModelForm):
    new_password = forms.CharField(widget=forms.PasswordInput())  # needed by cleaned_data['new_password'] in save()
def save(self):
user = super(UserChangePasswordForm, self).save()
user.set_password(self.cleaned_data['new_password'])
user.save()
return user
class Meta:
model = User | [
"[email protected]"
] | |
d75b52141ade9e0f978c5b80994b23852e84b5a5 | a40057412754495d997e913bba9cfacb028e2990 | /02_face_alignment.py | 28d6cdaf4a18a133350fbbe824f9dc04d4b3f270 | [] | no_license | iorilan/dlib_on_ubuntu | 5cb49bb27c612093008124e0497c8d66911c8d76 | d9eababbbfad9b450fdb4ca9fd821504b01a60e0 | refs/heads/master | 2020-03-29T13:42:34.545246 | 2018-09-23T11:25:07 | 2018-09-23T11:25:07 | 149,977,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,434 | py |
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to use dlib's face recognition tool for image alignment.
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
#
# Compiling dlib should work on any operating system so long as you have
# CMake installed. On Ubuntu, this can be done easily by running the
# command:
# sudo apt-get install cmake
#
# Also note that this example requires Numpy which can be installed
# via the command:
# pip install numpy
import sys
import dlib
if len(sys.argv) != 3:
print(
"Call this program like this:\n"
" ./face_alignment.py shape_predictor_5_face_landmarks.dat ../examples/faces/bald_guys.jpg\n"
"You can download a trained facial shape predictor from:\n"
" http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
exit()
predictor_path = sys.argv[1]
face_file_path = sys.argv[2]
# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
# Load the image using Dlib
img = dlib.load_rgb_image(face_file_path)
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)
num_faces = len(dets)
if num_faces == 0:
print("Sorry, there were no faces found in '{}'".format(face_file_path))
exit()
# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
faces.append(sp(img, detection))
window = dlib.image_window()
# Get the aligned face images
# Optionally:
# images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
images = dlib.get_face_chips(img, faces, size=320)
for image in images:
window.set_image(image)
dlib.hit_enter_to_continue()
# It is also possible to get a single chip
image = dlib.get_face_chip(img, faces[0])
window.set_image(image)
dlib.hit_enter_to_continue()
| [
"[email protected]"
] | |
7d351e5edb6ffa58eb342618e8e83d01136005e2 | 3e968e71b4ef72a2c199675943d7a951faa57f49 | /test.py | 211eb1aae5d568cfa03058fc86333b790978fcca | [] | no_license | sanbuddhacharyas/Agrodoctor_NeuralNetwork | b2e7f288769270036ad75fd75a58d923c29dcd55 | 8f0141f44bb77c920c78883c6eee52ce556e88fb | refs/heads/master | 2020-04-26T14:10:22.254124 | 2019-03-03T16:57:30 | 2019-03-03T16:57:30 | 173,603,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from darkflow.net.build import TFNet
import cv2
cap = cv2.VideoCapture(0)
options = {"model": "cfg/tiny-yolo-voc.cfg",
"load": "bin/tiny-yolo-voc.weights",
"threshold": 0.2,
"gpu":0
}
tfnet = TFNet(options)
while True:
ret,frame = cap.read()
result = tfnet.return_predict(frame)
print(result)
| [
"sanbuddhacharyas"
] | sanbuddhacharyas |
7f320945fd7779cecaefc22eea6ef98e7d485e3b | d83c8108ec83f75e0f080f6ef4463b33a4031648 | /pythonarch.py | 356556b88218317a38633b467f076fe163514c3d | [] | no_license | VijayalakshmiHuddar/source_code_clusters_remote | 217e40caf4ecb99fce761f518cc2e988626e94d2 | 82a56915f1844740f6584ecf1fcc26070bbdda00 | refs/heads/master | 2022-05-18T05:55:45.249912 | 2019-06-10T12:36:02 | 2019-06-10T12:36:02 | 190,908,638 | 0 | 0 | null | 2022-04-28T20:38:28 | 2019-06-08T16:12:24 | JavaScript | UTF-8 | Python | false | false | 1,597 | py | Volume in drive C has no label.
import os
import shutil
from zipfile import ZipFile
from os import path
from shutil import make_archive
def main():
# Check if file exists
if path.exists("guru99.txt"):
# get the path to the file in the current directory
src = path.realpath("guru99.txt");
# rename the original file
os.rename("career.guru99.txt","guru99.txt")
# now put things into a ZIP archive
root_dir,tail = path.split(src)
shutil.make_archive("guru99 archive", "zip", root_dir)
# more fine-grained control over ZIP files
with ZipFile("testguru99.zip","w") as newzip:
newzip.write("guru99.txt")
newzip.write("guru99.txt.bak")
if __name__== "__main__":
main()
| [
"[email protected]"
] | |
bb5856df7071cf837ec7b1544137dc9c978b235a | b02488bfef4c0ed39e258c2610b8c1c574881045 | /hitch_api/hitch_api/settings/base.py | 760cfa847f3bedcdae92dfede02b2ee4b13e084b | [] | no_license | Noeuclides/Hitch_Tech_Interview | afb39791839cf15d20f849e7ac4baa1499eaf346 | 255d4696809c26fae82ca3df0f7e0d123aac6d1f | refs/heads/master | 2023-03-10T08:05:35.879686 | 2021-02-23T16:08:57 | 2021-02-23T16:08:57 | 341,252,196 | 0 | 0 | null | 2021-02-23T05:46:25 | 2021-02-22T15:49:29 | Python | UTF-8 | Python | false | false | 2,986 | py | """
Django settings for hitch_api project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8!#r+4%i8&780t)ep#9_d)vfom00#)4c0yzdi2(ggp0_cy81lh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
BASE_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
LOCAL_APPS = [
'nba_api'
]
THIRD_APPS = [
'rest_framework'
]
INSTALLED_APPS = BASE_APPS + LOCAL_APPS + THIRD_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hitch_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hitch_api.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
28bf1f510cea118cf5d60a1fe31fd10baf986b7d | 00b433beed184444a73710bb148233a872ecac23 | /CozmoMusicMan/model/sound_player.py | 4e5750ecabd85abcca8bd3b2d3a531dfb1d299ea | [
"MIT"
] | permissive | Hoomano-Hackathon/CozmoIsCute | a927ba59c8b20a47e8c49e1e12ae0669989b7dac | 9b588b269e7a977f348814add5c7f76407d9b0e9 | refs/heads/master | 2021-01-25T10:51:16.623339 | 2017-06-12T12:05:05 | 2017-06-12T12:05:05 | 93,889,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import pygame
import os
import time
NB_MUSICS = 64
class Sound:
def __init__(self):
pygame.mixer.init()
pygame.mixer.pre_init()
dir_path = os.path.dirname(os.path.realpath(__file__))
self.s = []
files = os.listdir(os.path.join(dir_path, "Sound"))
files.sort()
for filename in files:
self.s.append(pygame.mixer.Sound(os.path.join(dir_path, "Sound", filename)))
# print(self.s)
def play(self, id, duration):
print(duration)
self.s[id].play(maxtime=int(duration*1000)-180)
time.sleep(duration)
| [
"[email protected]"
] | |
36f25095194fc8974ee0e984ad52867f04881322 | 2853b24860719047c956f68a9f16ecf246fc876f | /CS235Flix/adapters/memory_repository.py | 170069b5c54051a18e1d63d698bae62b6dc13ca0 | [] | no_license | nlon982/Assignment2Comp235 | afae7feff67d4e7d0b335722244b10acf92be0f6 | 11a9a260278ba15776c7b174e1e7deef7d5f8ff3 | refs/heads/main | 2023-01-02T00:16:18.870038 | 2020-10-26T11:42:05 | 2020-10-26T11:42:05 | 307,016,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,968 | py | import csv
import os
from datetime import datetime
from CS235Flix.adapters.movie_file_csv_reader import MovieFileCSVReader
from CS235Flix.adapters.repository import AbstractRepository
from CS235Flix.domain.movie import Movie, get_movie_hash
from CS235Flix.domain.director import Director
from CS235Flix.domain.actor import Actor
from CS235Flix.domain.person import get_person_hash
from CS235Flix.domain.genre import Genre, get_genre_hash
from CS235Flix.domain.user import User
from CS235Flix.domain.review import Review, make_review
from werkzeug.security import generate_password_hash
class MemoryRepository(AbstractRepository):
def __init__(self):
self.__movie_dict = dict()
self.__director_dict = dict()
self.__actor_dict = dict()
self.__genre_dict = dict()
self.__user_list = list()
def add_movie(self, a_movie):
self.__movie_dict[hash(a_movie)] = a_movie
def get_movie(self, title, release_year):
the_hash = get_movie_hash(title, release_year)
return self.__movie_dict[the_hash]
def get_all_movies(self):
return list(self.__movie_dict.values())
def add_director(self, a_director):
self.__director_dict[hash(a_director)] = a_director
def get_director(self, director_full_name):
the_hash = get_person_hash(director_full_name)
return self.__director_dict[the_hash]
def get_all_directors(self):
return list(self.__director_dict.values())
def add_actor(self, a_actor):
self.__actor_dict[hash(a_actor)] = a_actor
def get_actor(self, actor_full_name):
the_hash = get_person_hash(actor_full_name)
return self.__actor_dict[the_hash]
def get_all_actors(self):
return list(self.__actor_dict.values())
def add_genre(self, a_genre):
self.__genre_dict[hash(a_genre)] = a_genre
def get_genre(self, genre_name):
the_hash = get_genre_hash(genre_name)
return self.__genre_dict[the_hash]
def get_all_genres(self):
return list(self.__genre_dict.values())
def add_user(self, user):
self.__user_list.append(user)
def get_user(self, user_name):
return next((user for user in self.__user_list if user.user_name == user_name), None)
def get_all_users(self):
return self.__user_list
def get_movies_with_actor(self, actor_full_name): # this method isn't an Abstract Method (from the Abstract Repository), it's a helper for the below
movie_list = list()
try:
a_actor = self.get_actor(actor_full_name)
except:
return movie_list # Exception("Actor: {} is not in repository".format(actor_full_name))
for a_movie in self.get_all_movies():
if a_actor in a_movie.actors:
movie_list.append(a_movie)
return movie_list
def get_movies_with_director(self, director_full_name): # this method isn't an Abstract Method (from the Abstract Repository), it's a helper for the below
movie_list = list()
try:
a_director = self.get_director(director_full_name)
except:
#Exception("Director: {} is not in repository".format(director_full_name))
return movie_list
for a_movie in self.get_all_movies():
if a_director == a_movie.director:
movie_list.append(a_movie)
return movie_list
def get_movies_with_genre(self, genre_name): # this method isn't an Abstract Method (from the Abstract Repository), it's a helper for the below
movie_list = list()
try:
a_genre = self.get_genre(genre_name)
except:
#Exception("Genre: {} is not in repository".format(genre_name))
return movie_list
for a_movie in self.get_all_movies():
if a_genre in a_movie.genres:
movie_list.append(a_movie)
return movie_list
def get_movies_with_actor_director_or_genre(self, actor_full_name, director_full_name, genre_name):
movie_set_1 = set(self.get_movies_with_actor(actor_full_name))
movie_set_2 = set(self.get_movies_with_director(director_full_name))
movie_set_3 = set(self.get_movies_with_genre(genre_name))
return movie_set_1.union(movie_set_2, movie_set_3)
def add_movies(a_repo_instance, movie_list):
for a_movie in movie_list:
a_repo_instance.add_movie(a_movie)
def add_directors(a_repo_instance, director_list):
for a_director in director_list:
a_repo_instance.add_director(a_director)
def add_actors(a_repo_instance, actor_list):
for a_actor in actor_list:
a_repo_instance.add_actor(a_actor)
def add_genres(a_repo_instance, genre_list):
for a_genre in genre_list:
a_repo_instance.add_genre(a_genre)
def read_csv_file(csv_path): # a bit of magic
with open(csv_path, encoding='utf-8-sig') as infile:
reader = csv.reader(infile)
        # Read the first line of the CSV file (the header row).
headers = next(reader)
# Read remaining rows from the CSV file.
for row in reader:
# Strip any leading/trailing white space from data read.
row = [item.strip() for item in row]
yield row
def add_users_to_memory_repository(user_csv_path, a_repo_instance):
for data_row in read_csv_file(user_csv_path):
user_name = data_row[1]
password = data_row[2]
hashed_password = generate_password_hash(password)
a_user = User(user_name, hashed_password)
a_repo_instance.add_user(a_user)
def add_reviews_to_memory_repository(review_csv_path, a_repo_instance): # this assumes movies and users are already loaded into the repository
    # also note, reviews don't exist standalone in the repository (they are stored with the user who made the review, and the movie)
for data_row in read_csv_file(review_csv_path):
user_name = data_row[0]
movie_title = data_row[1]
movie_release_year = data_row[2]
a_user = a_repo_instance.get_user(user_name)
if a_user is None:
raise Exception("User: {} does not exist in the repository for a review to be made".format(user_name))
a_movie = a_repo_instance.get_movie(movie_title, movie_release_year)
if a_movie is None:
raise Exception("The movie: {} ({}) does not exist in the repository for a review to be made".format(movie_title, movie_release_year))
review_text = data_row[3]
rating = float(data_row[4])
timestamp = datetime.fromisoformat(data_row[5])
make_review(a_user, a_movie, review_text, rating, timestamp) # this function is always called to store the review in both the user and movie
def populate(data_path, a_repo_instance):
movie_csv_path = os.path.join(data_path, 'Data1000Movies.csv')
user_csv_path = os.path.join(data_path, 'users.csv')
review_csv_path = os.path.join(data_path, 'reviews.csv')
movie_file_csv_reader_object = MovieFileCSVReader(movie_csv_path)
movie_file_csv_reader_object.read_csv_file()
movie_list = movie_file_csv_reader_object.dataset_of_movies
director_list = movie_file_csv_reader_object.dataset_of_directors
actor_list = movie_file_csv_reader_object.dataset_of_actors
genre_list = movie_file_csv_reader_object.dataset_of_genres
add_movies(a_repo_instance, movie_list)
add_directors(a_repo_instance, director_list)
add_actors(a_repo_instance, actor_list)
add_genres(a_repo_instance, genre_list)
add_users_to_memory_repository(user_csv_path, a_repo_instance)
add_reviews_to_memory_repository(review_csv_path, a_repo_instance) | [
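# Hedged usage sketch (not part of the original module): building an in-memory
# repository from the CSV data directory; the 'data' folder name is an assumption.
def _example_build_repository(data_path='data'):
    repo = MemoryRepository()
    populate(data_path, repo)
    return repo.get_all_movies(), repo.get_all_users()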
"[email protected]"
] | |
09e07c9f9bb102b9fe6de09c34f48d77a3337e8b | e2e35d5c433c47ac31d8e2df037405c87456de5a | /CIFAR-10/Estimate/models.py | c3457d710fac5b7408a7b33806ed49d692e1128e | [
"MIT"
] | permissive | ihaeyong/Robust-f-divergence-measures | 8ca73e708d72fed8b20ecbc5ceb84600b7828ad5 | 93d9efe616b777602fa40e966c58ba7a280fae5e | refs/heads/main | 2023-03-07T20:22:06.516868 | 2021-02-15T09:00:39 | 2021-02-15T09:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,543 | py | from __future__ import print_function, division
import numpy as np
from keras.datasets import mnist, cifar10, cifar100, imdb
from keras.models import Model
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.core import Dropout, SpatialDropout1D
from keras.layers import Input
from keras.layers.normalization import BatchNormalization
from keras.layers.embeddings import Embedding
from keras.callbacks import ModelCheckpoint
from keras.callbacks import LearningRateScheduler
from keras.preprocessing import sequence
from keras.layers import LSTM
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from resnet import cifar10_resnet
from loss import (crossentropy, robust, unhinged, sigmoid, ramp, savage,
boot_soft)
# losses that need sigmoid on top of last layer
yes_softmax = ['crossentropy', 'forward', 'est_forward', 'backward',
'est_backward', 'boot_soft', 'savage']
# unhinged needs bounded models or it diverges
yes_bound = ['unhinged', 'ramp', 'sigmoid']
class KerasModel():
def get_data(self):
(X_train, y_train), (X_test, y_test) = self.load_data()
# idx_perm = np.random.RandomState(101).permutation(X_train.shape[0])
# X_train, y_train = X_train[idx_perm], y_train[idx_perm]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
return X_train, X_test, y_train, y_test
# custom losses for the CNN
def make_loss(self, loss, P=None):
if loss == 'crossentropy':
return crossentropy
elif loss in ['forward', 'backward']:
return robust(loss, P)
elif loss == 'unhinged':
return unhinged
elif loss == 'sigmoid':
return sigmoid
elif loss == 'ramp':
return ramp
elif loss == 'savage':
return savage
elif loss == 'boot_soft':
return boot_soft
else:
ValueError("Loss unknown.")
def compile(self, model, loss, P=None):
if self.optimizer is None:
            raise ValueError('an optimizer must be set before compiling the model')
metrics = ['accuracy']
model.compile(loss=self.make_loss(loss, P),
optimizer=self.optimizer, metrics=metrics)
model.summary()
self.model = model
def load_model(self, file):
self.model.load_weights(file)
print('Loaded model from %s' % file)
def fit_model(self, model_file, X_train, Y_train, validation_split=None,
validation_data=None):
# cannot do both
if validation_data is not None and validation_split is not None:
return ValueError()
callbacks = []
monitor = 'val_loss'
# monitor = 'val_acc'
mc_callback = ModelCheckpoint(model_file, monitor=monitor,
verbose=1, save_best_only=True)
callbacks.append(mc_callback)
if hasattr(self, 'scheduler'):
callbacks.append(self.scheduler)
# use data augmentation
if hasattr(self, 'data_generator'):
# hack for using validation with data augmentation
idx_val = np.round(validation_split * X_train.shape[0]).astype(int)
X_val, Y_val = X_train[:idx_val], Y_train[:idx_val]
X_train_local, Y_train_local = X_train[idx_val:], Y_train[idx_val:]
self.data_generator.fit(X_train_local)
history = \
self.model.fit_generator(
self.data_generator.flow(X_train_local, Y_train_local,
batch_size=self.num_batch),
steps_per_epoch=X_train.shape[0] // self.num_batch,
epochs=self.epochs,
validation_data=(X_val, Y_val),
verbose=1, callbacks=callbacks)
else:
history = self.model.fit(
X_train, Y_train, batch_size=self.num_batch,
epochs=self.epochs,
validation_split=validation_split,
validation_data=validation_data,
verbose=1, callbacks=callbacks)
# use the model that reached the lowest loss at training time
self.load_model(model_file)
return history.history
def evaluate_model(self, X, Y):
score = self.model.evaluate(X, Y, batch_size=self.num_batch, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
return score[1]
def predict_proba(self, X):
pred = self.model.predict(X, batch_size=self.num_batch, verbose=1)
return pred
class MNISTModel(KerasModel):
def __init__(self, num_batch=32):
self.num_batch = num_batch
self.classes = 10
self.epochs = 40
self.normalize = True
self.optimizer = None
def load_data(self):
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
if self.normalize:
X_train = X_train / 255.
X_test = X_test / 255.
return (X_train, y_train), (X_test, y_test)
def build_model(self, loss, P=None):
input = Input(shape=(784,))
x = Dense(128, kernel_initializer='he_normal')(input)
x = Activation('relu')(x)
x = Dropout(0.2)(x)
x = Dense(128, kernel_initializer='he_normal')(x)
x = Activation('relu')(x)
x = Dropout(0.2)(x)
output = Dense(10, kernel_initializer='he_normal')(x)
if loss in yes_bound:
output = BatchNormalization(axis=1)(output)
if loss in yes_softmax:
output = Activation('softmax')(output)
model = Model(inputs=input, outputs=output)
self.compile(model, loss, P)
class CIFAR10Model(KerasModel):
def __init__(self, num_batch=32, type='deep'):
self.num_batch = num_batch
self.classes = 10
self.img_channels = 3
self.img_rows = 32
self.img_cols = 32
self.filters = 32
self.num_pool = 2
self.num_conv = 3
self.type = type
self.epochs = 240
self.augmentation = True
self.optimizer = SGD(lr=0.1, momentum=0.9, decay=5e-4)
self.lr_scheduler()
self.decay = 0.0001
def load_data(self):
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.reshape(X_train.shape[0], self.img_rows,
self.img_cols, self.img_channels)
X_test = X_test.reshape(X_test.shape[0], self.img_rows, self.img_cols,
self.img_channels)
means = X_train.mean(axis=0)
X_train = (X_train - means)
X_test = (X_test - means)
if self.augmentation:
print('Data Augmentation')
# data augmentation
self.data_generator = \
ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
# they are 2D originally in cifar
y_train = y_train.ravel()
y_test = y_test.ravel()
return (X_train, y_train), (X_test, y_test)
def lr_scheduler(self):
def scheduler(epoch):
if epoch > 180:
return 0.0001
elif epoch > 120:
return 0.001
elif epoch > 60:
return 0.01
else:
return 0.1
print('LR scheduler')
self.scheduler = LearningRateScheduler(scheduler)
def build_model(self, loss, P=None):
model = cifar10_resnet(self, self.decay, loss)
self.compile(model, loss, P)
class CIFAR100Model(KerasModel):
def __init__(self, num_batch=32):
self.num_batch = num_batch
self.classes = 100 # 100 classes
self.img_channels = 3
self.img_rows = 32
self.img_cols = 32
self.filters = 32
self.num_pool = 2
self.num_conv = 3
self.epochs = 240
self.augmentation = True
self.optimizer = SGD(lr=0.1, momentum=0.9, decay=5e-4)
self.decay = 10 ** -3
self.lr_scheduler()
def lr_scheduler(self):
def scheduler(epoch):
if epoch > 180:
return 0.0001
elif epoch > 120:
return 0.001
elif epoch > 60:
return 0.01
else:
return 0.1
print('LR scheduler')
self.scheduler = LearningRateScheduler(scheduler)
def load_data(self):
(X_train, y_train), (X_test, y_test) = cifar100.load_data()
X_train = X_train.reshape(X_train.shape[0], self.img_rows,
self.img_cols, self.img_channels)
X_test = X_test.reshape(X_test.shape[0], self.img_rows, self.img_cols,
self.img_channels)
means = X_train.mean(axis=0)
# std = np.std(X_train)
X_train = (X_train - means) # / std
X_test = (X_test - means) # / std
if self.augmentation:
print('Data Augmentation')
# data augmentation
self.data_generator = \
ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
# they are 2D originally in cifar
y_train = y_train.ravel()
y_test = y_test.ravel()
return (X_train, y_train), (X_test, y_test)
def build_model(self, loss, P=None):
model = cifar10_resnet(self, self.decay, loss)
self.compile(model, loss, P)
class IMDBModel(KerasModel):
def __init__(self, num_batch=32):
self.num_batch = num_batch
self.max_features = 5000
self.maxlen = 400
self.embedding_dims = 50
self.hidden_dims = 256
self.epochs = 50
self.classes = 2
self.optimizer = None
def load_data(self):
(X_train, y_train), (X_test, y_test) = \
imdb.load_data(num_words=self.max_features, seed=11)
X_train = sequence.pad_sequences(X_train, maxlen=self.maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=self.maxlen)
return (X_train, y_train), (X_test, y_test)
def build_model(self, loss, P=None):
input = Input(shape=(self.maxlen,))
x = Embedding(self.max_features, self.embedding_dims)(input)
x = SpatialDropout1D(0.8)(x)
x = Activation('relu')(x)
x = Flatten()(x)
output = Dense(self.classes, kernel_initializer='he_normal')(x)
if loss in yes_bound:
output = BatchNormalization(axis=1)(output)
if loss in yes_softmax:
output = Activation('softmax')(output)
model = Model(inputs=input, outputs=output)
self.compile(model, loss, P)
class LSTMModel(KerasModel):
def __init__(self, num_batch=32):
self.num_batch = num_batch
self.max_features = 5000
self.maxlen = 400
self.embedding_dims = 512
self.lstm_dim = 512
self.hidden_dims = 128
self.epochs = 50
self.classes = 2
self.optimizer = None
def load_data(self):
(X_train, y_train), (X_test, y_test) = \
imdb.load_data(num_words=self.max_features, seed=11)
X_train = sequence.pad_sequences(X_train, maxlen=self.maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=self.maxlen)
return (X_train, y_train), (X_test, y_test)
def build_model(self, loss, P=None):
input = Input(shape=(self.maxlen,))
x = Embedding(self.max_features, self.embedding_dims)(input)
x = SpatialDropout1D(0.8)(x)
x = LSTM(self.lstm_dim, kernel_initializer='uniform')(x)
x = Dense(self.embedding_dims, kernel_initializer='he_normal')(x)
x = Dropout(0.5)(x)
x = Activation('relu')(x)
output = Dense(self.classes, kernel_initializer='he_normal')(x)
if loss in yes_bound:
output = BatchNormalization(axis=1)(output)
if loss in yes_softmax:
output = Activation('softmax')(output)
model = Model(inputs=input, outputs=output)
self.compile(model, loss, P)
class NoiseEstimator():
def __init__(self, classifier, row_normalize=True, alpha=0.0,
filter_outlier=False, cliptozero=False, verbose=0):
"""classifier: an ALREADY TRAINED model. In the ideal case, classifier
should be powerful enough to only make mistakes due to label noise."""
self.classifier = classifier
self.row_normalize = row_normalize
self.alpha = alpha
self.filter_outlier = filter_outlier
self.cliptozero = cliptozero
self.verbose = verbose
def fit(self, X):
# number of classes
c = self.classifier.classes
T = np.empty((c, c))
# predict probability on the fresh sample
eta_corr = self.classifier.predict_proba(X)
# find a 'perfect example' for each class
for i in np.arange(c):
if not self.filter_outlier:
idx_best = np.argmax(eta_corr[:, i])
else:
eta_thresh = np.percentile(eta_corr[:, i], 97,
interpolation='higher')
robust_eta = eta_corr[:, i]
robust_eta[robust_eta >= eta_thresh] = 0.0
idx_best = np.argmax(robust_eta)
for j in np.arange(c):
T[i, j] = eta_corr[idx_best, j]
self.T = T
return self
def predict(self):
T = self.T
c = self.classifier.classes
if self.cliptozero:
idx = np.array(T < 10 ** -6)
T[idx] = 0.0
if self.row_normalize:
row_sums = T.sum(axis=1)
T /= row_sums[:, np.newaxis]
if self.verbose > 0:
print(T)
if self.alpha > 0.0:
T = self.alpha * np.eye(c) + (1.0 - self.alpha) * T
if self.verbose > 0:
print(T)
print(np.linalg.inv(T))
return T
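# Hedged usage sketch (illustrative only, not the authors' experiment script):
# train a plain MNIST classifier and estimate the label-noise transition matrix
# T with NoiseEstimator. The 'adam' optimizer, the tiny epoch count, the
# checkpoint path and the use of the test split as the fresh sample are all
# assumptions made for this sketch.
def _example_estimate_transition_matrix(model_file='/tmp/mnist_crossentropy.h5'):
    from keras.utils import to_categorical
    km = MNISTModel(num_batch=128)
    X_train, X_test, y_train, y_test = km.get_data()
    km.optimizer = 'adam'   # the base class leaves the optimizer unset
    km.epochs = 2           # keep the sketch cheap; real runs train much longer
    km.build_model('crossentropy')
    km.fit_model(model_file, X_train, to_categorical(y_train, km.classes),
                 validation_split=0.1)
    est = NoiseEstimator(km, filter_outlier=True, verbose=1).fit(X_test)
    return est.predict()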
| [
"[email protected]"
] | |
c8616b59d9be5b89978e12d61e49d23dfe326685 | 9bc7e06351e081c2d5dd4d35f0e30359a7dc3718 | /AFQ/dti.py | 9e273470ed5ca1e82f88f3bc9d5a66538b76fc9e | [
"BSD-2-Clause"
] | permissive | jhlegarreta/pyAFQ | 538fb282c9a0c41bc9dbf77d67d08047a7f9ecc9 | e4369e470de564cfcccde3366016903b79112abf | refs/heads/master | 2020-04-13T06:10:06.562664 | 2018-10-10T04:47:51 | 2018-10-10T04:47:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,518 | py | import os
import os.path as op
import numpy as np
import nibabel as nib
from dipy.core.geometry import vector_norm
from dipy.reconst import dti
import AFQ.utils.models as ut
__all__ = ["fit_dti", "predict", "tensor_odf"]
def _fit(gtab, data, mask=None):
dtimodel = dti.TensorModel(gtab)
return dtimodel.fit(data, mask=mask)
def fit_dti(data_files, bval_files, bvec_files, mask=None,
out_dir=None, file_prefix=None, b0_threshold=0):
"""
Fit the DTI model using default settings, save files with derived maps
Parameters
----------
data_files : str or list
Files containing DWI data. If this is a str, that's the full path to a
single file. If it's a list, each entry is a full path.
bval_files : str or list
Equivalent to `data_files`.
bvec_files : str or list
Equivalent to `data_files`.
mask : ndarray, optional
Binary mask, set to True or 1 in voxels to be processed.
Default: Process all voxels.
out_dir : str, optional
A full path to a directory to store the maps that get computed.
Default: maps get stored in the same directory as the last DWI file
in `data_files`.
    b0_threshold : float, optional
        Volumes with b-values at or below this threshold are treated as b0
        volumes when building the gradient table. Default: 0.
Returns
-------
file_paths : a dict with the derived maps that were computed and full-paths
to the files containing these maps.
Note
----
Maps that are calculated: FA, MD, AD, RD
"""
img, data, gtab, mask = ut.prepare_data(data_files, bval_files,
bvec_files, mask=mask,
b0_threshold=b0_threshold)
    # Fit the model (within the mask, if one was given) and pull out the maps
    dtf = _fit(gtab, data, mask=mask)
FA, MD, AD, RD, params = dtf.fa, dtf.md, dtf.ad, dtf.rd, dtf.model_params
maps = [FA, MD, AD, RD, params]
names = ['FA', 'MD', 'AD', 'RD', 'params']
if out_dir is None:
if isinstance(data_files, list):
out_dir = op.join(op.split(data_files[0])[0], 'dti')
else:
out_dir = op.join(op.split(data_files)[0], 'dti')
if file_prefix is None:
file_prefix = ''
if not op.exists(out_dir):
os.makedirs(out_dir)
aff = img.affine
file_paths = {}
for m, n in zip(maps, names):
file_paths[n] = op.join(out_dir, file_prefix + 'dti_%s.nii.gz' % n)
nib.save(nib.Nifti1Image(m, aff), file_paths[n])
return file_paths
def predict(params_file, gtab, S0_file=None, out_dir=None):
"""
Create a signal prediction from DTI params
params_file : str
Full path to a file with parameters saved from a DKI fit
gtab : GradientTable object
The gradient table to predict for
S0_file : str
Full path to a nifti file that contains S0 measurements to incorporate
into the prediction. If the file contains 4D data, the volumes that
contain the S0 data must be the same as the gtab.b0s_mask.
"""
if out_dir is None:
out_dir = op.join(op.split(params_file)[0])
if S0_file is None:
S0 = 100
else:
S0 = nib.load(S0_file).get_data()
# If the S0 data is 4D, we assume it comes from an acquisition that had
# B0 measurements in the same volumes described in the gtab:
if len(S0.shape) == 4:
S0 = np.mean(S0[..., gtab.b0s_mask], -1)
# Otherwise, we assume that it's already a 3D volume, and do nothing
img = nib.load(params_file)
params = img.get_data()
pred = dti.tensor_prediction(params, gtab, S0=S0)
fname = op.join(out_dir, 'dti_prediction.nii.gz')
nib.save(nib.Nifti1Image(pred, img.affine), fname)
return fname
def tensor_odf(evals, evecs, sphere):
"""
Calculate the tensor Orientation Distribution Function
Parameters
----------
evals : array (4D)
Eigenvalues of a tensor. Shape (x, y, z, 3).
evecs : array (5D)
Eigenvectors of a tensor. Shape (x, y, z, 3, 3)
sphere : sphere object
The ODF will be calculated in each vertex of this sphere.
"""
odf = np.zeros((evals.shape[:3] + (sphere.vertices.shape[0],)))
mask = np.where((evals[..., 0] > 0) &
(evals[..., 1] > 0) &
(evals[..., 2] > 0))
lower = 4 * np.pi * np.sqrt(np.prod(evals[mask], -1))
projection = np.dot(sphere.vertices, evecs[mask])
projection /= np.sqrt(evals[mask])
odf[mask] = ((vector_norm(projection) ** -3) / lower).T
return odf
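# Hedged usage sketch (not part of the original module): fitting DTI maps for
# one subject; every file name below is an illustrative assumption.
def _example_fit_subject(subject_dir='/data/subject01'):
    return fit_dti(op.join(subject_dir, 'dwi.nii.gz'),
                   op.join(subject_dir, 'dwi.bval'),
                   op.join(subject_dir, 'dwi.bvec'),
                   out_dir=op.join(subject_dir, 'dti'))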
| [
"[email protected]"
] | |
a4661a157f215037db65aa8089f8630a7fb5f3f9 | 60a7ecc3fc5a91661554d15b14ab3f6a73771f0f | /feature_extraction/load_paths.py | e47c18b54b2fadb06b43af9c92bb4532f6135052 | [] | no_license | shannonyu/FAQ_rank | 7ccf8d298b5b93b64cf72bfa3acee12b1ce4c60b | 419ade38e382847c909e740a63cf92843a8ef292 | refs/heads/master | 2020-03-27T15:22:18.971421 | 2018-08-28T09:54:01 | 2018-08-28T09:54:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import os
import re
import yaml
import xml.etree.ElementTree as ET
def get_filepath(datafolder):
""" save the xml files except for the multiline files"""
return [os.path.join(datafolder, f) for f in os.listdir(datafolder) if "xml" in f and "multiline" not in f]
def filepaths_fromdict(datafolders):
"""takes in the datafolders dictionary from the config file and outputs a dictionary of format:
{set_: [datafile1, datafile2,datafile3]}"""
all_filepaths = {}
for key, value in datafolders.items():
all_filepaths[key] = get_filepath(value)
return all_filepaths
def load_configs(config_file):
"""loads config file containing the directory
of the different data samples"""
with open(config_file, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
datafolders = {}
for section in cfg:
datafolders[section]= cfg[section]
all_filepaths = filepaths_fromdict(datafolders)
return all_filepaths
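# Hedged usage sketch: an illustrative config this loader would accept -- the
# top-level YAML keys name the data splits and the values are folders of xml
# files. The file name and folders below are assumptions:
#   train: /data/faq/train
#   dev:   /data/faq/dev
#   test:  /data/faq/test
if __name__ == '__main__':
    paths_by_split = load_configs('config.yml')
    for split, files in paths_by_split.items():
        print(split, len(files))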
| [
"[email protected]"
] | |
ea4ff4d37882f8ad8400d29764c18d576236198c | 4a510c304d5b262a583192c4d3d2a701157b6f87 | /inputData KSDr/dataKBS1.py | 13dac6c2357a72110eb6be5dddb2789dc767dfaf | [] | no_license | ChantalDE/MADLION | 4672b9060f6d20d87202c6cef2ce27cebc2941f8 | ecedb451fabb44b3aeeca893e0eaa8fc2bb64219 | refs/heads/main | 2023-02-03T23:30:19.680551 | 2020-12-11T22:10:46 | 2020-12-11T22:10:46 | 300,331,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | import psycopg2
#from receive_lob import do_process
#data = do_process()
#doa_value = data[0]
#conf_value = data[1]
#pwr_value = data[2]
#lat = data[3]
#lon = data[4]
#test hardcoded values
name = 'KBS1'
angle = 38.17583333
lat = 27.957261
lon = -82.436587
conf_value = 2
pwr_value = 3
connection = None  # defined up front so the except/finally blocks can test it safely
try:
connection = psycopg2.connect(user="geoserver", password="abc", host="3.22.118.224", port="5432", dbname="shapes")
cursor = connection.cursor()
postgres_insert_query = """ INSERT INTO points(long, lat, angle, name, conf_value, pwr_value) VALUES (%s,%s,%s,%s,%s,%s)"""
record_to_insert = (lon, lat, angle, name, conf_value, pwr_value)
cursor.execute(postgres_insert_query, record_to_insert)
connection.commit()
count = cursor.rowcount
print (count, "Record inserted successfully into mobile table")
except (Exception, psycopg2.Error) as error :
if(connection):
print("Failed to insert record into mobile table", error)
finally:
#closing database connection.
if(connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed") | [
"[email protected]"
] | |
e26428af933a1c4b112a53431bf1e3655d13ebd2 | 7bf06a59f3cd0d7e207155ebb01d4300efafb7b5 | /oslo/log/openstack/common/rpc/proxy.py | 9ab61c18af96e6412f517b7f4efc6483e2d8d189 | [
"Apache-2.0"
] | permissive | citrix-openstack-build/oslo.log | be41e6f72ee7a30ddbd29a761742e4ce58029a53 | f3f275a7625425d8790aa92ab9f94a51474d0aab | refs/heads/master | 2021-01-11T09:57:11.123286 | 2014-09-09T08:10:53 | 2014-09-09T08:10:53 | 24,143,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,454 | py | # Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A helper class for proxy objects to remote APIs.
For more information about rpc API version numbers, see:
rpc/dispatcher.py
"""
import six
from oslo.log.openstack.common import rpc
from oslo.log.openstack.common.rpc import common as rpc_common
from oslo.log.openstack.common.rpc import serializer as rpc_serializer
class RpcProxy(object):
"""A helper class for rpc clients.
This class is a wrapper around the RPC client API. It allows you to
specify the topic and API version in a single place. This is intended to
be used as a base class for a class that implements the client side of an
rpc API.
"""
# The default namespace, which can be overridden in a subclass.
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None,
serializer=None):
"""Initialize an RpcProxy.
:param topic: The topic to use for all messages.
:param default_version: The default API version to request in all
outgoing messages. This can be overridden on a per-message
basis.
:param version_cap: Optionally cap the maximum version used for sent
messages.
:param serializer: Optionally (de-)serialize entities with a
provided helper.
"""
self.topic = topic
self.default_version = default_version
self.version_cap = version_cap
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcProxy, self).__init__()
def _set_version(self, msg, vers):
"""Helper method to set the version in a message.
:param msg: The message having a version added to it.
:param vers: The version number to add to the message.
"""
v = vers if vers else self.default_version
if (self.version_cap and not
rpc_common.version_is_compatible(self.version_cap, v)):
raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
msg['version'] = v
def _get_topic(self, topic):
"""Return the topic to use for a message."""
return topic if topic else self.topic
def can_send_version(self, version):
"""Check to see if a version is compatible with the version cap."""
return (not self.version_cap or
rpc_common.version_is_compatible(self.version_cap, version))
@staticmethod
def make_namespaced_msg(method, namespace, **kwargs):
return {'method': method, 'namespace': namespace, 'args': kwargs}
def make_msg(self, method, **kwargs):
return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
**kwargs)
def _serialize_msg_args(self, context, kwargs):
"""Helper method called to serialize message arguments.
This calls our serializer on each argument, returning a new
set of args that have been serialized.
:param context: The request context
:param kwargs: The arguments to serialize
:returns: A new set of serialized arguments
"""
new_kwargs = dict()
for argname, arg in six.iteritems(kwargs):
new_kwargs[argname] = self.serializer.serialize_entity(context,
arg)
return new_kwargs
def call(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.call() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: The return value from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.call(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def multicall(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.multicall() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: An iterator that lets you process each of the returned values
from the remote method as they arrive.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.multicall(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def cast(self, context, msg, topic=None, version=None):
"""rpc.cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast() does not wait on any return value from the
remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast(context, self._get_topic(topic), msg)
def fanout_cast(self, context, msg, topic=None, version=None):
"""rpc.fanout_cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast() does not wait on any return value
from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast(context, self._get_topic(topic), msg)
def cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
def fanout_cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.fanout_cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast_to_server(context, server_params,
self._get_topic(topic), msg)
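# Hedged usage sketch (illustrative only, not part of oslo): a minimal client-side
# API built on top of RpcProxy; the topic, version and method name are assumptions.
class ExampleAPI(RpcProxy):
    """Client side of a hypothetical 'example' rpc API."""
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self):
        super(ExampleAPI, self).__init__(topic='example',
                                         default_version=self.BASE_RPC_API_VERSION)
    def ping(self, context, arg):
        return self.call(context, self.make_msg('ping', arg=arg))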
| [
"[email protected]"
] | |
0803cf05c575a050e799a508b367e85647ab4042 | 75d348d60252eb9a4b3c98c5659991cc8ecc267c | /build/demos/dnn_rotate/catkin_generated/pkg.installspace.context.pc.py | 1ea18cc2452d5cd70cc8104fdc1a8d650fc80eac | [] | no_license | ipeitzsch/3D-Mapping-Using-a-UAV-an-IMU-and-a-2D-LiDAR | 379c2736d926599f20b2b39f64aa6f24327108f1 | 32d32a776854557fb66433dc3f7cca6ec69a9b9f | refs/heads/master | 2020-05-31T00:29:40.733706 | 2019-07-15T22:29:12 | 2019-07-15T22:29:12 | 190,035,574 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/catkin_ws/install/include".split(';') if "/home/ubuntu/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dnn_detect".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "dnn_rotate"
PROJECT_SPACE_DIR = "/home/ubuntu/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
284c69195a94e570fa9b18b630f8dd6c444c0ded | b9d4204a4429350f35ce379c2673e3a5622fa52e | /chore_Hop/urls.py | dba93bc03196b11434d8e4ad805b74628a15926e | [] | no_license | Jgomez1996/chore_Hop | 7f362f6d6427bd01b33e1f6a35fe05eeeb1570a4 | 00a844dfa061fa4c7b63a6ab77ca512ae42eac5e | refs/heads/main | 2023-04-07T22:32:14.428644 | 2021-04-05T18:03:34 | 2021-04-05T18:03:34 | 353,771,884 | 0 | 0 | null | 2021-04-05T18:05:47 | 2021-04-01T17:09:50 | JavaScript | UTF-8 | Python | false | false | 803 | py | """chore_Hop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
#from django.contrib import admin
from django.urls import path, include
urlpatterns = [
#path('admin/', admin.site.urls),
path('', include('chore_app.urls')),
]
| [
"[email protected]"
] | |
4bf94bdf8bcd8eb9a765460ba1c3722851fe93e0 | b1c648fbc10dac9a5da8a8dc5fce88be61e2dafc | /Hw6/13/13.py | 4a7e82f98959795de10433defd41a42add32897a | [] | no_license | Plabo1028/ML_NTU_HW | 1be6955810d8a1b74ba3bc7399646ba11af84a1e | abd5b175a195eaf0a6ce376c891d03e948d6c5ac | refs/heads/master | 2021-01-10T16:01:19.072317 | 2016-02-02T13:46:06 | 2016-02-02T13:46:06 | 42,713,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,808 | py | import numpy as np
import sys, math
import pylab as pl
from random import *
def calculate_Ein(x,y,u):
    ''' train a weighted decision stump on one sorted feature: pick the best threshold and sign, then return the updated example weights '''
theta_interval = []
    for i in range(x.shape[0]-1):
        # print i
        if i == 0:
            theta_interval.append(float('-inf'))
        theta_interval.append((x[i]+x[i+1])/2)
    # include +inf as the last candidate threshold (see the commented-out construction below)
    theta_interval.append(float("inf"))
# print np.array(theta_interval)
# theta_interval = np.array( [float("-inf")]+[ (x[i]+x[i+1])/2 for i in range(0, x.shape[0]-1) ]+[float("inf")] )
# print 'theta_interval\n',theta_interval
# print 'u'
# raw_input('pause')
Sum_U = sum(u)
sign = 1
target_theta = 0.0
for theta in theta_interval:
y_positive = np.where(x > theta,1,-1)
y_negative = np.where(x < theta,1,-1)
'''calculate error like orange square in ppt'''
error_positive = sum((y_positive != y)*u)
error_negative = sum((y_negative != y)*u)
if error_positive < error_negative:
# positive ray because low error_positvie
if Sum_U > error_positive:
Sum_U = error_positive
sign = 1
target_theta = theta
else:
# negative ray because low error_negative
if Sum_U > error_negative:
Sum_U = error_negative
sign = -1
target_theta = theta
''' -inf and inf two case'''
if target_theta == float("inf"):
target_theta = 1.0
if target_theta == float("-inf"):
target_theta = -1.0
'''calculate scalingFactor like purple diamond in ppt'''
scalingFactor = 0.0
errorRate = 0
if sign == 1:
# positive ray
# print 'sign=1'
error = float(sum((np.where(x > target_theta,1,-1) != y)*u))
errorRate = error/float(sum(u))
# print 'error',error
# print 'errorRate',errorRate
# raw_input('pause in sign=1')
try:
scalingFactor = math.sqrt( (1-errorRate)/errorRate )
except:
scalingFactor = 0.5
# update weight error*scalingFactor + correct/scalingFactor
# using np.where to distinguish true of false
# Ein = sum(np.where(X[:,index_t] > theta_t,1,-1)!=y)/float(X.shape[0])
u_next = (np.where(x > target_theta,1,-1) != y )*u*scalingFactor + (np.where(x > target_theta,1,-1) == y)*u/scalingFactor
else:
# negative ray
# print 'sign=-1'
error = float(sum((np.where(x < target_theta,1,-1) != y)*u))
errorRate = error/float(sum(u))
# print 'error',error
# print 'errorRate',errorRate
# raw_input('pause in sign=-1')
try:
scalingFactor = math.sqrt( (1-errorRate)/errorRate )
except:
scalingFactor = 0.5
# update weight error*scalingFactor + correct/scalingFactor
# using np.where to distinguish true of false
# Ein = sum(np.where(X[:,index_t] < theta_t,1,-1)!=y)/float(X.shape[0])
u_next = (np.where(x < target_theta,1,-1) != y )*u*scalingFactor + (np.where(x < target_theta,1,-1) == y)*u/scalingFactor
alpha = math.log(scalingFactor,math.e)
# print errorRate
'''
errorRate
u_next : update the weight
alpha : ln(scalingFactor)
target_theta : distinguish positive or negative
sign : -1 iff negative ray ,1 iff positive ray
'''
return errorRate, u_next, alpha, target_theta, sign
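# A sketch of the standard AdaBoost-Stump quantities that calculate_Ein computes,
# stated here for reference:
#   epsilon_t     = weighted error rate of the chosen stump
#   scalingFactor = sqrt((1 - epsilon_t) / epsilon_t)
#   u <- u * scalingFactor on mistakes, u <- u / scalingFactor on correct points
#   alpha_t       = ln(scalingFactor)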
def readData(path):
X = []
y = []
with open(path) as f:
for line in f:
items = line.strip().split(' ')
tmp_X = []
for i in range(0,len(items)-1):
tmp_X.append(float(items[i]))
X.append(tmp_X)
y.append(float(items[-1]))
# raw_input(line)
return np.array(X),np.array(y)
if __name__ == '__main__':
T = 300
'''initial'''
X,y = readData('../train.dat')
u = np.ones(X.shape[0])/X.shape[0]
u_next = u
sorted_index = []
for i in range(0, X.shape[1]): sorted_index.append(np.argsort(X[:,i]))
# alpha == weight
alpha = np.ones(T)
theta = np.ones(T)
sign = np.ones(T)
index = np.zeros(T)
Ein = np.zeros(T)
mini_error = 1
for t in range(0, T):
# best parameter in iteration t
alpha_t = 1
theta_t = 1
sign_t = 1
index_t = 1
Eu = float("inf")
for i in range(0,X.shape[1]):
'''i means x dim or y dim '''
xi = X[sorted_index[i],i]
yi = y[sorted_index[i]]
errorRate_this_time, u_this_time, alpha_this_time, theata_this_time, sing_this_time = calculate_Ein(xi, yi, u[sorted_index[i]])
if Eu > errorRate_this_time :
Eu = errorRate_this_time
if mini_error > errorRate_this_time:
mini_error = errorRate_this_time
index_t = i
u_next = u_this_time
alpha_t = alpha_this_time
# Ein_t = Ein_this_time
theta_t = theata_this_time
sign_t = sing_this_time
index[t] = index_t
u[sorted_index[index_t]] = u_next
alpha[t] = alpha_t
theta[t] = theta_t
sign[t] = sign_t
if sign_t == 1:
Ein[t] = sum(np.where(X[:,index_t] > theta_t,1,-1)!=y)/float(X.shape[0])
else:
Ein[t] = sum(np.where(X[:,index_t] < theta_t,1,-1)!=y)/float(X.shape[0])
x = np.arange(T)
y = np.array([Ein[t] for t in range(T) ])
# print y
# print x
pl.plot(x, y)
pl.xlabel('Time')
pl.ylabel('Ein')
pl.title('13')
# pl.show()
pl.savefig('13')
| [
"[email protected]"
] | |
ce8e2a80a4fdb0ea8e9edb5ce8e9f1042e40dee5 | 6c7b148130d60cd99a5b414d5194b71a8c702e18 | /setup.py | 938fdb1c9d28be6ca6cadb655922b0437615d960 | [
"ISC"
] | permissive | babab/tuhinga | 2fcb4fa0d9afb202b34a7d508be8d7feff05e302 | c4de1f4cd7ea3eb18a049a3091a73bfba335b4a3 | refs/heads/master | 2020-05-19T13:39:49.752307 | 2015-06-08T13:15:59 | 2015-06-08T13:15:59 | 28,750,376 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | # vim: set et ts=4 sw=4 sts=4 ai:
from setuptools import setup
import tuhinga
setup(
name='tuhinga',
version=tuhinga.__version__,
description='Minimalistic markup language that translates to XML/HTML',
author=tuhinga.__author__,
author_email='[email protected]',
url='http://github.com/babab/tuhinga',
download_url='https://pypi.python.org/pypi/tuhinga',
py_modules=['tuhinga'],
license='ISC',
long_description='{}\n{}'.format(open('README.rst').read(),
open('CHANGELOG.rst').read()),
platforms='any',
scripts=['tuh'],
data_files=[
('share/tuhinga/examples', ['examples/bootstrap-navbar.tuh',
'examples/dev-test.tuh',
'examples/pretty-minimal.tuh',
'examples/very-minimal.tuh']),
('share/tuhinga', ['LICENSE', 'README.rst', 'tuh.vim',
'tuhinga_webrepl.py'])
],
install_requires=['pycommand'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Adaptive Technologies',
'Topic :: Documentation',
'Topic :: Internet',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Office/Business',
'Topic :: Text Editors :: Documentation',
'Topic :: Text Editors :: Text Processing',
'Topic :: Text Processing :: Filters',
'Topic :: Text Processing :: Linguistic',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: XML',
'Topic :: Utilities',
'BLOCK FOR UPLOAD',
],
)
| [
"[email protected]"
] | |
cf446bad7c1c634252be91eb0857d97fc533227f | 088276a2b02f74493c6303cbf17573957e1c2b3e | /KIWOOM/3_sqlite.py | 59417a7e212b24bd81d4e0e6c52c91ba6a956c48 | [] | no_license | naikiki87/python | 38f3ec9ed55b48df136708ad8e90e4358d536ca3 | 3c75cace24258c84b682e06033130ee627f7883c | refs/heads/master | 2023-06-05T09:49:51.931345 | 2021-06-30T04:35:41 | 2021-06-30T04:35:41 | 268,022,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import sqlite3
# con = sqlite3.connect("D:/python/db_sqlite/kospi.db")
con = sqlite3.connect("./kospi.db")
cur = con.cursor()
# cur.execute("CREATE TABLE PhoneBook(Name text, PhoneNum text);")
# cur.execute("INSERT INTO PhoneBook Values('Derick', '010-1234-5678');")
cur.execute("SELECT * FROM PhoneBook")
for row in cur:
print(row)
# con.commit()
# con.close() | [
"[email protected]"
] | |
a6dd3866eb3a9750c3726ce8a4d7752a01cf6294 | 9947d1e328a3262a35a61385dc537c3dc557ab7d | /The_diffcult_point/test_super.py | 332b966157b3e69fadeb60b6f17ab326afdfe6a4 | [] | no_license | nuass/lzh | d0a7c74a3295523d1fe15eeaa73997fc04469f06 | 3cb1cf1e448b88ade226d113a7da4eab7bbb5c09 | refs/heads/master | 2021-02-06T06:10:32.772831 | 2019-06-10T08:54:49 | 2019-06-10T08:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # coding=utf-8
class A(object):
def hao(self):
print("A")
class B(A):
def hao(self):
print(self.__class__.__mro__)
super(B,self).hao()
print("B")
B().hao() | [
"[email protected]"
] | |
a95cd447f2c5ed8448c46ac16af9dd1ab0b44f9d | 8b7c0ab7d9e18c7ef5ff96ae0ad69ca8dcb5f9f3 | /guest/sign/models.py | d5a91ca7160b18c5683de6cb4af8782f42bedc55 | [] | no_license | HCT118/guest_repository | 7811aca4f6b921874ebf6b40bd7a43d6e91836df | 63087825116b96238f7cdd91ee771817df01b593 | refs/heads/master | 2021-01-20T19:57:11.425199 | 2016-08-16T01:45:32 | 2016-08-16T01:45:32 | 65,777,335 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from django.db import models
# Create your models here.
# Event (press conference)
class Event(models.Model):
    name = models.CharField(max_length=100)              # event title
    limit = models.IntegerField()                        # attendee limit
    status = models.BooleanField()                       # status
    address = models.CharField(max_length=200)           # address
    start_time = models.DateTimeField('events time')     # event start time
    create_time = models.DateTimeField(auto_now=True)    # creation time (set automatically)
    def __str__(self):
        return self.name
# Guest
class Guest(models.Model):
    event = models.ForeignKey(Event)                     # related event id
    realname = models.CharField(max_length=64)           # name
    phone = models.CharField(max_length=16)              # mobile number
    email = models.EmailField()                          # email
    sign = models.BooleanField()                         # sign-in status
    create_time = models.DateTimeField(auto_now=True)    # creation time (set automatically)
class Meta:
unique_together = ('phone', 'event')
def __str__(self):
return self.realname
| [
"[email protected]"
] | |
0992b0c379cfae2ef2b543e01d98c395df4e38b1 | 7e27d2b844e962a567e0311a6eb5ccf3fcdc7b98 | /lib/exabgp/configuration/current/l2vpn.py | ec197e604d92497bcdac9a1cd9c9241d57eb32fc | [] | no_license | slabakov/exabgp | 1dbf6a98b06a2c2cdbeedf0954d0429f0dbf98fb | 33f851d70715f4ba1792acc36436ef32b70c30c9 | refs/heads/master | 2020-12-30T19:46:09.570146 | 2015-06-05T15:36:19 | 2015-06-05T15:36:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | # encoding: utf-8
"""
parse_l2vpn.py
Created by Thomas Mangin on 2015-06-05.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.configuration.current.basic import Basic
class ParseL2VPN (Basic):
syntax = \
'syntax:\n' \
' l2vpn {\n' \
' vpls site_name {\n' \
' endpoint <vpls endpoint id; integer>\n' \
' base <label base; integer>\n' \
' offset <block offet; interger>\n' \
' size <block size; integer>\n' \
' route-distinguisher|rd 255.255.255.255:65535|65535:65536|65536:65535\n' \
' next-hop 192.0.1.254;\n' \
' origin IGP|EGP|INCOMPLETE;\n' \
' as-path [ as as as as] ;\n' \
' med 100;\n' \
' local-preference 100;\n' \
' community [ 65000 65001 65002 ];\n' \
' extended-community [ target:1234:5.6.7.8 target:1.2.3.4:5678 origin:1234:5.6.7.8 origin:1.2.3.4:5678 0x0002FDE800000001 l2info:19:0:1500:111 ]\n' \
' originator-id 10.0.0.10;\n' \
' cluster-list [ 10.10.0.1 10.10.0.2 ];\n' \
' withdraw\n' \
' name what-you-want-to-remember-about-the-route\n' \
' }\n' \
' }\n'
_str_vpls_bad_size = "you tried to configure an invalid l2vpn vpls block-size"
_str_vpls_bad_offset = "you tried to configure an invalid l2vpn vpls block-offset"
_str_vpls_bad_label = "you tried to configure an invalid l2vpn vpls label"
_str_vpls_bad_enpoint = "you tried to configure an invalid l2vpn vpls endpoint"
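
    # Hedged illustration (not from the original source): a concrete vpls block
    # written against the `syntax` grammar string above; every value is made up.
    #
    #   l2vpn {
    #       vpls site-1 {
    #           endpoint 5
    #           base 262145
    #           offset 1
    #           size 8
    #           route-distinguisher 65000:100
    #           next-hop 192.0.1.254;
    #       }
    #   }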
def __init__ (self, error):
self.error = error
def clear (self):
pass
def vpls_endpoint (self, scope, token):
number = int(token.pop(0))
if number < 0 or number > 0xFFFF:
return self.error.set(self._str_vpls_bad_enpoint)
vpls = scope[-1]['announce'][-1].nlri
vpls.ve = number
return True
def vpls_size (self, scope, token):
number = int(token.pop(0))
if number < 0 or number > 0xFFFF:
return self.error.set(self._str_vpls_bad_size)
vpls = scope[-1]['announce'][-1].nlri
vpls.size = number
return True
def vpls_offset (self, scope, token):
number = int(token.pop(0))
if number < 0 or number > 0xFFFF:
return self.error.set(self._str_vpls_bad_offset)
vpls = scope[-1]['announce'][-1].nlri
vpls.offset = number
return True
def vpls_base (self, scope, token):
number = int(token.pop(0))
if number < 0 or number > 0xFFFF:
return self.error.set(self._str_vpls_bad_label)
vpls = scope[-1]['announce'][-1].nlri
vpls.base = number
return True
| [
"[email protected]"
] | |
680b905428d8476bf2432d2b6bec403125573f01 | ad062b5d940a25b19855e1744a9061aec37805cf | /Clustering2.py | df8fe4df443ffff37577a2d1176f87041cafa48b | [] | no_license | djbelyak/PsyAnalysis | 782f0948d5ccac56804ce611037bbe1ea9e03913 | 4f310de3232eae9b8377966ff84216878de6e459 | refs/heads/master | 2020-03-26T01:42:07.612231 | 2015-04-21T15:10:56 | 2015-04-21T15:10:56 | 34,334,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | from numpy import loadtxt
from sklearn.cluster import DBSCAN
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Imputer
fileName = "Book1.csv"
data = loadtxt(fileName, delimiter=';', skiprows=1)
print data
pipeline = make_pipeline(Imputer(), DBSCAN(eps=3.161, algorithm='ball_tree', min_samples=5))
pipeline.fit(data)
name, dbscan = pipeline.steps[1]
print dbscan.core_sample_indices_
print dbscan.labels_
n_clusters_ = len(set(dbscan.labels_)) - (1 if -1 in dbscan.labels_ else 0)
print('Estimated number of clusters: %d' % n_clusters_)
n_error = 0
for i in dbscan.labels_:
if i == -1:
n_error += 1
print('Number of errors: %d' % n_error)
for cluster in range(n_clusters_):
n_size = 0
for i in dbscan.labels_:
if i == cluster:
n_size += 1
print('Cluster %d size: %d' % (cluster, n_size))
print dbscan.components_ | [
"[email protected]"
] | |
fe36467b4eedea7503d1237ef94c8e271e6d9172 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_algopiggy_codejam2.py | b5738aafdb4b48373f018ffc972bf9a5528e35b8 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 467 | py | def func(p):
n=0
s=0
index=0
while index < len(p) and p[index]=='-':
index+=1
if index>0:
n=1
for i in range(index,len(p)):
if s==0 and p[i]=='-': # encounters a - after +
s=1
elif s==1 and p[i]=='+': # encounters a + after -
s=0
n=n+2
if s==1:
n=n+2
return n
T = input()
for x in range(0,T):
p = raw_input()
print "Case #{}: {}".format(x+1,func(p))
| [
"[[email protected]]"
] | |
415081eb7337bc054786897989d04751650d570c | c922252e52eea97b7557937a2558bbec664d2e07 | /search/urls.py | c1bf06d10230d5caea9f4253015244e5b7c6d24c | [] | no_license | strar-buck/twitter_insta_news_feed | cfe1d4cd88b6dc938134d82ec0c960090390aee3 | 22858054ebf7821d4e5469163b14b542983fadff | refs/heads/master | 2021-06-11T01:26:30.671568 | 2017-02-01T09:11:09 | 2017-02-01T09:11:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.conf.urls import url
from search import views
urlpatterns = [
url(r'^search/tweets/', views.search_tweets, name='search_tweets'),
url(r'^search/insta/', views.search_insta, name='search_insta'),
url(r'^$', views.index, name='index'),
] | [
"[email protected]"
] | |
c3b2a2b690ae818621012fa290d2e1de48a46b0f | 6dd2c91fb434c096b9582531522a684e8b3e4bfc | /syn/32_xsynon_phil.py | 2a3fd0d68774f5cf96db723631d656bf8a9f8fda | [] | no_license | sichumon/synandant | 09dd6f991484a30be3b2503425813333c4b3b579 | 3d1c56a32e04556c46ee112045b18c84e3a131e3 | refs/heads/master | 2020-07-17T22:24:10.815968 | 2019-09-03T15:41:51 | 2019-09-03T15:41:51 | 206,112,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | py | #!/usr/bin/python
import string
import random
nlist = [['chaos', 'mayhem'],
['able', 'competent'],
['chaotic', 'disordered'],
['skilful', 'clever'],
['pandemonium', 'chaos'],
['colossal', 'huge'],
['intricate', 'complex'],
['conceited', 'arrogant'],
['contract', 'settlement'],
['converse', 'talk'],
['coy', 'shy'],
['create', 'produce'],
['vain', 'arrogant'],
['generate', 'create'],
['hazard', 'danger'],
['deceitful', 'dishonest'],
['hinder', 'block'],
['coy', 'timid'],
['despondent', 'downhearted'],
['overwhelmed', 'shocked'],
['delectable', 'delicious'],
['devise', 'invent'],
['deprive', 'deny'],
['dilute', 'weaken'],
['sad', 'despondent'],
['destroy', 'demolish'],
['dismal', 'miserable'],
['wreck', 'destroy'],
['devastated', 'distraught'],
['diminutive', 'minute'],
['educate', 'train'],
['minute', 'tiny'],
['tedious', 'monotonous'],
['durable', 'lasting'],
['enduring', 'lasting'],
['elongate', 'lengthen'],
['emblem', 'symbol'],
['emerge', 'appear'],
['monstrous', 'huge'],
['badge', 'emblem'],
['extend', 'elongate'],
['evaluate', 'assess'],
['precise', 'exact'],
['exhilarated', 'overjoyed'],
['exquisite', 'beautiful'],
['remove', 'extract'],
['rapid', 'swift'],
['appraise', 'assess'],
['renowned', 'eminent'],
['fatigue', 'exhaustion']]
def picksomequestions():
""" converts questions to a dict()"""
answers = dict()
for question in nlist:
answers[question[0]] = question[1]
if len(answers.keys()) > 50:
break
return answers
def picksomechoices(question, answer):
""" returns the correct q/a plus 3 other random choices as a dict()"""
""" because of the way dict() works all 4 choices will be unique """
choices = dict()
choices[question] = answer
for choice in random.sample(nlist, 10):
choices[choice[0]] = choice[1]
if len(choices.keys()) > 3:
break
return choices
def choose(multichoice, question, correct):
""" takes the list of choices, the correct q and the correct a. Returns 1 for correct and 0 for incorrect """
counter = 1
ncorrect = 0
allowed = '12345'
print("choose a synonym for "+question)
for option in multichoice.values():
print(str(counter)+")"+option)
if option == correct:
ncorrect = counter
counter = counter + 1
res = raw_input(">")
while (len(res) != 1 or res not in allowed):
res = raw_input(">")
#return res
if int(res) == ncorrect:
print("CORRECT!")
return 1
else:
print("\n >>>>>> The answer is actually -- " + correct)
print
return 0
# main program starts here
score = 0
answers = picksomequestions()
for question in random.sample(answers.keys(),49):
multichoice = picksomechoices(question, answers[question])
score = score + choose(multichoice, question, answers[question])
print("You scored "+str(100*score/49.0)+"%")  # 49 questions are asked in the loop above
| [
"[email protected]"
] | |
ad4b79a74342aaad36f65b8ae24c44990f5fe4cc | 5ce92e9515236f6fb87ac977cf4558d49e361da6 | /twitter_thread/auth_backends.py | 917f324ed2569e689865e948fccd673c43b7fc59 | [] | no_license | JWatkins20/twitter_thread | a0e4e7bd33970227848c66bcf0403c98c0b13a32 | df5f24f600a08e26a348af9b2564d547c15471ad | refs/heads/master | 2022-11-06T22:17:23.749408 | 2020-07-30T15:12:24 | 2020-07-30T15:12:24 | 262,909,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.core.exceptions import ImproperlyConfigured
from django.apps import apps
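
# Hedged configuration sketch (values are illustrative, not taken from the
# original project): this backend expects settings of the form
#
#   CUSTOM_USER_MODEL = 'accounts.CustomUser'   # 'app_label.ModelName'
#   AUTHENTICATION_BACKENDS = [
#       'twitter_thread.auth_backends.CustomUserModelBackend',
#   ]
#
# so that apps.get_model() below can resolve the custom user model.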
class CustomUserModelBackend(ModelBackend):
def authenticate(self, username=None, password=None, **kwargs):
try:
user = self.user_class.objects.get(username=username)
if user.check_password(password):
return user
except self.user_class.DoesNotExist:
return None
def get_user(self, user_id):
try:
return self.user_class.objects.get(pk=user_id)
except self.user_class.DoesNotExist:
return None
@property
def user_class(self):
if not hasattr(self, '_user_class'):
self._user_class = apps.get_model(*settings.CUSTOM_USER_MODEL.split('.', 2))
if not self._user_class:
raise ImproperlyConfigured('Could not get custom user model')
return self._user_class | [
"[email protected]"
] | |
ad32ef3a8a166008f96ba4263fa9fefd1dfb39fa | c789641c4e3368541bdfe1bd4a13563622845bc0 | /alien_invasion.py | 2832011d51182795748e9f897378186ad8b5dc46 | [] | no_license | jimboozoz/Alien-Invasion | c860dbf71868bad2100a52cd38377d694f24c59d | df73b024ed7eb557b338117428d35a28dbaed9a8 | refs/heads/master | 2022-11-06T23:16:19.313190 | 2020-06-10T04:22:01 | 2020-06-10T04:22:01 | 271,169,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | import sys
import pygame
from settings import Settings
from ship import Ship
class AlienInvasion:
    """Overall class to manage game assets and behavior."""
def __init__(self):
"""Initialize game and create resources"""
pygame.init()
self.settings = Settings()
self.screen = pygame.display.set_mode(
(self.settings.screen_width, self.settings.screen_height))
pygame.display.set_caption("Alien Invasion")
self.ship = Ship(self)
# Set background color
self.bg_color = (230, 230, 230)
def run_game(self):
"""Start main loop for game"""
while True:
            # Watch keyboard and mouse events.
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
            # Redraw the screen during each pass through the loop.
            self.screen.fill(self.settings.bg_color)
            self.ship.blitme()
            # Make the most recently drawn screen visible.
pygame.display.flip()
if __name__ == '__main__':
"""Make a game instance and run the game."""
ai = AlienInvasion()
ai.run_game()
| [
"[email protected]"
] | |
1838b7866f0da3d4b64a2fc26ac90f64e3ca399f | b7a0702a22fd1d568aaebb53fb28327a2e2e515e | /tour/info.py | c3d30a277c45e59dacf0dd02570dae6983581ec9 | [] | no_license | wSuhye/API | c2fd00a2621db0676ff0ea23ee8bb388b20e3929 | bd7868715f3686169054fbda0c5f83713ae28051 | refs/heads/master | 2020-05-25T20:34:27.432490 | 2019-05-22T06:46:24 | 2019-05-22T06:46:24 | 187,979,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import requests, xmltodict
import json
def InfoFunc():
# apiKey = 'R%2BomFWTHyWm%2FPdy9PWKNvk1TWKHzKxMlIAk9Xed%2Fyr5TdrNOqjPYe4L4wTDIiVak3%2FkgkowpiKN757INUz5gLw%3D%3D'
# url = ''
raw_data = f'http://api.visitkorea.or.kr/openapi/service/rest/KorService/locationBasedList?serviceKey=R%2BomFWTHyWm%2FPdy9PWKNvk1TWKHzKxMlIAk9Xed%2Fyr5TdrNOqjPYe4L4wTDIiVak3%2FkgkowpiKN757INUz5gLw%3D%3D&numOfRows=10&pageNo=1&MobileOS=ETC&MobileApp=AppTest&arrange=A&contentTypeId=15&mapX=126.981611&mapY=37.568477&radius=1000&listYN=Y'
data = requests.get(raw_data).content
xmlObject = xmltodict.parse(data)
a = xmlObject['response']['body']['items']['item']
b = a[0]['firstimage']
AllInfo = []
for data in a:
AllInfo.append({
'title' : data['title'],
'addr1' : data['addr1'],
# 'firstimage' : data['firstimage']
})
return AllInfo
# print(AllInfo)
    # print(data[''] + 'write the words')
| [
"[email protected]"
] | |
076bd380512829e419772e1bf9829402d9be2022 | 6f026cb7bcca9a89cd1ce6764854a10116de9a10 | /2al1/rotate.py | 87d651850baae3d208ce04dc603ce9b2a0b81bf1 | [] | no_license | bubushkin/opencv_experiment | 5c4d3653319987e156a2cfaa42ec0d95d5eccc77 | a5faf97f68093053b2dfb9f7901c3360552ff048 | refs/heads/master | 2020-07-19T07:43:03.020843 | 2016-11-27T07:11:10 | 2016-11-27T07:11:10 | 73,769,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | '''
Created on Nov 24, 2016
@author: iskandar
'''
| [
"[email protected]"
] | |
a2464060f038d75f5b7906e1ad4eba0b78e60252 | e910318d01528d82040507a49eeeb8dade45b31f | /tests/multi_net/ssl_data.py | 788551d06a9a98e8a2af0b53d569355a03c91489 | [
"MIT"
] | permissive | pfalcon/pycopy | e844480a5e5cd463530328889daed2ba87552b8a | 3ac90ae9c3c6bbebfba9cada2d37025e35c62796 | refs/heads/pfalcon | 2023-08-30T09:39:52.290147 | 2022-09-08T16:42:38 | 2022-09-08T16:42:38 | 15,507,576 | 753 | 71 | MIT | 2021-05-08T04:59:21 | 2013-12-29T11:38:47 | C | UTF-8 | Python | false | false | 2,885 | py | # Simple test creating an SSL connection and transferring some data
# This test won't run under CPython because it requires key/cert
import usocket as socket, ussl as ssl
import ubinascii
PORT = 8000
# This self-signed key/cert pair is randomly generated and to be used for
# testing/demonstration only. You should always generate your own key/cert.
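# A hedged sketch (not part of the original test) of producing a replacement
# pair with the openssl CLI and converting it to the hex strings embedded
# below (the data below is DER encoded); key size and validity are arbitrary:
#
#   openssl req -x509 -newkey rsa:2048 -nodes -keyout key.pem -out cert.pem -days 365
#   openssl rsa  -in key.pem  -out key.der  -outform DER
#   openssl x509 -in cert.pem -out cert.der -outform DER
#
# then hexlify the .der files, e.g. binascii.hexlify(open('key.der', 'rb').read()).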
key = ubinascii.unhexlify(
b"3082013b020100024100cc20643fd3d9c21a0acba4f48f61aadd675f52175a9dcf07fbef"
b"610a6a6ba14abb891745cd18a1d4c056580d8ff1a639460f867013c8391cdc9f2e573b0f"
b"872d0203010001024100bb17a54aeb3dd7ae4edec05e775ca9632cf02d29c2a089b563b0"
b"d05cdf95aeca507de674553f28b4eadaca82d5549a86058f9996b07768686a5b02cb240d"
b"d9f1022100f4a63f5549e817547dca97b5c658038e8593cb78c5aba3c4642cc4cd031d86"
b"8f022100d598d870ffe4a34df8de57047a50b97b71f4d23e323f527837c9edae88c79483"
b"02210098560c89a70385c36eb07fd7083235c4c1184e525d838aedf7128958bedfdbb102"
b"2051c0dab7057a8176ca966f3feb81123d4974a733df0f958525f547dfd1c271f9022044"
b"6c2cafad455a671a8cf398e642e1be3b18a3d3aec2e67a9478f83c964c4f1f"
)
cert = ubinascii.unhexlify(
b"308201d53082017f020203e8300d06092a864886f70d01010505003075310b3009060355"
b"0406130258583114301206035504080c0b54686550726f76696e63653110300e06035504"
b"070c075468654369747931133011060355040a0c0a436f6d70616e7958595a3113301106"
b"0355040b0c0a436f6d70616e7958595a3114301206035504030c0b546865486f73744e61"
b"6d65301e170d3139313231383033333935355a170d3239313231353033333935355a3075"
b"310b30090603550406130258583114301206035504080c0b54686550726f76696e636531"
b"10300e06035504070c075468654369747931133011060355040a0c0a436f6d70616e7958"
b"595a31133011060355040b0c0a436f6d70616e7958595a3114301206035504030c0b5468"
b"65486f73744e616d65305c300d06092a864886f70d0101010500034b003048024100cc20"
b"643fd3d9c21a0acba4f48f61aadd675f52175a9dcf07fbef610a6a6ba14abb891745cd18"
b"a1d4c056580d8ff1a639460f867013c8391cdc9f2e573b0f872d0203010001300d06092a"
b"864886f70d0101050500034100b0513fe2829e9ecbe55b6dd14c0ede7502bde5d46153c8"
b"e960ae3ebc247371b525caeb41bbcf34686015a44c50d226e66aef0a97a63874ca5944ef"
b"979b57f0b3"
)
# Server
def instance0():
ctx = ssl.SSLContext()
ctx.set_cert_key(cert, key)
multitest.globals(IP=multitest.get_network_ip())
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(socket.getaddrinfo("0.0.0.0", PORT)[0][-1])
s.listen(1)
multitest.next()
s2, _ = s.accept()
s2 = ctx.wrap_socket(s2, server_side=True)
print(s2.read(16))
s2.write(b"server to client")
s.close()
# Client
def instance1():
multitest.next()
ctx = ssl.SSLContext()
s = socket.socket()
s.connect(socket.getaddrinfo(IP, PORT)[0][-1])
s = ctx.wrap_socket(s)
s.write(b"client to server")
print(s.read(16))
s.close()
| [
"[email protected]"
] | |
d319c0a1f222f90ea687693f97695f6661d5d468 | db5684eeac1c7359017a5d109028ce2b8b49d1a7 | /app_rbac/forms/role.py | 69b2ef8c424d894b2facb22f662425cd0f00ce1f | [] | no_license | Alan-AW/CrmSys | a4873c52e1f6bb05c45377459b0a040ff7dbbc75 | 95119dd7b96b981a00541e8adcee410eb1fbe865 | refs/heads/main | 2023-08-22T08:04:44.207347 | 2021-10-13T08:08:44 | 2021-10-13T08:08:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from django import forms
from app_rbac.models import Role
class RoleModelForm(forms.ModelForm):
class Meta:
model = Role
        fields = ['title']  # writing '__all__' here would expose every model field for editing
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'})
}
| [
"[email protected]"
] | |
7e6beccea0df80d8bc7f4e4c006266c1add8558a | 975cac2bd861496702977c414a42b4a1f21e12df | /Snake.py | 0fd61fb4608faa54cffc0608869fe0b784171052 | [] | no_license | AikKh/SnakeGame | ff61eb5a7d867360757ad66fce0f65e88b40dacf | 16ba0aa87426762e81b3726eacc611d09ed88f56 | refs/heads/main | 2023-08-23T03:27:55.945599 | 2021-10-19T08:32:36 | 2021-10-19T08:32:36 | 418,836,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | class Snake:
    # direction codes: 0 = up, 1 = right, 2 = down, 3 = left
    # (grid coordinates: x grows to the right, y grows downward)
def __init__(self, head):
self._head = head
self._cor = [self._head]
self._size = len(self._cor)
self._direction = None
def snakeMaker(self):
if self._direction == 0:
self._cor.append((self._cor[-1][0], self._cor[-1][1] + 1))
elif self._direction == 1:
self._cor.append((self._cor[-1][0] - 1, self._cor[-1][1]))
elif self._direction == 2:
self._cor.append((self._cor[-1][0], self._cor[-1][1] - 1))
elif self._direction == 3:
self._cor.append((self._cor[-1][0] + 1, self._cor[-1][1]))
def move(self):
if self._direction == 0:
future_head = (self._head[0], self._head[1] - 1)
self._cor.insert(0, future_head)
del self._cor[-1]
self._head = self._cor[0]
elif self._direction == 1:
future_head = (self._head[0] + 1, self._head[1])
self._cor.insert(0, future_head)
del self._cor[-1]
self._head = self._cor[0]
elif self._direction == 2:
future_head = (self._head[0], self._head[1] + 1)
self._cor.insert(0, future_head)
del self._cor[-1]
self._head = self._cor[0]
elif self._direction == 3:
future_head = (self._head[0] - 1, self._head[1])
self._cor.insert(0, future_head)
del self._cor[-1]
self._head = self._cor[0] | [
"[email protected]"
] | |
4f2d2efc8705f06a3f470a9a0321433434e07034 | 8558b7f65ae50dde5595f1ae73331b42d2db94ef | /Desafio2.py | 3caa318fc4ca1292372234c3234feda634ac62b7 | [] | no_license | pamsfih/Python-Projects | 91e32d7859316690984fbdf5bb3033e48c658343 | c5d0db4805dbb94e4758ff7362d7b15ac20796fe | refs/heads/master | 2021-07-20T10:15:01.193260 | 2020-05-22T14:02:49 | 2020-05-22T14:02:49 | 171,526,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | '''
Online Python Compiler.
Code, Compile, Run and Debug python program online.
Write your code in this editor and press "Run" button to execute it.
'''
# Read the day, month and year of birth and show a message on screen #
dia = input ('Qual o dia que você nasceu?')
mes = input ('Qual o mês que você nasceu?')
ano = input ('Qual o ano que você nasceu?')
print ('Você nasceu no dia', dia, 'de', mes, 'de', ano) | [
"[email protected]"
] | |
309df542c787497111d6f5a5e8316f21b066b169 | 85a7dde9c48945972a7f521f0fbb2eb56b323aa2 | /src/racecar/racecar/scripts/wall.py | 81392b99d9a1053aef41ba18a713b8b9a02e1479 | [
"BSD-3-Clause"
] | permissive | jwmcgettigan/renegade | 1e8f61a14d6a5a7aff5c410f0c26bb166f95bd03 | ef76bebc6867683e1fb3201be547f42aa6e65881 | refs/heads/master | 2021-04-06T13:53:12.945602 | 2018-07-17T22:09:13 | 2018-07-17T22:09:13 | 124,680,527 | 1 | 0 | null | 2018-07-17T22:09:14 | 2018-03-10T17:33:52 | Makefile | UTF-8 | Python | false | false | 338 | py | #!/usr/bin/python
from item import Item
import cv2, numpy as np, math
class Wall(Item):
def __init__(self, angle, distance):
super(Wall, self).__init__()
self.angle = angle
self.distance = distance
def getAngle(self):
return self.angle
def getDistance(self):
return self.distance
| [
"[email protected]"
] | |
0b0744d57bf13ad78238920e4eccaeb0cc149bce | d2252432a981125fe77d9c34ce7753332575dff6 | /get_keywords.py | d89ab62ea8d58adbfc989aeec9d8b9cda26afe6e | [] | no_license | anastasiiaCher/keywords-network | 132292e21b76669e5d1a711e601002b5e4dd9b13 | 93671e6ef79bb75d8bfa6ffc7fdeec97d3e10793 | refs/heads/master | 2020-09-13T12:04:38.829333 | 2020-05-04T10:46:48 | 2020-05-04T10:46:48 | 222,773,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,967 | py | import pke
import nltk
import csv
import pandas as pd
import unicodecsv as csv2
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
nltk.download('stopwords')
nltk.download('wordnet')
lemmatizer = WordNetLemmatizer()
file = open("stopwords.txt", "r")
for line in file:
words = line.split(",")
stoplist = stopwords.words('english') + list(words)
def get_keywords(content):
content = content.replace("-", "")
if len(content) <= 500:
NGraph = 8
NStat = 5
if 500 < len(content) < 1000:
NGraph = 13
NStat = 10
if len(content) >= 1000:
NGraph = 18
NStat = 15
PositionRank = []
MultipartiteRank = []
TFIDF = []
TextRank = []
# PKE - TF-IDF
extractorTFIDF = pke.unsupervised.TfIdf()
extractorTFIDF.load_document(input=content, language="en", normalization=None)
extractorTFIDF.candidate_selection(n=4, stoplist=stoplist)
df = pke.load_document_frequency_file(
input_file='C:/Users/admin/Anaconda3/Lib/site-packages/pke/models/df-semeval2010.tsv.gz')
extractorTFIDF.candidate_weighting(df=df)
keyphrasesTFIDF = extractorTFIDF.get_n_best(n=NStat)
for key in keyphrasesTFIDF:
TFIDF.append(key[0])
# PKE - TextRank
pos = {'NOUN', 'PROPN', 'ADJ'}
extractorTextRank = pke.unsupervised.TextRank()
extractorTextRank.load_document(input=content, language='en', normalization=None)
extractorTextRank.candidate_weighting(window=2, pos=pos, top_percent=0.33)
keyphrasesTextRank = extractorTextRank.get_n_best(n=NGraph)
for key in keyphrasesTextRank:
TextRank.append(key[0])
# PKE - PositionRank
pos = {'NOUN', 'PROPN', 'ADJ'}
grammar = "NP: {<ADJ>*<NOUN|PROPN>+}"
extractorPositionRank = pke.unsupervised.PositionRank()
extractorPositionRank.load_document(input=content, language='en', normalization=None)
extractorPositionRank.candidate_selection(grammar=grammar, maximum_word_number=4)
extractorPositionRank.candidate_weighting(window=2, pos=pos)
keyphrasesPositionRank = extractorPositionRank.get_n_best(n=NGraph)
for key in keyphrasesPositionRank:
PositionRank.append(key[0])
# PKE - MultipartiteRank
extractorMultipartiteRank = pke.unsupervised.MultipartiteRank()
extractorMultipartiteRank.load_document(input=content)
pos = {'NOUN', 'PROPN', 'ADJ'}
extractorMultipartiteRank.candidate_selection(pos=pos, stoplist=stoplist)
extractorMultipartiteRank.candidate_weighting(alpha=3, threshold=0.95, method='average')
keyphrasesMultipartiteRank = extractorMultipartiteRank.get_n_best(n=NGraph)
for key in keyphrasesMultipartiteRank:
MultipartiteRank.append(key[0])
inter1 = set(PositionRank).intersection(set(MultipartiteRank))
inter2 = set(TFIDF).intersection(set(TextRank))
to_remove_fin = []
to_add = []
to_remove = []
for elem1 in inter2:
for elem2 in inter1:
if (" " not in elem1) and (" " not in elem2) and (
lemmatizer.lemmatize(elem1) in lemmatizer.lemmatize(elem2)):
to_remove_fin.append(elem2)
to_remove.append(elem1)
to_add.append(elem1)
if (" " not in elem1) and (" " not in elem2) and (
lemmatizer.lemmatize(elem2) in lemmatizer.lemmatize(elem1)):
to_remove_fin.append(elem2)
to_remove.append(elem1)
to_add.append(elem2)
if (elem1 in elem2) and (' ' in elem1) and (elem1 != elem2):
to_remove_fin.append(elem2)
elif (elem1 in elem2) and (' ' not in elem1) and (elem1 != elem2):
to_remove.append(elem1)
to_remove = set(to_remove)
for elem in to_remove:
inter2.remove(elem)
inter = set(inter1).union(set(inter2))
inter = list(inter)
new_inter = inter
new_inter = new_inter + list(set(to_add))
for i in range(0, len(inter)):
count = 0
poses = []
tokens = [word for word in nltk.word_tokenize(inter[i]) if word not in stoplist]
new_inter[i] = ' '.join(tokens)
tags = list(nltk.pos_tag(tokens))
for tag in tags:
poses.append(tag[1])
for pos in poses:
if 'NN' in pos:
count += 1
if count == 0:
to_remove_fin.append(new_inter[i])
if len(poses) > 4:
to_remove_fin.append(new_inter[i])
to_remove_fin = list(set(to_remove_fin))
new_inter = list(set(new_inter).difference(to_remove_fin))
return new_inter
author = []
abstract = []
year = []
language = []
with open('2_abs_translated.csv', 'r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
abstract.append(row["TRANSLATED_ABSTRACT"])
year.append(row["YEAR"])
author.append(row["AUTHOR"])
language.append(row["LANGUAGE"])
df = pd.DataFrame(
{'Abstract': abstract,
'Year': year,
'Author': author,
'Language': language})
df.sort_values("Abstract", inplace=True)
df.drop_duplicates(subset="Abstract", keep='first', inplace=True)
with open('2_abs_keywords2013.csv', 'wb') as f:
writer = csv2.writer(f)
writer.writerow(["AUTHOR", "YEAR", "LANGUAGE", "TEXT", "KEYWORDS"])
for i in range(0, df.shape[0]):
if df.iloc[i]["Year"] == '2013':
keywords = get_keywords(df.iloc[i]["Abstract"])
print(i + 1)
print(df.iloc[i]["Abstract"])
print(keywords)
writer.writerow([df.iloc[i]["Author"],
df.iloc[i]["Year"],
df.iloc[i]["Language"],
df.iloc[i]["Abstract"],
keywords]) | [
"[email protected]"
] | |
50b53e22e810036a456495b488c6b16c6e493249 | d4c47276c8fbd15240aa228eda04ee8e338caf02 | /Python/Python 2nen/First/chap2/chap2-13.py | 01888407e2cb14922c94d88d5a194cbc4c110d42 | [] | no_license | developer579/Practice | a745384450172fb327913c130303ab76492096f1 | 54084468af83afcc44530e757800c8c3678147c1 | refs/heads/main | 2023-05-06T01:36:06.222554 | 2021-06-02T07:04:03 | 2021-06-02T07:04:03 | 324,312,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | import requests
from bs4 import BeautifulSoup
import urllib
load_url = "https://www.ymori.com/books/python2nen/test2.html"
html = requests.get(load_url)
soup = BeautifulSoup(html.content,"html.parser")
for element in soup.find_all("img"):
src = element.get("src")
image_url = urllib.parse.urljoin(load_url,src)
filename = image_url.split("/")[-1]
print(image_url,">>",filename)
| [
"[email protected]"
] | |
0bd83ff990ff936293623e8779875ff6ce165188 | cc70c80a9a70827d1c76005067e8e9999ac3f7b2 | /twitter.py | d227924314c903afdd64e1a6389e5cbbce585512 | [] | no_license | guptaankush936/Twitter-Sentiment-Analysis | 5af251332383080d976e5621ae5d4f1b07ea6b89 | 1a41c9024a017616cf3e55c32faec6955dae277c | refs/heads/main | 2023-07-18T04:03:28.261701 | 2021-08-19T18:56:29 | 2021-08-19T18:56:29 | 398,045,538 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | # -*- coding: utf-8 -*-
"""
Connect Twitter webpage with the model
"""
# importing the required libraries
from flask import Flask, render_template, request, redirect, url_for
from joblib import load
from get_tweets import get_related_tweets
# load the pipeline object
pipeline = load("text_classification.joblib")
# function to get results for a particular text query
def requestResults(name):
# get the tweets text
tweets = get_related_tweets(name)
# get the prediction
tweets['prediction'] = pipeline.predict(tweets['tweet_text'])
# get the value counts of different labels predicted
data = str(tweets.prediction.value_counts()) + '\n\n'
return data + str(tweets)
# start flask
app = Flask(__name__)
# render default webpage
@app.route('/')
def home():
return render_template('twitter.html')
# when a POST request is detected, redirect to the success view
@app.route('/', methods=['POST', 'GET'])
def get_data():
if request.method == 'POST':
user = request.form['search']
return redirect(url_for('success', name=user))
# get the data for the requested query
@app.route('/success/<name>')
def success(name):
return "<xmp>" + str(requestResults(name)) + " </xmp> "
if __name__ == '__main__' :
app.run(debug=True)
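
# Hedged usage note (not part of the original file): with the Flask development
# server started above on its defaults, http://127.0.0.1:5000/ serves the search
# form and http://127.0.0.1:5000/success/<query> runs requestResults() for <query>.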
| [
"[email protected]"
] | |
d22bead336d05eea1ac1ba5ab189322de7a5078d | a88894960dc60d477597089a8f6903b5994f7290 | /fhikers_prototype v 2 1/rutas/migrations/0003_auto_20201125_1107.py | 1781717d449e1d021190facaf15de8f2c4041c6f | [] | no_license | student10github/fhikers | ddb59440ff8415b87f0ebe65f4b9c66c3fdd36fe | aae9dc393506f822107599fc2ad5e8e58b0734ca | refs/heads/main | 2023-01-28T23:03:04.176051 | 2020-12-09T13:29:51 | 2020-12-09T13:29:51 | 313,342,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | # Generated by Django 3.1.1 on 2020-11-25 11:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('rutas', '0002_auto_20201124_1812'),
]
operations = [
migrations.AlterField(
model_name='ruta',
name='id_etapa',
field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='rutas.etapa'),
),
migrations.AlterField(
model_name='ruta',
name='id_pais',
field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='rutas.pais'),
),
migrations.AlterField(
model_name='ruta',
name='id_usuario',
field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='rutas.usuario'),
),
]
| [
"[email protected]"
] | |
9c4061f174785212776e38574da8ada8490df6ac | 8db57792bf725ba64c874cbb88324c97c55089a4 | /opencv_webapp/cv_functions.py | d5772f3f6e32bedff5690ac77c0b49e22130b952 | [] | no_license | sleepycat27/django_opencv | af7f22b0bb0e8f02ed5eb8c7e793da805d843956 | 642eb2dee2d4770d02d74edab24456d0f1138ec4 | refs/heads/master | 2023-06-28T16:48:36.981723 | 2021-08-02T04:27:19 | 2021-08-02T04:27:19 | 391,776,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | from django.conf import settings
import numpy as np
import cv2
def cv_detect_face(path):  # receives the image file path via the path parameter
# path == './media/images/2020/02/21/test_image.jpg'
img = cv2.imread(path, 1)
    if isinstance(img, np.ndarray):
        print(img.shape)  # height, width, channels
        resize_needed = False
        if img.shape[1] > 640:  # e.g., when the width (img.shape[1]) is 1280,
            resize_needed = True
            new_w = img.shape[1] * (640.0 / img.shape[1])  # 1280 * (640/1280) = 1280 * 0.5
            new_h = img.shape[0] * (640.0 / img.shape[1])  # original height * (640/1280) = original height * 0.5
        elif img.shape[0] > 480:  # e.g., when the height (img.shape[0]) is 960,
            resize_needed = True
            new_w = img.shape[1] * (480.0 / img.shape[0])  # original width * (480/960) = original width * 0.5
            new_h = img.shape[0] * (480.0 / img.shape[0])  # 960 * (480/960) = 960 * 0.5
if resize_needed == True:
img = cv2.resize(img, (int(new_w), int(new_h)))
        # Haar-based Cascade Classifier: an AdaBoost-based machine-learning object detector
        # mainly used to locate features such as eyes and faces in an image
        # pre-trained models are shipped with OpenCV (http://j.mp/2qIxrxX)
baseUrl = settings.MEDIA_ROOT_URL + settings.MEDIA_URL
# baseUrl = '.' + '/media/' -> './media/'
face_cascade = cv2.CascadeClassifier(baseUrl+'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(baseUrl+'haarcascade_eye.xml')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# detectMultiScale(Original img, ScaleFactor, minNeighbor) : further info. @ http://j.mp/2SxjtKR
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
cv2.imwrite(path, img)
else:
print('Error occurred within cv_detect_face!')
print(path)
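
# Hedged usage sketch (not part of the original file): a view would call this
# after an uploaded image has been saved, e.g.
#
#   cv_detect_face('./media/images/2020/02/21/test_image.jpg')
#
# which draws face/eye rectangles on the image and overwrites it in place.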
| [
"[email protected]"
] | |
ace9776350106333e01428703cc03f5b601d4100 | cfdc7227acc9358672733e9860c5037726d8c58f | /fmanager/fmanager/migrations/0001_initial.py | 9dcd6911a13d99687f3d33e16182b969e1241713 | [] | no_license | andrav12/FinancialManager | 1b44e25e7bbc3ab5fd991fb7caa2e8a9415a267f | dcd3546771b78199639d4413033e818264fc6598 | refs/heads/master | 2020-05-21T00:29:12.888000 | 2019-05-12T13:48:27 | 2019-05-12T13:48:27 | 185,829,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,240 | py | # Generated by Django 2.2 on 2019-04-21 18:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500)),
('number', models.CharField(max_length=20)),
('cvv', models.IntegerField()),
('amount', models.FloatField()),
('expireDate', models.DateField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500)),
('description', models.CharField(max_length=100)),
('amount', models.FloatField()),
('card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to='fmanager.Card')),
],
),
migrations.CreateModel(
name='Goal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500)),
('description', models.TextField(null=True)),
('state', models.IntegerField(choices=[(0, 'In progress'), (1, 'Done'), (2, 'Archived')])),
('objective', models.FloatField()),
('amountCollected', models.FloatField(default=0)),
('createdAt', models.DateField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
40875c5acbf5a739eb7b464c44cc3f0f101a9b59 | bc5e56d51d5b101b44a7ebdfc5cf34621337533f | /venv/Lib/site-packages/Crypto/Cipher/ARC2.py | fa997e4477a0a28ff83733627c0670d99f258ffc | [
"Apache-2.0"
] | permissive | alstjr8/KopoBlockchain | 0879db2eb4ed1b26d8a337e14565d5ea2dbffa4e | 7f875cee97c069f7ed6d067253834bd62218e826 | refs/heads/master | 2022-09-05T07:35:17.324397 | 2020-05-28T10:47:51 | 2020-05-28T10:47:51 | 265,499,071 | 0 | 0 | Apache-2.0 | 2020-05-20T08:23:11 | 2020-05-20T08:23:10 | null | UTF-8 | Python | false | false | 4,824 | py | # -*- coding: utf-8 -*-
#
# Cipher/ARC2.py : ARC2.py
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RC2 symmetric cipher
RC2_ (Rivest's Cipher version 2) is a symmetric block cipher designed
by Ron Rivest in 1987. The cipher started as a proprietary design,
that was reverse engineered and anonymously posted on Usenet in 1996.
For this reason, the algorithm was first called *Alleged* RC2 (ARC2),
since the company that owned RC2 (RSA Data Inc.) did not confirm whether
the details leaked into public domain were really correct.
The company eventually published its full specification in RFC2268_.
RC2 has a fixed data block size of 8 bytes. Length of its keys can vary from
8 to 1024 bits (1 to 128 bytes). One particular property of RC2 is that the actual
cryptographic strength of the key (*effective key length*) can be reduced
via a parameter.
Even though RC2 is not cryptographically broken, it has not been analyzed as
thoroughly as AES, which is also faster than RC2.
New designs should not use RC2.
As an example, encryption can be done as follows:
>>> from Crypto.Cipher import ARC2
>>> from Crypto import Random
>>>
>>> key = b'Sixteen byte key'
>>> iv = Random.new().read(ARC2.block_size)
>>> cipher = ARC2.new(key, ARC2.MODE_CFB, iv)
>>> msg = iv + cipher.encrypt(b'Attack at dawn')
.. _RC2: http://en.wikipedia.org/wiki/RC2
.. _RFC2268: http://tools.ietf.org/html/rfc2268
:undocumented: __revision__, __package__
"""
__revision__ = "$Id$"
from Crypto.Cipher import blockalgo
from Crypto.Cipher import _ARC2
class RC2Cipher (blockalgo.BlockAlgo):
"""RC2 cipher object"""
def __init__(self, key, *args, **kwargs):
"""Initialize an ARC2 cipher object
See also `new()` at the module level."""
blockalgo.BlockAlgo.__init__(self, _ARC2, key, *args, **kwargs)
def new(key, *args, **kwargs):
"""Create a new RC2 cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
Its length can vary from 1 to 128 bytes.
:Keywords:
mode : a *MODE_** constant
The chaining mode to use for encryption or decryption.
Default is `MODE_ECB`.
IV : byte string
The initialization vector to use for encryption or decryption.
It is ignored for `MODE_ECB` and `MODE_CTR`.
For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption
and `block_size` +2 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
It is mandatory.
For all other modes, it must be `block_size` bytes longs.
counter : callable
(*Only* `MODE_CTR`). A stateful function that returns the next
*counter block*, which is a byte string of `block_size` bytes.
For better performance, use `Crypto.Util.Counter`.
segment_size : integer
(*Only* `MODE_CFB`).The number of bits the plaintext and ciphertext
are segmented in.
It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.
effective_keylen : integer
Maximum cryptographic strength of the key, in bits.
It can vary from 0 to 1024. The default value is 1024.
:Return: an `RC2Cipher` object
"""
return RC2Cipher(key, *args, **kwargs)
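
# Hedged usage sketch (not part of the original module): the inverse of the
# encryption example in the module docstring. `key` and `msg` are assumed to
# come from that example, i.e. `msg` is the IV followed by the CFB ciphertext.
def _example_cfb_roundtrip(key, msg):
    """Recover the plaintext from `msg` produced as in the module docstring."""
    iv, ciphertext = msg[:block_size], msg[block_size:]
    cipher = new(key, MODE_CFB, iv)
    return cipher.decrypt(ciphertext)
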
#: Electronic Code Book (ECB). See `blockalgo.MODE_ECB`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `blockalgo.MODE_CBC`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `blockalgo.MODE_CFB`.
MODE_CFB = 3
#: This mode should not be used.
MODE_PGP = 4
#: Output FeedBack (OFB). See `blockalgo.MODE_OFB`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `blockalgo.MODE_CTR`.
MODE_CTR = 6
#: OpenPGP Mode. See `blockalgo.MODE_OPENPGP`.
MODE_OPENPGP = 7
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = range(1,16+1)
| [
"[email protected]"
] | |
ec89933699fcc9f49abc83b7d57cd049397e8c5c | a60446e8ccfc1c4428467c226357bb2f5e418613 | /core/migrations/0005_auto_20201105_1216.py | cc1dc3deebd1bd6e0db5278245656a0ae69f2bb2 | [] | no_license | lucasousa/api-tourist-spots | 06c64ce8e8d04ca684f91917e9f1bfc8c7ab1779 | a01949fc69500c61019fafe565c0a170d414112b | refs/heads/main | 2023-05-27T09:32:08.829557 | 2021-06-13T21:30:09 | 2021-06-13T21:30:09 | 376,287,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | # Generated by Django 3.1.3 on 2020-11-05 12:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('attraction', '0001_initial'),
('address', '0001_initial'),
('comments', '0001_initial'),
('evaluations', '0001_initial'),
('core', '0004_auto_20201105_1213'),
]
operations = [
migrations.AddField(
model_name='touristspot',
name='address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='address.address'),
),
migrations.AddField(
model_name='touristspot',
name='attractions',
field=models.ManyToManyField(to='attraction.Attraction'),
),
migrations.AddField(
model_name='touristspot',
name='comments',
field=models.ManyToManyField(to='comments.Comment'),
),
migrations.AddField(
model_name='touristspot',
name='evaluation',
field=models.ManyToManyField(to='evaluations.Evaluation'),
),
]
| [
"[email protected]"
] | |
4508211a48184e786e5567dc14a2c0c6669617b7 | c69103ac7c14828c8a14e6c57eca54ae1a23d28c | /temp.py | a018ebeb88236850f4342772aa20add57e0c0dbb | [] | no_license | iamaaditya/GoogleCodeJam | 6e18fd83b1d3da4cc66af8889d3b6db3ae55529c | 5a1c934d7b647aa135edcbf8143109eb6b91b275 | refs/heads/master | 2021-01-23T11:54:43.832702 | 2014-04-18T01:33:57 | 2014-04-18T01:33:57 | 18,745,451 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | #Rectangular
def printMatrixSpiral(width,height):
"""
width: width of the matrix
height: height of the matrix
"""
result = []
    # special case: empty matrix
    if width == 0 or height == 0: return result
    # calculate outer loop limit
    if height < width: minS = height
    else: minS = width
    if minS & 1 == 1: limit = minS // 2
    else: limit = minS // 2 + 1
#calculate length
xlen=width-1
ylen=height-1
#outer loop
for index in range(limit+1):
i,j = index,index # init to coordinate of diagonal entry
if xlen<0 or ylen<0: return result
if xlen == 0 and minS&1==1:
for jj in range(ylen+1):
result.append((i,j+jj))
return result
if ylen ==0 and minS&1==1:
for ii in range(xlen+1):
result.append((i+ii,j))
return result
#turn right
while(i<index+xlen):
result.append((i,j))
i+=1
#turn down
while j< index+ylen:
result.append((i,j))
j+=1
#turn left
while i>index:
result.append((i,j))
i-=1
#turn up
while j>index:
result.append((i,j))
j-=1
#next spiral square
xlen-=2
ylen-=2
return result
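
# Hedged usage sketch (not part of the original script): print the visiting
# order for a small example matrix; the 4x3 size is arbitrary.
if __name__ == "__main__":
    print(printMatrixSpiral(4, 3))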
| [
"[email protected]"
] | |
204f896029a0f59e209c18ad7de7b01cebe99e9b | bbf587f29779e232ca6f0c36378b9530c97b494c | /apps/posts/migrations/0003_alter_post_likes.py | 17305078b42fb37434cd5a7819542f641788b966 | [] | no_license | chegreyev/starnavi_test | dcfb90460124027819a16dc1f9fd523e1530436d | a29aa3db2e5173a5156deeacce7c6df5fed4fb2e | refs/heads/master | 2023-04-04T14:04:56.867489 | 2021-04-18T21:55:53 | 2021-04-18T21:55:53 | 359,250,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Generated by Django 3.2 on 2021-04-14 11:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='likes',
field=models.ManyToManyField(blank=True, to='posts.Like'),
),
]
| [
"[email protected]"
] | |
5859d1c2164cbd49404b0fd87958dcdfdd9df998 | 30435a504418f7aef07f5b1b3d6886f0cb72dba1 | /myproject/인터페이스_프로젝트/project/프로젝트/roof/page/admin.py | 586dbc3c04c68b06feb494d59918719c894f4e18 | [] | no_license | Seungeun-Song/Study-for-K-Digital-AI-developer | a8511fb6d08a5121c2fcdd8f9706459fe9ffa689 | bf48bb94918517a9db14359b81b9f710c49ef67c | refs/heads/master | 2023-07-02T06:51:05.122824 | 2021-08-12T14:36:39 | 2021-08-12T14:36:39 | 328,096,871 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | from django.contrib import admin
from page.models import Tag, Category, Member, Post, Photo
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ('title', 'content')
prepopulated_fields = {'slug': ('title',)}
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('title', 'content')
prepopulated_fields = {'slug': ('title',)}
@admin.register(Member)
class MemberAdmin(admin.ModelAdmin):
list_display = ('name', 'content', 'category_list')
prepopulated_fields = {'slug': ('name',)}
def category_list(self, obj):
return ', '.join(o.title for o in obj.category.all())
class PhotoInline(admin.StackedInline):
model = Photo
extra = 1
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
inlines = (PhotoInline,)
list_display = ('title', 'content','date','category','member','tag_list')
prepopulated_fields = {'slug': ('title',)}
def tag_list(self, obj):
return ', '.join(o.title for o in obj.tag.all())
@admin.register(Photo)
class PhotoAdmin(admin.ModelAdmin):
list_display = ('post', 'title', 'content')
prepopulated_fields = {'slug': ('title',)} | [
"[email protected]"
] | |
fa8b1867d453f0443fee77e71f6c799375aa58ac | 8c0d775236afdf989477e309789a2f67adf94c86 | /scraper.py | ad7cc424d4a36e3935946a1949e26a79294bfd2d | [] | no_license | lucivpav/CovidTravelNewsCZ | 9fe476cf57567378814ad7052662b3df24a45089 | d6e91d3265ebe2c24acd378f25ea6f726cd09c0b | refs/heads/master | 2023-08-01T13:45:00.944482 | 2021-09-28T11:16:13 | 2021-09-28T11:16:13 | 410,520,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | from bs4 import BeautifulSoup
import requests
import sys
import getopt
import re
import datetime
from dateutil import tz
from definitions import STATUS_FILE, MOST_RECENT_FILE, MAIN_URL, CountryData
PROGRAM_NAME = 'scraper.py'
def usage():
print('usage: python3 ' + PROGRAM_NAME + ' [--help|--production]')
def writeCountryData(filePath, countryData):
with open(filePath, 'w') as statusFile:
statusFile.write('country,updateTime,updateTimestamp,link\n')
for oneCountryData in countryData:
statusFile.write(oneCountryData.country + ',' + \
oneCountryData.updateTime + ',' + \
str(oneCountryData.updateTimestamp) + ',' + \
oneCountryData.link + '\n')
def parseCountryNameFromAnchor(countryAnchor):
strong = countryAnchor.find('strong')
if strong != None:
return strong.string
return countryAnchor.string
def parseRawUpdateTime(articleDateElement):
updatedElement = articleDateElement.find('span', {'class': 'updated'})
if updatedElement == None:
return articleDateElement.string
return updatedElement.find('span', {'class': 'time'}).string
def main(argv):
opts, args = getopt.getopt(argv, '', ['help', 'production'])
production = False
for opt, arg in opts:
if opt == '--help':
usage()
exit(0)
elif opt == '--production':
production = True
else:
usage()
exit(1)
response = requests.get(MAIN_URL)
soup = BeautifulSoup(response.content, 'html.parser')
countryAnchors = soup.find('div', {'class': 'article_content'}) \
.div.find_all('a', href=re.compile('https://www.mzv.cz/'))
if not production:
countryAnchors = countryAnchors[0:3] # artificially limit the list for dev purposes
countryLinks = list(map(lambda a: a.attrs['href'], countryAnchors))
countryNames = list(map(parseCountryNameFromAnchor, countryAnchors))
countryData = []
for i, countryLink in enumerate(countryLinks):
response = requests.get(countryLink)
soup = BeautifulSoup(response.content, 'html.parser')
articleDateElement = soup.find('p', {'class': 'articleDate'})
if articleDateElement == None:
print('Warning: skipping unparsable country: ' + countryNames[i])
# TODO: the list of unparsable countries should be part of an email, for reference
continue
rawUpdateTime = parseRawUpdateTime(articleDateElement)
dateMatch = re.search('\s?(.+)\s/', rawUpdateTime)
if dateMatch != None:
date = dateMatch.group(1)
time = re.search('/\s(.+)', rawUpdateTime).group(1)
else:
date = re.search('\s?(.+)$', rawUpdateTime).group(1)
time = '00:00'
updateTime = date + ' ' + time
updateTimestamp = datetime.datetime.strptime(updateTime, "%d.%m.%Y %H:%M") \
.replace(tzinfo=tz.gettz('Europe/Prague')).timestamp()
updateTimestamp = int(updateTimestamp)
countryData.append(CountryData(countryNames[i], updateTime, updateTimestamp, countryLink))
writeCountryData(STATUS_FILE, countryData)
mostRecent = sorted(countryData, key=lambda d: d.updateTimestamp, reverse=True)
writeCountryData(MOST_RECENT_FILE, mostRecent)
if __name__ == '__main__':
main(sys.argv[1:])
| [
"[email protected]"
] | |
2553ae9d49be4c776d690187ca3c6a7f3bf54e71 | ea2bd7e9d780802238aaa26d6870759b0038aa8d | /pyvcloud/vcd/task.py | 5b511116aaf5812c15cb7216b8bbdc963da92915 | [
"Apache-2.0"
] | permissive | pacogomez/pyvcloud | 5b000c1804e8fc501fe241c23a6348bea768de99 | 731aded20b999d269472caf65df774c284dd49b6 | refs/heads/master | 2021-05-07T19:30:37.783803 | 2018-01-10T16:28:26 | 2018-01-10T16:28:26 | 108,920,856 | 0 | 1 | Apache-2.0 | 2017-12-28T17:33:15 | 2017-10-30T23:44:05 | Python | UTF-8 | Python | false | false | 2,551 | py | # VMware vCloud Director Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyvcloud.vcd.client import E
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import find_link
from pyvcloud.vcd.client import RelationType
class Task(object):
def __init__(self, client):
self.client = client
def update(self,
status,
namespace,
operation,
operation_name,
details,
progress,
owner_href,
owner_name,
owner_type,
user_href,
user_name,
org_href=None,
task_href=None,
error_message=None):
t = E.Task(
status=status,
serviceNamespace=namespace,
type=EntityType.TASK.value,
operation=operation,
operationName=operation_name,
name='task')
t.append(E.Owner(href=owner_href, name=owner_name, type=owner_type))
if error_message is not None:
t.append(
E.Error(
stackTrace='',
majorErrorCode='500',
message=error_message,
minorErrorCode='INTERNAL_SERVER_ERROR'))
t.append(
E.User(href=user_href, name=user_name, type=EntityType.USER.value))
if progress is not None:
t.append(E.Progress(progress))
t.append(E.Details(details))
if task_href is None:
org_resource = self.client.get_resource(org_href)
link = find_link(org_resource, RelationType.DOWN,
EntityType.TASKS_LIST.value)
return self.client.post_resource(link.href, t,
EntityType.TASK.value)
else:
return self.client.put_resource(task_href, t,
EntityType.TASK.value)
| [
"[email protected]"
] | |
26b3891eec39dfde413b9ee962dceb8927c638ff | 3ef33dabaac35d2343b8dee98fd975de4edc0d3c | /dailyfresh/df_user/migrations/0001_initial.py | c8c5b1916e1d740dcd3ce4e89f1fd86f83b0a226 | [] | no_license | junjie0825/dailyfresh | 2d3ff79b7723a028321778d3fee6fe646e46bd83 | e57b2dfb04cce27b0b4bdeaff0f79133e0a83853 | refs/heads/master | 2021-04-03T01:36:57.090898 | 2018-03-16T03:44:56 | 2018-03-16T03:44:56 | 124,513,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('uname', models.CharField(max_length=20)),
('upwd', models.CharField(max_length=40)),
('uemail', models.CharField(max_length=30)),
('urece', models.CharField(max_length=20)),
('uaddress', models.CharField(max_length=100)),
('uzip', models.CharField(max_length=6)),
('uphone', models.CharField(max_length=11)),
],
),
]
| [
"[email protected]"
] | |
ec1edfb6f58b5c7a66a9728ed4404c74f04a978b | 8b57fa74ec6e94c27f871f367c6e875c01bd04fd | /dash_access_manager/models.py | 20b97e4822d4f54920eaad68fdb474aeec58c747 | [
"MIT"
] | permissive | evan-lh/dash-access-manager | 868866126a4fde8968b87d6a61f5cdad705b271b | ac9dde4a38e5f7f0b0d64164c0c06e758c29e645 | refs/heads/master | 2023-02-02T15:46:09.617587 | 2020-09-25T07:40:48 | 2020-09-25T07:40:48 | 297,599,659 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from mongoengine import *
from flask_login import UserMixin
import bcrypt
class User(Document, UserMixin):
username = StringField(required=True)
hashed_password = StringField()
def check_password(self, password: bytes) -> bool:
return bcrypt.checkpw(password, self.hashed_password.encode('utf-8'))
| [
"[email protected]"
] | |
b2c6463440ebd4a67ba172fc4008e5247252558a | 11304b571e4f8fd95579f7e49e25067813ef8fbc | /venv/lib/python2.7/site-packages/astropy/io/fits/verify.py | 7c0b2d8bae7b338e71c866b5619edd82c49321f7 | [
"MIT"
] | permissive | EnSlavingBlair/Coincidences | 98fa10375a32dc5cdf57827b5aa4f07e5fc79bd1 | bba3435d9f0530822ddd2ab48a6a0bb84aa95f15 | refs/heads/master | 2021-10-23T05:39:55.560314 | 2021-10-20T05:33:23 | 2021-10-20T05:33:23 | 247,924,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,733 | py | # Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import unicode_literals
import operator
import warnings
from ...extern.six import next
from ...utils import indent
from ...utils.exceptions import AstropyUserWarning
class VerifyError(Exception):
"""
Verify exception class.
"""
class VerifyWarning(AstropyUserWarning):
"""
Verify warning class.
"""
VERIFY_OPTIONS = ['ignore', 'warn', 'exception', 'fix', 'silentfix',
'fix+ignore', 'fix+warn', 'fix+exception',
'silentfix+ignore', 'silentfix+warn', 'silentfix+exception']
class _Verify(object):
"""
Shared methods for verification.
"""
def run_option(self, option='warn', err_text='', fix_text='Fixed.',
fix=None, fixable=True):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ['warn', 'exception']:
fixable = False
# fix the value
elif not fixable:
text = 'Unfixable error: {}'.format(text)
else:
if fix:
fix()
text += ' ' + fix_text
return (fixable, text)
def verify(self, option='warn'):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError('Option {!r} not recognized.'.format(option))
if opt == 'ignore':
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if '+' in opt:
fix_opt, report_opt = opt.split('+')
elif opt in ['fix', 'silentfix']:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, 'exception'
else:
fix_opt, report_opt = None, opt
if fix_opt == 'silentfix' and report_opt == 'ignore':
# Fixable errors were fixed, but don't report anything
return
if fix_opt == 'silentfix':
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == 'fix' and report_opt == 'ignore':
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, 'Verification reported errors:')
messages.append('Note: astropy.io.fits uses zero-based indexing.\n')
if fix_opt == 'silentfix' and not unfixable:
return
elif report_opt == 'warn' or (fix_opt == 'fix' and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError('\n' + '\n'.join(messages))
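# A brief, hedged usage sketch of the option strings documented in
# _Verify.verify() above. HDUList inherits that method, so opening a file and
# calling verify('fix+warn') repairs what it can and warns about the rest; the
# filename here is an assumption for illustration.
def _example_verify_usage(filename='example.fits'):
    from astropy.io import fits  # imported lazily inside the sketch to avoid a circular import
    with fits.open(filename) as hdul:
        hdul.verify('fix+warn')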
class _ErrList(list):
"""
Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at
different class levels.
"""
def __new__(cls, val=None, unit='Element'):
return super(cls, cls).__new__(cls, val)
def __init__(self, val=None, unit='Element'):
self.unit = unit
def __str__(self):
return '\n'.join(item[1] for item in self.iter_lines())
def iter_lines(self, filter=None, shift=0):
"""
Iterate the nested structure as a list of strings with appropriate
indentations for each level of structure.
"""
element = 0
# go through the list twice, first time print out all top level
# messages
for item in self:
if not isinstance(item, _ErrList):
if filter is None or filter(item):
yield item[0], indent(item[1], shift=shift)
        # second time, go through the next-level items; each of the next level
        # must be present, even if it has nothing.
for item in self:
if isinstance(item, _ErrList):
next_lines = item.iter_lines(filter=filter, shift=shift + 1)
try:
first_line = next(next_lines)
except StopIteration:
first_line = None
if first_line is not None:
if self.unit:
# This line is sort of a header for the next level in
# the hierarchy
yield None, indent('{} {}:'.format(self.unit, element),
shift=shift)
yield first_line
for line in next_lines:
yield line
element += 1
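# A small, hedged sketch of the nested structure described above: verification
# code appends (fixable, message) tuples at each level and nests one _ErrList
# per child element; str() then renders the indented report. The messages are
# made up for illustration.
def _example_errlist():
    card_errs = _ErrList(unit='Card')
    card_errs.append((False, 'Unfixable error: card value is not a string'))
    hdu_errs = _ErrList(unit='HDU')
    hdu_errs.append((True, 'Fixed checksum keyword.'))
    hdu_errs.append(card_errs)
    return str(hdu_errs)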
| [
"[email protected]"
] | |
85fc5601d2d5791a5b19a3f13e926a3393ad1c03 | 5be9b9fe6544af1764c4e4bf8b4adbe6205a353f | /tests/settings.py | a3d40947ffbeca31128a8f5fe95b074e34cbca4d | [
"MIT"
] | permissive | GermanoGuerrini/django-rohypnol | d3f8ba996e95f871aaccddba9922809b4d54dee6 | 09686e71920c2263bcb3a5ae8954556699af4ca4 | refs/heads/master | 2021-03-12T20:27:14.587219 | 2015-01-27T15:18:17 | 2015-01-27T15:18:17 | 29,015,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from os.path import abspath, join, dirname
PROJECT_ROOT = abspath(dirname(__file__))
REPOSITORY_ROOT = abspath(join(PROJECT_ROOT, '..'))
CACHE_ROOT = abspath(join(REPOSITORY_ROOT, '.cache'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'rohypnol_test'
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': CACHE_ROOT,
}
}
SECRET_KEY = 'xxx'
ROOT_URLCONF = ''
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.contenttypes',
'rohypnol',
)
MIDDLEWARE_CLASSES = () | [
"[email protected]"
] | |
4705c8a49f431f81ffebe488801982b1534b97a5 | 395907654b7d2c3794df4e5fea0b256faad5befe | /LibraryAPI/main.py | 0f2b0b5a2cc833ba4c15e8140da9656964257445 | [] | no_license | syskantechnosoft/2105PythonBatch | 9c67956ac7bba832cf21286dfb0e3421ff504e75 | 5d6ecbc6e9a4212e86a532dd749dbe79669f6931 | refs/heads/main | 2023-08-24T08:29:05.046864 | 2021-10-13T18:07:33 | 2021-10-13T18:07:33 | 429,123,329 | 1 | 0 | null | 2021-11-17T16:37:08 | 2021-11-17T16:37:07 | null | UTF-8 | Python | false | false | 3,367 | py | from flask import Flask, request, jsonify
from daos.book_dao_postgres import BookDaoPostgres
from entities.book import Book
from exceptions.book_unavailable_error import BookUnavailableError
from exceptions.not_found_exception import ResourceNotFoundError
from services.book_service_impl import BookServiceImpl
import logging
app: Flask = Flask(__name__)
logging.basicConfig(filename="records.log", level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
# Handler methods create your WEB API layer
# They are responsible for handling HTTP requests and giving back responses
# Parsing and generating JSONs, giving back status codes as appropriate
# They SHOULD NOT be directly responsible for CRUD operations or Business Logic
# Your Handler should use services. THEY SHOULD NOT use DAOs directly
book_dao = BookDaoPostgres()
book_service = BookServiceImpl(book_dao) # Dependency Injection
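# A minimal, hedged sketch of what the injection above buys: the service only
# sees the DAO interface, so any stand-in (e.g. an in-memory stub for tests)
# can be wired the same way. The real DAO method names live in BookDaoPostgres
# and are not guessed here; `_build_service` is illustrative only.
def _build_service(dao=None) -> BookServiceImpl:
    # Default to the Postgres DAO, but let tests pass their own collaborator.
    return BookServiceImpl(dao if dao is not None else BookDaoPostgres())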
@app.route("/books", methods=["POST"])
def create_book():
body = request.json # json will return a python dictionary version of that JSON
book = Book(body["bookId"], body["title"], body["author"], body["available"], body["quality"], body["returnDate"])
book_service.add_book(book) # pass off the heavier logic to the service
return f"Created book with id{book.book_id}", 201 # 201 is the status code for creating a new resource
@app.route("/books/<book_id>", methods=["GET"])
def get_book_by_id(book_id: str):
try:
book = book_service.retrieve_book_by_id(int(book_id))
return jsonify(book.as_json_dict())
except ResourceNotFoundError as e:
return str(e), 404
@app.route("/books", methods=["GET"])
def get_all_books():
title = request.args.get("title") # return the value of title. If no title returns None
if title is not None:
books = book_service.find_books_by_tile_containing(title)
json_books = [b.as_json_dict() for b in books]
return jsonify(json_books)
else:
books = book_service.retrieve_all_books() # list of books
json_books = [b.as_json_dict() for b in books] # list json dict
return jsonify(json_books)
@app.route("/books/<book_id>", methods=["PUT"])
def update_book(book_id: str):
body = request.json # json will return a python dictionary version of that JSON
book = Book(body["bookId"], body["title"], body["author"], body["available"], body["quality"], body["returnDate"])
# the body might contain a valid ID of a book to update
# The ID specified in the URI at the top overrides anything in the body
book.book_id = int(book_id)
book_service.update_book(book)
return "updated successfully"
@app.route("/books/<book_id>", methods=["DELETE"])
def delete_book(book_id: str):
try:
book_service.remove_book(int(book_id))
return "Deleted successfully", 200
except ResourceNotFoundError as e:
return "The resource could not be found", 404
@app.route("/books/<book_id>/checkout", methods=["PATCH"])
def checkout_book(book_id: str):
try:
book_service.checkout_book(int(book_id))
return f"The book with id {book_id} was successfully checked out"
except BookUnavailableError as e:
return str(e), 422 # request could not be processed even though all the information and formatting is correct
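# A hedged client-side sketch of the routes above, using the third-party
# `requests` package. The base URL, book id, and field values are assumptions;
# only the JSON keys mirror what the handlers actually read from the body.
def _example_client(base_url='http://localhost:5000'):
    import requests
    new_book = {"bookId": 1, "title": "Dune", "author": "Frank Herbert",
                "available": True, "quality": "good", "returnDate": None}
    requests.post(f"{base_url}/books", json=new_book)                   # expect 201
    print(requests.get(f"{base_url}/books", params={"title": "Dune"}).json())
    print(requests.patch(f"{base_url}/books/1/checkout").status_code)   # 200, or 422 if unavailable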
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
1be1dac43e1259fada5849985809d3a89fd22dfd | 2d9e50bda4c63fe1e87b67a38d667a7d70fa6ca3 | /website/urls.py | 03b389f91dfc98a44ac22588bfba792ded6b3d8f | [] | no_license | Ency-Ch/Barbers | 52f68f427e832fb1638876858da6c3d7e4bfcb1d | 6f6d748a0bd1c2f7f570a60d04b0bdf50c7f5788 | refs/heads/master | 2023-04-08T19:05:23.381525 | 2021-04-12T07:58:26 | 2021-04-12T07:58:26 | 357,102,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name = 'home'),
path('contact.html', views.contact, name = 'contact'),
path('gallery.html', views.gallery, name = 'gallery'),
]
| [
"[email protected]"
] |