blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 3..616) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5..115) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k .. 681M, nullable) | star_events_count (int64, 0 .. 209k) | fork_events_count (int64, 0 .. 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 .. 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 .. 10.2M) | authors (sequencelengths 1..1) | author_id (stringlengths 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73f2c0dde7fa284795d91a91d76bba4c78ef7a6f | 4b801b5aafac91dd71b9dc3f9a247efe98dc13f0 | /week3/serializer.py | d455db1ad93ca9f2f7b190f82c2ae1c108b7b58f | [
"MIT"
] | permissive | Langat05/Awards | 19f6a627861b7e54ebef705d804e121957185baa | df8f3f9ca1b7cbae1d88f3a3531a02a81b82186d | refs/heads/master | 2023-01-05T20:20:58.369698 | 2020-10-27T14:05:55 | 2020-10-27T14:05:55 | 306,889,072 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | from rest_framework import serializers
from .models import *
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Projects
fields = ['title', 'description', 'image', 'author', 'created_date', 'author_profile', 'link']
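# Usage sketch (illustrative comment, not part of the original file): with Django REST
# framework a queryset is typically serialized as, e.g.,
#   ProjectSerializer(Projects.objects.all(), many=True).data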
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ['user', 'image', 'bio'] | [
"[email protected]"
] | |
a35c23f7c9e71b8fed716df8ac9cb0bd61868c9b | f03d41c0ee4d05a6af5eb154fef4c27a2e638e8d | /ontask/tests/test_create_screen_captures.py | 3123acb4faec6ae4fe523c81a834c2d2a4ca58d1 | [
"LGPL-2.0-or-later",
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"LGPL-2.1-only",
"Python-2.0"
] | permissive | abelardopardo/ontask_b | c461f561b0965735c158199e94366d5bf1a8a676 | c432745dfff932cbe7397100422d49df78f0a882 | refs/heads/master | 2023-08-17T03:27:35.900314 | 2023-08-16T07:29:26 | 2023-08-16T07:29:26 | 110,175,024 | 43 | 32 | MIT | 2023-08-15T23:08:20 | 2017-11-09T22:49:43 | Python | UTF-8 | Python | false | false | 31,280 | py | """Create screen captures to include in the documentation."""
import os
from django.conf import settings
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from ontask import models, tests
class ScreenTutorialTest(tests.ScreenTests):
def test(self):
"""
Create a workflow, upload data and merge
:return:
"""
# Login
self.login('[email protected]')
self.body_ss('workflow_index_empty.png')
#
# Create new workflow
#
self.selenium.find_element(
By.CLASS_NAME,
'js-create-workflow').click()
self.wait_for_modal_open()
self.selenium.find_element(
By.ID,
'id_name').send_keys(self.workflow_name)
desc = self.selenium.find_element(By.ID, 'id_description_text')
desc.send_keys(self.description)
# Take capture of the modal
self.modal_ss('workflow_create.png')
# Close the modal.
desc.send_keys(Keys.RETURN)
self.wait_for_modal_close()
WebDriverWait(self.selenium, 10).until(
EC.visibility_of_element_located(
(By.XPATH, "//table[@id='dataops-table']")
)
)
self.body_ss('dataops_datauploadmerge2.png')
# End of session
self.logout()
class ScreenImportTest(tests.ScreenTests):
def test(self):
# Login
self.login('[email protected]')
# Open Import page
self.selenium.find_element(By.LINK_TEXT, 'Import workflow').click()
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//body/div/h1"),
'Import workflow')
)
#
# Import workflow
#
self.selenium.find_element(By.ID, 'id_name').send_keys(
self.workflow_name
)
self.selenium.find_element(By.ID, 'id_wf_file').send_keys(
os.path.join(settings.BASE_DIR(), 'initial_workflow.gz')
)
# Picture of the body
self.body_ss('workflow_import.png')
# Click the import button
self.selenium.find_element(
By.XPATH,
"//form/div/button[@type='Submit']"
).click()
self.wait_for_page(title='OnTask :: Workflows')
# End of session
self.logout()
class ScreenTestFixtureBasic(
tests.InitialWorkflowFixture,
tests.ScreenTests,
):
pass
# def setUp(self):
# super().setUp()
# # Insert a SQL Connection
# sqlc = SQLConnection(
# name='remote server',
# description_text='Server with student records',
# conn_type='mysql',
# conn_driver='',
# db_user='remote_db_user',
# db_password=True,
# db_host='dbserver.bogus.com',
# db_port=None,
# db_name='demographics',
# db_table='s_records'
# )
# sqlc.save()
#
# # Insert an Amazon Athena Connection
# athenac = AthenaConnection(
# name='athena connection',
# description_text='Connection to amazon athena server',
# aws_access_key='[YOUR AWS ACCESS KEY HERE]',
# aws_secret_access_key='[YOUR AWS SECRET ACCESS KEY HERE]',
# aws_bucket_name='[S3 BUCKET NAME HERE]',
# aws_file_path='[FILE PATH WITHIN BUCKET HERE]',
# aws_region_name='[AWS REGION NAME HERE]',
# )
# athenac.save()
#
class ScreenTestSQLAdmin(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
self.body_ss('workflow_superuser_index.png')
#
# Open SQL Connection
#
self.go_to_sql_connections()
self.body_ss('workflow_sql_connections_index.png')
# click on the edit element
self.selenium.find_element(
By.XPATH,
'//table[@id="connection-admin-table"]'
'//tr/td[2][normalize-space() = "remote server"]/'
'../td[1]/div/button[1]'
).click()
self.wait_for_modal_open()
# Take picture of the modal
self.modal_ss('workflow_superuser_sql_edit.png')
# Click in the cancel button
self.cancel_modal()
# End of session
self.logout()
# def test_athena_admin(self):
# # Login
# self.login('[email protected]')
#
# #
# # Open Athena Connection
# #
# self.go_to_athena_connections()
# self.body_ss('workflow_athena_connections_index.png')
#
# # click on the edit element
# self.selenium.find_element(
# By.XPATH,
# "//table[@id='connection-admin-table']"
# "//tr/td[1][normalize-space() = 'athena connection']"
# ).click()
# self.wait_for_modal_open()
#
# # Take picture of the modal
# self.modal_ss('workflow_superuser_athena_edit.png')
#
# # Click in the cancel button
# self.cancel_modal()
#
# # End of session
# self.logout()
#
# # Close the db_engine
# destroy_db_engine()
class ScreenTestWorkflow(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
# List of workflows, navigation
self.body_ss('workflow_index.png')
# Workflow card
self.element_ss(
'//div[contains(@class, "ontask-card")]',
'workflow_card.png')
#
# Navigation bar, details
#
self.access_workflow_from_home_page(self.workflow_name)
# Take picture of the navigation bar
self.element_ss("//body/div[@id='wflow-name']", 'navigation_bar.png')
#
# New column modal
#
self.go_to_details()
self.body_ss('workflow_details.png')
self.open_add_regular_column()
self.modal_ss('workflow_add_column.png')
# Click in the cancel button
self.cancel_modal()
#
# Attributes
#
self.go_to_attribute_page()
self.body_ss('workflow_attributes.png')
#
# Share
#
self.go_to_workflow_share()
self.body_ss('workflow_share.png')
#
# EXPORT
#
self.go_to_workflow_export()
self.body_ss('workflow_export.png')
# Click back to the details page
self.selenium.find_element(
By.XPATH,
"//a[normalize-space()='Cancel']"
).click()
self.wait_for_id_and_spinner('attribute-table_previous')
#
# RENAME
#
self.go_to_workflow_rename()
self.modal_ss('workflow_rename.png')
# Click in the cancel button
self.cancel_modal()
#
# FLUSH DATA
#
self.go_to_workflow_flush()
self.modal_ss('workflow_flush.png')
# Click in the cancel button
self.cancel_modal()
#
# DELETE
#
self.go_to_workflow_delete()
self.modal_ss('workflow_delete.png')
# Click in the cancel button
self.cancel_modal()
# End of session
self.logout()
class ScreenTestDetails(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
# Open Workflows page
self.access_workflow_from_home_page(self.workflow_name)
# Go to workflow details
self.go_to_details()
#
# Ops/Edit Column
#
self.open_column_edit('SID')
self.wait_for_modal_open()
self.modal_ss('workflow_column_edit.png')
# Click in the cancel button
self.cancel_modal()
# End of session
self.logout()
class ScreenTestDataops(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
# Open Workflows page
self.access_workflow_from_home_page(self.workflow_name)
# Go to CSV
self.go_to_upload_merge()
self.body_ss('dataops_datauploadmerge.png')
self.selenium.find_element(By.LINK_TEXT, "CSV").click()
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Upload/Merge CSV')
)
self.selenium.find_element(By.ID, 'id_data_file').send_keys(
os.path.join(
settings.BASE_DIR(),
'ontask',
'tests',
'initial_workflow',
'initial_workflow.csv')
)
# Picture of the body
self.body_ss('dataops_csvupload.png')
#
# Dataops/Merge CSV Merge Step 2
#
# Click the NEXT button
self.selenium.find_element(
By.XPATH,
"//button[@type='Submit']"
).click()
self.wait_for_page()
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@id='id_make_key_2']")
)
)
# Uncheck the columns that won't be keys
col_checks = self.selenium.find_elements(
By.XPATH,
'//input[contains(@id, "id_make_key_")]')
for col_check in col_checks[1:]:
# Bring element until the middle of the page
self.scroll_element_into_view(col_check)
col_check.click()
self.selenium.execute_script("window.scroll(0,0);")
# Picture of the body
self.body_ss('dataops_upload_merge_step2.png')
#
# Dataops/Merge CSV Merge Step 3
#
# Click the NEXT button
submit_button = self.selenium.find_element(
By.XPATH,
"//button[@type='Submit']"
)
self.selenium.execute_script(
"arguments[0].scrollIntoView("
"{block: 'center', inline: 'nearest', behavior: 'instant'});",
submit_button)
WebDriverWait(self.selenium, 10).until(EC.visibility_of(submit_button))
submit_button.click()
self.wait_for_page()
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, "//select[@id='id_dst_key']")
)
)
#
# Dataops/Merge CSV Merge Step 4
#
# Click the NEXT button
# Select left merge
Select(self.selenium.find_element(
By.ID,
'id_how_merge'
)).select_by_value('left')
# Picture of the body
self.body_ss('dataops_upload_merge_step3.png')
# Click the NEXT button
self.selenium.find_element(
By.XPATH,
"//button[@type='Submit']"
).click()
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//body/div/h1"), 'Review and confirm')
)
# Picture of the body
self.body_ss('dataops_upload_merge_step4.png')
# Click on Finish
submit_button = self.selenium.find_element(
By.XPATH,
"//button[normalize-space()='Finish']"
)
self.scroll_element_into_view(submit_button)
submit_button.click()
self.wait_for_id_and_spinner('table-data_previous')
#
# Dataops/Merge Excel Merge
#
# Go to Excel Upload/Merge
self.go_to_excel_upload_merge_step_1()
self.body_ss('dataops_upload_excel.png')
self.go_to_table()
#
# Google doc merge
#
# Go to Excel Upload/Merge
self.go_to_google_sheet_upload_merge_step_1()
self.body_ss('dataops_upload_gsheet.png')
self.go_to_table()
#
# S3 CSV merge
#
self.go_to_s3_upload_merge_step_1()
self.body_ss('dataops_upload_s3.png')
self.go_to_table()
#
# Dataops/Merge SQL Connection
#
self.go_to_sql_upload_merge()
self.body_ss('dataops_SQL_available.png')
# Click on the link RUN
element = self.search_table_row_by_string(
'conn-instructor-table',
1,
'remote server')
element.find_element(By.XPATH, 'td[1]/a').click()
self.wait_for_page(None, 'sql-load-step1')
# Picture of the RUN menu in SQL
self.body_ss('dataops_SQL_run.png')
self.go_to_table()
#
# Dataops/Merge Athena Connection
#
# self.go_to_athena_upload_merge()
# self.body_ss('dataops_athena_available.png')
#
# # Click on the link RUN
# self.selenium.find_element(By.LINK_TEXT, 'Run').click()
# self.wait_for_page(None, 'athena-load-step1')
#
# # Picture of the RUN menu in Athena
# self.body_ss('dataops_athena_run.png')
# Go back to details
self.go_to_details()
#
# Dataops: Transform
#
self.go_to_transform()
self.body_ss('dataops_transform_list.png')
# Click to run test_plugin_1
element = self.search_table_row_by_string(
'transform-table',
1,
'Test Plugin 1 Name')
element.find_element(By.XPATH, 'td[1]/a').click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.NAME, 'csrfmiddlewaretoken'))
)
# Picture of the body
self.body_ss('dataops_transformation_run.png')
#
# Dataops: Model
#
self.go_to_model()
self.body_ss('dataops_model_list.png')
# Click to run linear model
element = self.search_table_row_by_string(
'transform-table',
1,
'Linear Model')
element.find_element(By.XPATH, 'td[1]/a').click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.NAME, 'csrfmiddlewaretoken'))
)
# Picture of the body
self.body_ss('dataops_model_run.png')
# End of session
self.logout()
class ScreenTestTable(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
# Open Workflows page
self.access_workflow_from_home_page(self.workflow_name)
#
# Table
#
self.go_to_table()
# Picture of the body
self.body_ss('table.png')
# Picture of the buttons
self.element_ss(
'//div[@id="table-operation-buttons"]',
'table_buttons.png')
#
# Table Views
#
self.selenium.find_element(By.ID, 'select-view-name').click()
# Picture of the body
self.body_ss('table_views.png')
#
# Specific table view
#
self.open_view('Midterm')
# Picture of the body
self.body_ss('table_view_view.png')
# Click edit view definition
self.selenium.find_element(
By.XPATH,
'//button[contains(@class, "js-view-edit")]').click()
self.wait_for_modal_open()
# Take picture of the modal
self.modal_ss('table_view_edit.png')
# Click in the cancel button
self.cancel_modal()
# End of session
self.logout()
class ScreenTestAction(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
# Open Workflows page
self.access_workflow_from_home_page(self.workflow_name)
#
# Actions
#
self.body_ss('actions.png')
#
# Edit Action In
#
self.open_action_edit('Student comments Week 1', 'parameters')
# Picture of the body
self.body_ss('action_edit_action_in.png')
# Open the "Create question modal"
self.select_tab('conditions-tab')
self.create_condition(
'Full time',
'',
[('Attendance', 'equal', 'Full Time')]
)
# Open the "Create question modal"
self.select_tab('questions-tab')
self.body_ss('action_edit_action_in_question_tab.png')
self.selenium.find_element(
By.XPATH,
"//button[contains(@class, 'js-action-question-add')]").click()
self.wait_for_modal_open()
self.modal_ss("action_edit_action_in_create_question.png")
self.cancel_modal()
# Open the Survey Parameters
self.select_tab('parameters-tab')
self.body_ss('action_edit_action_in_parameters.png')
# Open the preview
self.open_preview()
self.modal_ss('action_action_in_preview.png')
self.cancel_modal()
# Done
# Submit the action
self.selenium.find_element(By.LINK_TEXT, 'Done').click()
self.wait_for_id_and_spinner('action-index')
#
# Run Action In
#
self.open_action_run('Student comments Week 1', True)
# Picture of the body
self.body_ss('action_run_action_in.png')
#
# Enter data manually
#
self.selenium.find_element(
By.XPATH,
"//table[@id='actioninrun-data']/tbody/tr[1]/td[1]/a"
).click()
self.wait_for_page(title='OnTask :: Enter Data')
# Picture of the body
self.body_ss('action_enter_data_action_in.png')
#
# Action In URL enable
#
self.go_to_actions()
self.open_action_operation('Student comments Week 1', 'bi-link-45deg')
# Take picture of the modal
self.modal_ss('action_action_in_URL.png')
# click on the OK button to return
self.selenium.find_element(By.XPATH, "//button[@type='submit']").click()
self.wait_for_modal_close()
#
# Edit Action Out
#
self.open_action_edit(
'Comments about how to prepare the lecture (Week 4)'
)
# Picture of the body
self.body_ss('action_edit_action_out.png')
#
# Edit filter in action out
#
self.select_tab('filter-tab')
self.selenium.find_element(By.CLASS_NAME, 'js-filter-edit').click()
# Wait for the form to modify the filter
WebDriverWait(self.selenium, 10).until(
tests.ElementHasFullOpacity(
(By.XPATH, "//div[@id='modal-item']"))
)
# Take picture of the modal
self.modal_ss('action_action_out_edit_filter.png')
# Click in the cancel button
self.cancel_modal()
#
# Editor parts of action out
#
self.body_ss('action_action_out_filterpart.png')
# Take picture of the condition set
self.select_tab('conditions-tab')
self.body_ss('action_action_out_conditionpart.png')
# Open one of the conditions
self.open_condition('No Video 1')
# Take picture of the condition open
self.modal_ss('action_action_out_edit_condition.png')
self.cancel_modal()
# Open the preview
self.selenium.find_element(
By.XPATH,
"//button[normalize-space()='Preview']"
).click()
WebDriverWait(self.selenium, 10).until(
tests.ElementHasFullOpacity((By.XPATH, "//div[@id='modal-item']"))
)
self.modal_ss('action_action_out_preview.png')
self.cancel_modal()
#
# Create a canvas email action
#
self.go_to_actions()
# click on the create action button and create an action
self.selenium.find_element(By.CLASS_NAME, 'js-create-action').click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_name')))
# Set the name, description and type of the action
self.selenium.find_element(By.ID, 'id_name').send_keys(
'Initial motivation'
)
desc = self.selenium.find_element(By.ID, 'id_description_text')
# Select the action type
select = Select(self.selenium.find_element(By.ID, 'id_action_type'))
select.select_by_value(models.Action.PERSONALIZED_CANVAS_EMAIL)
desc.send_keys('Motivating message depending on the program enrolled')
self.modal_ss('action_personalized_canvas_email_create.png')
# Cancel creation
self.cancel_modal()
# Open the action
self.open_action_edit('Initial motivation')
self.body_ss('action_personalized_canvas_email_edit.png')
# Save action and back to action index
self.selenium.find_element(
By.XPATH,
"//button[normalize-space()='Close']"
).click()
self.wait_for_id_and_spinner('action-index')
#
# SEND LIST action
#
# click on the create action button and create an action
self.selenium.find_element(By.CLASS_NAME, 'js-create-action').click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_name')))
# Set the name, description and type of the action
self.selenium.find_element(By.ID, 'id_name').send_keys(
'Send Email with report'
)
desc = self.selenium.find_element(By.ID, 'id_description_text')
# Select the action type
select = Select(self.selenium.find_element(By.ID, 'id_action_type'))
select.select_by_value(models.Action.EMAIL_REPORT)
desc.send_keys('Send email with column values as list')
self.modal_ss('action_email_report_create.png')
# Cancel creation
self.cancel_modal()
# Open the action
self.open_action_edit('Send Email with report')
self.body_ss('action_email_report_edit.png')
self.select_tab('attachments-tab')
self.body_ss('action_email_report_attachments.png')
self.open_preview()
self.modal_ss('action_email_report_preview.png')
self.cancel_modal()
# Save action and back to action index
self.selenium.find_element(
By.XPATH,
"//button[normalize-space()='Close']"
).click()
self.wait_for_id_and_spinner('action-index')
#
# SEND JSON REPORT action
#
# click on the create action button and create an action
self.selenium.find_element(By.CLASS_NAME, 'js-create-action').click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_name')))
# Set the name, description and type of the action
self.selenium.find_element(By.ID, 'id_name').send_keys(
'Send JSON report'
)
desc = self.selenium.find_element(By.ID, 'id_description_text')
# Select the action type
select = Select(self.selenium.find_element(By.ID, 'id_action_type'))
select.select_by_value(models.Action.JSON_REPORT)
desc.send_keys(
'Send the list of inactive students '
'in week 2 to another platform')
self.modal_ss('action_json_report_create.png')
# Cancel creation
self.cancel_modal()
# Open the action
self.open_action_edit('Send JSON report')
self.body_ss('action_json_report_edit.png')
self.open_preview()
self.modal_ss('action_json_report_preview.png')
self.cancel_modal()
# Save action and back to action index
self.selenium.find_element(
By.XPATH,
"//button[normalize-space()='Close']"
).click()
self.wait_for_id_and_spinner('action-index')
# Picture of Canvas scheduling
# self.open_action_schedule('Send Canvas reminder')
# self.body_ss('scheduler_action_canvas_email.png')
# self.go_to_actions()
# Picture of the action row
self.element_ss(
'//div[@id="action-cards"]//'
'h5[normalize-space()="Midterm comments"]/..',
'action_action_ops.png')
#
# Send emails
#
self.open_action_edit('Midterm comments')
# Picture of the body
self.body_ss('action_email_request_data.png')
self.go_to_actions()
self.open_action_edit('Initial motivation')
# Picture of the body
self.body_ss('action_personalized_canvas_email_run.png')
#
# Create ZIP
#
self.go_to_actions()
self.open_action_operation(
'Midterm comments',
'bi-file-earmark-zip-fill',
'zip-action-request-data')
# Picture of the body
self.body_ss('action_zip_request_data.png')
#
# JSON Edit
#
self.go_to_actions()
self.open_action_edit('Send JSON to remote server')
self.body_ss('action_personalized_json_edit.png')
# Save action and back to action index
self.selenium.find_element(
By.XPATH,
"//button[normalize-space()='Close']"
).click()
self.wait_for_id_and_spinner('action-index')
#
# JSON RUN
#
self.open_action_json_run('Send JSON to remote server')
self.body_ss('action_json_run_request_data.png')
# Save action and back to action index
self.selenium.find_element(By.LINK_TEXT, 'Cancel').click()
self.wait_for_id_and_spinner('action-index')
#
# Action URL
#
self.open_action_operation('Midterm comments', 'bi-link-45deg')
# Take picture of the modal
self.modal_ss('action_URL_on.png')
# click on the OK button to return
self.selenium.find_element(
By.XPATH,
"//button[@type='submit']"
).click()
self.wait_close_modal_refresh_table('action-index')
# End of session
self.logout()
class ScreenTestScheduler(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
# Open Workflows page
self.access_workflow_from_home_page(self.workflow_name)
#
# Open Action Schedule and schedule the Personalized Text action
#
self.open_action_operation(
'Midterm comments',
'bi-calendar',
'email-schedule-send')
# Fill out some fields
self.selenium.find_element(By.ID, 'id_name').send_keys(
'Send Emails after week 3'
)
Select(self.selenium.find_element(
By.ID,
'id_item_column')
).select_by_visible_text('email')
dt_widget = self.selenium.find_element(
By.XPATH,
"//input[@id='id_execute']"
)
dt_widget.clear()
dt_widget.send_keys('2110-07-05 17:30:51')
self.selenium.find_element(By.ID, 'id_subject').send_keys(
'Your preparation activities for the week'
)
self.selenium.find_element(By.ID, 'id_track_read').click()
# Take picture of the export page.
self.body_ss('schedule_action_email.png')
# Click the schedule button
self.selenium.find_element(
By.XPATH,
"//button[@id='next-step-off']"
).click()
self.wait_for_page(title='OnTask :: Operation scheduled')
#
# Actions
#
self.go_to_actions()
#
# Open Action Schedule and schedule the Personalized JSON
#
self.open_action_operation(
'Send JSON to remote server',
'bi-calendar',
'email-schedule-send')
# Fill out some fields
self.selenium.find_element(By.ID, 'id_name').send_keys(
'Send JSON object in Week 5'
)
Select(self.selenium.find_element(
By.ID,
'id_item_column')
).select_by_visible_text('email')
dt_widget = self.selenium.find_element(
By.XPATH,
"//input[@id='id_execute']"
)
dt_widget.clear()
dt_widget.send_keys('2110-07-25 17:00:00')
self.selenium.find_element(By.ID, 'id_token').send_keys(
'afabkvaidlfvsidkfe..kekfioroelallasifjjf;alksid'
)
# Take picture of the export page.
self.body_ss('schedule_action_json.png')
# Click the schedule button
self.selenium.find_element(
By.XPATH,
"//button[@id='next-step-off']"
).click()
self.wait_for_page(title='OnTask :: Operation scheduled')
#
# Scheduler
#
self.go_to_scheduler()
# Take picture of the export page.
self.body_ss('schedule.png')
# End of session
self.logout()
class ScreenTestLogs(ScreenTestFixtureBasic):
def test(self):
# Login
self.login('[email protected]')
# Open Workflows page
self.access_workflow_from_home_page(self.workflow_name)
self.go_to_attribute_page()
self.create_attribute('akey', 'avalue')
# Logs
self.go_to_logs()
# Take picture of the body
self.body_ss('logs.png')
# End of session
self.logout()
class ScreenTestRubric(ScreenTestFixtureBasic):
def test(self):
action_name = 'Project feedback'
# Login
self.login('[email protected]')
self.access_workflow_from_home_page(self.workflow_name)
self.go_to_actions()
# click on the create action button
self.selenium.find_element(By.CLASS_NAME, 'js-create-action').click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_name')))
# Set the name, description and type of the action
self.selenium.find_element(By.ID, 'id_name').send_keys(action_name)
desc = self.selenium.find_element(By.ID, 'id_description_text')
desc.send_keys(
'Provide feedback about the project using the results '
+ 'from the rubric')
# Select the action type
select = Select(self.selenium.find_element(By.ID, 'id_action_type'))
select.select_by_value(models.Action.RUBRIC_TEXT)
self.modal_ss('rubric_create.png')
self.cancel_modal()
# Open the action
self.open_action_edit(action_name)
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, '//div[contains(@class, "tox-edit-area")]')
)
)
self.body_ss('rubric_edit_text.png')
# Go to the rubric tab
self.select_tab('rubric-tab')
self.body_ss('rubric_edit_table_tab.png')
# Preview
self.open_preview()
self.modal_ss('rubric_preview.png')
self.cancel_modal()
# End of session
self.logout()
| [
"[email protected]"
] | |
5630628da72e6fabab73558152d9df8fbec0a4f0 | 34b16073c76a4fec0ead2603367fbcfd05013898 | /continentalfuzzy/domain/System.py | 2f1c05373acb04de42962036a95f080da9092347 | [] | no_license | rccmodena/ContinentalFuzzyPython | 00958950090351fea5cd299eac8c53ca09c6abc7 | 8b2688dd16a97d75cdc40b680e5cbbd2e247b517 | refs/heads/master | 2023-07-06T18:34:05.344718 | 2021-01-29T14:06:25 | 2021-01-29T14:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,871 | py | """
Developed by Projeto Continentais and Petrobras
author: Rudi César Comiotto Modena
email: [email protected]
date: July, 2020
"""
from typing import Optional, List, Dict
from continentalfuzzy.domain.Rule import Rule
from continentalfuzzy.domain.variable.Input import Input
from continentalfuzzy.domain.variable.Output import Output
from continentalfuzzy.domain.definition.AndMethods import AndMethods
from continentalfuzzy.domain.definition.AggMethods import AggMethods
from continentalfuzzy.domain.definition.ControllerType import ControllerType
from continentalfuzzy.domain.definition.DefuzzMethods import DefuzzMethods
from continentalfuzzy.domain.definition.ImpMethods import ImpMethods
from continentalfuzzy.domain.definition.OrMethods import OrMethods
class System:
"""
Class used to store all the components of a (.fis) file.
"""
# Constant with the maximum number of consequents (outputs)
MAX_NUM_OUTPUTS = 1
# Dictionary with the implemented connectors and their equivalence
# with respect to the .fis file
DICT_CONNECTORS = {'1': 'AND', '2': 'OR'}
def __init__(self,
sys_name: Optional[str] = None,
sys_filename: Optional[str] = None,
sys_type: Optional[ControllerType] = None,
sys_version: Optional[str] = None,
sys_num_inputs: Optional[int] = None,
sys_num_outputs: Optional[int] = None,
sys_num_rules: Optional[int] = None,
sys_and_method: Optional[AndMethods] = None,
sys_or_method: Optional[OrMethods] = None,
sys_imp_method: Optional[ImpMethods] = None,
sys_agg_method: Optional[AggMethods] = None,
sys_defuzz_method: Optional[DefuzzMethods] = None,
sys_inputs: Optional[Dict[int, Input]] = None,
sys_outputs: Optional[Dict[int, Output]] = None,
sys_rules: Optional[List[Rule]] = None,
sys_facies_association: Optional[Dict[int, int]] = None,
sys_use_dict_facies_association: Optional[bool] = None):
"""
Initializer of the System class
Parameters
----------
sys_name : str
String containing the name of the system.
sys_filename : str
Path of the .fis file.
sys_type : ControllerType
Instance of the ControllerType class.
sys_version : str
String containing the version of the Matlab fuzzy library.
sys_num_inputs : int
Number of antecedents (inputs) of the fuzzy system.
sys_num_outputs : int
Number of consequents (outputs) of the fuzzy system.
sys_num_rules : int
Number of rules of the fuzzy system.
sys_and_method : AndMethods
Instance of the AndMethods class.
sys_or_method : OrMethods
Instance of the OrMethods class.
sys_imp_method : ImpMethods
Instance of the ImpMethods class.
sys_agg_method : AggMethods
Instance of the AggMethods class.
sys_defuzz_method : DefuzzMethods
Instance of the DefuzzMethods class.
sys_inputs : dict
Dictionary containing the antecedents of the fuzzy system.
sys_outputs : dict
Dictionary containing the consequents of the fuzzy system.
sys_rules : list
List containing the rules of the fuzzy system.
"""
self.__name = None
self.__filename = None
self.__type = None
self.__version = None
self.__num_inputs = None
self.__num_outputs = None
self.__num_rules = None
self.__and_method = None
self.__or_method = None
self.__imp_method = None
self.__agg_method = None
self.__defuzz_method = None
self.__inputs = dict()
self.__outputs = dict()
self.__rules = list()
self.__facies_association = dict()
self.__use_dict_facies_association = None
if sys_name is not None:
self.name = sys_name
if sys_filename is not None:
self.filename = sys_filename
if sys_type is not None:
self.type = sys_type
if sys_version is not None:
self.version = sys_version
if sys_num_inputs is not None:
self.num_inputs = sys_num_inputs
if sys_num_outputs is not None:
self.num_outputs = sys_num_outputs
if sys_num_rules is not None:
self.num_rules = sys_num_rules
if sys_and_method is not None:
self.and_method = sys_and_method
if sys_or_method is not None:
self.or_method = sys_or_method
if sys_imp_method is not None:
self.imp_method = sys_imp_method
if sys_agg_method is not None:
self.agg_method = sys_agg_method
if sys_agg_method is not None:
self.agg_method = sys_agg_method
if sys_defuzz_method is not None:
self.defuzz_method = sys_defuzz_method
if sys_inputs is not None:
self.inputs = sys_inputs
if sys_outputs is not None:
self.outputs = sys_outputs
if sys_rules is not None:
self.rules = sys_rules
if sys_facies_association:
self.__facies_association = sys_facies_association
if sys_use_dict_facies_association:
self.__use_dict_facies_association = sys_use_dict_facies_association
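# Illustrative construction sketch (added as a comment, not part of the original code).
# The enum member shown (ControllerType.mamdani) is an assumption; the actual member
# names live in the definition modules imported above.
#   fis = System(sys_name='example', sys_type=ControllerType.mamdani,
#                sys_num_inputs=2, sys_num_outputs=1, sys_num_rules=0)
#   fis.add_input(1, Input(...))    # one entry per antecedent
#   fis.add_output(1, Output(...))  # at most MAX_NUM_OUTPUTS consequents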
@property
def name(self) -> str:
"""
Nome do sistema fuzzy.
Retorna
-------
str
Retorna uma string com o nome do sistema fuzzy.
"""
return self.__name
@name.setter
def name(self, sys_name: str):
"""
Altera o nome do sistema fuzzy.
Parâmetros
----------
sys_name : str
String contendo o nome do sistema fuzzy.
"""
if not isinstance(sys_name, str):
raise Exception(f"O nome não é uma String!")
self.__name = sys_name
@property
def filename(self) -> str:
"""
Caminho do arquivo .fis.
Retorna
-------
str
Retorna uma string com o caminho do arquivo .fis.
"""
return self.__filename
@filename.setter
def filename(self, sys_filename: str):
"""
Altera o caminho do arquivo .fis.
Parâmetros
----------
sys_filename : str
String contendo o caminho do arquivo .fis.
"""
if not isinstance(sys_filename, str):
raise Exception(f"O caminho do arquivo .fis. não é uma String!")
self.__filename = sys_filename
@property
def type(self) -> ControllerType:
"""
Tipo de inferência.
Retorna
-------
ControllerType
Retorna uma instância da classe ControllerType.
"""
return self.__type
@type.setter
def type(self, sys_type: ControllerType):
"""
Altera o tipo de inferência.
Parâmetros
----------
sys_type : ControllerType
Instância da classe ControllerType.
"""
if not isinstance(sys_type, ControllerType):
raise Exception("O tipo de inferência não é uma instância da "
"classe ControllerType!")
self.__type = sys_type
@property
def version(self) -> str:
"""
Versão da biblioteca fuzzy do Matlab.
Retorna
-------
str
Retorna uma string com a versão da biblioteca fuzzy do Matlab.
"""
return self.__version
@version.setter
def version(self, sys_version: str):
"""
Altera a versão da biblioteca fuzzy do Matlab.
Parâmetros
----------
sys_version : str
String contendo a versão da biblioteca fuzzy do Matlab.
"""
if not isinstance(sys_version, str):
raise Exception("A versão da biblioteca fuzzy do Matlab não é "
"uma String!")
self.__version = sys_version
@property
def num_inputs(self) -> int:
"""
Número de antecedentes do Sistema Fuzzy.
Retorna
-------
int
Retorna um inteiro com o número de antecedentes.
"""
return self.__num_inputs
@num_inputs.setter
def num_inputs(self, sys_num_inputs: int):
"""
Altera o número de antecedentes do Sistema Fuzzy.
Parâmetros
----------
sys_num_inputs : int
Inteiro contendo o número de antecedentes.
"""
if not isinstance(sys_num_inputs, int):
raise Exception("O número de antecedentes do Sistema Fuzzy não é "
"um número inteiro!")
self.__num_inputs = sys_num_inputs
@property
def num_outputs(self) -> int:
"""
Número de consequentes do Sistema Fuzzy.
Retorna
-------
int
Retorna um inteiro com o número de consequentes.
"""
return self.__num_outputs
@num_outputs.setter
def num_outputs(self, sys_num_outputs: int):
"""
Altera o número de consequentes do Sistema Fuzzy.
Parâmetros
----------
sys_num_outputs : int
Inteiro contendo o número de consequentes.
"""
if not isinstance(sys_num_outputs, int):
raise Exception("O número de consequentes do Sistema Fuzzy não é "
"um número inteiro!")
# Verifica se o número de consequentes é menor igual ao máximo definido
if sys_num_outputs > self.MAX_NUM_OUTPUTS:
raise Exception(
f"Número máximo de consequentes é {self.MAX_NUM_OUTPUTS}"
f", número informado é {sys_num_outputs}!")
self.__num_outputs = sys_num_outputs
@property
def num_rules(self) -> int:
"""
Número de regras do Sistema Fuzzy.
Retorna
-------
int
Retorna um inteiro com o número de regras.
"""
return self.__num_rules
@num_rules.setter
def num_rules(self, sys_num_rules: int):
"""
Altera o número de regras do Sistema Fuzzy.
Parâmetros
----------
sys_num_rules : int
Inteiro contendo o número de regras.
"""
if not isinstance(sys_num_rules, int):
raise Exception("O número de regras do Sistema Fuzzy não é "
"um número inteiro!")
self.__num_rules = sys_num_rules
@property
def and_method(self) -> AndMethods:
"""
Método usado para o "AND" fuzzy.
Retorna
-------
ANDMethods
Retorna uma instância da classe ANDMethods.
"""
return self.__and_method
@and_method.setter
def and_method(self, sys_and_method: AndMethods):
"""
Altera o método usado para o "AND" fuzzy.
Parâmetros
----------
sys_and_method : ANDMethods
Instância da classe ANDMethods.
"""
if not isinstance(sys_and_method, AndMethods):
raise Exception("O método AND fuzzy não é uma instância da "
"classe ANDMethods!")
self.__and_method = sys_and_method
@property
def or_method(self) -> OrMethods:
"""
Método usado para o "OR" fuzzy.
Retorna
-------
ORMethods
Retorna uma instância da classe ORMethods.
"""
return self.__or_method
@or_method.setter
def or_method(self, sys_or_method: OrMethods):
"""
Altera o método usado para o "OR" fuzzy.
Parâmetros
----------
sys_or_method : ORMethods
Instância da classe ORMethods.
"""
if not isinstance(sys_or_method, OrMethods):
raise Exception("O método OR fuzzy não é uma instância da "
"classe ORMethods!")
self.__or_method = sys_or_method
@property
def imp_method(self) -> ImpMethods:
"""
Método de implicação do sistema fuzzy.
Retorna
-------
ImpMethods
Retorna uma instância da classe ImpMethods.
"""
return self.__imp_method
@imp_method.setter
def imp_method(self, sys_imp_method: ImpMethods):
"""
Altera o método de implicação do sistema fuzzy.
Parâmetros
----------
sys_imp_method : ImpMethods
Instância da classe ImpMethods.
"""
if not isinstance(sys_imp_method, ImpMethods):
raise Exception("O método de implicação não é uma instância da "
"classe ImpMethods!")
self.__imp_method = sys_imp_method
@property
def agg_method(self) -> AggMethods:
"""
Método de agregação do sistema fuzzy.
Retorna
-------
AggMethods
Retorna uma instância da classe AggMethods.
"""
return self.__agg_method
@agg_method.setter
def agg_method(self, sys_agg_method: str):
"""
Altera o método de agregação do sistema fuzzy.
Parâmetros
----------
sys_agg_method : AggMethods
Instância da classe AggMethods.
"""
if not isinstance(sys_agg_method, AggMethods):
raise Exception("O método de agregação não é uma instância da "
"classe AggMethods!")
self.__agg_method = sys_agg_method
@property
def defuzz_method(self) -> DefuzzMethods:
"""
Método de defuzzificação do sistema fuzzy.
Retorna
-------
DefuzzMethods
Retorna uma instância da classe DefuzzMethods.
"""
return self.__defuzz_method
@defuzz_method.setter
def defuzz_method(self, sys_defuzz_method: DefuzzMethods):
"""
Altera o método de defuzzificação do sistema fuzzy.
Parâmetros
----------
sys_defuzz_method : DefuzzMethods
Instância da classe DefuzzMethods.
"""
if not isinstance(sys_defuzz_method, DefuzzMethods):
raise Exception("O método de defuzzificação não é uma instância "
"da classe DefuzzMethods!")
self.__defuzz_method = sys_defuzz_method
@property
def inputs(self) -> Dict[int, Input]:
"""
Dicionário contendo os antecedentes do sistema fuzzy.
A chave do dicionário é o número do antecedente.
O valor é uma instância da classe Input.
Retorna
-------
Dict [int, Input]
Retorna um dicionário onde a chave é o número do antecedente e o
valor é uma instância da classe Input.
"""
return self.__inputs
@inputs.setter
def inputs(self, sys_inputs: Dict[int, Input]):
"""
Altera o dicionário contendo os antecedentes do sistema fuzzy.
Parâmetros
----------
sys_inputs : Dict[int, Input]
Dicionário contendo os antecedentes do sistema fuzzy.
"""
# Verifica se o número de inputs já foi informado
if self.num_inputs is None:
raise Exception(f"O número de antecedentes não foi informado!")
# Verifica se a quantidade de antecedentes está correta
if len(sys_inputs) != self.num_inputs:
raise Exception("Quantidade de antecedentes é diferente da "
"informada no bloco do sistema!")
# # Verifica se o dicionário possui os tipos corretos
for k_input, k_value in sys_inputs.items():
if not isinstance(k_input, int):
raise Exception(
"A chave não é uma número inteiro!")
if not isinstance(k_value, Input):
raise Exception(
"O valor não é uma instância da classe Input!")
self.__inputs = sys_inputs
def add_input(self, sys_num: int, sys_value: Input):
"""
Adiciona uma nova entrada no dicionário contendo os antecedentes do
sistema fuzzy.
Parâmetros
----------
sys_num : int
Inteiro com o número do antecedente.
sys_value : Input
Instância da classe Input.
"""
# Verifica se o número do antecedente é um número inteiro
if not isinstance(sys_num, int):
raise Exception(
"O número do antecedente não é um número inteiro!")
# Verifica se o número de inputs já foi informado
if self.num_inputs is None:
raise Exception(f"O número de antecedentes não foi informado!")
# Verifica se o número do antecedente já foi cadastrado
if self.__inputs.get(sys_num) is not None:
raise Exception(f"Número do antecedente {sys_num} já cadastrado!")
# Verifica se a quantidade de antecedentes está correta
if len(self.__inputs) >= self.num_inputs:
raise Exception("Não é possível adicionar mais antecedentes!")
# Verifica se o valor do dicionário é instância da classe Input
if not isinstance(sys_value, Input):
raise Exception(f"O valor não é uma instância da classe Input!")
self.__inputs[sys_num] = sys_value
@property
def outputs(self) -> Dict[int, Output]:
"""
Dicionário contendo os consequentes do sistema fuzzy.
A chave do dicionário é o número do consequente.
O valor é uma instância da classe Output.
Retorna
-------
Dict [int, Output]
Retorna um dicionário onde a chave é o número do consequente e o
valor é uma instância da classe Output.
"""
return self.__outputs
@outputs.setter
def outputs(self, sys_outputs: Dict[int, Output]):
"""
Altera o dicionário contendo os consequentes do sistema fuzzy.
Parâmetros
----------
sys_outputs : Dict[int, Output]
Dicionário contendo os consequentes do sistema fuzzy.
"""
# Verifica se o número de outputs já foi informado
if self.num_outputs is None:
raise Exception(f"O número de consequentes não foi informado!")
# Verifica se a quantidade de consequentes está correta
if len(sys_outputs) != self.num_outputs:
raise Exception("Quantidade de consequentes é diferente da "
"informada no bloco do sistema!")
# Verifica se o dicionário possui os tipos corretos
for k_output, k_value in sys_outputs.items():
if not isinstance(k_output, int):
raise Exception(
"A chave não é uma número inteiro!")
if not isinstance(k_value, Output):
raise Exception(
"O valor não é uma instância da classe Output!")
self.__outputs = sys_outputs
def add_output(self, sys_num: int, sys_value: Output):
"""
Adiciona uma nova entrada no dicionário contendo os consequentes do
sistema fuzzy.
Parâmetros
----------
sys_num : int
Inteiro com o número do consequente.
sys_value : Output
Instância da classe Output.
"""
# Verifica se o número do consequente é um número inteiro
if not isinstance(sys_num, int):
raise Exception(
"O número do consequente não é um número inteiro!")
# Verifica se o número de inputs já foi informado
if self.num_outputs is None:
raise Exception(f"O número de consequentes não foi informado!")
# Verifica se a quantidade de consequentes está correta
if len(self.__outputs) >= self.num_outputs:
raise Exception("Não é possível adicionar mais consequentes!")
# Verifica se o número do consequente já foi cadastrado
if self.__outputs.get(sys_num) is not None:
raise Exception(f"Número do consequente {sys_num} já cadastrado!")
# Verifica se os valor do dicionário é instância da classe Output
if not isinstance(sys_value, Output):
raise Exception(
f"O valor não é uma instância da classe Output!")
self.__outputs[sys_num] = sys_value
@property
def rules(self) -> List[Rule]:
"""
Lista contendo as regras do sistema fuzzy.
Os valores da lista são instâncias da classe Rule.
Retorna
-------
List[Rule]
Retorna uma lista com as instâncias da classe Rule.
"""
return self.__rules
@rules.setter
def rules(self, sys_rules: List[Rule]):
"""
Altera a lista contendo as regras do sistema fuzzy.
Parâmetros
----------
sys_rules : List[Rule]
Lista contendo as regras do sistema fuzzy.
"""
# Verifica se o número de regras já foi informado
if self.num_rules is None:
raise Exception(f"O número de regras não foi informado!")
# Verifica se a quantidade de regras está correta
if len(sys_rules) != self.num_rules:
raise Exception("Quantidade de regras é diferente da informada no "
"bloco do sistema!")
# Verifica se os valores da lista são instâncias da classe Rule
for value in sys_rules:
if not isinstance(value, Rule):
raise Exception(
f"O valor não é uma instância da classe Rule!")
self.__rules = sys_rules
def add_rule(self, sys_rule: Rule):
"""
Adiciona uma nova entrada na lista contendo as regras do
sistema fuzzy.
Parâmetros
----------
sys_rule : Rule
Instância da classe Rule.
"""
# Verifica se o número de regras já foi informado
if self.num_rules is None:
raise Exception(f"O número de regras não foi informado!")
# Verifica se a quantidade de regras está correta
if len(self.rules) >= self.num_rules:
raise Exception("Não é possível adicionar mais regras!")
if not isinstance(sys_rule, Rule):
raise Exception(
f"O valor não é uma instância da classe Rule!")
self.__rules.append(sys_rule)
@property
def facies_association(self) -> Dict[int, int]:
return self.__facies_association
@facies_association.setter
def facies_association(self, sys_facies_association: Dict[int, int]):
# Verifica se o dicionário possui os tipos corretos
for k_output, k_value in sys_facies_association.items():
if not isinstance(k_output, int):
raise Exception(
"A chave não é uma número inteiro!")
if not isinstance(k_value, int):
raise Exception(
"O valor não é uma número inteiro!")
self.__facies_association = sys_facies_association
def add_facies_association(self, sys_num: int, sys_value: Output):
# Verifica se a associação de fácies já foi cadastrada
if self.__facies_association.get(sys_num) is not None:
raise Exception(f"Associação de Fácies {sys_num} já cadastrada!")
# Verifica se a chave do dicionário é um inteiro
if not isinstance(sys_num, int):
raise Exception(
"A chave não é uma número inteiro!")
# Verifica se o valor do dicionário é um inteiro
if not isinstance(sys_value, int):
raise Exception(
"O valor não é uma número inteiro!")
self.__facies_association[sys_num] = sys_value
@property
def use_dict_facies_association(self) -> bool:
return self.__use_dict_facies_association
@use_dict_facies_association.setter
def use_dict_facies_association(self,
sys_use_dict_facies_association: bool):
if not isinstance(sys_use_dict_facies_association, bool):
raise Exception(f"O nome não é um booleano!")
self.__use_dict_facies_association = sys_use_dict_facies_association | [
"[email protected]"
] | |
b2b28a96fe3b8e65d34f0ef57de0e797013b889c | b873ea1def0810f67834bf4926901b9a8fead362 | /tuples_and_sets_09_21/students_grades.py | 2d987e90a25cbc6088cc5ead764ea9d1a0c51cd1 | [] | no_license | NikiDimov/SoftUni-Python-Advanced | 20f822614fa0fa7de6ded3956fa8d40d589a4a86 | d6c1fe886a3c27c82f03e5e4a6c670f0905d54e6 | refs/heads/main | 2023-08-23T17:42:32.063057 | 2021-10-25T10:32:03 | 2021-10-25T10:32:03 | 328,750,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | N = int(input())
dict_of_students = {}
for _ in range(N):
student, grade = input().split()
grade = float(grade)
if student not in dict_of_students:
dict_of_students[student] = []
dict_of_students[student].append(grade)
for key, value in dict_of_students.items():
print(f"{key} -> {' '.join([f'{el:.2f}'for el in value])} (avg: {sum(value)/len(value):.2f})")
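# Illustrative run (input format assumed from the code: first N, then N lines of "<name> <grade>"):
#   input : 3, "Peter 5.20", "Peter 3.00", "George 6.00"
#   output: "Peter -> 5.20 3.00 (avg: 4.10)" and "George -> 6.00 (avg: 6.00)"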
| [
"[email protected]"
] | |
899cb53efbf654a627e45de51acc62dbd6fa2f33 | 16109c0fa3a7d0faf86a9c680ee2490170492a79 | /python_training/myemployee/myemployee/wsgi.py | 695fe3f02004df5e7ff6c50b0497e574378dbeb2 | [] | no_license | sandhyakopparla/python | 361ea5f0dedcdd2c08d0d16c25f3dd9b42b6c3f2 | b87a9ecce5005e2e43e5b0a2d8aa17d477d2c7a2 | refs/heads/submit | 2023-07-16T01:55:46.120786 | 2021-08-20T04:21:31 | 2021-08-20T04:21:31 | 397,566,663 | 0 | 0 | null | 2021-08-20T04:21:31 | 2021-08-18T10:51:48 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for myemployee project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myemployee.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
6735465791531aeeabc1ba1331fb80c55ae2c8f7 | cd4676555fc1066b5a84320ebbb1d4623066380d | /crusoe_observe/flowmon-rest-client/flowmonclient/resources/ads/Filters.py | 3bb92373672eb9ac13be90dde634a9a6f25fedd0 | [
"MIT"
] | permissive | wumingruiye/CRUSOE | 3ca542fa5362caf404593acfc4b01eb8f9b4d5f4 | 73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b | refs/heads/main | 2023-07-01T21:40:09.538245 | 2021-08-05T14:07:03 | 2021-08-05T14:07:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | class Filters:
def __init__(self, client):
self.client = client
def all(self):
resource = "/filters"
return self.client.get(resource)
| [
"[email protected]"
] | |
8f45898f49336b9ca0d3e8ac86db1286a844c6ae | ba3a705ecb3628641793854292aa9a3ff8fc1221 | /10_API/ex1_simple/env/bin/wheel | 56b0575ae06d4be2ebe7e4fa276140b10b77e48c | [] | no_license | mortenhaahr/NGK | d98ada8d63a07ea6447768ab6a23ad1346634b56 | a9e89afb452dd7953cba4403b4e8bc2c0ff2ba1e | refs/heads/master | 2022-07-01T20:39:02.063251 | 2020-05-11T15:40:26 | 2020-05-11T15:40:26 | 242,725,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/home/stud/NGK/10_API/ex1/env/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
d6e677ac1447f8278ddff36b140132e55441832c | 4abd8812b5e13906ef5b93397548e71655e7fde3 | /WebFrame.py | 3c049b2484390aba2807bb3f2fef1e7be2b6f50d | [] | no_license | HMmelody/AID1808 | 6b06f09308c7b3ba7cb80367d4b4a2568bf93691 | c8893e45f5496a5fbcdd1db43f88b0df53290e52 | refs/heads/master | 2020-04-04T18:17:20.444824 | 2018-11-06T03:55:22 | 2018-11-06T03:55:22 | 156,157,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | # coding=utf-8
'''
Simulated (mock) web framework part of the program
'''
from socket import *
from views import *
frame_ip = '127.0.0.1'
frame_port = 8080
frame_address = (frame_ip,frame_port)
# 静态网页位置
STATIC_DIR = './static'
# url决定我们能处理什么数据
urls = [('/time',show_time),('/hello',say_hello),('/bye',say_bye)]
# 应用类,将功能封装在类中
class Application(object):
def __init__(self):
self.sockfd = socket()
self.sockfd.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
self.sockfd.bind(frame_address)
def start(self):
self.sockfd.listen(5)
print("Listen the port 8080")
while True:
connfd,addr = self.sockfd.accept()
method = connfd.recv(128).decode()
path_info = connfd.recv(1024).decode()
self.handle(connfd,method,path_info)
def handle(self,connfd,method,path_info):
if method == 'GET':
if path_info == '/' or path_info[-5:] == '.html':
response = self.get_html(path_info)
else:
response = self.get_data(path_info)
elif method == 'POST':
response = '404'  # POST requests are not handled in this mock framework
connfd.send(response.encode())
connfd.close()
def get_html(self,path_info):
if path_info == '/':
get_file = STATIC_DIR + '/index.html'
else:
get_file = STATIC_DIR + path_info
try:
fd = open(get_file)
except IOError:
response = "404"
else:
response = fd.read()
finally:
return response
def get_data(self,path_info):
for url,func in urls:
if path_info == url:
return func()
return '404'
if __name__=='__main__':
app = Application()
app.start() # start the framework application | [
"[email protected]"
] | |
3fb2b2cb9ec1a2718bd91fb9ee318afc55f997c6 | 51d46cf862654d30f5fa0ee35a9243c9661fc0eb | /User_/user_custom/user_custom/wsgi.py | c4912256bea59c34b54c8253916ca4ef2ffa19cc | [] | no_license | LikeLionCBNU/HamDongHo | 6762a8db487ae2807d1ce9d4d2df7e18d67eab70 | 082cea62cf4b5136309cbddc8c09e4e84f25de7c | refs/heads/master | 2022-12-06T22:48:17.500207 | 2020-08-19T14:31:32 | 2020-08-19T14:31:32 | 256,194,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for user_custom project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_custom.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
f98ec6b74f00596af6fcf03d9e58cc0dd42de5a5 | 296756045df29db632141dda85034fca35213c92 | /D3_4615_재미있는오셸로게임.py | 23a924a8a5dd7ae3b09b95e5d7b3b37e34338aa0 | [] | no_license | dabini/SWEA | 8b1b321a2126a41f0786e7212eb81ea0204716b6 | 4b61d34a4b089699a5594b4c43781a6f1dd2235e | refs/heads/master | 2022-12-24T02:36:01.169672 | 2020-09-27T11:56:13 | 2020-09-27T11:56:13 | 236,012,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | T = int(input())
for t in range(T):
N, M = map(int, input().split()) # N: side length of the board, M: number of stones the players place
field = [[0]*(N+1) for _ in range(N+1)]
for j in range(N//2, N//2+2): # initial board setup
for i in range(N//2, N//2+2):
if i == j: # color 1 is a black stone, 2 is a white stone
field[j][i] = 2
else:
field[j][i] = 1
dx = [1, -1, 0, 0, 1, -1, 1, -1] # includes the diagonal directions
dy = [0, 0, 1, -1, 1, -1, -1, 1]
for m in range(M):
X, Y, color = map(int, input().split())
field[Y][X] = color
for d in range(8):
check = True
for k in range(1, N):
if 1 <= Y+(dy[d]*k) < N+1 and 1<=X+(dx[d]*k)< N+1 and field[Y+(dy[d]*k)][X+(dx[d]*k)] == color:
for f in range(1, k):
if field[Y+dy[d]*f][X+dx[d]*f] == 0:
check = False
break
if check == False:
break
if check:
for f in range(1, k):
field[Y+dy[d]*f][X+dx[d]*f] = color
break
Bcnt = 0
Wcnt = 0
for l in range(1, N+1):
for q in range(1, N+1):
if field[l][q] == 1: # black stone
Bcnt += 1
elif field[l][q] == 2: # white stone
Wcnt += 1
print("#{} {} {}".format(t+1, Bcnt, Wcnt)) | [
"[email protected]"
] | |
ec46f3d025b4c8f9a1a28ec7a4e09265e6806c5f | 1b3c73cfd2c183861942d821b5f7b87cfde05687 | /Clustering.py | ebee8512112a8b6ef2bf9157214257ebbb50b688 | [] | no_license | Cheereus/MathModel2020 | d0e77d199701fe752298d3998578be58409c9ce3 | 752811a5550ff3d6ec4dc21f4880986e3fcf08f7 | refs/heads/master | 2023-04-27T15:00:28.332328 | 2021-04-27T02:56:52 | 2021-04-27T02:56:52 | 296,013,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.neighbors import KNeighborsClassifier
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
import matplotlib.pyplot as plt
def k_means(X, k):
k_m_model = KMeans(n_clusters=k, max_iter=300, n_init=40, init='k-means++', n_jobs=-1)
k_m_model.fit(X)
return k_m_model.labels_.tolist()
def knn(X, y, k):
knn_model = KNeighborsClassifier(n_neighbors=k)
knn_model.fit(X, y)
return knn_model
def hca(X, k=None):
hca_model = linkage(X, 'ward')
return hca_model
# dendrogram for hca
def hca_dendrogram(model):
plt.figure(figsize=(50, 10))
dendrogram(model, leaf_rotation=90., leaf_font_size=8)
plt.show()
# labels of hca
def hca_labels(model, n_clusters):
labels = fcluster(model, n_clusters, criterion='maxclust')
return labels
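# Minimal usage sketch (added for illustration; the data below is made up):
if __name__ == '__main__':
    demo_X = np.random.rand(100, 5)      # 100 random samples with 5 features
    print(k_means(demo_X, 3))            # flat k-means labels for 3 clusters
    linkage_model = hca(demo_X)          # ward-linkage hierarchy
    print(hca_labels(linkage_model, 3))  # cut the hierarchy into 3 clusters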
| [
"[email protected]"
] | |
cf4b8c87b4ba45dbbd40902f0a861e6e06210961 | cf652cb90f9d6b22b5943e7d025af631214a904d | /plugins/keepkey/clientbase.py | 89ac50ec5a5d20df04dfb2034875d401697369c2 | [
"MIT"
] | permissive | ddude1/TestLite | 02919c68013d2ede9195d618d94260b842a5e292 | 3f3c00e4ef03dd9b23b99b02f9a8895da8d65aef | refs/heads/master | 2022-12-11T12:22:25.029101 | 2018-06-13T14:11:51 | 2018-06-13T14:11:51 | 136,489,568 | 0 | 0 | MIT | 2022-09-23T21:47:03 | 2018-06-07T14:31:31 | Python | UTF-8 | Python | false | false | 9,004 | py | import time
from struct import pack
from electrum_xgox.i18n import _
from electrum_xgox.util import PrintError, UserCancelled
from electrum_xgox.keystore import bip39_normalize_passphrase
from electrum_xgox.bitcoin import serialize_xpub
class GuiMixin(object):
# Requires: self.proto, self.device
messages = {
3: _("Confirm the transaction output on your %s device"),
4: _("Confirm internal entropy on your %s device to begin"),
5: _("Write down the seed word shown on your %s"),
6: _("Confirm on your %s that you want to wipe it clean"),
7: _("Confirm on your %s device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your "
"%s device"),
10: _("Confirm wallet address on your %s device"),
'default': _("Check your %s device to continue"),
}
def callback_Failure(self, msg):
# BaseClient's unfortunate call() implementation forces us to
# raise exceptions on failure in order to unwind the stack.
# However, making the user acknowledge they cancelled
# gets old very quickly, so we suppress those. The NotInitialized
# one is misnamed and indicates a passphrase request was cancelled.
if msg.code in (self.types.Failure_PinCancelled,
self.types.Failure_ActionCancelled,
self.types.Failure_NotInitialized):
raise UserCancelled()
raise RuntimeError(msg.message)
def callback_ButtonRequest(self, msg):
message = self.msg
if not message:
message = self.messages.get(msg.code, self.messages['default'])
self.handler.show_message(message % self.device, self.cancel)
return self.proto.ButtonAck()
def callback_PinMatrixRequest(self, msg):
if msg.type == 2:
msg = _("Enter a new PIN for your %s:")
elif msg.type == 3:
msg = (_("Re-enter the new PIN for your %s.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current %s PIN:")
pin = self.handler.get_pin(msg % self.device)
if not pin:
return self.proto.Cancel()
return self.proto.PinMatrixAck(pin=pin)
def callback_PassphraseRequest(self, req):
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your %s will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the Xgox coins in the wallet.") % self.device
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
return self.proto.Cancel()
passphrase = bip39_normalize_passphrase(passphrase)
return self.proto.PassphraseAck(passphrase=passphrase)
def callback_WordRequest(self, msg):
self.step += 1
msg = _("Step %d/24. Enter seed word as explained on "
"your %s:") % (self.step, self.device)
word = self.handler.get_word(msg)
# Unfortunately the device can't handle self.proto.Cancel()
return self.proto.WordAck(word=word)
def callback_CharacterRequest(self, msg):
char_info = self.handler.get_char(msg)
if not char_info:
return self.proto.Cancel()
return self.proto.CharacterAck(**char_info)
class KeepKeyClientBase(GuiMixin, PrintError):
def __init__(self, handler, plugin, proto):
assert hasattr(self, 'tx_api') # ProtocolMixin already constructed?
self.proto = proto
self.device = plugin.device
self.handler = handler
self.tx_api = plugin
self.types = plugin.types
self.msg = None
self.creating_wallet = False
self.used()
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
'''The name given by the user to the device.'''
return self.features.label
def is_initialized(self):
'''True if initialized, False if wiped.'''
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
def timeout(self, cutoff):
'''Time out the client if the last operation was before cutoff.'''
if self.last_operation < cutoff:
self.print_error("timed out")
self.clear_session()
@staticmethod
def expand_path(n):
'''Convert bip32 path to list of uint32 integers with prime flags
0/-1/1' -> [0, 0x80000001, 0x80000001]'''
        # This code is similar to code in trezorlib where it unfortunately
# is not declared as a staticmethod. Our n has an extra element.
PRIME_DERIVATION_FLAG = 0x80000000
path = []
for x in n.split('/')[1:]:
prime = 0
if x.endswith("'"):
x = x.replace('\'', '')
prime = PRIME_DERIVATION_FLAG
if x.startswith('-'):
prime = PRIME_DERIVATION_FLAG
path.append(abs(int(x)) | prime)
return path
def cancel(self):
'''Provided here as in keepkeylib but not trezorlib.'''
self.transport.write(self.proto.Cancel())
def i4b(self, x):
return pack('>I', x)
def get_xpub(self, bip32_path, xtype):
address_n = self.expand_path(bip32_path)
creating = False
node = self.get_public_node(address_n, creating).node
return serialize_xpub(xtype, node.chain_code, node.public_key, node.depth, self.i4b(node.fingerprint), self.i4b(node.child_num))
def toggle_passphrase(self):
if self.features.passphrase_protection:
self.msg = _("Confirm on your %s device to disable passphrases")
else:
self.msg = _("Confirm on your %s device to enable passphrases")
enabled = not self.features.passphrase_protection
self.apply_settings(use_passphrase=enabled)
def change_label(self, label):
self.msg = _("Confirm the new label on your %s device")
self.apply_settings(label=label)
def change_homescreen(self, homescreen):
self.msg = _("Confirm on your %s device to change your home screen")
self.apply_settings(homescreen=homescreen)
def set_pin(self, remove):
if remove:
self.msg = _("Confirm on your %s device to disable PIN protection")
elif self.features.pin_protection:
self.msg = _("Confirm on your %s device to change your PIN")
else:
self.msg = _("Confirm on your %s device to set a PIN")
self.change_pin(remove)
def clear_session(self):
'''Clear the session to force pin (and passphrase if enabled)
re-entry. Does not leak exceptions.'''
self.print_error("clear session:", self)
self.prevent_timeouts()
try:
super(KeepKeyClientBase, self).clear_session()
except BaseException as e:
# If the device was removed it has the same effect...
self.print_error("clear_session: ignoring error", str(e))
pass
def get_public_node(self, address_n, creating):
self.creating_wallet = creating
return super(KeepKeyClientBase, self).get_public_node(address_n)
def close(self):
        '''Called when our wallet was closed or the device removed.'''
self.print_error("closing client")
self.clear_session()
# Release the device
self.transport.close()
def firmware_version(self):
f = self.features
return (f.major_version, f.minor_version, f.patch_version)
def atleast_version(self, major, minor=0, patch=0):
return self.firmware_version() >= (major, minor, patch)
@staticmethod
def wrapper(func):
'''Wrap methods to clear any message box they opened.'''
def wrapped(self, *args, **kwargs):
try:
self.prevent_timeouts()
return func(self, *args, **kwargs)
finally:
self.used()
self.handler.finished()
self.creating_wallet = False
self.msg = None
return wrapped
@staticmethod
def wrap_methods(cls):
for method in ['apply_settings', 'change_pin',
'get_address', 'get_public_node',
'load_device_by_mnemonic', 'load_device_by_xprv',
'recovery_device', 'reset_device', 'sign_message',
'sign_tx', 'wipe_device']:
setattr(cls, method, cls.wrapper(getattr(cls, method)))
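# Illustrative sketch (added; not part of the plugin): a standalone restatement
# of KeepKeyClientBase.expand_path() for a quick sanity check of the BIP32 path
# encoding. The helper name and example path are hypothetical.
if __name__ == '__main__':
    PRIME = 0x80000000
    def expand_path_demo(path):
        out = []
        for part in path.split('/')[1:]:  # skip the leading element, e.g. "m"
            hardened = part.endswith("'") or part.startswith('-')
            out.append(abs(int(part.rstrip("'"))) | (PRIME if hardened else 0))
        return out
    assert expand_path_demo("m/44'/0'/0'/0/0") == [0x8000002c, 0x80000000, 0x80000000, 0, 0]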
| [
"[email protected]"
] | |
425fec3232c22560f10257179343fe677eb7810a | 94f304cb4c2ac2ad6ff1ee39725f46254c8838bc | /core/info/Ui_script.py | 39b68de152e2eba395e13f524ec2b484d241ef11 | [] | no_license | kmolLin/python3_solve_dynamic | 105bd70edaa5014e0ad76a9a3c66e43dc0fa5ad7 | 18f56e6958dd1816dfb7c26f4857aa3b41de9312 | refs/heads/master | 2021-06-03T10:19:44.551240 | 2016-09-23T13:22:52 | 2016-09-23T13:22:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,391 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/ahshoe/Desktop/Pyslvs/core/info/script.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Info_Dialog(object):
def setupUi(self, Info_Dialog):
Info_Dialog.setObjectName("Info_Dialog")
Info_Dialog.setEnabled(True)
Info_Dialog.resize(408, 485)
Info_Dialog.setMinimumSize(QtCore.QSize(246, 346))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/edges.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Info_Dialog.setWindowIcon(icon)
Info_Dialog.setAutoFillBackground(True)
Info_Dialog.setSizeGripEnabled(True)
Info_Dialog.setModal(True)
self.verticalLayout = QtWidgets.QVBoxLayout(Info_Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(Info_Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap(":/icons/main.png"))
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.copy = QtWidgets.QPushButton(Info_Dialog)
self.copy.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.copy.setAutoDefault(False)
self.copy.setObjectName("copy")
self.horizontalLayout_2.addWidget(self.copy)
self.save = QtWidgets.QPushButton(Info_Dialog)
self.save.setAutoDefault(False)
self.save.setObjectName("save")
self.horizontalLayout_2.addWidget(self.save)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.script = QtWidgets.QTextBrowser(Info_Dialog)
self.script.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.script.setObjectName("script")
self.verticalLayout.addWidget(self.script)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.buttonBox = QtWidgets.QDialogButtonBox(Info_Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
self.buttonBox.setSizePolicy(sizePolicy)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Help)
self.buttonBox.setObjectName("buttonBox")
self.horizontalLayout.addWidget(self.buttonBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Info_Dialog)
self.buttonBox.rejected.connect(Info_Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Info_Dialog)
def retranslateUi(self, Info_Dialog):
_translate = QtCore.QCoreApplication.translate
Info_Dialog.setWindowTitle(_translate("Info_Dialog", "Python Script"))
self.label.setWhatsThis(_translate("Info_Dialog", "Pyslvs Icon!"))
self.copy.setText(_translate("Info_Dialog", "Copy"))
self.save.setText(_translate("Info_Dialog", "Save as..."))
self.buttonBox.setWhatsThis(_translate("Info_Dialog", "Click to exit"))
import icons_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Info_Dialog = QtWidgets.QDialog()
ui = Ui_Info_Dialog()
ui.setupUi(Info_Dialog)
Info_Dialog.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
21c56687f092c048fd987093b694fa1b9cdba953 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /110_concurrency_parallelism/_exercises/templates/Mastering Concurrency in Python/Chapter10/example3.py | 22936bfe66daaa8030fb20b3750380ec1e244ec1 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 698 | py | # # ch9/example1.py
#
# ____ ma__ ______ sqrt
# ____ t_i_ ______ d_t_ __ timer
#
# ___ is_prime x
# print('Processing @...' ?
#
# __ ? < 2
# print('@ is not a prime number.' ?
#
# ____ ? __ 2
# print('@ is a prime number.' ?
#
# ____ ? % 2 __ 0
# print('@ is not a prime number.' ?
#
# ____
# limit _ __. sq.. ? + 1
# ___ i __ ra.. 3 ? 2
# __ ? % ? __ 0
# print('@ is not a prime number.' ?
# r_
#
# print('@ is a prime number.' ?
#
# __ _______ __ _______
#
# start _ ti..
# ? 9637529763296797)
# ? 427920331)
# ? 157)
# print('Took @.2_ seconds.' t.. - s..
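# A possible completion of the masked exercise template above (added sketch; an
# educated guess at the original ch9/example1.py timing demo, so details such as
# the exact print formatting are assumptions).
from math import sqrt
from timeit import default_timer as timer
def is_prime(x):
    print('Processing %i...' % x)
    if x < 2:
        print('%i is not a prime number.' % x)
    elif x == 2:
        print('%i is a prime number.' % x)
    elif x % 2 == 0:
        print('%i is not a prime number.' % x)
    else:
        limit = int(sqrt(x)) + 1
        for i in range(3, limit, 2):
            if x % i == 0:
                print('%i is not a prime number.' % x)
                return
        print('%i is a prime number.' % x)
if __name__ == '__main__':
    start = timer()
    is_prime(9637529763296797)
    is_prime(427920331)
    is_prime(157)
    print('Took %.2f seconds.' % (timer() - start))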
| [
"[email protected]"
] | |
f2adac6b08cba4a67e4da5fad5abb9ed22b1ebe1 | eed4e25645c7590a986d9b9c8435b8d052cc913a | /parakeet/transforms/stride_specialization.py | 899c1bad4ea379c053391da24e807222ba0846e6 | [
"BSD-3-Clause"
] | permissive | Tillsten/parakeet | 63afd4bb8d83b64529e81e6c13348fd2e4d10a6f | 5045c3f6c4de19f4acbafeaf2fc9e5c7ff9aa63c | refs/heads/master | 2021-01-18T15:36:16.814995 | 2013-10-02T00:36:23 | 2013-10-02T00:36:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | from .. analysis.find_constant_strides import FindConstantStrides, Const, Array, Struct, Tuple
from .. analysis.find_constant_strides import from_python_list, from_internal_repr
from .. syntax.helpers import const_int, const
from dead_code_elim import DCE
from phase import Phase
from simplify import Simplify
from transform import Transform
class StrideSpecializer(Transform):
def __init__(self, abstract_inputs):
Transform.__init__(self)
self.abstract_inputs = abstract_inputs
def pre_apply(self, fn):
analysis = FindConstantStrides(fn, self.abstract_inputs)
analysis.visit_fn(fn)
self.env = analysis.env
def transform_Var(self, expr):
if expr.name in self.env:
value = self.env[expr.name]
if value.__class__ is Const:
result = const(value.value)
return result
return expr
def transform_lhs(self, lhs):
return lhs
def has_unit_stride(abstract_value):
c = abstract_value.__class__
if c is Array:
return has_unit_stride(abstract_value.strides)
elif c is Struct:
return any(has_unit_stride(field_val)
for field_val
in abstract_value.fields.itervalues())
elif c is Tuple:
return any(has_unit_stride(elt)
for elt in abstract_value.elts)
elif c is Const:
return abstract_value.value == 1
else:
return False
_cache = {}
def specialize(fn, python_values, types = None):
if types is None:
abstract_values = from_python_list(python_values)
else:
# if types are given, assume that the values
# are already converted to Parakeet's internal runtime
# representation
abstract_values = []
for (t, internal_value) in zip(types, python_values):
abstract_values.append(from_internal_repr(t, internal_value))
key = (fn.name, tuple(abstract_values))
if key in _cache:
return _cache[key]
elif any(has_unit_stride(v) for v in abstract_values):
specializer = StrideSpecializer(abstract_values)
transforms = Phase([specializer, Simplify, DCE],
memoize = False, copy = True,
name = "StrideSpecialization for %s" % abstract_values)
new_fn = transforms.apply(fn)
else:
new_fn = fn
_cache[key] = new_fn
return new_fn | [
"[email protected]"
] | |
cf008304d5ece13b86dbf6527bf76302f2b03546 | 7e72e16f43170749dada023624a88fd622727639 | /jdcloud_sdk/services/monitor/models/SiteMonitorFtpOption.py | 74c06ad00ca3b082dff46c5db43a7d195487c31e | [
"Apache-2.0"
] | permissive | jdcloud-demo/jdcloud-sdk-python | 4dc1e814217df16c5f60f5e4b3f8260b770f9d2b | fddc2af24031c597948b8b8091978ac7e01a2695 | refs/heads/master | 2020-07-11T18:19:59.688112 | 2019-08-23T05:55:18 | 2019-08-23T05:55:18 | 204,613,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class SiteMonitorFtpOption(object):
def __init__(self, loginType=None, passwd=None, timeout=None, user=None):
"""
:param loginType: (Optional)
:param passwd: (Optional)
:param timeout: (Optional)
:param user: (Optional)
"""
self.loginType = loginType
self.passwd = passwd
self.timeout = timeout
self.user = user
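# Hedged usage sketch (added, not part of the generated SDK): construct the
# model with illustrative values when running this module directly.
if __name__ == "__main__":
    demo = SiteMonitorFtpOption(loginType=1, passwd="secret", timeout=30, user="ftpuser")
    print(demo.user, demo.timeout)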
| [
"[email protected]"
] | |
335349eab960e401562561524f316f7a8f145d3e | 8ab21f9b2fb6a96f9fd6ef8f06091b68b2ff8611 | /labluz/wsgi.py | 80467c5db8692bb3f7d05278d9ea4b457b4a8014 | [
"MIT"
] | permissive | macndesign/labluz | 8ddcfa71dcaab7083c0ed670d5e007e6e59de0c1 | a6100ae903ab6164d087ef2765c00d81d134a373 | refs/heads/master | 2016-09-09T21:39:55.771160 | 2013-12-16T03:45:02 | 2013-12-16T03:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | """
WSGI config for labluz project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "labluz.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
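# Deployment note (added, not in the original file): with this layout the
# application is typically served by a WSGI server, for example:
#   gunicorn labluz.wsgi
# Cling wraps the Django app so collected static files are served in front of it.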
| [
"[email protected]"
] | |
60e828ae2e99cb6911492deee536f5ec9007e165 | 7b5d97f8d0d00dfbad7cf52133166b7abdb47f11 | /overthewire/vortex/l0/solve.py | fb2ea6cc6b032fa8012914402d719d33fe3b8969 | [] | no_license | plvhx/any-ctf-writeup | 64b34010ba81b968f52bfb437ff908a4e6821a84 | 286c020a2b7e17d6a3ddc995c9c4f5087f4843df | refs/heads/master | 2021-06-10T22:18:03.294359 | 2016-12-14T14:28:03 | 2016-12-14T14:28:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #! /usr/bin/python
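# Summary (comment added for readability): this Python 2 solver reads four
# unsigned 32-bit integers from the level 0 service, replies with their sum in
# the machine's native byte order followed by a newline, then hands the socket
# to telnetlib for an interactive session.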
import sys
import struct
import socket
import telnetlib
if sys.byteorder == 'little':
Q = lambda x: struct.pack("<I", x)
rQ = lambda x: struct.unpack("<I", x)[0]
elif sys.byteorder == 'big':
Q = lambda x: struct.pack(">I", x)
rQ = lambda x: struct.unpack(">I", x)[0]
try:
s = socket.create_connection(('vortex.labs.overthewire.org', 5842))
except socket.gaierror as e:
(n, q) = e
sys.exit(-1)
try:
    values = []
    for i in range(4):
        values.append(rQ(s.recv(4)))
except socket.error as e:
    (n, q) = e
    sys.exit(-1)
try:
    s.send(Q(reduce(lambda x, y: x + y, values)) + chr(0x0a))
except socket.error as e:
(n, q) = e
sys.exit(-1)
t = telnetlib.Telnet()
t.sock = s
t.interact()
| [
"vagrant@precise64.(none)"
] | vagrant@precise64.(none) |
9a94262a5308425f8bf9da0226d2fa50d17e48c1 | 4894b77303b4fcd303f6103bc1387c99d87381f9 | /award/settings.py | 4d97140f4232351b4aac496d74956181687b1c98 | [
"MIT"
] | permissive | sngina/Award | ff21422839a32fbf5901e25427a33e6737aa15db | 4cdbe0da49cb69819deb4c00011718b087891919 | refs/heads/master | 2023-06-20T09:05:51.513020 | 2021-07-20T08:33:39 | 2021-07-20T08:33:39 | 386,533,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | """
Django settings for award project.
Generated by 'django-admin startproject' using Django 1.11.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
import django_heroku
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'ix8jby828(i8b*=n$#8xi9+mwrq=i1+=*1m*&i2b5v@lvvu+p$'
# SECURITY WARNING: don't run with debug turned on in production!
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
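# Example .env consumed by python-decouple (illustrative values only; the keys
# below are the ones read elsewhere in this settings module):
#   MODE=dev
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,localhost
#   DB_NAME=award
#   DB_USER=postgres
#   DB_PASSWORD=postgres
#   DB_HOST=127.0.0.1
#   DATABASE_URL=postgres://user:password@host:5432/award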
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'project',
'bootstrap3',
'crispy_forms',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'award.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'award.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if config('MODE') == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals())
LOGIN_REDIRECT_URL = '/' | [
"[email protected]"
] | |
5df01d2277c77acdae3677cd9511de336ccc60fd | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/scaleform/daapi/view/lobby/fortifications/components/fortdisconnectviewcomponent.py | d3c69c3ecb05a4cd811ba421ff93edfbdcf24cf1 | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/components/FortDisconnectViewComponent.py
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortViewHelper import FortViewHelper
from gui.Scaleform.daapi.view.meta.FortDisconnectViewMeta import FortDisconnectViewMeta
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS
from gui.shared import g_eventBus, events, EVENT_BUS_SCOPE
from gui.shared.fortifications.settings import CLIENT_FORT_STATE
from gui.shared.formatters import icons
from helpers import i18n
class FortDisconnectViewComponent(FortDisconnectViewMeta, FortViewHelper):
def __init__(self):
super(FortDisconnectViewComponent, self).__init__()
def _populate(self):
super(FortDisconnectViewComponent, self)._populate()
state = self.fortState
warningIcon = icons.alert()
warningText = warningIcon + i18n.makeString(FORTIFICATIONS.DISCONNECTED_WARNING)
if state.getStateID() == CLIENT_FORT_STATE.ROAMING:
warningDescrText = FORTIFICATIONS.DISCONNECTED_WARNINGDESCRIPTIONROAMING
else:
warningDescrText = FORTIFICATIONS.DISCONNECTED_WARNINGDESCRIPTIONCENTERUNAVAILABLE
warningDescrText = i18n.makeString(warningDescrText)
self.as_setWarningTextsS(warningText, warningDescrText)
g_eventBus.handleEvent(events.FortEvent(events.FortEvent.VIEW_LOADED), scope=EVENT_BUS_SCOPE.FORT)
def _dispose(self):
super(FortDisconnectViewComponent, self)._dispose()
| [
"[email protected]"
] | |
1420dc41a020bc3ae1a7eff801eef50c12fae666 | 6bd71bdfe9234e5e6de90bb40b6cd8d3e25ca6d2 | /Escuelas/TC2011/ejercicios-Parte1/soluciones/editarEsteComando.py | 4e4a68120b03d4f00ed94f98d99ed32d9755ac52 | [] | no_license | andres0sorio/CMSWork | f1f30a12bf43eb688ef9e95c53c94fe32fc7fe66 | 81e60a0a9b70cd2ae01d17b15be386a6cd925416 | refs/heads/master | 2021-01-22T13:12:16.094247 | 2015-10-26T04:47:12 | 2015-10-26T04:47:12 | 9,710,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | #!/usr/bin/python
import sys,os
# This is a simple Python script
# To write a comment, you must start the line with a # symbol
# Please comment out the following line by adding a # symbol at the front of it
# raise RuntimeError
username = os.getenv('USER')
print 'Well done: ', username, ' you have edited this script correctly'
| [
"[email protected]"
] | |
e18d643bc12f22e630a36c43f290e24481c55114 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/drama_scheduler/audition_drama_node.py | 948c4f5ec6ecb200f415e37432aa208b420fe8ff | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,307 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\drama_scheduler\audition_drama_node.py
# Compiled at: 2020-09-09 09:42:05
# Size of source mod 2**32: 18418 bytes
import random
from protocolbuffers import DistributorOps_pb2
from protocolbuffers.DistributorOps_pb2 import Operation
from careers.career_gig import Gig
from careers.career_tuning import Career
from date_and_time import DateAndTime, TimeSpan
from distributor.ops import GenericProtocolBufferOp
from distributor.shared_messages import build_icon_info_msg, IconInfoData
from distributor.system import Distributor
from drama_scheduler.drama_node import BaseDramaNode, DramaNodeRunOutcome
from drama_scheduler.drama_node_types import DramaNodeType
from event_testing.resolver import SingleSimResolver
from event_testing.tests import TunableTestSet
from interactions.utils.loot import LootActions
from interactions.utils.tunable_icon import TunableIcon
from rabbit_hole.rabbit_hole import RabbitHole
from sims4.localization import TunableLocalizedStringFactory
from sims4.tuning.tunable import TunableTuple, TunableList, Tunable, OptionalTunable
from sims4.utils import classproperty
from tunable_multiplier import TunableMultiplier
from tunable_time import TunableTimeSpan
from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet
from ui.ui_dialog_picker import ObjectPickerRow
import services, sims4.log
AUDITION_TIME_TOKEN = 'audition_time'
GIG_TIME_TOKEN = 'gig_time'
RABBIT_HOLE_ID_TOKEN = 'rabbit_hole_id'
logger = sims4.log.Logger('AuditionDramaNode', default_owner='bosee')
class AuditionDramaNode(BaseDramaNode):
INSTANCE_TUNABLES = {'gig':Gig.TunableReference(description='\n Gig this audition is for.\n '),
'audition_prep_time':TunableTimeSpan(description='\n Amount of time between the seed of the potential audition node\n to the start of the audition time. \n ',
default_hours=5),
'audition_prep_recommendation':TunableLocalizedStringFactory(description='\n String that gives the player more information on how to succeed\n in this audition.\n '),
'audition_prep_icon':OptionalTunable(description='\n If enabled, this icon will be displayed with the audition preparation.\n ',
tunable=TunableIcon(description='\n Icon for audition preparation.\n ')),
'audition_outcomes':TunableList(description='\n List of loot and multipliers which are for audition outcomes.\n ',
tunable=TunableTuple(description='\n The information needed to determine whether or not the sim passes\n or fails this audition. We cannot rely on the outcome of the \n interaction because we need to run this test on uninstantiated \n sims as well. This is similar to the fallback outcomes in \n interactions.\n ',
loot_list=TunableList(description='\n Loot applied if this outcome is chosen\n ',
tunable=LootActions.TunableReference(pack_safe=True)),
weight=TunableMultiplier.TunableFactory(description='\n A tunable list of tests and multipliers to apply to the \n weight of the outcome.\n '),
is_success=Tunable(description='\n Whether or not this is considered a success outcome.\n ',
tunable_type=bool,
default=False))),
'audition_rabbit_hole':RabbitHole.TunableReference(description='\n Data required to put sim in rabbit hole.\n '),
'skip_audition':OptionalTunable(description='\n If enabled, we can skip auditions if sim passes tuned tests.\n ',
tunable=TunableTuple(description='\n Data related to whether or not this audition can be skipped.\n ',
skip_audition_tests=TunableTestSet(description='\n Test to see if sim can skip this audition.\n '),
skipped_audition_loot=TunableList(description='\n Loot applied if sim manages to skip audition\n ',
tunable=LootActions.TunableReference(pack_safe=True)))),
'advance_notice_time':TunableTimeSpan(description='\n The amount of time between the alert and the start of the event.\n ',
default_hours=1,
locked_args={'days':0,
'minutes':0}),
'loot_on_schedule':TunableList(description='\n Loot applied if the audition drama node is scheduled successfully.\n ',
tunable=LootActions.TunableReference(pack_safe=True)),
'advance_notice_notification':TunableUiDialogNotificationSnippet(description='\n The notification that is displayed at the advance notice time.\n ')}
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
self._calculated_audition_time = None
self._calculated_gig_time = None
self._rabbit_hole_id = None
@classproperty
def drama_node_type(cls):
return DramaNodeType.AUDITION
@property
def _require_instanced_sim(self):
return False
@classproperty
def persist_when_active(cls):
return True
def get_picker_schedule_time(self):
return self._calculated_audition_time
def create_picker_row(self, owner=None, **kwargs):
now_time = services.game_clock_service().now()
min_audition_time = now_time + self.audition_prep_time()
possible_audition_times = self.get_final_times_based_on_schedule((self.min_and_max_times), anchor_time=min_audition_time, scheduled_time_only=True)
audition_time = min_audition_time
if possible_audition_times is not None:
now = services.time_service().sim_now
for possible_audition_time in possible_audition_times:
if possible_audition_time[0] >= now:
audition_time = possible_audition_time[0]
break
gig = self.gig
time_till_gig = gig.get_time_until_next_possible_gig(audition_time)
if time_till_gig is None:
return
gig_time = audition_time + time_till_gig
if self.skip_audition and self.skip_audition.skip_audition_tests.run_tests(SingleSimResolver(owner)):
formatted_string = Career.GIG_PICKER_SKIPPED_AUDITION_LOCALIZATION_FORMAT(gig.gig_pay.lower_bound, gig.gig_pay.upper_bound, gig_time, self.audition_prep_recommendation())
else:
formatted_string = Career.GIG_PICKER_LOCALIZATION_FORMAT(gig.gig_pay.lower_bound, gig.gig_pay.upper_bound, audition_time, gig_time, self.audition_prep_recommendation())
self._calculated_audition_time = audition_time
self._calculated_gig_time = gig_time
return gig.create_picker_row(formatted_string, owner, is_audition_flow=True)
def schedule(self, resolver, specific_time=None, time_modifier=TimeSpan.ZERO):
if self.skip_audition:
if self.skip_audition.skip_audition_tests.run_tests(resolver):
for loot in self.skip_audition.skipped_audition_loot:
loot.apply_to_resolver(resolver)
resolver.sim_info_to_test.career_tracker.set_gig(self.gig, self._calculated_gig_time)
return False
success = super().schedule(resolver, specific_time=specific_time, time_modifier=time_modifier)
if success:
services.calendar_service().mark_on_calendar(self, advance_notice_time=(self.advance_notice_time()))
self._send_career_ui_update(is_add=True)
for loot in self.loot_on_schedule:
loot.apply_to_resolver(resolver)
return success
def cleanup(self, from_service_stop=False):
services.calendar_service().remove_on_calendar(self.uid)
self._send_career_ui_update(is_add=False)
rabbit_hole_service = services.get_rabbit_hole_service()
if self._rabbit_hole_id:
if rabbit_hole_service.is_in_rabbit_hole((self._receiver_sim_info.id), rabbit_hole_id=(self._rabbit_hole_id)):
rabbit_hole_service.remove_rabbit_hole_expiration_callback(self._receiver_sim_info.id, self._rabbit_hole_id, self._on_sim_return)
super().cleanup(from_service_stop=from_service_stop)
def resume(self):
if self._rabbit_hole_id:
if not services.get_rabbit_hole_service().is_in_rabbit_hole((self._receiver_sim_info.id), rabbit_hole_id=(self._rabbit_hole_id)):
services.drama_scheduler_service().complete_node(self.uid)
def _run(self):
rabbit_hole_service = services.get_rabbit_hole_service()
self._rabbit_hole_id = rabbit_hole_service.put_sim_in_managed_rabbithole(self._receiver_sim_info, self.audition_rabbit_hole)
if self._rabbit_hole_id is None:
self._on_sim_return(canceled=True)
rabbit_hole_service.set_rabbit_hole_expiration_callback(self._receiver_sim_info.id, self._rabbit_hole_id, self._on_sim_return)
return DramaNodeRunOutcome.SUCCESS_NODE_INCOMPLETE
def _on_sim_return(self, canceled=False):
receiver_sim_info = self._receiver_sim_info
resolver = SingleSimResolver(receiver_sim_info)
weights = []
failure_outcomes = []
for outcome in self.audition_outcomes:
if canceled:
outcome.is_success or failure_outcomes.append(outcome)
continue
weight = outcome.weight.get_multiplier(resolver)
if weight > 0:
weights.append((weight, outcome))
if failure_outcomes:
selected_outcome = random.choice(failure_outcomes)
else:
selected_outcome = sims4.random.weighted_random_item(weights)
if not selected_outcome:
logger.error('No valid outcome is tuned on this audition. Verify weights in audition_outcome for {}.', self.guid64)
services.drama_scheduler_service().complete_node(self.uid)
return
if selected_outcome.is_success:
receiver_sim_info.career_tracker.set_gig(self.gig, self._calculated_gig_time)
for loot in selected_outcome.loot_list:
loot.apply_to_resolver(resolver)
services.drama_scheduler_service().complete_node(self.uid)
def _save_custom_data(self, writer):
if self._calculated_audition_time is not None:
writer.write_uint64(AUDITION_TIME_TOKEN, self._calculated_audition_time)
if self._calculated_gig_time is not None:
writer.write_uint64(GIG_TIME_TOKEN, self._calculated_gig_time)
if self._rabbit_hole_id is not None:
writer.write_uint64(RABBIT_HOLE_ID_TOKEN, self._rabbit_hole_id)
def _load_custom_data(self, reader):
self._calculated_audition_time = DateAndTime(reader.read_uint64(AUDITION_TIME_TOKEN, None))
self._calculated_gig_time = DateAndTime(reader.read_uint64(GIG_TIME_TOKEN, None))
self._rabbit_hole_id = reader.read_uint64(RABBIT_HOLE_ID_TOKEN, None)
rabbit_hole_service = services.get_rabbit_hole_service()
if not self._rabbit_hole_id:
rabbit_hole_service = services.get_rabbit_hole_service()
self._rabbit_hole_id = services.get_rabbit_hole_service().get_rabbit_hole_id_by_type(self._receiver_sim_info.id, self.audition_rabbit_hole)
if self._rabbit_hole_id:
if rabbit_hole_service.is_in_rabbit_hole((self._receiver_sim_info.id), rabbit_hole_id=(self._rabbit_hole_id)):
rabbit_hole_service.set_rabbit_hole_expiration_callback(self._receiver_sim_info.id, self._rabbit_hole_id, self._on_sim_return)
self._send_career_ui_update()
return True
def _send_career_ui_update(self, is_add=True):
audition_update_msg = DistributorOps_pb2.AuditionUpdate()
if is_add:
self.gig.build_gig_msg((audition_update_msg.audition_info), (self._receiver_sim_info), gig_time=(self._calculated_gig_time), audition_time=(self._calculated_audition_time))
op = GenericProtocolBufferOp(Operation.AUDITION_UPDATE, audition_update_msg)
build_icon_info_msg(IconInfoData(icon_resource=(self.audition_prep_icon)), self.audition_prep_recommendation(), audition_update_msg.recommended_task)
Distributor.instance().add_op(self._receiver_sim_info, op)
def load(self, drama_node_proto, schedule_alarm=True):
super_success = super().load(drama_node_proto, schedule_alarm=schedule_alarm)
if not super_success:
return False
services.calendar_service().mark_on_calendar(self, advance_notice_time=(self.advance_notice_time()))
return True
def on_calendar_alert_alarm(self):
receiver_sim_info = self._receiver_sim_info
resolver = SingleSimResolver(receiver_sim_info)
dialog = self.advance_notice_notification(receiver_sim_info, resolver=resolver)
dialog.show_dialog() | [
"[email protected]"
] | |
0f462660927792ab2746267aeb12801a663cbc12 | 69246effee7359d81d8e8ac5cab1bc354a7a933f | /account/tests.py | 88fad6f89c098ebd4fd21c8c6a5ee31eea059001 | [] | no_license | joeyac/TeachAssist2 | 0edfa98ab942ef5cd2308a617491e367a0b7bd86 | d99164b7d4fc2772bcda991ec9de0d13a9a894bf | refs/heads/master | 2020-03-19T04:50:45.656778 | 2018-06-22T08:34:49 | 2018-06-22T08:34:49 | 135,873,919 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | import json
from django.test import TestCase
from django.test import Client
from rest_framework import status
from account.models import User
from utils.constants import UserType
class AccountTestCase(TestCase):
def setUp(self):
teacher = User.objects.create(username='g123', email='[email protected]', user_type=UserType.TEACHER)
teacher.set_password('g123')
teacher.save()
self.c = Client()
def send_json(self, url, data):
return self.c.post(url, json.dumps(data), content_type="application/json")
def test_login_success(self):
response = self.send_json('/login/', {'username': 'g123', 'password': 'g123'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual('Succeeded', response.json()['data'])
def test_login_failed(self):
response = self.send_json('/login/', {'username': 'g123', 'password': '123g'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual('Invalid username or password', response.json()['data'])
def test_create_user(self):
response = self.send_json('/register/',
{'username': 'username1234', 'password': '1234567890', 'email': '[email protected]',
'user_type': UserType.STUDENT})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual('Succeeded', response.json()['data'])
def test_create_user_failed(self):
response = self.send_json('/register/',
{'username': 'username1234', 'password': '1234567890', 'email': '[email protected]',
'user_type': 'student===='})
self.assertEqual(response.json()['data'], 'user_type: "student====" is not a valid choice.')
def test_decorator_success(self):
self.send_json('/login/', {'username': 'g123', 'password': 'g123'})
response = self.c.get('/test/')
self.assertEqual(response.json()['data'], 'g123')
def test_decorator_failed(self):
response = self.c.get('/test/')
self.assertEqual(response.json()['data'], 'Please login first')
| [
"[email protected]"
] | |
dd02ed50120862fa5fea721d1c262eda6b6db504 | 19a32440205b2caeec67c73c10d917b5fb30a86a | /test/test_reports_report_subreports.py | 2474e1ff3056bb33e73940415fc89f13a98f3904 | [
"MIT",
"Apache-2.0"
] | permissive | marrotte/isilon_sdk_python | 480e84312f5924a506aeb09c9c7cae79a2b9b7f4 | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | refs/heads/master | 2020-03-23T07:31:40.376316 | 2016-06-07T23:44:31 | 2016-06-07T23:44:31 | 141,277,076 | 1 | 0 | MIT | 2018-07-17T11:02:08 | 2018-07-17T11:02:08 | null | UTF-8 | Python | false | false | 1,321 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.reports_report_subreports import ReportsReportSubreports
class TestReportsReportSubreports(unittest.TestCase):
""" ReportsReportSubreports unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testReportsReportSubreports(self):
"""
Test ReportsReportSubreports
"""
model = swagger_client.models.reports_report_subreports.ReportsReportSubreports()
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
6e67b80e239e05eb821cb6eac9b7fb4294adf9e2 | 3ed194eef8f7de82d10b76fb73dabb7abb6f0e46 | /if_char_is_char.py | 892a99d3d4beb0b7a290b6804c9827e1091304d3 | [
"MIT"
] | permissive | CrazyJ36/python | 9c4724e74b276664a9a638dfd09b082e78ec78f1 | 730c384304d51c23a4c36337216ae586f2fe674c | refs/heads/master | 2022-10-11T02:58:04.342322 | 2022-09-30T21:52:49 | 2022-09-30T21:52:49 | 186,742,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | #!/usr/bin/env python3.7
print("type 'x' or something else for comparison:")
mchar = input()
if mchar == 'x':  # '==' compares values; 'is' checks object identity and is unreliable here
print("input is x")
else:
print("input is not x")
| [
"[email protected]"
] | |
c0e0c39f8544157a8f50baaaa4e515081f058802 | 6ac0afae7ef0db215e5da2e188016deddf0f7a85 | /cogdl/layers/__init__.py | b7a38115afebf102bd1e96ec2ceb5792e518fc3b | [
"MIT"
] | permissive | tianjiansmile/cogdl | ca77277db411f59fdd2cdd75e666dac3e1bbe7c6 | 4e0bbaf76e0e3bde0c8f57010159a63981278329 | refs/heads/master | 2020-11-25T21:35:20.908782 | 2019-12-18T15:09:07 | 2019-12-18T15:09:07 | 228,856,678 | 0 | 0 | MIT | 2019-12-18T14:25:55 | 2019-12-18T14:25:54 | null | UTF-8 | Python | false | false | 120 | py | from .maggregator import MeanAggregator
from .se_layer import SELayer
__all__ = [
'SELayer',
'MeanAggregator'
] | [
"[email protected]"
] | |
4781d858b7b1f270f3c80d2569997214211a5abc | ae8f61a8c0c4a569f00529c3f07c73dbfc884f71 | /tiled/client/utils.py | 021dc8745848024a80b502d34a110853c79d851b | [
"BSD-3-Clause"
] | permissive | untzag/tiled | 1ba705303193312711d8ac75b977a26d6d9e7571 | 43a8ba82660ce3be077f2b6b060bdd2a23cf956b | refs/heads/main | 2023-04-18T18:34:13.545139 | 2021-04-28T21:27:59 | 2021-04-28T21:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,539 | py | import asyncio
from inspect import iscoroutine
import httpx
import msgpack
from ..utils import Sentinel
class UNSET(Sentinel):
pass
def get_content_with_cache(
cache, offline, client, path, accept=None, timeout=UNSET, **kwargs
):
request = client.build_request("GET", path, **kwargs)
if accept:
request.headers["Accept"] = accept
url = request.url.raw # URL as tuple
if offline:
# We must rely on the cache alone.
reservation = cache.get_reservation(url)
if reservation is None:
raise NotAvailableOffline(url)
content = reservation.load_content()
if content is None:
# TODO Do we ever get here?
raise NotAvailableOffline(url)
return content
if cache is None:
# No cache, so we can use the client straightforwardly.
response = _send(client, request, timeout=timeout)
handle_error(response)
return response.content
# If we get this far, we have an online client and a cache.
reservation = cache.get_reservation(url)
try:
if reservation is not None:
request.headers["If-None-Match"] = reservation.etag
response = _send(client, request, timeout=timeout)
handle_error(response)
if response.status_code == 304: # HTTP 304 Not Modified
# Read from the cache
content = reservation.load_content()
elif response.status_code == 200:
etag = response.headers.get("ETag")
content = response.content
# TODO Respect Cache-control headers (e.g. "no-store")
if etag is not None:
# Write to cache.
cache.put_etag_for_url(url, etag)
cache.put_content(etag, content)
else:
raise NotImplementedError(f"Unexpected status_code {response.status_code}")
finally:
if reservation is not None:
reservation.ensure_released()
return content
def get_json_with_cache(cache, offline, client, path, **kwargs):
return msgpack.unpackb(
get_content_with_cache(
cache, offline, client, path, accept="application/x-msgpack", **kwargs
)
)
def _send(client, request, timeout):
"""
EXPERIMENTAL: Tolerate sync httpx.Client or httpx.AsyncClient.
The AsyncClient is interesting because it can interface directly with FastAPI app
in the same process via ASGI.
"""
if timeout is UNSET:
result = client.send(request)
else:
result = client.send(request, timeout=timeout)
if iscoroutine(result):
return asyncio.run(result)
return result
def handle_error(response):
try:
response.raise_for_status()
except httpx.RequestError:
raise # Nothing to add in this case; just raise it.
except httpx.HTTPStatusError as exc:
if response.status_code < 500:
# Include more detail that httpx does by default.
message = (
f"{exc.response.status_code}: "
f"{exc.response.json()['detail']} "
f"{exc.request.url}"
)
raise ClientError(message, exc.request, exc.response) from exc
else:
raise
class ClientError(httpx.HTTPStatusError):
def __init__(self, message, request, response):
super().__init__(message=message, request=request, response=response)
class NotAvailableOffline(Exception):
"Item looked for in offline cache was not found."
| [
"[email protected]"
] | |
deb0f1f1c739784d71150481cdcafcc50e01d078 | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/ABC/A/072_a.py | 2bdb2b702f8afe70275c3ff34ad5c1e31423163a | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """100 17"""
output = """83"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """48 58"""
output = """0"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """1000000000 1000000000"""
output = """0"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
def resolve():
x,t = map(int, input().split())
if x - t > 0:
print(x-t)
else:
print("0") | [
"[email protected]"
] | |
5182e2b90713dbb6256c9936fc582e92cabae6f7 | 50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7 | /challenge6(hotel)/SnowballSH.py | 023455239f336510c3cbbc9ff2146d798a0ea89a | [
"MIT"
] | permissive | banana-galaxy/challenges | 792caa05e7b8aa10aad8e04369fc06aaf05ff398 | 8655c14828607535a677e2bb18689681ee6312fa | refs/heads/master | 2022-12-26T23:58:12.660152 | 2020-10-06T13:38:04 | 2020-10-06T13:38:04 | 268,851,516 | 11 | 8 | MIT | 2020-09-22T21:21:30 | 2020-06-02T16:24:41 | Python | UTF-8 | Python | false | false | 837 | py | def matrixSum(matrix):
#Returns a list of cols instead of rows
def get_col():
rt = []
for c in range(len(matrix[0])):
ct = []
for i in range(len(matrix)):
ct.append(matrix[i][c])
rt.append(ct)
return rt
#Check if TWT staff isn't able to move in
def not_able(idx,c):
for i, v in enumerate(c):
if i < idx and v <= 0:
return True
return False
col = get_col().copy()
SUMLIST = []
for col_count, every_col in enumerate(col):
for num_count, every_num in enumerate(every_col):
if not_able(num_count,every_col):
SUMLIST.append(0)
else:
SUMLIST.append(every_num)
return sum(SUMLIST) | [
"[email protected]"
] | |
95f95cb7909dc39645281e60db52b9e9201e9b98 | 00da01b831d58f4cbae5c3625823849dde3eff67 | /contests/293_DIV2/b.py | cbd9c868d1e95f83896418f65fa074356a5e2ff4 | [] | no_license | atupal/codeforces | 577b2c084b9cf047c5780f840e9fe49fcb0fd50a | 563c3e27e0450b3dde24661a606c84b1a8d81f17 | refs/heads/master | 2021-01-15T22:10:07.657554 | 2015-10-12T06:58:10 | 2015-10-12T06:58:10 | 10,015,882 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | # -*- coding: utf-8 -*-
s = raw_input()
t = raw_input()
import string
lower = string.lowercase
upper = lower.upper()
class mydict(dict):
def __missing__(self, key):
self[key] = 0
return self[key]
ds = mydict()
dt = mydict()
for i in s:
ds[i] += 1
for i in t:
dt[i] += 1
cnt1 = 0
cnt2 = 0
for key in ds:
t = min( ds[key], dt[key] )
ds[key] -= t
dt[key] -= t
cnt1 += t
for key in lower:
if ds[key] and dt[key.upper()]:
cnt2 += min( ds[key] , dt[key.upper()] )
for key in upper:
if ds[key] and dt[key.lower()]:
cnt2 += min( ds[key] , dt[key.lower()] )
print cnt1, cnt2
| [
"[email protected]"
] | |
c5c31cec5a6fe7c542613055cd5833c5b02e84fd | 84226827016bf833e843ebce91d856e74963e3ed | /tests/unit/payload_test.py | fb32bdfe608f403ef2d986cac36febf6377b224a | [
"Apache-2.0"
] | permissive | jbq/pkg-salt | ad31610bf1868ebd5deae8f4b7cd6e69090f84e0 | b6742e03cbbfb82f4ce7db2e21a3ff31b270cdb3 | refs/heads/master | 2021-01-10T08:55:33.946693 | 2015-05-21T13:41:01 | 2015-05-21T13:41:01 | 36,014,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,505 | py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
tests.unit.payload_test
~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath, MockWraps
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
ensure_in_syspath('../')
# Import salt libs
import salt.payload
from salt.utils.odict import OrderedDict
import salt.exceptions
# Import 3rd-party libs
import msgpack
import zmq
import errno
import threading
import time
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PayloadTestCase(TestCase):
def assertNoOrderedDict(self, data):
if isinstance(data, OrderedDict):
raise AssertionError(
'Found an ordered dictionary'
)
if isinstance(data, dict):
for value in data.values():
self.assertNoOrderedDict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
self.assertNoOrderedDict(chunk)
def test_list_nested_odicts(self):
with patch('msgpack.version', (0, 1, 13)):
msgpack.dumps = MockWraps(
msgpack.dumps, 1, TypeError('ODict TypeError Forced')
)
payload = salt.payload.Serial('msgpack')
idata = {'pillar': [OrderedDict(environment='dev')]}
odata = payload.loads(payload.dumps(idata.copy()))
self.assertNoOrderedDict(odata)
self.assertEqual(idata, odata)
class SREQTestCase(TestCase):
port = 8845 # TODO: dynamically assign a port?
@classmethod
def setUpClass(cls):
'''
Class to set up zmq echo socket
'''
def echo_server():
'''
            A server that echoes the message sent to it over zmq
Optional "sleep" can be sent to delay response
'''
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:{0}".format(SREQTestCase.port))
payload = salt.payload.Serial('msgpack')
while SREQTestCase.thread_running.is_set():
try:
# Wait for next request from client
message = socket.recv(zmq.NOBLOCK)
msg_deserialized = payload.loads(message)
if isinstance(msg_deserialized['load'], dict) and msg_deserialized['load'].get('sleep'):
time.sleep(msg_deserialized['load']['sleep'])
socket.send(message)
except zmq.ZMQError as exc:
if exc.errno == errno.EAGAIN:
continue
raise
SREQTestCase.thread_running = threading.Event()
SREQTestCase.thread_running.set()
SREQTestCase.echo_server = threading.Thread(target=echo_server)
SREQTestCase.echo_server.start()
@classmethod
def tearDownClass(cls):
'''
Remove echo server
'''
# kill the thread
SREQTestCase.thread_running.clear()
SREQTestCase.echo_server.join()
def get_sreq(self):
return salt.payload.SREQ('tcp://127.0.0.1:{0}'.format(SREQTestCase.port))
def test_send_auto(self):
'''
Test creation, send/rect
'''
sreq = self.get_sreq()
# check default of empty load and enc clear
assert sreq.send_auto({}) == {'enc': 'clear', 'load': {}}
# check that the load always gets passed
assert sreq.send_auto({'load': 'foo'}) == {'load': 'foo', 'enc': 'clear'}
def test_send(self):
sreq = self.get_sreq()
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
def test_timeout(self):
'''
Test SREQ Timeouts
'''
sreq = self.get_sreq()
# client-side timeout
start = time.time()
# This is a try/except instead of an assertRaises because of a possible
        # subtle bug in zmq wherein a timeout=0 actually executes a single poll
# before the timeout is reached.
try:
sreq.send('clear', 'foo', tries=0, timeout=0)
except salt.exceptions.SaltReqTimeoutError:
pass
assert time.time() - start < 1 # ensure we didn't wait
# server-side timeout
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=1, timeout=1)
assert time.time() - start >= 1 # ensure we actually tried once (1s)
# server-side timeout with retries
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=2, timeout=1)
assert time.time() - start >= 2 # ensure we actually tried twice (2s)
        # test a regular send afterwards (to make sure sockets aren't in a twist)
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
def test_destroy(self):
'''
Test the __del__ capabilities
'''
sreq = self.get_sreq()
# ensure no exceptions when we go to destroy the sreq, since __del__
# swallows exceptions, we have to call destroy directly
sreq.destroy()
if __name__ == '__main__':
from integration import run_tests
run_tests(PayloadTestCase, needs_daemon=False)
run_tests(SREQTestCase, needs_daemon=False)
| [
"[email protected]"
] | |
06293b56fb3f342e664ece1e162150b79ef57c2f | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/str_get_element_oob_1-60.py | e03bfff89349b673831ba7e902bbb691139386ce | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | x:str = "abc"
a:str = ""
def str_get(s:str, i:int) -> str:
return s[i]
a = str_get($ID, -1)
print(a)
| [
"[email protected]"
] | |
ebdebcdd687aa5cf786c51373a673303400f155a | 0478abafc05f1dd55ddf6054d95fef73e9fa03e9 | /quati/__main__.py | 5d239149288d0d96a0286f5e599b8a35e00bbdbc | [
"MIT"
] | permissive | deep-spin/quati | 89bce0868b36b0d7902659507b72acfbd01ada98 | 62a6769475090182fe2990b2864d66f8e2081a32 | refs/heads/master | 2023-03-12T09:22:31.520259 | 2021-03-02T15:13:22 | 2021-03-02T15:13:22 | 330,678,540 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | import argparse
import logging
from quati import config_utils
from quati import opts
from quati import predict
from quati import train
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description='quati')
parser.add_argument('task', type=str, choices=['train', 'predict'])
opts.general_opts(parser)
opts.preprocess_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opts.predict_opts(parser)
if __name__ == '__main__':
options = parser.parse_args()
options.output_dir = config_utils.configure_output(options.output_dir)
config_utils.configure_logger(options.debug, options.output_dir)
config_utils.configure_seed(options.seed)
config_utils.configure_device(options.gpu_id)
logger.info('Output directory is: {}'.format(options.output_dir))
if options.task == 'train':
train.run(options)
elif options.task == 'predict':
predict.run(options)
| [
"[email protected]"
] | |
ed7a0afb48637db929c10a6a88ec40e413eed8cc | 92e3a6424326bf0b83e4823c3abc2c9d1190cf5e | /scripts/icehouse/opt/stack/neutron/neutron/agent/linux/interface.py | 89d30a5f79e4a947e3d1bf5e05a8148957b71c0e | [
"Apache-2.0"
] | permissive | AnthonyEzeigbo/OpenStackInAction | d6c21cf972ce2b1f58a93a29973534ded965d1ea | ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e | refs/heads/master | 2023-07-28T05:38:06.120723 | 2020-07-25T15:19:21 | 2020-07-25T15:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,791 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo.config import cfg
import six
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.extensions.flavor import (FLAVOR_NETWORK)
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('ovs_integration_bridge',
default='br-int',
help=_('Name of Open vSwitch bridge to use')),
cfg.BoolOpt('ovs_use_veth',
default=False,
help=_('Uses veth for an interface or not')),
cfg.IntOpt('network_device_mtu',
help=_('MTU setting for device.')),
cfg.StrOpt('meta_flavor_driver_mappings',
help=_('Mapping between flavor and LinuxInterfaceDriver')),
cfg.StrOpt('admin_user',
help=_("Admin username")),
cfg.StrOpt('admin_password',
help=_("Admin password"),
secret=True),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
]
@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):
# from linux IF_NAMESIZE
DEV_NAME_LEN = 14
DEV_NAME_PREFIX = 'tap'
def __init__(self, conf):
self.conf = conf
self.root_helper = config.get_root_helper(conf)
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=[]):
"""Set the L3 settings for the interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
"""
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace=namespace)
previous = {}
for address in device.addr.list(scope='global', filters=['permanent']):
previous[address['cidr']] = address['ip_version']
# add new addresses
for ip_cidr in ip_cidrs:
net = netaddr.IPNetwork(ip_cidr)
if ip_cidr in previous:
del previous[ip_cidr]
continue
device.addr.add(net.version, ip_cidr, str(net.broadcast))
# clean up any old addresses
for ip_cidr, ip_version in previous.items():
if ip_cidr not in preserve_ips:
device.addr.delete(ip_version, ip_cidr)
self.delete_conntrack_state(root_helper=self.root_helper,
namespace=namespace,
ip=ip_cidr)
def delete_conntrack_state(self, root_helper, namespace, ip):
"""Delete conntrack state associated with an IP address.
This terminates any active connections through an IP. Call this soon
after removing the IP address from an interface so that new connections
cannot be created before the IP address is gone.
root_helper: root_helper to gain root access to call conntrack
namespace: the name of the namespace where the IP has been configured
ip: the IP address for which state should be removed. This can be
passed as a string with or without /NN. A netaddr.IPAddress or
netaddr.Network representing the IP address can also be passed.
"""
ip_str = str(netaddr.IPNetwork(ip).ip)
ip_wrapper = ip_lib.IPWrapper(root_helper, namespace=namespace)
# Delete conntrack state for ingress traffic
# If 0 flow entries have been deleted
# conntrack -D will return 1
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception(_("Failed deleting ingress connection state of"
" floatingip %s"), ip_str)
# Delete conntrack state for egress traffic
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception(_("Failed deleting egress connection state of"
" floatingip %s"), ip_str)
def check_bridge_exists(self, bridge):
if not ip_lib.device_exists(bridge):
raise exceptions.BridgeDoesNotExist(bridge=bridge)
def get_device_name(self, port):
return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
@abc.abstractmethod
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
@abc.abstractmethod
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
class NullDriver(LinuxInterfaceDriver):
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
pass
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
pass
class OVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an OVS bridge."""
DEV_NAME_PREFIX = 'tap'
def __init__(self, conf):
super(OVSInterfaceDriver, self).__init__(conf)
if self.conf.ovs_use_veth:
self.DEV_NAME_PREFIX = 'ns-'
def _get_tap_name(self, dev_name, prefix=None):
if self.conf.ovs_use_veth:
dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap')
return dev_name
def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
internal=True):
cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port', device_name, '--',
'add-port', bridge, device_name]
if internal:
cmd += ['--', 'set', 'Interface', device_name, 'type=internal']
cmd += ['--', 'set', 'Interface', device_name,
'external-ids:iface-id=%s' % port_id,
'--', 'set', 'Interface', device_name,
'external-ids:iface-status=active',
'--', 'set', 'Interface', device_name,
'external-ids:attached-mac=%s' % mac_address]
utils.execute(cmd, self.root_helper)
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
self.check_bridge_exists(bridge)
ip = ip_lib.IPWrapper(self.root_helper)
tap_name = self._get_tap_name(device_name, prefix)
if self.conf.ovs_use_veth:
# Create ns_dev in a namespace if one is configured.
root_dev, ns_dev = ip.add_veth(tap_name,
device_name,
namespace2=namespace)
else:
ns_dev = ip.device(device_name)
internal = not self.conf.ovs_use_veth
self._ovs_add_port(bridge, tap_name, port_id, mac_address,
internal=internal)
ns_dev.link.set_address(mac_address)
if self.conf.network_device_mtu:
ns_dev.link.set_mtu(self.conf.network_device_mtu)
if self.conf.ovs_use_veth:
root_dev.link.set_mtu(self.conf.network_device_mtu)
# Add an interface created by ovs to the namespace.
if not self.conf.ovs_use_veth and namespace:
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
if self.conf.ovs_use_veth:
root_dev.link.set_up()
else:
LOG.info(_("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
tap_name = self._get_tap_name(device_name, prefix)
self.check_bridge_exists(bridge)
ovs = ovs_lib.OVSBridge(bridge, self.root_helper)
try:
ovs.delete_port(tap_name)
if self.conf.ovs_use_veth:
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace)
device.link.delete()
LOG.debug(_("Unplugged interface '%s'"), device_name)
except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"),
device_name)
class MidonetInterfaceDriver(LinuxInterfaceDriver):
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""This method is called by the Dhcp agent or by the L3 agent
when a new network is created
"""
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
ip = ip_lib.IPWrapper(self.root_helper)
tap_name = device_name.replace(prefix or 'tap', 'tap')
# Create ns_dev in a namespace if one is configured.
root_dev, ns_dev = ip.add_veth(tap_name, device_name,
namespace2=namespace)
ns_dev.link.set_address(mac_address)
# Add an interface created by ovs to the namespace.
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
root_dev.link.set_up()
cmd = ['mm-ctl', '--bind-port', port_id, device_name]
utils.execute(cmd, self.root_helper)
else:
LOG.info(_("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
# the port will be deleted by the dhcp agent that will call the plugin
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace)
try:
device.link.delete()
except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"), device_name)
LOG.debug(_("Unplugged interface '%s'"), device_name)
ip_lib.IPWrapper(
self.root_helper, namespace).garbage_collect_namespace()
class IVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an IVS bridge."""
DEV_NAME_PREFIX = 'tap'
def __init__(self, conf):
super(IVSInterfaceDriver, self).__init__(conf)
self.DEV_NAME_PREFIX = 'ns-'
def _get_tap_name(self, dev_name, prefix=None):
dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap')
return dev_name
def _ivs_add_port(self, device_name, port_id, mac_address):
cmd = ['ivs-ctl', 'add-port', device_name]
utils.execute(cmd, self.root_helper)
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
ip = ip_lib.IPWrapper(self.root_helper)
tap_name = self._get_tap_name(device_name, prefix)
root_dev, ns_dev = ip.add_veth(tap_name, device_name)
self._ivs_add_port(tap_name, port_id, mac_address)
ns_dev = ip.device(device_name)
ns_dev.link.set_address(mac_address)
if self.conf.network_device_mtu:
ns_dev.link.set_mtu(self.conf.network_device_mtu)
root_dev.link.set_mtu(self.conf.network_device_mtu)
if namespace:
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
root_dev.link.set_up()
else:
LOG.info(_("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
tap_name = self._get_tap_name(device_name, prefix)
try:
cmd = ['ivs-ctl', 'del-port', tap_name]
utils.execute(cmd, self.root_helper)
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace)
device.link.delete()
LOG.debug(_("Unplugged interface '%s'"), device_name)
except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"),
device_name)
class BridgeInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating bridge interfaces."""
DEV_NAME_PREFIX = 'ns-'
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plugin the interface."""
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
ip = ip_lib.IPWrapper(self.root_helper)
# Enable agent to define the prefix
if prefix:
tap_name = device_name.replace(prefix, 'tap')
else:
tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap')
# Create ns_veth in a namespace if one is configured.
root_veth, ns_veth = ip.add_veth(tap_name, device_name,
namespace2=namespace)
ns_veth.link.set_address(mac_address)
if self.conf.network_device_mtu:
root_veth.link.set_mtu(self.conf.network_device_mtu)
ns_veth.link.set_mtu(self.conf.network_device_mtu)
root_veth.link.set_up()
ns_veth.link.set_up()
else:
LOG.info(_("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
device = ip_lib.IPDevice(device_name, self.root_helper, namespace)
try:
device.link.delete()
LOG.debug(_("Unplugged interface '%s'"), device_name)
except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"),
device_name)
class MetaInterfaceDriver(LinuxInterfaceDriver):
def __init__(self, conf):
super(MetaInterfaceDriver, self).__init__(conf)
from neutronclient.v2_0 import client
self.neutron = client.Client(
username=self.conf.admin_user,
password=self.conf.admin_password,
tenant_name=self.conf.admin_tenant_name,
auth_url=self.conf.auth_url,
auth_strategy=self.conf.auth_strategy,
region_name=self.conf.auth_region
)
self.flavor_driver_map = {}
for flavor, driver_name in [
driver_set.split(':')
for driver_set in
self.conf.meta_flavor_driver_mappings.split(',')]:
self.flavor_driver_map[flavor] = self._load_driver(driver_name)
def _get_flavor_by_network_id(self, network_id):
network = self.neutron.show_network(network_id)
return network['network'][FLAVOR_NETWORK]
def _get_driver_by_network_id(self, network_id):
flavor = self._get_flavor_by_network_id(network_id)
return self.flavor_driver_map[flavor]
def _set_device_plugin_tag(self, network_id, device_name, namespace=None):
plugin_tag = self._get_flavor_by_network_id(network_id)
device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace)
device.link.set_alias(plugin_tag)
def _get_device_plugin_tag(self, device_name, namespace=None):
device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace)
return device.link.alias
def get_device_name(self, port):
driver = self._get_driver_by_network_id(port.network_id)
return driver.get_device_name(port)
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
driver = self._get_driver_by_network_id(network_id)
ret = driver.plug(network_id, port_id, device_name, mac_address,
bridge=bridge, namespace=namespace, prefix=prefix)
self._set_device_plugin_tag(network_id, device_name, namespace)
return ret
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
plugin_tag = self._get_device_plugin_tag(device_name, namespace)
driver = self.flavor_driver_map[plugin_tag]
return driver.unplug(device_name, bridge, namespace, prefix)
def _load_driver(self, driver_provider):
LOG.debug(_("Driver location: %s"), driver_provider)
plugin_klass = importutils.import_class(driver_provider)
return plugin_klass(self.conf)
| [
"[email protected]"
] | |
7351842124bf39729e663fadcacb1fdc825994cb | 95eed88115075f7e1916a14de7497d05a12a9330 | /abc194d.py | d19e27dcc40bbd3da711a3fffa9b61e788b10c10 | [] | no_license | ynagi2/atcoder | bdbbd030f1dd39e937b0872b028ce0f38372521e | e404f4500d837bfd6ca473aa2837f46ae71ad84a | refs/heads/master | 2022-04-29T12:48:44.229462 | 2022-04-22T15:04:50 | 2022-04-22T15:04:50 | 241,098,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py |
# Taking the partial sum of the arithmetic-geometric series and letting it go to the limit
# shows that it suffices to sum n / (n - k) for k = 1 .. n-1.
# Alternatively, E = p*1 + (1-p)*(E+1) (succeed with probability p; otherwise E grows by
# one more try), so E = 1/p.  Since p = 1 - n/N, this again means summing N / (N - n).
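# Worked example: n = 3 gives ans = 3/1 + 3/2 = 4.5.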
def main():
n = int(input())
ans = 0
for i in range(n-1):
ans += n / (i+1)
print(ans)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
293f2076ab950a2caf64b738152b431673aa60d9 | f71aecb0e91fe877af3ec652c7f6753a1e7b5ccd | /DeleteAndEarn_MID_740.py | 9184c6e40bedecac812fa994aac05abff1692a4c | [] | no_license | 953250587/leetcode-python | 036ad83154bf1fce130d41220cf2267856c7770d | 679a2b246b8b6bb7fc55ed1c8096d3047d6d4461 | refs/heads/master | 2020-04-29T12:01:47.084644 | 2019-03-29T15:50:45 | 2019-03-29T15:50:45 | 176,122,880 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | """
Given an array nums of integers, you can perform operations on the array.
In each operation, you pick any nums[i] and delete it to earn nums[i] points. After, you must delete every element equal to nums[i] - 1 or nums[i] + 1.
You start with 0 points. Return the maximum number of points you can earn by applying such operations.
Example 1:
Input: nums = [3, 4, 2]
Output: 6
Explanation:
Delete 4 to earn 4 points, consequently 3 is also deleted.
Then, delete 2 to earn 2 points. 6 total points are earned.
Example 2:
Input: nums = [2, 2, 3, 3, 3, 4]
Output: 9
Explanation:
Delete 3 to earn 3 points, deleting both 2's and the 4.
Then, delete 3 again to earn 3 points, and 3 again to earn 3 points.
9 total points are earned.
Note:
The length of nums is at most 20000.
Each element nums[i] is an integer in the range [1, 10000]
"""
class Solution(object):
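    # Bucket the obtainable points per value (v * count(v)); deleting v removes v-1 and
    # v+1, so this reduces to "house robber" over the values 1..10000:
    #   dp[v] = max(dp[v-1], dp[v-2] + v * count(v))
    # e.g. [2, 2, 3, 3, 3, 4] -> totals {2: 4, 3: 9, 4: 4}; taking the 3s (9) beats 2s + 4 (8).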
def deleteAndEarn(self, nums):
"""
:type nums: List[int]
:rtype: int
192ms
"""
num_diff = [0] * 10001
for num in nums:
num_diff[num] += 1
dp = [0] * 10001
dp[1] = num_diff[1]
for i in range(2, 10001):
dp[i] = max(dp[i - 1], dp[i - 2] + num_diff[i] * i)
print(dp[0:10])
return dp[-1]
# print(Solution().deleteAndEarn([3, 4, 2]))
# print(Solution().deleteAndEarn([2, 2, 3, 3, 3, 4]))
print(Solution().deleteAndEarn([1])) | [
"[email protected]"
] | |
e7e1884d2677b6ffa1945bcccd74218252cc3ceb | 3d6f8dc406a18397c354df72ce7dbf5e87712567 | /LeetCode/Num_Count.py | b0d86b7223ec0c7f7887b0cca1ebe54dcde1a1d1 | [] | no_license | HonryZhang/Python-Learning | b7410153eff7cd4e51e6e5d69cf7a9bc739a8243 | e29a75eb42588b2ac31d9b498827cba9c87fc157 | refs/heads/master | 2021-01-16T21:41:45.429416 | 2018-05-16T01:36:31 | 2018-05-16T01:36:31 | 100,246,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,893 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'Hongrui'
'''
Count how many times a digit appears:
a. How many times does the digit 5 appear in 1~2593?  In 555, for example, 5 counts as appearing 3 times.
b. Count how many times the digit x (0~9) appears in 1~N.
'''
'''
Input k: the target number
Input n: the target digit
Output: the total times appeared in the list
'''
def digitCounts(k,n):
t_count = 0 #define the total count of digit n in the list
    n_count = 0 #define the count of digit n in the single loop
index = 0 #define the location of digit n
p_value = k
while p_value:
if p_value%10>n: #get the single digit count
n_count = int(p_value/10+1)*(10**index)
if p_value%10==n: #get the 10th digit count
n_count = int(p_value/10)*(10**index)+ k%(10**index)+1
if p_value%10<n: #get the 100th digit count
n_count = int(p_value/10)*(10**index)
t_count +=n_count
p_value = p_value/10
        index += 1  # was "index=+1", which pinned index at 1 instead of advancing the digit place
print 'the total count of number %s in the list is %s'%(n,t_count)
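# e.g. digitCounts(13, 1) should report 6: the digit 1 appears in 1, 10, 11 (twice), 12 and 13.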
if __name__=="__main__":
while True:
value = int(raw_input('Please input a target value:>>'))
number = int(raw_input('Please input a digit:>>'))
if (value>0) and ((number >=0) and (number <10)):
digitCounts(value,number)
else:
print 'Exited.Please check your input. the target value must be a positive integer and the number must between 0 and 9 '
break
'''
Test Cases:
TC_1:Input an invalid target number:0
Expected Result:program outputs 0
TC_2:Input an invalid digit which is larger than 9 or is a negative number
Expect Result: program exited and print correct message
TC_3:Input a special value of the single digit 0
Expect Result: program outputs the correct count
TC_4:Input a negative value of target
Expect Result: program exited and print correct message
''' | [
"[email protected]"
] | |
566835bfa21b0ae9ac62103e689e07fca0d0ec84 | dd027c4bbcace97e3dbf566c123b178ceb1a8282 | /sett/bin.py | ec6cfeb5a5845420ceece18c657966787f9d4496 | [] | no_license | cecedille1/Sett | 479bf00ca8df807f431e235c68b892bb90fab9b0 | bf8b9f204caa532a5fb8f110ab4e4a1cea03cb96 | refs/heads/master | 2021-01-10T13:28:11.614022 | 2016-03-31T16:51:14 | 2016-03-31T16:51:14 | 43,488,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import functools
from paver.easy import path
class NotInstalled(Exception):
pass
class DirectorySearcher(object):
def __init__(self, directory):
self.directory = path(directory)
def search(self, program):
bin_path = self.directory.joinpath(program)
if bin_path.access(os.X_OK):
return bin_path
return None
def __repr__(self):
return '<DS {}>'.format(self.directory)
class DirectoriesSearcher(object):
def __init__(self, directories):
self.directories = [DirectorySearcher(d) for d in directories]
def search(self, program):
for directory in self.directories:
prog = directory.search(program)
if prog:
return prog
def __repr__(self):
return '<DS {}>'.format(', '.join(d.directory for d in self.directories))
class Which(object):
NotInstalled = NotInstalled
def __init__(self, searchers):
self.searchers = searchers
self._cache = {}
def __repr__(self):
return '<W {}>'.format(', '.join(repr(s) for s in self.searchers))
def __getattr__(self, program):
return self.search(program)
def search(self, program):
if '/' in program:
raise ValueError('Program name must not contain a /')
if program in self._cache:
return self._cache[program]
for searcher in self.searchers:
bin_path = searcher.search(program)
if bin_path:
self._cache[program] = bin_path
return bin_path
raise NotInstalled(program)
def default_searchers():
searchers = []
from sett.pip import VENV_BIN
if VENV_BIN.exists():
searchers.append(DirectorySearcher(VENV_BIN))
from sett.npm import NODE_MODULES
if NODE_MODULES.exists():
searchers.append(DirectorySearcher(NODE_MODULES.joinpath('.bin')))
from sett.gem import GEM_HOME
if GEM_HOME.exists():
searchers.append(DirectorySearcher(GEM_HOME.joinpath('bin')))
if os.environ.get('PATH'):
searchers.append(DirectoriesSearcher(os.environ['PATH'].split(':')))
return searchers
class LazyWhich(object):
NotInstalled = NotInstalled
def __init__(self, searchers_provider):
self.sp = searchers_provider
def is_evaluated(self):
return '_which' in self.__dict__
def __getattr__(self, attr):
if not self.is_evaluated():
self._which = Which(self.sp())
return getattr(self._which, attr)
def __repr__(self):
return self.__getattr__('__repr__')()
def update(self, fn=None):
if fn:
@functools.wraps(fn)
def inner_update(*args, **kw):
r = fn(*args, **kw)
self.update()
return r
return inner_update
elif self.is_evaluated():
del self._which
which = LazyWhich(default_searchers)
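# Usage sketch: `which.coverage` (or which.search('coverage')) returns the executable's path
# from the venv bin, node_modules/.bin, the gem bin dir or $PATH, or raises which.NotInstalled;
# the searchers are only built on first attribute access.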
| [
"[email protected]"
] | |
acaf735fe15e99d7e343e8c31f11012dd1eba187 | f71ee969fa331560b6a30538d66a5de207e03364 | /scripts/client/gui/clubs/states.py | 044f480507dd784df1264197b77e5f09b86db64b | [] | no_license | webiumsk/WOT-0.9.8-CT | 31356ed01cb110e052ba568e18cb2145d4594c34 | aa8426af68d01ee7a66c030172bd12d8ca4d7d96 | refs/heads/master | 2016-08-03T17:54:51.752169 | 2015-05-12T14:26:00 | 2015-05-12T14:26:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,557 | py | # Embedded file name: scripts/client/gui/clubs/states.py
from gui.shared.utils.decorators import ReprInjector
from gui.clubs.settings import CLIENT_CLUB_STATE
from gui.clubs.restrictions import AccountClubLimits, DefaultAccountClubLimits
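# Client-side club membership state machine: UnavailableClubsState until the profile syncs,
# then HasClubState, SentAppState or NoClubState depending on the profile; each state's
# update() switches the controller to the matching state as the profile changes.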
@ReprInjector.simple(('getStateID', 'state'))
class _AccountClubsState(object):
def __init__(self, stateID):
self.__stateID = stateID
def getStateID(self):
return self.__stateID
def getLimits(self):
return DefaultAccountClubLimits()
def update(self, clubsCtrl, privateData):
pass
@ReprInjector.withParent()
class UnavailableClubsState(_AccountClubsState):
def __init__(self):
super(UnavailableClubsState, self).__init__(CLIENT_CLUB_STATE.UNKNOWN)
def update(self, clubsCtrl, profile):
if profile.hasClub():
clubsCtrl._changeState(HasClubState(profile.getClubInfo(), profile.getInvites(), profile.getRestrictions()))
elif profile.wasSentApplication():
clubsCtrl._changeState(SentAppState(profile.getApplication(), profile.getInvites(), profile.getRestrictions()))
elif profile.isSynced():
clubsCtrl._changeState(NoClubState(profile.getInvites(), profile.getRestrictions()))
@ReprInjector.withParent(('getInvites', 'invites'))
class _AvailableClubState(_AccountClubsState):
def __init__(self, stateID, invites, restrs):
super(_AvailableClubState, self).__init__(stateID)
self._invites = invites
self._restrs = restrs
def getInvites(self):
return self._invites
def getLimits(self):
return AccountClubLimits(self._restrs)
def update(self, clubsCtrl, profile):
self._invites = profile.getInvites()
self._restrs = profile.getRestrictions()
@ReprInjector.withParent(('getClubDbID', 'clubDbID'), ('getJoiningTime', 'join'))
class HasClubState(_AvailableClubState):
def __init__(self, clubInfo, invites, restrs):
super(HasClubState, self).__init__(CLIENT_CLUB_STATE.HAS_CLUB, invites, restrs)
self.__clubInfo = clubInfo
def getClubDbID(self):
return self.__clubInfo.id
def getJoiningTime(self):
return self.__clubInfo.joined_at
def update(self, clubsCtrl, profile):
if not profile.isSynced():
clubsCtrl._changeState(UnavailableClubsState())
elif not profile.hasClub():
if profile.wasSentApplication():
clubsCtrl._changeState(SentAppState(profile.getApplication(), profile.getInvites(), profile.getRestrictions()))
else:
clubsCtrl._changeState(NoClubState(profile.getInvites(), profile.getRestrictions()))
else:
super(HasClubState, self).update(clubsCtrl, profile)
@ReprInjector.withParent()
class NoClubState(_AvailableClubState):
def __init__(self, invites, restrs):
super(NoClubState, self).__init__(CLIENT_CLUB_STATE.NO_CLUB, invites, restrs)
def update(self, clubsCtrl, profile):
if not profile.isSynced():
clubsCtrl._changeState(UnavailableClubsState())
elif profile.wasSentApplication():
clubsCtrl._changeState(SentAppState(profile.getApplication(), profile.getInvites(), profile.getRestrictions()))
elif profile.hasClub():
clubsCtrl._changeState(HasClubState(profile.getClubInfo(), profile.getInvites(), profile.getRestrictions()))
else:
super(NoClubState, self).update(clubsCtrl, profile)
@ReprInjector.withParent(('getClubDbID', 'club'), ('getSendingTime', 'sent'), ('getComment', 'comment'))
class SentAppState(_AvailableClubState):
def __init__(self, app, invites, restrs):
super(SentAppState, self).__init__(CLIENT_CLUB_STATE.SENT_APP, invites, restrs)
self._app = app
self._invites = invites
def getClubDbID(self):
return self._app.getClubDbID()
def getSendingTime(self):
return self._app.getTimestamp()
def getComment(self):
return self._app.getComment()
def update(self, clubsCtrl, profile):
if not profile.isSynced():
clubsCtrl._changeState(UnavailableClubsState())
elif profile.hasClub():
clubsCtrl._changeState(HasClubState(profile.getClubInfo(), profile.getInvites(), profile.getRestrictions()))
elif not profile.wasSentApplication():
clubsCtrl._changeState(NoClubState(profile.getInvites(), profile.getRestrictions()))
else:
super(SentAppState, self).update(clubsCtrl, profile)
| [
"[email protected]"
] | |
0ae864d0a423e68a0efa8509b43d6dea15776b19 | 4e89d371a5f8cca3c5c7e426af1bcb7f1fc4dda3 | /python/python_test_002/04/02.py.backup.new | f2c15f2898e8db20233471a5f599c56036c773d1 | [] | no_license | bodii/test-code | f2a99450dd3230db2633a554fddc5b8ee04afd0b | 4103c80d6efde949a4d707283d692db9ffac4546 | refs/heads/master | 2023-04-27T16:37:36.685521 | 2023-03-02T08:38:43 | 2023-03-02T08:38:43 | 63,114,995 | 4 | 1 | null | 2023-04-17T08:28:35 | 2016-07-12T01:29:24 | Go | UTF-8 | Python | false | false | 442 | new | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import time
import os
def print_dir_info(dir_path):
for name in os.listdir(dir_path):
full_path = os.path.join(dir_path, name)
file_size = os.path.getsize(full_path)
mod_time = time.ctime(os.path.getmtime(full_path))
print("%-12s: %8d bytes, modified %s" % (name, file_size, mod_time))
if __name__ == '__main__':
print_dir_info('./')
| [
"[email protected]"
] | |
9c990b250078cf9c8369a1de24ae04eab71bd38d | ebc00ddf4c8c5f5076471e8b8d56c2b634c51230 | /test/lint/lint-format-strings.py | 16dc41072bf1f3e3a7e014ad34d18c2409ba2e70 | [
"MIT"
] | permissive | BlockMechanic/rain | 584a9e245cfb7ab5fb1add97b699b86833bfbc5b | e8818b75240ff9277b0d14d38769378f05d0b525 | refs/heads/master | 2021-07-03T03:48:53.977665 | 2021-03-04T01:28:20 | 2021-03-04T01:28:20 | 228,412,343 | 0 | 0 | MIT | 2019-12-16T15:03:28 | 2019-12-16T15:03:27 | null | UTF-8 | Python | false | false | 12,727 | py | #!/usr/bin/env python3
#
# Copyright (c) 2018-2019 The Rain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Lint format strings: This program checks that the number of arguments passed
# to a variadic format string function matches the number of format specifiers
# in the format string.
import argparse
import re
import sys
FALSE_POSITIVES = [
("src/dbwrapper.cpp", "vsnprintf(p, limit - p, format, backup_ap)"),
("src/index/base.cpp", "FatalError(const char* fmt, const Args&... args)"),
("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args)"),
("src/util/system.cpp", "strprintf(_(COPYRIGHT_HOLDERS), _(COPYRIGHT_HOLDERS_SUBSTITUTION))"),
("src/util/system.cpp", "strprintf(COPYRIGHT_HOLDERS, COPYRIGHT_HOLDERS_SUBSTITUTION)"),
("src/wallet/wallet.h", "WalletLogPrintf(std::string fmt, Params... parameters)"),
("src/wallet/wallet.h", "LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"),
("src/logging.h", "LogPrintf(const char* fmt, const Args&... args)"),
]
def parse_function_calls(function_name, source_code):
"""Return an array with all calls to function function_name in string source_code.
Preprocessor directives and C++ style comments ("//") in source_code are removed.
>>> len(parse_function_calls("foo", "foo();bar();foo();bar();"))
2
>>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[0].startswith("foo(1);")
True
>>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[1].startswith("foo(2);")
True
>>> len(parse_function_calls("foo", "foo();bar();// foo();bar();"))
1
>>> len(parse_function_calls("foo", "#define FOO foo();"))
0
"""
assert type(function_name) is str and type(source_code) is str and function_name
lines = [re.sub("// .*", " ", line).strip()
for line in source_code.split("\n")
if not line.strip().startswith("#")]
return re.findall(r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name), " " + " ".join(lines))
def normalize(s):
"""Return a normalized version of string s with newlines, tabs and C style comments ("/* ... */")
replaced with spaces. Multiple spaces are replaced with a single space.
>>> normalize(" /* nothing */ foo\tfoo /* bar */ foo ")
'foo foo foo'
"""
assert type(s) is str
s = s.replace("\n", " ")
s = s.replace("\t", " ")
s = re.sub("/\*.*?\*/", " ", s)
s = re.sub(" {2,}", " ", s)
return s.strip()
ESCAPE_MAP = {
r"\n": "[escaped-newline]",
r"\t": "[escaped-tab]",
r'\"': "[escaped-quote]",
}
def escape(s):
"""Return the escaped version of string s with "\\\"", "\\n" and "\\t" escaped as
"[escaped-backslash]", "[escaped-newline]" and "[escaped-tab]".
>>> unescape(escape("foo")) == "foo"
True
>>> escape(r'foo \\t foo \\n foo \\\\ foo \\ foo \\"bar\\"')
'foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]'
"""
assert type(s) is str
for raw_value, escaped_value in ESCAPE_MAP.items():
s = s.replace(raw_value, escaped_value)
return s
def unescape(s):
"""Return the unescaped version of escaped string s.
Reverses the replacements made in function escape(s).
>>> unescape(escape("bar"))
'bar'
>>> unescape("foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]")
'foo \\\\t foo \\\\n foo \\\\\\\\ foo \\\\ foo \\\\"bar\\\\"'
"""
assert type(s) is str
for raw_value, escaped_value in ESCAPE_MAP.items():
s = s.replace(escaped_value, raw_value)
return s
def parse_function_call_and_arguments(function_name, function_call):
"""Split string function_call into an array of strings consisting of:
* the string function_call followed by "("
* the function call argument #1
* ...
* the function call argument #n
* a trailing ");"
The strings returned are in escaped form. See escape(...).
>>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
['foo(', '"%s",', ' "foo"', ')']
>>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
['foo(', '"%s",', ' "foo"', ')']
>>> parse_function_call_and_arguments("foo", 'foo("%s %s", "foo", "bar");')
['foo(', '"%s %s",', ' "foo",', ' "bar"', ')']
>>> parse_function_call_and_arguments("fooprintf", 'fooprintf("%050d", i);')
['fooprintf(', '"%050d",', ' i', ')']
>>> parse_function_call_and_arguments("foo", 'foo(bar(foobar(barfoo("foo"))), foobar); barfoo')
['foo(', 'bar(foobar(barfoo("foo"))),', ' foobar', ')']
>>> parse_function_call_and_arguments("foo", "foo()")
['foo(', '', ')']
>>> parse_function_call_and_arguments("foo", "foo(123)")
['foo(', '123', ')']
>>> parse_function_call_and_arguments("foo", 'foo("foo")')
['foo(', '"foo"', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err);')
['strprintf(', '"%s (%d)",', ' std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<wchar_t>().to_bytes(buf), err);')
['strprintf(', '"%s (%d)",', ' foo<wchar_t>().to_bytes(buf),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo().to_bytes(buf), err);')
['strprintf(', '"%s (%d)",', ' foo().to_bytes(buf),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo << 1, err);')
['strprintf(', '"%s (%d)",', ' foo << 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<bar>() >> 1, err);')
['strprintf(', '"%s (%d)",', ' foo<bar>() >> 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1 ? bar : foobar, err);')
['strprintf(', '"%s (%d)",', ' foo < 1 ? bar : foobar,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1, err);')
['strprintf(', '"%s (%d)",', ' foo < 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1 ? bar : foobar, err);')
['strprintf(', '"%s (%d)",', ' foo > 1 ? bar : foobar,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1, err);')
['strprintf(', '"%s (%d)",', ' foo > 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= 1, err);')
['strprintf(', '"%s (%d)",', ' foo <= 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= bar<1, 2>(1, 2), err);')
['strprintf(', '"%s (%d)",', ' foo <= bar<1, 2>(1, 2),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2)?bar:foobar,err)');
['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2)?bar:foobar,', 'err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2),err)');
['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2),', 'err', ')']
"""
assert type(function_name) is str and type(function_call) is str and function_name
remaining = normalize(escape(function_call))
expected_function_call = "{}(".format(function_name)
assert remaining.startswith(expected_function_call)
parts = [expected_function_call]
remaining = remaining[len(expected_function_call):]
open_parentheses = 1
open_template_arguments = 0
in_string = False
parts.append("")
for i, char in enumerate(remaining):
parts.append(parts.pop() + char)
if char == "\"":
in_string = not in_string
continue
if in_string:
continue
if char == "(":
open_parentheses += 1
continue
if char == ")":
open_parentheses -= 1
if open_parentheses > 1:
continue
if open_parentheses == 0:
parts.append(parts.pop()[:-1])
parts.append(char)
break
prev_char = remaining[i - 1] if i - 1 >= 0 else None
next_char = remaining[i + 1] if i + 1 <= len(remaining) - 1 else None
if char == "<" and next_char not in [" ", "<", "="] and prev_char not in [" ", "<"]:
open_template_arguments += 1
continue
if char == ">" and next_char not in [" ", ">", "="] and prev_char not in [" ", ">"] and open_template_arguments > 0:
open_template_arguments -= 1
if open_template_arguments > 0:
continue
if char == ",":
parts.append("")
return parts
def parse_string_content(argument):
"""Return the text within quotes in string argument.
>>> parse_string_content('1 "foo %d bar" 2')
'foo %d bar'
>>> parse_string_content('1 foobar 2')
''
>>> parse_string_content('1 "bar" 2')
'bar'
>>> parse_string_content('1 "foo" 2 "bar" 3')
'foobar'
>>> parse_string_content('1 "foo" 2 " " "bar" 3')
'foo bar'
>>> parse_string_content('""')
''
>>> parse_string_content('')
''
>>> parse_string_content('1 2 3')
''
"""
assert type(argument) is str
string_content = ""
in_string = False
for char in normalize(escape(argument)):
if char == "\"":
in_string = not in_string
elif in_string:
string_content += char
return string_content
def count_format_specifiers(format_string):
"""Return the number of format specifiers in string format_string.
>>> count_format_specifiers("foo bar foo")
0
>>> count_format_specifiers("foo %d bar foo")
1
>>> count_format_specifiers("foo %d bar %i foo")
2
>>> count_format_specifiers("foo %d bar %i foo %% foo")
2
>>> count_format_specifiers("foo %d bar %i foo %% foo %d foo")
3
>>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo")
4
"""
assert type(format_string) is str
format_string = format_string.replace('%%', 'X')
n = 0
in_specifier = False
for i, char in enumerate(format_string):
if char == "%":
in_specifier = True
n += 1
elif char in "aAcdeEfFgGinopsuxX":
in_specifier = False
elif in_specifier and char == "*":
n += 1
return n
def main():
parser = argparse.ArgumentParser(description="This program checks that the number of arguments passed "
"to a variadic format string function matches the number of format "
"specifiers in the format string.")
parser.add_argument("--skip-arguments", type=int, help="number of arguments before the format string "
"argument (e.g. 1 in the case of fprintf)", default=0)
parser.add_argument("function_name", help="function name (e.g. fprintf)", default=None)
parser.add_argument("file", nargs="*", help="C++ source code file (e.g. foo.cpp)")
args = parser.parse_args()
exit_code = 0
for filename in args.file:
with open(filename, "r", encoding="utf-8") as f:
for function_call_str in parse_function_calls(args.function_name, f.read()):
parts = parse_function_call_and_arguments(args.function_name, function_call_str)
relevant_function_call_str = unescape("".join(parts))[:512]
if (f.name, relevant_function_call_str) in FALSE_POSITIVES:
continue
if len(parts) < 3 + args.skip_arguments:
exit_code = 1
print("{}: Could not parse function call string \"{}(...)\": {}".format(f.name, args.function_name, relevant_function_call_str))
continue
argument_count = len(parts) - 3 - args.skip_arguments
format_str = parse_string_content(parts[1 + args.skip_arguments])
format_specifier_count = count_format_specifiers(format_str)
if format_specifier_count != argument_count:
exit_code = 1
print("{}: Expected {} argument(s) after format string but found {} argument(s): {}".format(f.name, format_specifier_count, argument_count, relevant_function_call_str))
continue
sys.exit(exit_code)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c8ed762106e501cfc1f293e9ce7a805a2f2f4ccf | a3a6eeb340735664c863952bf3c1e3070e61d987 | /tests/test_bools.py | b32005d2de314894aec53fd4db70bb575825a6c1 | [
"Apache-2.0"
] | permissive | DalavanCloud/tmppy | dab593789d6e1ae6a3b25db6c4b41ce4fcfb378c | cdde676ba9d5011b7d2a46a9852e5986b90edbbc | refs/heads/master | 2020-03-27T19:55:42.295263 | 2018-09-01T18:13:21 | 2018-09-01T18:13:21 | 147,021,787 | 1 | 0 | Apache-2.0 | 2018-09-01T18:14:23 | 2018-09-01T18:14:23 | null | UTF-8 | Python | false | false | 4,058 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2tmp.testing import *
@assert_compilation_succeeds()
def test_not_false():
assert not False
@assert_compilation_succeeds()
def test_not_true():
assert (not True) == False
@assert_conversion_fails
def test_not_int_error():
assert not 1 # error: The "not" operator is only supported for booleans, but this value has type int.
@assert_conversion_fails
def test_and_toplevel_error():
assert True and True # error: The "and" operator is only supported in functions, not at toplevel.
@assert_compilation_succeeds()
def test_and_true_true():
def f(x: int):
return True
def g(x: int):
return True
def h(x: int):
assert f(5) and g(5)
return True
assert h(3)
@assert_compilation_succeeds()
def test_and_true_false():
def f(x: int):
return True
def g(x: int):
return False
def h(x: int):
assert (f(5) and g(5)) == False
return True
assert h(3)
@assert_compilation_succeeds()
def test_and_false_true():
def f(x: int):
return False
def g(x: int):
assert False
return True
def h(x: int):
assert (f(5) and g(5)) == False
return True
assert h(3)
@assert_compilation_succeeds()
def test_and_false_false():
def f(x: int):
return False
def g(x: int):
assert False
return False
def h(x: int):
assert (f(5) and g(5)) == False
return True
@assert_conversion_fails
def test_and_bool_int_error():
def h(x: int):
assert (True and
1) # error: The "and" operator is only supported for booleans, but this value has type int.
return True
@assert_conversion_fails
def test_and_int_bool_error():
def h(x: int):
assert (1 # error: The "and" operator is only supported for booleans, but this value has type int.
and True)
return True
@assert_conversion_fails
def test_or_toplevel_error():
assert True or False # error: The "or" operator is only supported in functions, not at toplevel.
@assert_compilation_succeeds()
def test_or_false_false():
def f(x: int):
return False
def g(x: int):
return False
def h(x: int):
assert (f(5) or g(5)) == False
return True
assert h(3)
@assert_compilation_succeeds()
def test_or_false_true():
def f(x: int):
return False
def g(x: int):
return True
def h(x: int):
assert f(5) or g(5)
return True
assert h(3)
@assert_compilation_succeeds()
def test_or_true_false():
def f(x: int):
return True
def g(x: int):
assert False
return False
def h(x: int):
assert f(5) or g(5)
return True
assert h(3)
@assert_compilation_succeeds()
def test_or_true_true():
def f(x: int):
return True
def g(x: int):
assert False
return True
def h(x: int):
assert f(5) or g(5)
return True
assert h(3)
@assert_conversion_fails
def test_or_bool_int_error():
def h(x: int):
assert (True or
1) # error: The "or" operator is only supported for booleans, but this value has type int.
return True
@assert_conversion_fails
def test_or_int_bool_error():
def h(x: int):
assert (1 # error: The "or" operator is only supported for booleans, but this value has type int.
or True)
return True
| [
"[email protected]"
] | |
fe2b1cc542f5ae3f9d81ae04c6c874bea64f10d4 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/4f956ef173469139d65286c38d6509ea89e8d0c5urls.py | 4f956ef173469139d65286c38d6509ea89e8d0c5 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 4,309 | py | from django.conf import settings
from django.conf.urls import url
from django.views.decorators.cache import cache_page
cache = cache_page(60 * 15) if not settings.DEBUG else lambda x: x
from . import views
FeedbackSubmissionView_view = cache(views.FeedbackSubmissionView.as_view())
ManageCourseListView_view = views.ManageCourseListView.as_view()
ManageNotRespondedListView_view = views.ManageNotRespondedListView.as_view()
UserListView_view = views.UserListView.as_view()
UserFeedbackListView_view = views.UserFeedbackListView.as_view()
UserFeedbackView_view = views.UserFeedbackView.as_view()
RespondFeedbackView_view = views.respond_feedback_view_select(
views.RespondFeedbackView.as_view(),
views.RespondFeedbackViewAjax.as_view()
)
FeedbackTagView_view = views.FeedbackTagView.as_view()
PATH_REGEX = r'[\w\d\-./]'
MANAGE = r'^manage/'
MANAGE_SITE = MANAGE + r'(?P<site_id>\d+)/'
urlpatterns = [
# Aplus feedback submission
url(r'^feedback/$',
FeedbackSubmissionView_view,
name='submission'),
url(r'^feedback/(?P<path_key>{path_regex}+)$'.format(path_regex=PATH_REGEX),
FeedbackSubmissionView_view,
name='submission'),
# Feedback management and responding
url(r'^manage/$',
views.ManageSiteListView.as_view(),
name='site-list'),
url(r'^manage/courses/$',
ManageCourseListView_view,
name='course-list'),
url(r'^manage/courses/(?P<site_id>\d+)/$',
ManageCourseListView_view,
name='course-list'),
url(r'^manage/(?P<course_id>\d+)/clear-cache/$',
views.ManageClearCacheView.as_view(),
name='clear-cache'),
url(r'^manage/(?P<course_id>\d+)/update-studenttags/$',
views.ManageUpdateStudenttagsView.as_view(),
name='update-studenttags'),
url(r'^manage/(?P<course_id>\d+)/unread/$',
ManageNotRespondedListView_view,
name='notresponded-course'),
url(r'^manage/(?P<course_id>\d+)/unread/(?P<path_filter>{path_regex}*)$'.format(path_regex=PATH_REGEX),
ManageNotRespondedListView_view,
name='notresponded-course'),
url(r'^manage/(?P<course_id>\d+)/feedbacks/$',
views.ManageFeedbacksListView.as_view(),
name='list'),
url(r'^manage/(?P<course_id>\d+)/user/$',
UserListView_view,
name='user-list'),
url(r'^manage/(?P<course_id>\d+)/byuser/(?P<user_id>\d+)/$',
UserFeedbackListView_view,
name='byuser'),
url(r'^manage/(?P<course_id>\d+)/byuser/(?P<user_id>\d+)/(?P<exercise_id>\d+)/$',
UserFeedbackView_view,
name='byuser'),
url(r'^manage/(?P<course_id>\d+)/byuser/(?P<user_id>\d+)/(?P<exercise_id>\d+)/(?P<path_filter>{path_regex}*)$'.format(path_regex=PATH_REGEX),
UserFeedbackView_view,
name='byuser'),
url(r'^manage/(?P<course_id>\d+)/tags/$',
views.FeedbackTagListView.as_view(),
name='tags'),
url(r'^manage/(?P<course_id>\d+)/tags/(?P<tag_id>\d+)/$',
views.FeedbackTagEditView.as_view(),
name='tags-edit'),
url(r'^manage/(?P<course_id>\d+)/tags/(?P<tag_id>\d+)/remove/$',
views.FeedbackTagDeleteView.as_view(),
name='tags-remove'),
url(r'^manage/respond/(?P<feedback_id>\d+)/$',
RespondFeedbackView_view,
name='respond'),
url(r'^manage/tag/(?P<feedback_id>\d+)/$',
views.FeedbackTagView.as_view(),
name='tag-list'),
url(r'^manage/tag/(?P<feedback_id>\d+)/(?P<tag_id>\d+)/$',
views.FeedbackTagView.as_view(),
name='tag'),
# support for old urls
url(r'^manage/notresponded/course/(?P<course_id>\d+)/$',
ManageNotRespondedListView_view),
url(r'^manage/notresponded/course/(?P<course_id>\d+)/(?P<path_filter>{path_regex}*)$'.format(path_regex=PATH_REGEX),
ManageNotRespondedListView_view),
url(r'^manage/user/(?P<course_id>\d+)/$',
UserListView_view),
url(r'^manage/byuser/(?P<course_id>\d+)/(?P<user_id>\d+)/$',
UserFeedbackListView_view),
url(r'^manage/byuser/(?P<course_id>\d+)/(?P<user_id>\d+)/(?P<exercise_id>\d+)/$',
UserFeedbackView_view),
url(r'^manage/byuser/(?P<course_id>\d+)/(?P<user_id>\d+)/(?P<exercise_id>\d+)/(?P<path_filter>{path_regex}*)$'.format(path_regex=PATH_REGEX),
UserFeedbackView_view),
]
| [
"[email protected]"
] | |
4dbd3efc8708bcb4d34ff55293169dcabeba1df4 | 6238dc5b5818f54295547cf4cb1afa5553ddfb94 | /taobao/guagua/views.py | 0f7415da4a50128ac7344cc3b1811ed0dd18a060 | [] | no_license | liaosiwei/guagua | 8208bb82b1df5506dcb86c1a7094c849ea5576a6 | ee6025813e83568dc25beb52279c86f8bd33f1a4 | refs/heads/master | 2016-09-06T16:45:00.798633 | 2013-05-03T04:02:35 | 2013-05-03T04:02:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | # coding=UTF-8
import base64
from random import randint
from django.shortcuts import render
from django.utils import simplejson
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from utility import *
def fetchuserinfo(request):
if request.method == 'GET':
get = request.GET.copy()
if get.has_key('top_session'):
sessionkey = get['top_session']
if get.has_key('top_parameters'):
para_str = base64.b64decode(get['top_parameters'])
para_dict = dict([item.split('=') for item in para_str.split('&')])
user = User.objects.create_user(para_dict['visitor_nick'], password=para_dict['visitor_nick'])
profile = user.get_profile()
profile.taobao_id = para_dict['visitor_id']
profile.taobao_nick = para_dict['visitor_nick']
profile.sessionkey = sessionkey
profile.start_date = user.date_joined
profile.save()
user = authenticate(username = para_dict['visitor_nick'], password = para_dict['visitor_nick'])
login(request, user)
#login(request, user)
return HttpResponseRedirect('/pwd_change/')
def base(request):
return render(request, 'base.html')
def index(request):
return render(request, 'index.html')
def plot(request):
return render(request, 'plot.html')
def ajax_plotdata(request):
to_return = {'stat': 'failed', 'data': ''}
# fetch data by top interface, now generate random data
length = 80
d = [randint(0, 80) for i in range(length)]
data = [[i, sum(d[:i+1])] for i in range(length)]
to_return['stat'] = 'success'
to_return['data'] = data
serialized = simplejson.dumps(to_return)
return HttpResponse(serialized, mimetype="application/json")
def pie(request):
return render(request, 'pie.html')
def ajax_piedata(request):
to_return = {'stat': 'failed', 'data': ''}
to_return['data'] = [{'label': u'武汉', 'data': 10}, {'label': u'北京', 'data': 300}, {'label': u'上海', 'data': 420}, {'label': u'广州', 'data': 403}]
serialized = simplejson.dumps(to_return)
return HttpResponse(serialized, mimetype="application/json")
| [
"[email protected]"
] | |
83dce67026bbfad8715bd7a55601f55e28930776 | 5b46e6fd5bbd44a7ccd1333184e13fc4703a084b | /tcex/app_feature/__init__.py | be2ba3c48d248ccaaf89b0854fd3b607bb9bf2c1 | [
"Apache-2.0"
] | permissive | TpyoKnig/tcex | 1fc297a5f93a17e4bc4a7786335714eb89b6e21d | 7cf04fec048fadc71ff851970045b8a587269ccf | refs/heads/master | 2022-12-25T19:20:49.675331 | 2020-09-30T10:05:37 | 2020-09-30T10:05:37 | 254,921,370 | 0 | 0 | Apache-2.0 | 2020-08-24T23:21:27 | 2020-04-11T17:38:28 | null | UTF-8 | Python | false | false | 105 | py | """App feature module for TcEx Framework"""
# flake8: noqa
from .advanced_request import AdvancedRequest
| [
"[email protected]"
] | |
8d2d22353601d0e01717d35b5dcc9e799f41999e | 21164ca369ea3a1674c3a03bbebcd9c830d0864c | /rules/php/CVI_1010.py | 52a1c4d2d3ae1e95fdb389413593d1d3962c7073 | [
"MIT"
] | permissive | IMULMUL/Cobra-W | a02d6b512843c96685550a98d74f9d8ebe4bdfaf | de65466e53c8bd8e29a5f3e16ab91146d354c53f | refs/heads/master | 2023-07-08T19:14:35.698641 | 2023-06-27T09:31:15 | 2023-06-27T09:31:15 | 202,911,104 | 0 | 0 | MIT | 2023-06-27T23:39:53 | 2019-08-17T17:17:37 | Python | UTF-8 | Python | false | false | 1,204 | py | # -*- coding: utf-8 -*-
"""
auto rule template
~~~~
:author: LoRexxar <[email protected]>
:homepage: https://github.com/LoRexxar/Kunlun-M
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""
from utils.api import *
class CVI_1010():
"""
rule class
"""
def __init__(self):
self.svid = 1010
self.language = "php"
self.author = "LoRexxar/wufeifei"
self.vulnerability = "LDAPI"
self.description = "LDAP注入可能导致ldap的账号信息泄露"
self.level = 3
# status
self.status = True
# 部分配置
self.match_mode = "function-param-regex"
self.match = r"(ldap_add|ldap_delete|ldap_list|ldap_read|ldap_search|ldap_bind)"
# for solidity
self.match_name = None
self.black_list = None
# for chrome ext
self.keyword = None
# for regex
self.unmatch = None
self.vul_function = None
def main(self, regex_string):
"""
regex string input
:regex_string: regex match string
:return:
"""
pass
| [
"[email protected]"
] | |
143c3d4725a20e10ab6179d77ba2c658c4f44e7e | e484393a06ff65b9432116a5a08077c4431fc17f | /pipelines.py | b684ded6aa835201d1e7d97df670eb9c4df53974 | [] | no_license | knighton/seq_clf | e9adf4301844377b090601b26389ea1e4c17100f | 553aa4f990fb072d08c62667cdb179a4aa5b31a2 | refs/heads/master | 2016-09-06T15:58:22.834655 | 2015-07-26T03:59:52 | 2015-07-26T03:59:52 | 39,712,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from camacho.base import Transformer
from camacho.preprocess.sequence.coders import IntCoder
from camacho.preprocess.binarize.embeddings import Word2Vec
from camacho.preprocess.binarize.onehot import AtomBinarizer
from camacho.preprocess.sequence.max_length import MaxLength
from camacho.preprocess.sequence.min_length import MinLength
import numpy as np
def zh_int():
data = [
MaxLength(256),
MinLength(256),
IntCoder(min_freq=10),
]
labels = [
AtomBinarizer(),
]
need_to_embed = True
return data, labels, need_to_embed
class Int2Str(Transformer):
"""
Word2Vec expects string inputs.
"""
def transform(self, nnn):
return map(lambda nn: map(str, nn), nnn)
def inverse_transform(self, sss):
return map(lambda ss: map(int, ss), sss)
def zh_vec_3d():
data = [
MaxLength(256),
MinLength(256),
IntCoder(min_freq=10),
Int2Str(),
Word2Vec(16),
]
labels = [
AtomBinarizer(),
]
need_to_embed = False
return data, labels, need_to_embed
class Flatten(Transformer):
def transform(self, aaaa):
rrr = []
for aaa in aaaa:
rr = []
for aa in aaa:
rr.extend(aa)
rrr.append(rr)
return np.array(rrr)
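# Flatten concatenates each sample's per-token vectors, so zh_vec_2d() turns the
# (n_samples, 256, 16) output of Word2Vec(16) into a 2-D (n_samples, 256*16) array
# (assuming 16 is the embedding size and 256 the padded sequence length).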
def zh_vec_2d():
data = [
MaxLength(256),
MinLength(256),
IntCoder(min_freq=10),
Int2Str(),
Word2Vec(16),
Flatten(),
]
labels = [
AtomBinarizer(),
]
need_to_embed = False
return data, labels, need_to_embed
| [
"[email protected]"
] | |
284778b2959e8c3cb457a8b8c097bde8b17187ca | a290f2a3dbe72b87b0018a61a67df17b0c5e71fe | /web_test/assist/selene/__init__.py | 6b94f45e3b6c61a01c07359459058afad757ff24 | [
"MIT"
] | permissive | yashaka/python-web-test | 4d10028c28e6239842318872ca6e36d0823e23a1 | 0630a61ba338195028753954e73218f5219e2c02 | refs/heads/master | 2022-09-19T23:42:15.848396 | 2022-09-05T19:25:53 | 2022-09-05T19:25:53 | 243,817,484 | 59 | 16 | MIT | 2021-06-15T19:16:47 | 2020-02-28T17:24:20 | Python | UTF-8 | Python | false | false | 29 | py | from . import report, shared
| [
"[email protected]"
] | |
316836557b9fa1f0953b5a1ffcc5a776f4ef8968 | e0731ac7bd6a9fcb386d9c5d4181c9d549ab1d02 | /desafio23.py | 3014731320bdb0ff34ffd742354f1f5a90a9b917 | [] | no_license | lportinari/Desafios-Python-Curso-em-Video | 3ab98b87a2178448b3e53031b86522558c31c099 | cd7662ddfe371e48e5aabc6e86e23dc6337405fb | refs/heads/master | 2020-04-29T11:09:25.689901 | 2019-06-23T23:58:06 | 2019-06-23T23:58:06 | 176,087,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | """
Python Exercise 023: Write a program that reads a number from 0 to 9999 and shows each of its digits separately on screen.
"""
print('.:DESCUBRA A UNIDADE, DEZENA, CENTENA E MILHAR DE UM NÚMERO:.')
n = int(input('Digite um número de 0 a 9999: '))
u = n // 1 % 10
d = n // 10 % 10
c = n // 100 % 10
m = n // 1000 % 10
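# e.g. n = 1523 -> u = 3, d = 2, c = 5, m = 1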
# Color module (ANSI escape codes)
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'verde':'\033[32m',
'amarelo':'\033[33m',
'roxo':'\033[35m'}
print('Analisando o número {}'.format(n))
print('{}Unidade{}: {}'.format(cores['azul'], cores['limpa'], u))
print('{}Dezena{}: {}'.format(cores['verde'], cores['limpa'], d))
print('{}Centena{}: {}'.format(cores['amarelo'], cores['limpa'], c))
print('{}Milhar{}: {}'.format(cores['roxo'], cores['limpa'], m))
| [
"[email protected]"
] | |
765a43a4671b375f00fee86a67821c3abe3fe0af | a79d7f5d0ff7f4ad2c5755f74a8685123eb79157 | /week3/sigma.py | f411312b7d7dc6645756ecb7c536a902bd38f241 | [] | no_license | ArtamoshinN/python | 54c4adf5359fd18c55ce8268c4dde89d1e8b203c | 3c9c69f06e6d1ab890ed7ae057bf331d2d007539 | refs/heads/master | 2021-01-24T03:03:49.064277 | 2018-07-03T18:47:56 | 2018-07-03T18:47:56 | 122,875,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | import math
x = int(input())
i = 0
kv = 0
sum = 0
while x != 0:
kv = kv + x ** 2
sum = sum + x
x = int(input())
i += 1
s = float(sum / i)
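# Sample standard deviation from running sums: sum((x - s)**2) == kv - 2*s*sum + i*s**2.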
sigma = float(math.sqrt((kv - 2 * s * sum + i * s ** 2) / (i - 1)))
print('{0:.11f}'.format(sigma))
| [
"[email protected]"
] | |
567f7dbea1bbe7373fa2366bb54bdc714defee20 | fc4aaf15ef2b10c89728651316300ada27f14ae3 | /Loginapp/views.py | c9b90dbe4cb42e79dd883f1bc474e925c892da44 | [] | no_license | ethicalrushi/seller | 7f6231e710b32e8d7700e32e321879728de65546 | 9473fcb6595c27c7adbcea24990b6e8f006d3e8a | refs/heads/master | 2020-03-08T04:00:59.089892 | 2018-04-04T17:48:19 | 2018-04-04T17:48:19 | 126,335,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | from django.shortcuts import render
from django.contrib.auth.models import User
from .forms import UserForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login,logout,authenticate
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
# Create your views here.
def register(request):
registered = False
if request.method == "POST":
user_form = UserForm(data=request.POST)
if user_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
registered = True
else:
print(user_form.errors)
else:
user_form = UserForm()
return render(request,'Loginapp/registeration.html',
{'user_form':user_form,
'registered':registered,
})
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request,user)
return HttpResponseRedirect('/product/')
else:
return HttpResponse("Account Not Active")
else:
print("Someone tried to login and failed!")
return HttpResponse("Invalid login details")
else:
return render(request,'Loginapp/login.html')
@login_required
def user_logout(request):
logout(request)
return render(request,'basic_app/index.html')
| [
"[email protected]"
] | |
feba07fb944a5622f047e5052f424b91535a4577 | dd6a3615d54ca825051f1c9f81bcd206eb9cfd10 | /mps/utils/general_utils.py | 1aca50e963278deef17bd3364f1b805354b72975 | [
"MIT"
] | permissive | cherakhan/mps | 82e06aea229b2047bf1be68c4430fad621189abf | 2ba818c361e467841f6bbe0ef47a1e833ef315d3 | refs/heads/master | 2022-02-01T20:40:07.327357 | 2019-06-10T03:43:49 | 2019-06-10T03:43:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,986 | py | """
A collection of very generic python utilities.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=no-name-in-module
# pylint: disable=invalid-name
# pylint: disable=relative-import
import numpy as np
from scipy.sparse import dok_matrix
from scipy.linalg import solve_triangular
def map_to_cube(pts, bounds):
""" Maps bounds to [0,1]^d and returns the representation in the cube. """
return (pts - bounds[:, 0])/(bounds[:, 1] - bounds[:, 0])
def map_to_bounds(pts, bounds):
""" Given a point in [0,1]^d, returns the representation in the original space. """
return pts * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
def get_sublist_from_indices(orig_list, idxs):
""" Returns a sublist from the indices. orig_list can be anthing that can be
indexed and idxs are a list of indices. """
return [orig_list[idx] for idx in idxs]
def compute_average_sq_prediction_error(Y1, Y2):
""" Returns the average prediction error. """
return np.linalg.norm(np.array(Y1) - np.array(Y2))**2 / len(Y1)
def dist_squared(X1, X2):
""" If X1 is n1xd and X2 is n2xd, this returns an n1xn2 matrix where the (i,j)th
entry is the squared distance between X1(i,:) and X2(j,:).
"""
n1, dim1 = X1.shape
n2, dim2 = X2.shape
if dim1 != dim2:
raise ValueError('Second dimension of X1 and X2 should be equal.')
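  # Vectorized form of the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y,
  # evaluated for all pairs at once via the outer products below.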
dist_sq = (np.outer(np.ones(n1), (X2**2).sum(axis=1))
+ np.outer((X1**2).sum(axis=1), np.ones(n2))
- 2*X1.dot(X2.T))
dist_sq = np.clip(dist_sq, 0.0, np.inf)
return dist_sq
def project_symmetric_to_psd_cone(M, is_symmetric=True, epsilon=0):
""" Projects the symmetric matrix M to the PSD cone. """
if is_symmetric:
try:
eigvals, eigvecs = np.linalg.eigh(M)
except np.linalg.LinAlgError:
eigvals, eigvecs = np.linalg.eig(M)
eigvals = np.real(eigvals)
eigvecs = np.real(eigvecs)
else:
eigvals, eigvecs = np.linalg.eig(M)
clipped_eigvals = np.clip(eigvals, epsilon, np.inf)
return (eigvecs * clipped_eigvals).dot(eigvecs.T)
def stable_cholesky(M, add_to_diag_till_psd=True):
""" Returns L, a 'stable' cholesky decomposition of M. L is lower triangular and
satisfies L*L' = M.
Sometimes nominally psd matrices are not psd due to numerical issues. By adding a
small value to the diagonal we can make it psd. This is what this function does.
      Use this iff you know that M should be psd. We do not check for errors.
"""
_printed_warning = False
if M.size == 0:
return M # if you pass an empty array then just return it.
try:
# First try taking the Cholesky decomposition.
L = np.linalg.cholesky(M)
except np.linalg.linalg.LinAlgError as e:
# If it doesn't work, then try adding diagonal noise.
if not add_to_diag_till_psd:
raise e
diag_noise_power = -11
max_M = np.diag(M).max()
diag_noise = np.diag(M).max() * 1e-11
chol_decomp_succ = False
while not chol_decomp_succ:
try:
diag_noise = (10 ** diag_noise_power) * max_M
L = np.linalg.cholesky(M + diag_noise * np.eye(M.shape[0]))
chol_decomp_succ = True
except np.linalg.linalg.LinAlgError:
if diag_noise_power > -9 and not _printed_warning:
from warnings import warn
warn(('Could not compute Cholesky decomposition despite adding %0.4f to the '
'diagonal. This is likely because the M is not positive semi-definite.')%(
(10**diag_noise_power) * max_M))
_printed_warning = True
diag_noise_power += 1
if diag_noise_power >= 5:
raise ValueError(('Could not compute Cholesky decomposition despite adding' +
' %0.4f to the diagonal. This is likely because the M is not ' +
'positive semi-definite or has infinities/nans.')%(diag_noise))
return L
# Solving triangular matrices -----------------------
def _solve_triangular_common(A, b, lower):
""" Solves Ax=b when A is a triangular matrix. """
if A.size == 0 and b.shape[0] == 0:
return np.zeros((b.shape))
else:
return solve_triangular(A, b, lower=lower)
def solve_lower_triangular(A, b):
""" Solves Ax=b when A is lower triangular. """
return _solve_triangular_common(A, b, lower=True)
def solve_upper_triangular(A, b):
""" Solves Ax=b when A is upper triangular. """
return _solve_triangular_common(A, b, lower=False)
def draw_gaussian_samples(num_samples, mu, K):
""" Draws num_samples samples from a Gaussian distribution with mean mu and
covariance K.
"""
num_pts = len(mu)
L = stable_cholesky(K)
U = np.random.normal(size=(num_pts, num_samples))
V = L.dot(U).T + mu
return V
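# Example usage (sketch added for illustration, not part of the original module):
#   mu = np.zeros(3)
#   K = np.eye(3) + 0.1 * np.ones((3, 3))
#   samples = draw_gaussian_samples(5, mu, K)   # array of shape (5, 3)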
# Executing a Python string -------------------------------------------------------
def evaluate_strings_with_given_variables(_strs_to_execute, _variable_dict=None):
""" Executes a list of python strings and returns the results as a list.
variable_dict is a dictionary mapping strings to values which may be used in
str_to_execute.
"""
if _variable_dict is None:
_variable_dict = {}
if not isinstance(_strs_to_execute, list):
_got_list_of_constraints = False
_strs_to_execute = [_strs_to_execute]
else:
_got_list_of_constraints = True
for _key, _value in _variable_dict.items():
locals()[_key] = _value
_ret = [eval(_elem) for _elem in _strs_to_execute]
if _got_list_of_constraints:
return _ret
else:
return _ret[0]
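# Example usage (sketch added for illustration, not part of the original module):
#   evaluate_strings_with_given_variables(['x + 1', 'x * 2'], {'x': 3})  # -> [4, 6]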
# Matrix/Array/List utilities ------------------------------------------------------
def get_nonzero_indices_in_vector(vec):
""" Returns the nonzero indices in the vector vec. """
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec.todense()).ravel()
ret, = vec.nonzero()
return ret
def reorder_list_or_array(M, ordering):
""" Reorders a list or array like object. """
if isinstance(M, list):
return [M[i] for i in ordering]
else:
return M[ordering]
def reorder_rows_and_cols_in_matrix(M, ordering):
""" Reorders the rows and columns in matrix M. """
array_type = type(M)
if array_type == dok_matrix: # Check if a sparse matrix to convert to array
M = np.asarray(M.todense())
elif array_type == list:
M = np.array(M)
# Now do the reordering
M = M[:, ordering][ordering]
# Convert back
if array_type == dok_matrix: # Check if a sparse matrix for return
M = dok_matrix(M)
elif array_type == list:
M = [list(m) for m in M]
return M
def _set_coords_to_val(A, coords, val):
""" Sets the indices in matrix A to value. """
for coord in coords:
A[coord[0], coord[1]] = val
def get_dok_mat_with_set_coords(n, coords):
""" Returns a sparse 0 matrix with the coordinates in coords set to 1. """
A = dok_matrix((n, n))
_set_coords_to_val(A, coords, 1)
return A
def block_augment_array(A, B, C, D):
""" Given a n1xn2 array A, an n1xn3 array B, an n4xn5 array C, and a
n4x(n2 + n3 - n5) array D, this returns (n1+n4)x(n2+n3) array of the form
      [A, B; C, D].
"""
AB = np.hstack((A, B))
CD = np.hstack((C, D))
return np.vstack((AB, CD))
def generate_01_grid(size_per_dim, dim=None, as_mesh=False):
""" Generates a grid in [0,1]^d having sizes size_per_dim[i] on dimension i.
if as_mesh is True, it returns the grid as a mesh. Otherwise as an nxdim array.
"""
if not hasattr(size_per_dim, '__iter__'):
size_per_dim = [size_per_dim] * dim
grid_on_each_dim = []
for curr_size in size_per_dim:
curr_grid = np.linspace(0, 1, curr_size + 1)[:-1]
curr_grid = curr_grid + 0.5/float(curr_size)
grid_on_each_dim.append(curr_grid)
mgrid = np.meshgrid(*grid_on_each_dim)
if as_mesh:
ret = mgrid
else:
ret = np.array([elem.flatten() for elem in mgrid]).T
tmp = np.array(ret[:, 1])
ret[:, 1] = np.array(ret[:, 0])
ret[:, 0] = tmp
return ret
# For sampling based on fitness values -------------------------------------------------
# These are used in the GA and BO algorithms.
def get_exp_probs_from_fitness(fitness_vals, scaling_param=None, scaling_const=None):
""" Returns sampling probabilities from fitness values; the fitness values are
exponentiated and used as probabilities.
"""
fitness_vals = np.array(fitness_vals)
if scaling_param is None:
scaling_const = scaling_const if scaling_const is not None else 0.5
scaling_param = scaling_const * fitness_vals.std()
mean_param = fitness_vals.mean()
exp_probs = np.exp((fitness_vals - mean_param)/scaling_param)
return exp_probs/exp_probs.sum()
def sample_according_to_exp_probs(fitness_vals, num_samples, replace=False,
scaling_param=None, scaling_const=None):
""" Samples after exponentiating the fitness values. """
exp_probs = get_exp_probs_from_fitness(fitness_vals, scaling_param, scaling_const)
return np.random.choice(len(fitness_vals), num_samples, p=exp_probs, replace=replace)
| [
"[email protected]"
] | |
e8e8dbb1faee07802f147df09b256cc61a91d28a | 9676cae4726eda7308502b0b5753544cef20921c | /image_to_features.py | edc88e1540a9f3223206e03a9c3f8c57184160fe | [] | no_license | amnh-sciviz/image-collection | f8f4b88e1c5c3b8feb975225a4cac97402098410 | 497c7bf21b1b7c392a3219b69acb6981e9c3cebc | refs/heads/master | 2020-05-02T07:34:20.706841 | 2019-06-13T18:57:12 | 2019-06-13T18:57:12 | 177,821,513 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | # -*- coding: utf-8 -*-
# Adapted from:
# https://github.com/ml4a/ml4a-guides/blob/master/notebooks/image-search.ipynb
import argparse
import bz2
import glob
import numpy as np
import os
import pickle
from sklearn.decomposition import PCA
import sys
from lib.utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILES", default="images/photographic_thumbnails/*.jpg", help="Input file pattern")
parser.add_argument('-size', dest="IMAGE_SIZE", default="224x224", help="Resize images to this size")
parser.add_argument('-pca', dest="PCA_COMPONENTS", default=256, type=int, help="Principal component analysis (PCA) components to reduce down to")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/photographic_features.p.bz2", help="Pickle cache file to store features")
a = parser.parse_args()
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
import keras
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from keras.models import Model
IMAGE_SIZE = tuple([int(d) for d in a.IMAGE_SIZE.strip().split("x")])
# Read files
files = glob.glob(a.INPUT_FILES)
files = sorted(files)
fileCount = len(files)
print("Found %s files" % fileCount)
# Load model, feature extractor
model = keras.applications.VGG16(weights='imagenet', include_top=True)
feat_extractor = Model(inputs=model.input, outputs=model.get_layer("fc2").output)
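# The "fc2" layer of VGG16 yields a 4096-dimensional embedding per image,
# which is why the features array below is sized (fileCount, 4096).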
print("Extracting features from each image...")
features = np.zeros((fileCount, 4096), dtype=np.float32)
for i, fn in enumerate(files):
im = image.load_img(fn, target_size=model.input_shape[1:3])
x = image.img_to_array(im)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
feat = feat_extractor.predict(x)[0]
features[i] = feat
printProgress(i+1, fileCount)
print("Reducing feature vectors down to %s features..." % a.PCA_COMPONENTS)
pca = PCA(n_components=a.PCA_COMPONENTS)
pca.fit(features)
pca_features = pca.transform(features)
print("Saving features file %s..." % a.OUTPUT_FILE)
makeDir(a.OUTPUT_FILE)
pickle.dump(pca_features, bz2.open(a.OUTPUT_FILE, 'wb'))
print("Done.")
| [
"[email protected]"
] | |
8647ea711a5c6d6c875b5c47c301901ed1ec3b8f | dffd7156da8b71f4a743ec77d05c8ba031988508 | /ac/nikkei2019-2-qual/nikkei2019_2_qual_b/8360228.py | b7005d58d66843ad71530b0a48a1340716f7b16f | [] | no_license | e1810/kyopro | a3a9a2ee63bc178dfa110788745a208dead37da6 | 15cf27d9ecc70cf6d82212ca0c788e327371b2dd | refs/heads/master | 2021-11-10T16:53:23.246374 | 2021-02-06T16:29:09 | 2021-10-31T06:20:50 | 252,388,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py |
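# Explanatory note (added): a[i] is the required distance of vertex i+1 from
# vertex 1, and b[d] counts vertices at depth d. Assuming the usual counting
# argument for this problem, each vertex at depth d+1 may choose any of the
# b[d] vertices at depth d as its parent, so the number of valid trees is
# prod(b[d] ** b[d+1]) mod 998244353, provided depth 0 holds exactly vertex 1
# and the depths have no gaps.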
n, *a = map(int, open(0).read().split())
if a[0]!=0:
print(0)
exit()
b = [0]*n
for i in a: b[i]+=1
if b[0]!=1:
print(0)
exit()
ans = 1
i = 0
while i<n-1:
if b[i]==0:
if sum(b[i:])>0:
print(0)
exit()
else:
break
ans *= b[i]**b[i+1]
i += 1
print(ans%998244353)
| [
"[email protected]"
] | |
70f9502dd64dd7794bbebc3848a84c585299fe52 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03944/s429630442.py | 448282dd05c588cb880bfe4c06969e1a1107c06b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | W,H,N = map(int,input().split())
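# Explanatory note (added): every operation paints an axis-aligned half of the
# grid black, so a cell stays white only if its column and its row were never
# covered. Wst[j] / Hst[i] track the columns / rows that remain white, and the
# remaining white area is simply sum(Wst) * sum(Hst).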
lst = [list(map(int,input().split())) for i in range(N)]
#print(W,H,N)
#print(lst)
#lst = sorted(lst, key=lambda x: x[2])
#print(lst)
Wst = [1]*W
Hst = [1]*H
for i in range(N):
if lst[i][2] == 1:
for j in range(lst[i][0]):
Wst[j] = 0
if lst[i][2] == 2:
#print(lst[i][0],W)
for j in range(lst[i][0],W):
Wst[j] = 0
if lst[i][2] == 3:
for j in range(lst[i][1]):
Hst[j] = 0
if lst[i][2] == 4:
#print(lst[i][0],W)
for j in range(lst[i][1],H):
Hst[j] = 0
#print(Wst,Hst)
print(sum(Wst)*sum(Hst)) | [
"[email protected]"
] | |
411bd049e67d923c60ea0feb3624ea38d0df9624 | 63ce91bae5eeadf885262b8fe0e769a64454d257 | /models/se_resnet.py | c72e3972b948675e500da05f177dd64992d0e48e | [
"Apache-2.0"
] | permissive | Data-drone/cv_experiments | c7349e7808f7f9c1315ce1efe33be1f86f4a9f80 | d6e1d9716c03a9165e3d8a08f4cc1287323a56ca | refs/heads/master | 2021-06-26T04:33:10.079771 | 2021-01-19T11:40:30 | 2021-01-19T11:40:30 | 196,596,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,701 | py | ### SE Layer from: https://arxiv.org/abs/1709.01507
# based on: https://github.com/moskomule/senet.pytorch/blob/master/senet/se_resnet.py
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from torchvision.models import ResNet
from .se_module import SELayer
# needs adapting to Torchvision-style calls to integrate
__all__ = ['se_resnet18', 'se_resnet34', 'se_resnet50', 'se_resnet101', 'se_resnet152']
# expects pth files
model_urls = {
'se_resnet18': '',
'se_resnet34': '',
'se_resnet50': '',
'se_resnet101': '',
'se_resnet152': ''
}
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class SEBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,
*, reduction=16):
super(SEBasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1)
self.bn2 = nn.BatchNorm2d(planes)
self.se = SELayer(planes, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,
*, reduction=16):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se = SELayer(planes * 4, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def _se_resnet(arch, block, layers, pretrained, progress, **kwargs):
# adapted from the _resnet function in torch vision
model = ResNet(block, layers, **kwargs)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def se_resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
with added Squeeze Excitation layers from
"Squeeze-and-Excitation Networks" <https://arxiv.org/abs/1709.01507>
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _se_resnet('se_resnet18', SEBasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def se_resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
with added Squeeze Excitation layers from
"Squeeze-and-Excitation Networks" <https://arxiv.org/abs/1709.01507>
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _se_resnet('se_resnet34', SEBasicBlock, [3,4,6,3], pretrained, progress,
**kwargs)
def se_resnet50(pretrained=False, progress=True, **kwargs):
"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
with added Squeeze Excitation layers from
"Squeeze-and-Excitation Networks" <https://arxiv.org/abs/1709.01507>
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _se_resnet('se_resnet50', SEBottleneck, [3,4,6,3], pretrained, progress,
**kwargs)
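# Usage sketch (not part of the original file; assumes torchvision-style kwargs
# such as num_classes). Note that pretrained=True would fail here because the
# model_urls entries above are empty placeholders:
#   model = se_resnet50(num_classes=1000)
#   logits = model(torch.randn(1, 3, 224, 224))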
def se_resnet101(pretrained=False, progress=True, **kwargs):
"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
with added Squeeze Excitation layers from
"Squeeze-and-Excitation Networks" <https://arxiv.org/abs/1709.01507>
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _se_resnet('se_resnet101', SEBottleneck, [3,4,23,3], pretrained, progress,
**kwargs)
def se_resnet152(pretrained=False, progress=True, **kwargs):
"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
with added Squeeze Excitation layers from
"Squeeze-and-Excitation Networks" <https://arxiv.org/abs/1709.01507>
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _se_resnet('se_resnet152', SEBottleneck, [3,8,36,3], pretrained, progress,
**kwargs)
### review these later
class CifarSEBasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, reduction=16):
super(CifarSEBasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.se = SELayer(planes, reduction)
if inplanes != planes:
self.downsample = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes))
else:
self.downsample = lambda x: x
self.stride = stride
def forward(self, x):
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
out += residual
out = self.relu(out)
return out
class CifarSEResNet(nn.Module):
def __init__(self, block, n_size, num_classes=10, reduction=16):
super(CifarSEResNet, self).__init__()
self.inplane = 16
self.conv1 = nn.Conv2d(
3, self.inplane, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplane)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(
block, 16, blocks=n_size, stride=1, reduction=reduction)
self.layer2 = self._make_layer(
block, 32, blocks=n_size, stride=2, reduction=reduction)
self.layer3 = self._make_layer(
block, 64, blocks=n_size, stride=2, reduction=reduction)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(64, num_classes)
self.initialize()
def initialize(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, reduction):
strides = [stride] + [1] * (blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.inplane, planes, stride, reduction))
self.inplane = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class CifarSEPreActResNet(CifarSEResNet):
def __init__(self, block, n_size, num_classes=10, reduction=16):
super(CifarSEPreActResNet, self).__init__(
block, n_size, num_classes, reduction)
self.bn1 = nn.BatchNorm2d(self.inplane)
self.initialize()
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.bn1(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def se_resnet20(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = CifarSEResNet(CifarSEBasicBlock, 3, **kwargs)
return model
def se_resnet32(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = CifarSEResNet(CifarSEBasicBlock, 5, **kwargs)
return model
def se_resnet56(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = CifarSEResNet(CifarSEBasicBlock, 9, **kwargs)
return model
def se_preactresnet20(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = CifarSEPreActResNet(CifarSEBasicBlock, 3, **kwargs)
return model
def se_preactresnet32(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = CifarSEPreActResNet(CifarSEBasicBlock, 5, **kwargs)
return model
def se_preactresnet56(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = CifarSEPreActResNet(CifarSEBasicBlock, 9, **kwargs)
return model | [
"[email protected]"
] | |
58d298ec0522399c2673710a99ea5800b973b8b4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03328/s169266542.py | f38d282a01fae28bcf1d5c8f04c9ec5cb6991a81 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | a, b = list(map(int, input().split()))
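# Explanatory note (added): tower k has height 1 + 2 + ... + k, so for two
# adjacent towers the visible difference b - a equals k + 1. Hence the shorter
# tower's full height is 1 + 2 + ... + (sa - 1), accumulated below, and the
# snow depth is that height minus the visible part a.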
a_height = 0
b_height = 0
sa = abs(a - b)
for i in range(1, sa):
a_height += i
# for i in range(1, sa + 1):
# b_height += i
# print(a_height)
# print(b_height)
print(a_height - a) | [
"[email protected]"
] | |
d6c18edef9ba8d5cfaf660eac3864401b7c94e93 | 41ca95879e0f0908ba824074abd43e76177a0d54 | /google-datacatalog-rdbms-connector/tests/google/datacatalog_connectors/rdbms/prepare/sql_objects/sql_objects_assembled_entry_factory_test.py | c235001590ab3040f6f42cc1ba8bcdd5b71bbde1 | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/datacatalog-connectors-rdbms | b3765cd0789cc803abe554d00a266fcab1388f88 | 3b1a2729896542f36487e84a13e80dcb6c2aa3d9 | refs/heads/master | 2023-05-15T16:43:21.811232 | 2021-12-06T22:25:54 | 2021-12-06T22:25:54 | 259,464,905 | 73 | 63 | Apache-2.0 | 2021-12-06T22:25:55 | 2020-04-27T21:51:36 | Python | UTF-8 | Python | false | false | 3,725 | py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import unittest
from google.cloud import datacatalog
from google.datacatalog_connectors.commons_test import utils
from google.datacatalog_connectors.rdbms.prepare.sql_objects import \
sql_objects_assembled_entry_factory
class SQLObjectsAssembledEntryFactoryTestCase(unittest.TestCase):
__MODULE_PATH = '{}/..'.format(os.path.dirname(os.path.abspath(__file__)))
__PROJECT_ID = 'test_project'
__LOCATION_ID = 'location_id'
__ENTRY_GROUP_ID = 'oracle'
__MOCKED_ENTRY_PATH = 'mocked_entry_path'
__METADATA_SERVER_HOST = 'metadata_host'
__PREPARE_PACKAGE = 'google.datacatalog_connectors.rdbms.prepare'
@mock.patch('{}.sql_objects.sql_objects_datacatalog_tag_factory'
'.SQLObjectsDataCatalogTagFactory'.format(__PREPARE_PACKAGE))
@mock.patch('{}.sql_objects.sql_objects_datacatalog_entry_factory'
'.SQLObjectsDataCatalogEntryFactory'.format(__PREPARE_PACKAGE))
def setUp(self, mock_entry_factory, mock_tag_factory):
metadata = \
utils.Utils.convert_json_to_object(
self.__MODULE_PATH,
'metadata_with_sql_objects.json')['sql_objects']
sql_objects_config = \
utils.Utils.convert_json_to_object(self.__MODULE_PATH,
'sql_objects_config.json')
self.__sql_objects_config = sql_objects_config
self.__metadata = metadata
self.__entry_factory = mock_entry_factory.return_value
self.__tag_factory = mock_tag_factory.return_value
tag_templates_dict = {
'{}_{}_metadata'.format(self.__ENTRY_GROUP_ID, 'function'):
datacatalog.TagTemplate(),
'{}_{}_metadata'.format(self.__ENTRY_GROUP_ID, 'stored_procedure'):
datacatalog.TagTemplate()
}
self.__factory = sql_objects_assembled_entry_factory. \
SQLObjectsAssembledEntryFactory(
self.__PROJECT_ID,
self.__LOCATION_ID,
self.__METADATA_SERVER_HOST,
self.__ENTRY_GROUP_ID,
sql_objects_config,
tag_templates_dict)
def test_constructor_should_set_instance_attributes(self):
attrs = self.__factory.__dict__
self.assertEqual(
self.__entry_factory, attrs['_SQLObjectsAssembledEntryFactory'
'__datacatalog_entry_factory'])
def test_make_entries_for_sql_objects_should_create_entries(self):
entry_factory = self.__entry_factory
entry_factory.\
make_entry_for_sql_object.return_value = 'f_entry_id', {}
entry_factory.\
make_entry_for_sql_object.return_value = 'sp_entry_id', {}
tag_factory = self.__tag_factory
tag_factory.make_tags_for_sql_object.return_value = [{}]
assembled_entries = self.__factory.make_entries(self.__metadata)
self.assertEqual(4, len(assembled_entries))
self.assertEqual(1, len(assembled_entries[0].tags))
self.assertEqual(1, len(assembled_entries[1].tags))
| [
"[email protected]"
] | |
9dc682d35cab375572088c33f58ee8f39c4985f7 | 6930a434c0506d44bf8a8e81cb86e95c219c3a77 | /python/学生信息管理系统/更改全部信息的管理.py | 7949250894e7e403136252e8746a824fcf286b81 | [] | no_license | Conquerk/test | ed15d5603538340559556c9e0f20cc61ad3e4486 | 7ff42c99b8a2132c6dd1c73315ff95cfef63a8f6 | refs/heads/master | 2020-04-19T01:47:28.322929 | 2019-01-28T01:52:00 | 2019-01-28T01:52:00 | 167,882,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | l=[]
def guanli():
while True:
print("+"+"-"*20+"+")
print("|"+str("1.添加学生信息").center(14)+"|")
print("|"+str("2.显示学生信息").center(14)+"|")
print("|"+str("3.删除学生信息").center(14)+"|")
print("|"+str("4.修改学生成绩").center(14)+"|")
print("|"+str("q.退出").center(18)+"|")
print("+"+"-"*20+"+")
x=input("输入菜单功能:")
if x=="1":
myinput()
elif x=="2":
mybiaoge()
elif x=="3":
delete()
elif x=="4":
change()
elif x=="q":
break
def myinput():
global l
while True:
n=input("请输入姓名:")
if not n :
break
s=int(input("请输入年龄:"))
a=int(input("请输入成绩:"))
d={}
d["name"]=n
d["age"]=s
d["score"]=a
l.append(d)
def mybiaoge():
global l
print("+-------------+------------+------------+")
print("| name | age | score |")
print("+-------------+------------+------------+")
for x in l :
line="|"+x["name"].center(11)
line+="|"+str(x["age"]).center(12)
line+="|"+str(x["score"]).center(12)+"|"
print(line)
print("+-------------+------------+------------+")
def delete():
    global l
    y=input("请输入删除的学生信息")
    # Deleting from the list while iterating over it skips elements and breaks
    # the running index, so rebuild the list without the matching students.
    l = [x for x in l if y not in x["name"]]
def change():
global l
y=input("请输入要修改学生信息的名字:")
for x in l :
if y in x["name"]:
a=input("请输入要修改的信息")
if a =="name":
n=input("新的名字")
x["name"]=n
if a =="age":
p=int(input("请输入新的年龄"))
x["age"]=p
if a=="score":
t=int(input("请输入新的成绩:"))
x["score"]=t
guanli() | [
"[email protected]"
] | |
dad36a3496dbc38de44bdbea546b2ae30e6a8a21 | 092b753aa30b5a050ad6e502a8e46cf5d88ac3db | /easy_7.py | ed00499027f5c7f9482f42e8e7c7740108a31118 | [] | no_license | BrettMcGregor/dailyprogrammer | 2ba33e7501c1198c69ae391ec85d36de1511d9cc | f954ffcbb6430be0a96115246bed2fbe20ef0132 | refs/heads/master | 2020-03-21T06:41:53.490354 | 2018-07-17T04:45:31 | 2018-07-17T04:45:31 | 138,235,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | # Write a program that can translate Morse code in the format of
# ...---...
#
# A space and a slash will be placed between words. ..- / --.-
#
# For bonus, add the capability of going from a string to Morse code.
#
# Super-bonus if your program can flash or beep the Morse.
#
# This is your Morse to translate:
code = (".... . .-.. .-.. --- / -.. .- .. .-.. -.-- / .--. .-. --- --. .-. .- -- -- . .-. / "
"--. --- --- -.. / .-.. ..- -.-. -.- / --- -. / - .... . / "
"-.-. .... .- .-.. .-.. . -. --. . ... / - --- -.. .- -.--")
morse = {'A':'.-', 'B':'-...', 'C':'-.-.', 'D':'-..', 'E':'.', 'F':'..-.',
'G':'--.', 'H':'....', 'I':'..', 'J':'.---', 'K':'-.-', 'L':'.-..',
'M':'--', 'N':'-.', 'O':'---', 'P':'.--.', 'Q':'--.-', 'R':'.-.',
'S':'...', 'T':'-', 'U':'..-', 'V':'...-', 'W':'.--', 'X':'-..-',
'Y':'-.--', 'Z':'--..', '1':'.----', '2':'..---', '3':'...--',
'4':'....-', '5':'.....', '6':'-....', '7':'--...', '8':'---..',
'9':'----.', '0':'-----', ' ':'/'}
def morse_decode(source):
words = []
decode = []
for word in source.split(" / "):
words.append(word.split(" "))
for word in words:
string = ""
for let in word:
for k, v in morse.items():
if let == v:
string += k
decode.append(string.lower())
return " ".join(decode)
def morse_code(s):
    encode = []
    for letter in s:
        for key in morse.keys():
            if letter.upper() == key:
                # append this letter's code once; ' ' maps to '/' so the
                # " ".join below keeps words separated by " / "
                encode.append(morse[key])
    return " ".join(encode)
# print(morse_code(input("Enter text to convert to Morse code\n> ")))
print(morse_decode(code))
print(morse_code(morse_decode(code)))
print(morse_decode(morse_code(morse_decode(code))))
| [
"[email protected]"
] | |
5c63e3a2f515983d52df167049fb851ea942caad | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.0_rd=1_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=0/sched.py | c5eaad177d1026b0984b6b39b4a8cf663c74b219 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | -X FMLP -Q 0 -L 2 81 400
-X FMLP -Q 0 -L 2 55 400
-X FMLP -Q 0 -L 2 46 175
-X FMLP -Q 1 -L 1 33 175
-X FMLP -Q 1 -L 1 29 100
-X FMLP -Q 1 -L 1 26 125
-X FMLP -Q 2 -L 1 23 300
-X FMLP -Q 2 -L 1 20 100
-X FMLP -Q 2 -L 1 15 175
-X FMLP -Q 3 -L 1 14 100
-X FMLP -Q 3 -L 1 14 150
-X FMLP -Q 3 -L 1 14 125
| [
"[email protected]"
] | |
8b8b3facdeca2ad375851d0e4af14adaf96a11b5 | f547959acabd13b3a6419fc149573fbf961b4cad | /examples/make_rkoc_python.py | dac1bfa7a52c0b2b4662dd33727b3a462a5e21de | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | ketch/nodepy | b69fc85c52993f8872ae9fcad52a5082c0edc0eb | a78dd5c5021a0dbf7952c3614996315dbed5a642 | refs/heads/master | 2023-07-11T18:47:35.839942 | 2023-06-22T18:02:21 | 2023-06-26T14:03:44 | 4,030,392 | 57 | 18 | NOASSERTION | 2023-06-26T14:03:46 | 2012-04-15T06:59:24 | Python | UTF-8 | Python | false | false | 939 | py | #Make python script to evaluate order conditions
#for RK methods
from nodepy import rooted_trees as rt
from nodepy import runge_kutta_method as rk
p_max=15
f=open('oc_butcher.py','w')
f.write("def order(rk,tol=1.e-13):\n")
f.write(" from numpy import dot,zeros,all,sum,abs\n")
f.write(" coneq = zeros((1000))\n")
f.write(" A=rk.A\n b=rk.b\n c=rk.c\n")
f.write(" coneq[0]=sum(b)-1.\n")
f.write(" if any(abs(coneq)>tol):\n")
f.write(" return 0")
for ip in range(2,p_max):
print 'Generating order '+str(ip)+' conditions...'
ioc=0
f.write("\n # order "+str(ip)+" conditions:\n")
forest = rt.list_trees(ip)
for tree in forest:
oc=rk.elementary_weight_str(tree,style='python')
rhs =str(rt.Emap(tree))
f.write(" coneq["+str(ioc)+"]="+oc+"-"+rhs+".\n")
ioc+=1
f.write(" if any(abs(coneq)>tol):\n")
f.write(" return "+str(ip-1))
f.close()
| [
"[email protected]"
] | |
771785779f80f186cbf23c1566cbaec80ec62e62 | 87c77108855bef9e34b9d8dc1ce4eae1e135a3ee | /myfirstkedro/src/myfirstkedro/__init__.py | 3d1336402d80c9a4809a72cdfe7bdcbfbeb04c03 | [] | no_license | YaCpotato/my-kedro-sample | 6715bcb6fe1fd41d5253298b441be8b03094df8d | 9cf12ba36c6fc47b93e8f0abedb31c4f1f186f2d | refs/heads/master | 2023-01-03T02:50:26.567971 | 2020-10-29T14:50:04 | 2020-10-29T14:50:04 | 308,355,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""my first kedro
"""
__version__ = "0.1"
| [
"[email protected]"
] | |
4a3b71538eb6ce0c3b4facba849a1492ed9f347d | bbd3e0c5de108ae790c4ec3cb541c31fec61cca8 | /devil/devil/android/device_utils.py | 271f411a03fe80c7e9d5c0d9ac502839e59bf887 | [
"BSD-3-Clause"
] | permissive | oneumyvakin/catapult | a8e39dd1d63da3f018a05b03ae5e4c8e899ff225 | 69d05ea70e77998c86149ac78044470e3ca364ad | refs/heads/master | 2021-01-25T14:22:00.123474 | 2018-03-02T19:42:21 | 2018-03-02T20:16:09 | 123,683,443 | 0 | 0 | BSD-3-Clause | 2018-03-03T10:52:58 | 2018-03-03T10:52:58 | null | UTF-8 | Python | false | false | 105,450 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a variety of device interactions based on adb.
Eventually, this will be based on adb_wrapper.
"""
# pylint: disable=unused-argument
import calendar
import collections
import fnmatch
import json
import logging
import os
import posixpath
import pprint
import random
import re
import shutil
import stat
import tempfile
import time
import threading
import uuid
from devil import base_error
from devil import devil_env
from devil.utils import cmd_helper
from devil.android import apk_helper
from devil.android import device_signal
from devil.android import decorators
from devil.android import device_errors
from devil.android import device_temp_file
from devil.android import install_commands
from devil.android import logcat_monitor
from devil.android import md5sum
from devil.android.sdk import adb_wrapper
from devil.android.sdk import intent
from devil.android.sdk import keyevent
from devil.android.sdk import split_select
from devil.android.sdk import version_codes
from devil.utils import host_utils
from devil.utils import parallelizer
from devil.utils import reraiser_thread
from devil.utils import timeout_retry
from devil.utils import zip_utils
from py_utils import tempfile_ext
logger = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = 30
_DEFAULT_RETRIES = 3
# A sentinel object for default values
# TODO(jbudorick,perezju): revisit how default values are handled by
# the timeout_retry decorators.
DEFAULT = object()
_RESTART_ADBD_SCRIPT = """
trap '' HUP
trap '' TERM
trap '' PIPE
function restart() {
stop adbd
start adbd
}
restart &
"""
# Not all permissions can be set.
_PERMISSIONS_BLACKLIST_RE = re.compile('|'.join(fnmatch.translate(p) for p in [
'android.permission.ACCESS_LOCATION_EXTRA_COMMANDS',
'android.permission.ACCESS_MOCK_LOCATION',
'android.permission.ACCESS_NETWORK_STATE',
'android.permission.ACCESS_NOTIFICATION_POLICY',
'android.permission.ACCESS_VR_STATE',
'android.permission.ACCESS_WIFI_STATE',
'android.permission.AUTHENTICATE_ACCOUNTS',
'android.permission.BLUETOOTH',
'android.permission.BLUETOOTH_ADMIN',
'android.permission.BROADCAST_STICKY',
'android.permission.CHANGE_NETWORK_STATE',
'android.permission.CHANGE_WIFI_MULTICAST_STATE',
'android.permission.CHANGE_WIFI_STATE',
'android.permission.DISABLE_KEYGUARD',
'android.permission.DOWNLOAD_WITHOUT_NOTIFICATION',
'android.permission.EXPAND_STATUS_BAR',
'android.permission.GET_PACKAGE_SIZE',
'android.permission.INSTALL_SHORTCUT',
'android.permission.INJECT_EVENTS',
'android.permission.INTERNET',
'android.permission.KILL_BACKGROUND_PROCESSES',
'android.permission.MANAGE_ACCOUNTS',
'android.permission.MODIFY_AUDIO_SETTINGS',
'android.permission.NFC',
'android.permission.READ_SYNC_SETTINGS',
'android.permission.READ_SYNC_STATS',
'android.permission.RECEIVE_BOOT_COMPLETED',
'android.permission.RECORD_VIDEO',
'android.permission.REORDER_TASKS',
'android.permission.REQUEST_INSTALL_PACKAGES',
'android.permission.RESTRICTED_VR_ACCESS',
'android.permission.RUN_INSTRUMENTATION',
'android.permission.SET_ALARM',
'android.permission.SET_TIME_ZONE',
'android.permission.SET_WALLPAPER',
'android.permission.SET_WALLPAPER_HINTS',
'android.permission.TRANSMIT_IR',
'android.permission.USE_CREDENTIALS',
'android.permission.USE_FINGERPRINT',
'android.permission.VIBRATE',
'android.permission.WAKE_LOCK',
'android.permission.WRITE_SYNC_SETTINGS',
'com.android.browser.permission.READ_HISTORY_BOOKMARKS',
'com.android.browser.permission.WRITE_HISTORY_BOOKMARKS',
'com.android.launcher.permission.INSTALL_SHORTCUT',
'com.chrome.permission.DEVICE_EXTRAS',
'com.google.android.apps.now.CURRENT_ACCOUNT_ACCESS',
'com.google.android.c2dm.permission.RECEIVE',
'com.google.android.providers.gsf.permission.READ_GSERVICES',
'com.google.vr.vrcore.permission.VRCORE_INTERNAL',
'com.sec.enterprise.knox.MDM_CONTENT_PROVIDER',
'*.permission.C2D_MESSAGE',
'*.permission.READ_WRITE_BOOKMARK_FOLDERS',
'*.TOS_ACKED',
]))
_SHELL_OUTPUT_SEPARATOR = '~X~'
_PERMISSIONS_EXCEPTION_RE = re.compile(
r'java\.lang\.\w+Exception: .*$', re.MULTILINE)
_CURRENT_FOCUS_CRASH_RE = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
_GETPROP_RE = re.compile(r'\[(.*?)\]: \[(.*?)\]')
# Regex to parse the long (-l) output of 'ls' command, c.f.
# https://github.com/landley/toybox/blob/master/toys/posix/ls.c#L446
_LONG_LS_OUTPUT_RE = re.compile(
r'(?P<st_mode>[\w-]{10})\s+' # File permissions
r'(?:(?P<st_nlink>\d+)\s+)?' # Number of links (optional)
r'(?P<st_owner>\w+)\s+' # Name of owner
r'(?P<st_group>\w+)\s+' # Group of owner
r'(?:' # Either ...
r'(?P<st_rdev_major>\d+),\s+' # Device major, and
r'(?P<st_rdev_minor>\d+)\s+' # Device minor
r'|' # .. or
r'(?P<st_size>\d+)\s+' # Size in bytes
r')?' # .. or nothing
r'(?P<st_mtime>\d{4}-\d\d-\d\d \d\d:\d\d)\s+' # Modification date/time
r'(?P<filename>.+?)' # File name
r'(?: -> (?P<symbolic_link_to>.+))?' # Symbolic link (optional)
r'$' # End of string
)
_LS_DATE_FORMAT = '%Y-%m-%d %H:%M'
_FILE_MODE_RE = re.compile(r'[dbclps-](?:[r-][w-][xSs-]){2}[r-][w-][xTt-]$')
_FILE_MODE_KIND = {
'd': stat.S_IFDIR, 'b': stat.S_IFBLK, 'c': stat.S_IFCHR,
'l': stat.S_IFLNK, 'p': stat.S_IFIFO, 's': stat.S_IFSOCK,
'-': stat.S_IFREG}
_FILE_MODE_PERMS = [
stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR,
stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP,
stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH,
]
_FILE_MODE_SPECIAL = [
('s', stat.S_ISUID),
('s', stat.S_ISGID),
('t', stat.S_ISVTX),
]
_PS_COLUMNS = {
'pid': 1,
'ppid': 2,
'name': -1
}
_SELINUX_MODE = {
'enforcing': True,
'permissive': False,
'disabled': None
}
# Some devices require different logic for checking if root is necessary
_SPECIAL_ROOT_DEVICE_LIST = [
'marlin', # Pixel XL
'sailfish', # Pixel
'taimen', # Pixel 2 XL
'walleye', # Pixel 2
]
_IMEI_RE = re.compile(r' Device ID = (.+)$')
# The following regex is used to match result parcels like:
"""
Result: Parcel(
0x00000000: 00000000 0000000f 00350033 00360033 '........3.5.3.6.'
0x00000010: 00360032 00370030 00300032 00300039 '2.6.0.7.2.0.9.0.'
0x00000020: 00380033 00000039 '3.8.9... ')
"""
_PARCEL_RESULT_RE = re.compile(
r'0x[0-9a-f]{8}\: (?:[0-9a-f]{8}\s+){1,4}\'(.{16})\'')
_EBUSY_RE = re.compile(
r'mkdir failed for ([^,]*), Device or resource busy')
PS_COLUMNS = ('name', 'pid', 'ppid')
ProcessInfo = collections.namedtuple('ProcessInfo', PS_COLUMNS)
@decorators.WithExplicitTimeoutAndRetries(
_DEFAULT_TIMEOUT, _DEFAULT_RETRIES)
def GetAVDs():
"""Returns a list of Android Virtual Devices.
Returns:
A list containing the configured AVDs.
"""
lines = cmd_helper.GetCmdOutput([
os.path.join(devil_env.config.LocalPath('android_sdk'),
'tools', 'android'),
'list', 'avd']).splitlines()
avds = []
for line in lines:
if 'Name:' not in line:
continue
key, value = (s.strip() for s in line.split(':', 1))
if key == 'Name':
avds.append(value)
return avds
@decorators.WithExplicitTimeoutAndRetries(
_DEFAULT_TIMEOUT, _DEFAULT_RETRIES)
def RestartServer():
"""Restarts the adb server.
Raises:
CommandFailedError if we fail to kill or restart the server.
"""
def adb_killed():
return not adb_wrapper.AdbWrapper.IsServerOnline()
def adb_started():
return adb_wrapper.AdbWrapper.IsServerOnline()
adb_wrapper.AdbWrapper.KillServer()
if not timeout_retry.WaitFor(adb_killed, wait_period=1, max_tries=5):
    # TODO(perezju): raise an exception after fixing http://crbug.com/442319
logger.warning('Failed to kill adb server')
adb_wrapper.AdbWrapper.StartServer()
if not timeout_retry.WaitFor(adb_started, wait_period=1, max_tries=5):
raise device_errors.CommandFailedError('Failed to start adb server')
def _ParseModeString(mode_str):
"""Parse a mode string, e.g. 'drwxrwxrwx', into a st_mode value.
Effectively the reverse of |mode_to_string| in, e.g.:
https://github.com/landley/toybox/blob/master/lib/lib.c#L896
"""
if not _FILE_MODE_RE.match(mode_str):
raise ValueError('Unexpected file mode %r', mode_str)
mode = _FILE_MODE_KIND[mode_str[0]]
for c, flag in zip(mode_str[1:], _FILE_MODE_PERMS):
if c != '-' and c.islower():
mode |= flag
for c, (t, flag) in zip(mode_str[3::3], _FILE_MODE_SPECIAL):
if c.lower() == t:
mode |= flag
return mode
def _GetTimeStamp():
"""Return a basic ISO 8601 time stamp with the current local time."""
return time.strftime('%Y%m%dT%H%M%S', time.localtime())
def _JoinLines(lines):
# makes sure that the last line is also terminated, and is more memory
# efficient than first appending an end-line to each line and then joining
# all of them together.
return ''.join(s for line in lines for s in (line, '\n'))
def _CreateAdbWrapper(device):
if isinstance(device, adb_wrapper.AdbWrapper):
return device
else:
return adb_wrapper.AdbWrapper(device)
def _FormatPartialOutputError(output):
lines = output.splitlines() if isinstance(output, basestring) else output
message = ['Partial output found:']
if len(lines) > 11:
message.extend('- %s' % line for line in lines[:5])
message.extend('<snip>')
message.extend('- %s' % line for line in lines[-5:])
else:
message.extend('- %s' % line for line in lines)
return '\n'.join(message)
class DeviceUtils(object):
_MAX_ADB_COMMAND_LENGTH = 512
_MAX_ADB_OUTPUT_LENGTH = 32768
_LAUNCHER_FOCUSED_RE = re.compile(
r'\s*mCurrentFocus.*(Launcher|launcher).*')
_VALID_SHELL_VARIABLE = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
LOCAL_PROPERTIES_PATH = posixpath.join('/', 'data', 'local.prop')
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
def __init__(self, device, enable_device_files_cache=False,
default_timeout=_DEFAULT_TIMEOUT,
default_retries=_DEFAULT_RETRIES):
"""DeviceUtils constructor.
Args:
device: Either a device serial, an existing AdbWrapper instance, or an
an existing AndroidCommands instance.
enable_device_files_cache: For PushChangedFiles(), cache checksums of
pushed files rather than recomputing them on a subsequent call.
default_timeout: An integer containing the default number of seconds to
wait for an operation to complete if no explicit value is provided.
default_retries: An integer containing the default number or times an
operation should be retried on failure if no explicit value is provided.
"""
self.adb = None
if isinstance(device, basestring):
self.adb = _CreateAdbWrapper(device)
elif isinstance(device, adb_wrapper.AdbWrapper):
self.adb = device
else:
raise ValueError('Unsupported device value: %r' % device)
self._commands_installed = None
self._default_timeout = default_timeout
self._default_retries = default_retries
self._enable_device_files_cache = enable_device_files_cache
self._cache = {}
self._client_caches = {}
self._cache_lock = threading.RLock()
assert hasattr(self, decorators.DEFAULT_TIMEOUT_ATTR)
assert hasattr(self, decorators.DEFAULT_RETRIES_ATTR)
self._ClearCache()
@property
def serial(self):
"""Returns the device serial."""
return self.adb.GetDeviceSerial()
def __eq__(self, other):
"""Checks whether |other| refers to the same device as |self|.
Args:
other: The object to compare to. This can be a basestring, an instance
of adb_wrapper.AdbWrapper, or an instance of DeviceUtils.
Returns:
Whether |other| refers to the same device as |self|.
"""
return self.serial == str(other)
def __lt__(self, other):
"""Compares two instances of DeviceUtils.
This merely compares their serial numbers.
Args:
other: The instance of DeviceUtils to compare to.
Returns:
Whether |self| is less than |other|.
"""
return self.serial < other.serial
def __str__(self):
"""Returns the device serial."""
return self.serial
@decorators.WithTimeoutAndRetriesFromInstance()
def IsOnline(self, timeout=None, retries=None):
"""Checks whether the device is online.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device is online, False otherwise.
Raises:
CommandTimeoutError on timeout.
"""
try:
return self.adb.GetState() == 'device'
except base_error.BaseError as exc:
logger.info('Failed to get state: %s', exc)
return False
@decorators.WithTimeoutAndRetriesFromInstance()
def HasRoot(self, timeout=None, retries=None):
"""Checks whether or not adbd has root privileges.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if adbd has root privileges, False otherwise.
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
try:
if self.product_name in _SPECIAL_ROOT_DEVICE_LIST:
return self.GetProp('service.adb.root') == '1'
self.RunShellCommand(['ls', '/root'], check_return=True)
return True
except device_errors.AdbCommandFailedError:
return False
def NeedsSU(self, timeout=DEFAULT, retries=DEFAULT):
"""Checks whether 'su' is needed to access protected resources.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
      True if 'su' is available on the device and is needed to access
protected resources; False otherwise if either 'su' is not available
(e.g. because the device has a user build), or not needed (because adbd
already has root privileges).
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if 'needs_su' not in self._cache:
cmd = '%s && ! ls /root' % self._Su('ls /root')
if self.product_name in _SPECIAL_ROOT_DEVICE_LIST:
if self.HasRoot():
self._cache['needs_su'] = False
return False
cmd = 'which which && which su'
try:
self.RunShellCommand(cmd, shell=True, check_return=True,
timeout=self._default_timeout if timeout is DEFAULT else timeout,
retries=self._default_retries if retries is DEFAULT else retries)
self._cache['needs_su'] = True
except device_errors.AdbCommandFailedError:
self._cache['needs_su'] = False
return self._cache['needs_su']
def _Su(self, command):
if self.build_version_sdk >= version_codes.MARSHMALLOW:
return 'su 0 %s' % command
return 'su -c %s' % command
@decorators.WithTimeoutAndRetriesFromInstance()
def EnableRoot(self, timeout=None, retries=None):
"""Restarts adbd with root privileges.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if root could not be enabled.
CommandTimeoutError on timeout.
"""
if 'needs_su' in self._cache:
del self._cache['needs_su']
try:
self.adb.Root()
except device_errors.AdbCommandFailedError:
if self.IsUserBuild():
raise device_errors.CommandFailedError(
'Unable to root device with user build.', str(self))
else:
raise # Failed probably due to some other reason.
def device_online_with_root():
try:
self.adb.WaitForDevice()
return self.GetProp('service.adb.root', cache=False) == '1'
except (device_errors.AdbCommandFailedError,
device_errors.DeviceUnreachableError):
return False
timeout_retry.WaitFor(device_online_with_root, wait_period=1)
@decorators.WithTimeoutAndRetriesFromInstance()
def IsUserBuild(self, timeout=None, retries=None):
"""Checks whether or not the device is running a user build.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device is running a user build, False otherwise (i.e. if
it's running a userdebug build).
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
return self.build_type == 'user'
@decorators.WithTimeoutAndRetriesFromInstance()
def GetExternalStoragePath(self, timeout=None, retries=None):
"""Get the device's path to its SD card.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
The device's path to its SD card.
Raises:
CommandFailedError if the external storage path could not be determined.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
self._EnsureCacheInitialized()
if not self._cache['external_storage']:
raise device_errors.CommandFailedError('$EXTERNAL_STORAGE is not set',
str(self))
return self._cache['external_storage']
@decorators.WithTimeoutAndRetriesFromInstance()
def GetIMEI(self, timeout=None, retries=None):
"""Get the device's IMEI.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
The device's IMEI.
Raises:
AdbCommandFailedError on error
"""
if self._cache.get('imei') is not None:
return self._cache.get('imei')
if self.build_version_sdk < 21:
out = self.RunShellCommand(['dumpsys', 'iphonesubinfo'],
raw_output=True, check_return=True)
if out:
match = re.search(_IMEI_RE, out)
if match:
self._cache['imei'] = match.group(1)
return self._cache['imei']
else:
out = self.RunShellCommand(['service', 'call', 'iphonesubinfo', '1'],
check_return=True)
if out:
imei = ''
for line in out:
match = re.search(_PARCEL_RESULT_RE, line)
if match:
imei = imei + match.group(1)
imei = imei.replace('.', '').strip()
if imei:
self._cache['imei'] = imei
return self._cache['imei']
raise device_errors.CommandFailedError('Unable to fetch IMEI.')
@decorators.WithTimeoutAndRetriesFromInstance()
def GetApplicationPaths(self, package, timeout=None, retries=None):
"""Get the paths of the installed apks on the device for the given package.
Args:
package: Name of the package.
Returns:
List of paths to the apks on the device for the given package.
"""
return self._GetApplicationPathsInternal(package)
def _GetApplicationPathsInternal(self, package, skip_cache=False):
cached_result = self._cache['package_apk_paths'].get(package)
if cached_result is not None and not skip_cache:
if package in self._cache['package_apk_paths_to_verify']:
self._cache['package_apk_paths_to_verify'].remove(package)
# Don't verify an app that is not thought to be installed. We are
# concerned only with apps we think are installed having been
# uninstalled manually.
if cached_result and not self.PathExists(cached_result):
cached_result = None
self._cache['package_apk_checksums'].pop(package, 0)
if cached_result is not None:
return list(cached_result)
# 'pm path' is liable to incorrectly exit with a nonzero number starting
# in Lollipop.
# TODO(jbudorick): Check if this is fixed as new Android versions are
# released to put an upper bound on this.
should_check_return = (self.build_version_sdk < version_codes.LOLLIPOP)
output = self.RunShellCommand(
['pm', 'path', package], check_return=should_check_return)
apks = []
bad_output = False
for line in output:
if line.startswith('package:'):
apks.append(line[len('package:'):])
elif line.startswith('WARNING:'):
continue
else:
bad_output = True # Unexpected line in output.
if not apks and output:
if bad_output:
raise device_errors.CommandFailedError(
'Unexpected pm path output: %r' % '\n'.join(output), str(self))
else:
logger.warning('pm returned no paths but the following warnings:')
for line in output:
logger.warning('- %s', line)
self._cache['package_apk_paths'][package] = list(apks)
return apks
@decorators.WithTimeoutAndRetriesFromInstance()
def GetApplicationVersion(self, package, timeout=None, retries=None):
"""Get the version name of a package installed on the device.
Args:
package: Name of the package.
Returns:
A string with the version name or None if the package is not found
on the device.
"""
output = self.RunShellCommand(
['dumpsys', 'package', package], check_return=True)
if not output:
return None
for line in output:
line = line.strip()
if line.startswith('versionName='):
return line[len('versionName='):]
raise device_errors.CommandFailedError(
'Version name for %s not found on dumpsys output' % package, str(self))
@decorators.WithTimeoutAndRetriesFromInstance()
def GetApplicationDataDirectory(self, package, timeout=None, retries=None):
"""Get the data directory on the device for the given package.
Args:
package: Name of the package.
Returns:
The package's data directory.
Raises:
CommandFailedError if the package's data directory can't be found,
whether because it's not installed or otherwise.
"""
output = self._RunPipedShellCommand(
'pm dump %s | grep dataDir=' % cmd_helper.SingleQuote(package))
for line in output:
_, _, dataDir = line.partition('dataDir=')
if dataDir:
return dataDir
raise device_errors.CommandFailedError(
'Could not find data directory for %s', package)
@decorators.WithTimeoutAndRetriesFromInstance()
def GetSecurityContextForPackage(self, package, timeout=None, retries=None):
"""Gets the SELinux security context for the given package.
Args:
package: Name of the package.
Returns:
The package's security context as a string, or None if not found.
"""
for line in self.RunShellCommand(['ls', '-Z', '/data/data/'],
as_root=True, check_return=True):
split_line = line.split()
# ls -Z output differs between Android versions, but the package is
# always last and the context always starts with "u:object"
if split_line[-1] == package:
for column in split_line:
if column.startswith('u:object'):
return column
return None
def TakeBugReport(self, path, timeout=60*5, retries=None):
"""Takes a bug report and dumps it to the specified path.
This doesn't use adb's bugreport option since its behavior is dependent on
both adb version and device OS version. To make it simpler, this directly
runs the bugreport command on the device itself and dumps the stdout to a
file.
Args:
path: Path on the host to drop the bug report.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
with device_temp_file.DeviceTempFile(self.adb) as device_tmp_file:
cmd = '( bugreport )>%s 2>&1' % device_tmp_file.name
self.RunShellCommand(
cmd, check_return=True, shell=True, timeout=timeout, retries=retries)
self.PullFile(device_tmp_file.name, path)
@decorators.WithTimeoutAndRetriesFromInstance()
def WaitUntilFullyBooted(self, wifi=False, timeout=None, retries=None):
"""Wait for the device to fully boot.
This means waiting for the device to boot, the package manager to be
available, and the SD card to be ready. It can optionally mean waiting
for wifi to come up, too.
Args:
wifi: A boolean indicating if we should wait for wifi to come up or not.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError on failure.
CommandTimeoutError if one of the component waits times out.
DeviceUnreachableError if the device becomes unresponsive.
"""
def sd_card_ready():
try:
self.RunShellCommand(['test', '-d', self.GetExternalStoragePath()],
check_return=True)
return True
except device_errors.AdbCommandFailedError:
return False
def pm_ready():
try:
return self._GetApplicationPathsInternal('android', skip_cache=True)
except device_errors.CommandFailedError:
return False
def boot_completed():
try:
return self.GetProp('sys.boot_completed', cache=False) == '1'
except device_errors.CommandFailedError:
return False
def wifi_enabled():
return 'Wi-Fi is enabled' in self.RunShellCommand(['dumpsys', 'wifi'],
check_return=False)
self.adb.WaitForDevice()
timeout_retry.WaitFor(sd_card_ready)
timeout_retry.WaitFor(pm_ready)
timeout_retry.WaitFor(boot_completed)
if wifi:
timeout_retry.WaitFor(wifi_enabled)
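# Usage sketch (illustrative only, assuming |device| is a DeviceUtils
# instance): block until the device is usable, including wifi, before
# issuing further shell commands.
#
#   device.WaitUntilFullyBooted(wifi=True)
#   device.RunShellCommand(['echo', 'ready'], check_return=True)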
REBOOT_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT
@decorators.WithTimeoutAndRetriesFromInstance(
min_default_timeout=REBOOT_DEFAULT_TIMEOUT)
def Reboot(self, block=True, wifi=False, timeout=None, retries=None):
"""Reboot the device.
Args:
block: A boolean indicating if we should wait for the reboot to complete.
wifi: A boolean indicating if we should wait for wifi to be enabled after
the reboot. The option has no effect unless |block| is also True.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
def device_offline():
return not self.IsOnline()
self.adb.Reboot()
self._ClearCache()
timeout_retry.WaitFor(device_offline, wait_period=1)
if block:
self.WaitUntilFullyBooted(wifi=wifi)
INSTALL_DEFAULT_TIMEOUT = 4 * _DEFAULT_TIMEOUT
@decorators.WithTimeoutAndRetriesFromInstance(
min_default_timeout=INSTALL_DEFAULT_TIMEOUT)
def Install(self, apk, allow_downgrade=False, reinstall=False,
permissions=None, timeout=None, retries=None):
"""Install an APK.
Noop if an identical APK is already installed.
Args:
apk: An ApkHelper instance or string containing the path to the APK.
allow_downgrade: A boolean indicating if we should allow downgrades.
reinstall: A boolean indicating if we should keep any existing app data.
permissions: Set of permissions to set. If not set, finds permissions with
apk helper. To set no permissions, pass [].
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if the installation fails.
CommandTimeoutError if the installation times out.
DeviceUnreachableError on missing device.
"""
self._InstallInternal(apk, None, allow_downgrade=allow_downgrade,
reinstall=reinstall, permissions=permissions)
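# Usage sketch (illustrative only; the APK path and permission list are
# hypothetical):
#
#   device.Install('/tmp/Example.apk', reinstall=True,
#                  permissions=['android.permission.INTERNET'])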
@decorators.WithTimeoutAndRetriesFromInstance(
min_default_timeout=INSTALL_DEFAULT_TIMEOUT)
def InstallSplitApk(self, base_apk, split_apks, allow_downgrade=False,
reinstall=False, allow_cached_props=False,
permissions=None, timeout=None, retries=None):
"""Install a split APK.
Noop if all of the APK splits are already installed.
Args:
base_apk: An ApkHelper instance or string containing the path to the base
APK.
split_apks: A list of strings of paths of all of the APK splits.
allow_downgrade: A boolean indicating if we should allow downgrades.
reinstall: A boolean indicating if we should keep any existing app data.
allow_cached_props: Whether to use cached values for device properties.
permissions: Set of permissions to set. If not set, finds permissions with
apk helper. To set no permissions, pass [].
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if the installation fails.
CommandTimeoutError if the installation times out.
DeviceUnreachableError on missing device.
DeviceVersionError if device SDK is less than Android L.
"""
self._InstallInternal(base_apk, split_apks, reinstall=reinstall,
allow_cached_props=allow_cached_props,
permissions=permissions,
allow_downgrade=allow_downgrade)
def _InstallInternal(self, base_apk, split_apks, allow_downgrade=False,
reinstall=False, allow_cached_props=False,
permissions=None):
if split_apks:
self._CheckSdkLevel(version_codes.LOLLIPOP)
base_apk = apk_helper.ToHelper(base_apk)
all_apks = [base_apk.path]
if split_apks:
all_apks += split_select.SelectSplits(
self, base_apk.path, split_apks, allow_cached_props=allow_cached_props)
if len(all_apks) == 1:
logger.warning('split-select did not select any from %s', split_apks)
missing_apks = [apk for apk in all_apks if not os.path.exists(apk)]
if missing_apks:
raise device_errors.CommandFailedError(
'Attempted to install non-existent apks: %s'
% pprint.pformat(missing_apks))
package_name = base_apk.GetPackageName()
device_apk_paths = self._GetApplicationPathsInternal(package_name)
apks_to_install = None
host_checksums = None
if not device_apk_paths:
apks_to_install = all_apks
elif len(device_apk_paths) > 1 and not split_apks:
logger.warning(
'Installing non-split APK when split APK was previously installed')
apks_to_install = all_apks
elif len(device_apk_paths) == 1 and split_apks:
logger.warning(
'Installing split APK when non-split APK was previously installed')
apks_to_install = all_apks
else:
try:
apks_to_install, host_checksums = (
self._ComputeStaleApks(package_name, all_apks))
except EnvironmentError as e:
logger.warning('Error calculating md5: %s', e)
apks_to_install, host_checksums = all_apks, None
if apks_to_install and not reinstall:
self.Uninstall(package_name)
apks_to_install = all_apks
if apks_to_install:
# Assume that we won't know the resulting device state.
self._cache['package_apk_paths'].pop(package_name, 0)
self._cache['package_apk_checksums'].pop(package_name, 0)
if split_apks:
partial = package_name if len(apks_to_install) < len(all_apks) else None
self.adb.InstallMultiple(
apks_to_install, partial=partial, reinstall=reinstall,
allow_downgrade=allow_downgrade)
else:
self.adb.Install(
base_apk.path, reinstall=reinstall, allow_downgrade=allow_downgrade)
else:
# Running adb install terminates running instances of the app, so to be
# consistent, we explicitly terminate it when skipping the install.
self.ForceStop(package_name)
if (permissions is None
and self.build_version_sdk >= version_codes.MARSHMALLOW):
permissions = base_apk.GetPermissions()
self.GrantPermissions(package_name, permissions)
# Upon success, we know the device checksums, but not their paths.
if host_checksums is not None:
self._cache['package_apk_checksums'][package_name] = host_checksums
@decorators.WithTimeoutAndRetriesFromInstance()
def Uninstall(self, package_name, keep_data=False, timeout=None,
retries=None):
"""Remove the app |package_name| from the device.
This is a no-op if the app is not already installed.
Args:
package_name: The package to uninstall.
keep_data: (optional) Whether to keep the data and cache directories.
timeout: Timeout in seconds.
retries: Number of retries.
Raises:
CommandFailedError if the uninstallation fails.
CommandTimeoutError if the uninstallation times out.
DeviceUnreachableError on missing device.
"""
installed = self._GetApplicationPathsInternal(package_name)
if not installed:
return
try:
self.adb.Uninstall(package_name, keep_data)
self._cache['package_apk_paths'][package_name] = []
self._cache['package_apk_checksums'][package_name] = set()
except:
# Clear cache since we can't be sure of the state.
self._cache['package_apk_paths'].pop(package_name, 0)
self._cache['package_apk_checksums'].pop(package_name, 0)
raise
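# Usage sketch (illustrative only, hypothetical package name and APK path):
# a clean reinstall built from the two methods above.
#
#   device.Uninstall('com.example.app')
#   device.Install('/tmp/Example.apk')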
def _CheckSdkLevel(self, required_sdk_level):
"""Raises an exception if the device does not have the required SDK level.
"""
if self.build_version_sdk < required_sdk_level:
raise device_errors.DeviceVersionError(
('Requires SDK level %s, device is SDK level %s' %
(required_sdk_level, self.build_version_sdk)),
device_serial=self.serial)
@decorators.WithTimeoutAndRetriesFromInstance()
def RunShellCommand(self, cmd, shell=False, check_return=False, cwd=None,
env=None, run_as=None, as_root=False, single_line=False,
large_output=False, raw_output=False, timeout=None,
retries=None):
"""Run an ADB shell command.
The command to run |cmd| should be a sequence of program arguments
(preferred) or a single string with a shell script to run.
When |cmd| is a sequence, it is assumed to contain the name of the command
to run followed by its arguments. In this case, arguments are passed to the
command exactly as given, preventing any further processing by the shell.
This allows callers to easily pass arguments with spaces or special
characters without having to worry about quoting rules. Whenever possible,
it is recommended to pass |cmd| as a sequence.
When |cmd| is passed as a single string, |shell| should be set to True.
The command will be interpreted and run by the shell on the device,
allowing the use of shell features such as pipes, wildcards, or variables.
Failing to set shell=True will issue a warning, but this will be changed
to a hard failure in the future (see: catapult:#3242).
This behaviour is consistent with that of command runners in cmd_helper as
well as Python's own subprocess.Popen.
TODO(perezju) Change the default of |check_return| to True when callers
have switched to the new behaviour.
Args:
cmd: A sequence containing the command to run and its arguments, or a
string with a shell script to run (should also set shell=True).
shell: A boolean indicating whether shell features may be used in |cmd|.
check_return: A boolean indicating whether or not the return code should
be checked.
cwd: The device directory in which the command should be run.
env: The environment variables with which the command should be run.
run_as: A string containing the package as which the command should be
run.
as_root: A boolean indicating whether the shell command should be run
with root privileges.
single_line: A boolean indicating if only a single line of output is
expected.
large_output: Uses a work-around for large shell command output. Without
this large output will be truncated.
raw_output: Whether to only return the raw output
(no splitting into lines).
timeout: timeout in seconds
retries: number of retries
Returns:
If single_line is False, the output of the command as a list of lines,
otherwise, a string with the unique line of output emitted by the command
(with the optional newline at the end stripped).
Raises:
AdbCommandFailedError if check_return is True and the exit code of
the command run on the device is non-zero.
CommandFailedError if single_line is True but the output contains two or
more lines.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
def env_quote(key, value):
if not DeviceUtils._VALID_SHELL_VARIABLE.match(key):
raise KeyError('Invalid shell variable name %r' % key)
# using double quotes here to allow interpolation of shell variables
return '%s=%s' % (key, cmd_helper.DoubleQuote(value))
def run(cmd):
return self.adb.Shell(cmd)
def handle_check_return(cmd):
try:
return run(cmd)
except device_errors.AdbCommandFailedError as exc:
if check_return:
raise
else:
return exc.output
def handle_large_command(cmd):
if len(cmd) < self._MAX_ADB_COMMAND_LENGTH:
return handle_check_return(cmd)
else:
with device_temp_file.DeviceTempFile(self.adb, suffix='.sh') as script:
self._WriteFileWithPush(script.name, cmd)
logger.info('Large shell command will be run from file: %s ...',
cmd[:self._MAX_ADB_COMMAND_LENGTH])
return handle_check_return('sh %s' % script.name_quoted)
def handle_large_output(cmd, large_output_mode):
if large_output_mode:
with device_temp_file.DeviceTempFile(self.adb) as large_output_file:
cmd = '( %s )>%s 2>&1' % (cmd, large_output_file.name)
logger.debug('Large output mode enabled. Will write output to '
'device and read results from file.')
handle_large_command(cmd)
return self.ReadFile(large_output_file.name, force_pull=True)
else:
try:
return handle_large_command(cmd)
except device_errors.AdbCommandFailedError as exc:
if exc.status is None:
logger.error(_FormatPartialOutputError(exc.output))
logger.warning('Attempting to run in large_output mode.')
logger.warning('Use RunShellCommand(..., large_output=True) for '
'shell commands that expect a lot of output.')
return handle_large_output(cmd, True)
else:
raise
if isinstance(cmd, basestring):
if not shell:
logger.warning(
'The command to run should preferably be passed as a sequence of'
' args. If shell features are needed (pipes, wildcards, variables)'
' clients should explicitly set shell=True.')
else:
cmd = ' '.join(cmd_helper.SingleQuote(s) for s in cmd)
if env:
env = ' '.join(env_quote(k, v) for k, v in env.iteritems())
cmd = '%s %s' % (env, cmd)
if cwd:
cmd = 'cd %s && %s' % (cmd_helper.SingleQuote(cwd), cmd)
if run_as:
cmd = 'run-as %s sh -c %s' % (cmd_helper.SingleQuote(run_as),
cmd_helper.SingleQuote(cmd))
if as_root and self.NeedsSU():
# "su -c sh -c" allows using shell features in |cmd|
cmd = self._Su('sh -c %s' % cmd_helper.SingleQuote(cmd))
output = handle_large_output(cmd, large_output)
if raw_output:
return output
output = output.splitlines()
if single_line:
if not output:
return ''
elif len(output) == 1:
return output[0]
else:
msg = 'one line of output was expected, but got: %s'
raise device_errors.CommandFailedError(msg % output, str(self))
else:
return output
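# Usage sketches (illustrative only). Prefer the sequence form so arguments
# are not re-interpreted by the device shell; use shell=True only when shell
# features (pipes, wildcards, variables) are actually needed:
#
#   lines = device.RunShellCommand(['ls', '-l', '/sdcard'], check_return=True)
#   uptime = device.RunShellCommand('cat /proc/uptime | cut -d" " -f1',
#                                   shell=True, single_line=True,
#                                   check_return=True)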
def _RunPipedShellCommand(self, script, **kwargs):
PIPESTATUS_LEADER = 'PIPESTATUS: '
script += '; echo "%s${PIPESTATUS[@]}"' % PIPESTATUS_LEADER
kwargs.update(shell=True, check_return=True)
output = self.RunShellCommand(script, **kwargs)
pipestatus_line = output[-1]
if not pipestatus_line.startswith(PIPESTATUS_LEADER):
logger.error('Pipe exit statuses of shell script missing.')
raise device_errors.AdbShellCommandFailedError(
script, output, status=None,
device_serial=self.serial)
output = output[:-1]
statuses = [
int(s) for s in pipestatus_line[len(PIPESTATUS_LEADER):].split()]
if any(statuses):
raise device_errors.AdbShellCommandFailedError(
script, output, status=statuses,
device_serial=self.serial)
return output
@decorators.WithTimeoutAndRetriesFromInstance()
def KillAll(self, process_name, exact=False, signum=device_signal.SIGKILL,
as_root=False, blocking=False, quiet=False,
timeout=None, retries=None):
"""Kill all processes with the given name on the device.
Args:
process_name: A string containing the name of the process to kill.
exact: A boolean indicating whether to kill all processes matching
the string |process_name| exactly, or all of those which contain
|process_name| as a substring. Defaults to False.
signum: An integer containing the signal number to send to kill. Defaults
to SIGKILL (9).
as_root: A boolean indicating whether the kill should be executed with
root privileges.
blocking: A boolean indicating whether we should wait until all processes
with the given |process_name| are dead.
quiet: A boolean indicating whether to ignore the fact that no processes
to kill were found.
timeout: timeout in seconds
retries: number of retries
Returns:
The number of processes attempted to kill.
Raises:
CommandFailedError if no process was killed and |quiet| is False.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
processes = self.ListProcesses(process_name)
if exact:
processes = [p for p in processes if p.name == process_name]
if not processes:
if quiet:
return 0
else:
raise device_errors.CommandFailedError(
'No processes matching %r (exact=%r)' % (process_name, exact),
str(self))
logger.info(
'KillAll(%r, ...) attempting to kill the following:', process_name)
for p in processes:
logger.info(' %05d %s', p.pid, p.name)
pids = set(p.pid for p in processes)
cmd = ['kill', '-%d' % signum] + sorted(str(p) for p in pids)
self.RunShellCommand(cmd, as_root=as_root, check_return=True)
def all_pids_killed():
pids_left = (p.pid for p in self.ListProcesses(process_name))
return not pids.intersection(pids_left)
if blocking:
timeout_retry.WaitFor(all_pids_killed, wait_period=0.1)
return len(pids)
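# Usage sketch (illustrative only, hypothetical process name): kill every
# exactly-matching process and wait for them to die.
#
#   killed = device.KillAll('com.example.app', exact=True, blocking=True,
#                           quiet=True)
#   logger.info('Killed %d process(es)', killed)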
@decorators.WithTimeoutAndRetriesFromInstance()
def StartActivity(self, intent_obj, blocking=False, trace_file_name=None,
force_stop=False, timeout=None, retries=None):
"""Start package's activity on the device.
Args:
intent_obj: An Intent object to send.
blocking: A boolean indicating whether we should wait for the activity to
finish launching.
trace_file_name: If present, a string that both indicates that we want to
profile the activity and contains the path to which the
trace should be saved.
force_stop: A boolean indicating whether we should stop the activity
before starting it.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if the activity could not be started.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
cmd = ['am', 'start']
if blocking:
cmd.append('-W')
if trace_file_name:
cmd.extend(['--start-profiler', trace_file_name])
if force_stop:
cmd.append('-S')
cmd.extend(intent_obj.am_args)
for line in self.RunShellCommand(cmd, check_return=True):
if line.startswith('Error:'):
raise device_errors.CommandFailedError(line, str(self))
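# Usage sketch (illustrative only). The Intent keyword arguments shown here
# are assumptions about devil.android.sdk.intent (imported as |intent|):
#
#   view_intent = intent.Intent(action='android.intent.action.VIEW',
#                               data='http://example.com')
#   device.StartActivity(view_intent, blocking=True)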
@decorators.WithTimeoutAndRetriesFromInstance()
def StartInstrumentation(self, component, finish=True, raw=False,
extras=None, timeout=None, retries=None):
if extras is None:
extras = {}
cmd = ['am', 'instrument']
if finish:
cmd.append('-w')
if raw:
cmd.append('-r')
for k, v in extras.iteritems():
cmd.extend(['-e', str(k), str(v)])
cmd.append(component)
# Store the package name in a shell variable to help the command stay under
# the _MAX_ADB_COMMAND_LENGTH limit.
package = component.split('/')[0]
shell_snippet = 'p=%s;%s' % (package,
cmd_helper.ShrinkToSnippet(cmd, 'p', package))
return self.RunShellCommand(shell_snippet, shell=True, check_return=True,
large_output=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def BroadcastIntent(self, intent_obj, timeout=None, retries=None):
"""Send a broadcast intent.
Args:
intent_obj: An Intent to broadcast.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
cmd = ['am', 'broadcast'] + intent_obj.am_args
self.RunShellCommand(cmd, check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def GoHome(self, timeout=None, retries=None):
"""Return to the home screen and obtain launcher focus.
This command launches the home screen and attempts to obtain
launcher focus until the timeout is reached.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
def is_launcher_focused():
output = self.RunShellCommand(['dumpsys', 'window', 'windows'],
check_return=True, large_output=True)
return any(self._LAUNCHER_FOCUSED_RE.match(l) for l in output)
def dismiss_popups():
# There is a dialog present; attempt to get rid of it.
# Not all dialogs can be dismissed with back.
self.SendKeyEvent(keyevent.KEYCODE_ENTER)
self.SendKeyEvent(keyevent.KEYCODE_BACK)
return is_launcher_focused()
# If Home is already focused, return early to avoid unnecessary work.
if is_launcher_focused():
return
self.StartActivity(
intent.Intent(action='android.intent.action.MAIN',
category='android.intent.category.HOME'),
blocking=True)
if not is_launcher_focused():
timeout_retry.WaitFor(dismiss_popups, wait_period=1)
@decorators.WithTimeoutAndRetriesFromInstance()
def ForceStop(self, package, timeout=None, retries=None):
"""Close the application.
Args:
package: A string containing the name of the package to stop.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if self.GetApplicationPids(package):
self.RunShellCommand(['am', 'force-stop', package], check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def ClearApplicationState(
self, package, permissions=None, timeout=None, retries=None):
"""Clear all state for the given package.
Args:
package: A string containing the name of the package to stop.
permissions: List of permissions to set after clearing data.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
# Check that the package exists before clearing it for android builds below
# JB MR2. Necessary because calling pm clear on a package that doesn't exist
# may never return.
if ((self.build_version_sdk >= version_codes.JELLY_BEAN_MR2)
or self._GetApplicationPathsInternal(package)):
self.RunShellCommand(['pm', 'clear', package], check_return=True)
self.GrantPermissions(package, permissions)
@decorators.WithTimeoutAndRetriesFromInstance()
def SendKeyEvent(self, keycode, timeout=None, retries=None):
"""Sends a keycode to the device.
See the devil.android.sdk.keyevent module for suitable keycode values.
Args:
keycode: An integer keycode to send to the device.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
self.RunShellCommand(['input', 'keyevent', format(keycode, 'd')],
check_return=True)
PUSH_CHANGED_FILES_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT
@decorators.WithTimeoutAndRetriesFromInstance(
min_default_timeout=PUSH_CHANGED_FILES_DEFAULT_TIMEOUT)
def PushChangedFiles(self, host_device_tuples, timeout=None,
retries=None, delete_device_stale=False):
"""Push files to the device, skipping files that don't need updating.
When a directory is pushed, it is traversed recursively on the host and
all files in it are pushed to the device as needed.
Additionally, if delete_device_stale option is True,
files that exist on the device but don't exist on the host are deleted.
Args:
host_device_tuples: A list of (host_path, device_path) tuples, where
|host_path| is an absolute path of a file or directory on the host
that should be minimally pushed to the device, and |device_path| is
an absolute path of the destination on the device.
timeout: timeout in seconds
retries: number of retries
delete_device_stale: option to delete stale files on device
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
all_changed_files = []
all_stale_files = []
missing_dirs = set()
cache_commit_funcs = []
for h, d in host_device_tuples:
assert os.path.isabs(h) and posixpath.isabs(d)
h = os.path.realpath(h)
changed_files, up_to_date_files, stale_files, cache_commit_func = (
self._GetChangedAndStaleFiles(h, d, delete_device_stale))
all_changed_files += changed_files
all_stale_files += stale_files
cache_commit_funcs.append(cache_commit_func)
if changed_files and not up_to_date_files and not stale_files:
if os.path.isdir(h):
missing_dirs.add(d)
else:
missing_dirs.add(posixpath.dirname(d))
if delete_device_stale and all_stale_files:
self.RunShellCommand(['rm', '-f'] + all_stale_files, check_return=True)
if all_changed_files:
if missing_dirs:
try:
self.RunShellCommand(['mkdir', '-p'] + list(missing_dirs),
check_return=True)
except device_errors.AdbShellCommandFailedError as e:
# TODO(crbug.com/739899): This is attempting to diagnose flaky EBUSY
# errors that have been popping up in single-device scenarios.
# Remove it once we've figured out what's causing them and how best
# to handle them.
m = _EBUSY_RE.search(e.output)
if m:
logging.error(
'Hit EBUSY while attempting to make missing directories.')
logging.error('lsof output:')
# Don't check for return below since grep exits with a non-zero when
# no match is found.
for l in self.RunShellCommand(
'lsof | grep %s' % cmd_helper.SingleQuote(m.group(1)),
check_return=False):
logging.error(' %s', l)
raise
self._PushFilesImpl(host_device_tuples, all_changed_files)
for func in cache_commit_funcs:
func()
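# Usage sketch (illustrative only, hypothetical paths); both sides of each
# tuple must be absolute, and stale device-side files can optionally be
# removed:
#
#   device.PushChangedFiles(
#       [('/home/user/out/test_data', '/sdcard/chromium_tests_root')],
#       delete_device_stale=True)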
def _GetChangedAndStaleFiles(self, host_path, device_path, track_stale=False):
"""Get files to push and delete
Args:
host_path: an absolute path of a file or directory on the host
device_path: an absolute path of a file or directory on the device
track_stale: whether to bother looking for stale files (slower)
Returns:
a four-element tuple
1st element: a list of (host_files_path, device_files_path) tuples to push
2nd element: a list of host_files_path that are up-to-date
3rd element: a list of stale files under device_path, or [] when
track_stale == False
4th element: a cache commit function.
"""
try:
# Length calculations below assume no trailing /.
host_path = host_path.rstrip('/')
device_path = device_path.rstrip('/')
specific_device_paths = [device_path]
ignore_other_files = not track_stale and os.path.isdir(host_path)
if ignore_other_files:
specific_device_paths = []
for root, _, filenames in os.walk(host_path):
relative_dir = root[len(host_path) + 1:]
specific_device_paths.extend(
posixpath.join(device_path, relative_dir, f) for f in filenames)
def calculate_host_checksums():
return md5sum.CalculateHostMd5Sums([host_path])
def calculate_device_checksums():
if self._enable_device_files_cache:
cache_entry = self._cache['device_path_checksums'].get(device_path)
if cache_entry and cache_entry[0] == ignore_other_files:
return dict(cache_entry[1])
sums = md5sum.CalculateDeviceMd5Sums(specific_device_paths, self)
cache_entry = [ignore_other_files, sums]
self._cache['device_path_checksums'][device_path] = cache_entry
return dict(sums)
host_checksums, device_checksums = reraiser_thread.RunAsync((
calculate_host_checksums,
calculate_device_checksums))
except EnvironmentError as e:
logger.warning('Error calculating md5: %s', e)
return ([(host_path, device_path)], [], [], lambda: 0)
to_push = []
up_to_date = []
to_delete = []
if os.path.isfile(host_path):
host_checksum = host_checksums.get(host_path)
device_checksum = device_checksums.get(device_path)
if host_checksum == device_checksum:
up_to_date.append(host_path)
else:
to_push.append((host_path, device_path))
else:
for host_abs_path, host_checksum in host_checksums.iteritems():
device_abs_path = posixpath.join(
device_path, os.path.relpath(host_abs_path, host_path))
device_checksum = device_checksums.pop(device_abs_path, None)
if device_checksum == host_checksum:
up_to_date.append(host_abs_path)
else:
to_push.append((host_abs_path, device_abs_path))
to_delete = device_checksums.keys()
def cache_commit_func():
new_sums = {posixpath.join(device_path, path[len(host_path) + 1:]): val
for path, val in host_checksums.iteritems()}
cache_entry = [ignore_other_files, new_sums]
self._cache['device_path_checksums'][device_path] = cache_entry
return (to_push, up_to_date, to_delete, cache_commit_func)
def _ComputeDeviceChecksumsForApks(self, package_name):
ret = self._cache['package_apk_checksums'].get(package_name)
if ret is None:
device_paths = self._GetApplicationPathsInternal(package_name)
file_to_checksums = md5sum.CalculateDeviceMd5Sums(device_paths, self)
ret = set(file_to_checksums.values())
self._cache['package_apk_checksums'][package_name] = ret
return ret
def _ComputeStaleApks(self, package_name, host_apk_paths):
def calculate_host_checksums():
return md5sum.CalculateHostMd5Sums(host_apk_paths)
def calculate_device_checksums():
return self._ComputeDeviceChecksumsForApks(package_name)
host_checksums, device_checksums = reraiser_thread.RunAsync((
calculate_host_checksums, calculate_device_checksums))
stale_apks = [k for (k, v) in host_checksums.iteritems()
if v not in device_checksums]
return stale_apks, set(host_checksums.values())
def _PushFilesImpl(self, host_device_tuples, files):
if not files:
return
size = sum(host_utils.GetRecursiveDiskUsage(h) for h, _ in files)
file_count = len(files)
dir_size = sum(host_utils.GetRecursiveDiskUsage(h)
for h, _ in host_device_tuples)
dir_file_count = 0
for h, _ in host_device_tuples:
if os.path.isdir(h):
dir_file_count += sum(len(f) for _r, _d, f in os.walk(h))
else:
dir_file_count += 1
push_duration = self._ApproximateDuration(
file_count, file_count, size, False)
dir_push_duration = self._ApproximateDuration(
len(host_device_tuples), dir_file_count, dir_size, False)
zip_duration = self._ApproximateDuration(1, 1, size, True)
if (dir_push_duration < push_duration and dir_push_duration < zip_duration
# TODO(jbudorick): Resume directory pushing once clients have switched
# to 1.0.36-compatible syntax.
and False):
self._PushChangedFilesIndividually(host_device_tuples)
elif push_duration < zip_duration:
self._PushChangedFilesIndividually(files)
elif self._commands_installed is False:
# Already tried and failed to install unzip command.
self._PushChangedFilesIndividually(files)
elif not self._PushChangedFilesZipped(
files, [d for _, d in host_device_tuples]):
self._PushChangedFilesIndividually(files)
def _MaybeInstallCommands(self):
if self._commands_installed is None:
try:
if not install_commands.Installed(self):
install_commands.InstallCommands(self)
self._commands_installed = True
except device_errors.CommandFailedError as e:
logger.warning('unzip not available: %s', str(e))
self._commands_installed = False
return self._commands_installed
@staticmethod
def _ApproximateDuration(adb_calls, file_count, byte_count, is_zipping):
# We approximate the time to push a set of files to a device as:
# t = c1 * a + c2 * f + c3 + b / c4 + b / (c5 * c6), where
# t: total time (sec)
# c1: adb call time delay (sec)
# a: number of times adb is called (unitless)
# c2: push time delay (sec)
# f: number of files pushed via adb (unitless)
# c3: zip time delay (sec)
# c4: zip rate (bytes/sec)
# b: total number of bytes (bytes)
# c5: transfer rate (bytes/sec)
# c6: compression ratio (unitless)
# All of these are approximations.
ADB_CALL_PENALTY = 0.1 # seconds
ADB_PUSH_PENALTY = 0.01 # seconds
ZIP_PENALTY = 2.0 # seconds
ZIP_RATE = 10000000.0 # bytes / second
TRANSFER_RATE = 2000000.0 # bytes / second
COMPRESSION_RATIO = 2.0 # unitless
adb_call_time = ADB_CALL_PENALTY * adb_calls
adb_push_setup_time = ADB_PUSH_PENALTY * file_count
if is_zipping:
zip_time = ZIP_PENALTY + byte_count / ZIP_RATE
transfer_time = byte_count / (TRANSFER_RATE * COMPRESSION_RATIO)
else:
zip_time = 0
transfer_time = byte_count / TRANSFER_RATE
return adb_call_time + adb_push_setup_time + zip_time + transfer_time
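# Worked example of the approximation above (illustrative numbers): pushing
# 100 changed files totalling 50 MB individually costs roughly
#   0.1 * 100 + 0.01 * 100 + 50e6 / 2e6 = 10 + 1 + 25 = 36 seconds,
# while zipping first costs roughly
#   0.1 + 0.01 + (2.0 + 50e6 / 1e7) + 50e6 / (2e6 * 2.0) ~= 19.6 seconds,
# which is why large transfers prefer the zipped path when the unzip helper
# is available on the device.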
def _PushChangedFilesIndividually(self, files):
for h, d in files:
self.adb.Push(h, d)
def _PushChangedFilesZipped(self, files, dirs):
if not self._MaybeInstallCommands():
return False
with tempfile_ext.NamedTemporaryDirectory() as working_dir:
zip_path = os.path.join(working_dir, 'tmp.zip')
try:
zip_utils.WriteZipFile(zip_path, files)
except zip_utils.ZipFailedError:
return False
self.NeedsSU()
with device_temp_file.DeviceTempFile(
self.adb, suffix='.zip') as device_temp:
self.adb.Push(zip_path, device_temp.name)
quoted_dirs = ' '.join(cmd_helper.SingleQuote(d) for d in dirs)
self.RunShellCommand(
'unzip %s&&chmod -R 777 %s' % (device_temp.name, quoted_dirs),
shell=True, as_root=True,
env={'PATH': '%s:$PATH' % install_commands.BIN_DIR},
check_return=True)
return True
# TODO(nednguyen): remove this and migrate the callsite to PathExists().
@decorators.WithTimeoutAndRetriesFromInstance()
def FileExists(self, device_path, timeout=None, retries=None):
"""Checks whether the given file exists on the device.
Arguments are the same as PathExists.
"""
return self.PathExists(device_path, timeout=timeout, retries=retries)
@decorators.WithTimeoutAndRetriesFromInstance()
def PathExists(self, device_paths, as_root=False, timeout=None, retries=None):
"""Checks whether the given path(s) exists on the device.
Args:
device_paths: A string containing the absolute path to the file on the
device, or an iterable of paths to check.
as_root: Whether root permissions should be use to check for the existence
of the given path(s).
timeout: timeout in seconds
retries: number of retries
Returns:
True if all given paths exist on the device, False otherwise.
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
paths = device_paths
if isinstance(paths, basestring):
paths = (paths,)
if not paths:
return True
cmd = ['test', '-e', paths[0]]
for p in paths[1:]:
cmd.extend(['-a', '-e', p])
try:
self.RunShellCommand(cmd, as_root=as_root, check_return=True,
timeout=timeout, retries=retries)
return True
except device_errors.CommandFailedError:
return False
@decorators.WithTimeoutAndRetriesFromInstance()
def RemovePath(self, device_path, force=False, recursive=False,
as_root=False, rename=False, timeout=None, retries=None):
"""Removes the given path(s) from the device.
Args:
device_path: A string containing the absolute path to the file on the
device, or an iterable of paths to check.
force: Whether to remove the path(s) with force (-f).
recursive: Whether to remove any directories in the path(s) recursively.
as_root: Whether root permissions should be use to remove the given
path(s).
rename: Whether to rename the path(s) before removing to help avoid
filesystem errors. See https://stackoverflow.com/questions/11539657
timeout: timeout in seconds
retries: number of retries
"""
def _RenamePath(path):
random_suffix = hex(random.randint(2 ** 12, 2 ** 16 - 1))[2:]
dest = '%s-%s' % (path, random_suffix)
try:
self.RunShellCommand(
['mv', path, dest], as_root=as_root, check_return=True)
return dest
except device_errors.AdbShellCommandFailedError:
# If it couldn't be moved, just try rm'ing the original path instead.
return path
args = ['rm']
if force:
args.append('-f')
if recursive:
args.append('-r')
if isinstance(device_path, basestring):
args.append(device_path if not rename else _RenamePath(device_path))
else:
args.extend(
device_path if not rename else [_RenamePath(p) for p in device_path])
self.RunShellCommand(args, as_root=as_root, check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def PullFile(self, device_path, host_path, timeout=None, retries=None):
"""Pull a file from the device.
Args:
device_path: A string containing the absolute path of the file to pull
from the device.
host_path: A string containing the absolute path of the destination on
the host.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
"""
# Create the base dir if it doesn't exist already
dirname = os.path.dirname(host_path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
self.adb.Pull(device_path, host_path)
def _ReadFileWithPull(self, device_path):
try:
d = tempfile.mkdtemp()
host_temp_path = os.path.join(d, 'tmp_ReadFileWithPull')
self.adb.Pull(device_path, host_temp_path)
with open(host_temp_path, 'r') as host_temp:
return host_temp.read()
finally:
if os.path.exists(d):
shutil.rmtree(d)
@decorators.WithTimeoutAndRetriesFromInstance()
def ReadFile(self, device_path, as_root=False, force_pull=False,
timeout=None, retries=None):
"""Reads the contents of a file from the device.
Args:
device_path: A string containing the absolute path of the file to read
from the device.
as_root: A boolean indicating whether the read should be executed with
root privileges.
force_pull: A boolean indicating whether to force the operation to be
performed by pulling a file from the device. The default is, when the
contents are short, to retrieve the contents using cat instead.
timeout: timeout in seconds
retries: number of retries
Returns:
The contents of |device_path| as a string. Contents are interpreted using
universal newlines, so the caller will see them encoded as '\n'. Also,
all lines will be terminated.
Raises:
AdbCommandFailedError if the file can't be read.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
def get_size(path):
return self.FileSize(path, as_root=as_root)
if (not force_pull
and 0 < get_size(device_path) <= self._MAX_ADB_OUTPUT_LENGTH):
return _JoinLines(self.RunShellCommand(
['cat', device_path], as_root=as_root, check_return=True))
elif as_root and self.NeedsSU():
with device_temp_file.DeviceTempFile(self.adb) as device_temp:
cmd = 'SRC=%s DEST=%s;cp "$SRC" "$DEST" && chmod 666 "$DEST"' % (
cmd_helper.SingleQuote(device_path),
cmd_helper.SingleQuote(device_temp.name))
self.RunShellCommand(cmd, shell=True, as_root=True, check_return=True)
return self._ReadFileWithPull(device_temp.name)
else:
return self._ReadFileWithPull(device_path)
def _WriteFileWithPush(self, device_path, contents):
with tempfile.NamedTemporaryFile() as host_temp:
host_temp.write(contents)
host_temp.flush()
self.adb.Push(host_temp.name, device_path)
@decorators.WithTimeoutAndRetriesFromInstance()
def WriteFile(self, device_path, contents, as_root=False, force_push=False,
timeout=None, retries=None):
"""Writes |contents| to a file on the device.
Args:
device_path: A string containing the absolute path to the file to write
on the device.
contents: A string containing the data to write to the device.
as_root: A boolean indicating whether the write should be executed with
root privileges (if available).
force_push: A boolean indicating whether to force the operation to be
performed by pushing a file to the device. The default is, when the
contents are short, to pass the contents using a shell script instead.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if the file could not be written on the device.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if not force_push and len(contents) < self._MAX_ADB_COMMAND_LENGTH:
# If the contents are small, for efficiency we write the contents with
# a shell command rather than pushing a file.
cmd = 'echo -n %s > %s' % (cmd_helper.SingleQuote(contents),
cmd_helper.SingleQuote(device_path))
self.RunShellCommand(cmd, shell=True, as_root=as_root, check_return=True)
elif as_root and self.NeedsSU():
# Adb does not allow to "push with su", so we first push to a temp file
# on a safe location, and then copy it to the desired location with su.
with device_temp_file.DeviceTempFile(self.adb) as device_temp:
self._WriteFileWithPush(device_temp.name, contents)
# Here we need 'cp' rather than 'mv' because the temp and
# destination files might be on different file systems (e.g.
# on internal storage and an external sd card).
self.RunShellCommand(['cp', device_temp.name, device_path],
as_root=True, check_return=True)
else:
# If root is not needed, we can push directly to the desired location.
self._WriteFileWithPush(device_path, contents)
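# Usage sketch (illustrative only, hypothetical path and contents): round-trip
# a small file using the two methods above.
#
#   device.WriteFile('/data/local/tmp/flags.txt', '--enable-logging')
#   contents = device.ReadFile('/data/local/tmp/flags.txt')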
def _ParseLongLsOutput(self, device_path, as_root=False, **kwargs):
"""Run and scrape the output of 'ls -a -l' on a device directory."""
device_path = posixpath.join(device_path, '') # Force trailing '/'.
output = self.RunShellCommand(
['ls', '-a', '-l', device_path], as_root=as_root,
check_return=True, env={'TZ': 'utc'}, **kwargs)
if output and output[0].startswith('total '):
output.pop(0) # pylint: disable=maybe-no-member
entries = []
for line in output:
m = _LONG_LS_OUTPUT_RE.match(line)
if m:
if m.group('filename') not in ['.', '..']:
item = m.groupdict()
# A change in toybox is causing recent Android versions to escape
# spaces in file names. Here we just unquote those spaces. If we
# later find more esoteric characters in file names, a more careful
# unquoting mechanism may be needed. But hopefully not.
# See: https://goo.gl/JAebZj
item['filename'] = item['filename'].replace('\\ ', ' ')
entries.append(item)
else:
logger.info('Skipping: %s', line)
return entries
def ListDirectory(self, device_path, as_root=False, **kwargs):
"""List all files on a device directory.
Mirroring os.listdir (and most client expectations) the resulting list
does not include the special entries '.' and '..' even if they are present
in the directory.
Args:
device_path: A string containing the path of the directory on the device
to list.
as_root: A boolean indicating whether to use root privileges to list
the directory contents.
timeout: timeout in seconds
retries: number of retries
Returns:
A list of filenames for all entries contained in the directory.
Raises:
AdbCommandFailedError if |device_path| does not specify a valid and
accessible directory in the device.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
entries = self._ParseLongLsOutput(device_path, as_root=as_root, **kwargs)
return [d['filename'] for d in entries]
def StatDirectory(self, device_path, as_root=False, **kwargs):
"""List file and stat info for all entries on a device directory.
Implementation notes: this is currently implemented by parsing the output
of 'ls -a -l' on the device. Where possible and convenient, we attempt to
make parsing strict and return values mirroring those of the standard |os|
and |stat| Python modules.
Mirroring os.listdir (and most client expectations) the resulting list
does not include the special entries '.' and '..' even if they are present
in the directory.
Args:
device_path: A string containing the path of the directory on the device
to list.
as_root: A boolean indicating whether to use root privileges to list
the directory contents.
timeout: timeout in seconds
retries: number of retries
Returns:
A list of dictionaries, each containing the following keys:
filename: A string with the file name.
st_mode: File permissions, use the stat module to interpret these.
st_nlink: Number of hard links (may be missing).
st_owner: A string with the user name of the owner.
st_group: A string with the group name of the owner.
st_rdev_pair: Device type as (major, minor) (only if inode device).
st_size: Size of file, in bytes (may be missing for non-regular files).
st_mtime: Time of most recent modification, in seconds since epoch
(although resolution is in minutes).
symbolic_link_to: If entry is a symbolic link, path where it points to;
missing otherwise.
Raises:
AdbCommandFailedError if |device_path| does not specify a valid and
accessible directory in the device.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
entries = self._ParseLongLsOutput(device_path, as_root=as_root, **kwargs)
for d in entries:
for key, value in d.items():
if value is None:
del d[key] # Remove missing fields.
d['st_mode'] = _ParseModeString(d['st_mode'])
d['st_mtime'] = calendar.timegm(
time.strptime(d['st_mtime'], _LS_DATE_FORMAT))
for key in ['st_nlink', 'st_size', 'st_rdev_major', 'st_rdev_minor']:
if key in d:
d[key] = int(d[key])
if 'st_rdev_major' in d and 'st_rdev_minor' in d:
d['st_rdev_pair'] = (d.pop('st_rdev_major'), d.pop('st_rdev_minor'))
return entries
def StatPath(self, device_path, as_root=False, **kwargs):
"""Get the stat attributes of a file or directory on the device.
Args:
device_path: A string containing the path of a file or directory from
which to get attributes.
as_root: A boolean indicating whether to use root privileges to
access the file information.
timeout: timeout in seconds
retries: number of retries
Returns:
A dictionary with the stat info collected; see StatDirectory for details.
Raises:
CommandFailedError if device_path cannot be found on the device.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
dirname, filename = posixpath.split(posixpath.normpath(device_path))
for entry in self.StatDirectory(dirname, as_root=as_root, **kwargs):
if entry['filename'] == filename:
return entry
raise device_errors.CommandFailedError(
'Cannot find file or directory: %r' % device_path, str(self))
def FileSize(self, device_path, as_root=False, **kwargs):
"""Get the size of a file on the device.
Note: This is implemented by parsing the output of the 'ls' command on
the device. On some Android versions, when passing a directory or special
file, the size is *not* reported and this function will throw an exception.
Args:
device_path: A string containing the path of a file on the device.
as_root: A boolean indicating whether to use root privileges to
access the file information.
timeout: timeout in seconds
retries: number of retries
Returns:
The size of the file in bytes.
Raises:
CommandFailedError if device_path cannot be found on the device, or
its size cannot be determined for some reason.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
entry = self.StatPath(device_path, as_root=as_root, **kwargs)
try:
return entry['st_size']
except KeyError:
raise device_errors.CommandFailedError(
'Could not determine the size of: %s' % device_path, str(self))
@decorators.WithTimeoutAndRetriesFromInstance()
def SetJavaAsserts(self, enabled, timeout=None, retries=None):
"""Enables or disables Java asserts.
Args:
enabled: A boolean indicating whether Java asserts should be enabled
or disabled.
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device-side property changed and a restart is required as a
result, False otherwise.
Raises:
CommandTimeoutError on timeout.
"""
def find_property(lines, property_name):
for index, line in enumerate(lines):
if line.strip() == '':
continue
key_value = tuple(s.strip() for s in line.split('=', 1))
if len(key_value) != 2:
continue
key, value = key_value
if key == property_name:
return index, value
return None, ''
new_value = 'all' if enabled else ''
# First ensure the desired property is persisted.
try:
properties = self.ReadFile(self.LOCAL_PROPERTIES_PATH).splitlines()
except device_errors.CommandFailedError:
properties = []
index, value = find_property(properties, self.JAVA_ASSERT_PROPERTY)
if new_value != value:
if new_value:
new_line = '%s=%s' % (self.JAVA_ASSERT_PROPERTY, new_value)
if index is None:
properties.append(new_line)
else:
properties[index] = new_line
else:
assert index is not None # since new_value == '' and new_value != value
properties.pop(index)
self.WriteFile(self.LOCAL_PROPERTIES_PATH, _JoinLines(properties))
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
value = self.GetProp(self.JAVA_ASSERT_PROPERTY)
if new_value != value:
self.SetProp(self.JAVA_ASSERT_PROPERTY, new_value)
return True
else:
return False
def GetLanguage(self, cache=False):
"""Returns the language setting on the device.
Args:
cache: Whether to use cached properties when available.
"""
return self.GetProp('persist.sys.language', cache=cache)
def GetCountry(self, cache=False):
"""Returns the country setting on the device.
Args:
cache: Whether to use cached properties when available.
"""
return self.GetProp('persist.sys.country', cache=cache)
@property
def screen_density(self):
"""Returns the screen density of the device."""
DPI_TO_DENSITY = {
120: 'ldpi',
160: 'mdpi',
240: 'hdpi',
320: 'xhdpi',
480: 'xxhdpi',
640: 'xxxhdpi',
}
return DPI_TO_DENSITY.get(self.pixel_density, 'tvdpi')
@property
def pixel_density(self):
return int(self.GetProp('ro.sf.lcd_density', cache=True))
@property
def build_description(self):
"""Returns the build description of the system.
For example:
nakasi-user 4.4.4 KTU84P 1227136 release-keys
"""
return self.GetProp('ro.build.description', cache=True)
@property
def build_fingerprint(self):
"""Returns the build fingerprint of the system.
For example:
google/nakasi/grouper:4.4.4/KTU84P/1227136:user/release-keys
"""
return self.GetProp('ro.build.fingerprint', cache=True)
@property
def build_id(self):
"""Returns the build ID of the system (e.g. 'KTU84P')."""
return self.GetProp('ro.build.id', cache=True)
@property
def build_product(self):
"""Returns the build product of the system (e.g. 'grouper')."""
return self.GetProp('ro.build.product', cache=True)
@property
def build_type(self):
"""Returns the build type of the system (e.g. 'user')."""
return self.GetProp('ro.build.type', cache=True)
@property
def build_version_sdk(self):
"""Returns the build version sdk of the system as a number (e.g. 19).
For version code numbers see:
http://developer.android.com/reference/android/os/Build.VERSION_CODES.html
For named constants see devil.android.sdk.version_codes
Raises:
CommandFailedError if the build version sdk is not a number.
"""
value = self.GetProp('ro.build.version.sdk', cache=True)
try:
return int(value)
except ValueError:
raise device_errors.CommandFailedError(
'Invalid build version sdk: %r' % value)
@property
def product_cpu_abi(self):
"""Returns the product cpu abi of the device (e.g. 'armeabi-v7a')."""
return self.GetProp('ro.product.cpu.abi', cache=True)
@property
def product_model(self):
"""Returns the name of the product model (e.g. 'Nexus 7')."""
return self.GetProp('ro.product.model', cache=True)
@property
def product_name(self):
"""Returns the product name of the device (e.g. 'nakasi')."""
return self.GetProp('ro.product.name', cache=True)
@property
def product_board(self):
"""Returns the product board name of the device (e.g. 'shamu')."""
return self.GetProp('ro.product.board', cache=True)
def _EnsureCacheInitialized(self):
"""Populates cache token, runs getprop and fetches $EXTERNAL_STORAGE."""
if self._cache['token']:
return
with self._cache_lock:
if self._cache['token']:
return
# Change the token every time to ensure that it will match only the
# previously dumped cache.
token = str(uuid.uuid1())
cmd = (
'c=/data/local/tmp/cache_token;'
'echo $EXTERNAL_STORAGE;'
'cat $c 2>/dev/null||echo;'
'echo "%s">$c &&' % token +
'getprop'
)
output = self.RunShellCommand(
cmd, shell=True, check_return=True, large_output=True)
# Error-checking for this existing is done in GetExternalStoragePath().
self._cache['external_storage'] = output[0]
self._cache['prev_token'] = output[1]
output = output[2:]
prop_cache = self._cache['getprop']
prop_cache.clear()
for key, value in _GETPROP_RE.findall(''.join(output)):
prop_cache[key] = value
self._cache['token'] = token
@decorators.WithTimeoutAndRetriesFromInstance()
def GetProp(self, property_name, cache=False, timeout=None, retries=None):
"""Gets a property from the device.
Args:
property_name: A string containing the name of the property to get from
the device.
cache: Whether to use cached properties when available.
timeout: timeout in seconds
retries: number of retries
Returns:
The value of the device's |property_name| property.
Raises:
CommandTimeoutError on timeout.
"""
assert isinstance(property_name, basestring), (
"property_name is not a string: %r" % property_name)
if cache:
# It takes ~120ms to query a single property, and ~130ms to query all
# properties. So, when caching we always query all properties.
self._EnsureCacheInitialized()
else:
# timeout and retries are handled down at run shell, because we don't
# want to apply them in the other branch when reading from the cache
value = self.RunShellCommand(
['getprop', property_name], single_line=True, check_return=True,
timeout=timeout, retries=retries)
self._cache['getprop'][property_name] = value
# Non-existent properties are treated as empty strings by getprop.
return self._cache['getprop'].get(property_name, '')
@decorators.WithTimeoutAndRetriesFromInstance()
def SetProp(self, property_name, value, check=False, timeout=None,
retries=None):
"""Sets a property on the device.
Args:
property_name: A string containing the name of the property to set on
the device.
value: A string containing the value to set to the property on the
device.
check: A boolean indicating whether to check that the property was
successfully set on the device.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if check is true and the property was not correctly
set on the device (e.g. because it is not rooted).
CommandTimeoutError on timeout.
"""
assert isinstance(property_name, basestring), (
"property_name is not a string: %r" % property_name)
assert isinstance(value, basestring), "value is not a string: %r" % value
self.RunShellCommand(['setprop', property_name, value], check_return=True)
prop_cache = self._cache['getprop']
if property_name in prop_cache:
del prop_cache[property_name]
# TODO(perezju) remove the option and make the check mandatory, but using a
# single shell script to both set- and getprop.
if check and value != self.GetProp(property_name, cache=False):
raise device_errors.CommandFailedError(
'Unable to set property %r on the device to %r'
% (property_name, value), str(self))
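# Usage sketch (illustrative only; the writable property name below is
# hypothetical):
#
#   sdk = device.GetProp('ro.build.version.sdk', cache=True)
#   device.SetProp('debug.example.flag', '1', check=True)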
@decorators.WithTimeoutAndRetriesFromInstance()
def GetABI(self, timeout=None, retries=None):
"""Gets the device main ABI.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
The device's main ABI name.
Raises:
CommandTimeoutError on timeout.
"""
return self.GetProp('ro.product.cpu.abi', cache=True)
def _GetPsOutput(self, pattern):
"""Runs |ps| command on the device and returns its output,
This private method abstracts away differences between Android verions for
calling |ps|, and implements support for filtering the output by a given
|pattern|, but does not do any output parsing.
"""
try:
ps_cmd = 'ps'
# ps behavior was changed in Android above N, http://crbug.com/686716
if (self.build_version_sdk >= version_codes.NOUGAT_MR1
and self.build_id[0] > 'N'):
ps_cmd = 'ps -e'
if pattern:
return self._RunPipedShellCommand(
'%s | grep -F %s' % (ps_cmd, cmd_helper.SingleQuote(pattern)))
else:
return self.RunShellCommand(
ps_cmd.split(), check_return=True, large_output=True)
except device_errors.AdbShellCommandFailedError as e:
if e.status and isinstance(e.status, list) and not e.status[0]:
# If ps succeeded but grep failed, there were no processes with the
# given name.
return []
else:
raise
@decorators.WithTimeoutAndRetriesFromInstance()
def ListProcesses(self, process_name=None, timeout=None, retries=None):
"""Returns a list of tuples with info about processes on the device.
This essentially parses the output of the |ps| command into convenient
ProcessInfo tuples.
Args:
process_name: A string used to filter the returned processes. If given,
only processes whose name have this value as a substring
will be returned.
timeout: timeout in seconds
retries: number of retries
Returns:
A list of ProcessInfo tuples with |name|, |pid|, and |ppid| fields.
"""
process_name = process_name or ''
processes = []
for line in self._GetPsOutput(process_name):
row = line.split()
try:
row = {k: row[i] for k, i in _PS_COLUMNS.iteritems()}
if row['pid'] == 'PID' or process_name not in row['name']:
# Skip over header and non-matching processes.
continue
row['pid'] = int(row['pid'])
row['ppid'] = int(row['ppid'])
except StandardError: # e.g. IndexError, TypeError, ValueError.
logging.warning('failed to parse ps line: %r', line)
continue
processes.append(ProcessInfo(**row))
return processes
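# Usage sketch (illustrative only, hypothetical process name): log the pid
# and parent pid of every matching process.
#
#   for proc in device.ListProcesses('com.example.app'):
#     logger.info('%s: pid=%d ppid=%d', proc.name, proc.pid, proc.ppid)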
# TODO(#4103): Remove after migrating clients to ListProcesses.
@decorators.WithTimeoutAndRetriesFromInstance()
def GetPids(self, process_name=None, timeout=None, retries=None):
"""Returns the PIDs of processes containing the given name as substring.
DEPRECATED
Note that the |process_name| is often the package name.
Args:
process_name: A string containing the process name to get the PIDs for.
If missing returns PIDs for all processes.
timeout: timeout in seconds
retries: number of retries
Returns:
A dict mapping process name to a list of PIDs for each process that
contained the provided |process_name|.
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
procs_pids = collections.defaultdict(list)
for p in self.ListProcesses(process_name):
procs_pids[p.name].append(str(p.pid))
return procs_pids
@decorators.WithTimeoutAndRetriesFromInstance()
def GetApplicationPids(self, process_name, at_most_one=False,
timeout=None, retries=None):
"""Returns the PID or PIDs of a given process name.
Note that the |process_name|, often the package name, must match exactly.
Args:
process_name: A string containing the process name to get the PIDs for.
at_most_one: A boolean indicating that at most one PID is expected to
be found.
timeout: timeout in seconds
retries: number of retries
Returns:
A list of the PIDs for the named process. If at_most_one=True returns
the single PID found or None otherwise.
Raises:
CommandFailedError if at_most_one=True and more than one PID is found
for the named process.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
pids = [p.pid for p in self.ListProcesses(process_name)
if p.name == process_name]
if at_most_one:
if len(pids) > 1:
raise device_errors.CommandFailedError(
'Expected a single PID for %r but found: %r.' % (
process_name, pids),
device_serial=str(self))
return pids[0] if pids else None
else:
return pids
@decorators.WithTimeoutAndRetriesFromInstance()
def GetEnforce(self, timeout=None, retries=None):
"""Get the current mode of SELinux.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True (enforcing), False (permissive), or None (disabled).
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
output = self.RunShellCommand(
['getenforce'], check_return=True, single_line=True).lower()
if output not in _SELINUX_MODE:
raise device_errors.CommandFailedError(
'Unexpected getenforce output: %s' % output)
return _SELINUX_MODE[output]
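  # Illustrative usage of the GetEnforce/SetEnforce pair, assuming `device` is
  # a connected DeviceUtils instance:
  #   if device.GetEnforce():             # True means SELinux is enforcing
  #     device.SetEnforce(enabled=False)  # switch to permissive mode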
@decorators.WithTimeoutAndRetriesFromInstance()
def SetEnforce(self, enabled, timeout=None, retries=None):
"""Modify the mode SELinux is running in.
Args:
      enabled: a boolean indicating whether to put SELinux in enforcing mode
(if True), or permissive mode (otherwise).
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
self.RunShellCommand(
['setenforce', '1' if int(enabled) else '0'], as_root=True,
check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def TakeScreenshot(self, host_path=None, timeout=None, retries=None):
"""Takes a screenshot of the device.
Args:
host_path: A string containing the path on the host to save the
screenshot to. If None, a file name in the current
directory will be generated.
timeout: timeout in seconds
retries: number of retries
Returns:
The name of the file on the host to which the screenshot was saved.
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if not host_path:
host_path = os.path.abspath('screenshot-%s-%s.png' % (
self.serial, _GetTimeStamp()))
with device_temp_file.DeviceTempFile(self.adb, suffix='.png') as device_tmp:
self.RunShellCommand(['/system/bin/screencap', '-p', device_tmp.name],
check_return=True)
self.PullFile(device_tmp.name, host_path)
return host_path
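  # Illustrative usage, assuming `device` is a connected DeviceUtils instance:
  #   path = device.TakeScreenshot()                      # generated name in CWD
  #   path = device.TakeScreenshot('/tmp/home_screen.png')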
@decorators.WithTimeoutAndRetriesFromInstance()
def DismissCrashDialogIfNeeded(self, timeout=None, retries=None):
"""Dismiss the error/ANR dialog if present.
Returns: Name of the crashed package if a dialog is focused,
None otherwise.
"""
def _FindFocusedWindow():
match = None
# TODO(jbudorick): Try to grep the output on the device instead of using
# large_output if/when DeviceUtils exposes a public interface for piped
# shell command handling.
for line in self.RunShellCommand(['dumpsys', 'window', 'windows'],
check_return=True, large_output=True):
match = re.match(_CURRENT_FOCUS_CRASH_RE, line)
if match:
break
return match
match = _FindFocusedWindow()
if not match:
return None
package = match.group(2)
logger.warning('Trying to dismiss %s dialog for %s', *match.groups())
self.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(keyevent.KEYCODE_ENTER)
match = _FindFocusedWindow()
if match:
logger.error('Still showing a %s dialog for %s', *match.groups())
return package
def GetLogcatMonitor(self, *args, **kwargs):
"""Returns a new LogcatMonitor associated with this device.
Parameters passed to this function are passed directly to
|logcat_monitor.LogcatMonitor| and are documented there.
"""
return logcat_monitor.LogcatMonitor(self.adb, *args, **kwargs)
def GetClientCache(self, client_name):
"""Returns client cache."""
if client_name not in self._client_caches:
self._client_caches[client_name] = {}
return self._client_caches[client_name]
def _ClearCache(self):
"""Clears all caches."""
for client in self._client_caches:
self._client_caches[client].clear()
self._cache = {
# Map of packageId -> list of on-device .apk paths
'package_apk_paths': {},
# Set of packageId that were loaded from LoadCacheData and not yet
# verified.
'package_apk_paths_to_verify': set(),
# Map of packageId -> set of on-device .apk checksums
'package_apk_checksums': {},
# Map of property_name -> value
'getprop': {},
# Map of device_path -> [ignore_other_files, map of path->checksum]
'device_path_checksums': {},
# Location of sdcard ($EXTERNAL_STORAGE).
'external_storage': None,
# Token used to detect when LoadCacheData is stale.
'token': None,
'prev_token': None,
}
@decorators.WithTimeoutAndRetriesFromInstance()
def LoadCacheData(self, data, timeout=None, retries=None):
"""Initializes the cache from data created using DumpCacheData.
The cache is used only if its token matches the one found on the device.
This prevents a stale cache from being used (which can happen when sharing
devices).
Args:
data: A previously serialized cache (string).
timeout: timeout in seconds
retries: number of retries
Returns:
Whether the cache was loaded.
"""
obj = json.loads(data)
self._EnsureCacheInitialized()
given_token = obj.get('token')
if not given_token or self._cache['prev_token'] != given_token:
logger.warning('Stale cache detected. Not using it.')
return False
self._cache['package_apk_paths'] = obj.get('package_apk_paths', {})
    # When using a cache across script invocations, verify that apps have
# not been uninstalled.
self._cache['package_apk_paths_to_verify'] = set(
self._cache['package_apk_paths'].iterkeys())
package_apk_checksums = obj.get('package_apk_checksums', {})
for k, v in package_apk_checksums.iteritems():
package_apk_checksums[k] = set(v)
self._cache['package_apk_checksums'] = package_apk_checksums
device_path_checksums = obj.get('device_path_checksums', {})
self._cache['device_path_checksums'] = device_path_checksums
return True
@decorators.WithTimeoutAndRetriesFromInstance()
def DumpCacheData(self, timeout=None, retries=None):
"""Dumps the current cache state to a string.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
A serialized cache as a string.
"""
self._EnsureCacheInitialized()
obj = {}
obj['token'] = self._cache['token']
obj['package_apk_paths'] = self._cache['package_apk_paths']
obj['package_apk_checksums'] = self._cache['package_apk_checksums']
# JSON can't handle sets.
for k, v in obj['package_apk_checksums'].iteritems():
obj['package_apk_checksums'][k] = list(v)
obj['device_path_checksums'] = self._cache['device_path_checksums']
return json.dumps(obj, separators=(',', ':'))
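  # Illustrative round trip, assuming `device` is a connected DeviceUtils
  # instance: the dumped string can be persisted and handed back to
  # LoadCacheData later; it is only reused while the token still matches.
  #   blob = device.DumpCacheData()
  #   # ... store blob, start a new invocation, read it back ...
  #   was_used = device.LoadCacheData(blob)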
@classmethod
def parallel(cls, devices, async=False):
"""Creates a Parallelizer to operate over the provided list of devices.
Args:
devices: A list of either DeviceUtils instances or objects from
from which DeviceUtils instances can be constructed. If None,
all attached devices will be used.
async: If true, returns a Parallelizer that runs operations
asynchronously.
Returns:
A Parallelizer operating over |devices|.
"""
devices = [d if isinstance(d, cls) else cls(d) for d in devices]
if async:
return parallelizer.Parallelizer(devices)
else:
return parallelizer.SyncParallelizer(devices)
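  # Illustrative usage: any DeviceUtils method can be fanned out over several
  # devices through the returned Parallelizer.
  #   devices = DeviceUtils.HealthyDevices()
  #   DeviceUtils.parallel(devices).RunShellCommand(['echo', 'ready'], check_return=True)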
@classmethod
def HealthyDevices(cls, blacklist=None, device_arg='default', retry=True,
**kwargs):
"""Returns a list of DeviceUtils instances.
Returns a list of DeviceUtils instances that are attached, not blacklisted,
and optionally filtered by --device flags or ANDROID_SERIAL environment
variable.
Args:
blacklist: A DeviceBlacklist instance (optional). Device serials in this
blacklist will never be returned, but a warning will be logged if they
otherwise would have been.
device_arg: The value of the --device flag. This can be:
'default' -> Same as [], but returns an empty list rather than raise a
NoDevicesError.
[] -> Returns all devices, unless $ANDROID_SERIAL is set.
None -> Use $ANDROID_SERIAL if set, otherwise looks for a single
attached device. Raises an exception if multiple devices are
attached.
'serial' -> Returns an instance for the given serial, if not
blacklisted.
['A', 'B', ...] -> Returns instances for the subset that is not
blacklisted.
retry: If true, will attempt to restart adb server and query it again if
no devices are found.
Returns:
A list of DeviceUtils instances.
Raises:
NoDevicesError: Raised when no non-blacklisted devices exist and
device_arg is passed.
MultipleDevicesError: Raise when multiple devices exist, but |device_arg|
is None.
"""
allow_no_devices = False
if device_arg == 'default':
allow_no_devices = True
device_arg = ()
select_multiple = True
if not (isinstance(device_arg, tuple) or isinstance(device_arg, list)):
select_multiple = False
if device_arg:
device_arg = (device_arg,)
blacklisted_devices = blacklist.Read() if blacklist else []
# adb looks for ANDROID_SERIAL, so support it as well.
android_serial = os.environ.get('ANDROID_SERIAL')
if not device_arg and android_serial:
device_arg = (android_serial,)
def blacklisted(serial):
if serial in blacklisted_devices:
logger.warning('Device %s is blacklisted.', serial)
return True
return False
def _get_devices():
if device_arg:
devices = [cls(x, **kwargs) for x in device_arg if not blacklisted(x)]
else:
devices = []
for adb in adb_wrapper.AdbWrapper.Devices():
if not blacklisted(adb.GetDeviceSerial()):
devices.append(cls(_CreateAdbWrapper(adb), **kwargs))
if len(devices) == 0 and not allow_no_devices:
raise device_errors.NoDevicesError()
if len(devices) > 1 and not select_multiple:
raise device_errors.MultipleDevicesError(devices)
return sorted(devices)
try:
return _get_devices()
except device_errors.NoDevicesError:
if not retry:
raise
logger.warning(
'No devices found. Will try again after restarting adb server.')
RestartServer()
return _get_devices()
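  # Illustrative usage (the serial below is only an example):
  #   devices = DeviceUtils.HealthyDevices()              # all attached devices
  #   one = DeviceUtils.HealthyDevices(device_arg='0123456789abcdef')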
@decorators.WithTimeoutAndRetriesFromInstance()
def RestartAdbd(self, timeout=None, retries=None):
logger.info('Restarting adbd on device.')
with device_temp_file.DeviceTempFile(self.adb, suffix='.sh') as script:
self.WriteFile(script.name, _RESTART_ADBD_SCRIPT)
self.RunShellCommand(
['source', script.name], check_return=True, as_root=True)
self.adb.WaitForDevice()
@decorators.WithTimeoutAndRetriesFromInstance()
def GrantPermissions(self, package, permissions, timeout=None, retries=None):
# Permissions only need to be set on M and above because of the changes to
# the permission model.
if not permissions or self.build_version_sdk < version_codes.MARSHMALLOW:
return
permissions = set(
p for p in permissions if not _PERMISSIONS_BLACKLIST_RE.match(p))
if ('android.permission.WRITE_EXTERNAL_STORAGE' in permissions
and 'android.permission.READ_EXTERNAL_STORAGE' not in permissions):
permissions.add('android.permission.READ_EXTERNAL_STORAGE')
script = ';'.join([
'p={package}',
'for q in {permissions}',
'do pm grant "$p" "$q"',
'echo "{sep}$q{sep}$?{sep}"',
'done'
]).format(
package=cmd_helper.SingleQuote(package),
permissions=' '.join(
cmd_helper.SingleQuote(p) for p in sorted(permissions)),
sep=_SHELL_OUTPUT_SEPARATOR)
logger.info('Setting permissions for %s.', package)
res = self.RunShellCommand(
script, shell=True, raw_output=True, large_output=True,
check_return=True)
res = res.split(_SHELL_OUTPUT_SEPARATOR)
failures = [
(permission, output.strip())
for permission, status, output in zip(res[1::3], res[2::3], res[0::3])
if int(status)]
if failures:
logger.warning(
'Failed to grant some permissions. Blacklist may need to be updated?')
for permission, output in failures:
# Try to grab the relevant error message from the output.
m = _PERMISSIONS_EXCEPTION_RE.search(output)
if m:
error_msg = m.group(0)
elif len(output) > 200:
error_msg = repr(output[:200]) + ' (truncated)'
else:
error_msg = repr(output)
logger.warning('- %s: %s', permission, error_msg)
@decorators.WithTimeoutAndRetriesFromInstance()
def IsScreenOn(self, timeout=None, retries=None):
"""Determines if screen is on.
    Dumpsys input_method exposes screen on/off state. Below is an explanation of
the states.
Pre-L:
On: mScreenOn=true
Off: mScreenOn=false
L+:
On: mInteractive=true
Off: mInteractive=false
Returns:
True if screen is on, false if it is off.
Raises:
device_errors.CommandFailedError: If screen state cannot be found.
"""
if self.build_version_sdk < version_codes.LOLLIPOP:
input_check = 'mScreenOn'
check_value = 'mScreenOn=true'
else:
input_check = 'mInteractive'
check_value = 'mInteractive=true'
dumpsys_out = self._RunPipedShellCommand(
'dumpsys input_method | grep %s' % input_check)
if not dumpsys_out:
raise device_errors.CommandFailedError(
'Unable to detect screen state', str(self))
return check_value in dumpsys_out[0]
@decorators.WithTimeoutAndRetriesFromInstance()
def SetScreen(self, on, timeout=None, retries=None):
"""Turns screen on and off.
Args:
on: bool to decide state to switch to. True = on False = off.
"""
def screen_test():
return self.IsScreenOn() == on
if screen_test():
logger.info('Screen already in expected state.')
return
self.SendKeyEvent(keyevent.KEYCODE_POWER)
timeout_retry.WaitFor(screen_test, wait_period=1)
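  # Illustrative usage, assuming `device` is a connected DeviceUtils instance:
  #   if not device.IsScreenOn():
  #     device.SetScreen(on=True)   # wakes the device with a power key event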
| [
"[email protected]"
] | |
c9a49f1bc21897f0928af4cf213aeb52abd7faa1 | 694d57c3e512ce916269411b51adef23532420cd | /leetcode/325maximum_size_subarray_sum_equals_k.py | 9fa88d782d3c6059f19e3c7ea9b11bb698fabf6f | [] | no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | class Solution(object):
# brute-force solution, TLE
def maxSubArrayLen(self, nums, k):
max_len = 0
l = len(nums)
for start in xrange(l):
cur_total = 0
for end in xrange(start, l):
cur_total += nums[end]
if cur_total == k:
max_len = max(max_len, end - start + 1)
return max_len
    # solution 2: prefix sums with a hash map of first occurrences, O(n)
def maxSubArrayLen2(self, nums, k):
nums = [0] + nums
sum_record = {}
cur_total = 0
max_len = 0
for j in xrange(len(nums)):
cur_total += nums[j]
pre_sum = cur_total - k
if pre_sum in sum_record:
max_len = max(max_len, j - sum_record[pre_sum])
if cur_total not in sum_record:
sum_record[cur_total] = j
return max_len
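    # Note on solution 2: it keeps a running prefix sum and records the first
    # index at which each prefix value occurs; if cur_total - k was seen at
    # index i, then nums[i+1..j] sums to k and the window length is j - i.
    # For nums = [1, -1, 5, -2, 3] and k = 3, both methods return 4.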
if __name__ == "__main__":
sol = Solution()
nums = [1,-1,5,-2,3]
print sol.maxSubArrayLen(nums, 3)
print sol.maxSubArrayLen2(nums, 3)
| [
"[email protected]"
] | |
bae4523c4207c1bbe212d24575d269a923c31e3d | 96b0473aedcbcd2c656d3c86819a93c5c90c2c9f | /PySideTutorials/modules/QCompleter/QCompleter_custom completer rules.py | 72b79e179e2b970cc1f1ca14892a9a8db44c2683 | [] | no_license | mikebourbeauart/Tutorials | e2e1c48f4268b75bf6add1c4aee286b83886175b | 8a70f0563198bee96fedacfc94d283647f6ccbc7 | refs/heads/master | 2020-04-06T09:03:55.563189 | 2018-02-23T17:02:23 | 2018-02-23T17:02:23 | 64,701,663 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,403 | py | ########################################################################################################################
#
# mb_pandora
# By Mike Bourbeau
# mikebourbeau.com
# 2015
#
# Big thanks to Chris Zurbrigg (especially for taking the time to answer all my questions and steer me in the right direction),
# Jeremy Ernst, Cesar Saez, and all other tutorial makers/educators online
#
########################################################################################################################
from PySide import QtCore, QtGui
from shiboken import wrapInstance
import maya.OpenMayaUI as mui
import maya.OpenMaya as om
import maya.cmds as mc
import maya.mel as mel
import inspect
def get_parent():
ptr = mui.MQtUtil.mainWindow()
return wrapInstance( long( ptr ), QtGui.QWidget )
def show():
m = PandoraUI(parent=get_parent())
m.exec_()
del m
########################################################################################################################
class PandoraUI( QtGui.QDialog ):
''' Create the text field that the user types into '''
def __init__( self, parent=get_parent() ):
super( PandoraUI, self ).__init__( )
# Commands
self.move_UI()
self.create_gui()
self.create_layout()
self.create_connections()
self.setAttribute( QtCore.Qt.WA_DeleteOnClose )
def move_UI(self):
''' Moves the UI to the cursor's position '''
pos = QtGui.QCursor.pos()
self.move(pos.x()-100, pos.y()+25)
self.setFixedSize(230, 30)
def create_gui( self ):
''' Visible GUI '''
# Hide window stuff
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
# Line edit
self.line_edit = Line_Edit(parent=self)
# Completer AKA view
self.completer = QtGui.QCompleter(self)
self.completer.setCompletionMode(QtGui.QCompleter.UnfilteredPopupCompletion)
self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.completer.setMaxVisibleItems(15)
# ProxyModel
self.pFilterModel = QtGui.QSortFilterProxyModel(self)
self.pFilterModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
# Set completer
self.line_edit.setCompleter(self.completer)
def set_model(self, model):
''' Set model '''
self.pFilterModel.setSourceModel(model)
self.completer.setModel(self.pFilterModel)
self.completer.setModelSorting(QtGui.QCompleter.CaseInsensitivelySortedModel)
def create_layout( self ):
''' Create layout '''
main_layout = QtGui.QVBoxLayout(self)
main_layout.addWidget( self.line_edit )
main_layout.totalMaximumSize()
main_layout.setContentsMargins(2,2,2,2)
self.setLayout( main_layout )
def create_connections( self ):
''' Connections '''
self.line_edit.textEdited[unicode].connect(self.pFilterModel.setFilterFixedString)
self.line_edit.returnPressed.connect( self.on_text_edited )
self.line_edit.esc_pressed.connect( self.on_esc_press )
self.line_edit.tab_pressed.connect( self.on_text_edited )
self.line_edit.mouse_pressed.connect( self.on_esc_press )
####################################################################################################################
## SLOTS
####################################################################################################################
def on_text_edited(self):
''' Run this when text is edited to execute a command '''
command_list = []
for name, data in inspect.getmembers(mc, callable):
command_list.append(name)
command = self.line_edit.text()
if len( command ):
if command in command_list:
mel_command = mel.eval( "{0}".format( command ) )
self.close()
else:
om.MGlobal.displayError("Not a valid command")
else:
om.MGlobal.displayInfo("")
self.close()
def on_esc_press(self):
''' Close the UI '''
om.MGlobal.displayInfo("")
self.close()
########################################################################################################################
class Line_Edit( QtGui.QLineEdit ):
''' Create the QLineEdit '''
# Signal Variables
esc_pressed = QtCore.Signal(str)
esc_signal_str = "escPressed"
tab_pressed = QtCore.Signal(str)
tab_signal_str = "tabPressed"
mouse_pressed = QtCore.Signal(str)
mouse_signal_str = "mousePressed"
def __init__(self, parent=None):
super( Line_Edit, self ).__init__( )
# Sizing the line edit
self.setFixedHeight(25)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
self.setFont(font)
# Custom Signals
def event(self, event):
if (event.type()==QtCore.QEvent.KeyPress):
if (event.key()==QtCore.Qt.Key_Escape):
self.esc_pressed.emit(self.esc_signal_str)
return True
if (event.key()==QtCore.Qt.Key_Tab):
self.tab_pressed.emit(self.tab_signal_str)
return True
return QtGui.QLineEdit.event(self, event)
if (event.type()==QtCore.QEvent.FocusOut):
self.mouse_pressed.emit(self.mouse_signal_str)
return True
return QtGui.QLineEdit.event(self, event)
########################################################################################################################
if __name__ == "__main__":
#def run():
# Development stuff
try:
pandora_ui.close()
pandora_ui.deleteLater()
except:
pass
# Get commands
command_list = []
for name, data in inspect.getmembers(mc, callable):
command_list.append(name)
# Set items in model
model = QtGui.QStandardItemModel()
for i,word in enumerate(command_list):
item = QtGui.QStandardItem(word)
model.setItem(i, item)
# Action stuff
pandora_ui = PandoraUI()
pandora_ui.show()
pandora_ui.set_model(model)
# Development stuff
try:
pandora_ui.show()
except:
pandora_ui.close()
pandora_ui.deleteLater()
| [
"[email protected]"
] | |
e8a1cb972b55943c03fdd95bef0099a28dbfafdd | 79bbc2bf3a12c463bf497a68163d4a15a9290a3f | /src/HackerRank/Sherlock and Anagram.py | d2f7bb8ce1d2302d06336657e995d68b1a676635 | [] | no_license | melodist/CodingPractice | f5cf614f3be07211d0afe02141b7f6848abeaa12 | 96f56c8b4779a8616e6c9b7ad570010e85c89b68 | refs/heads/master | 2023-09-01T08:02:43.090395 | 2023-08-27T13:33:53 | 2023-08-27T13:33:53 | 238,710,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | """
https://www.hackerrank.com/challenges/sherlock-and-anagrams/problem
Using Hashmap
Brute Force Problem
"""
from collections import Counter
def sherlockAndAnagrams(s):
answer = 0
for i in range(1, len(s)):
maps = Counter()
for j in range(0, len(s)-i+1):
maps[''.join(sorted(s[j:i+j]))] += 1
for key in maps.keys():
if maps[key] > 1:
answer += (maps[key] - 1) * maps[key] // 2
return answer
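# Note: for each substring length, substrings are grouped by their sorted
# letters; a group of k identical keys contributes k*(k-1)/2 anagram pairs.
# For example, sherlockAndAnagrams("abba") == 4.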
| [
"[email protected]"
] | |
9d4fa83496af7ba7a0863416391737f608a652e4 | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python3/423.Reconstruct Original Digits from English(从英文中重建数字).py | a00db04715d3f9ea835ea9eaf4b101bbb57ff1fc | [
"MIT"
] | permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | """
Given a non-empty string containing an out-of-order English representation of
digits 0-9, output the digits in ascending order.
Note:
    1. Input contains only lowercase English letters.
    2. Input is guaranteed to be valid and can be transformed to its original
       digits. That means invalid inputs such as "abc" or "zerone" are not permitted.
    3. Input length is less than 50,000.
Example 1:
    Input:  "owoztneoer"
    Output: "012"  (zero one two)
Example 2:
    Input:  "fviefuro"
    Output: "45"  (four five)
"""
import collections
class Solution:
    def originalDigits(self, s):
        """
        :type s: str
        :rtype: str
        """
        # Letters 'z', 'w', 'u', 'x', 'g' each occur in exactly one digit word,
        # which pins down the even digits; the odd digits follow by subtraction.
        c = collections.Counter(s)
        n = [0] * 10
        n[0], n[2], n[4], n[6], n[8] = c['z'], c['w'], c['u'], c['x'], c['g']
        n[3], n[5], n[7] = c['h'] - n[8], c['f'] - n[4], c['s'] - n[6]
        n[1] = c['o'] - n[0] - n[2] - n[4]
        n[9] = c['i'] - n[5] - n[6] - n[8]
        return ''.join(str(d) * n[d] for d in range(10))
| [
"[email protected]"
] | |
439d9d3f2ddb0c86de1e761c7094748324c59a5f | 41e2d689522c6929332e36056f651158db59837e | /roman_dictionary.py | dde7d220964b381ed2ffc6d9ec9bd4c7b88842af | [] | no_license | RichardAfolabi/Scientific-Python | 14d39bffe8726dcee83e1a23d35cdb48e0076c3a | efebf6dd40af192b82931b1181299408dd8fde36 | refs/heads/master | 2016-09-02T04:07:45.486906 | 2015-07-10T18:51:14 | 2015-07-10T18:51:14 | 28,725,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | """
Roman Dictionary
----------------
Mark Antony keeps a list of the people he knows in several dictionaries
based on their relationship to him::
friends = {'julius': '100 via apian', 'cleopatra': '000 pyramid parkway'}
romans = dict(brutus='234 via tratorium', cassius='111 aqueduct lane')
countrymen = dict([('plebius','786 via bunius'),
('plebia', '786 via bunius')])
1. Print out the names for all of Antony's friends.
2. Now all of their addresses.
3. Now print them as "pairs".
4. Hmmm. Something unfortunate befell Julius. Remove him from the
friends list.
5. Antony needs to mail everyone for his second-triumvirate party. Make
a single dictionary containing everyone.
6. Antony's stopping over in Egypt and wants to swing by Cleopatra's
place while he is there. Get her address.
7. The barbarian hordes have invaded and destroyed all of Rome.
Clear out everyone from the dictionary.
See :ref:`roman-dictionary-solution`.
"""
friends = {'julius': '100 via apian', 'cleopatra': '000 pyramid parkway'}
romans = dict(brutus='234 via tratorium', cassius='111 aqueduct lane')
countrymen = dict([('plebius','786 via bunius'), ('plebia', '786 via bunius')])
# Names of all friends
#print(friends.keys() + romans.keys() + countrymen.keys())
#Addresses of all friends
#print(friends.values() + romans.values() + countrymen.values())
#Pairs of names and addresses
all_friends = friends.items() + romans.items() + countrymen.items()
print(all_friends)
print(all_friends.pop(0) )
#Dictionary of all
friends_dict = dict(all_friends)
print(friends_dict)
#Cleopatra's address
print(friends_dict['cleopatra'])
friends_dict.pop('plebia')
print(friends_dict)
del friends_dict['brutus']
print(friends_dict)
friends_dict.clear()
print(friends_dict) | [
"[email protected]"
] | |
c67ded67faf5fa7a4bf3cfe4d6682b5b23e2af51 | b3e3284f3d7b66f237e60fdfb1a37db706363139 | /RST/app/traslado/urls.py | 4a8a4b104ca3de309c7847f59ccd00a33601af8e | [] | no_license | corporacionrst/administracion | 4caf1545c313eb36408850bb4506bbd0bf43d6e6 | 7405442b4f14a589d75a5e04250be123403180ec | refs/heads/master | 2020-04-11T00:04:06.017147 | 2018-12-11T21:46:49 | 2018-12-11T21:46:49 | 161,374,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from django.urls import path
from .views import solicitar,tienda,stock,agregar,quitar,autorizar,pdf,listar
urlpatterns = [
path('solicitar/',solicitar.as_view(),name="solicitar"),
path('tienda/',tienda.as_view(),name="tienda"),
path('stock/',stock.as_view(),name="stock"),
path('agregar/',agregar.as_view(),name="agregar"),
path('quitar/',quitar.as_view(),name="quitar"),
path('pdf/<int:id>',pdf.as_view(),name="pdf"),
path('listar/',listar.as_view(),name="listar"),
path('autorizar/',autorizar.as_view(),name="autorizar")
]
| [
"[email protected]"
] | |
bfd72a4ff02cd2ec6f5484ca7cd5efbbc5d52771 | 857fc21a40aa32d2a57637de1c723e4ab51062ff | /CodingInterviews/python/38_tree_depth.py | 2941974551915121365ad463d71545d484d5ffee | [
"MIT"
] | permissive | YorkFish/git_study | efa0149f94623d685e005d58dbaef405ab91d541 | 6e023244daaa22e12b24e632e76a13e5066f2947 | refs/heads/master | 2021-06-21T18:46:50.906441 | 2020-12-25T14:04:04 | 2020-12-25T14:04:04 | 171,432,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python3
# coding:utf-8
from helper import TreeNode
class Solution:
def TreeDepth(self, pRoot):
if pRoot is None:
return 0
return max(self.TreeDepth(pRoot.left), self.TreeDepth(pRoot.right)) + 1
if __name__ == "__main__":
"""
1
/ \
2 3
/
4
"""
root = TreeNode(0)
n1 = TreeNode(1)
n2 = TreeNode(2)
n3 = TreeNode(3)
root.left = n1
root.right = n2
n1.left = n3
s = Solution()
ans = s.TreeDepth(root)
print(ans)
| [
"[email protected]"
] | |
18de57b350df43f697375c5b245e6dc014626709 | ca954595dd8c89d722b1da49f3bd7e56a1ad9dd2 | /ppo/ppo.py | f3cbde986bb28106969acf335d7d5682abd25567 | [] | no_license | HKU-ICRA/DM_ReinforcementLearning | 1184a16f83919ee00fb72f4797c8d4801ee62866 | dffe5aabd1e26d7ecc7b17452106015962551ebf | refs/heads/master | 2020-08-06T12:01:17.704384 | 2019-11-04T13:17:09 | 2019-11-04T13:17:09 | 212,969,083 | 3 | 1 | null | 2019-10-30T06:06:53 | 2019-10-05T08:42:15 | Python | UTF-8 | Python | false | false | 10,970 | py | import os, sys
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
try:
from mpi4py import MPI
except ImportError:
MPI = None
from runner import Runner
from model import Model
def constfn(val):
def f(_):
return val
return f
def learn(env, total_timesteps, nagents=2, eval_env=None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, save_dir=None, model_fn=None, update_fn=None, init_fn=None,
normalize_observations=True, normalize_returns=True,
mpi_rank_weight=1, comm=None, use_tensorboard=False, tb_log_dir=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of timesteps between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller or equal than number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of timesteps between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps * nagents
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
# Instantiate the model object (that creates act_model and train_model)
model_fn = Model
model = model_fn(ob_space=ob_space, ac_space=ac_space,
ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight,
normalize_observations=normalize_observations, normalize_returns=normalize_returns,
use_tensorboard=use_tensorboard, tb_log_dir=tb_log_dir)
if load_path is not None:
model.load(load_path)
'''
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
'''
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
epinfobuf = deque(maxlen=100)
#if eval_env is not None:
# eval_epinfobuf = deque(maxlen=100)
if init_fn is not None:
init_fn()
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, actions, returns, dones, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
#if eval_env is not None:
# eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
#if eval_env is not None:
# eval_epinfobuf.extend(eval_epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
'''
mblossvals = []
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
sl_obs = [obs[i] for i in mbflatinds]
sl_returns = np.array([returns[i] for i in mbflatinds])
sl_actions = [actions[i] for i in mbflatinds]
sl_dones = np.array([dones[i] for i in mbflatinds])
sl_values = np.array([values[i] for i in mbflatinds])
sl_neglogpacs = np.array([neglogpacs[i] for i in mbflatinds])
mbstates = states
mblossvals.append(model.train(lrnow, cliprangenow, sl_obs, sl_returns, sl_actions, sl_dones, sl_values, sl_neglogpacs, mbstates))
'''
mblossvals = []
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (returns, values, neglogpacs))
mbstates = states
slice_obs = {k: v[mbinds] for k, v in obs.items()}
slice_actions = {k: v[mbinds] for k, v in actions.items()}
mblossvals.append(model.train(lrnow, cliprangenow, slice_obs, slice_actions, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update_fn is not None:
update_fn(update)
if update % log_interval == 0 or update == 1:
# Calculates if value function is a good predicator of the returns (ev > 1)
# or if it's just worse than predicting nothing (ev =< 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv('loss/' + lossname, lossval)
'''
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
'''
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root:
if save_dir == None:
checkdir = osp.join(logger.get_dir(), 'checkpoints')
else:
checkdir = osp.join(save_dir, 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
if eval_env != None:
runner.record_render(eval_env)
return model
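# Illustrative call, assuming a vectorized env `venv` built elsewhere (e.g.
# with a DummyVecEnv wrapper) and an existing ./ckpts directory for output:
#   model = learn(env=venv, total_timesteps=1000000, nagents=2, nsteps=2048,
#                 lr=3e-4, cliprange=0.2, save_interval=10, save_dir='./ckpts')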
# Avoid division error when calculate the mean (in our case if epinfo is empty returns np.nan, not return an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
def view(env, total_timesteps, episodes=5, seed=None, nsteps=2048,
load_path=None, model_fn=None, update_fn=None, init_fn=None,
normalize_observations=False, normalize_returns=False):
set_global_seeds(seed)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Instantiate the model object (that creates act_model and train_model)
model = Model(ob_space=ob_space, ac_space=ac_space, ent_coef=0.0, vf_coef=0.5, max_grad_norm=0.5,
normalize_observations=normalize_observations, normalize_returns=normalize_returns)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=0.99, lam=0.95)
runner.render(episodes)
| [
"[email protected]"
] | |
d5fc5b6db085b46395f79022f2078f51827c0bfa | 63e2bed7329c79bf67279f9071194c9cba88a82c | /SevOneApi/python-client/swagger_client/models/flow_falcon_setting_v1.py | 1f3cc2c1d10cccb4d001fd3fc36c37a697f41177 | [] | no_license | jsthomason/LearningPython | 12422b969dbef89578ed326852dd65f65ab77496 | 2f71223250b6a198f2736bcb1b8681c51aa12c03 | refs/heads/master | 2021-01-21T01:05:46.208994 | 2019-06-27T13:40:37 | 2019-06-27T13:40:37 | 63,447,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,911 | py | # coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FlowFalconSettingV1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'can_zoom_in_cb': 'str',
'granularity': 'int',
'graph_other': 'bool',
'is_rate': 'bool',
'split': 'int',
'subnet_category_id': 'int'
}
attribute_map = {
'can_zoom_in_cb': 'canZoomInCb',
'granularity': 'granularity',
'graph_other': 'graphOther',
'is_rate': 'isRate',
'split': 'split',
'subnet_category_id': 'subnetCategoryId'
}
def __init__(self, can_zoom_in_cb=None, granularity=None, graph_other=None, is_rate=None, split=None, subnet_category_id=None): # noqa: E501
"""FlowFalconSettingV1 - a model defined in Swagger""" # noqa: E501
self._can_zoom_in_cb = None
self._granularity = None
self._graph_other = None
self._is_rate = None
self._split = None
self._subnet_category_id = None
self.discriminator = None
if can_zoom_in_cb is not None:
self.can_zoom_in_cb = can_zoom_in_cb
if granularity is not None:
self.granularity = granularity
if graph_other is not None:
self.graph_other = graph_other
if is_rate is not None:
self.is_rate = is_rate
if split is not None:
self.split = split
if subnet_category_id is not None:
self.subnet_category_id = subnet_category_id
@property
def can_zoom_in_cb(self):
"""Gets the can_zoom_in_cb of this FlowFalconSettingV1. # noqa: E501
:return: The can_zoom_in_cb of this FlowFalconSettingV1. # noqa: E501
:rtype: str
"""
return self._can_zoom_in_cb
@can_zoom_in_cb.setter
def can_zoom_in_cb(self, can_zoom_in_cb):
"""Sets the can_zoom_in_cb of this FlowFalconSettingV1.
:param can_zoom_in_cb: The can_zoom_in_cb of this FlowFalconSettingV1. # noqa: E501
:type: str
"""
self._can_zoom_in_cb = can_zoom_in_cb
@property
def granularity(self):
"""Gets the granularity of this FlowFalconSettingV1. # noqa: E501
:return: The granularity of this FlowFalconSettingV1. # noqa: E501
:rtype: int
"""
return self._granularity
@granularity.setter
def granularity(self, granularity):
"""Sets the granularity of this FlowFalconSettingV1.
:param granularity: The granularity of this FlowFalconSettingV1. # noqa: E501
:type: int
"""
self._granularity = granularity
@property
def graph_other(self):
"""Gets the graph_other of this FlowFalconSettingV1. # noqa: E501
:return: The graph_other of this FlowFalconSettingV1. # noqa: E501
:rtype: bool
"""
return self._graph_other
@graph_other.setter
def graph_other(self, graph_other):
"""Sets the graph_other of this FlowFalconSettingV1.
:param graph_other: The graph_other of this FlowFalconSettingV1. # noqa: E501
:type: bool
"""
self._graph_other = graph_other
@property
def is_rate(self):
"""Gets the is_rate of this FlowFalconSettingV1. # noqa: E501
:return: The is_rate of this FlowFalconSettingV1. # noqa: E501
:rtype: bool
"""
return self._is_rate
@is_rate.setter
def is_rate(self, is_rate):
"""Sets the is_rate of this FlowFalconSettingV1.
:param is_rate: The is_rate of this FlowFalconSettingV1. # noqa: E501
:type: bool
"""
self._is_rate = is_rate
@property
def split(self):
"""Gets the split of this FlowFalconSettingV1. # noqa: E501
:return: The split of this FlowFalconSettingV1. # noqa: E501
:rtype: int
"""
return self._split
@split.setter
def split(self, split):
"""Sets the split of this FlowFalconSettingV1.
:param split: The split of this FlowFalconSettingV1. # noqa: E501
:type: int
"""
self._split = split
@property
def subnet_category_id(self):
"""Gets the subnet_category_id of this FlowFalconSettingV1. # noqa: E501
:return: The subnet_category_id of this FlowFalconSettingV1. # noqa: E501
:rtype: int
"""
return self._subnet_category_id
@subnet_category_id.setter
def subnet_category_id(self, subnet_category_id):
"""Sets the subnet_category_id of this FlowFalconSettingV1.
:param subnet_category_id: The subnet_category_id of this FlowFalconSettingV1. # noqa: E501
:type: int
"""
self._subnet_category_id = subnet_category_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FlowFalconSettingV1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FlowFalconSettingV1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
8d19db4102d07e5fdfcc362289c71ffbb2079ae9 | dd116fe1e94191749ab7a9b00be25bfd88641d82 | /cairis/cairis/AttackerNodeDialog.py | 66e5c66dbc7112d2c73da0dba0fce3eb93a371ba | [
"Apache-2.0"
] | permissive | RobinQuetin/CAIRIS-web | fbad99327707ea3b995bdfb4841a83695989e011 | 4a6822db654fecb05a09689c8ba59a4b1255c0fc | HEAD | 2018-12-28T10:53:00.595152 | 2015-06-20T16:53:39 | 2015-06-20T16:53:39 | 33,935,403 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import gtk
from NDImplementationDecorator import NDImplementationDecorator
class AttackerNodeDialog:
def __init__(self,objt,environmentName,dupProperty,overridingEnvironment,builder):
self.window = builder.get_object("AttackerNodeDialog")
self.decorator = NDImplementationDecorator(builder)
self.decorator.updateTextCtrl("attackerNameCtrl",objt.name())
roles = []
for role in objt.roles(environmentName,dupProperty):
roles.append([role])
self.decorator.updateListCtrl("attackerRolesCtrl",['Role'],gtk.ListStore(str),roles)
capabilities = []
for cap,value in objt.capability(environmentName,dupProperty):
capabilities.append([cap,value])
self.decorator.updateListCtrl("attackerCapabilityCtrl",['Capability','Value'],gtk.ListStore(str,str),capabilities)
motives = []
for motive in objt.motives(environmentName,dupProperty):
motives.append([motive])
self.decorator.updateListCtrl("attackerMotiveCtrl",['Motive'],gtk.ListStore(str),motives)
self.decorator.updateMLTextCtrl("attackerDescriptionCtrl",objt.description())
self.window.resize(350,300)
def on_attackerOkButton_clicked(self,callback_data):
self.window.destroy()
def show(self):
self.window.show()
| [
"[email protected]"
] | |
43a4377094245e2463adc28cc427afa15821c112 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_head_product_group.py | f10b2ce0ada4d6b08c1a732b56a318068888bafd | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 1,663 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_head_product_group.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ApiManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.product_group.check_entity_exists(
resource_group_name="rg1",
service_name="apimService1",
product_id="5931a75ae4bbd512a88c680b",
group_id="59306a29e4bbd510dc24e5f9",
)
print(response)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementHeadProductGroup.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
654403a4269a49135c306d5d732655e2a3609d87 | bc9cb3f0f104778026ca6f3a07595dd5d6ce840f | /PROJETO_FINAL/codigo/core/loadout/oticos/optic.py | e4b5d6bb5479c2c27a5b7d5fbae3b46ac9465325 | [] | no_license | JohnRhaenys/escola_de_ferias | ff7a5d7f399459725f3852ca6ee200486f29e7d4 | 193364a05a5c7ccb2e5252c150745d6743738728 | refs/heads/master | 2023-01-12T11:30:46.278703 | 2020-11-19T14:59:10 | 2020-11-19T14:59:10 | 314,278,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from pydub import AudioSegment
from pydub.playback import play
class Optic:
def __init__(self, name, color):
self.name = name
self.color = color
def make_sound(self, audio_file_path):
audio = AudioSegment.from_mp3(audio_file_path)
play(audio)
def __str__(self):
return self.name
| [
"[email protected]"
] | |
6574753bed014a63ce252017aa322bdde279e822 | 37c243e2f0aab70cbf38013d1d91bfc3a83f7972 | /pp7TeV/HeavyIonsAnalysis/JetAnalysis/python/jets/akPu5CaloJetSequence_pPb_data_bTag_cff.py | 7a84c061532b4aafa349e43e7a825c503c57021d | [] | no_license | maoyx/CMSWork | 82f37256833cbe4c60cb8df0b4eb68ceb12b65e7 | 501456f3f3e0f11e2f628b40e4d91e29668766d5 | refs/heads/master | 2021-01-01T18:47:55.157534 | 2015-03-12T03:47:15 | 2015-03-12T03:47:15 | 10,951,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,051 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
akPu5Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu5CaloJets"),
matched = cms.InputTag("ak5HiGenJets")
)
akPu5Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu5CaloJets")
)
akPu5Calocorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu5CaloJets"),
payload = "AKPu5Calo_HI"
)
akPu5CaloJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPu5CaloJets'))
akPu5Caloclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak5HiGenJets'))
akPu5CalobTagger = bTaggers("akPu5Calo")
#create objects locally since they dont load properly otherwise
akPu5Calomatch = akPu5CalobTagger.match
akPu5Caloparton = akPu5CalobTagger.parton
akPu5CaloPatJetFlavourAssociation = akPu5CalobTagger.PatJetFlavourAssociation
akPu5CaloJetTracksAssociatorAtVertex = akPu5CalobTagger.JetTracksAssociatorAtVertex
akPu5CaloSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPu5CaloSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPu5CaloCombinedSecondaryVertexBJetTags = akPu5CalobTagger.CombinedSecondaryVertexBJetTags
akPu5CaloCombinedSecondaryVertexMVABJetTags = akPu5CalobTagger.CombinedSecondaryVertexMVABJetTags
akPu5CaloJetBProbabilityBJetTags = akPu5CalobTagger.JetBProbabilityBJetTags
akPu5CaloSoftMuonByPtBJetTags = akPu5CalobTagger.SoftMuonByPtBJetTags
akPu5CaloSoftMuonByIP3dBJetTags = akPu5CalobTagger.SoftMuonByIP3dBJetTags
akPu5CaloTrackCountingHighEffBJetTags = akPu5CalobTagger.TrackCountingHighEffBJetTags
akPu5CaloTrackCountingHighPurBJetTags = akPu5CalobTagger.TrackCountingHighPurBJetTags
akPu5CaloPatJetPartonAssociation = akPu5CalobTagger.PatJetPartonAssociation
akPu5CaloImpactParameterTagInfos = akPu5CalobTagger.ImpactParameterTagInfos
akPu5CaloJetProbabilityBJetTags = akPu5CalobTagger.JetProbabilityBJetTags
akPu5CaloPositiveOnlyJetProbabilityJetTags = akPu5CalobTagger.PositiveOnlyJetProbabilityJetTags
akPu5CaloNegativeOnlyJetProbabilityJetTags = akPu5CalobTagger.NegativeOnlyJetProbabilityJetTags
akPu5CaloNegativeTrackCountingHighEffJetTags = akPu5CalobTagger.NegativeTrackCountingHighEffJetTags
akPu5CaloNegativeTrackCountingHighPur = akPu5CalobTagger.NegativeTrackCountingHighPur
akPu5CaloNegativeOnlyJetBProbabilityJetTags = akPu5CalobTagger.NegativeOnlyJetBProbabilityJetTags
akPu5CaloPositiveOnlyJetBProbabilityJetTags = akPu5CalobTagger.PositiveOnlyJetBProbabilityJetTags
akPu5CaloSecondaryVertexTagInfos = akPu5CalobTagger.SecondaryVertexTagInfos
akPu5CaloSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPu5CaloSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPu5CaloCombinedSecondaryVertexBJetTags = akPu5CalobTagger.CombinedSecondaryVertexBJetTags
akPu5CaloCombinedSecondaryVertexMVABJetTags = akPu5CalobTagger.CombinedSecondaryVertexMVABJetTags
akPu5CaloSecondaryVertexNegativeTagInfos = akPu5CalobTagger.SecondaryVertexNegativeTagInfos
akPu5CaloSimpleSecondaryVertexNegativeHighEffBJetTags = akPu5CalobTagger.SimpleSecondaryVertexNegativeHighEffBJetTags
akPu5CaloSimpleSecondaryVertexNegativeHighPurBJetTags = akPu5CalobTagger.SimpleSecondaryVertexNegativeHighPurBJetTags
akPu5CaloCombinedSecondaryVertexNegativeBJetTags = akPu5CalobTagger.CombinedSecondaryVertexNegativeBJetTags
akPu5CaloCombinedSecondaryVertexPositiveBJetTags = akPu5CalobTagger.CombinedSecondaryVertexPositiveBJetTags
akPu5CaloSoftMuonTagInfos = akPu5CalobTagger.SoftMuonTagInfos
akPu5CaloSoftMuonBJetTags = akPu5CalobTagger.SoftMuonBJetTags
akPu5CaloSoftMuonByIP3dBJetTags = akPu5CalobTagger.SoftMuonByIP3dBJetTags
akPu5CaloSoftMuonByPtBJetTags = akPu5CalobTagger.SoftMuonByPtBJetTags
akPu5CaloNegativeSoftMuonByPtBJetTags = akPu5CalobTagger.NegativeSoftMuonByPtBJetTags
akPu5CaloPositiveSoftMuonByPtBJetTags = akPu5CalobTagger.PositiveSoftMuonByPtBJetTags
akPu5CaloPatJetFlavourId = cms.Sequence(akPu5CaloPatJetPartonAssociation*akPu5CaloPatJetFlavourAssociation)
akPu5CaloJetBtaggingIP = cms.Sequence(akPu5CaloImpactParameterTagInfos *
(akPu5CaloTrackCountingHighEffBJetTags +
akPu5CaloTrackCountingHighPurBJetTags +
akPu5CaloJetProbabilityBJetTags +
akPu5CaloJetBProbabilityBJetTags +
akPu5CaloPositiveOnlyJetProbabilityJetTags +
akPu5CaloNegativeOnlyJetProbabilityJetTags +
akPu5CaloNegativeTrackCountingHighEffJetTags +
akPu5CaloNegativeTrackCountingHighPur +
akPu5CaloNegativeOnlyJetBProbabilityJetTags +
akPu5CaloPositiveOnlyJetBProbabilityJetTags
)
)
akPu5CaloJetBtaggingSV = cms.Sequence(akPu5CaloImpactParameterTagInfos
*
akPu5CaloSecondaryVertexTagInfos
* (akPu5CaloSimpleSecondaryVertexHighEffBJetTags
+
akPu5CaloSimpleSecondaryVertexHighPurBJetTags
+
akPu5CaloCombinedSecondaryVertexBJetTags
+
akPu5CaloCombinedSecondaryVertexMVABJetTags
)
)
akPu5CaloJetBtaggingNegSV = cms.Sequence(akPu5CaloImpactParameterTagInfos
*
akPu5CaloSecondaryVertexNegativeTagInfos
* (akPu5CaloSimpleSecondaryVertexNegativeHighEffBJetTags
+
akPu5CaloSimpleSecondaryVertexNegativeHighPurBJetTags
+
akPu5CaloCombinedSecondaryVertexNegativeBJetTags
+
akPu5CaloCombinedSecondaryVertexPositiveBJetTags
)
)
akPu5CaloJetBtaggingMu = cms.Sequence(akPu5CaloSoftMuonTagInfos * (akPu5CaloSoftMuonBJetTags
+
akPu5CaloSoftMuonByIP3dBJetTags
+
akPu5CaloSoftMuonByPtBJetTags
+
akPu5CaloNegativeSoftMuonByPtBJetTags
+
akPu5CaloPositiveSoftMuonByPtBJetTags
)
)
akPu5CaloJetBtagging = cms.Sequence(akPu5CaloJetBtaggingIP
*akPu5CaloJetBtaggingSV
*akPu5CaloJetBtaggingNegSV
*akPu5CaloJetBtaggingMu
)
akPu5CalopatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPu5CaloJets"),
genJetMatch = cms.InputTag("akPu5Calomatch"),
genPartonMatch = cms.InputTag("akPu5Caloparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu5Calocorr")),
JetPartonMapSource = cms.InputTag("akPu5CaloPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPu5CaloJetTracksAssociatorAtVertex"),
discriminatorSources = cms.VInputTag(cms.InputTag("akPu5CaloSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPu5CaloSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPu5CaloCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPu5CaloCombinedSecondaryVertexMVABJetTags"),
cms.InputTag("akPu5CaloJetBProbabilityBJetTags"),
cms.InputTag("akPu5CaloJetProbabilityBJetTags"),
cms.InputTag("akPu5CaloSoftMuonByPtBJetTags"),
cms.InputTag("akPu5CaloSoftMuonByIP3dBJetTags"),
cms.InputTag("akPu5CaloTrackCountingHighEffBJetTags"),
cms.InputTag("akPu5CaloTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPu5CaloJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = True,
getJetMCFlavour = False,
addGenPartonMatch = False,
addGenJetMatch = False,
embedGenJetMatch = False,
embedGenPartonMatch = False,
embedCaloTowers = False,
embedPFCandidates = True
)
akPu5CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu5CalopatJetsWithBtagging"),
genjetTag = 'ak5HiGenJets',
rParam = 0.5,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = False,
isMC = False,
genParticles = cms.untracked.InputTag("hiGenParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(True),
bTagJetName = cms.untracked.string("akPu5Calo"),
genPtMin = cms.untracked.double(15),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL')
)
akPu5CaloJetSequence_mc = cms.Sequence(
akPu5Caloclean
*
akPu5Calomatch
*
akPu5Caloparton
*
akPu5Calocorr
*
akPu5CaloJetID
*
akPu5CaloPatJetFlavourId
*
akPu5CaloJetTracksAssociatorAtVertex
*
akPu5CaloJetBtagging
*
akPu5CalopatJetsWithBtagging
*
akPu5CaloJetAnalyzer
)
akPu5CaloJetSequence_data = cms.Sequence(akPu5Calocorr
*
akPu5CaloJetTracksAssociatorAtVertex
*
akPu5CaloJetBtagging
*
akPu5CalopatJetsWithBtagging
*
akPu5CaloJetAnalyzer
)
akPu5CaloJetSequence_jec = akPu5CaloJetSequence_mc
akPu5CaloJetSequence_mix = akPu5CaloJetSequence_mc
akPu5CaloJetSequence = cms.Sequence(akPu5CaloJetSequence_data)
| [
"[email protected]"
] | |
1fde89f76c9bb7e830b737c97a05c2db20bdc605 | fcb0480812b806b2383b5ddba781bdb157e5d580 | /backend/inByulGram/models.py | 93b5815910b2283490c7b6772ee90e6c54c649cb | [] | no_license | bunnycast/inByulGram | f576126a7028f340244f60cbe58bfbc46d114412 | 54af7629f60ef87a6a0238466b34720536616b19 | refs/heads/master | 2023-03-14T09:52:26.984478 | 2021-03-05T14:12:43 | 2021-03-05T14:12:43 | 341,405,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py | import re
from django.conf import settings
from django.db import models
from django.urls import reverse
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Post(TimeStampedModel):
author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='mt_post_set', on_delete=models.CASCADE)
photo = models.ImageField(upload_to='inByulGram/post/%Y/%m/%d')
caption = models.CharField(max_length=500)
tag_set = models.ManyToManyField('Tag', blank=True)
location = models.CharField(max_length=100)
like_user_set = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name='like_post_set')
def __str__(self):
return self.caption
def extract_tag_list(self):
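        # hashtags: '#' followed by ASCII letters, digits, or Korean characters (ㄱ-힇)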
tag_name_list = re.findall(r"#([a-zA-Z\dㄱ-힇]+)", self.caption)
tag_list = []
for tag_name in tag_name_list:
tag, _ = Tag.objects.get_or_create(name=tag_name)
tag_list.append(tag)
return tag_list
def get_absolute_url(self):
return reverse('instagram:post_detail', args=[self.pk])
def is_like_user(self, user):
return self.like_user_set.filter(pk=user.pk).exists()
class Meta:
ordering = ['-id']
class Comment(TimeStampedModel):
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
post = models.ForeignKey('Post', on_delete=models.CASCADE)
message = models.TextField()
class Meta:
ordering = ['-id']
class Tag(TimeStampedModel):
name = models.CharField(max_length=50, unique=True)
def __str__(self):
return self.name
| [
"[email protected]"
] | |
6b1f4ed165609e436d377590f34a8a963e68547f | 5ceea4106e0df754ae581c1f5e2d16082d7b6386 | /hackerRank/data-structures/arrays/arrays-ds.py | 06ca1a9cd3c7a7ac82b9c0bd94010dbf94d9361b | [] | no_license | vikramlance/Python-Programming | b0d4bd70145bfaa7a66434656c5970fbc57e8bd3 | 4094961e3c613e33f2d8a6d30281c60ed09d8c80 | refs/heads/master | 2022-06-17T00:58:50.646615 | 2022-06-03T03:39:35 | 2022-06-03T03:39:35 | 53,989,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | '''
https://www.hackerrank.com/challenges/arrays-ds
'''
#!/bin/python
import sys
n = int(raw_input().strip())
arr = map(int,raw_input().strip().split(' '))
print ' '.join(map(str, arr[::-1]))
| [
"[email protected]"
] | |
85147248b7b7d4b6ec887b871bff81793d2dc7bd | c2ec70be9ffbf29e779bcda8b491f96973cb1a39 | /chapter_06/chapter_6_6_5.py | d67b624e1e184320e5a97db0e4c695bcf8440400 | [] | no_license | panlourenco/exercises-coronapython | 8ef4f805fcdcc77686b5f325da6c8629106bac3c | 9ccb80a33f9ec1a23123d20147225338c7e476bc | refs/heads/master | 2022-12-09T22:56:59.676366 | 2020-09-06T17:35:56 | 2020-09-06T17:35:56 | 257,653,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # 6-5. Rivers:
rivers = {
'nile': 'egypt',
'mississippi': 'united states',
'seine': 'france',
'vistula': 'poland',
'Amazonas': 'brasil',
}
for river, country in rivers.items():
print("The " + river.title() + " flows through " + country.title() + ".")
print("\nThe following rivers are included in this data set:")
for river in rivers.keys():
print("- " + river.title())
print("\nThe following countries are included in this data set:")
for country in rivers.values():
print("- " + country.title()) | [
"[email protected]"
] | |
fe7a4edce117c23fb54632a7523492a7efe36909 | 9777ae10e1ec0a5e55e66ae71e2430eaa16ff891 | /tests/q01b.py | 9cdef7d217fd3e733970e1d5bc04197378d40979 | [] | no_license | jkuruzovich/final-starter-2019 | 175f5c65a295112c27c9240c3b14660a2669a875 | 8eed585ce4d4739548c5b3d0dfc82dd63ac3a2ec | refs/heads/master | 2022-03-14T18:43:47.639034 | 2019-12-09T20:15:52 | 2019-12-09T20:15:52 | 184,331,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | test = {
'name': '1b',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> "{0:.4f}".format(round( cost_ny,4))
'15.1882'
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"[email protected]"
] | |
46592bd7353b194e91fdbd40140bbbd8ff92a6ab | cc7474d52cfcd124f2ddd4ad4a2cad1d2868bd80 | /unispider/spiders/ppai_scrapy.py | 93b603d685865a4fd45b5ee6a87122cd850af6aa | [] | no_license | yidun55/unispider | 212e81ed49a5bb9ed219cdab8215256bf16694f1 | aa8d826707baedf56028e6307f590458ed683e8d | refs/heads/master | 2020-05-18T16:31:58.346600 | 2015-10-28T13:37:09 | 2015-10-28T13:37:09 | 39,058,048 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,986 | py | #coding:utf-8
"""
Crawl the online-lending blacklist from PPDai (拍拍贷)
author: 邓友辉
email:[email protected]
date:2015/06/10
"""
from scrapy.utils.request import request_fingerprint
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy import log
from scrapy import signals
import time
from unispider.items import *
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class p2pBlacklist(Spider):
download_delay=2
name = 'pplist'
start_urls = ['http://www.ppdai.com/blacklist/2015']
allowed_domains = ['ppdai.com']
# writeInFile = "/home/dyh/data/specialworker/judicial/url_j.txt"
writeInFile = "E:/DLdata/ppai.txt"
# haveRequested = "/home/dyh/data/specialworker/judicial/haveRequestedUrl.txt"
haveRequested = "E:/DLdata/ppai_control.txt"
def __init__(self):
pass
def set_crawler(self,crawler):
super(p2pBlacklist, self).set_crawler(crawler)
self.bind_signal()
def bind_signal(self):
self.crawler.signals.connect(self.open_file, \
            signal=signals.spider_opened) # open the output files when the spider starts
self.crawler.signals.connect(self.close_file, \
            signal=signals.spider_closed) # close the output files when the spider stops
def open_file(self):
        self.file_handler = open(self.writeInFile, "a") # output file for the scraped records
        self.file_haveRequested = open(self.haveRequested, "a+") # log of urls that were already requested successfully
self.url_have_seen = set()
for line in self.file_haveRequested:
fp = self.url_fingerprint(line)
self.url_have_seen.add(fp)
def close_file(self):
self.file_handler.close()
def url_fingerprint(self, url):
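        # scrapy request fingerprint, used to skip urls that were already crawled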
req = Request(url.strip())
fp = request_fingerprint(req)
return fp
def make_requests_from_url(self, url):
return Request(url, callback=self.gettotal, dont_filter=True)
def gettotal(self, response):
"""
extract pages from different years
"""
url = 'http://www.ppdai.com/blacklist/'
years = xrange(2008,2016)
urls = [url+str(year) for year in years]
for url in urls[0:5]:
# print url, "year url"
yield Request(url, callback=self.extract, dont_filter=True)
def extract(self, response):
"""
        extract pages and then Request those pages sequentially
"""
# print response.url,"extract response url"
sel = response.selector
pages = []
try:
# print "pages work"
pages = sel.xpath("//div[contains(@class,'fen_ye_nav')]//td/text()").re(u"共([\d]{1,3})页")
# print pages
except Exception, e:
print e,"error pages"
log.msg(e, level=log.ERROR)
log.msg(response.url, level=log.ERROR)
if len(pages) == 0:
self.getUserName(response) #only one page
else:
            for page in range(int(pages[0])+1)[1:2]: # for testing, only the first page
url = response.url+"_m0_p"+str(page)
yield Request(url, callback=self.getUserName,dont_filter=True)
def getUserName(self, response):
"""
        get user names from the html
"""
sel = response.selector
blackTr = sel.xpath(u"//table[contains(@class,'black_table')]/tr[position()>1]//p[contains(text(),'真实姓名')]/a/@href").extract()
url = 'http://www.ppdai.com'
urls = [url+i for i in blackTr]
for url in urls[0:1]:
fp = self.url_fingerprint(url)
if fp not in self.url_have_seen:
self.url_have_seen.add(fp)
yield Request(url,
callback = self.detail, dont_filter=False)
else:
pass
def for_ominated_data(self,info_list,i_list):
"""
        some elements may be omitted on the page; set the omitted elements
        to ""
"""
try:
if len(i_list) == 0:
i_list.append("")
else:
pass
assert len(i_list) == 1, "the element must be unique"
info_list.append(i_list[0].strip())
# print 'you work'
return info_list
except Exception, e:
print 'i work'
log.msg(e, level=log.ERROR)
def detail(self, response):
"""
extract detail info and store it in items
"""
item = UnispiderItem()
sel = response.selector
self.file_haveRequested.write(response.url+"\n")
total = sel.xpath(u"count(//div[@class='table_nav']\
                //tr)").extract() # number of loan records
total = int(total[0][0])
for trI in xrange(2, total+1):
"""
            Some users have more than one loan.
            trI is the table-row index of each loan record.
"""
info = []
accPri = sel.xpath(u"//table[contains(@class, \
                'detail_table')]/tr[1]/td[1]/text()").re(ur"累计借入本金:([\S\s]*)") # accumulated borrowed principal
info = self.for_ominated_data(info, accPri)
            ovDate = sel.xpath(u"//table[contains(@class, 'detail_table')]/tr[1]/td[2]/span/text()").extract() # maximum days overdue
info = self.for_ominated_data(info, ovDate)
bef = u"//table[contains(@class, 'detail_table')]/tr[3]//tr["
aft = u"]/td[1]/text()"
ass = bef + str(trI) + aft
            liID = sel.xpath(ass).extract() # listing number
info = self.for_ominated_data(info, liID)
bef = u"//table[contains(@class, 'detail_table')]/tr[3]//tr["
aft = u"]/td[2]/text()"
ass = bef + str(trI) + aft
            loNu = sel.xpath(ass).extract() # number of loan installments
info = self.for_ominated_data(info, loNu)
bef = u"//table[contains(@class, 'detail_table')]/tr[3]//tr["
aft = u"]/td[3]/text()"
ass = bef + str(trI) + aft
            loTime = sel.xpath(ass).extract() # loan date
info = self.for_ominated_data(info, loTime)
bef = u"//table[contains(@class, 'detail_table')]/tr[3]//tr["
aft = u"]/td[4]/text()"
ass = bef + str(trI) + aft
            ovDayNu = sel.xpath(ass).extract() # days overdue
info = self.for_ominated_data(info, ovDayNu)
bef = u"//table[contains(@class, 'detail_table')]/tr[3]//tr["
aft = u"]/td[5]/text()"
ass = bef + str(trI) + aft
            ovPri = sel.xpath(ass).extract() # overdue principal and interest
info = self.for_ominated_data(info, ovPri)
prov = sel.xpath(u"//div[contains(@class,\
                'blacklist_detail_nav')]//li//strong/text()").re(ur"_([\w]*?)_[男|女]") # province
info = self.for_ominated_data(info, prov)
usrNa = sel.xpath(u"//div[contains(@class,\
                'blacklist_detail_nav')]//li").re(ur"用户名:([\w\W]*?)\n") # username
info = self.for_ominated_data(info, usrNa)
name = sel.xpath(u"//div[contains(@class,\
                'blacklist_detail_nav')]//li").re(ur"姓名:([\w\W]*?)\n") # real name
info = self.for_ominated_data(info, name)
phoneN = sel.xpath(u"//div[contains(@class,\
                'blacklist_detail_nav')]//li").re(ur"手机号:([\w\W]*?)\n") # mobile phone number
info = self.for_ominated_data(info, phoneN)
ID = sel.xpath(u"//div[contains(@class,\
                'blacklist_detail_nav')]//li").re(ur"身份证号:([\w\W]*?)\n") # national ID number
info = self.for_ominated_data(info, ID)
try:
info.append(str(time.strftime("%Y年%m月%d日")))
info = '\001'.join(info)+"\n"
item['content'] = info
yield item
except Exception, e:
log.msg('ERROR:{url}'.format(url=response.url),\
level=log.ERROR) | [
"[email protected]"
] | |
394ca5aa490425423f8b5baa05b50dee4488b340 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_052/ch80_2020_06_20_18_56_40_883079.py | 2b0e0c4b3e47da4c259cad01dc2cf476c5c8eca5 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | def interseccao_chaves (dicionario, dic):
lista = []
    # keep only the keys that appear in both dictionaries
    for chave in dicionario.keys():
        if chave in dic:
            lista.append(chave)
return lista
| [
"[email protected]"
] | |
723544710160d738e96bc643094c89f86919f4c0 | 69dc65bc9e0c9d30a5b3c1cf72d6e51fa07fa44b | /optimization/train_op.py | 4168b9ed8002e0a5337f6a9d754f048e252dd109 | [] | no_license | xljhtq/SeqGAN | 68e2ef3bee7ae4c581cd0392a3a187b05626f18c | 30882edf4eaefc25754d09d35e430c9a7c04f5cb | refs/heads/master | 2020-03-26T05:59:58.545614 | 2018-09-04T12:41:40 | 2018-09-04T12:41:40 | 144,584,798 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,473 | py | ## encoding=utf8
import sys
import numpy as np
import tensorflow as tf
import random
from dataloader import Gen_Data_loader, Dis_dataloader
from generator_op import Generator
from discriminator import Discriminator
from rollout_op import ROLLOUT
import vocab_utils
import os
#########################################################################################
PRE_EPOCH_NUM_generator = 30 # supervised (maximum likelihood estimation) epochs
PRE_EPOCH_NUM_discriminator = 30
BATCH_SIZE = 10
need_generated_samples = 200000
TOTAL_BATCH = 50
g_lrn = 0.01
d_lrn = 0.0001
# Generator Hyper-parameters
######################################################################################
EMB_DIM = 128 # embedding dimension
HIDDEN_DIM = 200 # hidden state dimension of lstm cell
SEQ_LENGTH = 20 # sequence length
START_TOKEN = 0
SEED = 88
#########################################################################################
# Discriminator Hyper-parameters
#########################################################################################
dis_embedding_dim = 128
dis_filter_sizes = [2, 3, 5]
dis_num_filters = [100, 100, 100]
dis_dropout_keep_prob = 0.75
dis_l2_reg_lambda = 0.01
#########################################################################################
# Basic Training Parameters
#########################################################################################
train_dir = "./"
log = open('data/experiment-log.txt', 'w')
positive_file = 'data/positive_data.txt'
negative_file = 'data/negative_data.txt' ## fake data generated by the generator model
out_negative_file = 'data/negative_data_' ## negative_data written out for inspection
eval_file = 'data/eval_data.txt' ## test set produced by the generator model after each training run
def generate_samples(sess, trainable_model, batch_size, generated_num, output_file):
generated_samples = []
for _ in range(int(generated_num / batch_size)):
generated_samples.extend(trainable_model.generate(sess))
with open(output_file, 'w') as fout:
for poem in generated_samples:
buffer = ' '.join([str(x) for x in poem]) + '\n'
fout.write(buffer)
def target_loss(sess, target_lstm, data_loader):
# target_loss means the oracle negative log-likelihood tested with the oracle model "target_lstm"
nll = []
data_loader.reset_pointer()
for it in xrange(data_loader.num_batch):
batch = data_loader.next_batch()
g_loss = sess.run(target_lstm.pretrain_loss, {target_lstm.x: batch})
nll.append(g_loss)
return np.mean(nll)
def pre_train_epoch(sess, trainable_model, data_loader):
# using MLE
supervised_g_losses = []
data_loader.reset_pointer()
for it in xrange(data_loader.num_batch):
batch = data_loader.next_batch()
_, g_loss = trainable_model.pretrain_step(sess, batch)
supervised_g_losses.append(g_loss)
return np.mean(supervised_g_losses)
def transform_file(negative_file, wordVocab, out_file):
out_op = open(out_file, "w")
for line in open(negative_file):
line = line.strip("\n").split(" ")
wordList = []
for id in line:
wordList.append(wordVocab.id2word[int(id)])
out_op.write(" ".join(wordList) + "\n")
out_op.close()
def main(source_file, wordVocab, vocab_size):
tf.reset_default_graph()
random.seed(SEED)
np.random.seed(SEED)
assert START_TOKEN == 0
dis_data_loader = Dis_dataloader(BATCH_SIZE)
gen_data_loader = Gen_Data_loader(BATCH_SIZE)
likelihood_data_loader = Gen_Data_loader(BATCH_SIZE) # For testing
# todo: print ("starting generating positive samples...")
generated_num = gen_data_loader.transform_positive_file_2(train_dir + source_file,
train_dir + positive_file,
wordVocab,
SEQ_LENGTH)
print ("generated_num: ", generated_num)
if generated_num < 100: return
gen_data_loader.create_batches(train_dir + positive_file)
with tf.variable_scope("Train", reuse=None):
generator = Generator(wordVocab,
vocab_size,
BATCH_SIZE,
EMB_DIM,
HIDDEN_DIM,
SEQ_LENGTH,
START_TOKEN,
learning_rate=g_lrn)
discriminator = Discriminator(word_vocab=wordVocab,
sequence_length=SEQ_LENGTH,
num_classes=2,
embedding_size=dis_embedding_dim,
filter_sizes=dis_filter_sizes,
num_filters=dis_num_filters,
l2_reg_lambda=dis_l2_reg_lambda,
learning_rate=d_lrn)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
# todo: 1.##############pre-train generator##############
print 'Start pre-training generator with MLE...'
log.write('pre-training...\n')
for epoch in xrange(PRE_EPOCH_NUM_generator):
loss = pre_train_epoch(sess, generator, gen_data_loader)
if epoch % 5 == 0:
buffer = 'epoch:\t' + str(epoch) + '\tloss:\t' + str(loss)
print (buffer)
sys.stdout.flush()
log.write(buffer)
# todo: 2.##############pre-train discriminator##############
print 'Start pre-training discriminator...'
for _ in range(PRE_EPOCH_NUM_discriminator):
        ## the samples are drawn from the model's probability distribution, so the generated fake data differs every time
generate_samples(sess,
generator,
BATCH_SIZE,
generated_num,
negative_file)
dis_data_loader.load_train_data(positive_file, negative_file)
        for _ in range(3): ## train the discriminator on each batch of fake data
dis_data_loader.reset_pointer()
for it in xrange(dis_data_loader.num_batch):
x_batch, y_batch = dis_data_loader.next_batch()
feed = {
discriminator.input_x: x_batch,
discriminator.input_y: y_batch,
discriminator.dropout_keep_prob: dis_dropout_keep_prob
}
_ = sess.run(discriminator.train_op, feed)
with tf.variable_scope("Train", reuse=None):
        g_beta = ROLLOUT(generator, 0.8) ## this is the rollout policy g_beta
# todo: 3.############## Adversarial Training ##############
print '#########################################################################'
print 'Start Adversarial Training...'
log.write('adversarial training...\n')
for total_batch in range(TOTAL_BATCH):
# todo: Train the generator for one batch samples
for it in range(1):
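            # sample a batch from the generator, score it with the rollout policy
            # (16 Monte Carlo rollouts per sequence, judged by the discriminator),
            # then apply the policy-gradient update weighted by those rewards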
samples = generator.generate(sess)
rewards = g_beta.get_reward(sess, samples, 16, discriminator)
feed = {generator.x: samples,
generator.rewards: rewards}
_, g_loss = sess.run([generator.g_updates, generator.g_loss], feed_dict=feed)
# Test
if total_batch % 10 == 0 or total_batch == TOTAL_BATCH - 1:
buffer = 'epoch:\t' + str(total_batch) + '\tg_loss:\t' + str(g_loss)
print (buffer)
sys.stdout.flush()
log.write(buffer)
g_beta.update_params()
# todo: Train the discriminator
for _ in range(5):
generate_samples(sess,
generator,
BATCH_SIZE,
generated_num,
negative_file)
dis_data_loader.load_train_data(positive_file, negative_file)
for _ in range(3):
dis_data_loader.reset_pointer()
for it in xrange(dis_data_loader.num_batch):
x_batch, y_batch = dis_data_loader.next_batch()
feed = {
discriminator.input_x: x_batch,
discriminator.input_y: y_batch,
discriminator.dropout_keep_prob: dis_dropout_keep_prob
}
_ = sess.run(discriminator.train_op, feed)
if total_batch % 10 == 0 or total_batch == TOTAL_BATCH - 1:
out_file = out_negative_file + str(total_batch) + ".txt"
transform_file(negative_file, wordVocab, out_file)
generate_samples(sess,
generator,
BATCH_SIZE,
need_generated_samples,
negative_file)
transform_file(negative_file, wordVocab, source_file + ".GEN")
if __name__ == '__main__':
path = "data/all_2/"
fileList = os.listdir(path)
print ("start loading vocab...")
wordVocab = vocab_utils.Vocab()
wordVocab.fromText_format3(train_dir, "data/wordvec.vec")
vocab_size = wordVocab.vocab_size
print ("vocab_size: ", vocab_size)
for file in fileList:
print (path + file)
main(path + file, wordVocab, vocab_size)
| [
"[email protected]"
] | |
009137a3b61958b2ef9248924a5c5b51fbd40a96 | 0822d36728e9ed1d4e91d8ee8b5ea39010ac9371 | /robo/pages/goias/oanapolis.py | b5cc50ea54d8600f47c59fd1327281db8bd8b173 | [] | no_license | diegothuran/blog | 11161e6f425d08bf7689190eac0ca5bd7cb65dd7 | 233135a1db24541de98a7aeffd840cf51e5e462e | refs/heads/master | 2022-12-08T14:03:02.876353 | 2019-06-05T17:57:55 | 2019-06-05T17:57:55 | 176,329,704 | 0 | 0 | null | 2022-12-08T04:53:02 | 2019-03-18T16:46:43 | Python | UTF-8 | Python | false | false | 637 | py | # coding: utf-8
import sys
sys.path.insert(0, '../../../blog')
from bs4 import BeautifulSoup
import requests
GLOBAL_RANK = 2969356
RANK_BRAZIL = 62956
NAME = 'oanapolis.com.br'
def get_urls():
try:
urls = []
link = 'http://oanapolis.com.br/'
req = requests.get(link)
noticias = BeautifulSoup(req.text, "html.parser").find_all('h2', class_='entry-title')
for noticia in noticias:
href = noticia.find_all('a', href=True)[0]['href']
# print(href)
urls.append(href)
return urls
except:
raise Exception('Exception in oanapolis')
| [
"[email protected]"
] | |
9772a0e3675f402d54ce680f87aaf70c21ed6d41 | 239dc36bc1042b5b99bdd1a72aaedeef22327083 | /EionetLDAP/__init__.py | 3f357b41cf398c1bcf42b3cacaaa10d4f88d9d96 | [] | no_license | eaudeweb/EionetProducts | d67c8627317548d17e9662da8d00d4b71a894774 | da85b0001b994cdc6d94d481fd020de69fc10f63 | refs/heads/master | 2021-01-10T20:04:12.867465 | 2011-05-09T13:45:17 | 2011-05-23T13:24:11 | 2,135,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | # The contents of this file are subject to the Mozilla Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Initial Owner of the Original Code is European Environment
# Agency (EEA). Portions created by Eau de Web are
# Copyright (C) European Environment Agency. All
# Rights Reserved.
#
# Authors:
#
# Alex Morega, Eau de Web
__version__='0.1'
from App.ImageFile import ImageFile
import EionetLDAP
def initialize(context):
# register EionetLDAP
context.registerClass(
EionetLDAP.EionetLDAP,
constructors=(EionetLDAP.manage_addEionetLDAP_html,
EionetLDAP.manage_addEionetLDAP),
icon='www/eionet_ldap.gif',
)
misc_ = {
'eionet_ldap.gif': ImageFile('www/eionet_ldap.gif', globals()),
}
| [
"[email protected]"
] | |
2e45ed946ff60dd2b0f5abeae6e510dd73e3d67f | 16734d189c2bafa9c66fdc989126b7d9aa95c478 | /Python/small-projects/make_dictionary.py | 6c17de909dd3b4d6068513f34ef23f19786613ed | [] | no_license | Ericksmith/CD-projects | 3dddd3a3819341be7202f11603cf793a2067c140 | 3b06b6e289d241c2f1115178c693d304280c2502 | refs/heads/master | 2021-08-15T17:41:32.329647 | 2017-11-18T01:18:04 | 2017-11-18T01:18:04 | 104,279,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | name = ["Anna", "Eli", "Pariece", "Brendan", "Amy", "Shane", "Oscar"]
favorite_animal = ["horse", "cat", "spider", "giraffe", "ticks", "dolphins", "llamas", 'corgi']
def make_dict(arr1, arr2):
new_dict = {}
if len(arr1) < len(arr2):
big = arr2
small = arr1
else:
big = arr1
small = arr2
for i in range(len(big)):
if len(small) -1 < i:
new_dict[big[i]] = None
else:
new_dict[big[i]] = small[i]
return new_dict
print(make_dict(name, favorite_animal)) | [
"[email protected]"
] | |
723addda30f29f8f7f246fcf493c78b96472cb12 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/eventhub/v20140901/outputs.py | 0ebdc673b5e80e5109e83bebd9559fc888bd6218 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 1,725 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'SkuResponse',
]
@pulumi.output_type
class SkuResponse(dict):
"""
SKU parameters supplied to the create Namespace operation
"""
def __init__(__self__, *,
tier: str,
capacity: Optional[int] = None,
name: Optional[str] = None):
"""
SKU parameters supplied to the create Namespace operation
:param str tier: The billing tier of this particular SKU.
:param int capacity: The Event Hubs throughput units.
:param str name: Name of this SKU.
"""
pulumi.set(__self__, "tier", tier)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def tier(self) -> str:
"""
The billing tier of this particular SKU.
"""
return pulumi.get(self, "tier")
@property
@pulumi.getter
def capacity(self) -> Optional[int]:
"""
The Event Hubs throughput units.
"""
return pulumi.get(self, "capacity")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of this SKU.
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
5dc00e8bf125c32f5ce8a34eea869cdd95a2d6a6 | 0d392fa6e594279ef193597a36fde65c341cf7c5 | /test.py | f80d7b9718267ea298ad2ec5ce51717e1af7a19e | [] | no_license | chimtrangbu/DoorToDoor | a0d542f99f04ec3e5774e8cd49b478e731f4568f | 88891be30ef6aac8714b0fa41d575db47ed7de7d | refs/heads/master | 2020-04-15T04:11:22.943176 | 2019-01-07T03:52:42 | 2019-01-07T03:52:42 | 164,374,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,854 | py | from math import sqrt
from sys import argv
class Node():
'''
    each Node object holds one city's coordinates
    cal_distance: computes the distance between this Node and another Node
'''
def __init__(self, name, x, y):
self.name = name
self.x = x
self.y = y
def cal_distance(self, another_node):
return sqrt((self.x - another_node.x) ** 2 +
(self.y - another_node.y) ** 2)
class Graph(object):
def __init__(self):
self.nodes = []
def add_node(self, node):
self.nodes.append(node)
return self.nodes
def nearest_neighbor(self, start=None):
if start is None:
start = self.nodes[0]
must_visit = self.nodes.copy()
path = [start]
must_visit.remove(start)
while must_visit:
nearest = must_visit[0]
cur_node = path[-1]
min_dist = cur_node.cal_distance(nearest)
for node in must_visit:
if cur_node.cal_distance(node) < min_dist:
nearest = node
min_dist = cur_node.cal_distance(node)
# nearest = min(must_visit, key=lambda k: path[-1].cal_distance(k))
path.append(nearest)
must_visit.remove(nearest)
return path
def random_insertion(self, start=None):
if start is None:
start = self.nodes[0]
must_visit = self.nodes.copy()
path = [start]
must_visit.remove(start)
while must_visit:
# nearest = min(must_visit, key=lambda k: path[-1].cal_distance(k))
import random
nearest = must_visit[random.randint(0, len(must_visit) - 1)]
pos = min(path, key=lambda k: nearest.cal_distance(k))
pos_index = path.index(pos)
if pos_index == 0:
pos_insert = pos_index + 1
elif pos_index == len(path) - 1:
case_insert_after = length_road([path[-2], path[-1], nearest])
case_insert_before = length_road([path[-2], nearest, path[-1]])
if case_insert_after > case_insert_before:
pos_insert = pos_index
else:
pos_insert = pos_index + 1
else:
last_node = path[pos_index - 1]
next_node = path[pos_index + 1]
case_insert_before = length_road(
[last_node, nearest, pos, next_node])
case_insert_after = length_road(
[last_node, pos, nearest, next_node])
if case_insert_after > case_insert_before:
pos_insert = pos_index
else:
pos_insert = pos_index + 1
path.insert(pos_insert, nearest)
must_visit.remove(nearest)
return path
@staticmethod
def two_opt(route):
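        # 2-opt local search: keep reversing route segments while doing so shortens the tour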
best = route
improved = True
while improved:
improved = False
for i in range(1, len(route) - 2):
for j in range(i + 1, len(route)):
if j - i == 1:
                        continue  # reversing a single element changes nothing, so skip it
new_route = route[:]
new_route[i:j] = route[
                        j - 1:i - 1:-1]  # the 2-opt swap: reverse the segment between i and j
if length_road(new_route) < length_road(best):
best = new_route
improved = True
route = best
return best
def find_shortest_path(self, key='nearest_neighbor'):
if key == 'nearest_neighbor':
self.nodes = self.nearest_neighbor()
elif key == 'random_insertion':
shortest_path = self.random_insertion()
for i in range(10):
random_path = self.random_insertion()
if length_road(random_path) < length_road(shortest_path):
shortest_path = random_path
self.nodes = shortest_path
elif key == 'two_opt':
self.nodes = self.two_opt(self.nearest_neighbor())
return self.nodes
def show_graph(self):
print(' -> '.join([node.name for node in self.nodes]))
def length_road(nodes):
sum = 0
for i in range(len(nodes) - 1):
sum += nodes[i].cal_distance(nodes[i + 1])
return sum
def main():
map = Graph()
f = open(argv[1], 'r')
lines = f.readlines()
f.close()
for line in lines:
info = line.split(', ')
map.add_node(Node(info[0], float(info[1]), float(info[2])))
map.find_shortest_path()
map.show_graph()
print('length of path:', length_road(map.nodes))
if __name__ == '__main__':
import time
now = time.time()
main()
print('time:', time.time() - now)
| [
"[email protected]"
] | |
ae2e1039ec5d63e87258338ed7b25d82ab0a6aad | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/33/b0033d0a4ffa00161174a93fd5908e78 | e27685ffa45504a46e820c64ac565614f01f195a | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | #!/usr/bin/env python
#coding:UTF-8
from audit_demo.utility.MySqlHelper import MySqlHelper
class s_table(object):
def __init__(self):
self.__helper = MySqlHelper()
    def add_ser(self, sername, serip):
        # assumes both the server name and ip are supplied, matching the two columns inserted below
        sql = 'insert into s_table(s_name,s_ip) values(%s,%s)'
        params = (sername, serip)
try:
self.__helper.insert_one(sql,params)
except Exception as e:
print(e)
def get_user(self,username):
sql = 'select u_id from u_table where u_name = %s'
try:
u_id = self.__helper.select(sql,username)[0][0]
except Exception as e:
print(e)
return u_id
def upd_user(self,username_old,username_new):
sql = 'update u_table set u_name = %s where u_name = %s'
params = (username_new, username_old)
try:
self.__helper.update(sql,params)
except Exception as e:
print(e)
def del_user(self, username):
sql = 'delete from u_table where u_name=%s'
g_u_handle = g_u_relation()
if g_u_handle.get_u_g_id(username):
g_u_handle.del_u_g(username)
try:
self.__helper.delete(sql,username)
except Exception as e:
print(e)
else:
try:
self.__helper.delete(sql,username)
except Exception as e:
print(e)
| [
"[email protected]"
] | ||
8920f9bea0a12cc6d1d38a9f94aab9488afa66c8 | 0d5be86072e92da0b5c086ec42bbfeeb6c1cf367 | /tiny_tokenizer/word_tokenizers/mecab_tokenizer.py | c71164572d6df24cc3025c58ac1e64e53f7a0a04 | [
"MIT"
] | permissive | chie8842/tiny_tokenizer | d04eab4115daba443f83647a332177dcce5c9b19 | 6599873c050f4e064c88381688d8476346b57099 | refs/heads/master | 2020-07-25T18:57:15.545717 | 2019-09-12T15:39:42 | 2019-09-12T15:39:42 | 208,393,571 | 0 | 0 | MIT | 2019-09-14T05:26:24 | 2019-09-14T05:26:23 | null | UTF-8 | Python | false | false | 1,655 | py | from typing import Optional
from tiny_tokenizer.tiny_tokenizer_token import Token
from tiny_tokenizer.word_tokenizers.tokenizer import BaseTokenizer
class MeCabTokenizer(BaseTokenizer):
    """Wrapper class for external text analyzers"""
def __init__(
self, dictionary_path: Optional[str] = None, with_postag: bool = False
):
"""
Initializer for MeCabTokenizer.
Parameters
---
        dictionary_path (Optional[str]=None)
            path to a custom dictionary (optional);
            it is used by `mecab -u [dictionary_path]`
        with_postag (bool=False)
            flag that determines if tiny_tokenizer.tokenizer includes pos tags.
"""
super().__init__(name="mecab", with_postag=with_postag)
try:
import natto
except ModuleNotFoundError:
raise ModuleNotFoundError("natto-py is not installed")
flag = ""
if not self.with_postag:
flag += " -Owakati"
if dictionary_path is not None:
flag += f" -u {dictionary_path}"
self.mecab = natto.MeCab(flag)
def tokenize(self, text: str):
"""Tokenize"""
return_result = []
parse_result = self.mecab.parse(text)
if self.with_postag:
for elem in parse_result.split("\n")[:-1]:
surface, feature = elem.split()
postag = feature.split(",")[0]
return_result.append(Token(surface=surface, postag=postag))
else:
for surface in parse_result.split(" "):
return_result.append(Token(surface=surface))
return return_result
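# Usage sketch showing how to get tokens with part-of-speech tags.
# Assumptions: natto-py and a MeCab dictionary are installed locally; the sample
# sentence below is only illustrative.
if __name__ == "__main__":
    _tokenizer = MeCabTokenizer(with_postag=True)
    print(_tokenizer.tokenize("自然言語処理は楽しい"))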
| [
"[email protected]"
] | |
11607fcc0a5e114881fa435009ca3febe204f661 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/uzv.py | 6468252550a7167a4125e9595ad33c5dccc3d2aa | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
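    # a "uZV" line prints its quoted tokens joined by spaces, or a blank line for an empty quote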
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'uZV':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
0d591e3d250f5c8491dadeedc5a9c343ffd4224f | 23acc991e4b6e96aa9ac0898ef59831009442a7e | /pygazebo/msg/plugin_pb2.py | 7d2abf0447c68650685d40f2e751334f023e302f | [
"Apache-2.0"
] | permissive | kunaltyagi/pygazebo | f07ee8c3a790095b66e074b0afa698c15a823d6a | 4cd163f4efec5a6f672445de939385bf8a9156e1 | refs/heads/master | 2021-01-16T20:28:55.059663 | 2014-06-04T10:06:56 | 2014-06-04T10:06:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 2,176 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='plugin.proto',
package='gazebo.msgs',
serialized_pb='\n\x0cplugin.proto\x12\x0bgazebo.msgs\"<\n\x06Plugin\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x10\n\x08\x66ilename\x18\x02 \x02(\t\x12\x12\n\x08innerxml\x18\x03 \x01(\t:\x00')
_PLUGIN = descriptor.Descriptor(
name='Plugin',
full_name='gazebo.msgs.Plugin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='gazebo.msgs.Plugin.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='filename', full_name='gazebo.msgs.Plugin.filename', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='innerxml', full_name='gazebo.msgs.Plugin.innerxml', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=29,
serialized_end=89,
)
DESCRIPTOR.message_types_by_name['Plugin'] = _PLUGIN
class Plugin(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLUGIN
# @@protoc_insertion_point(class_scope:gazebo.msgs.Plugin)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
717d112a2b418a4f6c234f761f9bbf96d26b3535 | 753a70bc416e8dced2853f278b08ef60cdb3c768 | /models/official/r1/mnist/mnist_test.py | 87e0571234ac91fa0192c8ca65353a890ce0a363 | [
"MIT",
"Apache-2.0"
] | permissive | finnickniu/tensorflow_object_detection_tflite | ef94158e5350613590641880cb3c1062f7dd0efb | a115d918f6894a69586174653172be0b5d1de952 | refs/heads/master | 2023-04-06T04:59:24.985923 | 2022-09-20T16:29:08 | 2022-09-20T16:29:08 | 230,891,552 | 60 | 19 | MIT | 2023-03-25T00:31:18 | 2019-12-30T09:58:41 | C++ | UTF-8 | Python | false | false | 4,740 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow.compat.v1 as tf # pylint: disable=g-bad-import-order
from absl import logging
from official.r1.mnist import mnist
BATCH_SIZE = 100
def dummy_input_fn():
image = tf.random.uniform([BATCH_SIZE, 784])
labels = tf.random.uniform([BATCH_SIZE, 1], maxval=9, dtype=tf.int32)
return image, labels
def make_estimator():
data_format = 'channels_last'
if tf.test.is_built_with_cuda():
data_format = 'channels_first'
return tf.estimator.Estimator(
model_fn=mnist.model_fn, params={
'data_format': data_format
})
class Tests(tf.test.TestCase):
"""Run tests for MNIST model.
MNIST uses contrib and will not work with TF 2.0. All tests are disabled if
using TF 2.0.
"""
def test_mnist(self):
classifier = make_estimator()
classifier.train(input_fn=dummy_input_fn, steps=2)
eval_results = classifier.evaluate(input_fn=dummy_input_fn, steps=1)
loss = eval_results['loss']
global_step = eval_results['global_step']
accuracy = eval_results['accuracy']
self.assertEqual(loss.shape, ())
self.assertEqual(2, global_step)
self.assertEqual(accuracy.shape, ())
input_fn = lambda: tf.random.uniform([3, 784])
predictions_generator = classifier.predict(input_fn)
for _ in range(3):
predictions = next(predictions_generator)
self.assertEqual(predictions['probabilities'].shape, (10,))
self.assertEqual(predictions['classes'].shape, ())
def mnist_model_fn_helper(self, mode, multi_gpu=False):
features, labels = dummy_input_fn()
image_count = features.shape[0]
spec = mnist.model_fn(features, labels, mode, {
'data_format': 'channels_last',
'multi_gpu': multi_gpu
})
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = spec.predictions
self.assertAllEqual(predictions['probabilities'].shape, (image_count, 10))
self.assertEqual(predictions['probabilities'].dtype, tf.float32)
self.assertAllEqual(predictions['classes'].shape, (image_count,))
self.assertEqual(predictions['classes'].dtype, tf.int64)
if mode != tf.estimator.ModeKeys.PREDICT:
loss = spec.loss
self.assertAllEqual(loss.shape, ())
self.assertEqual(loss.dtype, tf.float32)
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = spec.eval_metric_ops
self.assertAllEqual(eval_metric_ops['accuracy'][0].shape, ())
self.assertAllEqual(eval_metric_ops['accuracy'][1].shape, ())
self.assertEqual(eval_metric_ops['accuracy'][0].dtype, tf.float32)
self.assertEqual(eval_metric_ops['accuracy'][1].dtype, tf.float32)
def test_mnist_model_fn_train_mode(self):
self.mnist_model_fn_helper(tf.estimator.ModeKeys.TRAIN)
def test_mnist_model_fn_train_mode_multi_gpu(self):
self.mnist_model_fn_helper(tf.estimator.ModeKeys.TRAIN, multi_gpu=True)
def test_mnist_model_fn_eval_mode(self):
self.mnist_model_fn_helper(tf.estimator.ModeKeys.EVAL)
def test_mnist_model_fn_predict_mode(self):
self.mnist_model_fn_helper(tf.estimator.ModeKeys.PREDICT)
class Benchmarks(tf.test.Benchmark):
"""Simple speed benchmarking for MNIST."""
def benchmark_train_step_time(self):
classifier = make_estimator()
# Run one step to warmup any use of the GPU.
classifier.train(input_fn=dummy_input_fn, steps=1)
have_gpu = tf.test.is_gpu_available()
num_steps = 1000 if have_gpu else 100
name = 'train_step_time_%s' % ('gpu' if have_gpu else 'cpu')
start = time.time()
classifier.train(input_fn=dummy_input_fn, steps=num_steps)
end = time.time()
wall_time = (end - start) / num_steps
self.report_benchmark(
iters=num_steps,
wall_time=wall_time,
name=name,
extras={
'examples_per_sec': BATCH_SIZE / wall_time
})
if __name__ == '__main__':
logging.set_verbosity(logging.ERROR)
tf.disable_v2_behavior()
tf.test.main()
| [
"[email protected]"
] | |
5a9a226de42291f1f90430012f72aa1d128fea00 | aaa4eb09ebb66b51f471ebceb39c2a8e7a22e50a | /Lista 08/exercício 05.py | d60d2357f70d2d5edfb22beaaf828ffda4ce7614 | [
"MIT"
] | permissive | Brenda-Werneck/Listas-CCF110 | c0a079df9c26ec8bfe194072847b86b294a19d4a | 271b0930e6cce1aaa279f81378205c5b2d3fa0b6 | refs/heads/main | 2023-09-03T09:59:05.351611 | 2021-10-17T00:49:03 | 2021-10-17T00:49:03 | 411,115,920 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #Crie um algoritmo que leia os elementos de uma matriz inteira 10 x 10 e escreva somente os elementos acima da diagonal principal.
matriz = [[0 for i in range(10)] for j in range(10)]
for i in range(10):
for j in range(10):
matriz[i][j] = int(input(f"Digite o valor para o índice ({i},{j}): "))
for i in range(10):
for j in range(10):
if j > i:
print(matriz[i][j]) | [
"[email protected]"
] | |
9df870310f7741f1840bdb5a4722de6f5e914631 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /P.O.R.-master/pirates/leveleditor/worldData/tortuga_area_cave_b_1.py | cd44a96180dfdd7bb87decb582ce21f2652bccc6 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 142,242 | py | from pandac.PandaModules import Point3, VBase3, Vec4
objectStruct = {
'Interact Links': [
[
'1165198380.77Shochet',
'1165198398.22Shochet',
'Bi-directional'],
[
'1165198750.63Shochet',
'1174696064.0dxschafe',
'Bi-directional'],
[
'1176832256.0dxschafe',
'1174696832.0dxschafe0',
'Bi-directional'],
[
'1165198307.05Shochet',
'1165198339.88Shochet',
'Bi-directional'],
[
'1178912429.07Aholdun',
'1176832000.0dxschafe',
'Bi-directional'],
[
'1187230080.0dxschafe',
'1176832128.0dxschafe',
'Bi-directional']],
'Objects': {
'1158121765.09sdnaik': {
'Type': 'Island Game Area',
'Name': 'tortuga_area_cave_b_1',
'File': '',
'Instanced': True,
'Minimap': False,
'Objects': {
'1158171645.33sdnaik': {
'Type': 'Locator Node',
'Name': 'portal_interior_1',
'Hpr': VBase3(-98.822999999999993, 0.0, 0.0),
'Pos': Point3(407.79500000000002, 202.76900000000001, 1.9379999999999999),
'Scale': VBase3(1.0, 1.0, 1.0) },
'1158171645.36sdnaik': {
'Type': 'Locator Node',
'Name': 'portal_interior_2',
'Hpr': VBase3(-5.5789999999999997, 0.0, 0.0),
'Pos': Point3(-535.71799999999996, 237.303, 77.641000000000005),
'Scale': VBase3(1.0, 1.0, 1.0) },
'1162436263.03sdnaik': {
'Type': 'Locator Node',
'Name': 'portal_interior_3',
'Hpr': VBase3(1.1879999999999999, -1.45, -0.33800000000000002),
'Pos': Point3(105.468, -363.34500000000003, 0.35799999999999998),
'Scale': VBase3(1.0, 1.0, 1.0) },
'1162436315.06sdnaik': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(-99.859999999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(1.2709999999999999, -174.053, 30.359000000000002),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Ambush',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Model': 'models/misc/smiley' } },
'1162436324.81sdnaik': {
'Type': 'Movement Node',
'Hpr': VBase3(-99.859999999999999, 0.0, 0.0),
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(90.555000000000007, -242.13, 26.390000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/misc/smiley' } },
'1162436327.92sdnaik': {
'Type': 'Movement Node',
'Hpr': VBase3(-99.859999999999999, 0.0, 0.0),
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(119.16200000000001, -154.03299999999999, 25.109000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/misc/smiley' } },
'1162436337.55sdnaik': {
'Type': 'Movement Node',
'Hpr': VBase3(-99.859999999999999, 0.0, 0.0),
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(209.666, -151.852, 11.335000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/misc/smiley' } },
'1165198179.13Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(64.393000000000001, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(68.921999999999997, -172.97200000000001, 27.347000000000001),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1165198212.56Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': Point3(0.0, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-450.43799999999999, 98.209000000000003, 78.063999999999993),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Bat T2',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1165198250.78Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': Point3(0.0, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(-27.125, -24.824999999999999, 31.609999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Ambush',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1165198262.39Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(88.058000000000007, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(35.813000000000002, -10.69, 28.806999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1165198277.17Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(-153.42699999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(-98.992999999999995, 166.41200000000001, 34.792999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Scorp T3',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1165198307.05Shochet': {
'Type': 'Object Spawn Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-304.97199999999998, 215.03299999999999, 66.972999999999999),
'Priority': '1',
'Scale': VBase3(1.0, 1.0, 1.0),
'SpawnDelay': '300',
'Spawnables': 'Buried Treasure',
'Visual': {
'Color': (0.80000000000000004, 0.20000000000000001, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' },
'startingDepth': '12' },
'1165198339.88Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '3.7879',
'AnimSet': 'default',
'Hpr': VBase3(36.395000000000003, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '3.8333',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-243.67699999999999, 122.73099999999999, 66.085999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Bat T2',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0.0, 0.0, 0.65000000000000002, 1.0),
'Model': 'models/misc/smiley' } },
'1165198380.77Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(-34.704999999999998, 0.0, 0.0),
'Level': '5',
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(170.97999999999999, -246.67500000000001, 18.998999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Scorp T3',
'Start State': 'Ambush',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1165198398.22Shochet': {
'Type': 'Object Spawn Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(185.77600000000001, -234.41499999999999, 16.07),
'Priority': '1',
'Scale': VBase3(1.0, 1.0, 1.0),
'SpawnDelay': '600',
'Spawnables': 'Buried Treasure',
'Visual': {
'Color': (0.80000000000000004, 0.20000000000000001, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' },
'startingDepth': '12' },
'1165198503.47Shochet': {
'Type': 'Spawn Node',
'Aggro Radius': '9.0361',
'AnimSet': 'default',
'Hpr': VBase3(-33.582000000000001, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '1.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-277.32900000000001, -39.203000000000003, 56.488),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1165198531.48Shochet': {
'Type': 'Rope',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-364.76499999999999, -14.846, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.69999998807907104, 0.73000001907348633, 0.57999998331069946, 1.0),
'Model': 'models/props/rope_pile' } },
'1165198551.64Shochet': {
'Type': 'Bucket',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-365.74200000000002, -20.152999999999999, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.69999998807907104, 0.73000001907348633, 0.57999998331069946, 1.0),
'Model': 'models/props/bucket' } },
'1165198557.73Shochet': {
'Type': 'Bucket',
'DisableCollision': False,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(-337.63900000000001, -69.614000000000004, 55.795999999999999),
'Scale': VBase3(0.80400000000000005, 0.80400000000000005, 0.80400000000000005),
'Visual': {
'Model': 'models/props/bucket' } },
'1165198715.92Shochet': {
'Type': 'Barrel',
'DisableCollision': False,
'Hpr': VBase3(-162.65899999999999, 0.0, 0.0),
'Pos': Point3(-310.327, -116.15000000000001, 55.795999999999999),
'Scale': VBase3(0.74199999999999999, 0.74199999999999999, 0.74199999999999999),
'Visual': {
'Color': (0.89000000000000001, 0.81999999999999995, 0.68235294117647061, 1.0),
'Model': 'models/props/barrel_group_2' } },
'1165198750.63Shochet': {
'Type': 'Object Spawn Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(-212.51400000000001, -119.623, 55.795999999999999),
'Priority': '1',
'Scale': VBase3(1.0, 1.0, 1.0),
'SpawnDelay': '1200',
'Spawnables': 'Buried Treasure',
'Visual': {
'Color': (0.80000000000000004, 0.20000000000000001, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' },
'startingDepth': '12' },
'1165198987.73Shochet': {
'Type': 'Pig_stuff',
'DisableCollision': False,
'Hpr': VBase3(138.58099999999999, 0.0, 0.0),
'Pos': Point3(-340.64299999999997, -66.668000000000006, 55.795999999999999),
'Scale': VBase3(1.163, 1.163, 1.163),
'Visual': {
'Color': (0.69999998807907104, 0.69999998807907104, 0.69999998807907104, 1.0),
'Model': 'models/props/pigtrough' } },
'1165199209.06Shochet': {
'Type': 'Log_Stack',
'DisableCollision': True,
'Hpr': VBase3(110.553, 0.0, 0.0),
'Pos': Point3(-318.39600000000002, -86.885999999999996, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/Log_stack_c' } },
'1165199266.86Shochet': {
'Type': 'Log_Stack',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-443.637, 215.524, 78.063999999999993),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.89999997615814209, 0.89999997615814209, 0.89999997615814209, 1.0),
'Model': 'models/props/Log_stack_c' } },
'1165199269.66Shochet': {
'Type': 'Log_Stack',
'DisableCollision': False,
'Hpr': VBase3(98.906999999999996, 0.0, 0.0),
'Pos': Point3(-436.15800000000002, 207.75999999999999, 78.063999999999993),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.60000002384185791, 0.60000002384185791, 0.60000002384185791, 1.0),
'Model': 'models/props/Log_stack_c' } },
'1174696064.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '3.9157',
'AnimSet': 'bar_talk03',
'Hpr': VBase3(46.354999999999997, 0.0, 0.0),
'Level': '9',
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(-237.011, -78.780000000000001, 56.234999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1174696704.0dxschafe3': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(139.76300000000001, 2.4590000000000001, -1.379),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(71.093999999999994, -185.95599999999999, 27.251000000000001),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1174696832.0dxschafe0': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'axe_chop',
'Hpr': VBase3(121.938, 0.0, 0.0),
'Level': '8',
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(-312.25799999999998, -83.069999999999993, 55.795999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1174696960.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(137.95099999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(-16.731000000000002, 79.530000000000001, 31.138000000000002),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1174696960.0dxschafe0': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(118.69499999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(428.59899999999999, -167.691, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1174697088.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(78.296000000000006, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(433.52499999999998, -186.32599999999999, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Skel T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1174697088.0dxschafe0': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(-167.422, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '100',
'Pause Duration': '30',
'Pos': Point3(303.34899999999999, 146.86000000000001, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Bat T2',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1174697216.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(94.757000000000005, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(445.46600000000001, -177.12299999999999, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Scorp T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1175224320.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': Point3(0.0, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(192.91499999999999, -216.69499999999999, 16.038),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Scorp T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1175224576.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'default',
'Hpr': VBase3(127.81399999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-331.50599999999997, 220.59299999999999, 66.936999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Bat T2',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': '1',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0.0, 0.0, 0.65000000000000002, 1.0),
'Model': 'models/misc/smiley' } },
'1176832000.0dxschafe': {
'Type': 'Searchable Container',
'Aggro Radius': 5.0,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-347.61399999999998, -63.616999999999997, 55.795999999999999),
'Scale': VBase3(0.66600000000000004, 0.66600000000000004, 0.66600000000000004),
'Visual': {
'Color': (0.84999999999999998, 0.85999999999999999, 0.78823529411764703, 1.0),
'Model': 'models/props/barrel' },
'searchTime': '6.0',
'type': 'Barrel' },
'1176832128.0dxschafe': {
'Type': 'Searchable Container',
'Aggro Radius': 5.0,
'Hpr': VBase3(-37.155999999999999, 0.0, 0.0),
'Pos': Point3(-370.42399999999998, -17.001999999999999, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.92000001668930054, 0.89999997615814209, 0.79000002145767212, 1.0),
'Model': 'models/props/wellA' },
'searchTime': '6.0',
'type': 'WellA' },
'1176832256.0dxschafe': {
'Type': 'Searchable Container',
'Aggro Radius': 5.0,
'Hpr': VBase3(49.418999999999997, 0.0, 0.0),
'Pos': Point3(-310.98599999999999, -109.911, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.40000000596046448, 0.40000000596046448, 0.40000000596046448, 1.0),
'Model': 'models/props/crate_04' },
'searchTime': '6.0',
'type': 'Crate' },
'1176832256.0dxschafe0': {
'Type': 'Crate',
'DisableCollision': False,
'Hpr': VBase3(61.383000000000003, 0.0, 0.0),
'Objects': {
'1165198567.91Shochet': {
'Type': 'Baskets',
'DisableCollision': False,
'Hpr': VBase3(-61.383000000000003, 0.0, 0.0),
'Pos': Point3(0.17499999999999999, -1.244, 2.798),
'Scale': VBase3(1.615, 1.615, 1.615),
'Visual': {
'Model': 'models/props/basket' } } },
'Pos': Point3(-356.279, -58.765000000000001, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.77000000000000002, 0.77000000000000002, 0.7686274509803922, 1.0),
'Model': 'models/props/crate_04' } },
'1176832384.0dxschafe': {
'Type': 'Crate',
'DisableCollision': True,
'Hpr': VBase3(22.091000000000001, 0.0, 0.0),
'Pos': Point3(-351.827, -63.482999999999997, 55.795999999999999),
'Scale': VBase3(0.88600000000000001, 0.88600000000000001, 0.88600000000000001),
'Visual': {
'Color': (0.84999999999999998, 0.84999999999999998, 0.81568627450980391, 1.0),
'Model': 'models/props/crates_group_1' } },
'1178674718.63kmuller': {
'Type': 'Tunnel Cap',
'DisableCollision': False,
'Hpr': VBase3(-18.768999999999998, 0.0, 0.0),
'Pos': Point3(404.22399999999999, 196.28800000000001, 4.3490000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/tunnels/tunnelcap_cave_interior' } },
'1178674825.55kmuller': {
'Type': 'Tunnel Cap',
'DisableCollision': False,
'Hpr': VBase3(93.260000000000005, 0.0, 0.0),
'Pos': Point3(-532.98099999999999, 238.16200000000001, 80.477000000000004),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/tunnels/tunnelcap_cave_interior' } },
'1178911434.38Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(-118.715, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-488.57299999999998, 218.791, 78.063999999999993),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911724.66Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(-125.14700000000001, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-377.30900000000003, 173.01400000000001, 72.918999999999997),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911731.8Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(170.256, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-339.209, 135.66300000000001, 65.870999999999995),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911742.6Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(-125.54900000000001, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-326.05900000000003, 30.454999999999998, 59.337000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911745.27Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(65.685000000000002, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-203.52699999999999, -43.539999999999999, 59.027999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911759.62Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(108.952, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-138.08000000000001, -7.7290000000000001, 55.308),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911771.35Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(-155.97800000000001, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-42.875, 43.515000000000001, 32.305),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911776.87Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(155.792, 0.0, 0.0),
'Index': -1,
'Pos': Point3(-9.0069999999999997, 130.71299999999999, 30.789999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911781.71Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(138.06999999999999, 0.0, 0.0),
'Index': -1,
'Pos': Point3(99.465000000000003, -35.753999999999998, 25.975999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911785.35Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(9.0299999999999994, 0.0, 0.0),
'Index': -1,
'Pos': Point3(66.801000000000002, -229.447, 27.446000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911790.93Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(81.337000000000003, 0.0, 0.0),
'Index': -1,
'Pos': Point3(292.95800000000003, -140.55500000000001, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911800.87Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(81.631, 0.0, 0.0),
'Index': -1,
'Pos': Point3(390.12299999999999, -175.01599999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911806.46Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(-161.40799999999999, 0.0, 0.0),
'Index': -1,
'Pos': Point3(280.60500000000002, 105.41800000000001, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178911820.85Aholdun': {
'Type': 'Player Spawn Node',
'Hpr': VBase3(175.005, 0.0, 0.0),
'Index': -1,
'Pos': Point3(403.80599999999998, 128.46000000000001, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'All',
'Visual': {
'Color': (0.5, 0.5, 0.5, 1),
'Model': 'models/misc/smiley' } },
'1178912429.07Aholdun': {
'Type': 'Spawn Node',
'Aggro Radius': '12.0000',
'AnimSet': 'idleB',
'Hpr': VBase3(-83.716999999999999, 0.0, 0.0),
'Level': '8',
'Min Population': '1',
'Patrol Radius': '1.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(-322.47699999999998, -80.765000000000001, 55.795999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T2',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0.69999999999999996, 0.69999999999999996, 0.69999999999999996, 1.0),
'Model': 'models/misc/smiley' } },
'1186083916.67kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-156.24700000000001, 0.0, 0.0),
'Pos': Point3(123.57599999999999, -119.833, 22.908999999999999),
'Scale': VBase3(0.76400000000000001, 0.76400000000000001, 0.76400000000000001),
'Visual': {
'Color': (0.25999999046325684, 0.34999999403953552, 0.38999998569488525, 1.0),
'Model': 'models/props/rock_caveB_sphere' } },
'1186083958.75kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-60.417999999999999, 0.0, 0.0),
'Pos': Point3(139.75899999999999, -113.84099999999999, 23.702999999999999),
'Scale': VBase3(0.86099999999999999, 0.86099999999999999, 0.86099999999999999),
'Visual': {
'Color': (0.25999999046325684, 0.34999999403953552, 0.38999998569488525, 1.0),
'Model': 'models/props/rock_caveA_floor' } },
'1186535808.0dxschafe': {
'Type': 'Simple Fort',
'DisableCollision': False,
'Hpr': VBase3(54.356999999999999, 0.0, 0.0),
'Pos': Point3(-367.86200000000002, -73.421000000000006, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'UseMayaLOD': False,
'Visual': {
'Color': (0.5899999737739563, 0.5899999737739563, 0.49000000953674316, 1.0),
'Model': 'models/buildings/fort_eitc_annex_1' } },
'1186535936.0dxschafe': {
'Type': 'Simple Fort',
'DisableCollision': False,
'Hpr': VBase3(135.732, 0.0, 0.0),
'Pos': Point3(-324.35899999999998, -117.803, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'UseMayaLOD': False,
'Visual': {
'Color': (0.45000000000000001, 0.44, 0.28000000000000003, 1.0),
'Model': 'models/buildings/fort_eitc_annex_2' } },
'1186536320.0dxschafe': {
'Type': 'Simple Fort',
'DisableCollision': False,
'Hpr': VBase3(139.09700000000001, 0.0, 0.0),
'Objects': { },
'Pos': Point3(-306.63400000000001, -20.018999999999998, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'UseMayaLOD': False,
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/fort_guardhouse' } },
'1186536576.0dxschafe': {
'Type': 'Simple Fort',
'DisableCollision': False,
'Hpr': VBase3(143.30000000000001, 0.0, 0.0),
'Objects': { },
'Pos': Point3(-279.17000000000002, -41.328000000000003, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'UseMayaLOD': False,
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/fort_guardhouse' } },
'1186536576.0dxschafe0': {
'Type': 'Spanish Walls',
'DisableCollision': False,
'Hpr': VBase3(-38.68, 0.0, 0.0),
'Pos': Point3(-356.78199999999998, 22.190000000000001, 55.287999999999997),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/TallWallStone_60' } },
'1186536832.0dxschafe': {
'Type': 'Spanish Walls',
'DisableCollision': False,
'Hpr': VBase3(143.16900000000001, 0.0, 0.0),
'Pos': Point3(-372.84500000000003, -62.874000000000002, 49.432000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/TallWallStone_60' } },
'1186536832.0dxschafe0': {
'Type': 'Spanish Walls',
'DisableCollision': False,
'Hpr': VBase3(-38.524000000000001, 0.0, 0.0),
'Pos': Point3(-273.17200000000003, -46.271999999999998, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/TallWallStone_60' } },
'1186538496.0dxschafe': {
'Type': 'Spanish Walls',
'DisableCollision': False,
'Hpr': VBase3(-38.997999999999998, 0.0, 0.0),
'Pos': Point3(-226.23099999999999, -83.647000000000006, 55.825000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.58999999999999997, 0.60999999999999999, 0.46999999999999997, 1.0),
'Model': 'models/buildings/TallWallStone_60' } },
'1186699264.0dxschafe': {
'Type': 'Building Exterior',
'File': 'tortuga_cave_b_int_1',
'ExtUid': '1186699264.0dxschafe0',
'Hpr': VBase3(139.73699999999999, 0.0, 0.0),
'Objects': {
'1203997191.31dxschafe': {
'Type': 'Door Locator Node',
'Name': 'door_locator',
'Hpr': VBase3(-180.0, 0.0, 0.0),
'Pos': Point3(-0.54100000000000004, -20.634, 0.79500000000000004),
'Scale': VBase3(1.0, 1.0, 1.0) } },
'Pos': Point3(-345.68599999999998, -92.228999999999999, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.5899999737739563, 0.5899999737739563, 0.49000000953674316, 1.0),
'Door': 'models/buildings/shanty_guildhall_door',
'Model': 'models/buildings/fort_eitc',
'SignImage': 'models/buildings/sign1_eng_a_icon_blacksmith' } },
'1186699904.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '11.4458',
'AnimSet': 'attention',
'Hpr': VBase3(-133.46899999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '1.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-298.76799999999997, -10.036, 55.795999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T2',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0.0, 0.51000000000000001, 0.65000000000000002, 1.0),
'Model': 'models/misc/smiley' } },
'1187117022.19kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-19.824000000000002, 0.0, 0.0),
'Pos': Point3(-166.05799999999999, -35.743000000000002, 49.625999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.87000000476837158, 1.0, 1.0, 1.0),
'Model': 'models/props/rock_caveC_sphere' } },
'1187117094.23kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-96.183000000000007, 0.0, 0.0),
'Pos': Point3(-138.73400000000001, -48.973999999999997, 56.043999999999997),
'Scale': VBase3(2.5920000000000001, 2.903, 2.903),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1187117117.58kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(172.02000000000001, 0.0, 0.0),
'Pos': Point3(-122.524, -29.640999999999998, 45.991),
'Scale': VBase3(1.3919999999999999, 1.5109999999999999, 3.661),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1187117147.0kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-137.54400000000001, 0.0, 0.0),
'Pos': Point3(-133.22800000000001, -32.475999999999999, 51.048999999999999),
'Scale': VBase3(1.1379999999999999, 1.6930000000000001, 3.2160000000000002),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1187229184.0dxschafe': {
'Type': 'Simple Fort',
'DisableCollision': False,
'Hpr': VBase3(48.334000000000003, 0.0, 0.0),
'Objects': { },
'Pos': Point3(-195.40700000000001, -109.408, 57.746000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'UseMayaLOD': False,
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/fort_guardhouse' } },
'1187229184.0dxschafe0': {
'Type': 'Simple Fort',
'DisableCollision': False,
'Hpr': VBase3(-128.55199999999999, 0.0, 0.0),
'Objects': { },
'Pos': Point3(-354.69400000000002, 20.849, 57.767000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'UseMayaLOD': False,
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/fort_guardhouse' } },
'1187229184.0dxschafe1': {
'Type': 'Simple Fort',
'DisableCollision': False,
'Hpr': VBase3(-39.668999999999997, 0.0, 0.0),
'Objects': { },
'Pos': Point3(-238.99600000000001, -74.015000000000001, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'UseMayaLOD': False,
'Visual': {
'Color': (0.5899999737739563, 0.61000001430511475, 0.4699999988079071, 1.0),
'Model': 'models/buildings/fort_guardhouse' } },
'1187230080.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '3.9157',
'AnimSet': 'bar_talk01',
'Hpr': VBase3(-118.67700000000001, 0.0, 0.0),
'Level': '8',
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(-241.66499999999999, -75.265000000000001, 56.323999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1187393536.0dxschafe': {
'Type': 'Light_Fixtures',
'DisableCollision': False,
'Hpr': VBase3(127.358, 0.0, 0.0),
'Pos': Point3(-298.976, -15.923999999999999, 61.145000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/torch' } },
'1187393664.0dxschafe': {
'Type': 'Light_Fixtures',
'DisableCollision': False,
'Hpr': VBase3(127.358, 0.0, 0.0),
'Pos': Point3(-277.62599999999998, -32.347999999999999, 61.424999999999997),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/torch' } },
'1187393664.0dxschafe0': {
'Type': 'Light_Fixtures',
'DisableCollision': False,
'Hpr': VBase3(135.08799999999999, 0.0, 0.0),
'Pos': Point3(-334.71300000000002, -74.099999999999994, 61.972999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/torch' } },
'1187393664.0dxschafe1': {
'Type': 'Light_Fixtures',
'DisableCollision': False,
'Hpr': VBase3(138.059, 0.0, 0.0),
'Pos': Point3(-328.67399999999998, -79.126000000000005, 61.606999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/torch' } },
'1188518735.04dzlu': {
'Type': 'Light - Dynamic',
'Attenuation': '0.005',
'ConeAngle': '60.0000',
'DropOff': '0.0000',
'FlickRate': '0.5000',
'Flickering': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Intensity': '0.8434',
'LightType': 'AMBIENT',
'Pos': Point3(0.0, 0.0, 65.221999999999994),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0.57999999999999996, 0.84999999999999998, 1.0),
'Model': 'models/props/light_tool_bulb' } },
'1188518837.84dzlu': {
'Type': 'Light - Dynamic',
'Attenuation': '0.005',
'ConeAngle': '15.0000',
'DropOff': '69.9398',
'FlickRate': '0.5000',
'Flickering': False,
'Hpr': VBase3(121.19799999999999, -29.334, 167.268),
'Intensity': '0.9398',
'LightType': 'SPOT',
'Pos': Point3(-200.20500000000001, -11.209, 132.26599999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (1, 1, 1, 1),
'Model': 'models/props/light_tool_bulb' } },
'1188519129.73dzlu': {
'Type': 'Light - Dynamic',
'Attenuation': '0.005',
'ConeAngle': '45.9940',
'DropOff': '10.3012',
'FlickRate': '0.0000',
'Flickering': False,
'Hpr': VBase3(121.98, -44.432000000000002, 162.41999999999999),
'Intensity': '1.1084',
'LightType': 'SPOT',
'Pos': Point3(-240.245, 10.894, 100.396),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (1, 1, 1, 1),
'Model': 'models/props/light_tool_bulb' } },
'1190324864.0dxschafe': {
'Type': 'Shanty Tents',
'Hpr': VBase3(-178.83799999999999, 0.0, 0.0),
'Pos': Point3(303.01600000000002, -223.28800000000001, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (1.0, 0.93000000000000005, 0.75686274509803919, 1.0),
'Model': 'models/buildings/shanty_tent_house_body' } },
'1190324992.0dxschafe': {
'Type': 'Effect Node',
'EffectName': 'torch_effect',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(304.92899999999997, -199.095, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190324992.0dxschafe0': {
'Type': 'Effect Node',
'EffectName': 'torch_effect',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(304.33199999999999, -198.22800000000001, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190324992.0dxschafe1': {
'Type': 'Effect Node',
'EffectName': 'torch_effect',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(306.74799999999999, -198.22399999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190325120.0dxschafe0': {
'Type': 'Log_Stack',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(286.90600000000001, -211.404, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.56000000000000005, 0.51000000000000001, 0.31764705882352939, 1.0),
'Model': 'models/props/Log_stack_a' } },
'1190325120.0dxschafe1': {
'Type': 'Log_Stack',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(305.56999999999999, -198.048, 2.1749999999999998),
'Scale': VBase3(0.56599999999999995, 0.56599999999999995, 0.56599999999999995),
'Visual': {
'Model': 'models/vegetation/gen_log_group03' } },
'1190325248.0dxschafe': {
'Type': 'Barrel',
'DisableCollision': False,
'Hpr': VBase3(36.975000000000001, 0.0, 0.0),
'Pos': Point3(319.88200000000001, -218.947, 2.1749999999999998),
'Scale': VBase3(0.83999999999999997, 0.83999999999999997, 0.83999999999999997),
'Visual': {
'Color': (0.38, 0.48999999999999999, 0.44705882352941179, 1.0),
'Model': 'models/props/barrel' } },
'1190325248.0dxschafe0': {
'Type': 'Barrel',
'DisableCollision': False,
'Hpr': VBase3(-14.064, 0.0, 0.0),
'Pos': Point3(327.25900000000001, -223.303, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.39000000000000001, 0.47999999999999998, 0.45490196078431372, 1.0),
'Model': 'models/props/barrel_group_3' } },
'1190325248.0dxschafe1': {
'Type': 'Cart',
'DisableCollision': False,
'Hpr': VBase3(172.88999999999999, 0.0, 0.0),
'Pos': Point3(328.46499999999997, -216.97399999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.75, 0.75, 0.75294117647058822, 1.0),
'Model': 'models/props/cart_flat' } },
'1190325376.0dxschafe': {
'Type': 'Crate',
'DisableCollision': False,
'Hpr': VBase3(25.477, -0.46700000000000003, 0.0),
'Pos': Point3(294.18200000000002, -230.179, 2.169),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.78000000000000003, 0.75, 0.61176470588235299, 1.0),
'Model': 'models/props/crate' } },
'1190325376.0dxschafe0': {
'Type': 'Crate',
'DisableCollision': False,
'Hpr': VBase3(-82.179000000000002, 0.0, 0.0),
'Pos': Point3(282.87599999999998, -219.02099999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (1.0, 0.87, 0.80000000000000004, 1.0),
'Model': 'models/props/crates_group_1' } },
'1190325504.0dxschafe': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(296.32499999999999, -202.81200000000001, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.92000000000000004, 0.75, 0.65490196078431373, 1.0),
'Model': 'models/props/table_shanty_2' } },
'1190325504.0dxschafe0': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-11.57, 0.0, 0.0),
'Pos': Point3(292.39299999999997, -204.97900000000001, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.91000000000000003, 0.68999999999999995, 0.59607843137254901, 1.0),
'Model': 'models/props/stool_shanty' } },
'1190325504.0dxschafe1': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(308.45299999999997, -203.52799999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.95999999999999996, 0.88, 0.83921568627450982, 1.0),
'Model': 'models/props/stool_shanty' } },
'1190325504.0dxschafe2': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(60.771000000000001, 0.0, 0.0),
'Pos': Point3(303.64499999999998, -202.78999999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.95999999999999996, 0.88, 0.83921568627450982, 1.0),
'Model': 'models/props/stool_shanty' } },
'1190325632.0dxschafe': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-175.595, 0.0, 0.0),
'Objects': { },
'Pos': Point3(303.39499999999998, -231.989, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.56000000000000005, 0.51000000000000001, 0.31764705882352939, 1.0),
'Model': 'models/props/bench_shanty_2' } },
'1190325632.0dxschafe0': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-89.373999999999995, 0.0, 0.0),
'Pos': Point3(310.47000000000003, -226.291, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.67000000000000004, 0.65000000000000002, 0.57647058823529407, 1.0),
'Model': 'models/props/bed_shantyB' } },
'1190325632.0dxschafe1': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-91.700999999999993, 0.0, 0.0),
'Pos': Point3(310.565, -220.023, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.84999999999999998, 0.84999999999999998, 0.81176470588235294, 1.0),
'Model': 'models/props/bed_shantyB' } },
'1190325632.0dxschafe2': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-76.117999999999995, 0.094, -0.45800000000000002),
'Objects': { },
'Pos': Point3(292.86700000000002, -224.30199999999999, 2.1219999999999999),
'Scale': VBase3(1.145, 1.145, 1.145),
'Visual': {
'Color': (0.78000000000000003, 0.75, 0.61568627450980395, 1.0),
'Model': 'models/props/bench_shanty_2' } },
'1190325760.0dxschafe': {
'Type': 'LaundryRope',
'DisableCollision': False,
'Hpr': VBase3(179.48099999999999, 0.0, 0.0),
'Pos': Point3(303.00099999999998, -233.399, -2.6099999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.5, 0.5, 0.50196078431372548, 1.0),
'Model': 'models/props/LaundryRope' } },
'1190325760.0dxschafe1': {
'Type': 'Spawn Node',
'AnimSet': 'idleC',
'Hpr': VBase3(-136.40799999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(321.17700000000002, -210.80099999999999, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190325888.0dxschafe': {
'Type': 'Spawn Node',
'AnimSet': 'shovel',
'Hpr': VBase3(47.905000000000001, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(270.99299999999999, -197.15299999999999, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190325888.0dxschafe0': {
'Type': 'Spawn Node',
'Aggro Radius': '0.9036',
'AnimSet': 'bar_talk02',
'Hpr': VBase3(-8.1400000000000006, 0.0, 0.0),
'Level': '8',
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(322.45400000000001, -216.28299999999999, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'EITC T4',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190326016.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '2.4096',
'AnimSet': 'idleC',
'Hpr': VBase3(-62.640999999999998, 0.0, 0.0),
'Level': '8',
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(291.60399999999998, -207.79499999999999, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'EITC T4',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190326144.0dxschafe': {
'Type': 'Spawn Node',
'AnimSet': 'doctor_work',
'Hpr': VBase3(163.15600000000001, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': 100,
'Pause Duration': 30,
'Pos': Point3(296.303, -208.02600000000001, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Idle',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190326144.0dxschafe0': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-88.085999999999999, 0.0, 0.0),
'Pos': Point3(300.23599999999999, -203.672, 2.1749999999999998),
'Scale': VBase3(1.1359999999999999, 1.1359999999999999, 1.1359999999999999),
'Visual': {
'Color': (0.93999999999999995, 0.80000000000000004, 0.72941176470588232, 1.0),
'Model': 'models/props/bench_shanty_2' } },
'1190326400.0dxschafe': {
'Type': 'Spawn Node',
'AnimSet': 'idleB',
'Hpr': VBase3(112.479, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(297.19299999999998, -142.10300000000001, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190326784.0dxschafe': {
'Type': 'Cups',
'DisableCollision': False,
'Hpr': VBase3(-151.47200000000001, 0.0, 0.314),
'Pos': Point3(294.733, -205.071, 5.1059999999999999),
'Scale': VBase3(0.77200000000000002, 0.77200000000000002, 0.77200000000000002),
'Visual': {
'Model': 'models/props/cup_tin' } },
'1190417152.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '22.2892',
'AnimSet': 'attention',
'Hpr': VBase3(97.441000000000003, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '9.3554',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-273.97000000000003, 61.027000000000001, 64.373000000000005),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T2',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1190417152.0dxschafe0': {
'Type': 'Spawn Node',
'AnimSet': 'default',
'Hpr': VBase3(-80.219999999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-337.08100000000002, 68.757999999999996, 65.061999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T2',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0.0, 0.0, 0.65000000000000002, 1.0),
'Model': 'models/misc/smiley' } },
'1190417280.0dxschafe': {
'Type': 'Searchable Container',
'Aggro Radius': '5.0000',
'Hpr': VBase3(-37.57, 0.0, 0.0),
'Pos': Point3(-243.232, -74.319999999999993, 56.332000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'VisSize': '',
'Visual': {
'Model': 'models/props/crate_04' },
'searchTime': '6.0',
'type': 'Crate' },
'1190418048.0dxschafe': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-129.40100000000001, -2.278, 0.0),
'Pos': Point3(-310.67599999999999, -11.411, 55.268999999999998),
'Scale': VBase3(0.86099999999999999, 0.86099999999999999, 0.86099999999999999),
'Visual': {
'Color': (0.92000000000000004, 0.93000000000000005, 0.90980392156862744, 1.0),
'Model': 'models/props/rock_group_4_sphere' } },
'1190418048.0dxschafe0': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-58.069000000000003, 0.0, 0.0),
'Pos': Point3(-331.524, 7.298, 55.231000000000002),
'Scale': VBase3(0.86099999999999999, 0.86099999999999999, 0.86099999999999999),
'Visual': {
'Color': (0.83999999999999997, 0.84999999999999998, 0.80392156862745101, 1.0),
'Model': 'models/props/rock_group_5_sphere' } },
'1190418048.0dxschafe1': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(132.833, 0.0, -3.887),
'Pos': Point3(-346.44, 13.361000000000001, 54.948999999999998),
'Scale': VBase3(1.3600000000000001, 1.3600000000000001, 1.3600000000000001),
'Visual': {
'Color': (0.88, 0.89000000000000001, 0.8666666666666667, 1.0),
'Model': 'models/props/rock_group_5_sphere' } },
'1190418176.0dxschafe': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-39.450000000000003, 0.0, 0.0),
'Pos': Point3(-260.52499999999998, -46.938000000000002, 55.042999999999999),
'Scale': VBase3(1.1970000000000001, 1.1970000000000001, 1.1970000000000001),
'Visual': {
'Color': (0.5, 0.5, 0.5, 1.0),
'Model': 'models/props/rock_group_5_sphere' } },
'1190418176.0dxschafe0': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-29.405000000000001, 0.0, 0.0),
'Pos': Point3(-190.822, -93.793999999999997, 55.499000000000002),
'Scale': VBase3(1.7689999999999999, 1.7689999999999999, 1.7689999999999999),
'Visual': {
'Color': (0.5, 0.5, 0.5, 1.0),
'Model': 'models/props/rock_group_2_sphere' } },
'1190418304.0dxschafe': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-30.361000000000001, 0.0, 0.0),
'Pos': Point3(-382.63400000000001, -42.094999999999999, 54.280000000000001),
'Scale': VBase3(2.0489999999999999, 2.0489999999999999, 2.0489999999999999),
'Visual': {
'Color': (0.85999999999999999, 0.77647058823529413, 0.6705882352941176, 1.0),
'Model': 'models/props/rock_group_5_sphere' } },
'1190418304.0dxschafe0': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-47.396999999999998, 0.0, 0.0),
'Pos': Point3(-349.33999999999997, 39.994999999999997, 59.781999999999996),
'Scale': VBase3(1.506, 1.506, 1.506),
'Visual': {
'Color': (0.5, 0.5, 0.5, 1.0),
'Model': 'models/props/rock_group_1_sphere' } },
'1190418432.0dxschafe': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(68.052999999999997, 0.0, 0.0),
'Pos': Point3(-278.45400000000001, -132.57400000000001, 54.057000000000002),
'Scale': VBase3(2.98, 2.98, 2.98),
'Visual': {
'Color': (0.47999999999999998, 0.69999999999999996, 0.56000000000000005, 1.0),
'Model': 'models/props/rock_group_1_sphere' } },
'1190926976.0dchiappe0': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-271.685, -29.751000000000001, 55.795999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1190927104.0dchiappe': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-276.92200000000003, 75.936999999999998, 65.385000000000005),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1190927104.0dchiappe0': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-330.16899999999998, 53.048000000000002, 63.253999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1190927232.0dchiappe': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-365.096, 187.697, 68.905000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1190927872.0dchiappe': {
'Type': 'Interior_furnishings',
'DisableCollision': False,
'Hpr': VBase3(157.95699999999999, -70.334000000000003, 91.379999999999995),
'Pos': Point3(294.76999999999998, -202.51400000000001, 5.7999999999999998),
'Scale': VBase3(0.54500000000000004, 0.54500000000000004, 0.54500000000000004),
'Visual': {
'Model': 'models/props/shop_bsmith_dagger' } },
'1190928000.0dchiappe0': {
'Type': 'Cups',
'DisableCollision': False,
'Hpr': VBase3(-22.812999999999999, -0.55400000000000005, -178.68199999999999),
'Pos': Point3(294.48000000000002, -206.297, 6.1470000000000002),
'Scale': VBase3(1.1830000000000001, 1.1830000000000001, 1.1830000000000001),
'Visual': {
'Model': 'models/props/cup_tin' } },
'1190928256.0dchiappe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(231.625, -185.458, 6.9930000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1190928384.0dchiappe0': {
'Type': 'Rock',
'DisableCollision': False,
'Hpr': VBase3(4.3650000000000002, 0.0, 0.0),
'Pos': Point3(265.65300000000002, -195.584, 2.1749999999999998),
'Scale': VBase3(0.32100000000000001, 0.32100000000000001, 0.32100000000000001),
'Visual': {
'Model': 'models/props/rock_group_4_sphere' } },
'1190928512.0dchiappe2': {
'Type': 'Rock',
'DisableCollision': False,
'Hpr': VBase3(109.878, 0.0, 0.0),
'Pos': Point3(267.81099999999998, -196.92400000000001, 2.0190000000000001),
'Scale': VBase3(0.48499999999999999, 0.69799999999999995, 0.23899999999999999),
'Visual': {
'Color': (0.41999999999999998, 0.59999999999999998, 0.72999999999999998, 1.0),
'Model': 'models/props/dirt_pile' } },
'1190928640.0dchiappe0': {
'Type': 'Rock',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(270.85300000000001, -201.24100000000001, 2.1749999999999998),
'Scale': VBase3(0.33000000000000002, 0.33000000000000002, 0.33000000000000002),
'Visual': {
'Model': 'models/props/dirt_pile_cave' } },
'1190928640.0dchiappe1': {
'Type': 'Rock',
'DisableCollision': False,
'Hpr': VBase3(107.334, 0.0, 0.0),
'Pos': Point3(267.81099999999998, -201.01499999999999, 2.1749999999999998),
'Scale': VBase3(0.248, 0.248, 0.248),
'Visual': {
'Model': 'models/props/rock_group_3_sphere' } },
'1191866354.18kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-40.052, 0.0, 0.0),
'Pos': Point3(-327.47800000000001, -12.048999999999999, 55.795999999999999),
'Scale': VBase3(1.0760000000000001, 1.0760000000000001, 1.0760000000000001),
'Visual': {
'Color': (0.77000000000000002, 0.78000000000000003, 0.69411764705882351, 1.0),
'Model': 'models/props/rock_group_1_sphere' } },
'1191866382.48kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-63.313000000000002, 0.0, 0.0),
'Pos': Point3(-331.82499999999999, -3.2109999999999999, 55.478000000000002),
'Scale': VBase3(1.5860000000000001, 1.5860000000000001, 1.5860000000000001),
'Visual': {
'Color': (0.80000000000000004, 0.81000000000000005, 0.76078431372549016, 1.0),
'Model': 'models/props/rock_group_3_sphere' } },
'1191866461.7kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-51.747, 0.0, 0.0),
'Pos': Point3(-256.16199999999998, -69.468000000000004, 55.280000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/rockpile_cave_stone' } },
'1191866500.32kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-202.04599999999999, -125.574, 55.340000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.36000001430511475, 0.43999999761581421, 0.5, 1.0),
'Model': 'models/props/rockpile_cave_stone' } },
'1191866530.96kmuller': {
'Type': 'Rock',
'DisableCollision': False,
'Hpr': VBase3(29.850000000000001, 0.0, 0.0),
'Pos': Point3(-206.55099999999999, -103.899, 55.795999999999999),
'Scale': VBase3(0.26000000000000001, 0.26000000000000001, 0.26000000000000001),
'Visual': {
'Color': (0.62, 0.78000000000000003, 0.88235294117647056, 1.0),
'Model': 'models/props/rock_caveB_sphere' } },
'1191867492.56kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-43.417000000000002, 0.0, 0.0),
'Pos': Point3(-339.03100000000001, 11.77, 54.920999999999999),
'Scale': VBase3(4.5119999999999996, 1.3680000000000001, 1.919),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube' } },
'1191867523.17kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-39.975000000000001, 0.0, 0.0),
'Pos': Point3(-330.82600000000002, -4.5519999999999996, 55.795999999999999),
'Scale': VBase3(5.181, 1.929, 1.702),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube' } },
'1191867573.21kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-39.75, 0.0, 0.0),
'Pos': Point3(-254.98099999999999, -70.542000000000002, 53.537999999999997),
'Scale': VBase3(2.8460000000000001, 2.4580000000000002, 2.3799999999999999),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube' } },
'1191867602.73kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(0.95399999999999996, 0.0, 0.0),
'Pos': Point3(-249.52799999999999, -76.189999999999998, 54.771999999999998),
'Scale': VBase3(0.51000000000000001, 1.0, 1.7370000000000001),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867711.21kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-204.792, -97.459000000000003, 54.847999999999999),
'Scale': VBase3(2.2080000000000002, 2.2080000000000002, 2.2080000000000002),
'Visual': {
'Model': 'models/props/rock_3_sphere' } },
'1191867730.78kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-125.776, 0.0, 0.0),
'Pos': Point3(-202.958, -88.858000000000004, 55.795999999999999),
'Scale': VBase3(2.0990000000000002, 2.0990000000000002, 2.0990000000000002),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867749.09kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(154.64400000000001, 0.0, 0.0),
'Pos': Point3(-187.96700000000001, -84.748999999999995, 54.576000000000001),
'Scale': VBase3(2.0059999999999998, 1.7689999999999999, 2.3330000000000002),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867800.53kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(72.995999999999995, 0.0, 0.0),
'Pos': Point3(-259.04700000000003, -51.161000000000001, 55.795999999999999),
'Scale': VBase3(0.873, 1.4330000000000001, 2.9079999999999999),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867837.78kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(151.79400000000001, 0.0, 0.0),
'Pos': Point3(-263.91699999999997, -43.725999999999999, 54.978999999999999),
'Scale': VBase3(1.4550000000000001, 1.4990000000000001, 2.9860000000000002),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867881.68kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-149.202, 0.0, 0.0),
'Pos': Point3(-315.09300000000002, -9.3279999999999994, 54.628999999999998),
'Scale': VBase3(1.0, 1.0, 2.3319999999999999),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867896.37kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(142.006, 0.0, 0.0),
'Pos': Point3(-309.68900000000002, -7.6989999999999998, 55.253999999999998),
'Scale': VBase3(0.32200000000000001, 1.0, 2.3969999999999998),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867934.7kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(50.884999999999998, 0.0, 0.0),
'Pos': Point3(-342.964, 36.116, 58.156999999999996),
'Scale': VBase3(1.585, 1.585, 3.3719999999999999),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867953.0kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(127.227, 0.0, 0.0),
'Pos': Point3(-341.017, 45.661999999999999, 60.677),
'Scale': VBase3(1.0, 1.0, 2.8879999999999999),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191867988.96kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(50.372999999999998, 0.0, 0.0),
'Pos': Point3(-388.18000000000001, -36.228000000000002, 54.823),
'Scale': VBase3(4.0529999999999999, 4.0529999999999999, 4.0529999999999999),
'Visual': {
'Model': 'models/props/rock_4_sphere' } },
'1191868005.95kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(129.84, 0.0, 0.0),
'Pos': Point3(-383.08100000000002, -36.814, 54.555999999999997),
'Scale': VBase3(1.984, 1.984, 3.137),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191868020.75kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(56.182000000000002, 0.0, 0.0),
'Pos': Point3(-380.274, -49.298999999999999, 55.795999999999999),
'Scale': VBase3(1.268, 1.268, 2.6779999999999999),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191868051.62kmuller': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(-98.352999999999994, 0.0, 0.0),
'Pos': Point3(-394.47800000000001, 2.21, 55.356999999999999),
'Scale': VBase3(0.66500000000000004, 0.66500000000000004, 0.66500000000000004),
'Visual': {
'Color': (0.39000000000000001, 0.44, 0.43529411764705883, 1.0),
'Model': 'models/props/rockpile_cave_stone' } },
'1191868230.18kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Holiday': '',
'Hpr': VBase3(90.042000000000002, 0.0, 0.0),
'Pos': Point3(-309.67399999999998, -121.431, 55.701999999999998),
'Scale': VBase3(0.90400000000000003, 2.5790000000000002, 2.5790000000000002),
'VisSize': '',
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191868250.5kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(161.196, 0.0, 0.0),
'Pos': Point3(-276.39499999999998, -118.38800000000001, 54.639000000000003),
'Scale': VBase3(2.23, 2.2650000000000001, 2.8940000000000001),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191868310.0kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(52.128, 0.0, 0.0),
'Pos': Point3(-317.56999999999999, -84.878, 55.795999999999999),
'Scale': VBase3(1.427, 1.222, 1.181),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube' } },
'1191868369.04kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(123.74299999999999, 0.0, 0.0),
'Pos': Point3(-346.072, -63.735999999999997, 55.228000000000002),
'Scale': VBase3(1.0, 1.0, 1.8540000000000001),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1191868381.03kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-172.99199999999999, 0.0, 0.0),
'Pos': Point3(-353.803, -60.32, 55.234999999999999),
'Scale': VBase3(1.0, 1.0, 1.843),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1192838354.48dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(421.45800000000003, 50.709000000000003, -1.0640000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192838366.94dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(339.69799999999998, -66.445999999999998, -1.0640000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192838486.16dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(289.786, 60.472999999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192838489.22dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(427.35000000000002, 120.36799999999999, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192841225.98dxschafe': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-471.03800000000001, 164.34700000000001, 78.063999999999993),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192841232.98dxschafe': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-300.387, 79.921999999999997, 65.341999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192843451.03dxschafe': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-412.39499999999998, 199.63900000000001, 78.063999999999993),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192843457.3dxschafe': {
'Type': 'Movement Node',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-287.71600000000001, 215.93199999999999, 67.052000000000007),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192843589.59dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(92.296000000000006, -136.19999999999999, 26.303000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192843669.58dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(214.71899999999999, -254.38900000000001, 10.343999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192843764.56dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-178.19900000000001, -11.819000000000001, 57.575000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1192843797.75dxschafe': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(30.576000000000001, -177.71600000000001, 29.053000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1193356416.0dxschafe0': {
'Type': 'Spawn Node',
'AnimSet': 'idleB',
'Hpr': VBase3(137.524, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(333.74200000000002, -203.06200000000001, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1193356416.0dxschafe1': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(385.83100000000002, 148.761, 2.1749999999999998),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1193356416.0dxschafe2': {
'Type': 'Spawn Node',
'AnimSet': 'idleB',
'Hpr': VBase3(-138.21700000000001, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '12.0000',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(298.06400000000002, -194.239, 2.1749999999999998),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T3',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1193356416.0dxschafe3': {
'Type': 'Movement Node',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(41.904000000000003, -130.72800000000001, 28.545999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1193356544.0dxschafe': {
'Type': 'Spawn Node',
'Aggro Radius': '22.2892',
'AnimSet': 'attention',
'Hpr': VBase3(12.289999999999999, 0.0, 0.0),
'Min Population': '1',
'Patrol Radius': '9.3554',
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-288.14100000000002, -102.958, 55.795999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Scale': VBase3(1.0, 1.0, 1.0),
'Spawnables': 'Navy T2',
'Start State': 'Patrol',
'StartFrame': '0',
'Team': 'default',
'TrailFX': 'None',
'VisSize': '',
'Visual': {
'Color': (0, 0, 0.65000000000000002, 1),
'Model': 'models/misc/smiley' } },
'1193356544.0dxschafe0': {
'Type': 'Movement Node',
'Hpr': VBase3(-85.150999999999996, 0.0, 0.0),
'Pause Chance': '0',
'Pause Duration': '5',
'Pos': Point3(-304.78399999999999, 117.251, 65.780000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.65000000000000002, 0, 0, 1),
'Model': 'models/misc/smiley' } },
'1200003606.2akelts': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-151.48099999999999, 0.0, 0.0),
'Pos': Point3(502.91800000000001, -134.55099999999999, 2.1749999999999998),
'Scale': VBase3(0.379, 1.0, 1.0),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1200005249.48akelts': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-61.747999999999998, 0.0, 0.0),
'Pos': Point3(-210.078, -102.52800000000001, 55.795999999999999),
'Scale': VBase3(1.9299999999999999, 1.0, 1.964),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1200005508.08akelts': {
'Type': 'Rock',
'DisableCollision': True,
'Hpr': VBase3(63.838000000000001, 0.0, 0.0),
'Pos': Point3(-209.34700000000001, -100.339, 54.244999999999997),
'Scale': VBase3(1.52, 1.52, 1.52),
'Visual': {
'Model': 'models/props/rock_3_sphere' } },
'1203997192.95dxschafe': {
'Type': 'Door Locator Node',
'Name': 'door_locator',
'Hpr': VBase3(-180.0, 0.0, 0.0),
'Pos': Point3(-0.54100000000000004, -20.634, 0.79500000000000004),
'Scale': VBase3(1.0, 1.0, 1.0) },
'1234979173.77caoconno': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Holiday': '',
'Hpr': VBase3(-155.733, 0.0, 0.0),
'Pos': Point3(-300.36799999999999, -121.029, 55.701999999999998),
'Scale': VBase3(3.077, 2.5790000000000002, 2.5790000000000002),
'VisSize': '',
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1239930457.38piwanow': {
'Type': 'NavySailor',
'AnimSet': 'idleB',
'AvId': 0,
'AvTrack': 0,
'Boss': True,
'DNA': '1239930457.38piwanow',
'Hpr': VBase3(-94.399000000000001, 0.0, 0.0),
'NavyFaction': 'TradingCo',
'Patrol Radius': '12.0000',
'Pos': Point3(-370.43400000000003, -28.765000000000001, 55.795999999999999),
'PoseAnim': '',
'PoseFrame': '',
'Respawns': True,
'Scale': VBase3(1.0, 1.0, 1.0),
'Start State': 'Idle',
'StartFrame': '0',
'TrailFX': 'None',
'VisSize': '' } },
'Visibility': 'Grid',
'Visual': {
'Model': 'models/caves/cave_b_zero' } } },
'TodSettings': {
'AmbientColors': {
-1: Vec4(0.44705899999999998, 0.44705899999999998, 0.415686, 1),
0: Vec4(0.49603900000000001, 0.56862699999999999, 0.67451000000000005, 1),
2: Vec4(1, 1, 1, 1),
4: Vec4(0.72156900000000002, 0.611765, 0.61960800000000005, 1),
6: Vec4(0.43705899999999998, 0.44647100000000001, 0.55666700000000002, 1),
8: Vec4(0.38921600000000001, 0.42627500000000002, 0.569608, 1),
12: Vec4(0.34000000000000002, 0.28000000000000003, 0.40999999999999998, 1),
13: Vec4(0.34000000000000002, 0.28000000000000003, 0.40999999999999998, 1),
16: Vec4(0.25, 0.25, 0.25, 1) },
'DirectionalColors': {
-1: Vec4(0.81568600000000002, 0.71764700000000003, 0.80392200000000003, 1),
0: Vec4(0.96078399999999997, 0.91372500000000001, 0.89411799999999997, 1),
2: Vec4(1, 1, 1, 1),
4: Vec4(0.439216, 0.17647099999999999, 0, 1),
6: Vec4(0.51372600000000002, 0.48235299999999998, 0.64313699999999996, 1),
8: Vec4(0.44705899999999998, 0.439216, 0.54117599999999999, 1),
12: Vec4(0.66000000000000003, 0.76000000000000001, 0.050000000000000003, 1),
13: Vec4(0.66000000000000003, 0.76000000000000001, 0.050000000000000003, 1),
16: Vec4(0, 0, 0, 1) },
'FogColors': {
-1: Vec4(0.87058800000000003, 0.87451000000000001, 0.82352899999999996, 1),
0: Vec4(0.27450999999999998, 0.19215699999999999, 0.21176500000000001, 0),
2: Vec4(0.0313726, 0.054901999999999999, 0.078431399999999998, 1),
4: Vec4(0.231373, 0.20392199999999999, 0.18431400000000001, 1),
6: Vec4(0.156863, 0.219608, 0.32941199999999998, 0),
8: Vec4(0.129412, 0.13725499999999999, 0.207843, 0),
12: Vec4(0.10000000000000001, 0.12, 0.029999999999999999, 0),
13: Vec4(0.10000000000000001, 0.12, 0.029999999999999999, 0),
16: Vec4(0.25, 0.25, 0.25, 1) },
'FogRanges': {
0: 0.00020000000000000001,
2: 0.00060000002849847078,
4: 0.00039999998989515007,
6: 0.00040000000000000002,
8: 0.00020000000000000001,
12: 0.00025000000000000001,
13: 0.00025000000000000001,
16: 0.0001 },
'LinearFogRanges': {
0: (0.0, 100.0),
2: (0.0, 100.0),
4: (0.0, 100.0),
6: (0.0, 100.0),
8: (0.0, 100.0),
12: (0.0, 100.0),
13: (0.0, 100.0),
16: (0.0, 100.0) } },
'Node Links': [
[
'1162436315.06sdnaik',
'1162436324.81sdnaik',
'Bi-directional'],
[
'1162436315.06sdnaik',
'1162436327.92sdnaik',
'Bi-directional'],
[
'1162436315.06sdnaik',
'1162436337.55sdnaik',
'Bi-directional'],
[
'1162436324.81sdnaik',
'1162436327.92sdnaik',
'Bi-directional'],
[
'1162436324.81sdnaik',
'1162436337.55sdnaik',
'Bi-directional'],
[
'1162436327.92sdnaik',
'1162436337.55sdnaik',
'Bi-directional'],
[
'1186699904.0dxschafe',
'1190926976.0dchiappe0',
'Bi-directional'],
[
'1190417152.0dxschafe',
'1190927104.0dchiappe0',
'Bi-directional'],
[
'1190927104.0dchiappe',
'1190417152.0dxschafe0',
'Bi-directional'],
[
'1190927232.0dchiappe',
'1165198212.56Shochet',
'Bi-directional'],
[
'1190928256.0dchiappe',
'1190326400.0dxschafe',
'Bi-directional'],
[
'1192838354.48dxschafe',
'1192838366.94dxschafe',
'Bi-directional'],
[
'1174697088.0dxschafe0',
'1192838366.94dxschafe',
'Bi-directional'],
[
'1192838354.48dxschafe',
'1174697088.0dxschafe0',
'Bi-directional'],
[
'1192838486.16dxschafe',
'1192838489.22dxschafe',
'Bi-directional'],
[
'1192838486.16dxschafe',
'1174696960.0dxschafe0',
'Bi-directional'],
[
'1192841232.98dxschafe',
'1192841225.98dxschafe',
'Bi-directional'],
[
'1175224576.0dxschafe',
'1192841225.98dxschafe',
'Bi-directional'],
[
'1192841232.98dxschafe',
'1175224576.0dxschafe',
'Bi-directional'],
[
'1165198339.88Shochet',
'1192843457.3dxschafe',
'Bi-directional'],
[
'1165198339.88Shochet',
'1192843451.03dxschafe',
'Bi-directional'],
[
'1192843451.03dxschafe',
'1192843457.3dxschafe',
'Bi-directional'],
[
'1192843589.59dxschafe',
'1165198277.17Shochet',
'Bi-directional'],
[
'1192843589.59dxschafe',
'1192843669.58dxschafe',
'Bi-directional'],
[
'1165198262.39Shochet',
'1192843764.56dxschafe',
'Bi-directional'],
[
'1174696960.0dxschafe',
'1192843797.75dxschafe',
'Bi-directional'],
[
'1193356416.0dxschafe1',
'1193356416.0dxschafe0',
'Bi-directional'],
[
'1193356416.0dxschafe3',
'1193356416.0dxschafe2',
'Bi-directional'],
[
'1193356544.0dxschafe0',
'1193356544.0dxschafe',
'Bi-directional']],
'Layers': { },
'ObjectIds': {
'1158121765.09sdnaik': '["Objects"]["1158121765.09sdnaik"]',
'1158171645.33sdnaik': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1158171645.33sdnaik"]',
'1158171645.36sdnaik': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1158171645.36sdnaik"]',
'1162436263.03sdnaik': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1162436263.03sdnaik"]',
'1162436315.06sdnaik': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1162436315.06sdnaik"]',
'1162436324.81sdnaik': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1162436324.81sdnaik"]',
'1162436327.92sdnaik': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1162436327.92sdnaik"]',
'1162436337.55sdnaik': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1162436337.55sdnaik"]',
'1165198179.13Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198179.13Shochet"]',
'1165198212.56Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198212.56Shochet"]',
'1165198250.78Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198250.78Shochet"]',
'1165198262.39Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198262.39Shochet"]',
'1165198277.17Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198277.17Shochet"]',
'1165198307.05Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198307.05Shochet"]',
'1165198339.88Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198339.88Shochet"]',
'1165198380.77Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198380.77Shochet"]',
'1165198398.22Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198398.22Shochet"]',
'1165198503.47Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198503.47Shochet"]',
'1165198531.48Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198531.48Shochet"]',
'1165198551.64Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198551.64Shochet"]',
'1165198557.73Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198557.73Shochet"]',
'1165198567.91Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1176832256.0dxschafe0"]["Objects"]["1165198567.91Shochet"]',
'1165198715.92Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198715.92Shochet"]',
'1165198750.63Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198750.63Shochet"]',
'1165198987.73Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165198987.73Shochet"]',
'1165199209.06Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165199209.06Shochet"]',
'1165199266.86Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165199266.86Shochet"]',
'1165199269.66Shochet': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1165199269.66Shochet"]',
'1174696064.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174696064.0dxschafe"]',
'1174696704.0dxschafe3': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174696704.0dxschafe3"]',
'1174696832.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174696832.0dxschafe0"]',
'1174696960.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174696960.0dxschafe"]',
'1174696960.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174696960.0dxschafe0"]',
'1174697088.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174697088.0dxschafe"]',
'1174697088.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174697088.0dxschafe0"]',
'1174697216.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1174697216.0dxschafe"]',
'1175224320.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1175224320.0dxschafe"]',
'1175224576.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1175224576.0dxschafe"]',
'1176832000.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1176832000.0dxschafe"]',
'1176832128.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1176832128.0dxschafe"]',
'1176832256.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1176832256.0dxschafe"]',
'1176832256.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1176832256.0dxschafe0"]',
'1176832384.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1176832384.0dxschafe"]',
'1178674718.63kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178674718.63kmuller"]',
'1178674825.55kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178674825.55kmuller"]',
'1178911434.38Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911434.38Aholdun"]',
'1178911724.66Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911724.66Aholdun"]',
'1178911731.8Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911731.8Aholdun"]',
'1178911742.6Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911742.6Aholdun"]',
'1178911745.27Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911745.27Aholdun"]',
'1178911759.62Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911759.62Aholdun"]',
'1178911771.35Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911771.35Aholdun"]',
'1178911776.87Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911776.87Aholdun"]',
'1178911781.71Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911781.71Aholdun"]',
'1178911785.35Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911785.35Aholdun"]',
'1178911790.93Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911790.93Aholdun"]',
'1178911800.87Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911800.87Aholdun"]',
'1178911806.46Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911806.46Aholdun"]',
'1178911820.85Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178911820.85Aholdun"]',
'1178912429.07Aholdun': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1178912429.07Aholdun"]',
'1186083916.67kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186083916.67kmuller"]',
'1186083958.75kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186083958.75kmuller"]',
'1186535808.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186535808.0dxschafe"]',
'1186535936.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186535936.0dxschafe"]',
'1186536320.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186536320.0dxschafe"]',
'1186536576.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186536576.0dxschafe"]',
'1186536576.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186536576.0dxschafe0"]',
'1186536832.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186536832.0dxschafe"]',
'1186536832.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186536832.0dxschafe0"]',
'1186538496.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186538496.0dxschafe"]',
'1186699264.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186699264.0dxschafe"]',
'1186699264.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186699264.0dxschafe"]',
'1186699904.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186699904.0dxschafe"]',
'1187117022.19kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187117022.19kmuller"]',
'1187117094.23kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187117094.23kmuller"]',
'1187117117.58kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187117117.58kmuller"]',
'1187117147.0kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187117147.0kmuller"]',
'1187229184.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187229184.0dxschafe"]',
'1187229184.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187229184.0dxschafe0"]',
'1187229184.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187229184.0dxschafe1"]',
'1187230080.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187230080.0dxschafe"]',
'1187393536.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187393536.0dxschafe"]',
'1187393664.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187393664.0dxschafe"]',
'1187393664.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187393664.0dxschafe0"]',
'1187393664.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1187393664.0dxschafe1"]',
'1188518735.04dzlu': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1188518735.04dzlu"]',
'1188518837.84dzlu': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1188518837.84dzlu"]',
'1188519129.73dzlu': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1188519129.73dzlu"]',
'1190324864.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190324864.0dxschafe"]',
'1190324992.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190324992.0dxschafe"]',
'1190324992.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190324992.0dxschafe0"]',
'1190324992.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190324992.0dxschafe1"]',
'1190325120.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325120.0dxschafe0"]',
'1190325120.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325120.0dxschafe1"]',
'1190325248.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325248.0dxschafe"]',
'1190325248.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325248.0dxschafe0"]',
'1190325248.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325248.0dxschafe1"]',
'1190325376.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325376.0dxschafe"]',
'1190325376.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325376.0dxschafe0"]',
'1190325504.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325504.0dxschafe"]',
'1190325504.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325504.0dxschafe0"]',
'1190325504.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325504.0dxschafe1"]',
'1190325504.0dxschafe2': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325504.0dxschafe2"]',
'1190325632.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325632.0dxschafe"]',
'1190325632.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325632.0dxschafe0"]',
'1190325632.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325632.0dxschafe1"]',
'1190325632.0dxschafe2': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325632.0dxschafe2"]',
'1190325760.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325760.0dxschafe"]',
'1190325760.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325760.0dxschafe1"]',
'1190325888.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325888.0dxschafe"]',
'1190325888.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190325888.0dxschafe0"]',
'1190326016.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190326016.0dxschafe"]',
'1190326144.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190326144.0dxschafe"]',
'1190326144.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190326144.0dxschafe0"]',
'1190326400.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190326400.0dxschafe"]',
'1190326784.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190326784.0dxschafe"]',
'1190417152.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190417152.0dxschafe"]',
'1190417152.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190417152.0dxschafe0"]',
'1190417280.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190417280.0dxschafe"]',
'1190418048.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418048.0dxschafe"]',
'1190418048.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418048.0dxschafe0"]',
'1190418048.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418048.0dxschafe1"]',
'1190418176.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418176.0dxschafe"]',
'1190418176.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418176.0dxschafe0"]',
'1190418304.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418304.0dxschafe"]',
'1190418304.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418304.0dxschafe0"]',
'1190418432.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190418432.0dxschafe"]',
'1190926976.0dchiappe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190926976.0dchiappe0"]',
'1190927104.0dchiappe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190927104.0dchiappe"]',
'1190927104.0dchiappe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190927104.0dchiappe0"]',
'1190927232.0dchiappe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190927232.0dchiappe"]',
'1190927872.0dchiappe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190927872.0dchiappe"]',
'1190928000.0dchiappe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190928000.0dchiappe0"]',
'1190928256.0dchiappe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190928256.0dchiappe"]',
'1190928384.0dchiappe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190928384.0dchiappe0"]',
'1190928512.0dchiappe2': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190928512.0dchiappe2"]',
'1190928640.0dchiappe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190928640.0dchiappe0"]',
'1190928640.0dchiappe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1190928640.0dchiappe1"]',
'1191866354.18kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191866354.18kmuller"]',
'1191866382.48kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191866382.48kmuller"]',
'1191866461.7kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191866461.7kmuller"]',
'1191866500.32kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191866500.32kmuller"]',
'1191866530.96kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191866530.96kmuller"]',
'1191867492.56kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867492.56kmuller"]',
'1191867523.17kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867523.17kmuller"]',
'1191867573.21kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867573.21kmuller"]',
'1191867602.73kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867602.73kmuller"]',
'1191867711.21kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867711.21kmuller"]',
'1191867730.78kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867730.78kmuller"]',
'1191867749.09kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867749.09kmuller"]',
'1191867800.53kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867800.53kmuller"]',
'1191867837.78kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867837.78kmuller"]',
'1191867881.68kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867881.68kmuller"]',
'1191867896.37kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867896.37kmuller"]',
'1191867934.7kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867934.7kmuller"]',
'1191867953.0kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867953.0kmuller"]',
'1191867988.96kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191867988.96kmuller"]',
'1191868005.95kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868005.95kmuller"]',
'1191868020.75kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868020.75kmuller"]',
'1191868051.62kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868051.62kmuller"]',
'1191868230.18kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868230.18kmuller"]',
'1191868250.5kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868250.5kmuller"]',
'1191868310.0kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868310.0kmuller"]',
'1191868369.04kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868369.04kmuller"]',
'1191868381.03kmuller': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1191868381.03kmuller"]',
'1192838354.48dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192838354.48dxschafe"]',
'1192838366.94dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192838366.94dxschafe"]',
'1192838486.16dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192838486.16dxschafe"]',
'1192838489.22dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192838489.22dxschafe"]',
'1192841225.98dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192841225.98dxschafe"]',
'1192841232.98dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192841232.98dxschafe"]',
'1192843451.03dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192843451.03dxschafe"]',
'1192843457.3dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192843457.3dxschafe"]',
'1192843589.59dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192843589.59dxschafe"]',
'1192843669.58dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192843669.58dxschafe"]',
'1192843764.56dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192843764.56dxschafe"]',
'1192843797.75dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1192843797.75dxschafe"]',
'1193356416.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1193356416.0dxschafe0"]',
'1193356416.0dxschafe1': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1193356416.0dxschafe1"]',
'1193356416.0dxschafe2': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1193356416.0dxschafe2"]',
'1193356416.0dxschafe3': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1193356416.0dxschafe3"]',
'1193356544.0dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1193356544.0dxschafe"]',
'1193356544.0dxschafe0': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1193356544.0dxschafe0"]',
'1200003606.2akelts': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1200003606.2akelts"]',
'1200005249.48akelts': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1200005249.48akelts"]',
'1200005508.08akelts': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1200005508.08akelts"]',
'1203997191.31dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1186699264.0dxschafe"]["Objects"]["1203997191.31dxschafe"]',
'1203997192.95dxschafe': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1203997192.95dxschafe"]',
'1234979173.77caoconno': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1234979173.77caoconno"]',
'1239930457.38piwanow': '["Objects"]["1158121765.09sdnaik"]["Objects"]["1239930457.38piwanow"]' } }
extraInfo = {
'camPos': Point3(265.62, -249.672, 197.285),
'camHpr': VBase3(65.602199999999996, -66.227699999999999, -2.1180100000000001e-005),
'focalLength': 1.3999999761599999,
'skyState': 2,
'fog': 0 }
| [
"[email protected]"
] | |
327809a84718295084f6dd6659765fe2618759b4 | 2c4648efe8c7e408b8c3a649b2eed8bb846446ec | /codewars/Python/8 kyu/RegexCountLowercaseLetters/lowercase_count_test.py | 9963e8b814750d42a58eaf4ce0dbc591bce0db78 | [] | no_license | Adasumizox/ProgrammingChallenges | 9d79bd1b0ce4794b576124f9874aabb86d5c0713 | 3630fcde088d7991e344eb1b84805e9e756aa1a2 | refs/heads/master | 2021-07-16T08:16:57.538577 | 2020-07-19T19:58:28 | 2020-07-19T19:58:28 | 190,159,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | from lowercase_count import lowercase_count
import unittest
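# The module under test (lowercase_count.py) is not part of this file. A minimal
# implementation consistent with these tests -- and with the kata's regex theme --
# might look like the following hypothetical sketch:
#
#     import re
#
#     def lowercase_count(strng):
#         return len(re.findall(r'[a-z]', strng))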
class TestRegexCountLowercaseLetters(unittest.TestCase):
def test(self):
self.assertEqual(lowercase_count("abc"), 3)
self.assertEqual(lowercase_count("abcABC123"), 3)
self.assertEqual(lowercase_count("abcABC123!@#$%^&*()_-+=}{[]|\':;?/>.<,~"), 3)
self.assertEqual(lowercase_count(""), 0)
self.assertEqual(lowercase_count("ABC123!@#$%^&*()_-+=}{[]|\':;?/>.<,~"), 0)
self.assertEqual(lowercase_count("abcdefghijklmnopqrstuvwxyz"), 26)
def test_rand(self):
from random import randint, choice
chars = "abcdefghijklmnopqrstuvwqyzqwertyuiopasdfghjklzxcvbnmABC0123456789!@#\$%^&*()-_+={}[]|\:;?/>.<,)"
def randchar():
return choice(chars)
def randstr(l):
return "".join(randchar() for _ in range(l))
def solution(strng):
return len([ch for ch in strng if ch.islower()])
for i in range(40):
strng = randstr(randint(5, 20))
self.assertEqual(lowercase_count(strng), solution(strng), "Failed when strng = '{}'".format(strng))
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
5b7258bca0fe5638af4081a60d47911f67d42a7e | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.2/tests/regressiontests/forms/localflavor/pl.py | 96703fa07504708d232ea59784853cb85214537f | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/regressiontests/forms/localflavor/pl.py | [
"[email protected]"
] | |
74d581b1542990d6c95912aba7444715cb02de7c | a350e6471598e8518f639fcff50511c35a94bceb | /docker/FlaskFileSystem/file_system.py | 7b25c3b28c8fc371e8106f681690523fa29af01a | [
"MIT"
] | permissive | WooodHead/bearing_project | 2e26602c326f703869e13bf84cecba95edff59fa | ca64b04dad7010620414e37b2c7923fd904a0f11 | refs/heads/master | 2022-04-20T16:03:35.179584 | 2020-04-15T18:23:24 | 2020-04-15T18:23:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,332 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: file_system.py
@time: 2020-04-14 15:03
"""
# source .venv/bin/activate
# export FLASK_APP=file_system.py
# flask run -h 0.0.0.0
# * Running on http://127.0.0.1:5000/
from __future__ import unicode_literals
import os
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
UPLOAD_FOLDER = os.path.join(BASE_PATH, 'uploads')
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
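# The uploads/ directory is created next to this script at import time, so the
# process (or container) running the app needs write access to BASE_PATH.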
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'xls', 'xlsx', 'doc', 'docx', 'ppt', 'pptx'}
app = Flask(
__name__,
static_folder='uploads',
static_url_path='/files',
)
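# With static_folder='uploads' and static_url_path='/files', anything saved under
# uploads/ is served back by Flask's static handler, e.g. uploads/report.pdf (a
# hypothetical file) becomes reachable at /files/report.pdf; the download links in
# downloads() below are built with url_for('static', filename=...) for that reason.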
app.config['DEBUG'] = True
# flash message 功能需要 SECRET_KEY
app.config['SECRET_KEY'] = '\x03\xabjR\xbbg\x82\x0b{\x96f\xca\xa8\xbdM\xb0x\xdbK%\xf2\x07\r\x8c'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
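# Requests with a body larger than 16 MB are rejected with "413 Request Entity Too Large".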
@app.route('/')
def index():
return '''
<ul>
<li><a href="%s" target="_blank">%s</a></li>
</ul>
''' % (
url_for('downloads'),
url_for('downloads'),
)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
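# allowed_file() checks only the filename extension, e.g. 'notes.txt' -> True,
# 'setup.exe' -> False, 'README' (no dot) -> False; it never inspects file contents.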
@app.route('/uploads', methods=['GET', 'POST'])
def uploads():
"""文件上传"""
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
request_file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if request_file.filename == '':
flash('No selected file')
return redirect(request.url)
if request_file and allowed_file(request_file.filename):
filename = secure_filename(request_file.filename)
request_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('downloads', filename=filename))
else:
flash('Extension not allowed')
return redirect(request.url)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
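# Example client call (file name is hypothetical): a multipart POST such as
#   curl -F "file=@report.pdf" http://127.0.0.1:5000/uploads
# saves uploads/report.pdf and redirects to the /downloads/ listing.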
@app.route('/downloads/', methods=['GET', 'POST'])
@app.route('/downloads/<path:subdir>/', methods=['GET', 'POST'])
def downloads(subdir=''):
"""文件上传下载"""
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
request_file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if request_file.filename == '':
flash('No selected file')
return redirect(request.url)
if request_file and allowed_file(request_file.filename):
filename = secure_filename(request_file.filename)
cur_abs_dir = os.path.join(app.config['UPLOAD_FOLDER'], subdir)
print(os.path.join(cur_abs_dir, filename))
request_file.save(os.path.join(cur_abs_dir, filename))
return redirect(url_for('downloads', subdir=subdir))
else:
flash('Extension not allowed')
return redirect(request.url)
file_html = '''
<!doctype html>
<title>Uploads File</title>
<h3>Uploads File</h3>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
# subdir = secure_filename(subdir)
cur_abs_dir = os.path.join(app.config['UPLOAD_FOLDER'], subdir)
for root, dirs, files in os.walk(cur_abs_dir, topdown=True):
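        # os.walk() yields one (root, dirs, files) tuple per directory; the break at
        # the end of this loop keeps only the first one, i.e. only the immediate
        # children of cur_abs_dir are listed.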
print(root, dirs, files)
files.sort()
files.reverse()
# files.sort(reverse=True)
        # Directories
if dirs:
file_html += '<div>'
            file_html += '<span>Directories</span>'
file_html += '<ul>'
for dir_name in dirs:
file_html += '<li>'
file_html += '<a href="%s">' % url_for('downloads', subdir=os.path.join(subdir, dir_name))
file_html += dir_name
file_html += '</a>'
file_html += '</li>'
file_html += '</ul>'
file_html += '</div>'
        # Files
if files:
file_html += '<div>'
            file_html += '<span>Files</span>'
file_html += '<ul>'
for file_name in files:
file_html += '<li>'
file_html += '<a href="%s" target="_blank">' % url_for('static',
filename=os.path.join(subdir, file_name))
file_html += file_name
file_html += '</a>'
file_html += '</li>'
file_html += '</ul>'
file_html += '</div>'
break
return file_html
| [
"[email protected]"
] | |
fcee90732903a5d69df706143389337770cdb896 | c498cefc16ba5d75b54d65297b88357d669c8f48 | /static/datapack/data/scripts/quests/357_WarehouseKeepersAmbition/__init__.py | aaf1920475142e8f04ed667ebd0cdf21258eec6d | [] | no_license | ManWithShotgun/l2i-JesusXD-3 | e17f7307d9c5762b60a2039655d51ab36ec76fad | 8e13b4dda28905792621088714ebb6a31f223c90 | refs/heads/master | 2021-01-17T16:10:42.561720 | 2016-07-22T18:41:22 | 2016-07-22T18:41:22 | 63,967,514 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | # Made by disKret
# Rate fix by Gnat
import sys
from ru.catssoftware import Config
from ru.catssoftware.gameserver.model.quest import State
from ru.catssoftware.gameserver.model.quest import QuestState
from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest
qn = "357_WarehouseKeepersAmbition"
#CUSTOM VALUES
DROPRATE=50
REWARD1=900 #This is paid per item
REWARD2=10000 #Extra reward, if >= 100
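# Turn-in pays REWARD1 adena (item id 57) per crystal plus the REWARD2 bonus at 100+,
# e.g. 150 crystals -> 150 * 900 + 10000 = 145000 adena.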
#NPC
SILVA = 30686
#ITEMS
JADE_CRYSTAL = 5867
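# Drop handling in onKill(): divmod(DROPRATE * Config.RATE_DROP_QUEST, 100) yields
# the guaranteed drop count plus a percent chance of one extra crystal, e.g. with
# RATE_DROP_QUEST = 3: divmod(150, 100) = (1, 50) -> one crystal always, 50% chance of a second.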
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [JADE_CRYSTAL]
def onEvent (self,event,st) :
htmltext = event
if event == "30686-2.htm" :
st.set("cond","1")
st.setState(State.STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "30686-7.htm" :
count = st.getQuestItemsCount(JADE_CRYSTAL)
if count:
reward = count * REWARD1
if count >= 100 :
reward = reward + REWARD2
st.takeItems(JADE_CRYSTAL,-1)
st.rewardItems(57,reward)
else:
htmltext="30686-4.htm"
if event == "30686-8.htm" :
st.playSound("ItemSound.quest_finish")
st.exitQuest(1)
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
cond=st.getInt("cond")
jade = st.getQuestItemsCount(JADE_CRYSTAL)
if cond == 0 :
if player.getLevel() >= 47 :
htmltext = "30686-0.htm"
else:
htmltext = "30686-0a.htm"
st.exitQuest(1)
elif not jade :
htmltext = "30686-4.htm"
elif jade :
htmltext = "30686-6.htm"
return htmltext
def onKill(self,npc,player,isPet):
partyMember = self.getRandomPartyMemberState(player,State.STARTED)
if not partyMember: return
st = partyMember.getQuestState(qn)
if st :
numItems, chance = divmod(DROPRATE*Config.RATE_DROP_QUEST,100)
if st.getRandom(100) < chance :
numItems += 1
if numItems :
st.giveItems(JADE_CRYSTAL,int(numItems))
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(357,qn,"Warehouse Keepers Ambition")
QUEST.addStartNpc(SILVA)
QUEST.addTalkId(SILVA)
for MOBS in range(20594,20598) :
QUEST.addKillId(MOBS) | [
"[email protected]"
] |