blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3-616) | content_id (string, len 40) | detected_licenses (list, len 0-112) | license_type (string, 2 classes) | repo_name (string, len 5-115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, len 3 to 10.2M) | authors (list, len 1) | author_id (string, len 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fe4d6a04dfbfa4abc2c316b83f7cfbcaa30e5e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/wikipedia/testcase/interestallcases/testcase6_022_1.py | 64a6c615586113f38ef615001e76c1bd64bd8f23 | []
| no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,077 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.wikipedia',
'appActivity' : 'org.wikipedia.main.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.wikipedia/org.wikipedia.JacocoInstrumentation',
'noReset' : True
}
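
# The capabilities above point Appium at an Android 4.4 emulator running the
# Wikipedia app, with Jacoco coverage instrumentation enabled; the END_EMMA
# broadcasts sent in the finally blocks below dump the collected coverage.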
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
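
# The helpers below poll UiAutomator selectors with short retries; as a last
# resort they tap a neutral point (50, 50) to dismiss any overlay and try one
# final lookup before letting NoSuchElementException propagate.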
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
swipe(driver, 0.5, 0.6, 0.5, 0.2)
else:
return element
return
def clickoncheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if (len(lists) == 1) :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
# preference setting and exit
try :
os.popen("adb shell svc data enable")
time.sleep(5)
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.DeveloperSettingsActivity")
scrollToFindElement(driver, "new UiSelector().text(\"useRestbase_setManually\")").click()
clickoncheckable(driver, "new UiSelector().text(\"useRestbase_setManually\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"suppressNotificationPolling\")").click()
clickoncheckable(driver, "new UiSelector().text(\"suppressNotificationPolling\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"memoryLeakTest\")").click()
clickoncheckable(driver, "new UiSelector().text(\"memoryLeakTest\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")").click()
clickoncheckable(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")", "false")
driver.press_keycode(4)
time.sleep(2)
os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.SettingsActivity")
scrollToFindElement(driver, "new UiSelector().text(\"Download only over Wi-Fi\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Download only over Wi-Fi\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"Show images\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Show images\")", "true")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
finally :
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_022_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
# testcase022
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"Got it\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/voice_search_button\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/voice_search_button\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/voice_search_button\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/view_static_card_icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/page_toolbar_button_search\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"Find in page\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/search_src_text\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("Search");
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Navigate up\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"View main page\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/page_toolbar_button_search\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"Find in page\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"en\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Recent searches:\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_022\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.wikipedia'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
os.popen("adb shell svc data enable")
| [
"[email protected]"
]
| |
c27074644766ba4228e511a9a1c884d8ec0e431b | ea262de505a1dd5ae1c7b546b85184309c3fdd35 | /src/models/modules/scales.py | 78234592ac2641e1791aa6240573f86204bde16e | [
"MIT"
]
| permissive | Runki2018/CvPytorch | 306ff578c5f8d3d196d0834e5cad5adba7a89676 | 1e1c468e5971c1c2b037334f7911ae0a5087050f | refs/heads/master | 2023-08-25T09:48:48.764117 | 2021-10-15T05:11:21 | 2021-10-15T05:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # !/usr/bin/env python
# -- coding: utf-8 --
# @Time : 2021/3/29 9:22
# @Author : liumin
# @File : scales.py
import torch
import torch.nn as nn
class Scale(nn.Module):
"""
A learnable scale parameter
"""
def __init__(self, scale=1.0):
super(Scale, self).__init__()
self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
def forward(self, x):
return x * self.scale
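
# Minimal usage sketch (illustrative values, not part of the original file):
#   scale = Scale(scale=2.0)
#   y = scale(torch.ones(3))  # tensor([2., 2., 2.]); scale.scale is learnable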
| [
"[email protected]"
]
| |
3835bd462d27894a5442d6a412b2dd67de3d593d | 675cdd4d9d2d5b6f8e1383d1e60c9f758322981f | /supervised_learning/0x03-optimization/2-shuffle_data.py | 1fc0ce20d6f011ea71c6f64624e3d65b15d7e653 | []
| no_license | AndresSern/holbertonschool-machine_learning-1 | 5c4a8db28438d818b6b37725ff95681c4757fd9f | 7dafc37d306fcf2ea0f5af5bd97dfd78d388100c | refs/heads/main | 2023-07-11T04:47:01.565852 | 2021-08-03T04:22:38 | 2021-08-03T04:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | #!/usr/bin/env python3
"""
Shuffles the data points in two matrices the same way
"""
import numpy as np
def shuffle_data(X, Y):
"""
Shuffles the data points in two matrices the same way
"""
i = np.random.permutation(np.arange(X.shape[0]))
return X[i], Y[i]
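
# Usage sketch (hypothetical arrays): rows of X and entries of Y stay paired,
#   X = np.arange(6).reshape(3, 2); Y = np.array([0, 1, 2])
#   X_s, Y_s = shuffle_data(X, Y)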
| [
"[email protected]"
]
| |
f8fccfa10aaf61b927be76184af448a1b5c565f6 | 75fa11b13ddab8fd987428376f5d9c42dff0ba44 | /metadata-ingestion/tests/integration/snowflake/common.py | 43f5e04fbc89fcd2cb4b24d5cc32c9cf6600679d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
]
| permissive | RyanHolstien/datahub | 163d0ff6b4636919ed223ee63a27cba6db2d0156 | 8cf299aeb43fa95afb22fefbc7728117c727f0b3 | refs/heads/master | 2023-09-04T10:59:12.931758 | 2023-08-21T18:33:10 | 2023-08-21T18:33:10 | 246,685,891 | 0 | 0 | Apache-2.0 | 2021-02-16T23:48:05 | 2020-03-11T21:43:58 | TypeScript | UTF-8 | Python | false | false | 22,993 | py | import json
from datetime import datetime, timezone
from datahub.configuration.time_window_config import BucketDuration
from datahub.ingestion.source.snowflake import snowflake_query
from datahub.ingestion.source.snowflake.snowflake_query import SnowflakeQuery
NUM_TABLES = 10
NUM_VIEWS = 2
NUM_COLS = 10
NUM_OPS = 10
FROZEN_TIME = "2022-06-07 17:00:00"
def default_query_results( # noqa: C901
query,
num_tables=NUM_TABLES,
num_views=NUM_VIEWS,
num_cols=NUM_COLS,
num_ops=NUM_OPS,
):
if query == SnowflakeQuery.current_account():
return [{"CURRENT_ACCOUNT()": "ABC12345"}]
if query == SnowflakeQuery.current_region():
return [{"CURRENT_REGION()": "AWS_AP_SOUTH_1"}]
if query == SnowflakeQuery.show_tags():
return []
if query == SnowflakeQuery.current_role():
return [{"CURRENT_ROLE()": "TEST_ROLE"}]
elif query == SnowflakeQuery.current_version():
return [{"CURRENT_VERSION()": "X.Y.Z"}]
elif query == SnowflakeQuery.current_database():
return [{"CURRENT_DATABASE()": "TEST_DB"}]
elif query == SnowflakeQuery.current_schema():
return [{"CURRENT_SCHEMA()": "TEST_SCHEMA"}]
elif query == SnowflakeQuery.current_warehouse():
return [{"CURRENT_WAREHOUSE()": "TEST_WAREHOUSE"}]
elif query == SnowflakeQuery.show_databases():
return [
{
"name": "TEST_DB",
"created_on": datetime(2021, 6, 8, 0, 0, 0, 0),
"comment": "Comment for TEST_DB",
}
]
elif query == SnowflakeQuery.get_databases("TEST_DB"):
return [
{
"DATABASE_NAME": "TEST_DB",
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"COMMENT": "Comment for TEST_DB",
}
]
elif query == SnowflakeQuery.schemas_for_database("TEST_DB"):
return [
{
"SCHEMA_NAME": "TEST_SCHEMA",
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"COMMENT": "comment for TEST_DB.TEST_SCHEMA",
},
{
"SCHEMA_NAME": "TEST2_SCHEMA",
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"COMMENT": "comment for TEST_DB.TEST_SCHEMA",
},
]
elif query == SnowflakeQuery.tables_for_database("TEST_DB"):
raise Exception("Information schema query returned too much data")
elif query == SnowflakeQuery.show_views_for_database("TEST_DB"):
raise Exception("Information schema query returned too much data")
elif query == SnowflakeQuery.tables_for_schema("TEST_SCHEMA", "TEST_DB"):
return [
{
"TABLE_SCHEMA": "TEST_SCHEMA",
"TABLE_NAME": "TABLE_{}".format(tbl_idx),
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"BYTES": 1024,
"ROW_COUNT": 10000,
"COMMENT": "Comment for Table",
"CLUSTERING_KEY": None,
}
for tbl_idx in range(1, num_tables + 1)
]
elif query == SnowflakeQuery.show_views_for_schema("TEST_SCHEMA", "TEST_DB"):
return [
{
"schema_name": "TEST_SCHEMA",
"name": "VIEW_{}".format(view_idx),
"created_on": datetime(2021, 6, 8, 0, 0, 0, 0),
"comment": "Comment for View",
"text": None,
}
for view_idx in range(1, num_views + 1)
]
elif query == SnowflakeQuery.columns_for_schema("TEST_SCHEMA", "TEST_DB"):
raise Exception("Information schema query returned too much data")
elif query in [
*[
SnowflakeQuery.columns_for_table(
"TABLE_{}".format(tbl_idx), "TEST_SCHEMA", "TEST_DB"
)
for tbl_idx in range(1, num_tables + 1)
],
*[
SnowflakeQuery.columns_for_table(
"VIEW_{}".format(view_idx), "TEST_SCHEMA", "TEST_DB"
)
for view_idx in range(1, num_views + 1)
],
]:
return [
{
# "TABLE_CATALOG": "TEST_DB",
# "TABLE_SCHEMA": "TEST_SCHEMA",
# "TABLE_NAME": "TABLE_{}".format(tbl_idx),
"COLUMN_NAME": "COL_{}".format(col_idx),
"ORDINAL_POSITION": col_idx,
"IS_NULLABLE": "NO",
"DATA_TYPE": "TEXT" if col_idx > 1 else "NUMBER",
"COMMENT": "Comment for column",
"CHARACTER_MAXIMUM_LENGTH": 255 if col_idx > 1 else None,
"NUMERIC_PRECISION": None if col_idx > 1 else 38,
"NUMERIC_SCALE": None if col_idx > 1 else 0,
}
for col_idx in range(1, num_cols + 1)
]
elif query in (
SnowflakeQuery.use_database("TEST_DB"),
SnowflakeQuery.show_primary_keys_for_schema("TEST_SCHEMA", "TEST_DB"),
SnowflakeQuery.show_foreign_keys_for_schema("TEST_SCHEMA", "TEST_DB"),
):
return []
elif query == SnowflakeQuery.get_access_history_date_range():
return [
{
"MIN_TIME": datetime(2021, 6, 8, 0, 0, 0, 0),
"MAX_TIME": datetime(2022, 6, 7, 7, 17, 0, 0),
}
]
elif query == snowflake_query.SnowflakeQuery.operational_data_for_time_window(
1654473600000,
1654586220000,
):
return [
{
"QUERY_START_TIME": datetime(2022, 6, 2, 4, 41, 1, 367000).replace(
tzinfo=timezone.utc
),
"QUERY_TEXT": "create or replace table TABLE_{} as select * from TABLE_2 left join TABLE_3 using COL_1 left join TABLE 4 using COL2".format(
op_idx
),
"QUERY_TYPE": "CREATE_TABLE_AS_SELECT",
"ROWS_INSERTED": 0,
"ROWS_UPDATED": 0,
"ROWS_DELETED": 0,
"BASE_OBJECTS_ACCESSED": json.dumps(
[
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_3",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_4",
},
]
),
"DIRECT_OBJECTS_ACCESSED": json.dumps(
[
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_3",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_4",
},
]
),
"OBJECTS_MODIFIED": json.dumps(
[
{
"columns": [
{
"columnId": 0,
"columnName": "COL_{}".format(col_idx),
"directSources": [
{
"columnName": "COL_{}".format(col_idx),
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
],
}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
}
]
),
"USER_NAME": "SERVICE_ACCOUNT_TESTS_ADMIN",
"FIRST_NAME": None,
"LAST_NAME": None,
"DISPLAY_NAME": "SERVICE_ACCOUNT_TESTS_ADMIN",
"EMAIL": "[email protected]",
"ROLE_NAME": "ACCOUNTADMIN",
}
for op_idx in range(1, num_ops + 1)
]
elif (
query
== snowflake_query.SnowflakeQuery.usage_per_object_per_time_bucket_for_time_window(
1654473600000,
1654586220000,
use_base_objects=False,
top_n_queries=10,
include_top_n_queries=True,
time_bucket_size=BucketDuration.DAY,
)
):
return []
elif query in (
snowflake_query.SnowflakeQuery.table_to_table_lineage_history(
1654473600000,
1654586220000,
),
snowflake_query.SnowflakeQuery.table_to_table_lineage_history(
1654473600000, 1654586220000, False
),
):
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
"UPSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_2",
"UPSTREAM_TABLE_COLUMNS": json.dumps(
[
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
]
),
"DOWNSTREAM_TABLE_COLUMNS": json.dumps(
[
{
"columnId": 0,
"columnName": "COL_{}".format(col_idx),
"directSources": [
{
"columnName": "COL_{}".format(col_idx),
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
],
}
for col_idx in range(1, num_cols + 1)
]
),
}
for op_idx in range(1, num_ops + 1)
] + [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_1",
"UPSTREAM_TABLE_NAME": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"UPSTREAM_TABLE_COLUMNS": json.dumps(
[{"columnId": 0, "columnName": "COL_1"}]
),
"DOWNSTREAM_TABLE_COLUMNS": json.dumps(
[
{
"columnId": 0,
"columnName": "COL_1",
"directSources": [
{
"columnName": "COL_1",
"objectDomain": "Table",
"objectId": 0,
"objectName": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
}
],
}
]
),
}
]
elif query in (
snowflake_query.SnowflakeQuery.table_to_table_lineage_history_v2(
start_time_millis=1654473600000,
end_time_millis=1654586220000,
include_view_lineage=True,
include_column_lineage=True,
),
):
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
"DOWNSTREAM_TABLE_DOMAIN": "TABLE",
"UPSTREAM_TABLES": json.dumps(
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"upstream_object_domain": "TABLE",
}
]
+ ( # This additional upstream is only for TABLE_1
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.VIEW_1",
"upstream_object_domain": "VIEW",
},
{
"upstream_object_name": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"upstream_object_domain": "TABLE",
},
]
if op_idx == 1
else []
)
),
"UPSTREAM_COLUMNS": json.dumps(
[
{
"column_name": "COL_{}".format(col_idx),
"upstreams": [
[
{
"object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"object_domain": "Table",
"column_name": "COL_{}".format(col_idx),
}
]
],
}
for col_idx in range(1, num_cols + 1)
]
+ ( # This additional upstream is only for TABLE_1
[
{
"column_name": "COL_1",
"upstreams": [
[
{
"object_name": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"object_domain": "Table",
"column_name": "COL_1",
}
]
],
}
]
if op_idx == 1
else []
)
),
}
for op_idx in range(1, num_ops + 1)
]
elif query in (
snowflake_query.SnowflakeQuery.table_to_table_lineage_history_v2(
start_time_millis=1654473600000,
end_time_millis=1654586220000,
include_view_lineage=False,
include_column_lineage=False,
),
):
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
"DOWNSTREAM_TABLE_DOMAIN": "TABLE",
"UPSTREAM_TABLES": json.dumps(
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"upstream_object_domain": "TABLE",
},
]
+ ( # This additional upstream is only for TABLE_1
[
{
"upstream_object_name": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"upstream_object_domain": "TABLE",
},
]
if op_idx == 1
else []
)
),
}
for op_idx in range(1, num_ops + 1)
]
elif query == snowflake_query.SnowflakeQuery.external_table_lineage_history(
1654473600000,
1654586220000,
):
return []
elif query in [
snowflake_query.SnowflakeQuery.view_dependencies(),
]:
return [
{
"REFERENCED_OBJECT_DOMAIN": "table",
"REFERENCING_OBJECT_DOMAIN": "view",
"DOWNSTREAM_VIEW": "TEST_DB.TEST_SCHEMA.VIEW_2",
"VIEW_UPSTREAM": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
]
elif query in [
snowflake_query.SnowflakeQuery.view_dependencies_v2(),
]:
# VIEW_2 has dependency on TABLE_2
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.VIEW_2",
"DOWNSTREAM_TABLE_DOMAIN": "view",
"UPSTREAM_TABLES": json.dumps(
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"upstream_object_domain": "table",
}
]
),
}
]
elif query in [
snowflake_query.SnowflakeQuery.view_lineage_history(
1654473600000,
1654586220000,
),
snowflake_query.SnowflakeQuery.view_lineage_history(
1654473600000, 1654586220000, False
),
]:
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_1",
"VIEW_NAME": "TEST_DB.TEST_SCHEMA.VIEW_1",
"VIEW_DOMAIN": "VIEW",
"VIEW_COLUMNS": json.dumps(
[
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
]
),
"DOWNSTREAM_TABLE_DOMAIN": "TABLE",
"DOWNSTREAM_TABLE_COLUMNS": json.dumps(
[
{
"columnId": 0,
"columnName": "COL_{}".format(col_idx),
"directSources": [
{
"columnName": "COL_{}".format(col_idx),
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
],
}
for col_idx in range(1, num_cols + 1)
]
),
}
]
elif query in [
snowflake_query.SnowflakeQuery.external_table_lineage_history(
1654473600000,
1654586220000,
),
snowflake_query.SnowflakeQuery.view_dependencies_v2(),
snowflake_query.SnowflakeQuery.view_dependencies(),
snowflake_query.SnowflakeQuery.show_external_tables(),
]:
return []
elif (
query
== snowflake_query.SnowflakeQuery.get_all_tags_in_database_without_propagation(
"TEST_DB"
)
):
return [
*[
{
"TAG_DATABASE": "TEST_DB",
"TAG_SCHEMA": "TEST_SCHEMA",
"TAG_NAME": f"my_tag_{ix}",
"TAG_VALUE": f"my_value_{ix}",
"OBJECT_DATABASE": "TEST_DB",
"OBJECT_SCHEMA": "TEST_SCHEMA",
"OBJECT_NAME": "VIEW_2",
"COLUMN_NAME": None,
"DOMAIN": "TABLE",
}
for ix in range(3)
],
{
"TAG_DATABASE": "TEST_DB",
"TAG_SCHEMA": "TEST_SCHEMA",
"TAG_NAME": "security",
"TAG_VALUE": "pii",
"OBJECT_DATABASE": "TEST_DB",
"OBJECT_SCHEMA": "TEST_SCHEMA",
"OBJECT_NAME": "VIEW_1",
"COLUMN_NAME": "COL_1",
"DOMAIN": "COLUMN",
},
{
"TAG_DATABASE": "OTHER_DB",
"TAG_SCHEMA": "OTHER_SCHEMA",
"TAG_NAME": "my_other_tag",
"TAG_VALUE": "other",
"OBJECT_DATABASE": "TEST_DB",
"OBJECT_SCHEMA": None,
"OBJECT_NAME": "TEST_SCHEMA",
"COLUMN_NAME": None,
"DOMAIN": "SCHEMA",
},
{
"TAG_DATABASE": "OTHER_DB",
"TAG_SCHEMA": "OTHER_SCHEMA",
"TAG_NAME": "my_other_tag",
"TAG_VALUE": "other",
"OBJECT_DATABASE": None,
"OBJECT_SCHEMA": None,
"OBJECT_NAME": "TEST_DB",
"COLUMN_NAME": None,
"DOMAIN": "DATABASE",
},
]
# Unreachable code
raise Exception(f"Unknown query {query}")
| [
"[email protected]"
]
| |
5ce264684ee8cc4bdf3fe7fa5259b05e6e179cd9 | 381b75fe68a4da258e2e60a97105b66ac47214e4 | /qa/rpc-tests/rawtransactions.py | 0ac80a49539a555d1b868ed4d8ea36442667ff7f | [
"MIT"
]
| permissive | lipcoin/lipcoin | 3a5997dfc9193ee7dee6f9fa0adc1cb5fb8c92a3 | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | refs/heads/master | 2021-01-24T07:57:56.248620 | 2018-03-17T19:04:38 | 2018-03-17T19:04:38 | 112,155,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,666 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The LipCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""rawtranscation RPCs QA test.
# Tests the following RPCs:
# - createrawtransaction
# - signrawtransaction
# - sendrawtransaction
# - decoderawtransaction
# - getrawtransaction
"""
from test_framework.test_framework import LipCoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(LipCoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
#connect to a local machine for debugging
#url = "http://lipcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
#proxy = AuthServiceProxy(url)
#proxy.url = url # store URL on proxy for info
#self.nodes.append(proxy)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
#########################################
# sendrawtransaction with missing input #
#########################################
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}]  # won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
try:
rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
assert("Missing inputs" in e.error['message'])
else:
assert(False)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 LIPC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
sPK = rawTx['vout'][0]['scriptPubKey']['hex']
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        # THIS IS AN INCOMPLETE FEATURE:
        # NODE2 HAS TWO OF THREE KEYS, SO THE FUNDS SHOULD BE SPENDABLE AND COUNT IN THE BALANCE CALCULATION
        assert_equal(self.nodes[2].getbalance(), bal)  # for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
| [
"[email protected]"
]
| |
0b90df7dbd721ecc641998896bff6d7087d4c28c | ac0a583e4765f2b5b97e898f30d6df0fc71ea8f6 | /pyros_msgs/opt_as_nested/__init__.py | 4beab2c01e9a05fe2344eb3a0f0e64941a108eae | [
"MIT"
]
| permissive | pyros-dev/pyros-msgs | 5ce9efaa246ffa94396552fd6034c0eeacddeb76 | 28d9d6aa3cfbb42d154360f16eea1900be518f74 | refs/heads/master | 2022-07-06T15:53:16.764600 | 2018-02-17T15:03:36 | 2018-02-17T15:03:36 | 67,676,303 | 1 | 3 | MIT | 2022-06-21T21:19:34 | 2016-09-08T06:45:37 | Python | UTF-8 | Python | false | false | 347 | py | from __future__ import absolute_import
from __future__ import print_function
"""
pyros_msgs.opt_as_nested is a module that declares optional fields as a specific message type.
This is useful if you want to express an optional field in a message without any ambiguity.
"""
from .opt_as_nested import duck_punch
__all__ = [
'duck_punch',
] | [
"[email protected]"
]
| |
d0dade868cb00ef5e103594ae46c0d072fcbd126 | e94d22cdb7c73b8a55262d5a6c2c7b0d75f3b63e | /snussum/analytics/management/commands/createanalytics.py | 6aae31665b387f93092ff96a50482be7c680c3e8 | []
| no_license | dobestan/snussum | 594d1169cc6a0a799c8104135dc028d65a3967d0 | 4f1f092a4c5cebd913a64c5a0d7f12b3e061552f | refs/heads/master | 2021-01-18T18:25:00.237448 | 2015-06-01T06:03:29 | 2015-06-01T06:03:29 | 34,576,643 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from django.core.management.base import BaseCommand, CommandError
from analytics.models.demographic import Demographic
class Command(BaseCommand):
help = "Create Analytics Data"
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
        demographic = Demographic.objects.create_analytics()
        self.stdout.write('Successfully created demographic analytics data ...')
| [
"[email protected]"
]
| |
d457f176565b80c978bfb00733dec4d02f4861d8 | 256644d14bd15f8e1a3e92c95b1655fd36681399 | /pure_python/ga+ppm/main/utilities.py | a07447f3183438919021284b04c4c34a872f020c | []
| no_license | mfbx9da4/neuron-astrocyte-networks | 9d1c0ff45951e45ce1f8297ec62b69ee4159305a | bcf933491bdb70031f8d9c859fc17e0622e5b126 | refs/heads/master | 2021-01-01T10:13:59.099090 | 2018-06-03T12:32:13 | 2018-06-03T12:32:13 | 12,457,305 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | import random
import math
from pylab import zeros, where, array, empty_like
def crossover(m1, m2, NN):
# Maybe could be sped up using flatten/reshape output?
net = NN()
r = random.randint(0, net.wi.size + net.wo.size)
output1 = [empty_like(net.wi), empty_like(net.wo)]
output2 = [empty_like(net.wi), empty_like(net.wo)]
for i in xrange(len(m1)):
for j in xrange(len(m1[i])):
for k in xrange(len(m1[i][j])):
if r >= 0:
output1[i][j][k][:] = m1[i][j][k]
output2[i][j][k][:] = m2[i][j][k]
elif r < 0:
output1[i][j][k][:] = m2[i][j][k]
output2[i][j][k][:] = m1[i][j][k]
r -= 1
return output1, output2
def mutate(m, mutation_rate):
# Variation: could include a constant to control
# how much the weight is mutated by
for i in xrange(len(m)):
for j in xrange(len(m[i])):
for k in xrange(len(m[i][j])):
if random.random() < mutation_rate:
m[i][j][k] = random.uniform(-2.0,2.0)
def percentAcc(all_aos, targets):
correct = 0
for i, trg in enumerate(targets):
sample_res = where(trg == array(all_aos[i]), True, False)
if sample_res.all():
correct += 1
total = len(all_aos)
return float(correct) / total
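# Worked example for percentAcc (illustrative): with targets [(1, 0), (0, 1)]
# and outputs [[1, 0], [1, 1]], only the first sample matches -> returns 0.5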
def sigmoid(x):
    # Note: despite its name this returns tanh(x), the squashing function used here.
    return math.tanh(x)
def randomizeMatrix(matrix, a, b):
for i in range(len(matrix)):
for j in range(len(matrix[0])):
matrix[i][j] = random.uniform(a, b)
def roulette(fitnessScores):
    cumulativeFitness = 0.0
    r = random.random()
    for i in range(len(fitnessScores)):
        cumulativeFitness += fitnessScores[i]
        if cumulativeFitness > r:
            return i
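# Usage sketch: roulette expects scores normalized to sum to 1 (see calcFit
# below); e.g. roulette([0.2, 0.3, 0.5]) returns index 2 about half the time.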
def calcFit(numbers):
"""each fitness is a fraction of the total error"""
# POTENTIAL IMPROVEMENTS:
# maybe give the better scores much higher weighting?
# maybe use the range to calculate the fitness?
# maybe do ind / range of accuracies?
total, fitnesses = sum(numbers), []
for i in range(len(numbers)):
try:
fitness = numbers[i] / total
except ZeroDivisionError:
print 'individual outputted zero correct responses'
fitness = 0
fitnesses.append(fitness)
return fitnesses
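
# Worked example: calcFit([2.0, 3.0, 5.0]) -> [0.2, 0.3, 0.5]; pass floats,
# since integer inputs would truncate under Python 2 division.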
| [
"[email protected]"
]
| |
a775e4b0f818ac2bdd927c36c645d58aea22d114 | 389d95ee1f8d4ba992114e36c5fc427d02ba2a6c | /flexmessage_project/settings.py | ef5e0ea9c78442b5078ec03584e4b733d9fc65ac | [
"MIT"
]
| permissive | adepeter/sleekmessage | d7a6b4279f6a60659cf8a98897136ca22c1b830a | 64621842cb9b0d707523e87f8bd6549d4e2d8433 | refs/heads/master | 2022-11-16T23:58:18.477628 | 2020-07-15T15:50:16 | 2020-07-15T15:50:16 | 265,276,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | """
Django settings for flexmessage_project project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SETTINGS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@k0lop(x*yo$2jm03k)@2c3$ch0@4l=)0)0ab+(10)5sn#llx@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'messages.apps.MessagesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'flexmessage_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'string_if_invalid': '%s is not a valid template variable',
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'flexmessage_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')] | [
"[email protected]"
]
| |
0e11d74b63a525a47ac0423bcedf37d6db871a31 | 6e47be4e22ab76a8ddd7e18c89f5dc4f18539744 | /venv/openshift/lib/python3.6/site-packages/kubernetes/client/models/v1_security_context.py | 4be4451ac2cdd63dad12dfb4d7fa91b4d3b660a7 | []
| no_license | georgi-mobi/redhat_ocp4.5_training | 21236bb19d04a469c95a8f135188d3d1ae473764 | 2ccaa90e40dbbf8a18f668a5a7b0d5bfaa1db225 | refs/heads/main | 2023-03-30T10:47:08.687074 | 2021-04-01T05:25:49 | 2021-04-01T05:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,971 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1SecurityContext(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_privilege_escalation': 'bool',
'capabilities': 'V1Capabilities',
'privileged': 'bool',
'proc_mount': 'str',
'read_only_root_filesystem': 'bool',
'run_as_group': 'int',
'run_as_non_root': 'bool',
'run_as_user': 'int',
'se_linux_options': 'V1SELinuxOptions'
}
attribute_map = {
'allow_privilege_escalation': 'allowPrivilegeEscalation',
'capabilities': 'capabilities',
'privileged': 'privileged',
'proc_mount': 'procMount',
'read_only_root_filesystem': 'readOnlyRootFilesystem',
'run_as_group': 'runAsGroup',
'run_as_non_root': 'runAsNonRoot',
'run_as_user': 'runAsUser',
'se_linux_options': 'seLinuxOptions'
}
def __init__(self, allow_privilege_escalation=None, capabilities=None, privileged=None, proc_mount=None, read_only_root_filesystem=None, run_as_group=None, run_as_non_root=None, run_as_user=None, se_linux_options=None):
"""
V1SecurityContext - a model defined in Swagger
"""
self._allow_privilege_escalation = None
self._capabilities = None
self._privileged = None
self._proc_mount = None
self._read_only_root_filesystem = None
self._run_as_group = None
self._run_as_non_root = None
self._run_as_user = None
self._se_linux_options = None
self.discriminator = None
if allow_privilege_escalation is not None:
self.allow_privilege_escalation = allow_privilege_escalation
if capabilities is not None:
self.capabilities = capabilities
if privileged is not None:
self.privileged = privileged
if proc_mount is not None:
self.proc_mount = proc_mount
if read_only_root_filesystem is not None:
self.read_only_root_filesystem = read_only_root_filesystem
if run_as_group is not None:
self.run_as_group = run_as_group
if run_as_non_root is not None:
self.run_as_non_root = run_as_non_root
if run_as_user is not None:
self.run_as_user = run_as_user
if se_linux_options is not None:
self.se_linux_options = se_linux_options
@property
def allow_privilege_escalation(self):
"""
Gets the allow_privilege_escalation of this V1SecurityContext.
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
:return: The allow_privilege_escalation of this V1SecurityContext.
:rtype: bool
"""
return self._allow_privilege_escalation
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, allow_privilege_escalation):
"""
Sets the allow_privilege_escalation of this V1SecurityContext.
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
:param allow_privilege_escalation: The allow_privilege_escalation of this V1SecurityContext.
:type: bool
"""
self._allow_privilege_escalation = allow_privilege_escalation
@property
def capabilities(self):
"""
Gets the capabilities of this V1SecurityContext.
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:return: The capabilities of this V1SecurityContext.
:rtype: V1Capabilities
"""
return self._capabilities
@capabilities.setter
def capabilities(self, capabilities):
"""
Sets the capabilities of this V1SecurityContext.
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param capabilities: The capabilities of this V1SecurityContext.
:type: V1Capabilities
"""
self._capabilities = capabilities
@property
def privileged(self):
"""
Gets the privileged of this V1SecurityContext.
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:return: The privileged of this V1SecurityContext.
:rtype: bool
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""
Sets the privileged of this V1SecurityContext.
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param privileged: The privileged of this V1SecurityContext.
:type: bool
"""
self._privileged = privileged
@property
def proc_mount(self):
"""
Gets the proc_mount of this V1SecurityContext.
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:return: The proc_mount of this V1SecurityContext.
:rtype: str
"""
return self._proc_mount
@proc_mount.setter
def proc_mount(self, proc_mount):
"""
Sets the proc_mount of this V1SecurityContext.
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param proc_mount: The proc_mount of this V1SecurityContext.
:type: str
"""
self._proc_mount = proc_mount
@property
def read_only_root_filesystem(self):
"""
Gets the read_only_root_filesystem of this V1SecurityContext.
Whether this container has a read-only root filesystem. Default is false.
:return: The read_only_root_filesystem of this V1SecurityContext.
:rtype: bool
"""
return self._read_only_root_filesystem
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, read_only_root_filesystem):
"""
Sets the read_only_root_filesystem of this V1SecurityContext.
Whether this container has a read-only root filesystem. Default is false.
:param read_only_root_filesystem: The read_only_root_filesystem of this V1SecurityContext.
:type: bool
"""
self._read_only_root_filesystem = read_only_root_filesystem
@property
def run_as_group(self):
"""
Gets the run_as_group of this V1SecurityContext.
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:return: The run_as_group of this V1SecurityContext.
:rtype: int
"""
return self._run_as_group
@run_as_group.setter
def run_as_group(self, run_as_group):
"""
Sets the run_as_group of this V1SecurityContext.
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param run_as_group: The run_as_group of this V1SecurityContext.
:type: int
"""
self._run_as_group = run_as_group
@property
def run_as_non_root(self):
"""
Gets the run_as_non_root of this V1SecurityContext.
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:return: The run_as_non_root of this V1SecurityContext.
:rtype: bool
"""
return self._run_as_non_root
@run_as_non_root.setter
def run_as_non_root(self, run_as_non_root):
"""
Sets the run_as_non_root of this V1SecurityContext.
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param run_as_non_root: The run_as_non_root of this V1SecurityContext.
:type: bool
"""
self._run_as_non_root = run_as_non_root
@property
def run_as_user(self):
"""
Gets the run_as_user of this V1SecurityContext.
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:return: The run_as_user of this V1SecurityContext.
:rtype: int
"""
return self._run_as_user
@run_as_user.setter
def run_as_user(self, run_as_user):
"""
Sets the run_as_user of this V1SecurityContext.
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param run_as_user: The run_as_user of this V1SecurityContext.
:type: int
"""
self._run_as_user = run_as_user
@property
def se_linux_options(self):
"""
Gets the se_linux_options of this V1SecurityContext.
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:return: The se_linux_options of this V1SecurityContext.
:rtype: V1SELinuxOptions
"""
return self._se_linux_options
@se_linux_options.setter
def se_linux_options(self, se_linux_options):
"""
Sets the se_linux_options of this V1SecurityContext.
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param se_linux_options: The se_linux_options of this V1SecurityContext.
:type: V1SELinuxOptions
"""
self._se_linux_options = se_linux_options
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1SecurityContext):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
c5a506c058d8d1a23c61352ae6ad017163387afd | d2e2f05f8b894e32f43dac8a45819da54888109f | /0x01-python-if_else_loops_functions/9-print_last_digit.py~ | 726544fbcc4abd272721ae61b076e57545212c2f | []
| no_license | wolf-coder/holbertonschool-higher_level_programming | 1b249a63c77156fcb25dda616497dd1abc272e75 | a81ac6c7b7e59210b19f413bd413f999ed599d2c | refs/heads/master | 2023-05-31T20:47:54.785063 | 2021-07-09T14:21:47 | 2021-07-09T14:21:47 | 259,362,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | #!/usr/bin/python3
def print_last_digit(number):
return (5)
| [
"[email protected]"
]
| ||
f53f414f7ee5fbc8b13847a32418970ec312c561 | 4af454bced0f99e4ed8269d71e97284f0ef13afb | /loginserver/keys/rsa.py | 02c4c9811d4e423b0a531fa48b9e687d9ba12cbd | []
| no_license | L2jBrasil/L2py | c46db78238b4caf272a2399f4e4910fc256b3cca | d1c2e7bddb54d222f9a3d04262c09ad70329a226 | refs/heads/master | 2022-11-19T01:39:02.019777 | 2020-07-24T20:07:15 | 2020-07-24T20:07:15 | 292,115,581 | 1 | 1 | null | 2020-09-01T21:53:54 | 2020-09-01T21:53:54 | null | UTF-8 | Python | false | false | 2,315 | py | from Cryptodome.PublicKey import RSA
from M2Crypto import BIO
from M2Crypto import RSA as M2RSA
from common.helpers.bytearray import ByteArray
class L2RsaKey(RSA.RsaKey):
def scramble_mod(self) -> bytes:
n = ByteArray(self.n_bytes)
# step 1: 0x4d - 0x50 <-> 0x00 - 0x04
for i in range(4):
n[i], n[0x4d + i] = n[0x4d + i], n[i]
# step 2 : xor first 0x40 bytes with last 0x40 bytes
for i in range(0x40):
n[i] = n[i] ^ n[0x40 + i]
# step 3 : xor bytes 0x0d-0x10 with bytes 0x34-0x38
for i in range(4):
n[0x0d + i] = n[0x0d + i] ^ n[0x34 + i]
# step 4 : xor last 0x40 bytes with first 0x40 bytes
for i in range(0x40):
n[0x40 + i] = n[0x40 + i] ^ n[i]
return bytes(n)
@classmethod
def unscramble_mod(cls, n: bytes) -> int:
n = ByteArray(n)
for i in range(0x40):
n[0x40 + i] = n[0x40 + i] ^ n[i]
for i in range(4):
n[0x0d + i] = n[0x0d + i] ^ n[0x34 + i]
for i in range(0x40):
n[i] = n[i] ^ n[0x40 + i]
for i in range(4):
temp = n[0x00 + i]
n[0x00 + i] = n[0x4d + i]
n[0x4d + i] = temp
return int.from_bytes(bytes(n), "big")
@property
def n_bytes(self):
return self.n.to_bytes(128, "big")
@classmethod
def from_scrambled(cls, data) -> "L2RsaKey":
modulus = cls.unscramble_mod(data)
key = RSA.construct((modulus, 65537))
key.__class__ = L2RsaKey
return key
@classmethod
def generate(cls, bits=1024, randfunc=None, e=65537) -> "L2RsaKey":
key = RSA.generate(bits, randfunc, e)
key.__class__ = cls
return key
def __repr__(self):
return "L2" + super().__repr__()
@property
def m2crypto_key(self):
key_bio = BIO.MemoryBuffer(self.export_key())
if self.has_private():
return M2RSA.load_key_bio(key_bio)
else:
return M2RSA.load_pub_key_bio(key_bio)
@property
def scrambled_key(self):
scrambled_key = RSA.construct((int.from_bytes(self.scramble_mod(), "big"), self.e))
key_bio = BIO.MemoryBuffer(scrambled_key.export_key())
return M2RSA.load_key_bio(key_bio)
| [
"[email protected]"
]
| |
3ed03f450ecd93b825fa1583fb79154b40c83ff4 | 70d4ef0863906b3ca64f986075cd35b8412b871e | /packages/blueking/component/apis/sops.py | 9446f23f6bd7a4629b842b45ea8ea69b7a4e32f0 | [
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | selinagyan/bk-sops | 72db0ac33d9c307f51769e4baa181ceb8e1b279e | 39e63e66416f688e6a3641ea8e975d414ece6b04 | refs/heads/master | 2020-05-07T16:44:33.312442 | 2019-04-11T02:09:25 | 2019-04-11T02:09:25 | 180,696,241 | 0 | 0 | null | 2019-04-11T02:07:11 | 2019-04-11T02:07:10 | null | UTF-8 | Python | false | false | 2,426 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from ..base import ComponentAPI
class CollectionsSOPS(object):
"""Collections of SOPS APIS"""
def __init__(self, client):
self.client = client
self.create_task = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/create_task/',
            description=u'Create a task'
)
self.get_task_status = ComponentAPI(
client=self.client, method='GET',
path='/api/c/compapi{bk_api_ver}/sops/get_task_status/',
            description=u'Query the status of a task or node'
)
self.get_template_info = ComponentAPI(
client=self.client, method='GET',
path='/api/c/compapi{bk_api_ver}/sops/get_template_info/',
            description=u'Query the details of a single template'
)
self.get_template_list = ComponentAPI(
client=self.client, method='GET',
path='/api/c/compapi{bk_api_ver}/sops/get_template_list/',
            description=u'Query the template list'
)
self.operate_task = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/operate_task/',
            description=u'Operate a task'
)
self.query_task_count = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/query_task_count/',
            description=u'Query task counts by category'
)
self.start_task = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/start_task/',
            description=u'Start a task'
)
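# --- Illustrative usage (not part of the original module) ---
# A minimal sketch: CollectionsSOPS just stores the client and wraps each
# endpoint in a ComponentAPI, so any object honouring the ESB client
# interface will do; "DummyClient" below is a stand-in assumption.
if __name__ == "__main__":
    class DummyClient(object):
        pass
    sops = CollectionsSOPS(DummyClient())
    # each attribute (sops.create_task, sops.start_task, ...) is a callable
    # ComponentAPI bound to the corresponding SOPS endpoint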
| [
"[email protected]"
]
| |
602d5661471469217459de0236ec43a9a1f0e8de | 81344c55ed60bf12818d1a0ec246f3c24c79cb4c | /力扣习题/8字符串转整数/atoi.py | 73fb9e626ac6852e2287bfbded03dddd0161775c | [
"MIT"
]
| permissive | lollipopnougat/AlgorithmLearning | 7d5c4a37bd5c814c5caea6963e81fbe0cb44b7b7 | cb13caa0159f0179d3c1bacfb1801d156c7d1344 | refs/heads/master | 2023-05-11T04:47:09.758889 | 2023-05-07T06:55:48 | 2023-05-07T06:55:48 | 194,078,151 | 7 | 2 | MIT | 2023-03-25T01:23:44 | 2019-06-27T10:53:08 | Python | UTF-8 | Python | false | false | 144 | py | class Solution:
def myAtoi(self, str: str) -> int:
return max(min(int(*re.findall('^[\+\-]?\d+', str.lstrip())), 2**31 - 1), -2**31) | [
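# --- Illustrative behaviour (not part of the original solution) ---
#   Solution().myAtoi("42")              -> 42
#   Solution().myAtoi("   -42")          -> -42
#   Solution().myAtoi("4193 with words") -> 4193
# Results are clamped to the signed 32-bit range [-2**31, 2**31 - 1].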
"[email protected]"
]
| |
889a29dd98a7786a22e8d2fbde68e5a1ce2d4137 | a6ed0c42659f54f88024a9171c353e7cbe51328e | /Python/flask_MySQL/emailval/server.py | 1d9d6e7aa490bb6f47f766d7b83b106c0677f317 | []
| no_license | tomama1/Practice | c4a44a044fe67b3f4eb34dca0a0dd9ea38f4c766 | 8adecd0ee985db06497578a11d067ac16502da7b | refs/heads/master | 2021-09-05T04:32:42.020673 | 2018-01-24T05:51:16 | 2018-01-24T05:51:16 | 104,159,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | from flask import Flask, request, redirect, render_template, flash
from mysqlconnection import MySQLConnector
import re
app = Flask(__name__)
app.secret_key = ("CodingDojo")
mysql = MySQLConnector(app,'listserv')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def create():
# grabbing user input from form
email = request.form['emailcheck']
# query for checking the database
query = "SELECT email from listserv WHERE email = :echeck"
data = {
'echeck':email
}
emailcheck = mysql.query_db(query, data)
# if email exists in database
if emailcheck:
flash("Valid Email")
return redirect('/success')
else:
# regex check for valid email string
if re.match(r"[^@]+@[^@]+\.[^@]+",email):
# insert query into database
query = "INSERT INTO listserv (email, created_at, updated_at) VALUES (:emailtobeinserted, NOW(), NOW())"
# mysql.query_db("INSERT INTO listserv(email, created_at, updated_at) VALUES (:emailtobeinserted, NOW(), NOW())",{'emailtobeinserted':email})"
data = {
'emailtobeinserted': request.form['emailcheck']
}
mysql.query_db(query, data)
flash("Email has been Inserted!")
else:
# not a valid email string ( no @ sign)
flash("Not a valid email")
return redirect('/')
@app.route('/success')
def success():
# display all rows in the listserv table
emails = mysql.query_db("SELECT * FROM listserv")
return render_template('success.html', all_emails = emails)
@app.route('/goback')
def goback():
return redirect('/')
app.run(debug=True) | [
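# --- Illustrative manual test (not part of the original server) ---
# With the server running locally (Flask's default is http://127.0.0.1:5000),
# the form on "/" posts to /process; an equivalent curl call would be:
#   curl -X POST -d "emailcheck=someone@example.com" http://127.0.0.1:5000/process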
"[email protected]"
]
| |
ea0207d1f4614c56c66b011cec3e7d9ecefe2d10 | 58f6184fbfe4782bccf7803fbb978b5a5f93bb50 | /src/scs_analysis/cmd/cmd_sample_tally.py | a7dff9ca4f518978eee941ce646bb2796fd1ea4b | [
"MIT"
]
| permissive | seoss/scs_analysis | d41db35a1c7d97d75776a797df099749dbced824 | c203093fd6728eafe576a1798bd9040ca18c73f8 | refs/heads/master | 2020-04-04T20:14:48.026665 | 2018-11-05T12:51:23 | 2018-11-05T12:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | """
Created on 22 Aug 2017
@author: Bruno Beloff ([email protected])
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdSampleTally(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog [-t TALLY] [-p PRECISION] [-v] [PATH]", version="%prog 1.0")
# optional...
self.__parser.add_option("--tally", "-t", type="int", nargs=1, action="store", dest="tally",
help="generate a rolling aggregate for TALLY number of data points (default all)")
self.__parser.add_option("--prec", "-p", type="int", nargs=1, action="store", default=None, dest="precision",
help="precision (default 0 decimal places)")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.tally is not None and self.tally < 1:
return False
return True
# ----------------------------------------------------------------------------------------------------------------
@property
def tally(self):
return self.__opts.tally
@property
def precision(self):
return self.__opts.precision
@property
def verbose(self):
return self.__opts.verbose
@property
def path(self):
return self.__args[0] if len(self.__args) > 0 else None
@property
def args(self):
return self.__args
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdSampleTally:{tally:%s, tally:%s, verbose:%s, path:%s, args:%s}" % \
(self.tally, self.precision, self.verbose, self.path, self.args)
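# ----------------------------------------------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal sketch of
# how the handler parses sys.argv and reports errors.
if __name__ == "__main__":
    import sys
    cmd = CmdSampleTally()          # optparse reads sys.argv on construction
    if not cmd.is_valid():
        cmd.print_help(sys.stderr)
        sys.exit(2)
    print(cmd)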
| [
"[email protected]"
]
| |
d470117b87c20044939b34206f9e9d67c89cc690 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=76/params.py | 29fdaca73a9d96a41ddea9479708049d1a27dfc2 | []
| no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '2.010214',
'max_util': '2.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 76,
'utils': 'uni-medium-3'}
| [
"[email protected]"
]
| |
629585562843f773778c17fec9276488963e4e18 | 515e7d6e5756e3922df0b874b241c8b0744b4570 | /packs/python_packs.py | 1d34ff441b4097d542aca3c6d08a9dd2b0ef7e4d | []
| no_license | mjgpy3/udm_script | d77f4904df62e33c72f690cdf4049a1118be105b | d04802d21797fa6ed03cfc35c955bcc6d028f1c2 | refs/heads/master | 2021-01-23T11:40:25.415072 | 2013-07-30T16:53:31 | 2013-07-30T16:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | #!/usr/bin/env python
# Created by Michael Gilliland
# Date: Fri Jan 25 16:47:44 EST 2013
#
#
from package_container import PackageContainer
packages = {'Pygame': 'python-pygame',
'Sympy': 'python-sympy',
'Numpy': 'python-numpy',
'Scipy': 'python-scipy',
'Virtualenv': 'python-virtualenv',
'PIP': 'python-pip',
'Django': 'python-django',
'Pychecker': 'pychecker',
'IPython': 'ipython',
'IDLE': 'idle',
'Epydoc': 'python-epydoc',
'Sphinx': 'python-sphinx',
'SQLAlchemy': 'python-sqlalchemy',
'Requests': 'python-requests',
'Flask': 'python-flask',
'Python Dev': 'python-dev',
'Beautiful Soup': 'python-beautifulsoup',
'Jython': 'jython',
'Cython': 'cython',
'PyPy': 'pypy',
'Python Openoffice': 'python-openoffice',
'CX Freeze': 'cx-freeze'}
special_package_instructions = {'sh': ['pip install sh'],
'Selenium': ['pip install selenium']}
container = PackageContainer("Python", 'python', packages, special_package_instructions)
| [
"[email protected]"
]
| |
e36f86f692711d3516598a57f325dc3781d9a3e0 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/task/deployctx.py | 5ec363019c9a320d6f2cdd11ef473a166e344841 | []
| no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,835 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class DeployCtx(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.task.DeployCtx")
meta.moClassName = "taskDeployCtx"
meta.rnFormat = "TaskDeployCtx"
meta.category = MoCategory.REGULAR
meta.label = "DeployCtxTask"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.parentClasses.add("cobra.model.top.Root")
meta.rnPrefixes = [
('TaskDeployCtx', False),
]
prop = PropMeta("str", "annotation", "annotation", 51688, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 51689, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
306169c51708eb9ebd6f3a4715d52aaf5b2f46c0 | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /Research/async play/wxasync1.py | 938fa1c468981bdc521f7644434f52312729c2b3 | []
| no_license | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 1,429 | py | import wx
from wxasync import AsyncBind, WxAsyncApp, StartCoroutine
import asyncio
from asyncio.events import get_event_loop
import time
class TestFrame(wx.Frame):
def __init__(self, parent=None):
super(TestFrame, self).__init__(parent)
vbox = wx.BoxSizer(wx.VERTICAL)
button1 = wx.Button(self, label="Submit")
self.edit = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
self.edit_timer = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
vbox.Add(button1, 2, wx.EXPAND|wx.ALL)
vbox.AddStretchSpacer(1)
vbox.Add(self.edit, 1, wx.EXPAND|wx.ALL)
vbox.Add(self.edit_timer, 1, wx.EXPAND|wx.ALL)
self.SetSizer(vbox)
self.Layout()
AsyncBind(wx.EVT_BUTTON, self.async_callback, button1)
StartCoroutine(self.update_clock, self)
async def async_callback(self, event):
self.edit.SetLabel("Button clicked")
await asyncio.sleep(1)
self.edit.SetLabel("Working")
await asyncio.sleep(1)
self.edit.SetLabel("Completed")
async def update_clock(self):
while True:
self.edit_timer.SetLabel(time.strftime('%H:%M:%S'))
await asyncio.sleep(0.5)
app = WxAsyncApp()
frame = TestFrame()
frame.Show()
app.SetTopWindow(frame)
loop = get_event_loop()
loop.run_until_complete(app.MainLoop())
| [
"[email protected]"
]
| |
24f2de63f6fe12b2e69518221df7bc7cef282fb6 | 078e35f6b03e4e7a9616f2335a740109d8292176 | /examples/adwords/v201609/advanced_operations/add_ad_customizer.py | f3c8da4ffc6854a0fdba2a28bd13a0f160fd0adb | [
"Apache-2.0"
]
| permissive | parander/googleads-python-lib | 5f5b09e8adf7d733bddca314f6aa624b60c5abde | bc1bdff2d58fdc7cf4f09b879c68757c5b9b3abc | refs/heads/master | 2021-01-12T16:36:44.861582 | 2017-02-27T04:27:18 | 2017-02-27T04:27:18 | 71,418,777 | 0 | 0 | null | 2016-10-20T02:38:33 | 2016-10-20T02:38:32 | null | UTF-8 | Python | false | false | 7,140 | py | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that uses the feed to populate
dynamic data.
"""
from datetime import datetime
from uuid import uuid4
# Import appropriate classes from the client library.
from googleads import adwords
from googleads import errors
FEED_NAME = 'Interplanetary Feed Name %s' % uuid4()
ADGROUPS = [
'INSERT_ADGROUP_ID_1_HERE',
'INSERT_ADGROUP_ID_2_HERE'
]
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
"""Creates ExpandedTextAds that use ad customizations for specified AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
feed_name: the name of the feed used to apply customizations.
Raises:
GoogleAdsError: if no ExpandedTextAds were added.
"""
# Get the AdGroupAdService
adgroup_ad_service = client.GetService('AdGroupAdService')
expanded_text_ad = {
'xsi_type': 'ExpandedTextAd',
'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,
'headlinePart2': 'Only {=%s.Price}' % feed_name,
'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,
'finalUrls': ['http://www.example.com'],
}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{
'operator': 'ADD',
'operand': {
'adGroupId': adgroup,
'ad': expanded_text_ad
}
} for adgroup in adgroup_ids]
response = adgroup_ad_service.mutate(operations)
if response and 'value' in response:
for ad in response['value']:
print ('Created an ad with ID \'%s\', type \'%s\', and status \'%s\'.'
% (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
raise errors.GoogleAdsError('No ads were added.')
def CreateCustomizerFeed(client, feed_name):
"""Creates a new AdCustomizerFeed.
Args:
client: an AdWordsClient instance.
feed_name: the name for the new AdCustomizerFeed.
Returns:
The new AdCustomizerFeed.
"""
# Get the AdCustomizerFeedService
ad_customizer_feed_service = client.GetService('AdCustomizerFeedService')
customizer_feed = {
'feedName': feed_name,
'feedAttributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = ad_customizer_feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['feedId'],
'nameId': feed['feedAttributes'][0]['id'],
'priceId': feed['feedAttributes'][1]['id'],
'dateId': feed['feedAttributes'][2]['id']
}
print ('Feed with name \'%s\' and ID %s was added with:\n'
'\tName attribute ID %s and price attribute ID %s and date attribute'
'ID %s') % (feed['feedName'], feed['feedId'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
return feed
else:
raise errors.GoogleAdsError('No feeds were added')
def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed):
"""Creates FeedItems for the specified AdGroups.
These FeedItems contain values to use in ad customizations for the AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing two AdGroup Ids.
ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
with.
Raises:
GoogleAdsError: if no FeedItems were added.
"""
# Get the FeedItemService
feed_item_service = client.GetService('FeedItemService')
now = datetime.now()
mars_date = datetime(now.year, now.month, 1, 0, 0)
venus_date = datetime(now.year, now.month, 15, 0, 0)
time_format = '%Y%m%d %H%M%S'
feed_item_operations = [
CreateFeedItemAddOperation(
'Mars', '$1234.56', mars_date.strftime(time_format), adgroup_ids[0],
ad_customizer_feed),
CreateFeedItemAddOperation(
'Venus', '$1450.00', venus_date.strftime(time_format),
adgroup_ids[1], ad_customizer_feed)
]
response = feed_item_service.mutate(feed_item_operations)
if 'value' in response:
for feed_item in response['value']:
print 'Added FeedItem with ID %d.' % feed_item['feedItemId']
else:
raise errors.GoogleAdsError('No FeedItems were added.')
def CreateFeedItemAddOperation(name, price, date, adgroup_id,
ad_customizer_feed):
"""Creates a FeedItemOperation.
The generated FeedItemOperation will create a FeedItem with the specified
values and AdGroupTargeting when sent to FeedItemService.mutate.
Args:
name: the value for the name attribute of the FeedItem.
price: the value for the price attribute of the FeedItem.
date: the value for the date attribute of the FeedItem.
adgroup_id: the ID of the ad_group to target with the FeedItem.
ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
with.
Returns:
A new FeedItemOperation for adding a FeedItem.
"""
feed_item = {
'feedId': ad_customizer_feed['feedId'],
'adGroupTargeting': {
'TargetingAdGroupId': adgroup_id
},
'attributeValues': [
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][0]['id'],
'stringValue': name
},
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][1]['id'],
'stringValue': price
},
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][2]['id'],
'stringValue': date
}
]
}
return {'operator': 'ADD', 'operand': feed_item}
def main(client, adgroup_ids, feed_name=FEED_NAME):
# Create a customizer feed. One feed per account can be used for all ads.
ad_customizer_feed = CreateCustomizerFeed(client, feed_name)
# Add feed items containing the values we'd like to place in ads.
CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed)
# All set! We can now create ads with customizations.
CreateAdsWithCustomizations(client, adgroup_ids, feed_name)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
| [
"[email protected]"
]
| |
f160905d816728acf5ab28b38fe37cd56249ef23 | a95aebf977058d32fa4298e35939fb5813f11276 | /nn/layers.py | f339ba6e01b645a013632b3b8d3cd2e47a1ae2a2 | [
"MIT"
]
| permissive | CV-IP/uqvi | f6e595c60ab86eb00c3b221d24f7300a4f872839 | 2534c26c41a4745e98d4b12d66270691002d1a5f | refs/heads/master | 2022-12-22T20:47:44.140964 | 2020-10-03T17:40:17 | 2020-10-03T17:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | import os
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _triple
from nn.bayes_conv import BayesConv3d, BayesConv2d
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel, stride, padding=1, bayes = False):
super(ConvBlock, self).__init__()
if bayes:
self.conv = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
BayesConv3d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=padding, bias=False))
else:
self.conv = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
nn.Conv3d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=padding, bias=False))
def forward(self, x):
x = self.conv(x)
return x
class BasicDownBlock(nn.Module):
def __init__(self, in_ch, out_ch, downsample, bayes=False):
super(BasicDownBlock, self).__init__()
        stride = 2 if downsample else 1  # avoid shadowing the builtin str
        self.conv_1 = ConvBlock(in_ch, out_ch, kernel=3, stride=stride, bayes=bayes)
self.conv_2 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
self.down = None
if downsample:
self.down = ConvBlock(in_ch, out_ch, kernel=1, stride=2, padding=0, bayes=False)
def forward(self, inp):
x = self.conv_1(inp)
x = self.conv_2(x)
if self.down is not None:
return x + self.down(inp)
else:
return x + inp
class BasicUpBlock(nn.Module):
def __init__(self, in_ch, out_ch, bayes=False):
super(BasicUpBlock, self).__init__()
self.upsample = nn.Sequential(
ConvBlock(in_ch, out_ch, kernel=1, stride=1, padding=0, bayes=False),
nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
)
self.conv_1 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
self.conv_2 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
def forward(self, inp, skip_connection=None):
x = self.upsample(inp)
if skip_connection is not None:
x = x + skip_connection
x1 = self.conv_1(x)
x1 = self.conv_2(x1)
return x1 + x | [
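# --- Illustrative shape check (not part of the original module) ---
# A minimal sketch; the channel counts and input size are assumptions chosen
# so a down block and the matching up block invert each other's resolution.
if __name__ == "__main__":
    x = torch.randn(1, 8, 16, 16, 16)
    down = BasicDownBlock(8, 16, downsample=True)
    up = BasicUpBlock(16, 8)
    y = down(x)   # (1, 16, 8, 8, 8): the stride-2 conv halves each spatial dim
    z = up(y)     # (1, 8, 16, 16, 16): the trilinear upsample restores them
    print(y.shape, z.shape)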
"[email protected]"
]
| |
daffcd2c71e1aa642d272207dca6fb0e42a37757 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /smith/bert/tokenization.py | dc88d1b4e3bbfaae01a5a7e0f295c7f14bd70f27 | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 13,084 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
from absl import flags
import six
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_bool(
"preserve_unused_tokens", False,
"If True, Wordpiece tokenization will not be applied to words in the vocab."
)
_UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$")
def preserve_token(token, vocab):
"""Returns True if the token should forgo tokenization and be preserved."""
if not FLAGS.preserve_unused_tokens:
return False
if token not in vocab:
return False
return bool(_UNUSED_TOKEN_RE.search(token))
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
if token not in vocab:
vocab[token] = len(vocab)
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenization."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case, vocab=self.vocab)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
if preserve_token(token, self.vocab):
split_tokens.append(token)
continue
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, vocab=tuple()):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
vocab: A container of tokens to not mutate during tokenization.
"""
self.do_lower_case = do_lower_case
self.vocab = vocab
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if preserve_token(token, self.vocab):
split_tokens.append(token)
continue
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
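# Illustrative example (not part of the original module): with a toy vocab the
# greedy longest-match-first behaviour from the docstring looks like
#   vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
#   WordpieceTokenizer(vocab=vocab).tokenize("unaffable")
#   -> ["un", "##aff", "##able"]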
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| [
"[email protected]"
]
| |
c441260c06dae91d6641ae9d9d73cf55928c8d6e | 3f0a446f493951693af0e6f44fa8076b7522a2fb | /ga_v3.py | a55948cf4f0a5f235cdd9c4537673fda6923a895 | []
| no_license | by-student-2017/eam_database_fit | f74fb2c8504f709e677b1a2c4c9e34c688a3930c | a74006c402bd46550b67dc27a9284c7dd1d262e2 | refs/heads/master | 2023-03-24T07:12:44.277706 | 2021-03-02T03:49:50 | 2021-03-02T03:49:50 | 281,506,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,809 | py | import random
from deap import creator, base, tools, algorithms
import numpy
import numpy as np
import commands
import sys
#----------------------------------------------------------------------
file_tmp = 'EAM_code.tmp'
file_inp = 'EAM_code'
lammps_adress = "lmp"
cif2cell_adress = "cif2cell"
commands.getoutput("setenv OMP_NUM_THREADS 1")
num_core = commands.getoutput("grep 'core id' /proc/cpuinfo | sort -u | wc -l")
#pwscf_adress = "mpirun -np "+str(num_core)+" --allow-run-as-root pw.x"
#pwscf_adress = "mpirun -np "+str(num_core)+" pw.x"
pwscf_adress = "mpirun -np 2 pw.x"
satom = commands.getoutput("grep \"atomtype\" EAM.input | sed -e \"s/.*=//\" -e \"s/'//g\"")
commands.getoutput("chmod +x ./cfg2vasp/cfg2vasp")
commands.getoutput("chmod +x pwscf2force")
commands.getoutput("chmod +x setinp")
commands.getoutput("./setinp")
commands.getoutput("mkdir cfg")
commands.getoutput("mkdir work")
commands.getoutput("echo -n > energy.dat")
temp_K = commands.getoutput("awk '{if($2==\"temp\"){print $4}}' in.lmp")
print "Lammps MD: "+temp_K+" K"
target = [0,0,0] # dummy data
y_str = [0] # dummy data
natom = commands.getoutput("awk '{if($2==\"atoms\"){print $1}}' data.in")
fxl = numpy.ones(int(natom)+1)
fyl = numpy.ones(int(natom)+1)
fzl = numpy.ones(int(natom)+1)
fxp = numpy.ones(int(natom)+1)
fyp = numpy.ones(int(natom)+1)
fzp = numpy.ones(int(natom)+1)
#----------------------------------------------------------------------
print "read parameters from EAM_code.init"
nline = commands.getoutput("grep -n "+str(satom)+" EAM_code.init | head -1 | sed -e \"s/:.*//g\"")
print "read line: "+nline
check_satom = commands.getoutput("awk '{if(NR=="+str(nline)+"+0){print $1}}' EAM_code.init | head -1")
print "fit element: "+check_satom
# fitting parameters
x0 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+1){print $1}}' EAM_code.init | head -1"))
x1 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+2){print $1}}' EAM_code.init | head -1"))
x2 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+3){print $1}}' EAM_code.init | head -1"))
x3 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+4){print $1}}' EAM_code.init | head -1"))
x4 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+5){print $1}}' EAM_code.init | head -1"))
x5 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+6){print $1}}' EAM_code.init | head -1"))
x6 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+7){print $1}}' EAM_code.init | head -1"))
x7 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+8){print $1}}' EAM_code.init | head -1"))
x8 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+9){print $1}}' EAM_code.init | head -1"))
x9 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+10){print $1}}' EAM_code.init | head -1"))
x10 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+11){print $1}}' EAM_code.init | head -1"))
x11 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+12){print $1}}' EAM_code.init | head -1"))
x12 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+13){print $1}}' EAM_code.init | head -1"))
x13 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+14){print $1}}' EAM_code.init | head -1"))
x14 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+15){print $1}}' EAM_code.init | head -1"))
x15 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+16){print $1}}' EAM_code.init | head -1"))
x16 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+17){print $1}}' EAM_code.init | head -1"))
x17 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+18){print $1}}' EAM_code.init | head -1"))
x18 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+19){print $1}}' EAM_code.init | head -1"))
x19 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+20){print $1}}' EAM_code.init | head -1"))
x20 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+23){print $1}}' EAM_code.init | head -1"))
x21 = float(commands.getoutput("awk '{if(NR=="+str(nline)+"+26){print $1}}' EAM_code.init | head -1"))
#print "initial parameters: ",x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21
x = [x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21]
print "initial parameters: ",x
count = 0
#----------------------------------------------------------------------
creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
creator.create("Individual", numpy.ndarray, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
n_gene = 22 # number of parameters
min_ind = numpy.ones(n_gene) * -1.0
max_ind = numpy.ones(n_gene) * 1.0
for i in range(n_gene):
#min_ind[i] = b1[i][0]
#max_ind[i] = b1[i][1]
min_ind[i] = float(x[i]) - float(x[i])*0.1
max_ind[i] = float(x[i]) + float(x[i])*0.1
print "search area of paramter "+str(i)+": "+str(min_ind[i])+" | "+str(max_ind[i])
#----------------------------------------------------------------------
def create_ind_uniform(min_ind, max_ind):
ind = []
for min, max in zip(min_ind, max_ind):
ind.append(random.uniform(min, max))
return ind
#----------------------------------------------------------------------
toolbox.register("create_ind", create_ind_uniform, min_ind, max_ind)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.create_ind)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
#----------------------------------------------------------------------
#def evalOneMax(individual):
# return sum(individual),
#----------------------------------------------------------------------
def evalOneMax(individual):
print "------------------------"
global count
count += 1
print count
fi = open(file_tmp,'r')
text = fi.read().replace('re',str(individual[0]).replace("[","").replace("]",""))
text = text.replace('fe',str(individual[1]).replace("[","").replace("]",""))
text = text.replace('rhoe1',str(individual[2]).replace("[","").replace("]",""))
text = text.replace('rhoe2',str(individual[3]).replace("[","").replace("]",""))
text = text.replace('alpha',str(individual[4]).replace("[","").replace("]",""))
text = text.replace('beta',str(individual[5]).replace("[","").replace("]",""))
text = text.replace('Ap',str(individual[6]).replace("[","").replace("]",""))
text = text.replace('Bp',str(individual[7]).replace("[","").replace("]",""))
text = text.replace('kappa',str(individual[8]).replace("[","").replace("]",""))
text = text.replace('lambda',str(individual[9]).replace("[","").replace("]",""))
text = text.replace('Fn0',str(individual[10]).replace("[","").replace("]",""))
text = text.replace('Fn1',str(individual[11]).replace("[","").replace("]",""))
text = text.replace('Fn2',str(individual[12]).replace("[","").replace("]",""))
text = text.replace('Fn3',str(individual[13]).replace("[","").replace("]",""))
text = text.replace('F0',str(individual[14]).replace("[","").replace("]",""))
text = text.replace('F1',str(individual[15]).replace("[","").replace("]",""))
text = text.replace('F2',str(individual[16]).replace("[","").replace("]",""))
text = text.replace('F3',str(individual[17]).replace("[","").replace("]",""))
text = text.replace('eta',str(individual[18]).replace("[","").replace("]",""))
text = text.replace('Fep',str(individual[19]).replace("[","").replace("]",""))
text = text.replace('F4',str(individual[20]).replace("[","").replace("]",""))
text = text.replace('rhol',str(individual[21]).replace("[","").replace("]",""))
    fi.close()
with open(file_inp,'w') as f:
print >> f, text
commands.getoutput("./Zhou04_EAM_2 < EAM.input")
if (count % 9000) == 1:
commands.getoutput(lammps_adress+" < in.lmp")
commands.getoutput("cp ./cfg/run.50.cfg run.50.cfg")
commands.getoutput("./cfg2vasp/cfg2vasp run.50.cfg")
commands.getoutput("python ./vasp2cif/vasp2cif.py run.50.vasp")
commands.getoutput(cif2cell_adress+" run.50.vasp.cif --no-reduce -p pwscf --pwscf-pseudo-PSLibrary-libdr=\"./potentials\" --setup-all --k-resolution=0.48 --pwscf-force=yes --pwscf-stress=yes --pwscf-run-type=scf -o pw.in")
commands.getoutput(pwscf_adress+" < pw.scf.in")
commands.getoutput(cif2cell_adress+" run.50.vasp.cif --no-reduce -p pwscf --pwscf-pseudo-PSLibrary-libdr=\"./potentials\" --setup-all --k-resolution=0.18 --pwscf-force=yes --pwscf-stress=yes --pwscf-run-type=scf -o pw.in")
commands.getoutput(pwscf_adress+" < pw.scf.in > pw.out")
commands.getoutput("./pwscf2force >> config_potfit")
commands.getoutput(cif2cell_adress+" run.50.vasp.cif --no-reduce -p lammps -o data_fix.in")
commands.getoutput(lammps_adress+" < in.lmp_fix")
commands.getoutput("mv data.in.restart data.in")
#
commands.getoutput("./pwscf2force > config")
else:
commands.getoutput(lammps_adress+" < in.lmp_fix")
# 1 bar = 0.0001 GPa
# stress = -pressure
#pxxl = commands.getoutput("awk '{if($1==\"pxxl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
#pyyl = commands.getoutput("awk '{if($1==\"pyyl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
#pzzl = commands.getoutput("awk '{if($1==\"pzzl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
#pxyl = commands.getoutput("awk '{if($1==\"pxyl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
#pxzl = commands.getoutput("awk '{if($1==\"pxzl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
#pyzl = commands.getoutput("awk '{if($1==\"pyzl\"){printf \"%10.8f\",$3*7.4028083e-11}}' log.lammps")
#pxxp = commands.getoutput("awk '{if($1==\"#S\"){print -$2}}' config")
#pyyp = commands.getoutput("awk '{if($1==\"#S\"){print -$3}}' config")
#pzzp = commands.getoutput("awk '{if($1==\"#S\"){print -$4}}' config")
#pxyp = commands.getoutput("awk '{if($1==\"#S\"){print -$5}}' config")
#pxzp = commands.getoutput("awk '{if($1==\"#S\"){print -$6}}' config")
#pyzp = commands.getoutput("awk '{if($1==\"#S\"){print -$7}}' config")
#diffpxx = (float(pxxl) - float(pxxp))/(float(pxxp)+0.000000101)*100.0/6.0
#diffpyy = (float(pyyl) - float(pyyp))/(float(pyyp)+0.000000101)*100.0/6.0
#diffpzz = (float(pzzl) - float(pzzp))/(float(pzzp)+0.000000101)*100.0/6.0
#diffpxy = (float(pxyl) - float(pxyp))/(float(pxyp)+0.000000101)*100.0/6.0
#diffpxz = (float(pxzl) - float(pxzp))/(float(pxzp)+0.000000101)*100.0/6.0
#diffpyz = (float(pyzl) - float(pyzp))/(float(pyzp)+0.000000101)*100.0/6.0
#diffp = abs(diffpxx) + abs(diffpyy) + abs(diffpzz) + abs(diffpxy) + abs(diffpxz) + abs(diffpyz)
#print "lammps: "+str(pxxl)+", "+str(pyyl)+", "+str(pzzl)+", "+str(pxyl)+", "+str(pxzl)+", "+str(pyzl)+" [eV/A^3]"
#print "pwscf: "+str(pxxp)+", "+str(pyyp)+", "+str(pzzp)+", "+str(pxyp)+", "+str(pxzp)+", "+str(pyzp)+" [eV/A^3]"
#print "P diff (%): "+str(diffp)
#print "---------------"
diffp = 0.0
# force
difffx = 0.0
difffy = 0.0
difffz = 0.0
difff = 0.0
for i in range(int(natom)):
fxl[i] = commands.getoutput("awk '{if(NR==10+"+str(i)+"){printf \"%10.8f\",$7}}' trajectory.lammpstrj")
fyl[i] = commands.getoutput("awk '{if(NR==10+"+str(i)+"){printf \"%10.8f\",$8}}' trajectory.lammpstrj")
fzl[i] = commands.getoutput("awk '{if(NR==10+"+str(i)+"){printf \"%10.8f\",$9}}' trajectory.lammpstrj")
fxp[i] = commands.getoutput("awk '{if(NR==11+"+str(i)+"){print $5}}' config")
fyp[i] = commands.getoutput("awk '{if(NR==11+"+str(i)+"){print $6}}' config")
fzp[i] = commands.getoutput("awk '{if(NR==11+"+str(i)+"){print $7}}' config")
difffx = (float(fxl[i]) - float(fxp[i]))/(float(fxp[i])+0.000000101)*100.0/3.0/float(natom)
difffy = (float(fyl[i]) - float(fyp[i]))/(float(fyp[i])+0.000000101)*100.0/3.0/float(natom)
difffz = (float(fzl[i]) - float(fzp[i]))/(float(fzp[i])+0.000000101)*100.0/3.0/float(natom)
difff = difff + abs(difffx) + abs(difffy) + abs(difffz)
print "lammps: "+str(fxl[0])+" : "+str(fyl[0])+" : "+str(fzl[0])
print "PWscf: "+str(fxp[0])+" : "+str(fyp[0])+" : "+str(fzp[0])
print "force diff (%): "+str(difff)
print "---------------"
lammps_get_data = "grep \"Total Energy\" log.lammps | tail -1 | awk '{printf \"%-20.10f\",$4}'"
lmpe = commands.getoutput(lammps_get_data)
pwe = commands.getoutput("awk '{if($1==\"#E\"){print $2}}' config")
pwe = float(pwe) * float(natom)
print "lammps: "+str(lmpe)+" [eV]"
print "PWscf: "+str(pwe)+" [eV]"
diffe = float(pwe) - float(lmpe)
print "diff: "+str(diffe)+" [eV]"
diffea = float(diffe)/float(natom)
print "diff/atom: "+str(diffea)+" [eV/atom]"
commands.getoutput("echo "+str(count)+" "+str(diffe)+" >> energy.dat")
rhoin = float(individual[2])*float(individual[21])
rhoout = float(individual[2])*1.15
print "---------------"
print "F boundary 1, rho: "+str(rhoin)
print "F boundary 2, rho: "+str(individual[2])
print "F boundary 3, rho: "+str(rhoout)
commands.getoutput("cp "+satom+"_Zhou04.eam.alloy"+" Xx_Zhou04.eam.alloy")
commands.getoutput("./plot")
rhoin1 = commands.getoutput("cat F.plt | awk '{if($1<"+str(rhoin)+"){print $2}}' | tail -2 | head -1")
rhoin2 = commands.getoutput("cat F.plt | awk '{if($1>"+str(rhoin)+"){print $2}}' | head -2 | tail -1")
rhoe1 = commands.getoutput("cat F.plt | awk '{if($1<"+str(individual[2])+"){print $2}}' | tail -2 | head -1")
rhoe2 = commands.getoutput("cat F.plt | awk '{if($1>"+str(individual[2])+"){print $2}}' | head -2 | tail -1")
rhoout1 = commands.getoutput("cat F.plt | awk '{if($1<"+str(rhoout)+"){print $2}}' | tail -2 | head -1")
rhoout2 = commands.getoutput("cat F.plt | awk '{if($1>"+str(rhoout)+"){print $2}}' | head -2 | tail -1")
print "F near boundary 1, F: "+str(rhoin1)+" | "+str(rhoin2)+" | diff "+str(float(rhoin1) - float(rhoin2))
print "F near boundary 2, F: "+str(rhoe1)+" | "+str(rhoe2)+" | diff "+str(float(rhoe1) - float(rhoe2))
print "F near boundary 3, F: "+str(rhoout1)+" | "+str(rhoout2)+" | diff "+str(float(rhoout1) - float(rhoout2))
print "---------------"
y = (abs(diffea)**2 + 1000*abs(float(rhoin1) - float(rhoin2))**2 + 1000*abs(float(rhoe1) - float(rhoe2))**2 + 1000*abs(float(rhoout1) - float(rhoout2))**2 + 0.0000002*abs(diffp)**2 + 0.0000010*abs(difff)**2)
print "Evaluate: ", y
#print "Parameters: ", individual
print "Parameters: x0 = "+"[ "+str(individual[0])+","+str(individual[1])+","+str(individual[2])+","+str(individual[3])+","+str(individual[4])+","+str(individual[5])+","+str(individual[6])+","+str(individual[7])+","+str(individual[8])+","+str(individual[9])+","+str(individual[10])+","+str(individual[11])+","+str(individual[12])+","+str(individual[13])+","+str(individual[14])+","+str(individual[15])+","+str(individual[16])+","+str(individual[17])+","+str(individual[18])+","+str(individual[19])+","+str(individual[20])+","+str(individual[21])+" ]"
print "------------------------"
return y,
#----------------------------------------------------------------------
def cxTwoPointCopy(ind1, ind2):
size = len(ind1)
cxpoint1 = random.randint(1, size)
cxpoint2 = random.randint(1, size-1)
if (cxpoint2 >= cxpoint1):
cxpoint2 += 1
else:
cxpoint1, cxpoint2 = cxpoint2, cxpoint1
    ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] = ind2[cxpoint1:cxpoint2].copy(), ind1[cxpoint1:cxpoint2].copy()
return ind1, ind2
#----------------------------------------------------------------------
def mutUniformDbl(individual, min_ind, max_ind, indpb):
size = len(individual)
for i, min, max in zip(xrange(size), min_ind, max_ind):
if (random.random() < indpb):
individual[i] = random.uniform(min, max)
    return individual,
#----------------------------------------------------------------------
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
#----------------------------------------------------------------------
def main():
random.seed(64)
pop = toolbox.population(n=300)
hof = tools.HallOfFame(1, similar=numpy.array_equal)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=500, stats=stats, halloffame=hof)
return pop, stats, hof
#----------------------------------------------------------------------
if (__name__ == "__main__"):
main()
#----------------------------------------------------------------------
| [
"[email protected]"
]
| |
a98861179cec2687753f1bbd895f2aea1f551798 | 1aefa304f794c1ed9e06ce71248206098c756cf3 | /Django_Assignments/userauth_assignment/userauth_assignment/urls.py | 2ff85cdcbde74c5815e5277909f196ebe52546f4 | []
| no_license | dilipksahu/django_class | 333233bbced5491d886687b5990c8836dac2f145 | a044c4a079c61a6a6de05674103e8a9ba2b4d28c | refs/heads/master | 2023-01-10T07:40:44.713361 | 2020-11-10T15:26:33 | 2020-11-10T15:26:33 | 282,398,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | """userauth_assignment URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('userauth.urls')),
]
| [
"[email protected]"
]
| |
a0e0bfbddd2d9003785d592b78d9b8475e63b70c | 097eae4e0190da97570ae7db748fca306f977fbd | /py/learn/test/class/example.py | f8642e40064bba601cac875200d08370551f363f | []
| no_license | jiaolj/other | 42257c593495d97ab98b9a9af00d3791ccce7a57 | 78d0366cbd599f4dde7bf6e44ca4cfc373132418 | refs/heads/master | 2021-05-24T04:14:03.829126 | 2016-08-28T07:40:49 | 2016-08-28T07:40:49 | 64,064,262 | 0 | 1 | null | 2020-07-23T17:05:36 | 2016-07-24T12:25:56 | JavaScript | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
class b(object):
def __init__(self):
self.t=2
def getb(self):
self.t+=1
class a(b):
#----如果不声明init函数,会继承基类init属性。声明init是为了加一些自定义属性
def __init__(self):
b.__init__(self)
def get(self):
print 1
temp=a()
temp.getb()
print temp.t | [
"[email protected]"
]
| |
54bbd219f19c1ed9466ccdbb26db23e887394dba | 6cb11cb804f316d16efa083effb3def1c2cab57c | /22.py | c55af12e976c5a84557d4af19a98af4e455b732f | []
| no_license | davemolk/python_practice | 8879cd5bdcb77c3d84ff5c7f961fda1cd48b2f93 | 91d3e411b32f3a4a29d60148b352b91ce8e1d11b | refs/heads/main | 2023-08-01T12:57:45.779824 | 2021-09-18T16:54:11 | 2021-09-18T16:54:11 | 400,767,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | '''
return True if array has two consecutive 2s, otherwise return false
'''
def has22(nums):
    # compare adjacent elements directly; joining the digits into one string
    # (a regex search for "22") misfires on multi-digit numbers such as [12, 2]
    return any(a == 2 and b == 2 for a, b in zip(nums, nums[1:]))
print(has22([1, 2, 2])) | [
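# Extra checks (illustrative additions, not in the original):
print(has22([2, 1, 2]))   # False -- the 2s are not adjacent
print(has22([12, 2, 7]))  # False -- 12 is not two adjacent 2s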
"[email protected]"
]
| |
f3d109ee8baa41ca18eaa3f3d511d490209b0c12 | 0619b1ba176456c4b62d78d6a72fc4d9a9084287 | /thesite/communication_app/forms.py | 4eabf764b1d1037b42e5497319e87205eb1f6f36 | [
"Apache-2.0"
]
| permissive | jacinda/petwitter | c13dd43a5b76786f5d5c5c3f29420153cb5a16c7 | ea7ffa16b8d8b1207f04ace619b31dba4efc45bc | refs/heads/master | 2021-01-13T06:38:31.439749 | 2015-04-15T17:25:03 | 2015-04-15T17:25:03 | 33,678,730 | 0 | 0 | null | 2015-04-09T16:02:42 | 2015-04-09T16:02:40 | Python | UTF-8 | Python | false | false | 699 | py | from django import forms
import communication_app.models
class PetForm(forms.ModelForm):
    class Meta:
        model = communication_app.models.Pet
        fields = ['name']

    def __init__(self, *args, **kwargs):
        super(PetForm, self).__init__(*args, **kwargs)
        self.fields['name'].widget = forms.TextInput(attrs={
            'class': 'form-control'})

class UpdateForm(forms.ModelForm):
    class Meta:
        model = communication_app.models.Update
        fields = ['text']

    def __init__(self, *args, **kwargs):
        super(UpdateForm, self).__init__(*args, **kwargs)
        self.fields['text'].widget = forms.TextInput(attrs={
            'class': 'form-control'})
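# Hypothetical view-side usage (names below are illustrative, not from this app):
#
#     form = PetForm(request.POST or None)
#     if form.is_valid():
#         form.save()   # ModelForm persists the bound Pet instance
#
# Both forms only restyle their single text input with Bootstrap's
# form-control class; validation and saving come from ModelForm itself.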
| [
"[email protected]"
]
| |
55bf5b769ce8bafe053fe39564ed13cc2e3360c2 | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_10_01_preview/operations/_component_containers_operations.py | 53f9c4e0361d8779eb7f2a89e6c4f5d84f60bf39 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 22,292 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
skip = kwargs.pop('skip', None) # type: Optional[str]
list_view_type = kwargs.pop('list_view_type', None) # type: Optional[Union[str, "_models.ListViewType"]]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if skip is not None:
query_parameters['$skip'] = _SERIALIZER.query("skip", skip, 'str')
if list_view_type is not None:
query_parameters['listViewType'] = _SERIALIZER.query("list_view_type", list_view_type, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class ComponentContainersOperations(object):
"""ComponentContainersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name, # type: str
workspace_name, # type: str
skip=None, # type: Optional[str]
list_view_type=None, # type: Optional[Union[str, "_models.ListViewType"]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ComponentContainerResourceArmPaginatedResult"]
"""List component containers.
List component containers.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param skip: Continuation token for pagination.
:type skip: str
:param list_view_type: View type for including/excluding (for example) archived entities.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentContainerResourceArmPaginatedResult or
the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ComponentContainerResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentContainerResourceArmPaginatedResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
skip=skip,
list_view_type=list_view_type,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
skip=skip,
list_view_type=list_view_type,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentContainerResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete container.
Delete container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ComponentContainer"
"""Get container.
Get container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ComponentContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
body, # type: "_models.ComponentContainer"
**kwargs # type: Any
):
# type: (...) -> "_models.ComponentContainer"
"""Create or update container.
Create or update container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:param body: Container entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:keyword api_version: Api Version. The default value is "2022-10-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'ComponentContainer')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ComponentContainer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ComponentContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'} # type: ignore
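# Illustrative aside (not part of the generated file): these operations are
# normally reached through a generated service client rather than constructed
# directly. The client attribute below is an assumption for sketch purposes.
#
#     for container in client.component_containers.list(
#             resource_group_name="my-rg",        # placeholder values
#             workspace_name="my-workspace"):
#         print(container.name)
#
# ``list`` returns an ItemPaged iterable, so plain iteration transparently
# follows ``next_page_token`` across result pages.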
| [
"[email protected]"
]
| |
7e1029ad59d5a3c4e3e7636aa5802f22953086cd | e15d63ccde04e7458bff5af1bdad63a5c699b489 | /example/Transformer_vision/2dpose/vit/multi_branch/config.py | 5582a68fa5d82c8142ce319cab34a1901077d3e7 | [
"WTFPL"
]
| permissive | ddddwee1/TorchSUL | 775b6a2b1e4ab7aac25a3f0411de83affc257af5 | 6c7cd41b14fc8b746983e8b981d1ba4d08370ca2 | refs/heads/master | 2023-08-21T15:21:24.131718 | 2023-08-18T09:37:56 | 2023-08-18T09:37:56 | 227,628,298 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import numpy as np
# size
inp_size = 224
out_size = 56
base_sigma = 2.5
num_pts = 17
pairs = [[0,1], [1,2],[2,3], [0,4], [4,5],[5,6], [0,7],[7,8],[8,9],[9,10], [8,11],[11,12],[12,13],[8,14],[14,15],[15,16]]
# augmentation
rotation = 0
min_scale = 1 # this controls largest size
max_scale = 1 # this controls smallest size
max_translate = 0
blur_prob = 0.0
blur_size = [7, 11, 15, 21]
blur_type = ['vertical','horizontal','mean']
# training
data_root = '/data/pose/mpii/images/'
max_epoch = 300
init_lr = 0.0005
decay = 0.0001
momentum = 0.9
lr_epoch = [150,250]
save_interval = 1
# extra
distributed = True
scale_var = 19.2
angle_var = np.pi
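# Illustrative aside (an assumption about how out_size/base_sigma are used in
# this kind of 2D-pose pipeline, not code from this repo): one Gaussian target
# heatmap per keypoint, of side out_size with std base_sigma.
def _gaussian_heatmap(cx, cy, size=out_size, sigma=base_sigma):
    ys, xs = np.mgrid[0:size, 0:size]
    return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2.0 * sigma ** 2))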
| [
"[email protected]"
]
| |
10b0d6c77a5a22b76ba2d6593ccd3657539ce9fd | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /1.0/_downloads/469209d8040c0923f6b4f925074d58d7/evoked_topomap.py | f677e3d7f02abfe8f6f3546a99379b408253479f | []
| permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 5,921 | py | # -*- coding: utf-8 -*-
"""
.. _ex-evoked-topomap:
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points using multiple
additional options.
"""
# Authors: Christian Brodbeck <[email protected]>
# Tal Linzen <[email protected]>
# Denis A. Engeman <[email protected]>
# Mikołaj Magnuski <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD-3-Clause
# %%
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path / 'MEG' / 'sample' / 'sample_audvis-ave.fif'
# load evoked corresponding to a specific condition
# from the fif file and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
# %%
# Basic :func:`~mne.viz.plot_topomap` options
# -------------------------------------------
#
# We plot evoked topographies using :func:`mne.Evoked.plot_topomap`. The first
# argument, ``times`` allows to specify time instants (in seconds!) for which
# topographies will be shown. We select timepoints from 50 to 150 ms with a
# step of 20ms and plot magnetometer data:
times = np.arange(0.05, 0.151, 0.02)
evoked.plot_topomap(times, ch_type='mag', time_unit='s')
# %%
# If times is set to None at most 10 regularly spaced topographies will be
# shown:
evoked.plot_topomap(ch_type='mag', time_unit='s')
# %%
# We can use ``nrows`` and ``ncols`` parameter to create multiline plots
# with more timepoints.
all_times = np.arange(-0.2, 0.5, 0.03)
evoked.plot_topomap(all_times, ch_type='mag', time_unit='s',
ncols=8, nrows='auto')
# %%
# Instead of showing topographies at specific time points we can compute
# averages of 50 ms bins centered on these time points to reduce the noise in
# the topographies:
evoked.plot_topomap(times, ch_type='mag', average=0.05, time_unit='s')
# %%
# We can plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad', time_unit='s')
# %%
# Additional :func:`~mne.viz.plot_topomap` options
# ------------------------------------------------
#
# We can also use a range of various :func:`mne.viz.plot_topomap` arguments
# that control how the topography is drawn. For example:
#
# * ``cmap`` - to specify the color map
# * ``res`` - to control the resolution of the topographies (lower resolution
# means faster plotting)
# * ``outlines='skirt'`` to see the topography stretched beyond the head circle
# * ``contours`` to define how many contour lines should be plotted
evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32,
outlines='skirt', contours=4, time_unit='s')
# %%
# If you look at the edges of the head circle of a single topomap you'll see
# the effect of extrapolation. There are three extrapolation modes:
#
# - ``extrapolate='local'`` extrapolates only to points close to the sensors.
# - ``extrapolate='head'`` extrapolates out to the head circle.
# - ``extrapolate='box'`` extrapolates to a large box stretching beyond the
# head circle.
#
# The default value ``extrapolate='auto'`` will use ``'local'`` for MEG sensors
# and ``'head'`` otherwise. Here we show each option:
extrapolations = ['local', 'head', 'box']
fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3)
# Here we look at EEG channels, and use a custom head sphere to get all the
# sensors to be well within the drawn head surface
for axes_row, ch_type in zip(axes, ('mag', 'eeg')):
for ax, extr in zip(axes_row, extrapolations):
evoked.plot_topomap(0.1, ch_type=ch_type, size=2, extrapolate=extr,
axes=ax, show=False, colorbar=False,
sphere=(0., 0., 0., 0.09))
ax.set_title('%s %s' % (ch_type.upper(), extr), fontsize=14)
fig.tight_layout()
# %%
# More advanced usage
# -------------------
#
# Now we plot magnetometer data as topomap at a single time point: 100 ms
# post-stimulus, add channel labels, title and adjust plot margins:
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response',
time_unit='s')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
# %%
# We can also highlight specific channels by adding a mask, to e.g. mark
# channels exceeding a threshold at a given time:
# Define a threshold and create the mask
mask = evoked.data > 1e-13
# Select times and plot
times = (0.09, 0.1, 0.11)
evoked.plot_topomap(times, ch_type='mag', time_unit='s', mask=mask,
mask_params=dict(markersize=10, markerfacecolor='y'))
# %%
# Or by manually picking the channels to highlight at different times:
times = (0.09, 0.1, 0.11)
_times = ((np.abs(evoked.times - t)).argmin() for t in times)
significant_channels = [
('MEG 0231', 'MEG 1611', 'MEG 1621', 'MEG 1631', 'MEG 1811'),
('MEG 2411', 'MEG 2421'),
('MEG 1621')]
_channels = [np.in1d(evoked.ch_names, ch) for ch in significant_channels]
mask = np.zeros(evoked.data.shape, dtype='bool')
for _chs, _time in zip(_channels, _times):
mask[_chs, _time] = True
evoked.plot_topomap(times, ch_type='mag', time_unit='s', mask=mask,
mask_params=dict(markersize=10, markerfacecolor='y'))
# %%
# Animating the topomap
# ---------------------
#
# Instead of using a still image we can plot magnetometer data as an animation,
# which animates properly only in matplotlib interactive mode.
# sphinx_gallery_thumbnail_number = 9
times = np.arange(0.05, 0.151, 0.01)
fig, anim = evoked.animate_topomap(
times=times, ch_type='mag', frame_rate=2, time_unit='s', blit=False)
| [
"[email protected]"
]
| |
c157b99f15cf4b7b2d4bd05ea5b0e5f89507cf3a | 07bb913fea5e0f1e65e35a7ca5c594fa1d144eb8 | /publishconf.py | ab389e79f3df4349f62293bf934b3def399eb94a | []
| no_license | jbzdak/pwzn-lessons | 8373552fabb260593cf612a27bf821d7b70b452d | 5ca58dba6220259b170c8a689a10338122c4eefd | refs/heads/master | 2021-04-05T20:48:56.447870 | 2020-03-19T20:36:08 | 2020-03-19T20:36:08 | 248,600,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
#SITEURL = 'http://pwzn.s3-website-us-east-1.amazonaws.com'
SITEURL = 'http://db.fizyka.pw.edu.pl/pwzn'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
| [
"[email protected]"
]
| |
86a3a8b7517688b3d439381f7baf7469c0eb82a9 | 9f2a231557a9aabc181ed388faaf2f0b3b59c530 | /Testcode/spellCheck.py | 5be1f5b0fa649c1d809ee849a078538109829c13 | []
| no_license | abhashjain/DIC_Project | 7e379cd5ef99d1fc31d414985e1b04388b475fe0 | 329f8da2f61e95410292a3062c68ed06845ec6ac | refs/heads/master | 2020-04-25T14:49:58.508999 | 2018-12-11T04:36:09 | 2018-12-11T04:36:09 | 172,855,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import os, time, re
os.getcwd()  # leftover no-op from development; the paths below stay relative
startTime = time.time()

wordFile = open("..\\src\\words.txt", "r")
# store the dictionary as a set of tokens: len() then counts words rather than
# characters, and the membership test below becomes an exact-word lookup
# instead of a substring search inside one huge string
words = set(wordFile.read().split())
print("Words in dictionary:", len(words))

inputDoc = open("..\\src\\guten.txt", "r", encoding="utf-8")
doc = inputDoc.read().split()
print("Words in file:", len(doc))

## Processing the input document
def is_number(x):
    # checking whether x parses as an int
    try:
        int(x)
        return True
    except (TypeError, ValueError):
        pass
    return False

processedInput = list()
for word in doc:
    if not is_number(word) and "@" not in word and "www." not in word:
        cleaned = re.sub('[^A-Za-z0-9]+', '', word)
        if len(cleaned) > 1:
            processedInput.append(cleaned)

misspelledWords = list()
i = 0
for word in processedInput:
    # i += 1
    # print(i, end=", ")
    if word.lower() not in words:
        misspelledWords.append(word)

print("Total misspelled words =", len(misspelledWords))
print("Total execution time = %s sec" % (time.time() - startTime))

with open("..//results//outputPython.txt", "w") as outFile:
    for word in misspelledWords:
        outFile.write(word)
        outFile.write("\n")
print("Incorrect words written to outputPython.txt")
| [
"[email protected]"
]
| |
2f1462be3f29b5ddc93d058062150d802f915cac | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/InstallArea/x86_64-slc6-gcc49-opt/python/StrippingArchive/Stripping23/StrippingQEE/StrippingH24Mu.py | f8c83a16c95755164e65577006aff13f275fff10 | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,741 | py | '''
Module for construction of h-->MuMuMuMu stripping selection
Exported symbols (use python help!):
- H24MuLineConf
Based on Bsmumu stripping lines
'''
__author__ = ['Xabier Cid Vidal']
__date__ = '11/22/2013'
__all__ = ('H24MuLineConf',
'default_name',
'default_config'
)
from Gaudi.Configuration import *
from Configurables import FilterDesktop, CombineParticles
from PhysSelPython.Wrappers import Selection, DataOnDemand
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
#from Configurables import OfflineVertexFitter
default_name = 'H24Mu'
#### This is the dictionary of all tunable cuts ########
default_config={
'NAME': default_name,
'BUILDERTYPE' : 'H24MuLineConf',
'WGs' : [ 'QEE' ],
'STREAMS' : [ 'Leptonic' ],
'CONFIG':{'DefaultPostscale' : 1,
'PromptLinePrescale' : 1,
'SimpleLinePrescale' : 1,
'DetachedLinePrescale' : 1,
'LooseLinePrescale' : 0.01,
'MuTrackChi2DoF' : 3,
'MupTprompt' : 375, #MeV
'MupTdetached' : 250, #MeV
'MuGhostProb' : 0.4,
'MuMaxIPchi2' : 3,
'MuMinIPchi2' : 1,
'MuPIDdll' : -3, # muon combDLL
'MuNShared' : 3, # muon NShared
'A1maxMass' : 2000, #MeV
'A1Doca' : 0.2, #mm
'A1DocaTight' : 0.1, #mm
'A1Vchi2' : 7.5,
'A1Vchi2Tight' : 1,
'A1Dira' : 0,
'A1maxIPchi2' : 25,
'A1FDChi2' : 4,
'HmaxDOCA' : 0.75, #mm
'HmaxDOCATight' : 0.25, #mm
'HVchi2' : 10,
'HVchi2Tight' : 2,
'HpT' : 1200, #MeV
'MuTrackChi2DoF_loose' : 10,
'MupT_loose' : 0,
'MuMaxIPchi2_loose' : 1000000,
'A1maxMass_loose' : 5000, #MeV
'A1Doca_loose' : 10, #mm
'A1Vchi2_loose' : 20,
'HmaxDOCA_loose' : 1000000, #mm
'HpT_loose' : 300, #MeV
'HVchi2_loose' : 50
}
}
class H24MuLineConf(LineBuilder) :
"""
Builder of:
    - H --> mumumumu stripping lines: prompt, simple, detached and loose (control),
Usage:
>>> config = { .... }
>>> Conf = H24MuLinesConf('Test',config)
>>> myLines = Conf.lines
>>> for line in myLines:
>>> print line.name(), line.outputLocation()
The lines can be used directly to build a StrippingStream object.
Exports as instance data members:
selPrompt : nominal prompt H24mu stripping line
selSimple : nominal simple H24mu stripping line (no pT, IP cuts)
selDetached : nominal detached H24mu stripping line
selLoose : loose H24mu stripping line to understand systematics (prescaled)
promptLine : Stripping line made from selPrompt
simpleLine : Stripping line made from selSimple
detachedLine : Stripping line made from selDetached
looseLine : Stripping line made from selLoose
lines : list of lines: [ promptLine, simpleLine, detachedLine, looseLine ]
"""
__configuration_keys__ = (
'DefaultPostscale',
'PromptLinePrescale',
'SimpleLinePrescale',
'DetachedLinePrescale',
'LooseLinePrescale',
'MuTrackChi2DoF',
'MuPIDdll',
'MuNShared',
'MupTprompt',
'MupTdetached',
'MuMaxIPchi2',
'MuMinIPchi2',
'MuGhostProb',
'A1maxMass',
'A1Doca',
'A1Vchi2',
'A1DocaTight',
'A1Vchi2Tight',
'A1Dira',
'A1maxIPchi2',
'A1FDChi2',
'HmaxDOCA',
'HpT',
'HVchi2',
'HmaxDOCATight',
'HVchi2Tight',
'MuTrackChi2DoF_loose',
'MupT_loose',
'MuMaxIPchi2_loose',
'A1maxMass_loose',
'A1Doca_loose',
'A1Vchi2_loose',
'HmaxDOCA_loose',
'HpT_loose',
'HVchi2_loose'
)
def __init__(self,
name = default_name,
config = None,
debug_cuts = 0):
LineBuilder.__init__(self, name, config)
prompt_name=name+'Prompt'
simple_name=name+'Simple'
detached_name=name+'Detached'
loose_name=name+'Loose'
self.config_dict = config
self.debug_cuts = debug_cuts
self.selPrompt = self.makeDefault(prompt_name,type = 0)
self.selSimple = self.makeDefault(simple_name,type = 1)
self.selDetached = self.makeDefault(detached_name,type = 2)
self.selLoose = self.makeDefault(loose_name,type = 3)
ExtraInfoTools = [{'Type' : 'ConeVariables',
'ConeNumber' : 1,
'ConeAngle' : 1.0,
'Variables' : ['angle', 'mult','p','pt',
'ptasy','pasy']},
{'Type' : 'ConeVariables',
'ConeNumber' : 2,
'ConeAngle' : 1.5,
'Variables' : ['angle', 'mult','p','pt',
'ptasy','pasy']},
{'Type' : 'ConeVariables',
'ConeNumber' : 3,
'ConeAngle' : 2.0,
'Variables' : ['angle', 'mult','p','pt',
'ptasy','pasy']},
{'Type' : 'VertexIsolation'}]
ExtraInfoDaughters = {"prompt" : [getattr(self,"A1"+prompt_name)],
"simple" : [getattr(self,"A1"+simple_name)],
"detached": [getattr(self,"A1"+detached_name)],
"loose" : [getattr(self,"A1"+loose_name)]}
self.promptLine = StrippingLine(prompt_name+"Line",
prescale = config['PromptLinePrescale'],
postscale = config['DefaultPostscale'],
# algos = [ self.selPrompt ],
selection = self.selPrompt,
ExtraInfoTools = ExtraInfoTools,
ExtraInfoSelections = ExtraInfoDaughters["prompt"],
MDSTFlag = True,
RequiredRawEvents = ["Muon"]
)
self.simpleLine = StrippingLine(simple_name+"Line",
prescale = config['SimpleLinePrescale'],
postscale = config['DefaultPostscale'],
# algos = [ self.selSimple ],
selection = self.selSimple,
ExtraInfoTools = ExtraInfoTools,
ExtraInfoSelections = ExtraInfoDaughters["simple"],
MDSTFlag = True,
RequiredRawEvents = ["Muon"]
)
self.detachedLine = StrippingLine(detached_name+"Line",
prescale = config['DetachedLinePrescale'],
postscale = config['DefaultPostscale'],
# algos = [ self.selDetached ],
selection = self.selDetached,
ExtraInfoTools = ExtraInfoTools,
ExtraInfoSelections = ExtraInfoDaughters["detached"],
MDSTFlag = True,
RequiredRawEvents = ["Muon"]
)
## no need for mdst or raw data in the loose line...
self.looseLine = StrippingLine(loose_name+"Line",
prescale = config['LooseLinePrescale'],
postscale = config['DefaultPostscale'],
# algos = [ self.selLoose ],
selection = self.selLoose,
ExtraInfoTools = ExtraInfoTools,
ExtraInfoSelections = ExtraInfoDaughters["loose"],
)
self.registerLine(self.promptLine)
self.registerLine(self.simpleLine)
self.registerLine(self.detachedLine)
#self.registerLine(self.looseLine)
def makeA1(self,name,type) :
"""
Prompt A1 selection
Arguments:
name : name of the Selection.
type : 0 (prompt), 1 (simple), 2 (detached), 3 (loose)
"""
A1 = CombineParticles("Combine"+name)
A1.DecayDescriptor = "KS0 -> mu+ mu-"
# prompt
if type==0:
A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF)s ) "\
"& ( TRGHOSTPROB < %(MuGhostProb)s ) " \
"& (PT > %(MupTprompt)s * MeV ) "\
"& (MIPCHI2DV(PRIMARY)< %(MuMaxIPchi2)s )" %self.config_dict }
A1.CombinationCut = "(AM < %(A1maxMass)s * MeV ) "\
"& (AMAXDOCA('')<%(A1Doca)s * mm)" %self.config_dict
A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2)s ) "\
"& (MM < %(A1maxMass)s * MeV)" %self.config_dict
# simple: tighten DOCA and Vchi2, tighten muID cut
elif type==1:
A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF)s ) "\
"& ( TRGHOSTPROB < %(MuGhostProb)s ) " \
"& (PIDmu > %(MuPIDdll)s ) "\
"& (PPINFO(LHCb.ProtoParticle.MuonNShared,99999)<= %(MuNShared)s ) " %self.config_dict }
A1.CombinationCut = "(AM < %(A1maxMass)s * MeV ) "\
"& (AMAXDOCA('')<%(A1DocaTight)s * mm)" %self.config_dict
A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2Tight)s ) "\
"& (MM < %(A1maxMass)s * MeV)" %self.config_dict
#detached
elif type==2:
#A1.addTool( OfflineVertexFitter )
#A1.ParticleCombiners.update( { "" : "OfflineVertexFitter"} )
#A1.ReFitPVs = True
A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF)s ) "\
"& (PT > %(MupTdetached)s * MeV ) "\
"& ( TRGHOSTPROB < %(MuGhostProb)s ) " \
"& (MIPCHI2DV(PRIMARY)> %(MuMinIPchi2)s )" %self.config_dict }
A1.CombinationCut = "(AM < %(A1maxMass)s * MeV ) "\
"& (AMAXDOCA('')<%(A1Doca)s * mm)" %self.config_dict
A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2)s ) "\
"& (MM < %(A1maxMass)s * MeV)" \
"& (BPVDIRA > %(A1Dira)s )" \
"& (BPVIPCHI2() < %(A1maxIPchi2)s )" \
"& (BPVVDCHI2 > %(A1FDChi2)s )" %self.config_dict
#loose
else:
A1.DaughtersCuts = { "mu+" : "(TRCHI2DOF < %(MuTrackChi2DoF_loose)s ) "\
"& (PT > %(MupT_loose)s * MeV ) "\
"& (MIPCHI2DV(PRIMARY)< %(MuMaxIPchi2_loose)s )" %self.config_dict }
A1.CombinationCut = "(AM < %(A1maxMass_loose)s * MeV ) "\
"& (AMAXDOCA('')<%(A1Doca_loose)s * mm)" %self.config_dict
A1.MotherCut = "(VFASPF(VCHI2)< %(A1Vchi2_loose)s ) "\
"& (MM < %(A1maxMass_loose)s * MeV)" %self.config_dict
_stdAllLooseMuons = DataOnDemand(Location = "Phys/StdAllLooseMuons/Particles")
if self.debug_cuts:
print "DEBUG - A1 cuts for type", type
print A1.DaughtersCuts
print A1.MotherCut
print A1.CombinationCut
return Selection ("Sel"+name,
Algorithm = A1,
RequiredSelections = [ _stdAllLooseMuons ])
def makeDefault(self,name,type=0) :
"""
H-->A0(mumu)A0(mumu) selection.
Arguments:
name : name of the Selection.
type : 0 (prompt), 1 (simple), 2 (detached), 3 (loose)
"""
SelA1 = self.makeA1("A1"+name,type)
setattr(self,"A1"+name,SelA1)
H25 = CombineParticles("Combine_H25"+name)
H25.DecayDescriptor = "H_10 -> KS0 KS0"
H25.DaughtersCuts = {}
# simple: do not cut in pT, cut tighter in DOCA, VCHI2
if type==1:
H25.CombinationCut = "(AMAXDOCA('')< %(HmaxDOCATight)s * mm )" %self.config_dict
H25.MotherCut = "(VFASPF(VCHI2)< %(HVchi2Tight)s )" %self.config_dict
# loose: loosen all cuts
elif type==3:
H25.CombinationCut = "(AMAXDOCA('')< %(HmaxDOCA_loose)s * mm )" %self.config_dict
H25.MotherCut = "(PT > %(HpT_loose)s * MeV ) "\
"& (VFASPF(VCHI2)< %(HVchi2_loose)s ) " %self.config_dict
# prompt or detached
else:
H25.CombinationCut = "(AMAXDOCA('')< %(HmaxDOCA)s * mm )" %self.config_dict
H25.MotherCut = "(PT > %(HpT)s * MeV ) "\
"& (VFASPF(VCHI2)< %(HVchi2)s ) " %self.config_dict
if self.debug_cuts:
print "DEBUG - H cuts for type", type
print H25.MotherCut
print H25.CombinationCut
return Selection( "SelH4mu"+name,
Algorithm = H25,
RequiredSelections=[SelA1] )
| [
"[email protected]"
]
| |
44d7b163937a1cc756b6f3918b58cb04e955dc93 | 04aacfdb9944e6d796671198835394e07db98ecf | /pythonz/commands/locate.py | 939aedb0faee04842dfa3a3a10a968e88396ce8c | []
| no_license | rmoorman/pythonz | ea86f302c70b67440c2829d4a0a9161d4a006ccc | 3d43172cae190284cf0b620aa28c0f794f770497 | refs/heads/master | 2021-01-12T19:51:39.057258 | 2014-10-16T07:20:06 | 2014-10-16T07:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py |
import os
from pythonz.commands import Command
from pythonz.define import PATH_PYTHONS
from pythonz.util import Package, is_installed
from pythonz.log import logger
class LocateCommand(Command):
name = "locate"
usage = "%prog [options] VERSION"
summary = "Locate the given version of python"
def __init__(self):
super(LocateCommand, self).__init__()
self.parser.add_option(
"-t", "--type",
dest="type",
default="cpython",
help="Type of Python version: cpython, stackless, pypy, pypy3 or jython."
)
def run_command(self, options, args):
if not args or len(args) > 1:
self.parser.print_help()
return
pkg = Package(args[0], options.type)
pkgname = pkg.name
if not is_installed(pkg):
logger.error("`%s` is not installed." % pkgname)
return
logger.log(os.path.join(PATH_PYTHONS, pkgname, 'bin', 'python'))
LocateCommand()
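# Example invocation, inferred from the usage string above (output path is
# illustrative; the exact layout depends on the local pythonz install):
#
#     $ pythonz locate 3.9.0
#     $ pythonz locate --type pypy 7.3.1
#
# On success the command prints the interpreter path assembled from
# PATH_PYTHONS, e.g. ~/.pythonz/pythons/CPython-3.9.0/bin/python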
| [
"[email protected]"
]
| |
82af4577975f944ba39e44d7f9a294e05163755e | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/intent.py | ab7d83589724b82269ef2d692eaaf2e0c46698b6 | [
"Apache-2.0"
]
| permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,442 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3beta1',
manifest={
'IntentView',
'Intent',
'ListIntentsRequest',
'ListIntentsResponse',
'GetIntentRequest',
'CreateIntentRequest',
'UpdateIntentRequest',
'DeleteIntentRequest',
},
)
class IntentView(proto.Enum):
r"""Represents the options for views of an intent.
An intent can be a sizable object. Therefore, we provide a
resource view that does not return training phrases in the
response.
"""
INTENT_VIEW_UNSPECIFIED = 0
INTENT_VIEW_PARTIAL = 1
INTENT_VIEW_FULL = 2
class Intent(proto.Message):
r"""An intent represents a user's intent to interact with a
conversational agent.
You can provide information for the Dialogflow API to use to
match user input to an intent by adding training phrases (i.e.,
examples of user input) to your intent.
Attributes:
name (str):
The unique identifier of the intent. Required for the
[Intents.UpdateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.UpdateIntent]
method.
[Intents.CreateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.CreateIntent]
populates the name automatically. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>``.
display_name (str):
Required. The human-readable name of the
intent, unique within the agent.
training_phrases (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.TrainingPhrase]):
The collection of training phrases the agent
is trained on to identify the intent.
parameters (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.Parameter]):
The collection of parameters associated with
the intent.
priority (int):
The priority of this intent. Higher numbers represent higher
priorities.
- If the supplied value is unspecified or 0, the service
translates the value to 500,000, which corresponds to the
``Normal`` priority in the console.
- If the supplied value is negative, the intent is ignored
in runtime detect intent requests.
is_fallback (bool):
Indicates whether this is a fallback intent.
Currently only default fallback intent is
allowed in the agent, which is added upon agent
creation.
Adding training phrases to fallback intent is
useful in the case of requests that are
mistakenly matched, since training phrases
assigned to fallback intents act as negative
examples that triggers no-match event.
labels (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.LabelsEntry]):
The key/value metadata to label an intent. Labels can
contain lowercase letters, digits and the symbols '-' and
'_'. International characters are allowed, including letters
from unicase alphabets. Keys must start with a letter. Keys
and values can be no longer than 63 characters and no more
than 128 bytes.
Prefix "sys-" is reserved for Dialogflow defined labels.
Currently allowed Dialogflow defined labels include:
- sys-head
- sys-contextual The above labels do not require value.
"sys-head" means the intent is a head intent.
"sys-contextual" means the intent is a contextual intent.
description (str):
Human readable description for better
understanding an intent like its scope, content,
result etc. Maximum character limit: 140
characters.
"""
class TrainingPhrase(proto.Message):
r"""Represents an example that the agent is trained on to
identify the intent.
Attributes:
id (str):
Output only. The unique identifier of the
training phrase.
parts (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent.TrainingPhrase.Part]):
Required. The ordered list of training phrase parts. The
parts are concatenated in order to form the training phrase.
Note: The API does not automatically annotate training
phrases like the Dialogflow Console does.
Note: Do not forget to include whitespace at part
boundaries, so the training phrase is well formatted when
the parts are concatenated.
If the training phrase does not need to be annotated with
parameters, you just need a single part with only the
[Part.text][google.cloud.dialogflow.cx.v3beta1.Intent.TrainingPhrase.Part.text]
field set.
If you want to annotate the training phrase, you must create
multiple parts, where the fields of each part are populated
in one of two ways:
- ``Part.text`` is set to a part of the phrase that has no
parameters.
- ``Part.text`` is set to a part of the phrase that you
want to annotate, and the ``parameter_id`` field is set.
repeat_count (int):
Indicates how many times this example was
added to the intent.
"""
class Part(proto.Message):
r"""Represents a part of a training phrase.
Attributes:
text (str):
Required. The text for this part.
parameter_id (str):
The
[parameter][google.cloud.dialogflow.cx.v3beta1.Intent.Parameter]
used to annotate this part of the training phrase. This
field is required for annotated parts of the training
phrase.
"""
text = proto.Field(proto.STRING, number=1)
parameter_id = proto.Field(proto.STRING, number=2)
id = proto.Field(proto.STRING, number=1)
parts = proto.RepeatedField(proto.MESSAGE, number=2,
message='Intent.TrainingPhrase.Part',
)
repeat_count = proto.Field(proto.INT32, number=3)
class Parameter(proto.Message):
r"""Represents an intent parameter.
Attributes:
id (str):
Required. The unique identifier of the parameter. This field
is used by [training
phrases][google.cloud.dialogflow.cx.v3beta1.Intent.TrainingPhrase]
to annotate their
[parts][google.cloud.dialogflow.cx.v3beta1.Intent.TrainingPhrase.Part].
entity_type (str):
Required. The entity type of the parameter. Format:
``projects/-/locations/-/agents/-/entityTypes/<System Entity Type ID>``
for system entity types (for example,
``projects/-/locations/-/agents/-/entityTypes/sys.date``),
or
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``
for developer entity types.
is_list (bool):
Indicates whether the parameter represents a
list of values.
redact (bool):
Indicates whether the parameter content should be redacted
in log. If redaction is enabled, the parameter content will
be replaced by parameter name during logging. Note: the
parameter content is subject to redaction if either
parameter level redaction or [entity type level
redaction][google.cloud.dialogflow.cx.v3beta1.EntityType.redact]
is enabled.
"""
id = proto.Field(proto.STRING, number=1)
entity_type = proto.Field(proto.STRING, number=2)
is_list = proto.Field(proto.BOOL, number=3)
redact = proto.Field(proto.BOOL, number=4)
name = proto.Field(proto.STRING, number=1)
display_name = proto.Field(proto.STRING, number=2)
training_phrases = proto.RepeatedField(proto.MESSAGE, number=3,
message=TrainingPhrase,
)
parameters = proto.RepeatedField(proto.MESSAGE, number=4,
message=Parameter,
)
priority = proto.Field(proto.INT32, number=5)
is_fallback = proto.Field(proto.BOOL, number=6)
labels = proto.MapField(proto.STRING, proto.STRING, number=7)
description = proto.Field(proto.STRING, number=8)
class ListIntentsRequest(proto.Message):
r"""The request message for
[Intents.ListIntents][google.cloud.dialogflow.cx.v3beta1.Intents.ListIntents].
Attributes:
parent (str):
Required. The agent to list all intents for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
language_code (str):
The language to list intents for. The following fields are
language dependent:
- ``Intent.training_phrases.parts.text``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
intent_view (google.cloud.dialogflowcx_v3beta1.types.IntentView):
The resource view to apply to the returned
intent.
page_size (int):
The maximum number of items to return in a
single page. By default 100 and at most 1000.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(proto.STRING, number=1)
language_code = proto.Field(proto.STRING, number=2)
intent_view = proto.Field(proto.ENUM, number=5,
enum='IntentView',
)
page_size = proto.Field(proto.INT32, number=3)
page_token = proto.Field(proto.STRING, number=4)
class ListIntentsResponse(proto.Message):
r"""The response message for
[Intents.ListIntents][google.cloud.dialogflow.cx.v3beta1.Intents.ListIntents].
Attributes:
intents (Sequence[google.cloud.dialogflowcx_v3beta1.types.Intent]):
The list of intents. There will be a maximum number of items
returned based on the page_size field in the request.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
intents = proto.RepeatedField(proto.MESSAGE, number=1,
message='Intent',
)
next_page_token = proto.Field(proto.STRING, number=2)
class GetIntentRequest(proto.Message):
r"""The request message for
[Intents.GetIntent][google.cloud.dialogflow.cx.v3beta1.Intents.GetIntent].
Attributes:
name (str):
Required. The name of the intent. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>``.
language_code (str):
The language to retrieve the intent for. The following
fields are language dependent:
- ``Intent.training_phrases.parts.text``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
name = proto.Field(proto.STRING, number=1)
language_code = proto.Field(proto.STRING, number=2)
class CreateIntentRequest(proto.Message):
r"""The request message for
[Intents.CreateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.CreateIntent].
Attributes:
parent (str):
Required. The agent to create an intent for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
intent (google.cloud.dialogflowcx_v3beta1.types.Intent):
Required. The intent to create.
language_code (str):
The language of the following fields in ``intent``:
- ``Intent.training_phrases.parts.text``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
parent = proto.Field(proto.STRING, number=1)
intent = proto.Field(proto.MESSAGE, number=2,
message='Intent',
)
language_code = proto.Field(proto.STRING, number=3)
class UpdateIntentRequest(proto.Message):
r"""The request message for
[Intents.UpdateIntent][google.cloud.dialogflow.cx.v3beta1.Intents.UpdateIntent].
Attributes:
intent (google.cloud.dialogflowcx_v3beta1.types.Intent):
Required. The intent to update.
language_code (str):
The language of the following fields in ``intent``:
- ``Intent.training_phrases.parts.text``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The mask to control which fields get updated.
If the mask is not present, all fields will be
updated.
"""
intent = proto.Field(proto.MESSAGE, number=1,
message='Intent',
)
language_code = proto.Field(proto.STRING, number=2)
update_mask = proto.Field(proto.MESSAGE, number=3,
message=field_mask.FieldMask,
)
class DeleteIntentRequest(proto.Message):
r"""The request message for
[Intents.DeleteIntent][google.cloud.dialogflow.cx.v3beta1.Intents.DeleteIntent].
Attributes:
name (str):
Required. The name of the intent to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>``.
"""
name = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
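# Illustrative aside (not part of the generated file): proto-plus messages are
# constructed with keyword arguments, so an annotated intent can be built as
# (all values below are placeholders):
#
#     intent = Intent(
#         display_name="order.pizza",
#         training_phrases=[
#             Intent.TrainingPhrase(
#                 parts=[Intent.TrainingPhrase.Part(text="I want a pizza")],
#                 repeat_count=1,
#             )
#         ],
#     )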
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
d7e6fb902bb4d82e45d61c4cff79935749eb6882 | 60f75884ced267a5f0f09a0b43f68e7d8c5c7a14 | /tester/test_handlers/test_page_handler.py | 25b3ed3218c1b9b75b71fae6e6b25697c3bb7901 | [
"MIT"
]
| permissive | liusheng6982/TorCMS | b0fa1fe96a814c10dc7163b127672e1076d19e02 | cb5ee651ece0cff28eae1dcde9013edf28387073 | refs/heads/master | 2021-01-19T04:31:54.221405 | 2017-04-04T11:41:50 | 2017-04-04T11:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # -*- coding:utf-8 -*-
from torcms.handlers.page_handler import PageHandler
from torcms.handlers.page_ajax_handler import PageAjaxHandler
def Test():
    urls = [
        ("/label/(.*)", PageAjaxHandler, dict()),
        ("/label/(.*)", PageHandler, dict()),
    ]
    assert urls
| [
"[email protected]"
]
| |
9adcc12b7ba970cf3f19bbad83bbd0ecb835aa85 | f15c8b3a6b093c3b70a900221f485d74a1bc1f95 | /0_joan_stark/golf.py | 2f646ee459477d0f80708844a426c3b1cdd2b1bf | [
"MIT"
]
| permissive | wang0618/ascii-art | 2955023e47b988f491b9d46bc8a300ba4a6cdd60 | 7ce6f152541716034bf0a22d341a898b17e2865f | refs/heads/master | 2023-07-17T23:17:31.187906 | 2021-09-04T12:46:31 | 2021-09-04T12:46:31 | 400,987,004 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,978 | py | # Hole in One!
# https://web.archive.org/web/20000307135811/http://geocities.com/SoHo/Gallery/6446/golfanim.htm
duration = 200
name = "Golf"
frames = [
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" /|\\o |\n" +
" | | |\n" +
" ,|/| |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" |>18>>\n" +
" |\n" +
" __O |\n" +
" / /\\o |\n" +
" ,/ | |\n" +
" | |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" |>18>>\n" +
" |\n" +
" |\n" +
" __O |\n" +
" \\ \\ |\n" +
" / o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" __O |\n" +
" / /\\ |\n" +
" ,/ |\\ |\n" +
" |/ o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" |\\ |\n" +
" /\\| |\n" +
" / ||o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" |\\ |\n" +
" |\\| |\n" +
" / ||o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" /> |\n" +
" //\\ |\n" +
" ,// / o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" ,___/| |\n" +
" /\\ |\n" +
" / / o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" `\\ |>18>>\n" +
" \\ |\n" +
" <<O |\n" +
" | |\n" +
" |\\ |\n" +
" / | o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" /` |>18>>\n" +
" / |\n" +
" <<O |\n" +
" \\ |\n" +
" /\\ |\n" +
" / / o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" /` |>18>>\n" +
" / |\n" +
" <<O |\n" +
" \\ |\n" +
" /\\ |\n" +
" / / o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" /` |>18>>\n" +
" / |\n" +
" <<O |\n" +
" \\ |\n" +
" /\\ |\n" +
" / / o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" `\\ |>18>>\n" +
" \\ |\n" +
" <<O |\n" +
" | |\n" +
" |\\ |\n" +
" / | o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" ,___/| |\n" +
" /\\ |\n" +
" / / o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" |\\ |\n" +
" |\\| |\n" +
" / ||o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" `/ |\n" +
" O__/ |\n" +
" \\-` o |\n" +
" /\\ . |\n" +
" / / .' |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" '\\ . o |>18>>\n" +
" \\ . |\n" +
" O>> . |\n" +
" \\ . |\n" +
" /\\ . |\n" +
" / / .' |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" '\\ . . |>18>>\n" +
" \\ . ' . |\n" +
" O>> . 'o |\n" +
" \\ . |\n" +
" /\\ . |\n" +
" / / .' |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" '\\ . . |>18>>\n" +
" \\ . ' . |\n" +
" O>> . ' |\n" +
" \\ . ' . |\n" +
" /\\ . . |\n" +
" / / .' o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" '\\ . . |>18>>\n" +
" \\ . ' . |\n" +
" O>> . ' |\n" +
" \\ . ' . |\n" +
" /\\ . . . o |\n" +
" / / .' . |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" '\\ . . |>18>>\n" +
" \\ . ' . |\n" +
" O>> . ' |\n" +
" \\ . ' . |\n" +
" /\\ . . . ' . |\n" +
" / / .' . o |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" '\\ . . |>18>>\n" +
" \\ . ' . |\n" +
" O>> . ' |\n" +
" \\ . ' . |\n" +
" /\\ . . . ' . |\n" +
" / / .' . . o\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" '\\ . . |>18>>\n" +
" \\ . ' . |\n" +
" O>> . ' |\n" +
" \\ . ' . |\n" +
" /\\ . . . ' . |\n" +
" / / .' . . .|\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" `/ . . |>18>>\n" +
" / . ' . |\n" +
" \\O/ . ' |\n" +
" | . ' . |\n" +
" /\\ . . . ' . |\n" +
" / | .' . . .|\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" `/ . . |>18>>\n" +
" / . ' . |\n" +
" __O/ . ' |\n" +
" | . ' . |\n" +
" /\\ . . . ' . |\n" +
" | \\ .' . . .|\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" . . |>18>>\n" +
" `/ . ' . |\n" +
" O__/ . ' |\n" +
" /| . ' . |\n" +
" /\\ . . . ' . |\n" +
" / / .' . . .|\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" \\O |\n" +
" |\\ |\n" +
" /\\\\ |\n" +
" / | \\, |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" /|\\ |\n" +
" |\\\\ |\n" +
" / | \\, |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" yipee! |>18>>\n" +
" |\n" +
" \\O |\n" +
" |\\ |\n" +
" /\\\\ |\n" +
" / | \\, |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" yipee! |>18>>\n" +
" |\n" +
" O |\n" +
" /|\\ |\n" +
" |\\\\ |\n" +
" / | \\, |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" yipee! |>18>>\n" +
" |\n" +
" O |\n" +
" /|\\ |\n" +
" / |\\ |\n" +
" /,/ | |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ",
" \n" +
" |>18>>\n" +
" |\n" +
" O |\n" +
" /|\\o |\n" +
" | | |\n" +
" ,|/| |\n" +
" jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ "
]
| [
"[email protected]"
]
| |
efe1227c94da541154a41caa2ddbf5eddd02211b | 6371acdb640e62e4e6addac2ba1aa70002a8c1b1 | /Algorithms/pySINDy/pySINDy/sindybase.py | b8faf881ff1a71bfc9d1b60c2251129f08263c46 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | M-Vause/SEED | 263307152ebac1e4f49cd81dcd5207ecbdf51139 | cda94a02a5ef47a1e9a885d330eef2821301ebed | refs/heads/master | 2022-12-13T20:11:58.893994 | 2020-04-27T16:10:09 | 2020-04-27T16:10:09 | 252,790,026 | 3 | 3 | MIT | 2022-12-08T01:52:05 | 2020-04-03T16:55:10 | Jupyter Notebook | UTF-8 | Python | false | false | 18,773 | py | """
Base Module for SINDy: 'fit' method must be implemented in inherited classes
"""
import numpy as np
import matplotlib.pyplot as plt
class SINDyBase(object):
"""
Sparse Identification of Nonlinear Dynamics base class
"""
def __init__(self, name='SINDy model'):
self.name = name
self._coef = None
self._desp = None
@property
def coefficients(self):
"""
:return: get the coefficients of the model
"""
return self._coef
@property
def descriptions(self):
"""
:return: get the items we need to fit the data
"""
return self._desp
@property
def plot_coefficients(self):
"""
:return: plot of the coefficients
"""
SINDyBase.plot(self._coef.T, self._desp)
def fit(self, data):
"""
Abstract method to fit the snapshot matrix, it has to be
implemented in subclasses
:param data: the snapshot matrix
:return: None
"""
raise NotImplementedError('Subclass must implement abstract method {}.fit'.format(
self.__class__.__name__))
@staticmethod
def finite_difference(data, _dx, order=1, dim=0):
"""
Take derivative using 2nd order finite difference method
:param data: a tensor to be differentiated
:param _dx: grid spacing, assume to be uniform
:param order: the order of the derivative to be applied
:param dim: the dimension to be taken the derivative
:return: a tensor after differentiation
"""
data = np.squeeze(data)
if dim >= data.ndim:
raise ValueError('The selected dim should be less than #of dimensions of data!')
data_shape = data.shape
_n = data_shape[dim]
idxs = [slice(None)]*len(data_shape)
data_dx = np.zeros(data_shape)
if order == 1:
            # index with tuple(idxs): indexing an ndarray with a list of
            # slices is rejected by modern NumPy
            for i in np.arange(1, _n-1):
                idxs[dim] = i
                data_dx[tuple(idxs)] = (np.take(data, i+1, dim) -
                                        np.take(data, i-1, dim))/(2*_dx)
            idxs[dim] = 0
            data_dx[tuple(idxs)] = (-3.0/2*np.take(data, 0, dim) + 2*np.take(data, 1, dim) -
                                    np.take(data, 2, dim)/2)/_dx
            idxs[dim] = _n - 1
            data_dx[tuple(idxs)] = (3.0/2*np.take(data, _n-1, dim) - 2*np.take(data, _n-2, dim) +
                                    np.take(data, _n-3, dim)/2)/_dx
elif order == 2:
            for i in np.arange(1, _n-1):
                idxs[dim] = i
                data_dx[tuple(idxs)] = (np.take(data, i+1, dim) - 2*np.take(data, i, dim) +
                                        np.take(data, i-1, dim))/_dx**2
            idxs[dim] = 0
            data_dx[tuple(idxs)] = (2*np.take(data, 0, dim) - 5*np.take(data, 1, dim) +
                                    4*np.take(data, 2, dim) - np.take(data, 3, dim))/_dx**2
            idxs[dim] = _n - 1
            data_dx[tuple(idxs)] = (2*np.take(data, _n-1, dim) - 5*np.take(data, _n-2, dim) +
                                    4*np.take(data, _n-3, dim) - np.take(data, _n-4, dim))/_dx**2
elif order == 3:
            for i in np.arange(2, _n-2):
                idxs[dim] = i
                data_dx[tuple(idxs)] = (np.take(data, i+2, dim)/2 - np.take(data, i+1, dim) +
                                        np.take(data, i-1, dim) - np.take(data, i-2, dim)/2)/_dx**3
            idxs[dim] = 0
            data_dx[tuple(idxs)] = (-2.5*np.take(data, 0, dim) + 9*np.take(data, 1, dim) -
                                    12*np.take(data, 2, dim) + 7*np.take(data, 3, dim) -
                                    1.5*np.take(data, 4, dim))/_dx**3
            idxs[dim] = 1
            data_dx[tuple(idxs)] = (-2.5*np.take(data, 1, dim) + 9*np.take(data, 2, dim) -
                                    12*np.take(data, 3, dim) + 7*np.take(data, 4, dim) -
                                    1.5*np.take(data, 5, dim))/_dx**3
            idxs[dim] = _n - 1
            data_dx[tuple(idxs)] = (2.5*np.take(data, _n-1, dim) - 9*np.take(data, _n-2, dim) +
                                    12*np.take(data, _n-3, dim) - 7*np.take(data, _n-4, dim) +
                                    1.5*np.take(data, _n-5, dim))/_dx**3
            idxs[dim] = _n - 2
            data_dx[tuple(idxs)] = (2.5*np.take(data, _n-2, dim) - 9*np.take(data, _n-3, dim) +
                                    12*np.take(data, _n-4, dim) - 7*np.take(data, _n-5, dim) +
                                    1.5*np.take(data, _n-6, dim))/_dx**3
elif order > 3:
return SINDyBase.finite_difference(SINDyBase.finite_difference(data, _dx, 3, dim),
_dx, order-3, dim)
else:
raise ValueError('order of the derivative should be a positive integer!')
return data_dx
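    # Illustrative usage sketch (added for clarity; the method below is not
    # part of the original pySINDy API): differentiate sin(x) on a uniform
    # grid and compare against the exact derivative cos(x).
    @staticmethod
    def _example_finite_difference():
        x = np.linspace(0, 2 * np.pi, 101)
        dudx = SINDyBase.finite_difference(np.sin(x), x[1] - x[0], order=1)
        assert np.allclose(dudx, np.cos(x), atol=1e-2)
        return dudx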
@staticmethod
def pointwise_polynomial_difference(data, xgrid, order=1, degree=2, index=None):
"""
:param data: a 1D flattened vector represents nearby function values
:param xgrid: grid information
        :param order: the order(s) of the derivative to be applied
        :param degree: degree of polynomial to use
        :param index: index within the local window at which to evaluate the derivative
:return: value of derivative at this point
"""
if isinstance(order, int):
order = [order]
data = data.flatten()
_n = len(data)
if index is None:
index = int((_n - 1)/2)
# Fit to a Chebyshev polynomial
poly = np.polynomial.chebyshev.Chebyshev.fit(xgrid, data, degree)
return np.array([poly.deriv(m=order[i])(xgrid[index]) for i in np.arange(len(order))])
@staticmethod
def polynomial_difference(data, xgrid, order=1, dim=0, degree=2):
"""
Taking derivatives using Chebyshev polynomial interpolation
:param data: a tensor to be differentiated
:param xgrid: grid information
:param order: an integer, or a list of orders of the derivative to be applied
:param dim: the dimension to be taken the derivative
:param degree: degree of polynomials to be used for interpolation
:return: a list of tensors after differentiation, same length of order
"""
data = np.squeeze(data)
if dim >= data.ndim:
raise ValueError('The selected dim should be less than #of dimensions of data!')
if dim < 0:
dim = data.ndim + dim
if isinstance(order, int):
order = [order]
data_shape = data.shape
_n = data_shape[dim]
idxs = [slice(None)]*len(data_shape)
new_data_shape = list(data_shape)
data_slice_shape = list(data_shape)
new_data_shape[dim] -= 2*degree
data_slice_shape[dim] = 1
        data_dx = [np.zeros(tuple(new_data_shape)) for _ in np.arange(len(order))]  # one buffer per order, not aliases
if _n != len(xgrid):
raise ValueError('Grids information does not match with the data!')
for j in np.arange(degree, _n - degree):
pts = np.arange(j - degree, j + degree)
idxs[dim] = slice(j - degree, j + degree)
pos = (dim, ) + tuple(np.arange(0, dim)) + tuple(np.arange(dim+1, data.ndim))
            batch_data = np.transpose(data[tuple(idxs)], pos).reshape((2*degree, -1))
data_dx_tmp = np.zeros((1, batch_data.shape[1], len(order)))
for k in np.arange(batch_data.shape[1]):
deriv = SINDyBase.pointwise_polynomial_difference(batch_data[:, k].flatten(),
xgrid[pts], order=order,
degree=degree)
data_dx_tmp[0, k, :] = deriv
for i in np.arange(len(order)):
idxs[dim] = j - degree
                data_dx[i][tuple(idxs)] = np.squeeze(data_dx_tmp[..., i].reshape(tuple(data_slice_shape)))
if len(order) == 1:
return data_dx[0]
return data_dx
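    # Illustrative usage sketch (added for clarity; the method below is not
    # part of the original pySINDy API): the Chebyshev-based derivative trims
    # `degree` points from each end of the differentiated axis. A linear
    # input keeps the check exact regardless of where the window is evaluated.
    @staticmethod
    def _example_polynomial_difference():
        x = np.linspace(0, 1, 50)
        du = SINDyBase.polynomial_difference(2 * x + 1, x, order=1, degree=3)
        assert du.shape[0] == 50 - 2 * 3
        assert np.allclose(du, 2.0)
        return du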
@staticmethod
def get_poly_exponents(nfeat, degree=1):
"""
:param nfeat: number of original features
:param degree: maximum degree of the polynomials
:return: a 2D array consists of the exponents
"""
if nfeat == 0:
yield ()
else:
for _x in np.arange(degree+1):
for _t in SINDyBase.get_poly_exponents(nfeat - 1, degree):
if sum(_t) + _x <= degree:
yield _t + (_x,)
@staticmethod
def get_ordered_poly_exponents(nfeat, degree=1, remove_zero_order=False):
"""
:param nfeat: number of original features
:param degree: maximum degree of the polynomials
:param remove_zero_order: boolean value, indicate whether to remove the zero order term
:return: a 2D array consists of ordered exponents according to the sum
"""
exponents = np.array(list(SINDyBase.get_poly_exponents(nfeat, degree)))
all_exponents = exponents[np.argsort(np.sum(exponents, axis=1))]
if remove_zero_order:
return all_exponents[1:, :]
return all_exponents
@staticmethod
def polynomial_expansion(data, degree=1, remove_zero_order=False, var_names=None):
"""
:param data: a 2D numpy array of original features stored in each column
:param degree: degree of polynomials of features to be expanded
:param remove_zero_order: boolean value, indicate whether to remove the zero order term
:param var_names: variable names, default as None
:return: a tensor consists of extended features, and corresponding descriptions
"""
if len(data.shape) == 1:
data = data[:, np.newaxis]
if len(data.shape) > 2:
raise ValueError("The input array is not 2D!")
# extended features
nfeat = data.shape[-1]
exponents = SINDyBase.get_ordered_poly_exponents(nfeat, degree, remove_zero_order)
result = np.array([np.prod([data[:, k] ** e[k] for k in np.arange(nfeat)],
axis=0) for e in exponents]).T
# descriptions of each extended feature
desp = SINDyBase.exponent_to_description(exponents, 'sup', remove_zero_order,
var_names=var_names)
return result, desp
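    # Illustrative usage sketch (added for clarity; the method below is not
    # part of the original pySINDy API): two features expanded to degree 2
    # yield the six monomials 1, u0, u1, u0^2, u0*u1, u1^2.
    @staticmethod
    def _example_polynomial_expansion():
        data = np.random.rand(100, 2)
        theta, desp = SINDyBase.polynomial_expansion(data, degree=2)
        assert theta.shape == (100, 6)
        assert desp[0] == '1'  # the constant column is ordered first
        return theta, desp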
@staticmethod
def threshold_ls(mtx, _b, cut_off=1e-3, max_iter=10, normalize=0):
"""
Find the sparse coefficients of fit using threshold least squares
:param mtx: the training theta matrix of shape (M, N)
:param _b: a vector or an array of shape (M,) or (M, K)
:param cut_off: the threshold cutoff value
:param max_iter: # of iterations
:param normalize: normalization methods, default as 0 (no normalization)
:return: coefficients of fit
"""
if len(_b.shape) == 1:
_b = _b[:, np.newaxis]
dim = _b.shape[-1]
# normalize each column of mtx
if normalize != 0:
w_col_norms = np.linalg.norm(mtx, ord=normalize, axis=0)
b_col_norms = np.linalg.norm(_b, ord=normalize, axis=0)
mtx = mtx / w_col_norms[np.newaxis, :]
_b = _b / b_col_norms[np.newaxis, :]
_w = np.linalg.lstsq(mtx, _b, rcond=None)[0]
for _ in np.arange(max_iter):
small_inds = np.abs(_w) <= cut_off
_w[small_inds] = 0
if np.all(np.sum(np.abs(_w), axis=0)):
for ind in np.arange(dim):
big_inds = ~small_inds[:, ind]
_w[big_inds, ind] = np.linalg.lstsq(mtx[:, big_inds], _b[:, ind], rcond=None)[0]
else:
break
        if normalize != 0:
            # map the coefficients back to the un-normalized problem:
            # w = w_normalized * ||b_col|| / ||theta_col||
            _w = _w / w_col_norms[:, np.newaxis]
            _w = _w * b_col_norms[np.newaxis, :]
return _w
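    # Illustrative usage sketch (added for clarity; the method below is not
    # part of the original pySINDy API): sequentially thresholded least
    # squares recovers a sparse coefficient vector from noiseless data.
    @staticmethod
    def _example_threshold_ls():
        rng = np.random.RandomState(0)
        theta = rng.randn(200, 5)
        w_true = np.array([[0.], [2.], [0.], [-1.], [0.]])
        w = SINDyBase.threshold_ls(theta, theta.dot(w_true), cut_off=0.5)
        assert np.allclose(w, w_true, atol=1e-8)
        return w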
@staticmethod
def sparsify_dynamics(mtx, _b, init_tol, max_iter=25, thresh_iter=10,
l0_penalty=None, split=0.8, normalize=0):
"""
:param mtx: the theta matrix of shape (M, N)
:param _b: a vector or an array of shape (M,) or (M, K)
:param init_tol: maximum tolerance (cut_off value)
:param max_iter: maximum iteration of the outer loop
:param thresh_iter: maximum iteration for threshold least squares
:param l0_penalty: penalty factor for nonzero coefficients
:param split: proportion of the training set
:param normalize: normalization methods, default as 0 (no normalization)
:return: the best coefficients of fit
"""
if mtx.ndim != 2:
raise ValueError('mtx is not a 2D numpy array!')
if _b.ndim == 1:
_b = _b[:, np.newaxis]
elif _b.ndim > 2:
raise ValueError('b is not a 1D/2D numpy array!')
# split the data
np.random.seed(12345)
_n = mtx.shape[0]
train = np.random.choice(_n, int(_n*split), replace=False)
test = [x for x in np.arange(_n) if x not in train]
train_mtx = mtx[train, :]
test_mtx = mtx[test, :]
train_b = _b[train, :]
test_b = _b[test, :]
# set up initial tolerance, l0 penalty, best error, etc.
if l0_penalty is None:
# l0_penalty = 0.001*np.linalg.cond(mtx)
l0_penalty = np.linalg.norm(test_b) / len(test)
tol = d_tol = float(init_tol)
# no sparsity constraints
w_best = np.linalg.lstsq(train_mtx, train_b, rcond=None)[0]
err_best = np.linalg.norm(test_b - test_mtx.dot(w_best), 2) + \
l0_penalty*np.count_nonzero(w_best)
tol_best = 0.
imp_flag = True
for i in np.arange(max_iter):
_w = SINDyBase.threshold_ls(train_mtx, train_b, tol, thresh_iter, normalize)
err = np.linalg.norm(test_b - test_mtx.dot(_w), 2) + l0_penalty*np.count_nonzero(_w)
if err < err_best:
err_best = err
w_best = _w
tol_best = tol
tol += d_tol
imp_flag = False
else:
# tol = max([0, tol - d_tol])
tol = max([0, tol - 2*d_tol])
# d_tol /= 2
d_tol = 2 * d_tol/(max_iter - i)
tol = tol + d_tol
if imp_flag:
            print('cutoff value may be too small or too large for thresholding ...')
return w_best, tol_best
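    # Illustrative usage sketch (added for clarity; the method below is not
    # part of the original pySINDy API): sparsify_dynamics uses a train/test
    # split to pick the cutoff automatically instead of fixing it by hand.
    @staticmethod
    def _example_sparsify_dynamics():
        rng = np.random.RandomState(1)
        theta = rng.randn(300, 5)
        w_true = np.array([[0.], [2.], [0.], [-1.], [0.]])
        w_best, tol_best = SINDyBase.sparsify_dynamics(theta, theta.dot(w_true),
                                                       init_tol=0.1)
        assert np.allclose(w_best, w_true, atol=1e-6)
        return w_best, tol_best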
@staticmethod
def exponent_to_description(exponents, typ='sup', remove_zero_order=False, as_dict=False,
var_names=None):
"""
:param exponents: a 2D numpy array of exponents
:param typ: a string, can be either 'sup' (superscript) or 'sub' (subscript)
:param remove_zero_order: boolean value, indicate whether to remove the zero order term
:param as_dict: whether to include exponents in the descriptions as a dict
:param var_names: variable name, default to be None
:return: a list or a dict (depends on 'as_dict') of descriptions of corresponding exponents
"""
if not isinstance(exponents, np.ndarray) or exponents.ndim != 2:
raise ValueError("exponents must be a 2D numpy array!")
desp = []
desp_dict = {}
_m, _n = exponents.shape
if typ == 'sup':
if var_names is not None:
assert isinstance(var_names, list), "var_names must be a list of strings when " \
"typ =='sup'!"
assert len(var_names) == _n, "length of var_names doesn't match with exponents!"
else:
var_names = ['u%d' % i for i in np.arange(_n)]
for i in np.arange(_m):
if np.any(exponents[i, :]):
# exist nonzero element
key = ''
for j in np.arange(_n):
if exponents[i, j] == 1:
key += var_names[j]
elif exponents[i, j]:
key += (var_names[j] + '^{%d}' % exponents[i, j])
desp.append(key)
desp_dict[key] = exponents[i, :].tolist()
elif not remove_zero_order:
key = '1'
desp.append(key)
desp_dict[key] = exponents[i, :].tolist()
elif typ == 'sub':
# name of each dimension
# (with xyz coordinates as default except for higher dimensional cases)
if var_names is not None:
assert isinstance(var_names, str), "var_names must be of type str when " \
"typ == 'sub'!"
else:
var_names = 'u'
if _n == 1:
dim_strs = ['x']
elif _n == 2:
dim_strs = ['x', 'y']
elif _n == 3:
dim_strs = ['x', 'y', 'z']
else:
dim_strs = ['x%d' % i for i in np.arange(_n)]
for i in np.arange(_m):
if np.any(exponents[i, :]):
# exist nonzero element
key = (var_names + '_{')
for j in np.arange(_n):
key += dim_strs[j]*exponents[i, j]
key += '}'
desp.append(key)
desp_dict[key] = exponents[i, :].tolist()
elif not remove_zero_order:
                    key = var_names
desp.append(key)
desp_dict[key] = exponents[i, :].tolist()
else:
raise ValueError("type argument should be either 'sub' or 'sup'!")
# which type of description to return
if as_dict:
return desp_dict
return desp
@staticmethod
def plot(coe, desp):
"""
:param coe: coefficients to be plotted
:param desp: descriptions of data
:return: a plot of coefficients with corresponding description
"""
idx = np.ones((coe.shape), dtype=bool)
_mm, _nn = coe.shape
for i in range(_nn):
vec = coe[:, i]
if np.all(vec == 0):
idx[:, i] = 0
_coe = coe[idx].reshape(_mm, -1)
_desp = []
for i in range(_nn):
if idx[0, i] == 1:
_desp.append(desp[i])
_m, _n = _coe.shape
width = 1 / 1.5
plt.figure(num=None, figsize=(40, 5), dpi=80, facecolor='w', edgecolor='k')
for i in range(_m):
plt.subplot(_m, _m, _m * i + 1)
plt.bar(range(_n), _coe[i], width)
plt.ylabel('value')
plt.xticks(range(_n), _desp)
| [
"[email protected]"
]
| |
845e06146026e7a00fd10824220dd35e50e2ccab | 127d8c209b00978f4f660534363e95eca3f514f2 | /backend/home/migrations/0002_load_initial_data.py | 110b21901b630cf3f96ad807523e091bfc8ac157 | []
| no_license | crowdbotics-apps/sitespace-19938 | afd070e64d32ab455f9b2b05e376152e9e28e5ad | 416b5cef0bdb25018ec3b634bf3096e61fe8b662 | refs/heads/master | 2022-12-10T15:52:58.601025 | 2020-09-02T15:20:15 | 2020-09-02T15:20:15 | 292,319,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Sitespace"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">Sitespace</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "sitespace-19938.botics.co"
site_params = {
"name": "Sitespace",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
]
| |
348fc47cef3dc9dc96c748af7cf91394fd8222e7 | 2d7c21a793c8080a090ce8c9f05df38f6477c7c7 | /tests/data_templates/test_field_definitions.py | c4f05eb9f57000460d8661f4d47b2a554f7826ea | [
"Apache-2.0"
]
| permissive | kids-first/kf-api-study-creator | c40e0a8a514fd52a857e9a588635ef76d16d5bc7 | ba62b369e6464259ea92dbb9ba49876513f37fba | refs/heads/master | 2023-08-17T01:09:38.789364 | 2023-08-15T14:06:29 | 2023-08-15T14:06:29 | 149,347,812 | 3 | 0 | Apache-2.0 | 2023-09-08T15:33:40 | 2018-09-18T20:25:38 | Python | UTF-8 | Python | false | false | 5,204 | py | import os
import json
import pytest
import pandas
from marshmallow import ValidationError
from pprint import pprint
from creator.data_templates.models import TemplateVersion
from creator.data_templates.field_definitions_schema import (
coerce_number,
coerce_bool,
FieldDefinitionSchema,
FieldDefinitionsSchema
)
@pytest.mark.parametrize(
"in_value, expected_out",
[
("0.0", 0.0),
(0.0, 0.0),
("0", 0),
(0, 0),
("10.0", 10),
(10.0, 10),
("200", 200),
(200, 200),
("1.234", 1.234),
(1.234, 1.234),
("foo", "foo"),
(None, None),
]
)
def test_coerce_number(in_value, expected_out):
"""
Test helper function that coerces strings to float/int
"""
assert coerce_number(in_value) == expected_out
@pytest.mark.parametrize(
"in_value, expected_out",
[
(True, True),
(False, False),
("foo", "foo"),
("0.0", False),
("1", True),
("True", True),
("FALSE", False),
("Yes", True),
("no", False),
("Required", True),
("Not Required", False),
(None, False),
]
)
def test_coerce_bool(in_value, expected_out):
"""
Test helper function that coerces strings to booleans
"""
assert coerce_bool(in_value) == expected_out
def test_schema_clean():
"""
Test FieldDefinitionSchema.clean method
"""
schema = FieldDefinitionSchema()
# Test keys are all snake cased
in_data = {
"Label": None,
"Data Type": None,
}
out_data = schema.clean(in_data)
assert {"label", "data_type"} == set(out_data.keys())
# Test data_type default
assert out_data["data_type"] == "string"
# Test data_type casing
in_data["data_type"] = "Number"
out_data = schema.clean(in_data)
assert out_data["data_type"] == "number"
# Test accepted_values
in_data["accepted_values"] = None
out_data = schema.clean(in_data)
assert out_data["accepted_values"] is None
in_data["data_type"] = "foobar"
in_data["accepted_values"] = "1.0, 2.0, 3.0"
out_data = schema.clean(in_data)
assert out_data["accepted_values"] == ["1.0", "2.0", "3.0"]
assert out_data["data_type"] == "enum"
# Test missing values
in_data["missing_values"] = None
out_data = schema.clean(in_data)
assert out_data["missing_values"] is None
in_data["missing_values"] = "None, Unknown"
out_data = schema.clean(in_data)
assert ["None", "Unknown"] == out_data["missing_values"]
# Test empty strings handled properly
in_data["accepted_values"] = " "
in_data["missing_values"] = ""
in_data["required"] = " "
in_data["data_type"] = " "
out_data = schema.clean(in_data)
assert out_data["accepted_values"] is None
assert out_data["missing_values"] is None
assert out_data["required"] == False # noqa
assert out_data["data_type"] == "string"
def test_validation_error():
"""
Test custom handling of validation errors
"""
in_fields = {
"fields": [
{
"Key": "person.id",
"Label": "Person ID",
# Missing description, but has required keys
},
{
"Key": "specimen.id",
"Description": "Identifier for specimen"
# Missing label but has other required keys
}
]
}
schema = FieldDefinitionsSchema()
# Test custom validation message
with pytest.raises(ValidationError) as e:
schema.load(in_fields)
errors = e.value.messages[0]
assert "fields" not in errors
assert "Field Definition [1]" in errors
assert "Field Definition [Person ID]" in errors
# Test normal validation message
with pytest.raises(ValidationError) as e:
schema.load("foo")
assert {'_schema': ['Invalid input type.']} == e.value.messages
def test_schema_load():
"""
End to end test using the field definitions schema to clean and validate
input data
"""
in_fields = {
"fields": [
{
"Key": "person.id",
"Label": "Person ID",
"Description": "Identifier for person"
},
{
"Key": "specimen.id",
"Label": "Specimen ID",
"Description": "Identifier for specimen"
}
]
}
schema = FieldDefinitionsSchema()
data = schema.load(in_fields)
out_fields = data["fields"]
# Check version
assert data["schema_version"]["number"] == schema.SCHEMA_VERSION["number"]
# Check all fields are in output
assert len(out_fields) == len(in_fields["fields"])
# Check that defaults were set right and all components of a field
# definition are present in each field definition instance
for out in out_fields:
assert set(FieldDefinitionsSchema.key_order) == set(out.keys())
assert out["data_type"] == "string"
assert out["required"] == False # noqa
assert out["accepted_values"] is None
        assert out["instructions"] is None
| [
"[email protected]"
]
| |
19caab41b1e7e5822d71d8e70217b1ac6dda3b67 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_005_20180620141234.py | 396d0dea7f396d2fdc9165bfceb7cd75b20f3c37 | []
| no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,622 | py | from random import randint
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, 6, 1, 4, 3, 7, 8, " "]
]
sudoku2 = [
[9, 8, 7, 4, 3, 2, 5, 6, 1],
[2, 4, 3, 5, 1, 6, 8, 7, 9],
[5, 6, 1, 7, 9, 8, 4, 3, 2],
[3, 9, 5, 6, 4, 7, 2, 1, 8],
[8, 2, 4, 3, 5, 1, 6, 9, 7],
[1, 7, 6, 2, 8, 9, 3, 4, 5],
[7, 1, 2, 8, 6, 3, 9, 5, 4],
[4, 3, 8, 9, 7, 5, 1, 2, 6],
[' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
sudoku3 = [
[9, 8, 7, 4, 3, 2, 5, 6, 1],
[2, 4, 3, 5, 1, 6, 8, 7, 9],
[5, 6, 1, 7, 9, 8, 4, 3, 2],
[3, 9, 5, 6, 4, 7, 2, 1, 8],
[8, 2, 4, 3, 5, 1, 6, 9, 7],
[1, 7, 6, 2, 8, 9, 3, 4, 5],
[7, 1, 2, 8, 6, 3, 9, 5, 4],
[4, 3, 8, 9, 7, 5, 1, 2, 6],
[' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
def printSudoku():
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
line = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
i = i + 1
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
s = 0
if choice == "R" or choice == "r":
    listaSudoku = [sudoku1, sudoku2, sudoku3]
    sudoku_number = randint(0, 2)
    print("random puzzle number:", sudoku_number + 1)
    s = sudoku_number + 1
    sudoku = [row[:] for row in listaSudoku[sudoku_number]]  # copy, so reset can restore the template
    # print("which =", sudoku)
elif int(choice) == 1:
    s = 1
    sudoku = [row[:] for row in sudoku1]
elif int(choice) == 2:
    s = 2
    sudoku = [row[:] for row in sudoku2]
elif int(choice) == 3:
    s = 3
    sudoku = [row[:] for row in sudoku3]
while True: # prints Sudoku until it is solved
print("Your sudoku to solve:")
printSudoku()
print("Input 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
    print(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
    numbers = " 0123456789" # allowed characters for validating the input format
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r":  # reset the chart to its starting layout
            if s == 1:
                sudoku = [row[:] for row in sudoku1]
            elif s == 2:
                sudoku = [row[:] for row in sudoku2]
            elif s == 3:
                sudoku = [row[:] for row in sudoku3]
            print(" Chart reset to start \n ")
        else:
            print("Error - wrong number format \n ")
        continue
sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
    # completeness check: every row and every column must sum to 45
    # (a necessary condition only - it does not verify digit uniqueness)
try:
i = 0
list = []
while i < 9:
column = 0
for item in sudoku:
column = column + item[i]
list.append(column)
            # print(list)
            # print("Column sum", i, "=", column)
i += 1
is45 = 0
for listElement in list:
if listElement == 45:
is45 = is45 + 1
# print("Ile kolumen OK", is45)
i = 0
for item in sudoku:
if sum(item) == 45 and is45 == 9:
i = i + 1
if i == 9:
printSudoku()
print("@@@@@@@@@@ YOU WIN @@@@@@@@@@")
break
except TypeError:
print()
| [
"[email protected]"
]
| |
ba508a2958f5325258855671103405bc641ebe97 | a5e591dc09e11e88af56fb5a881fae064fb9c495 | /recruitment/recruitment/doctype/interview/interview.py | 0449ed7ff48f9261f3c429e7522f6aad25c3b49d | [
"MIT"
]
| permissive | barathprathosh/recruitment | 6b61dd1ee9c0b9d7851b0b3e5bab307f7ee2d1b5 | 9660944856e72288e47960e6802ec97a220a656d | refs/heads/master | 2020-04-29T03:03:51.722972 | 2019-03-15T08:58:32 | 2019-03-15T08:58:32 | 175,794,797 | 0 | 0 | NOASSERTION | 2019-03-15T10:00:32 | 2019-03-15T10:00:31 | null | UTF-8 | Python | false | false | 250 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Interview(Document):
pass
| [
"[email protected]"
]
| |
cac8cca8bbafc756a771cbbd21f316a640e98cd7 | 6b4a48fb6142789326654c48d32acda3eb5e7b08 | /formationproject/wsgi.py | a9ea3c0a982ffb7af95cba5e2211d90796a89dd1 | []
| no_license | mwesterhof/formationproject | 0d9795c218b5010bfbb716216d3d8f4fa5bd4799 | 1b4a057b996829609e308c78721aca840ec58ee7 | refs/heads/master | 2023-08-19T00:08:58.282341 | 2021-10-08T16:19:18 | 2021-10-08T16:19:18 | 401,425,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
WSGI config for formationproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "formationproject.settings.dev")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
433dc5780c6bf966236e507e8947e87df83870a2 | 43e900f11e2b230cdc0b2e48007d40294fefd87a | /Amazon/VideoOnsite/926.flip-string-to-monotone-increasing.py | d4efde64ddbe2e4540f93d5acfa3516e947730ab | []
| no_license | DarkAlexWang/leetcode | 02f2ed993688c34d3ce8f95d81b3e36a53ca002f | 89142297559af20cf990a8e40975811b4be36955 | refs/heads/master | 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | #
# @lc app=leetcode id=926 lang=python3
#
# [926] Flip String to Monotone Increasing
#
# @lc code=start
class Solution:
def minFlipsMonoIncr(self, s: str) -> int:
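        # One-pass counting sketch: for each split point i (prefix becomes all
        # '0', suffix all '1') the cost is (ones before i) + (zeros from i on).
        # cnt0 starts as the total number of zeros and is decremented as zeros
        # move into the prefix; cnt1 counts the ones seen so far.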
n = len(s)
cnt0 = s.count('0')
cnt1 = 0
res = n - cnt0
for i in range(n):
if s[i] == '0':
cnt0 -= 1
elif s[i] == '1':
res = min(res, cnt1 + cnt0)
cnt1 += 1
return res
# @lc code=end
| [
"[email protected]"
]
| |
89b1685f529264b86004c272eb59419b27a1315b | 4a42fefd8945c73402ddf36f8943e011cd9c4151 | /projects/myhellowebapp/hellowebapp/wsgi.py | 2b6fe00b6b8875c39ed849cf147b0eb94f51d25b | []
| no_license | momentum-cohort-2018-10/hello-web-app-SowmyaAji | c2c1374b460232822ff91fc1d034f1d89a400332 | 2cfe7fd6d22db4f9b9ac0d8fdc611787cb1372c5 | refs/heads/master | 2020-04-06T11:53:35.991478 | 2018-11-18T20:48:49 | 2018-11-18T20:48:49 | 157,434,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for hellowebapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hellowebapp.settings")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
c718408ccc29e4bca88b5deef7e84bb586acddfc | ea0c0b8d67a42086f840149b3dbe1c0e4f58e56f | /members_area/forms.py | 06d19b868f16f535ae4172f3cc5f191a2c75b8b0 | [
"MIT"
]
| permissive | AzeezBello/raodoh | 78b27e0886f8882144a4def160d9c3f53bcc6af9 | 296bd44069bd750557bf49995374601f5052d695 | refs/heads/master | 2022-05-03T05:07:21.632642 | 2020-02-26T10:16:08 | 2020-02-26T10:16:08 | 235,878,080 | 0 | 0 | MIT | 2022-04-22T23:01:27 | 2020-01-23T20:15:39 | JavaScript | UTF-8 | Python | false | false | 194 | py | from django.forms import ModelForm
from .models import Lesson
class LessonForm(ModelForm):
class Meta:
model = Lesson
fields = ('title', 'course', 'body', 'url', 'video')
| [
"[email protected]"
]
| |
91ea1c1fcfcc6577bf717c2abd059bc968643776 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/pyglet/libs/darwin/cocoapy/runtime.py | b692ce130d04d7d7af0a3e1daa11e437a71c142c | [
"MIT"
]
| permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 51,751 | py | # objective-ctypes
#
# Copyright (c) 2011, Phillip Nguyen
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# Neither the name of objective-ctypes nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import platform
import struct
from ctypes import *
from ctypes import util
from .cocoatypes import *
__LP64__ = (8*struct.calcsize("P") == 64)
__i386__ = (platform.machine() == 'i386')
__arm64__ = (platform.machine() == 'arm64')
if sizeof(c_void_p) == 4:
c_ptrdiff_t = c_int32
elif sizeof(c_void_p) == 8:
c_ptrdiff_t = c_int64
######################################################################
lib = util.find_library('objc')
# Hack for compatibility with macOS > 11.0
if lib is None:
lib = '/usr/lib/libobjc.dylib'
objc = cdll.LoadLibrary(lib)
######################################################################
# BOOL class_addIvar(Class cls, const char *name, size_t size, uint8_t alignment, const char *types)
objc.class_addIvar.restype = c_bool
objc.class_addIvar.argtypes = [c_void_p, c_char_p, c_size_t, c_uint8, c_char_p]
# BOOL class_addMethod(Class cls, SEL name, IMP imp, const char *types)
objc.class_addMethod.restype = c_bool
# BOOL class_addProtocol(Class cls, Protocol *protocol)
objc.class_addProtocol.restype = c_bool
objc.class_addProtocol.argtypes = [c_void_p, c_void_p]
# BOOL class_conformsToProtocol(Class cls, Protocol *protocol)
objc.class_conformsToProtocol.restype = c_bool
objc.class_conformsToProtocol.argtypes = [c_void_p, c_void_p]
# Ivar * class_copyIvarList(Class cls, unsigned int *outCount)
# Returns an array of pointers of type Ivar describing instance variables.
# The array has *outCount pointers followed by a NULL terminator.
# You must free() the returned array.
objc.class_copyIvarList.restype = POINTER(c_void_p)
objc.class_copyIvarList.argtypes = [c_void_p, POINTER(c_uint)]
# Method * class_copyMethodList(Class cls, unsigned int *outCount)
# Returns an array of pointers of type Method describing instance methods.
# The array has *outCount pointers followed by a NULL terminator.
# You must free() the returned array.
objc.class_copyMethodList.restype = POINTER(c_void_p)
objc.class_copyMethodList.argtypes = [c_void_p, POINTER(c_uint)]
# objc_property_t * class_copyPropertyList(Class cls, unsigned int *outCount)
# Returns an array of pointers of type objc_property_t describing properties.
# The array has *outCount pointers followed by a NULL terminator.
# You must free() the returned array.
objc.class_copyPropertyList.restype = POINTER(c_void_p)
objc.class_copyPropertyList.argtypes = [c_void_p, POINTER(c_uint)]
# Protocol ** class_copyProtocolList(Class cls, unsigned int *outCount)
# Returns an array of pointers of type Protocol* describing protocols.
# The array has *outCount pointers followed by a NULL terminator.
# You must free() the returned array.
objc.class_copyProtocolList.restype = POINTER(c_void_p)
objc.class_copyProtocolList.argtypes = [c_void_p, POINTER(c_uint)]
# id class_createInstance(Class cls, size_t extraBytes)
objc.class_createInstance.restype = c_void_p
objc.class_createInstance.argtypes = [c_void_p, c_size_t]
# Method class_getClassMethod(Class aClass, SEL aSelector)
# Will also search superclass for implementations.
objc.class_getClassMethod.restype = c_void_p
objc.class_getClassMethod.argtypes = [c_void_p, c_void_p]
# Ivar class_getClassVariable(Class cls, const char* name)
objc.class_getClassVariable.restype = c_void_p
objc.class_getClassVariable.argtypes = [c_void_p, c_char_p]
# Method class_getInstanceMethod(Class aClass, SEL aSelector)
# Will also search superclass for implementations.
objc.class_getInstanceMethod.restype = c_void_p
objc.class_getInstanceMethod.argtypes = [c_void_p, c_void_p]
# size_t class_getInstanceSize(Class cls)
objc.class_getInstanceSize.restype = c_size_t
objc.class_getInstanceSize.argtypes = [c_void_p]
# Ivar class_getInstanceVariable(Class cls, const char* name)
objc.class_getInstanceVariable.restype = c_void_p
objc.class_getInstanceVariable.argtypes = [c_void_p, c_char_p]
# const char *class_getIvarLayout(Class cls)
objc.class_getIvarLayout.restype = c_char_p
objc.class_getIvarLayout.argtypes = [c_void_p]
# IMP class_getMethodImplementation(Class cls, SEL name)
objc.class_getMethodImplementation.restype = c_void_p
objc.class_getMethodImplementation.argtypes = [c_void_p, c_void_p]
# The function is marked as OBJC_ARM64_UNAVAILABLE.
if not __arm64__:
# IMP class_getMethodImplementation_stret(Class cls, SEL name)
objc.class_getMethodImplementation_stret.restype = c_void_p
objc.class_getMethodImplementation_stret.argtypes = [c_void_p, c_void_p]
# const char * class_getName(Class cls)
objc.class_getName.restype = c_char_p
objc.class_getName.argtypes = [c_void_p]
# objc_property_t class_getProperty(Class cls, const char *name)
objc.class_getProperty.restype = c_void_p
objc.class_getProperty.argtypes = [c_void_p, c_char_p]
# Class class_getSuperclass(Class cls)
objc.class_getSuperclass.restype = c_void_p
objc.class_getSuperclass.argtypes = [c_void_p]
# int class_getVersion(Class theClass)
objc.class_getVersion.restype = c_int
objc.class_getVersion.argtypes = [c_void_p]
# const char *class_getWeakIvarLayout(Class cls)
objc.class_getWeakIvarLayout.restype = c_char_p
objc.class_getWeakIvarLayout.argtypes = [c_void_p]
# BOOL class_isMetaClass(Class cls)
objc.class_isMetaClass.restype = c_bool
objc.class_isMetaClass.argtypes = [c_void_p]
# IMP class_replaceMethod(Class cls, SEL name, IMP imp, const char *types)
objc.class_replaceMethod.restype = c_void_p
objc.class_replaceMethod.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p]
# BOOL class_respondsToSelector(Class cls, SEL sel)
objc.class_respondsToSelector.restype = c_bool
objc.class_respondsToSelector.argtypes = [c_void_p, c_void_p]
# void class_setIvarLayout(Class cls, const char *layout)
objc.class_setIvarLayout.restype = None
objc.class_setIvarLayout.argtypes = [c_void_p, c_char_p]
# Class class_setSuperclass(Class cls, Class newSuper)
objc.class_setSuperclass.restype = c_void_p
objc.class_setSuperclass.argtypes = [c_void_p, c_void_p]
# void class_setVersion(Class theClass, int version)
objc.class_setVersion.restype = None
objc.class_setVersion.argtypes = [c_void_p, c_int]
# void class_setWeakIvarLayout(Class cls, const char *layout)
objc.class_setWeakIvarLayout.restype = None
objc.class_setWeakIvarLayout.argtypes = [c_void_p, c_char_p]
######################################################################
# const char * ivar_getName(Ivar ivar)
objc.ivar_getName.restype = c_char_p
objc.ivar_getName.argtypes = [c_void_p]
# ptrdiff_t ivar_getOffset(Ivar ivar)
objc.ivar_getOffset.restype = c_ptrdiff_t
objc.ivar_getOffset.argtypes = [c_void_p]
# const char * ivar_getTypeEncoding(Ivar ivar)
objc.ivar_getTypeEncoding.restype = c_char_p
objc.ivar_getTypeEncoding.argtypes = [c_void_p]
######################################################################
# char * method_copyArgumentType(Method method, unsigned int index)
# You must free() the returned string.
objc.method_copyArgumentType.restype = c_char_p
objc.method_copyArgumentType.argtypes = [c_void_p, c_uint]
# char * method_copyReturnType(Method method)
# You must free() the returned string.
objc.method_copyReturnType.restype = c_char_p
objc.method_copyReturnType.argtypes = [c_void_p]
# void method_exchangeImplementations(Method m1, Method m2)
objc.method_exchangeImplementations.restype = None
objc.method_exchangeImplementations.argtypes = [c_void_p, c_void_p]
# void method_getArgumentType(Method method, unsigned int index, char *dst, size_t dst_len)
# Functionally similar to strncpy(dst, parameter_type, dst_len).
objc.method_getArgumentType.restype = None
objc.method_getArgumentType.argtypes = [c_void_p, c_uint, c_char_p, c_size_t]
# IMP method_getImplementation(Method method)
objc.method_getImplementation.restype = c_void_p
objc.method_getImplementation.argtypes = [c_void_p]
# SEL method_getName(Method method)
objc.method_getName.restype = c_void_p
objc.method_getName.argtypes = [c_void_p]
# unsigned method_getNumberOfArguments(Method method)
objc.method_getNumberOfArguments.restype = c_uint
objc.method_getNumberOfArguments.argtypes = [c_void_p]
# void method_getReturnType(Method method, char *dst, size_t dst_len)
# Functionally similar to strncpy(dst, return_type, dst_len)
objc.method_getReturnType.restype = None
objc.method_getReturnType.argtypes = [c_void_p, c_char_p, c_size_t]
# const char * method_getTypeEncoding(Method method)
objc.method_getTypeEncoding.restype = c_char_p
objc.method_getTypeEncoding.argtypes = [c_void_p]
# IMP method_setImplementation(Method method, IMP imp)
objc.method_setImplementation.restype = c_void_p
objc.method_setImplementation.argtypes = [c_void_p, c_void_p]
######################################################################
# Class objc_allocateClassPair(Class superclass, const char *name, size_t extraBytes)
objc.objc_allocateClassPair.restype = c_void_p
objc.objc_allocateClassPair.argtypes = [c_void_p, c_char_p, c_size_t]
# Protocol **objc_copyProtocolList(unsigned int *outCount)
# Returns an array of *outcount pointers followed by NULL terminator.
# You must free() the array.
objc.objc_copyProtocolList.restype = POINTER(c_void_p)
objc.objc_copyProtocolList.argtypes = [POINTER(c_int)]
# id objc_getAssociatedObject(id object, void *key)
objc.objc_getAssociatedObject.restype = c_void_p
objc.objc_getAssociatedObject.argtypes = [c_void_p, c_void_p]
# id objc_getClass(const char *name)
objc.objc_getClass.restype = c_void_p
objc.objc_getClass.argtypes = [c_char_p]
# int objc_getClassList(Class *buffer, int bufferLen)
# Pass None for buffer to obtain just the total number of classes.
objc.objc_getClassList.restype = c_int
objc.objc_getClassList.argtypes = [c_void_p, c_int]
# id objc_getMetaClass(const char *name)
objc.objc_getMetaClass.restype = c_void_p
objc.objc_getMetaClass.argtypes = [c_char_p]
# Protocol *objc_getProtocol(const char *name)
objc.objc_getProtocol.restype = c_void_p
objc.objc_getProtocol.argtypes = [c_char_p]
# You should set return and argument types depending on context.
# id objc_msgSend(id theReceiver, SEL theSelector, ...)
# id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
# The function is marked as OBJC_ARM64_UNAVAILABLE.
if not __arm64__:
# void objc_msgSendSuper_stret(struct objc_super *super, SEL op, ...)
objc.objc_msgSendSuper_stret.restype = None
# double objc_msgSend_fpret(id self, SEL op, ...)
# objc.objc_msgSend_fpret.restype = c_double
# The function is marked as OBJC_ARM64_UNAVAILABLE.
if not __arm64__:
# void objc_msgSend_stret(void * stretAddr, id theReceiver, SEL theSelector, ...)
objc.objc_msgSend_stret.restype = None
# void objc_registerClassPair(Class cls)
objc.objc_registerClassPair.restype = None
objc.objc_registerClassPair.argtypes = [c_void_p]
# void objc_removeAssociatedObjects(id object)
objc.objc_removeAssociatedObjects.restype = None
objc.objc_removeAssociatedObjects.argtypes = [c_void_p]
# void objc_setAssociatedObject(id object, void *key, id value, objc_AssociationPolicy policy)
objc.objc_setAssociatedObject.restype = None
objc.objc_setAssociatedObject.argtypes = [c_void_p, c_void_p, c_void_p, c_int]
######################################################################
# id object_copy(id obj, size_t size)
objc.object_copy.restype = c_void_p
objc.object_copy.argtypes = [c_void_p, c_size_t]
# id object_dispose(id obj)
objc.object_dispose.restype = c_void_p
objc.object_dispose.argtypes = [c_void_p]
# Class object_getClass(id object)
objc.object_getClass.restype = c_void_p
objc.object_getClass.argtypes = [c_void_p]
# const char *object_getClassName(id obj)
objc.object_getClassName.restype = c_char_p
objc.object_getClassName.argtypes = [c_void_p]
# Ivar object_getInstanceVariable(id obj, const char *name, void **outValue)
objc.object_getInstanceVariable.restype = c_void_p
objc.object_getInstanceVariable.argtypes=[c_void_p, c_char_p, c_void_p]
# id object_getIvar(id object, Ivar ivar)
objc.object_getIvar.restype = c_void_p
objc.object_getIvar.argtypes = [c_void_p, c_void_p]
# Class object_setClass(id object, Class cls)
objc.object_setClass.restype = c_void_p
objc.object_setClass.argtypes = [c_void_p, c_void_p]
# Ivar object_setInstanceVariable(id obj, const char *name, void *value)
# Set argtypes based on the data type of the instance variable.
objc.object_setInstanceVariable.restype = c_void_p
# void object_setIvar(id object, Ivar ivar, id value)
objc.object_setIvar.restype = None
objc.object_setIvar.argtypes = [c_void_p, c_void_p, c_void_p]
######################################################################
# const char *property_getAttributes(objc_property_t property)
objc.property_getAttributes.restype = c_char_p
objc.property_getAttributes.argtypes = [c_void_p]
# const char *property_getName(objc_property_t property)
objc.property_getName.restype = c_char_p
objc.property_getName.argtypes = [c_void_p]
######################################################################
# BOOL protocol_conformsToProtocol(Protocol *proto, Protocol *other)
objc.protocol_conformsToProtocol.restype = c_bool
objc.protocol_conformsToProtocol.argtypes = [c_void_p, c_void_p]
class OBJC_METHOD_DESCRIPTION(Structure):
_fields_ = [ ("name", c_void_p), ("types", c_char_p) ]
# struct objc_method_description *protocol_copyMethodDescriptionList(Protocol *p, BOOL isRequiredMethod, BOOL isInstanceMethod, unsigned int *outCount)
# You must free() the returned array.
objc.protocol_copyMethodDescriptionList.restype = POINTER(OBJC_METHOD_DESCRIPTION)
objc.protocol_copyMethodDescriptionList.argtypes = [c_void_p, c_bool, c_bool, POINTER(c_uint)]
# objc_property_t * protocol_copyPropertyList(Protocol *protocol, unsigned int *outCount)
objc.protocol_copyPropertyList.restype = c_void_p
objc.protocol_copyPropertyList.argtypes = [c_void_p, POINTER(c_uint)]
# Protocol **protocol_copyProtocolList(Protocol *proto, unsigned int *outCount)
objc.protocol_copyProtocolList.restype = POINTER(c_void_p)
objc.protocol_copyProtocolList.argtypes = [c_void_p, POINTER(c_uint)]
# struct objc_method_description protocol_getMethodDescription(Protocol *p, SEL aSel, BOOL isRequiredMethod, BOOL isInstanceMethod)
objc.protocol_getMethodDescription.restype = OBJC_METHOD_DESCRIPTION
objc.protocol_getMethodDescription.argtypes = [c_void_p, c_void_p, c_bool, c_bool]
# const char *protocol_getName(Protocol *p)
objc.protocol_getName.restype = c_char_p
objc.protocol_getName.argtypes = [c_void_p]
######################################################################
# const char* sel_getName(SEL aSelector)
objc.sel_getName.restype = c_char_p
objc.sel_getName.argtypes = [c_void_p]
# SEL sel_getUid(const char *str)
# Use sel_registerName instead.
# BOOL sel_isEqual(SEL lhs, SEL rhs)
objc.sel_isEqual.restype = c_bool
objc.sel_isEqual.argtypes = [c_void_p, c_void_p]
# SEL sel_registerName(const char *str)
objc.sel_registerName.restype = c_void_p
objc.sel_registerName.argtypes = [c_char_p]
######################################################################
def ensure_bytes(x):
if isinstance(x, bytes):
return x
return x.encode('ascii')
######################################################################
def get_selector(name):
return c_void_p(objc.sel_registerName(ensure_bytes(name)))
def get_class(name):
return c_void_p(objc.objc_getClass(ensure_bytes(name)))
def get_object_class(obj):
return c_void_p(objc.object_getClass(obj))
def get_metaclass(name):
return c_void_p(objc.objc_getMetaClass(ensure_bytes(name)))
def get_superclass_of_object(obj):
cls = c_void_p(objc.object_getClass(obj))
return c_void_p(objc.class_getSuperclass(cls))
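# Illustrative usage sketch (comments added for clarity, not in the original):
#
#   NSObject = get_class('NSObject')
#   alloc = get_selector('alloc')
#   meta = get_metaclass('NSObject')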
# http://www.sealiesoftware.com/blog/archive/2008/10/30/objc_explain_objc_msgSend_stret.html
# http://www.x86-64.org/documentation/abi-0.99.pdf (pp.17-23)
# executive summary: on x86-64, who knows?
def x86_should_use_stret(restype):
"""Try to figure out when a return type will be passed on stack."""
if type(restype) != type(Structure):
return False
if not __LP64__ and sizeof(restype) <= 8:
return False
if __LP64__ and sizeof(restype) <= 16: # maybe? I don't know?
return False
return True
# http://www.sealiesoftware.com/blog/archive/2008/11/16/objc_explain_objc_msgSend_fpret.html
def should_use_fpret(restype):
"""Determine if objc_msgSend_fpret is required to return a floating point type."""
if not __i386__:
# Unneeded on non-intel processors
return False
if __LP64__ and restype == c_longdouble:
# Use only for long double on x86_64
return True
if not __LP64__ and restype in (c_float, c_double, c_longdouble):
return True
return False
# By default, assumes that restype is c_void_p
# and that all arguments are wrapped inside c_void_p.
# Use the restype and argtypes keyword arguments to
# change these values. restype should be a ctypes type
# and argtypes should be a list of ctypes types for
# the arguments of the message only.
def send_message(receiver, selName, *args, **kwargs):
if isinstance(receiver, str):
receiver = get_class(receiver)
selector = get_selector(selName)
restype = kwargs.get('restype', c_void_p)
    # print('send_message', receiver, selName, args, kwargs)
argtypes = kwargs.get('argtypes', [])
# Choose the correct version of objc_msgSend based on return type.
if should_use_fpret(restype):
objc.objc_msgSend_fpret.restype = restype
objc.objc_msgSend_fpret.argtypes = [c_void_p, c_void_p] + argtypes
result = objc.objc_msgSend_fpret(receiver, selector, *args)
elif x86_should_use_stret(restype):
objc.objc_msgSend_stret.argtypes = [POINTER(restype), c_void_p, c_void_p] + argtypes
result = restype()
objc.objc_msgSend_stret(byref(result), receiver, selector, *args)
else:
objc.objc_msgSend.restype = restype
objc.objc_msgSend.argtypes = [c_void_p, c_void_p] + argtypes
result = objc.objc_msgSend(receiver, selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
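# Illustrative usage sketch (comments added for clarity, not in the original):
# the Objective-C expression [NSString string] maps roughly to
#
#   NSString = get_class('NSString')
#   empty = send_message(NSString, 'string')
#
# Pass the restype/argtypes keywords whenever a selector does not take and
# return plain object pointers.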
class OBJC_SUPER(Structure):
_fields_ = [ ('receiver', c_void_p), ('class', c_void_p) ]
OBJC_SUPER_PTR = POINTER(OBJC_SUPER)
# http://stackoverflow.com/questions/3095360/what-exactly-is-super-in-objective-c
#
# `superclass_name` is optional and can be used to force finding the superclass
# by name. It is used to circumvent a bug in which the superclass was resolved
# incorrectly which lead to an infinite recursion:
# https://github.com/pyglet/pyglet/issues/5
def send_super(receiver, selName, *args, superclass_name=None, **kwargs):
if hasattr(receiver, '_as_parameter_'):
receiver = receiver._as_parameter_
if superclass_name is None:
superclass = get_superclass_of_object(receiver)
else:
superclass = get_class(superclass_name)
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get('restype', c_void_p)
argtypes = kwargs.get('argtypes', None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
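# Illustrative usage sketch (comments added for clarity, not in the original):
# inside a Python-implemented Objective-C method, chaining to the superclass
# init typically looks like
#
#   objc_self = send_super(objc_self, 'init')
#
# where objc_self is the received id (a made-up name for this sketch).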
######################################################################
cfunctype_table = {}
def parse_type_encoding(encoding):
"""Takes a type encoding string and outputs a list of the separated type codes.
Currently does not handle unions or bitfields and strips out any field width
specifiers or type specifiers from the encoding. For Python 3.2+, encoding is
assumed to be a bytes object and not unicode.
Examples:
parse_type_encoding('^v16@0:8') --> ['^v', '@', ':']
parse_type_encoding('{CGSize=dd}40@0:8{CGSize=dd}16Q32') --> ['{CGSize=dd}', '@', ':', '{CGSize=dd}', 'Q']
"""
type_encodings = []
brace_count = 0 # number of unclosed curly braces
bracket_count = 0 # number of unclosed square brackets
typecode = b''
for c in encoding:
# In Python 3, c comes out as an integer in the range 0-255. In Python 2, c is a single character string.
# To fix the disparity, we convert c to a bytes object if necessary.
if isinstance(c, int):
c = bytes([c])
if c == b'{':
            # Check if this marks the end of the previous type code.
if typecode and typecode[-1:] != b'^' and brace_count == 0 and bracket_count == 0:
type_encodings.append(typecode)
typecode = b''
typecode += c
brace_count += 1
elif c == b'}':
typecode += c
brace_count -= 1
assert(brace_count >= 0)
elif c == b'[':
            # Check if this marks the end of the previous type code.
if typecode and typecode[-1:] != b'^' and brace_count == 0 and bracket_count == 0:
type_encodings.append(typecode)
typecode = b''
typecode += c
bracket_count += 1
elif c == b']':
typecode += c
bracket_count -= 1
assert(bracket_count >= 0)
elif brace_count or bracket_count:
# Anything encountered while inside braces or brackets gets stuck on.
typecode += c
elif c in b'0123456789':
# Ignore field width specifiers for now.
pass
elif c in b'rnNoORV':
# Also ignore type specifiers.
pass
elif c in b'^cislqCISLQfdBv*@#:b?':
if typecode and typecode[-1:] == b'^':
# Previous char was pointer specifier, so keep going.
typecode += c
else:
# Add previous type code to the list.
if typecode:
type_encodings.append(typecode)
# Start a new type code.
typecode = c
# Add the last type code to the list
if typecode:
type_encodings.append(typecode)
return type_encodings
# Limited to basic types and pointers to basic types.
# Does not try to handle arrays, arbitrary structs, unions, or bitfields.
# Assume that encoding is a bytes object and not unicode.
def cfunctype_for_encoding(encoding):
# Check if we've already created a CFUNCTYPE for this encoding.
# If so, then return the cached CFUNCTYPE.
if encoding in cfunctype_table:
return cfunctype_table[encoding]
# Otherwise, create a new CFUNCTYPE for the encoding.
typecodes = {b'c':c_char, b'i':c_int, b's':c_short, b'l':c_long, b'q':c_longlong,
b'C':c_ubyte, b'I':c_uint, b'S':c_ushort, b'L':c_ulong, b'Q':c_ulonglong,
b'f':c_float, b'd':c_double, b'B':c_bool, b'v':None, b'*':c_char_p,
b'@':c_void_p, b'#':c_void_p, b':':c_void_p, NSPointEncoding:NSPoint,
NSSizeEncoding:NSSize, NSRectEncoding:NSRect, NSRangeEncoding:NSRange,
PyObjectEncoding:py_object}
argtypes = []
for code in parse_type_encoding(encoding):
if code in typecodes:
argtypes.append(typecodes[code])
elif code[0:1] == b'^' and code[1:] in typecodes:
argtypes.append(POINTER(typecodes[code[1:]]))
else:
            # use %r: concatenating str with a bytes typecode would raise TypeError
            raise Exception('unknown type encoding: %r' % code)
cfunctype = CFUNCTYPE(*argtypes)
# Cache the new CFUNCTYPE in the cfunctype_table.
# We do this mainly because it prevents the CFUNCTYPE
# from being garbage-collected while we need it.
cfunctype_table[encoding] = cfunctype
return cfunctype
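# Worked example (illustrative): the encoding b'v@:' describes a method that
# returns void ('v') and takes id self ('@') plus SEL cmd (':'), so
# cfunctype_for_encoding(b'v@:') builds CFUNCTYPE(None, c_void_p, c_void_p).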
######################################################################
# After calling create_subclass, you must first register
# it with register_subclass before you may use it.
# You can add new methods after the class is registered,
# but you cannot add any new ivars.
def create_subclass(superclass, name):
if isinstance(superclass, str):
superclass = get_class(superclass)
return c_void_p(objc.objc_allocateClassPair(superclass, ensure_bytes(name), 0))
def register_subclass(subclass):
objc.objc_registerClassPair(subclass)
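# Illustrative workflow sketch (class and ivar names are hypothetical):
#   MyClass = create_subclass('NSObject', 'MyPythonClass')
#   add_ivar(MyClass, 'my_ivar', c_void_p)   # ivars must precede registration
#   register_subclass(MyClass)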
# types is a string encoding the argument types of the method.
# The first type code of types is the return type (e.g. 'v' if void)
# The second type code must be '@' for id self.
# The third type code must be ':' for SEL cmd.
# Additional type codes are for types of other arguments if any.
def add_method(cls, selName, method, types):
type_encodings = parse_type_encoding(types)
assert(type_encodings[1] == b'@') # ensure id self typecode
assert(type_encodings[2] == b':') # ensure SEL cmd typecode
selector = get_selector(selName)
cfunctype = cfunctype_for_encoding(types)
imp = cfunctype(method)
objc.class_addMethod.argtypes = [c_void_p, c_void_p, cfunctype, c_char_p]
objc.class_addMethod(cls, selector, imp, types)
return imp
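# For example (illustrative, with a hypothetical Python callable py_func):
#   add_method(cls, 'setFoo:', py_func, b'v@:@')
# registers a void method taking one id argument besides self and cmd.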
def add_ivar(cls, name, vartype):
return objc.class_addIvar(cls, ensure_bytes(name), sizeof(vartype), alignment(vartype), encoding_for_ctype(vartype))
def set_instance_variable(obj, varname, value, vartype):
objc.object_setInstanceVariable.argtypes = [c_void_p, c_char_p, vartype]
objc.object_setInstanceVariable(obj, ensure_bytes(varname), value)
def get_instance_variable(obj, varname, vartype):
variable = vartype()
objc.object_getInstanceVariable(obj, ensure_bytes(varname), byref(variable))
return variable.value
######################################################################
class ObjCMethod:
"""This represents an unbound Objective-C method (really an IMP)."""
# Note, need to map 'c' to c_byte rather than c_char, because otherwise
# ctypes converts the value into a one-character string which is generally
# not what we want at all, especially when the 'c' represents a bool var.
typecodes = {b'c':c_byte, b'i':c_int, b's':c_short, b'l':c_long, b'q':c_longlong,
b'C':c_ubyte, b'I':c_uint, b'S':c_ushort, b'L':c_ulong, b'Q':c_ulonglong,
b'f':c_float, b'd':c_double, b'B':c_bool, b'v':None, b'Vv':None, b'*':c_char_p,
b'@':c_void_p, b'#':c_void_p, b':':c_void_p, b'^v':c_void_p, b'?':c_void_p,
NSPointEncoding:NSPoint, NSSizeEncoding:NSSize, NSRectEncoding:NSRect,
NSRangeEncoding:NSRange,
PyObjectEncoding:py_object}
cfunctype_table = {}
def __init__(self, method):
"""Initialize with an Objective-C Method pointer. We then determine
the return type and argument type information of the method."""
self.selector = c_void_p(objc.method_getName(method))
self.name = objc.sel_getName(self.selector)
self.pyname = self.name.replace(b':', b'_')
self.encoding = objc.method_getTypeEncoding(method)
self.return_type = objc.method_copyReturnType(method)
self.nargs = objc.method_getNumberOfArguments(method)
self.imp = c_void_p(objc.method_getImplementation(method))
self.argument_types = []
for i in range(self.nargs):
buffer = c_buffer(512)
objc.method_getArgumentType(method, i, buffer, len(buffer))
self.argument_types.append(buffer.value)
# Get types for all the arguments.
try:
self.argtypes = [self.ctype_for_encoding(t) for t in self.argument_types]
except:
#print 'no argtypes encoding for %s (%s)' % (self.name, self.argument_types)
self.argtypes = None
# Get types for the return type.
try:
if self.return_type == b'@':
self.restype = ObjCInstance
elif self.return_type == b'#':
self.restype = ObjCClass
else:
self.restype = self.ctype_for_encoding(self.return_type)
except:
#print 'no restype encoding for %s (%s)' % (self.name, self.return_type)
self.restype = None
self.func = None
def ctype_for_encoding(self, encoding):
"""Return ctypes type for an encoded Objective-C type."""
if encoding in self.typecodes:
return self.typecodes[encoding]
elif encoding[0:1] == b'^' and encoding[1:] in self.typecodes:
return POINTER(self.typecodes[encoding[1:]])
elif encoding[0:1] == b'^' and encoding[1:] in [CGImageEncoding, NSZoneEncoding]:
# special cases
return c_void_p
elif encoding[0:1] == b'r' and encoding[1:] in self.typecodes:
# const decorator, don't care
return self.typecodes[encoding[1:]]
elif encoding[0:2] == b'r^' and encoding[2:] in self.typecodes:
# const pointer, also don't care
return POINTER(self.typecodes[encoding[2:]])
else:
raise Exception('unknown encoding for %s: %s' % (self.name, encoding))
def get_prototype(self):
"""Returns a ctypes CFUNCTYPE for the method."""
if self.restype == ObjCInstance or self.restype == ObjCClass:
# Some hacky stuff to get around ctypes issues on 64-bit. Can't let
# ctypes convert the return value itself, because it truncates the pointer
            # along the way.  So instead, we must set the return type to c_void_p to
# ensure we get 64-bit addresses and then convert the return value manually.
self.prototype = CFUNCTYPE(c_void_p, *self.argtypes)
else:
self.prototype = CFUNCTYPE(self.restype, *self.argtypes)
return self.prototype
def __repr__(self):
return "<ObjCMethod: %s %s>" % (self.name, self.encoding)
def get_callable(self):
"""Returns a python-callable version of the method's IMP."""
if not self.func:
prototype = self.get_prototype()
self.func = cast(self.imp, prototype)
if self.restype == ObjCInstance or self.restype == ObjCClass:
self.func.restype = c_void_p
else:
self.func.restype = self.restype
self.func.argtypes = self.argtypes
return self.func
def __call__(self, objc_id, *args):
"""Call the method with the given id and arguments. You do not need
to pass in the selector as an argument since it will be automatically
provided."""
f = self.get_callable()
try:
result = f(objc_id, self.selector, *args)
            # Convert result to a Python type if it is an instance or class pointer.
if self.restype == ObjCInstance:
result = ObjCInstance(result)
elif self.restype == ObjCClass:
result = ObjCClass(result)
return result
except ArgumentError as error:
# Add more useful info to argument error exceptions, then reraise.
error.args += ('selector = ' + str(self.name),
'argtypes =' + str(self.argtypes),
'encoding = ' + str(self.encoding))
raise
######################################################################
class ObjCBoundMethod:
"""This represents an Objective-C method (an IMP) which has been bound
to some id which will be passed as the first parameter to the method."""
def __init__(self, method, objc_id):
"""Initialize with a method and ObjCInstance or ObjCClass object."""
self.method = method
self.objc_id = objc_id
def __repr__(self):
return '<ObjCBoundMethod %s (%s)>' % (self.method.name, self.objc_id)
def __call__(self, *args):
"""Call the method with the given arguments."""
return self.method(self.objc_id, *args)
######################################################################
class ObjCClass:
"""Python wrapper for an Objective-C class."""
# We only create one Python object for each Objective-C class.
# Any future calls with the same class will return the previously
# created Python object. Note that these aren't weak references.
# After you create an ObjCClass, it will exist until the end of the
# program.
_registered_classes = {}
def __new__(cls, class_name_or_ptr):
"""Create a new ObjCClass instance or return a previously created
instance for the given Objective-C class. The argument may be either
the name of the class to retrieve, or a pointer to the class."""
# Determine name and ptr values from passed in argument.
if isinstance(class_name_or_ptr, str):
name = class_name_or_ptr
ptr = get_class(name)
else:
ptr = class_name_or_ptr
# Make sure that ptr value is wrapped in c_void_p object
# for safety when passing as ctypes argument.
if not isinstance(ptr, c_void_p):
ptr = c_void_p(ptr)
name = objc.class_getName(ptr)
# Check if we've already created a Python object for this class
# and if so, return it rather than making a new one.
if name in cls._registered_classes:
return cls._registered_classes[name]
# Otherwise create a new Python object and then initialize it.
objc_class = super(ObjCClass, cls).__new__(cls)
objc_class.ptr = ptr
objc_class.name = name
objc_class.instance_methods = {} # mapping of name -> instance method
objc_class.class_methods = {} # mapping of name -> class method
objc_class._as_parameter_ = ptr # for ctypes argument passing
# Store the new class in dictionary of registered classes.
cls._registered_classes[name] = objc_class
# Not sure this is necessary...
objc_class.cache_instance_methods()
objc_class.cache_class_methods()
return objc_class
def __repr__(self):
return "<ObjCClass: %s at %s>" % (self.name, str(self.ptr.value))
def cache_instance_methods(self):
"""Create and store python representations of all instance methods
implemented by this class (but does not find methods of superclass)."""
count = c_uint()
method_array = objc.class_copyMethodList(self.ptr, byref(count))
for i in range(count.value):
method = c_void_p(method_array[i])
objc_method = ObjCMethod(method)
self.instance_methods[objc_method.pyname] = objc_method
def cache_class_methods(self):
"""Create and store python representations of all class methods
implemented by this class (but does not find methods of superclass)."""
count = c_uint()
method_array = objc.class_copyMethodList(objc.object_getClass(self.ptr), byref(count))
for i in range(count.value):
method = c_void_p(method_array[i])
objc_method = ObjCMethod(method)
self.class_methods[objc_method.pyname] = objc_method
def get_instance_method(self, name):
"""Returns a python representation of the named instance method,
either by looking it up in the cached list of methods or by searching
for and creating a new method object."""
if name in self.instance_methods:
return self.instance_methods[name]
else:
# If method name isn't in the cached list, it might be a method of
# the superclass, so call class_getInstanceMethod to check.
selector = get_selector(name.replace(b'_', b':'))
method = c_void_p(objc.class_getInstanceMethod(self.ptr, selector))
if method.value:
objc_method = ObjCMethod(method)
self.instance_methods[name] = objc_method
return objc_method
return None
def get_class_method(self, name):
"""Returns a python representation of the named class method,
either by looking it up in the cached list of methods or by searching
for and creating a new method object."""
if name in self.class_methods:
return self.class_methods[name]
else:
# If method name isn't in the cached list, it might be a method of
            # the superclass, so call class_getClassMethod to check.
selector = get_selector(name.replace(b'_', b':'))
method = c_void_p(objc.class_getClassMethod(self.ptr, selector))
if method.value:
objc_method = ObjCMethod(method)
self.class_methods[name] = objc_method
return objc_method
return None
def __getattr__(self, name):
"""Returns a callable method object with the given name."""
# If name refers to a class method, then return a callable object
# for the class method with self.ptr as hidden first parameter.
name = ensure_bytes(name)
method = self.get_class_method(name)
if method:
return ObjCBoundMethod(method, self.ptr)
# If name refers to an instance method, then simply return the method.
# The caller will need to supply an instance as the first parameter.
method = self.get_instance_method(name)
if method:
return method
# Otherwise, raise an exception.
raise AttributeError('ObjCClass %s has no attribute %s' % (self.name, name))
######################################################################
class ObjCInstance:
"""Python wrapper for an Objective-C instance."""
_cached_objects = {}
def __new__(cls, object_ptr):
"""Create a new ObjCInstance or return a previously created one
for the given object_ptr which should be an Objective-C id."""
# Make sure that object_ptr is wrapped in a c_void_p.
if not isinstance(object_ptr, c_void_p):
object_ptr = c_void_p(object_ptr)
# If given a nil pointer, return None.
if not object_ptr.value:
return None
        # Check if we've already created a Python ObjCInstance for this
# object_ptr id and if so, then return it. A single ObjCInstance will
# be created for any object pointer when it is first encountered.
# This same ObjCInstance will then persist until the object is
# deallocated.
if object_ptr.value in cls._cached_objects:
return cls._cached_objects[object_ptr.value]
# Otherwise, create a new ObjCInstance.
objc_instance = super(ObjCInstance, cls).__new__(cls)
objc_instance.ptr = object_ptr
objc_instance._as_parameter_ = object_ptr
# Determine class of this object.
class_ptr = c_void_p(objc.object_getClass(object_ptr))
objc_instance.objc_class = ObjCClass(class_ptr)
# Store new object in the dictionary of cached objects, keyed
# by the (integer) memory address pointed to by the object_ptr.
cls._cached_objects[object_ptr.value] = objc_instance
# Create a DeallocationObserver and associate it with this object.
# When the Objective-C object is deallocated, the observer will remove
# the ObjCInstance corresponding to the object from the cached objects
# dictionary, effectively destroying the ObjCInstance.
observer = send_message(send_message('DeallocationObserver', 'alloc'), 'initWithObject:', objc_instance)
objc.objc_setAssociatedObject(objc_instance, observer, observer, 0x301)
# The observer is retained by the object we associate it to. We release
# the observer now so that it will be deallocated when the associated
# object is deallocated.
send_message(observer, 'release')
return objc_instance
def __repr__(self):
if self.objc_class.name == b'NSCFString':
# Display contents of NSString objects
from .cocoalibs import cfstring_to_string
string = cfstring_to_string(self)
return "<ObjCInstance %#x: %s (%s) at %s>" % (id(self), self.objc_class.name, string, str(self.ptr.value))
return "<ObjCInstance %#x: %s at %s>" % (id(self), self.objc_class.name, str(self.ptr.value))
def __getattr__(self, name):
"""Returns a callable method object with the given name."""
# Search for named instance method in the class object and if it
# exists, return callable object with self as hidden argument.
# Note: you should give self and not self.ptr as a parameter to
# ObjCBoundMethod, so that it will be able to keep the ObjCInstance
# alive for chained calls like MyClass.alloc().init() where the
# object created by alloc() is not assigned to a variable.
name = ensure_bytes(name)
method = self.objc_class.get_instance_method(name)
if method:
return ObjCBoundMethod(method, self)
# Else, search for class method with given name in the class object.
# If it exists, return callable object with a pointer to the class
# as a hidden argument.
method = self.objc_class.get_class_method(name)
if method:
return ObjCBoundMethod(method, self.objc_class.ptr)
# Otherwise raise an exception.
raise AttributeError('ObjCInstance %s has no attribute %s' % (self.objc_class.name, name))
######################################################################
def convert_method_arguments(encoding, args):
"""Used by ObjCSubclass to convert Objective-C method arguments to
Python values before passing them on to the Python-defined method."""
new_args = []
arg_encodings = parse_type_encoding(encoding)[3:]
for e, a in zip(arg_encodings, args):
if e == b'@':
new_args.append(ObjCInstance(a))
elif e == b'#':
new_args.append(ObjCClass(a))
else:
new_args.append(a)
return new_args
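# For example (illustrative): with encoding b'v@:@' the slice [3:] above
# leaves [b'@'], so the method's single argument is wrapped as an
# ObjCInstance before being handed to the Python implementation.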
# ObjCSubclass is used to define an Objective-C subclass of an existing
# class registered with the runtime. When you create an instance of
# ObjCSubclass, it registers the new subclass with the Objective-C
# runtime and creates a set of function decorators that you can use to
# add instance methods or class methods to the subclass.
#
# Typical usage would be to first create and register the subclass:
#
# MySubclass = ObjCSubclass('NSObject', 'MySubclassName')
#
# then add methods with:
#
# @MySubclass.method('v')
# def methodThatReturnsVoid(self):
# pass
#
# @MySubclass.method('Bi')
# def boolReturningMethodWithInt_(self, x):
# return True
#
# @MySubclass.classmethod('@')
# def classMethodThatReturnsId(self):
# return self
#
# It is probably a good idea to organize the code related to a single
# subclass by either putting it in its own module (note that you don't
# actually need to expose any of the method names or the ObjCSubclass)
# or by bundling it all up inside a python class definition, perhaps
# called MySubclassImplementation.
#
# It is also possible to add Objective-C ivars to the subclass, however
# if you do so, you must call the __init__ method with register=False,
# and then call the register method after the ivars have been added.
# But rather than creating the ivars in Objective-C land, it is easier
# to just define python-based instance variables in your subclass's init
# method.
#
# This class is used only to *define* the interface and implementation
# of an Objective-C subclass from python. It should not be used in
# any other way. If you want a python representation of the resulting
# class, create it with ObjCClass.
#
# Instances are created as a pointer to the objc object by using:
#
# myinstance = send_message('MySubclassName', 'alloc')
# myinstance = send_message(myinstance, 'init')
#
# or wrapped inside an ObjCInstance object by using:
#
# myclass = ObjCClass('MySubclassName')
# myinstance = myclass.alloc().init()
#
class ObjCSubclass:
"""Use this to create a subclass of an existing Objective-C class.
It consists primarily of function decorators which you use to add methods
to the subclass."""
def __init__(self, superclass, name, register=True):
self._imp_table = {}
self.name = name
self.objc_cls = create_subclass(superclass, name)
self._as_parameter_ = self.objc_cls
if register:
self.register()
def register(self):
"""Register the new class with the Objective-C runtime."""
objc.objc_registerClassPair(self.objc_cls)
# We can get the metaclass only after the class is registered.
self.objc_metaclass = get_metaclass(self.name)
def add_ivar(self, varname, vartype):
"""Add instance variable named varname to the subclass.
varname should be a string.
vartype is a ctypes type.
The class must be registered AFTER adding instance variables."""
return add_ivar(self.objc_cls, varname, vartype)
def add_method(self, method, name, encoding):
imp = add_method(self.objc_cls, name, method, encoding)
self._imp_table[name] = imp
# http://iphonedevelopment.blogspot.com/2008/08/dynamically-adding-class-objects.html
def add_class_method(self, method, name, encoding):
imp = add_method(self.objc_metaclass, name, method, encoding)
self._imp_table[name] = imp
def rawmethod(self, encoding):
"""Decorator for instance methods without any fancy shenanigans.
The function must have the signature f(self, cmd, *args)
where both self and cmd are just pointers to objc objects."""
# Add encodings for hidden self and cmd arguments.
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
name = f.__name__.replace('_', ':')
self.add_method(f, name, encoding)
return f
return decorator
def method(self, encoding):
"""Function decorator for instance methods."""
# Add encodings for hidden self and cmd arguments.
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
def objc_method(objc_self, objc_cmd, *args):
py_self = ObjCInstance(objc_self)
py_self.objc_cmd = objc_cmd
args = convert_method_arguments(encoding, args)
result = f(py_self, *args)
if isinstance(result, ObjCClass):
result = result.ptr.value
elif isinstance(result, ObjCInstance):
result = result.ptr.value
return result
name = f.__name__.replace('_', ':')
self.add_method(objc_method, name, encoding)
return objc_method
return decorator
def classmethod(self, encoding):
"""Function decorator for class methods."""
# Add encodings for hidden self and cmd arguments.
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
def objc_class_method(objc_cls, objc_cmd, *args):
py_cls = ObjCClass(objc_cls)
py_cls.objc_cmd = objc_cmd
args = convert_method_arguments(encoding, args)
result = f(py_cls, *args)
if isinstance(result, ObjCClass):
result = result.ptr.value
elif isinstance(result, ObjCInstance):
result = result.ptr.value
return result
name = f.__name__.replace('_', ':')
self.add_class_method(objc_class_method, name, encoding)
return objc_class_method
return decorator
######################################################################
# Instances of DeallocationObserver are associated with every
# Objective-C object that gets wrapped inside an ObjCInstance.
# Their sole purpose is to watch for when the Objective-C object
# is deallocated, and then remove the object from the dictionary
# of cached ObjCInstance objects kept by the ObjCInstance class.
#
# The methods of the class defined below are decorated with
# rawmethod() instead of method() because DeallocationObservers
# are created inside of ObjCInstance's __new__ method and we have
# to be careful to not create another ObjCInstance here (which
# happens when the usual method decorator turns the self argument
# into an ObjCInstance), or else get trapped in an infinite recursion.
class DeallocationObserver_Implementation:
DeallocationObserver = ObjCSubclass('NSObject', 'DeallocationObserver', register=False)
DeallocationObserver.add_ivar('observed_object', c_void_p)
DeallocationObserver.register()
@DeallocationObserver.rawmethod('@@')
def initWithObject_(self, cmd, anObject):
self = send_super(self, 'init')
self = self.value
set_instance_variable(self, 'observed_object', anObject, c_void_p)
return self
@DeallocationObserver.rawmethod('v')
def dealloc(self, cmd):
anObject = get_instance_variable(self, 'observed_object', c_void_p)
ObjCInstance._cached_objects.pop(anObject, None)
send_super(self, 'dealloc')
@DeallocationObserver.rawmethod('v')
def finalize(self, cmd):
# Called instead of dealloc if using garbage collection.
# (which would have to be explicitly started with
# objc_startCollectorThread(), so probably not too much reason
# to have this here, but I guess it can't hurt.)
anObject = get_instance_variable(self, 'observed_object', c_void_p)
ObjCInstance._cached_objects.pop(anObject, None)
send_super(self, 'finalize')
| [
"[email protected]"
]
| |
e78a07d5a9ac0d6375bab50be733a669fac273ff | e5b6d2e79d6593587fa8f5854def9ebf4d47a9e1 | /djangocli/wsgi.py | 8e9c0ba06187289fb8d23d2abffc8b6bcf5721d6 | []
| no_license | redeyed-archive/DjangoSiteCheckerExample | 35756664f0b9667e151d4608c6ebd5d279523534 | e53b2fad15d2a768e75bc853c69113c0d54c2ed2 | refs/heads/master | 2023-03-17T06:22:46.129989 | 2019-02-17T05:48:43 | 2019-02-17T05:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for djangocli project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangocli.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
16d37fe91e6e6174ecc5ebf06d10063687980ee8 | 97e54e4b18c1d696926678f1e320b2fc9cef5436 | /jaraco/text/strip-prefix.py | 761717a9b9e1f837eeacf0e888822f6fad881361 | [
"MIT"
]
| permissive | jaraco/jaraco.text | 8ff2d7d49b3af0ca5e98c1cb337562bde9d3ba72 | 460dc329b799b88adb32ea95435d3a9e03cbdc00 | refs/heads/main | 2023-09-04T06:57:23.624303 | 2023-07-30T01:01:42 | 2023-07-30T01:01:42 | 48,551,451 | 15 | 8 | MIT | 2023-07-30T14:52:20 | 2015-12-24T17:20:06 | Python | UTF-8 | Python | false | false | 412 | py | import sys
import autocommand
from jaraco.text import Stripper
def strip_prefix():
r"""
Strip any common prefix from stdin.
>>> import io, pytest
>>> getfixture('monkeypatch').setattr('sys.stdin', io.StringIO('abcdef\nabc123'))
>>> strip_prefix()
def
123
"""
sys.stdout.writelines(Stripper.strip_prefix(sys.stdin).lines)
autocommand.autocommand(__name__)(strip_prefix)
| [
"[email protected]"
]
| |
2bc4f1ab2384a7e76f74641976a53715c495cc2a | b0c528e2650dec1ff011215537fc5ea536627966 | /main/urls.py | 58a80f586c83f786718a9f83bb105e9b11210f7e | []
| no_license | trinhgliedt/Python_Great_number_game | 9cb84a1bd95333df15140cc2e1c466d0911b7b19 | 8358c84012981b8dfaafb9017fc9a92450a98e7b | refs/heads/master | 2023-02-08T21:14:23.124896 | 2021-01-01T06:18:02 | 2021-01-01T06:18:02 | 325,926,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('result/', views.process_form),
] | [
"[email protected]"
]
| |
dc72573a696b1184ae2cf899bda0ecd956d49f9d | 0931b32140ba932b3ba02f5109a087c6c70a244d | /frappe/desk/desk_page.py | fc7281e06c18d9766c2efcb8f939fa6938c5c494 | [
"MIT"
]
| permissive | cstkyrilos/frappe | b60ed4e95ce929c74c2fc46000080d10b343190e | 27d9306bc5924c11c2749503454cc6d11a8cc654 | refs/heads/main | 2023-03-23T10:35:42.732385 | 2021-03-22T21:55:58 | 2021-03-22T21:55:58 | 350,292,784 | 0 | 0 | MIT | 2021-03-22T10:01:08 | 2021-03-22T10:01:07 | null | UTF-8 | Python | false | false | 1,569 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.translate import send_translations
@frappe.whitelist()
def get(name):
"""
Return the :term:`doclist` of the `Page` specified by `name`
"""
page = frappe.get_doc('Page', name)
if page.is_permitted():
page.load_assets()
docs = frappe._dict(page.as_dict())
if getattr(page, '_dynamic_page', None):
docs['_dynamic_page'] = 1
return docs
else:
frappe.response['403'] = 1
		raise frappe.PermissionError('No read permission for Page %s' %
			(page.title or name))
@frappe.whitelist(allow_guest=True)
def getpage():
"""
Load the page from `frappe.form` and send it via `frappe.response`
"""
page = frappe.form_dict.get('name')
doc = get(page)
# load translations
if frappe.lang != "en":
send_translations(frappe.get_lang_dict("page", page))
frappe.response.docs.append(doc)
def has_permission(page):
if frappe.session.user == "Administrator" or "System Manager" in frappe.get_roles():
return True
page_roles = [d.role for d in page.get("roles")]
if page_roles:
if frappe.session.user == "Guest" and "Guest" not in page_roles:
return False
elif not set(page_roles).intersection(set(frappe.get_roles())):
# check if roles match
return False
if not frappe.has_permission("Page", ptype="read", doc=page):
# check if there are any user_permissions
return False
else:
# hack for home pages! if no Has Roles, allow everyone to see!
return True
| [
"[email protected]"
]
| |
c9a91552c1b8f4b8a2ff609676b81cd11cf08ead | 48df99f4358be7a51becd3d685e1ec825d295ba4 | /dentalstate/models.py | 36c642462ac4cabb367d2fe592fdd0be94d557a6 | [
"Apache-2.0"
]
| permissive | kuyesu/tscharts | 21d2aedeea4aad3b126defaa1703f60f44f14de6 | 9ed4e4bb0a6d296e1156afca5b55d0f71dfb894b | refs/heads/master | 2023-06-03T04:50:15.282855 | 2021-06-12T19:50:51 | 2021-06-12T19:50:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py | #(C) Copyright Syd Logan 2020
#(C) Copyright Thousand Smiles Foundation 2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import unicode_literals
from django.db import models
from patient.models import Patient
from clinic.models import Clinic
from dentalcdt.models import DentalCDT
class DentalState(models.Model):
clinic = models.ForeignKey(Clinic)
patient = models.ForeignKey(Patient)
username = models.CharField(max_length=64, default = "") # user supplied name
time = models.DateTimeField(auto_now=True)
'''
tooth location is relative to location (top or bottom). Zero
indicates the treatment applies to whole mouth (and location
    is ignored).
'''
tooth = models.IntegerField(default = 0)
DENTAL_LOCATION_TOP = 't'
DENTAL_LOCATION_BOTTOM = 'b'
DENTAL_LOCATION_CHOICES = ((DENTAL_LOCATION_TOP, "top"), (DENTAL_LOCATION_BOTTOM, "bottom"))
location = models.CharField(max_length = 1, choices = DENTAL_LOCATION_CHOICES, default = DENTAL_LOCATION_TOP)
code = models.ForeignKey(DentalCDT)
DENTAL_STATE_NONE = 'n'
DENTAL_STATE_UNTREATED = 'u'
DENTAL_STATE_TREATED = 't'
DENTAL_STATE_OTHER = 'o'
DENTAL_STATE_MISSING = 'm'
DENTAL_STATE_CHOICES = ((DENTAL_STATE_MISSING, "missing"), (DENTAL_STATE_NONE, "none"), (DENTAL_STATE_UNTREATED, "untreated"), (DENTAL_STATE_TREATED, "treated"), (DENTAL_STATE_OTHER, "other"))
state = models.CharField(max_length = 1, choices = DENTAL_STATE_CHOICES, default = DENTAL_STATE_NONE)
DENTAL_SURFACE_NONE = 'n'
DENTAL_SURFACE_BUCCAL = 'b'
DENTAL_SURFACE_LINGUAL = 'u'
DENTAL_SURFACE_MESIAL = 'm'
DENTAL_SURFACE_OCCLUSAL = 'c'
DENTAL_SURFACE_LABIAL = 'a'
DENTAL_SURFACE_INCISAL = 'i'
DENTAL_SURFACE_WHOLE_MOUTH_OR_VISIT = 'w'
DENTAL_SURFACE_OTHER = 'o'
DENTAL_SURFACE_CHOICES = ((DENTAL_SURFACE_NONE, "none"), (DENTAL_SURFACE_BUCCAL, "buccal"), (DENTAL_SURFACE_LINGUAL, "lingual"), (DENTAL_SURFACE_MESIAL, "mesial"), (DENTAL_SURFACE_OCCLUSAL, 'occlusal'), (DENTAL_SURFACE_LABIAL, 'labial'), (DENTAL_SURFACE_INCISAL, 'incisal'), (DENTAL_SURFACE_WHOLE_MOUTH_OR_VISIT, 'whole_mouth_or_visit'), (DENTAL_SURFACE_OTHER, 'other'))
# here we define a charfield as a string to hold a set of surfaces
# this won't work with forms, but since we are just a REST API, doesn't
    # matter much. The DENTAL_SURFACE_CHOICES tuple will be useful as we
# serialize/unserialize values between the client and the model. We
# could also have done this as an integer bitmask, but a string of chars
# facilitates debugging.
surface = models.CharField(max_length = 10, choices = DENTAL_SURFACE_CHOICES, default = DENTAL_SURFACE_NONE)
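    # e.g. (illustrative) surface = "bm" would record both the buccal ('b')
    # and mesial ('m') surfaces in a single string.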
comment = models.TextField(default = "")
| [
"[email protected]"
]
| |
a650fcc83f32dd0898f953ec683b1b54eb77b733 | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /guvi3.py | 6dd143242eb16cf5b6ec3091f1ddba172fd1f82f | []
| no_license | unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | def check(n):
    # Sum the decimal digits of n; return the sum if its first digit is
    # '1', otherwise return False.
    count = 0
    for i in str(n):
        count += int(i)
    if str(count)[0] == '1':
        return count
    else:
        return False
n = int(input())
l = [8]      # collected results (seeded with 8)
c = 0        # number of terms generated
diff = 2     # step between consecutive terms, grows by 2 each iteration
curr = 800
while curr + diff <= n:
    curr += diff
    w = check(curr)
    if w is not False:
        l.append(w)
    diff += 2
    c += 1
print(*l)
print(c) | [
"[email protected]"
]
| |
24ebdd333e00edb3f74ccd4677e9ab43d5c096e3 | c03d7a4e03c581d4be98b6363003cddb9c213ec0 | /pets/migrations/0007_auto_20180910_0016.py | 6228879999e3df790cc687d09ad854b059402325 | []
| no_license | hernandavidc/plataforma | b333e4f06290713072d8dc609c27d4ce8af1d9df | 4316e2a59db76e74f1e6106958631ad4a7a653c7 | refs/heads/master | 2020-04-06T17:08:21.019355 | 2019-04-09T04:41:00 | 2019-04-09T04:41:00 | 157,648,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | # Generated by Django 2.1 on 2018-09-10 05:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pets', '0006_auto_20180910_0011'),
]
operations = [
migrations.RemoveField(
model_name='mascota',
name='dueno',
),
migrations.AddField(
model_name='mascota',
name='dueno',
field=models.ForeignKey(default=3, on_delete=django.db.models.deletion.PROTECT, related_name='get_pets', to=settings.AUTH_USER_MODEL, verbose_name='Dueños'),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
a3eefa3f23a8dfe00c158170d73f421c29d1e373 | c79737296bdf4b3a969ab5ceb69198daf66def0e | /python/solutii/bogdan_iacoboae/caesar/caesar.py | 315bde89ddbea8afd9d78e0152861ba4b9c51fa0 | [
"MIT"
]
| permissive | ilieandrei98/labs | 96c749072b6455b34dc5f0bd3bb20f7a0e95b706 | cda09cbf5352e88909f51546c2eb360e1ff2bec1 | refs/heads/master | 2020-04-26T03:23:48.220151 | 2019-03-01T08:56:43 | 2019-03-01T08:56:43 | 173,265,757 | 0 | 0 | MIT | 2019-03-01T08:37:14 | 2019-03-01T08:37:14 | null | UTF-8 | Python | false | false | 1,939 | py | # coding=utf-8
# from __future__ import print_function
"""Împăratul a primit serie de mesaje importante pe care este
important să le descifreze cât mai repede.
Din păcate mesagerul nu a apucat să îi spună împăratul care au fost
cheile alese pentru fiecare mesaj și tu ai fost ales să descifrezi
misterul.
Informații:
În criptografie, cifrul lui Caesar este o metodă simplă de a cripta
un mesaj prin înlocuirea fiecărei litere cu litera de pe poziția aflată
la un n pași de ea în alfabet (unde este n este un număr întreg cunoscut
"""
def afla_pasul(mesaj):
""" Afla pasul encodarii """
first_letter = 'a'
my_letter = mesaj[0]
return ord(my_letter) - ord(first_letter)
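# For example: if an encrypted message starts with 'd', the inferred key is
# ord('d') - ord('a') = 3 -- the scheme assumes every plaintext starts with 'a'.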
def real_letter(character, key):
""" Afla caracterul """
if character.isalpha():
character = ord(character)-key
if character < ord('a'):
character = ord('z') - abs(ord('a') - character) + 1
return chr(character)
else:
return character
def decripteaza_mesajul(mesaj, fisier):
""" Decriptarea mesajului """
key = afla_pasul(mesaj)
puncte = 0.
for index in mesaj:
if index == ".":
if puncte == 1:
print ".\n"
fisier.write("\n")
else:
puncte = puncte + 1
print ".",
fisier.write(".")
else:
print real_letter(index, key),
fisier.write(real_letter(index, key))
def main():
""" Main function docstring """
try:
fisier = open("../../../date_intrare/mesaje.secret", "r")
towrite = open("../../../date_iesire/mesaje.decodat", "w")
mesaje = fisier.read()
fisier.close()
except IOError:
print "Nu am putut obtine mesajele."
return
for mesaj in mesaje.splitlines():
decripteaza_mesajul(mesaj, towrite)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
989528ac7820dca22e21aec571ce43ed89e4c1a0 | e3e3071e5f01f75ba3716ac229abef484e8c051a | /mnist.py | f9639c963cb10a4bdcfc9a82659ccfe73a01289c | []
| no_license | schmit/dictlearning | 9efc9e15e73a99f840db71d81925dbe7c0bd22d0 | 14c37631aa4d330d58fc174b2294866e2484d5d0 | refs/heads/master | 2021-01-10T04:06:33.899917 | 2013-03-15T18:40:33 | 2013-03-15T18:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | import numpy as np
import scipy.io as sio
import dictionary
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from multiOGD import *
from kernels import *
import sys
import argparse
import utility
sys.stdout = utility.Logger()
print 'Starting run of MNIST.py'
parser = argparse.ArgumentParser(description=\
'MNIST: Encode sparse dictionary and fit model')
parser.add_argument('dict_fit',\
help="model for fitting dictionary (linreg, lasso, lars)")
parser.add_argument('dict_init',\
help='initialization of dictionary')
parser.add_argument('dict_atoms',\
help='nr of atoms in dictionary')
parser.add_argument('dict_reg',\
help='regularization in sparse encoding')
parser.add_argument('mod_reg', \
help='regularization svm fit')
params = parser.parse_args(sys.argv[1:])
DICT_FIT = params.dict_fit
DICT_INIT = params.dict_init
DICT_ATOMS = int(params.dict_atoms)
DICT_REG = float(params.dict_reg)
MOD_REG = float(params.mod_reg)
print params
def showimage(x):
img = np.reshape(x, (28, 28), order = 'F')
imgplot = plt.imshow(img)
plt.show()
mnist_train = sio.loadmat('./data/mnist/MNIST_train.mat')
mnist_test = sio.loadmat('./data/mnist/MNIST_test.mat')
X_train = mnist_train['X'][0][0][2].transpose()
y_train = mnist_train['y']
X_test = mnist_test['Xtest'].transpose()
y_test = mnist_test['ytest']
dim = X_train.shape[1]
## Dictionary
lasso_d = dictionary.Dictionary(dim, DICT_ATOMS, DICT_FIT, DICT_REG, \
DICT_INIT)
lasso_d.batchtrain(X_train)
# Save dictionary atoms as images
#lasso_d.dimagesave((28, 28), 'mnist')
# Find reconstructions
alphas_train = lasso_d.batchreconstruction(X_train, \
'mnist_train_s')
alphas_test = lasso_d.batchreconstruction(X_test, \
'mnist_test_s')
## Classification
ogd_m = multiOGD(10, DICT_ATOMS, MOD_REG)
ogd_m.train(alphas_train, y_train)
ogd_m.predict(alphas_test, y_test)
print 'Run of MNIST.py is complete!'
'''
Atoms: 200
Reg: 0.05 too much
'''
| [
"[email protected]"
]
| |
c735627231131ebf3d41f8b0d0b2c4e1b2f91659 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/kubevirt_vm.py | ddcab20e9863af4105ed6489bcd7c4e7021b0e5a | [
"GPL-3.0-only",
"MIT",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
]
| permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 16,645 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_vm
short_description: Manage KubeVirt virtual machine
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machines.
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
- "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
- "I(absent) - Remove a virtual machine."
- "I(running) - Create or update a virtual machine and run it."
- "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
default: "present"
choices:
- present
- absent
- running
- stopped
type: str
name:
description:
- Name of the virtual machine.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine exists.
required: true
type: str
ephemeral:
description:
            - If (true), an ephemeral virtual machine will be created. When destroyed, it won't be accessible again.
- Works only with C(state) I(present) and I(absent).
type: bool
default: false
datavolumes:
description:
- "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user."
type: list
template:
description:
- "Name of Template to be used in creation of a virtual machine."
type: str
template_parameters:
description:
- "New values of parameters from Template."
type: dict
extends_documentation_fragment:
- community.kubernetes.k8s_auth_options
- community.general.kubevirt_vm_options
- community.general.kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Start virtual machine 'myvm'
kubevirt_vm:
state: running
name: myvm
namespace: vms
- name: Create virtual machine 'myvm' and start it
kubevirt_vm:
state: running
name: myvm
namespace: vms
memory: 64Mi
cpu_cores: 1
bootloader: efi
smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
cpu_model: Conroe
headless: true
hugepage_size: 2Mi
tablets:
- bus: virtio
name: tablet1
cpu_limit: 3
cpu_shares: 2
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Create virtual machine 'myvm' with multus network interface
kubevirt_vm:
name: myvm
namespace: vms
memory: 512M
interfaces:
- name: default
bridge: {}
network:
pod: {}
- name: mynet
bridge: {}
network:
multus:
networkName: mynetconf
- name: Combine inline definition with Ansible parameters
kubevirt_vm:
# Kubernetes specification:
definition:
metadata:
labels:
app: galaxy
service: web
origin: vmware
# Ansible parameters:
state: running
name: myvm
namespace: vms
memory: 64M
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start ephemeral virtual machine 'myvm' and wait to be running
kubevirt_vm:
ephemeral: true
state: running
wait: true
wait_timeout: 180
name: myvm
namespace: vms
memory: 64M
labels:
kubevirt.io/vm: myvm
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start fedora vm with cloud init
kubevirt_vm:
state: running
wait: true
name: myvm
namespace: vms
memory: 1024M
cloud_init_nocloud:
userData: |-
#cloud-config
password: fedora
chpasswd: { expire: False }
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/fedora-cloud-container-disk-demo:latest
path: /disk/fedora.qcow2
disk:
bus: virtio
node_affinity:
soft:
- weight: 1
term:
match_expressions:
- key: security
operator: In
values:
- S2
- name: Create virtual machine with datavolume and specify node affinity
kubevirt_vm:
name: myvm
namespace: default
memory: 1024Mi
datavolumes:
- name: mydv
source:
http:
url: https://url/disk.qcow2
pvc:
accessModes:
- ReadWriteOnce
storage: 5Gi
node_affinity:
hard:
- term:
match_expressions:
- key: security
operator: In
values:
- S1
- name: Remove virtual machine 'myvm'
kubevirt_vm:
state: absent
name: myvm
namespace: vms
'''
RETURN = '''
kubevirt_vm:
description:
- The virtual machine dictionary specification returned by the API.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible_collections.community.general.plugins.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
VM_SPEC_DEF_ARG_SPEC
)
VM_ARG_SPEC = {
'ephemeral': {'type': 'bool', 'default': False},
'state': {
'type': 'str',
'choices': [
'present', 'absent', 'running', 'stopped'
],
'default': 'present'
},
'datavolumes': {'type': 'list'},
'template': {'type': 'str'},
'template_parameters': {'type': 'dict'},
}
# Which params (can) modify 'spec:' contents of a VM:
VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
class KubeVirtVM(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(VM_COMMON_ARG_SPEC)
argument_spec.update(VM_ARG_SPEC)
return argument_spec
@staticmethod
def fix_serialization(obj):
if obj and hasattr(obj, 'to_dict'):
return obj.to_dict()
return obj
def _wait_for_vmi_running(self):
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
phase = status.get('phase', None)
if phase == 'Running':
return entity
self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
def _wait_for_vm_state(self, new_state):
if new_state == 'running':
want_created = want_ready = True
else:
want_created = want_ready = False
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
created = status.get('created', False)
ready = status.get('ready', False)
if (created, ready) == (want_created, want_ready):
return entity
self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
"Maybe try a higher wait_timeout value?".format(new_state))
def manage_vm_state(self, new_state, already_changed):
new_running = True if new_state == 'running' else False
changed = False
k8s_obj = {}
if not already_changed:
k8s_obj = self.get_resource(self._kind_resource)
if not k8s_obj:
self.fail("VirtualMachine object disappeared during module operation, aborting.")
if k8s_obj.spec.get('running', False) == new_running:
return False, k8s_obj
newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
self.name, self.namespace, merge_type='merge')
if err:
self.fail_json(**err)
else:
changed = True
if self.params.get('wait'):
k8s_obj = self._wait_for_vm_state(new_state)
return changed, k8s_obj
def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
def set_template_default(default_name, default_name_index, definition_spec):
default_value = proccess_template['metadata']['annotations'][default_name]
if default_value:
values = definition_spec[default_name_index]
default_values = [d for d in values if d.get('name') == default_value]
defaults[default_name_index] = default_values
if definition_spec[default_name_index] is None:
definition_spec[default_name_index] = []
definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
devices = processedtemplate['spec']['template']['spec']['domain']['devices']
spec = processedtemplate['spec']['template']['spec']
set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
set_template_default('defaults.template.cnv.io/network', 'networks', spec)
def construct_definition(self, kind, our_state, ephemeral):
definition = virtdict()
processedtemplate = {}
# Construct the API object definition:
defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
vm_template = self.params.get('template')
if vm_template:
# Find the template the VM should be created from:
template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
# Set proper template values taken from module option 'template_parameters':
            # 'template_parameters' defaults to None when not supplied, so guard with `or {}`
            for k, v in (self.params.get('template_parameters') or {}).items():
for parameter in proccess_template.parameters:
if parameter.name == k:
parameter.value = v
# Proccess the template:
processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
# Process defaults of the template:
self._process_template_defaults(proccess_template, processedtemplate, defaults)
if not ephemeral:
definition['spec']['running'] = our_state == 'running'
template = definition if ephemeral else definition['spec']['template']
template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
return self.merge_dicts(definition, processedtemplate)
def execute_module(self):
# Parse parameters specific to this module:
ephemeral = self.params.get('ephemeral')
k8s_state = our_state = self.params.get('state')
kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
_used_params = [name for name in self.params if self.params[name] is not None]
# Is 'spec:' getting changed?
vm_spec_change = True if set(VM_SPEC_PARAMS).intersection(_used_params) else False
changed = False
crud_executed = False
method = ''
# Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
if ephemeral:
# Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
if our_state == 'running':
self.params['state'] = k8s_state = 'present'
elif our_state == 'stopped':
self.params['state'] = k8s_state = 'absent'
else:
if our_state != 'absent':
self.params['state'] = k8s_state = 'present'
# Start with fetching the current object to make sure it exists
# If it does, but we end up not performing any operations on it, at least we'll be able to return
# its current contents as part of the final json
self.client = self.get_api_client()
self._kind_resource = self.find_supported_resource(kind)
k8s_obj = self.get_resource(self._kind_resource)
if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
# If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
# Also check_mode always warrants a CRUD, as that'll produce a sane result
if vm_spec_change or k8s_state == 'absent' or self.check_mode:
definition = self.construct_definition(kind, our_state, ephemeral)
result = self.execute_crud(kind, definition)
changed = result['changed']
k8s_obj = result['result']
method = result['method']
crud_executed = True
if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
# Waiting for k8s_state==absent is handled inside execute_crud()
k8s_obj = self._wait_for_vmi_running()
if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
# State==present/absent doesn't involve any additional VMI state management and is fully
# handled inside execute_crud() (including wait logic)
patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
changed = changed or patched
if changed:
method = method or 'patch'
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_vm': self.fix_serialization(k8s_obj),
'method': method
})
def main():
module = KubeVirtVM()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
a98677c79904384ea4f9182f45560317822060b0 | 1eb50735e3861cde4bca8f4feab5afc730003078 | /future/flags_threadpool.py | 68c2812cf5337155961672ac7f2d7ec0945eca02 | []
| no_license | chinaylssly/fluent-python | 442e6458215e3c5a74c4d34d020b714da108f81d | 126c1d3e7853628c4a2c0e6ff475362b7d7fe33a | refs/heads/master | 2020-04-17T13:58:03.534184 | 2019-02-01T14:40:42 | 2019-02-01T14:40:42 | 166,637,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # -*- coding: utf-8 -*-
import random,sys
from time import sleep,time
from concurrent import futures
MAX_WORKERS= 20
tl=[i*0.01 for i in range(20)]
def do_one(t=0.2):
# print (t)
sleep(t)
return t
def do_many(tl=tl):
workers=min(len(tl),MAX_WORKERS)
with futures.ThreadPoolExecutor(workers) as executor:
        '''
        executor.__exit__() calls executor.shutdown(wait=True), which
        blocks until all the worker threads have finished.
        '''
res=executor.map(do_one,tl)
return len(list(res))
## Return the number of results obtained; if any worker raised an exception,
## it is re-raised here, just as when next() is called implicitly on the
## iterator to fetch the corresponding return value.
def main(do_many=do_many):
t0=time()
count=do_many()
t=time()-t0
    msg = 'executed {:2d} tasks in {:.2f} s'
print (msg.format(count,t))
if __name__ =='__main__':
main()
| [
"[email protected]"
]
| |
25a966ceab5d2deb560acac18c7d2c9729e93236 | be999cad30c28d0370a57b73057cb734fdffbf23 | /workshop_corona19/corona19_07_여행력.py | 899ed6dd8d5552a0e7aa1dc68988569ffc65f5fa | []
| no_license | choicoding1026/ML | 341181d5b1088f48fa0336a6db773ed7cfbecc21 | 69db5fcd559a7a41ce9fb0ece543d3cf9b44d5bb | refs/heads/master | 2023-01-01T07:35:09.655664 | 2020-10-15T07:41:50 | 2020-10-15T07:41:50 | 303,907,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,833 | py | '''
Seoul COVID-19 data collection and analysis
26. Travel history ('여행력')
'''
import pandas as pd
import numpy as np
file_name = "seoul_corona_10_11_.csv"
df = pd.read_csv(file_name, encoding="utf-8") # encoding that handles the Korean text
# 1. Sort by '연번' (serial number) -- note that ascending=False actually sorts in descending order
df = df.sort_values(by="연번", ascending=False)
print("1. sorted by '연번' (serial number):\n", df.head())
# 2. Frequency of '확진일' (confirmation date) ==> shows on which dates the most cases were confirmed
# value_counts() returns the counts already sorted in descending order
print("2. frequency of '확진일' (confirmation date): \n", df["확진일"].value_counts())
# 3. Add a '확진일자' (confirmation date) column => date format such as 2020-10-11
# The existing '확진일' values are plain strings, so they must be converted to dates.
'''
1) "10.11"      --> "10-11"
2) "10-11"      --> "2020-10-11"
3) "2020-10-11" --> converted from string to a date (the pd.to_datetime function)
4) df["확진일자"] = the resulting dates
'''
df["확진일자"] = pd.to_datetime("2020-"+df["확진일"].str.replace(".", "-"))
print("3. '확진일자' 컬럼 추가: \n", df.head())
# 4. '확진일자' 날짜 데이터 컬럼 이용하여 '월' 컬럼 추가
df["월"] = df["확진일자"].dt.month
print("4. '월' 컬럼 추가: \n", df.head())
# 5. '확진일자' 날짜 데이터 컬럼 이용하여 '주(week)' 컬럼 추가
# 해당년도의 몇번째 주(week)인지 반환
df["주"] = df["확진일자"].dt.isocalendar().week
print("5. '주' 컬럼 추가: \n", df.head())
# 6. '확진일자' 날짜 데이터 컬럼 이용하여 '월-일' 컬럼 추가
# m = df["확진일자"].dt.month
# d = df["확진일자"].dt.day
# df["월-일"] = m.astype(str) + "-" + d.astype(str)
df["월-일"] = df["확진일자"].astype(str).map(lambda x:x[-5:]) # map함수는 데이터가공시 사용
print("6. '월-일' 컬럼 추가: \n", df.head())
print("6. '월-일' 컬럼 추가: \n", df.tail())
########################################################################
# 26. Travel history ('여행력')
print(df["여행력"])
print(df["여행력"].unique())
print(df["여행력"].value_counts())
'''
1. Treat '-' as missing data
   ==> replace "-" with np.nan
2. Normalize to common names
   '아랍에미리트', 'UAE' ===> 아랍에미리트 (UAE)
   '중국 청도', '우한교민', '우한 교민', '중국 우한시', '중국' ==> 중국 (China)
   '프랑스, 스페인', '스페인, 프랑스' ==> 프랑스, 스페인 (France, Spain)
   체코, 헝가리, 오스트리아, 이탈리아, 프랑스, 모로코, 독일, 스페인, 영국, 폴란드, 터키, 아일랜드 ==> 유럽 (Europe)
   브라질, 아르헨티아, 칠레, 볼리비아, 멕시코, 페루 ==> 남미 (South America)
'''
## Normalize to common names, then visualize
df["해외"]=df["여행력"]
print(df["해외"].str.contains('아랍에미리트|UAE'))
df.loc[df["해외"].str.contains('아랍에미리트|UAE'), "해외"] = "아랍에미리트"
df.loc[df["해외"].str.contains('우한|중국'), "해외"] = "중국"
df.loc[df["해외"].
str.contains('체코|헝가리|오스트리아|이탈리아|프랑스|모로코|독일,스페인|영국\폴란드|터키|아일랜드'),
"해외"] = "유럽"
df.loc[df["해외"].str.contains('브라질|아르헨티아|칠레|볼리비아|멕시코|페루'), "해외"] = "남미"
## "-"을 np.nan 으로 변경 처리
df["해외"]=df["해외"].replace("-", np.nan)
print(df["해외"].unique())
print(df["해외"].value_counts())
# Visualize only the top 15
import matplotlib.pyplot as plt
plt.rc("font", family="Malgun Gothic") # 한글 처리
# plt.rc("figure", titlesize=4) # title 크기
plt.rc("ytick", labelsize=8) # y축 라벨 크기
plt.rc("xtick", labelsize=8) # x축 라벨 크기
plt.style.use("fivethirtyeight")
g = df["해외"].value_counts().head(15).sort_values().plot.barh(title="xxxx", figsize=(16,4))
plt.show()
| [
"[email protected]"
]
| |
218da19b57c5712555289e34068f1467c2a0dd69 | 16047f965a69893a8cd2c8d18fbd7b9c86a07eb3 | /src/networkx/algorithms/tree/mst.py | b7b8c1d9326cb3ad5fb75cc1770917b253928b1e | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT"
]
| permissive | guctum/aws-kube-codesuite | 9ce2cc02fe5fa15c2e175fb697138014fb162f1e | 5d62beaadc13bec745ac7d2fc18f07805e91cef3 | refs/heads/master | 2021-05-24T10:08:00.651840 | 2020-04-23T20:21:46 | 2020-04-23T20:21:46 | 253,511,083 | 0 | 0 | Apache-2.0 | 2020-04-06T13:48:14 | 2020-04-06T13:48:13 | null | UTF-8 | Python | false | false | 21,167 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2017 NetworkX Developers
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Loïc Séguin-C. <[email protected]>
# All rights reserved.
# BSD license.
"""
Algorithms for calculating min/max spanning trees/forests.
"""
from heapq import heappop, heappush
from operator import itemgetter
from itertools import count
from math import isnan
import networkx as nx
from networkx.utils import UnionFind, not_implemented_for
__all__ = [
'minimum_spanning_edges', 'maximum_spanning_edges',
'minimum_spanning_tree', 'maximum_spanning_tree',
]
@not_implemented_for('multigraph')
def boruvka_mst_edges(G, minimum=True, weight='weight',
keys=False, data=True, ignore_nan=False):
"""Iterate over edges of a Borůvka's algorithm min/max spanning tree.
Parameters
----------
G : NetworkX Graph
The edges of `G` must have distinct weights,
otherwise the edges may not form a tree.
minimum : bool (default: True)
Find the minimum (True) or maximum (False) spanning tree.
weight : string (default: 'weight')
The name of the edge attribute holding the edge weights.
    keys : bool (default: False)
This argument is ignored since this function is not
implemented for multigraphs; it exists only for consistency
with the other minimum spanning tree functions.
data : bool (default: True)
Flag for whether to yield edge attribute dicts.
If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
If False, yield edges `(u, v)`.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
"""
# Initialize a forest, assuming initially that it is the discrete
# partition of the nodes of the graph.
forest = UnionFind(G)
def best_edge(component):
"""Returns the optimum (minimum or maximum) edge on the edge
boundary of the given set of nodes.
A return value of ``None`` indicates an empty boundary.
"""
sign = 1 if minimum else -1
minwt = float('inf')
boundary = None
for e in nx.edge_boundary(G, component, data=True):
wt = e[-1].get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % (e,))
if wt < minwt:
minwt = wt
boundary = e
return boundary
# Determine the optimum edge in the edge boundary of each component
# in the forest.
best_edges = (best_edge(component) for component in forest.to_sets())
best_edges = [edge for edge in best_edges if edge is not None]
# If each entry was ``None``, that means the graph was disconnected,
# so we are done generating the forest.
while best_edges:
# Determine the optimum edge in the edge boundary of each
# component in the forest.
#
# This must be a sequence, not an iterator. In this list, the
# same edge may appear twice, in different orientations (but
# that's okay, since a union operation will be called on the
# endpoints the first time it is seen, but not the second time).
#
# Any ``None`` indicates that the edge boundary for that
# component was empty, so that part of the forest has been
# completed.
#
# TODO This can be parallelized, both in the outer loop over
# each component in the forest and in the computation of the
# minimum. (Same goes for the identical lines outside the loop.)
best_edges = (best_edge(component) for component in forest.to_sets())
best_edges = [edge for edge in best_edges if edge is not None]
# Join trees in the forest using the best edges, and yield that
# edge, since it is part of the spanning tree.
#
# TODO This loop can be parallelized, to an extent (the union
# operation must be atomic).
for u, v, d in best_edges:
if forest[u] != forest[v]:
if data:
yield u, v, d
else:
yield u, v
forest.union(u, v)
def kruskal_mst_edges(G, minimum, weight='weight',
keys=True, data=True, ignore_nan=False):
"""Iterate over edges of a Kruskal's algorithm min/max spanning tree.
Parameters
----------
G : NetworkX Graph
The graph holding the tree of interest.
    minimum : bool
Find the minimum (True) or maximum (False) spanning tree.
weight : string (default: 'weight')
The name of the edge attribute holding the edge weights.
keys : bool (default: True)
        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
Otherwise `keys` is ignored.
data : bool (default: True)
Flag for whether to yield edge attribute dicts.
If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
If False, yield edges `(u, v)`.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
"""
subtrees = UnionFind()
if G.is_multigraph():
edges = G.edges(keys=True, data=True)
def filter_nan_edges(edges=edges, weight=weight):
sign = 1 if minimum else -1
for u, v, k, d in edges:
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
                    raise ValueError(msg % ((u, v, k, d),))
yield wt, u, v, k, d
else:
edges = G.edges(data=True)
def filter_nan_edges(edges=edges, weight=weight):
sign = 1 if minimum else -1
for u, v, d in edges:
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % ((u, v, d),))
yield wt, u, v, d
edges = sorted(filter_nan_edges(), key=itemgetter(0))
# Multigraphs need to handle edge keys in addition to edge data.
if G.is_multigraph():
for wt, u, v, k, d in edges:
if subtrees[u] != subtrees[v]:
if keys:
if data:
yield u, v, k, d
else:
yield u, v, k
else:
if data:
yield u, v, d
else:
yield u, v
subtrees.union(u, v)
else:
for wt, u, v, d in edges:
if subtrees[u] != subtrees[v]:
if data:
yield (u, v, d)
else:
yield (u, v)
subtrees.union(u, v)
def prim_mst_edges(G, minimum, weight='weight',
keys=True, data=True, ignore_nan=False):
"""Iterate over edges of Prim's algorithm min/max spanning tree.
Parameters
----------
G : NetworkX Graph
The graph holding the tree of interest.
    minimum : bool
Find the minimum (True) or maximum (False) spanning tree.
weight : string (default: 'weight')
The name of the edge attribute holding the edge weights.
keys : bool (default: True)
        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
Otherwise `keys` is ignored.
data : bool (default: True)
Flag for whether to yield edge attribute dicts.
If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
If False, yield edges `(u, v)`.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
"""
is_multigraph = G.is_multigraph()
push = heappush
pop = heappop
nodes = list(G)
c = count()
sign = 1 if minimum else -1
while nodes:
u = nodes.pop(0)
frontier = []
visited = [u]
if is_multigraph:
for v, keydict in G.adj[u].items():
for k, d in keydict.items():
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % ((u, v, k, d),))
push(frontier, (wt, next(c), u, v, k, d))
else:
for v, d in G.adj[u].items():
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % ((u, v, d),))
push(frontier, (wt, next(c), u, v, d))
while frontier:
if is_multigraph:
W, _, u, v, k, d = pop(frontier)
else:
W, _, u, v, d = pop(frontier)
if v in visited:
continue
# Multigraphs need to handle edge keys in addition to edge data.
if is_multigraph and keys:
if data:
yield u, v, k, d
else:
yield u, v, k
else:
if data:
yield u, v, d
else:
yield u, v
# update frontier
visited.append(v)
nodes.remove(v)
if is_multigraph:
for w, keydict in G.adj[v].items():
if w in visited:
continue
for k2, d2 in keydict.items():
new_weight = d2.get(weight, 1) * sign
push(frontier, (new_weight, next(c), v, w, k2, d2))
else:
for w, d2 in G.adj[v].items():
if w in visited:
continue
new_weight = d2.get(weight, 1) * sign
push(frontier, (new_weight, next(c), v, w, d2))
ALGORITHMS = {
'boruvka': boruvka_mst_edges,
u'borůvka': boruvka_mst_edges,
'kruskal': kruskal_mst_edges,
'prim': prim_mst_edges
}
@not_implemented_for('directed')
def minimum_spanning_edges(G, algorithm='kruskal', weight='weight',
keys=True, data=True, ignore_nan=False):
"""Generate edges in a minimum spanning forest of an undirected
weighted graph.
A minimum spanning tree is a subgraph of the graph (a tree)
with the minimum sum of edge weights. A spanning forest is a
union of the spanning trees for each connected component of the graph.
Parameters
----------
G : undirected Graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
algorithm : string
The algorithm to use when finding a minimum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.
weight : string
Edge data key to use for weight (default 'weight').
keys : bool
Whether to yield edge key in multigraphs in addition to the edge.
If `G` is not a multigraph, this is ignored.
data : bool, optional
If True yield the edge data along with the edge.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
edges : iterator
        An iterator over edges in a minimum spanning tree of `G`.
Edges connecting nodes `u` and `v` are represented as tuples:
`(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`
If `G` is a multigraph, `keys` indicates whether the edge key `k` will
be reported in the third position in the edge tuple. `data` indicates
whether the edge datadict `d` will appear at the end of the edge tuple.
If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
or `(u, v)` if `data` is False.
Examples
--------
>>> from networkx.algorithms import tree
Find minimum spanning edges by Kruskal's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> mst = tree.minimum_spanning_edges(G, algorithm='kruskal', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (1, 2), (2, 3)]
Find minimum spanning edges by Prim's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> mst = tree.minimum_spanning_edges(G, algorithm='prim', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (1, 2), (2, 3)]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
Modified code from David Eppstein, April 2006
http://www.ics.uci.edu/~eppstein/PADS/
"""
try:
algo = ALGORITHMS[algorithm]
except KeyError:
msg = '{} is not a valid choice for an algorithm.'.format(algorithm)
raise ValueError(msg)
return algo(G, minimum=True, weight=weight, keys=keys, data=data,
ignore_nan=ignore_nan)
@not_implemented_for('directed')
def maximum_spanning_edges(G, algorithm='kruskal', weight='weight',
keys=True, data=True, ignore_nan=False):
"""Generate edges in a maximum spanning forest of an undirected
weighted graph.
A maximum spanning tree is a subgraph of the graph (a tree)
with the maximum possible sum of edge weights. A spanning forest is a
union of the spanning trees for each connected component of the graph.
Parameters
----------
G : undirected Graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
algorithm : string
The algorithm to use when finding a maximum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.
weight : string
Edge data key to use for weight (default 'weight').
keys : bool
Whether to yield edge key in multigraphs in addition to the edge.
If `G` is not a multigraph, this is ignored.
data : bool, optional
If True yield the edge data along with the edge.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
edges : iterator
An iterator over edges in a maximum spanning tree of `G`.
Edges connecting nodes `u` and `v` are represented as tuples:
`(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`
If `G` is a multigraph, `keys` indicates whether the edge key `k` will
be reported in the third position in the edge tuple. `data` indicates
whether the edge datadict `d` will appear at the end of the edge tuple.
If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
or `(u, v)` if `data` is False.
Examples
--------
>>> from networkx.algorithms import tree
Find maximum spanning edges by Kruskal's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> mst = tree.maximum_spanning_edges(G, algorithm='kruskal', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (0, 3), (1, 2)]
Find maximum spanning edges by Prim's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2) # assign weight 2 to edge 0-3
>>> mst = tree.maximum_spanning_edges(G, algorithm='prim', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (0, 3), (3, 2)]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
Modified code from David Eppstein, April 2006
http://www.ics.uci.edu/~eppstein/PADS/
"""
try:
algo = ALGORITHMS[algorithm]
except KeyError:
msg = '{} is not a valid choice for an algorithm.'.format(algorithm)
raise ValueError(msg)
return algo(G, minimum=False, weight=weight, keys=keys, data=data,
ignore_nan=ignore_nan)
def minimum_spanning_tree(G, weight='weight', algorithm='kruskal',
ignore_nan=False):
"""Returns a minimum spanning tree or forest on an undirected graph `G`.
Parameters
----------
G : undirected graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
weight : str
Data key to use for edge weights.
algorithm : string
The algorithm to use when finding a minimum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is
'kruskal'.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
G : NetworkX Graph
A minimum spanning tree or forest.
Examples
--------
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> T = nx.minimum_spanning_tree(G)
>>> sorted(T.edges(data=True))
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
There may be more than one tree with the same minimum or maximum weight.
See :mod:`networkx.tree.recognition` for more detailed definitions.
Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
"""
edges = minimum_spanning_edges(G, algorithm, weight, keys=True,
data=True, ignore_nan=ignore_nan)
T = G.fresh_copy() # Same graph class as G
T.graph.update(G.graph)
T.add_nodes_from(G.nodes.items())
T.add_edges_from(edges)
return T
def maximum_spanning_tree(G, weight='weight', algorithm='kruskal',
ignore_nan=False):
"""Returns a maximum spanning tree or forest on an undirected graph `G`.
Parameters
----------
G : undirected graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
weight : str
Data key to use for edge weights.
algorithm : string
        The algorithm to use when finding a maximum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is
'kruskal'.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
G : NetworkX Graph
        A maximum spanning tree or forest.
Examples
--------
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> T = nx.maximum_spanning_tree(G)
>>> sorted(T.edges(data=True))
[(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
There may be more than one tree with the same minimum or maximum weight.
See :mod:`networkx.tree.recognition` for more detailed definitions.
Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
"""
edges = maximum_spanning_edges(G, algorithm, weight, keys=True,
data=True, ignore_nan=ignore_nan)
edges = list(edges)
T = G.fresh_copy() # Same graph class as G
T.graph.update(G.graph)
T.add_nodes_from(G.nodes.items())
T.add_edges_from(edges)
return T
| [
"[email protected]"
]
| |
156d6f7fc512c8f3ba50b7135ffd548e1d30f08e | 8e75843fc2b27d50e1f8a95f0367a3a96a3dae30 | /Code/python_quote.py | a1cb9f69bbba8935805a704b36ca94ea7291b786 | []
| no_license | franklin-phan/CS-2-Tweet-Generator | 5f122e2aab7a6ee749feb888d094c8057671a7ee | fedb9ba46be3f31a1586f8d64986ec92c58296b6 | refs/heads/master | 2021-07-14T14:37:13.404088 | 2020-03-06T07:08:03 | 2020-03-06T07:08:03 | 236,772,553 | 0 | 0 | null | 2021-03-20T02:58:02 | 2020-01-28T15:47:39 | Python | UTF-8 | Python | false | false | 332 | py | import random
quotes = ("It's just a flesh wound.",
"He's not the Messiah. He's a very naughty boy!",
"THIS IS AN EX-PARROT!!")
def random_python_quote():
rand_index = random.randint(0, len(quotes) - 1)
return quotes[rand_index]
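# Note: random.choice(quotes) would be an equivalent one-liner.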
if __name__ == '__main__':
quote = random_python_quote()
    print(quote)
"[email protected]"
]
| |
7e6aaa5e69e03122dd3e0dec7476a9bc38d155c2 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Difference/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Difference_MovingMedian_7__20.py | 3857b72f05dfa8e9071a105b318bb037455121e2 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 268 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"[email protected]"
]
| |
e39687a83d7901840b63d3e947089e5c408f944d | 11137bde91389c04a95df6f6fdaf64f7f49f5f80 | /secondTest/introduction_MIT2/5_1.py | 47272dfbccb9fef4086d3fd1feaa61aff6aa3068 | []
| no_license | starschen/learning | cf3c5a76c867567bce73e9cacb2cf0979ba053d9 | 34decb8f9990117a5f40b8db6dba076a7f115671 | refs/heads/master | 2020-04-06T07:02:56.444233 | 2016-08-24T08:11:49 | 2016-08-24T08:11:49 | 39,417,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #encoding:utf8
def findDivisors(n1,n2):
divisors=()
for i in range(1,min(n1,n2)+1):
if n1%i==0 and n2%i==0:
divisors=divisors+(i,)
return divisors
divisors=findDivisors(20,200)
# print divisors
total=0
for d in divisors:
total+=d
# print total
def findExtremeDivisors(n1,n2):
divisors=()
minVal,maxVal=None,None
for i in range(2,min(n1,n2)+1):
if n1%i==0 and n2%i==0:
if minVal==None or i<minVal:
minVal=i
if maxVal==None or i >maxVal:
maxVal=i
return (minVal,maxVal)
# minVal,maxVal=findExtremeDivisors(100,200)
# print 'minVal=',minVal
# print 'maxVal=',maxVal
print findExtremeDivisors(100,200)
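# expected output: (2, 100), the smallest and largest common divisors (>= 2) of 100 and 200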
| [
"[email protected]"
]
| |
c3a82f8ae4512e4c66cb1f0c074facd96d2a4bf3 | e89a3a203bd4b433c0f1acc05b2e3c89a7020a9f | /src/robots/descriptions/cheetah_simu/cheetah_sim/ros_package/cheetah_core/src/leg_control/__init__.py | 88699440d88fb79604aa080b53b86fcbe3df9d3b | []
| no_license | JJHbrams/QuadrupedMotionPlanning | 03e5dfdd8410710a26553441aa557e9585b5f10f | cf83de83776b8215b2e94dbc2afa5f59039e6d4d | refs/heads/master | 2023-01-12T11:30:38.202023 | 2020-11-11T10:52:20 | 2020-11-11T10:52:20 | 311,865,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | __all__=["legController"]
| [
"[email protected]"
]
| |
f72ea5adb6bb93fb22ed43dc90bdc32c3d350e5e | e9c9e38ed91969df78bbd7f9ca2a0fdb264d8ddb | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_pkg_firewall_policy6.py | 4e8e6249ef88271ad42c8c22653a3b534363cbf7 | []
| no_license | Arceusir/PRELIM_SKILLS_EXAM | 882fcf2868926f0bbfe1fb18d50e5fe165936c02 | b685c5b28d058f59de2875c7579739c545df2e0c | refs/heads/master | 2023-08-15T07:30:42.303283 | 2021-10-09T01:27:19 | 2021-10-09T01:27:19 | 415,167,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80,550 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_pkg_firewall_policy6
short_description: Configure IPv6 policies.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module; the top
      level parameters workspace_locking_adom and workspace_locking_timeout control the lock.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with the parameters rc_failed and rc_succeeded.
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
        description: Only set to True when the module schema differs from the FortiManager API structure; the module then continues to execute without validating parameters.
required: false
type: bool
default: false
workspace_locking_adom:
        description: The ADOM to lock when FortiManager is running in workspace mode; the value can be global or a specific ADOM name such as root.
required: false
type: str
workspace_locking_timeout:
        description: the maximum time in seconds to wait for another user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
pkg:
description: the parameter (pkg) in requested url
type: str
required: true
pkg_firewall_policy6:
description: the top level parameters set
required: false
type: dict
suboptions:
action:
type: str
description: 'Policy action (allow/deny/ipsec).'
choices:
- 'deny'
- 'accept'
- 'ipsec'
- 'ssl-vpn'
app-category:
type: str
description: 'Application category ID list.'
application:
description: no description
type: int
application-list:
type: str
description: 'Name of an existing Application list.'
auto-asic-offload:
type: str
description: 'Enable/disable policy traffic ASIC offloading.'
choices:
- 'disable'
- 'enable'
av-profile:
type: str
description: 'Name of an existing Antivirus profile.'
comments:
type: str
description: 'Comment.'
custom-log-fields:
type: str
description: 'Log field index numbers to append custom log fields to log messages for this policy.'
devices:
type: str
description: 'Names of devices or device groups that can be matched by the policy.'
diffserv-forward:
type: str
description: 'Enable to change packets DiffServ values to the specified diffservcode-forward value.'
choices:
- 'disable'
- 'enable'
diffserv-reverse:
type: str
description: 'Enable to change packets reverse (reply) DiffServ values to the specified diffservcode-rev value.'
choices:
- 'disable'
- 'enable'
diffservcode-forward:
type: str
description: 'Change packets DiffServ to this value.'
diffservcode-rev:
type: str
description: 'Change packets reverse (reply) DiffServ to this value.'
dlp-sensor:
type: str
description: 'Name of an existing DLP sensor.'
dscp-match:
type: str
description: 'Enable DSCP check.'
choices:
- 'disable'
- 'enable'
dscp-negate:
type: str
description: 'Enable negated DSCP match.'
choices:
- 'disable'
- 'enable'
dscp-value:
type: str
description: 'DSCP value.'
dsri:
type: str
description: 'Enable DSRI to ignore HTTP server responses.'
choices:
- 'disable'
- 'enable'
dstaddr:
type: str
description: 'Destination address and address group names.'
dstaddr-negate:
type: str
description: 'When enabled dstaddr specifies what the destination address must NOT be.'
choices:
- 'disable'
- 'enable'
dstintf:
type: str
description: 'Outgoing (egress) interface.'
firewall-session-dirty:
type: str
description: 'How to handle sessions if the configuration of this firewall policy changes.'
choices:
- 'check-all'
- 'check-new'
fixedport:
type: str
description: 'Enable to prevent source NAT from changing a sessions source port.'
choices:
- 'disable'
- 'enable'
global-label:
type: str
description: 'Label for the policy that appears when the GUI is in Global View mode.'
groups:
type: str
description: 'Names of user groups that can authenticate with this policy.'
icap-profile:
type: str
description: 'Name of an existing ICAP profile.'
inbound:
type: str
description: 'Policy-based IPsec VPN: only traffic from the remote network can initiate a VPN.'
choices:
- 'disable'
- 'enable'
ippool:
type: str
description: 'Enable to use IP Pools for source NAT.'
choices:
- 'disable'
- 'enable'
ips-sensor:
type: str
description: 'Name of an existing IPS sensor.'
label:
type: str
description: 'Label for the policy that appears when the GUI is in Section View mode.'
logtraffic:
type: str
description: 'Enable or disable logging. Log all sessions or security profile sessions.'
choices:
- 'disable'
- 'enable'
- 'all'
- 'utm'
logtraffic-start:
type: str
description: 'Record logs when a session starts and ends.'
choices:
- 'disable'
- 'enable'
mms-profile:
type: str
description: 'Name of an existing MMS profile.'
name:
type: str
description: 'Policy name.'
nat:
type: str
description: 'Enable/disable source NAT.'
choices:
- 'disable'
- 'enable'
natinbound:
type: str
description: 'Policy-based IPsec VPN: apply destination NAT to inbound traffic.'
choices:
- 'disable'
- 'enable'
natoutbound:
type: str
description: 'Policy-based IPsec VPN: apply source NAT to outbound traffic.'
choices:
- 'disable'
- 'enable'
np-accelation:
type: str
description: 'Enable/disable UTM Network Processor acceleration.'
choices:
- 'disable'
- 'enable'
outbound:
type: str
description: 'Policy-based IPsec VPN: only traffic from the internal network can initiate a VPN.'
choices:
- 'disable'
- 'enable'
per-ip-shaper:
type: str
description: 'Per-IP traffic shaper.'
policyid:
type: int
description: 'Policy ID.'
poolname:
type: str
description: 'IP Pool names.'
profile-group:
type: str
description: 'Name of profile group.'
profile-protocol-options:
type: str
description: 'Name of an existing Protocol options profile.'
profile-type:
type: str
description: 'Determine whether the firewall policy allows security profile groups or single profiles only.'
choices:
- 'single'
- 'group'
replacemsg-override-group:
type: str
description: 'Override the default replacement message group for this policy.'
rsso:
type: str
description: 'Enable/disable RADIUS single sign-on (RSSO).'
choices:
- 'disable'
- 'enable'
schedule:
type: str
description: 'Schedule name.'
send-deny-packet:
type: str
description: 'Enable/disable return of deny-packet.'
choices:
- 'disable'
- 'enable'
service:
type: str
description: 'Service and service group names.'
service-negate:
type: str
description: 'When enabled service specifies what the service must NOT be.'
choices:
- 'disable'
- 'enable'
session-ttl:
type: int
description: 'Session TTL in seconds for sessions accepted by this policy. 0 means use the system default session TTL.'
spamfilter-profile:
type: str
description: 'Name of an existing Spam filter profile.'
srcaddr:
type: str
description: 'Source address and address group names.'
srcaddr-negate:
type: str
description: 'When enabled srcaddr specifies what the source address must NOT be.'
choices:
- 'disable'
- 'enable'
srcintf:
type: str
description: 'Incoming (ingress) interface.'
ssl-mirror:
type: str
description: 'Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).'
choices:
- 'disable'
- 'enable'
ssl-mirror-intf:
type: str
description: 'SSL mirror interface name.'
ssl-ssh-profile:
type: str
description: 'Name of an existing SSL SSH profile.'
status:
type: str
description: 'Enable or disable this policy.'
choices:
- 'disable'
- 'enable'
tags:
type: str
description: 'Names of object-tags applied to this policy.'
tcp-mss-receiver:
type: int
description: 'Receiver TCP maximum segment size (MSS).'
tcp-mss-sender:
type: int
description: 'Sender TCP maximum segment size (MSS).'
tcp-session-without-syn:
type: str
description: 'Enable/disable creation of TCP session without SYN flag.'
choices:
- 'all'
- 'data-only'
- 'disable'
timeout-send-rst:
type: str
description: 'Enable/disable sending RST packets when TCP sessions expire.'
choices:
- 'disable'
- 'enable'
traffic-shaper:
type: str
description: 'Reverse traffic shaper.'
traffic-shaper-reverse:
type: str
description: 'Reverse traffic shaper.'
url-category:
type: str
description: 'URL category ID list.'
users:
type: str
description: 'Names of individual users that can authenticate with this policy.'
utm-status:
type: str
description: 'Enable AV/web/ips protection profile.'
choices:
- 'disable'
- 'enable'
uuid:
type: str
description: 'Universally Unique Identifier (UUID; automatically assigned but can be manually reset).'
vlan-cos-fwd:
type: int
description: 'VLAN forward direction user priority: 255 passthrough, 0 lowest, 7 highest'
vlan-cos-rev:
type: int
description: 'VLAN reverse direction user priority: 255 passthrough, 0 lowest, 7 highest'
voip-profile:
type: str
description: 'Name of an existing VoIP profile.'
vpntunnel:
type: str
description: 'Policy-based IPsec VPN: name of the IPsec VPN Phase 1.'
webfilter-profile:
type: str
description: 'Name of an existing Web filter profile.'
anti-replay:
type: str
description: 'Enable/disable anti-replay check.'
choices:
- 'disable'
- 'enable'
app-group:
type: str
description: 'Application group names.'
cifs-profile:
type: str
description: 'Name of an existing CIFS profile.'
dnsfilter-profile:
type: str
description: 'Name of an existing DNS filter profile.'
emailfilter-profile:
type: str
description: 'Name of an existing email filter profile.'
http-policy-redirect:
type: str
description: 'Redirect HTTP(S) traffic to matching transparent web proxy policy.'
choices:
- 'disable'
- 'enable'
inspection-mode:
type: str
description: 'Policy inspection mode (Flow/proxy). Default is Flow mode.'
choices:
- 'proxy'
- 'flow'
np-acceleration:
type: str
description: 'Enable/disable UTM Network Processor acceleration.'
choices:
- 'disable'
- 'enable'
ssh-filter-profile:
type: str
description: 'Name of an existing SSH filter profile.'
ssh-policy-redirect:
type: str
description: 'Redirect SSH traffic to matching transparent proxy policy.'
choices:
- 'disable'
- 'enable'
tos:
type: str
description: 'ToS (Type of Service) value used for comparison.'
tos-mask:
type: str
description: 'Non-zero bit positions are used for comparison while zero bit positions are ignored.'
tos-negate:
type: str
description: 'Enable negated TOS match.'
choices:
- 'disable'
- 'enable'
vlan-filter:
type: str
description: 'Set VLAN filters.'
waf-profile:
type: str
description: 'Name of an existing Web application firewall profile.'
webcache:
type: str
description: 'Enable/disable web cache.'
choices:
- 'disable'
- 'enable'
webcache-https:
type: str
description: 'Enable/disable web cache for HTTPS.'
choices:
- 'disable'
- 'enable'
webproxy-forward-server:
type: str
description: 'Web proxy forward server name.'
webproxy-profile:
type: str
description: 'Webproxy profile name.'
fsso-groups:
type: str
description: 'Names of FSSO groups.'
decrypted-traffic-mirror:
type: str
description: 'Decrypted traffic mirror.'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure IPv6 policies.
fmgr_pkg_firewall_policy6:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
pkg: <your own value>
state: <value in [present, absent]>
pkg_firewall_policy6:
action: <value in [deny, accept, ipsec, ...]>
app-category: <value of string>
application: <value of integer>
application-list: <value of string>
auto-asic-offload: <value in [disable, enable]>
av-profile: <value of string>
comments: <value of string>
custom-log-fields: <value of string>
devices: <value of string>
diffserv-forward: <value in [disable, enable]>
diffserv-reverse: <value in [disable, enable]>
diffservcode-forward: <value of string>
diffservcode-rev: <value of string>
dlp-sensor: <value of string>
dscp-match: <value in [disable, enable]>
dscp-negate: <value in [disable, enable]>
dscp-value: <value of string>
dsri: <value in [disable, enable]>
dstaddr: <value of string>
dstaddr-negate: <value in [disable, enable]>
dstintf: <value of string>
firewall-session-dirty: <value in [check-all, check-new]>
fixedport: <value in [disable, enable]>
global-label: <value of string>
groups: <value of string>
icap-profile: <value of string>
inbound: <value in [disable, enable]>
ippool: <value in [disable, enable]>
ips-sensor: <value of string>
label: <value of string>
logtraffic: <value in [disable, enable, all, ...]>
logtraffic-start: <value in [disable, enable]>
mms-profile: <value of string>
name: <value of string>
nat: <value in [disable, enable]>
natinbound: <value in [disable, enable]>
natoutbound: <value in [disable, enable]>
np-accelation: <value in [disable, enable]>
outbound: <value in [disable, enable]>
per-ip-shaper: <value of string>
policyid: <value of integer>
poolname: <value of string>
profile-group: <value of string>
profile-protocol-options: <value of string>
profile-type: <value in [single, group]>
replacemsg-override-group: <value of string>
rsso: <value in [disable, enable]>
schedule: <value of string>
send-deny-packet: <value in [disable, enable]>
service: <value of string>
service-negate: <value in [disable, enable]>
session-ttl: <value of integer>
spamfilter-profile: <value of string>
srcaddr: <value of string>
srcaddr-negate: <value in [disable, enable]>
srcintf: <value of string>
ssl-mirror: <value in [disable, enable]>
ssl-mirror-intf: <value of string>
ssl-ssh-profile: <value of string>
status: <value in [disable, enable]>
tags: <value of string>
tcp-mss-receiver: <value of integer>
tcp-mss-sender: <value of integer>
tcp-session-without-syn: <value in [all, data-only, disable]>
timeout-send-rst: <value in [disable, enable]>
traffic-shaper: <value of string>
traffic-shaper-reverse: <value of string>
url-category: <value of string>
users: <value of string>
utm-status: <value in [disable, enable]>
uuid: <value of string>
vlan-cos-fwd: <value of integer>
vlan-cos-rev: <value of integer>
voip-profile: <value of string>
vpntunnel: <value of string>
webfilter-profile: <value of string>
anti-replay: <value in [disable, enable]>
app-group: <value of string>
cifs-profile: <value of string>
dnsfilter-profile: <value of string>
emailfilter-profile: <value of string>
http-policy-redirect: <value in [disable, enable]>
inspection-mode: <value in [proxy, flow]>
np-acceleration: <value in [disable, enable]>
ssh-filter-profile: <value of string>
ssh-policy-redirect: <value in [disable, enable]>
tos: <value of string>
tos-mask: <value of string>
tos-negate: <value in [disable, enable]>
vlan-filter: <value of string>
waf-profile: <value of string>
webcache: <value in [disable, enable]>
webcache-https: <value in [disable, enable]>
webproxy-forward-server: <value of string>
webproxy-profile: <value of string>
fsso-groups: <value of string>
decrypted-traffic-mirror: <value of string>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy6'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy6/{policy6}'
]
url_params = ['adom', 'pkg']
module_primary_key = 'policyid'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'pkg': {
'required': True,
'type': 'str'
},
'pkg_firewall_policy6': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True
},
'options': {
'action': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'deny',
'accept',
'ipsec',
'ssl-vpn'
],
'type': 'str'
},
'app-category': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'application': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'application-list': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'auto-asic-offload': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'av-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'comments': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'custom-log-fields': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'devices': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'diffserv-forward': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'diffserv-reverse': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'diffservcode-forward': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'diffservcode-rev': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dlp-sensor': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dscp-match': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dscp-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dscp-value': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dsri': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dstaddr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dstaddr-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dstintf': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'firewall-session-dirty': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'check-all',
'check-new'
],
'type': 'str'
},
'fixedport': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'global-label': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'groups': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'icap-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'inbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ippool': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ips-sensor': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'label': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'logtraffic': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable',
'all',
'utm'
],
'type': 'str'
},
'logtraffic-start': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'mms-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'name': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'nat': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'natinbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'natoutbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'np-accelation': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'outbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'per-ip-shaper': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'policyid': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'poolname': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'profile-group': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'profile-protocol-options': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'profile-type': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'single',
'group'
],
'type': 'str'
},
'replacemsg-override-group': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'rsso': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'schedule': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'send-deny-packet': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'service': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'service-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'session-ttl': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'spamfilter-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'srcaddr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'srcaddr-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'srcintf': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'ssl-mirror': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-mirror-intf': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'ssl-ssh-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tags': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'tcp-mss-receiver': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'tcp-mss-sender': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'tcp-session-without-syn': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'all',
'data-only',
'disable'
],
'type': 'str'
},
'timeout-send-rst': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'traffic-shaper': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'traffic-shaper-reverse': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'url-category': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'users': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'utm-status': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'uuid': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'vlan-cos-fwd': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'vlan-cos-rev': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'voip-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'vpntunnel': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'webfilter-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'anti-replay': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'app-group': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'cifs-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dnsfilter-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'emailfilter-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'http-policy-redirect': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'inspection-mode': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'proxy',
'flow'
],
'type': 'str'
},
'np-acceleration': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssh-filter-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'ssh-policy-redirect': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tos': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'tos-mask': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'tos-negate': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'vlan-filter': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'waf-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'webcache': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'webcache-https': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'webproxy-forward-server': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'webproxy-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'fsso-groups': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'decrypted-traffic-mirror': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'pkg_firewall_policy6'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
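# --- Editor's illustrative sketch (not part of the original module) ---
# Each 'revision' map above records the FortiManager versions in which an
# option is supported. A hypothetical helper showing how such a map could
# gate a parameter (the real gating logic lives in the fortinet.fortimanager
# collection and may differ):
#
# def is_supported(revision_map, version):
#     return revision_map.get(version, False)
#
# is_supported({'6.4.0': True, '7.0.0': False}, '6.4.0')  # -> True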
6c42121e14c982c244c5e02c8719f1cf0456c50b | 00829e1ff78f73dab073a201d68139960c1d1922 | /tools/toolset/tool/rigging/beam/core/maths/color.py | dc40bd6d1cd95c9443bd68690d5e0cbba7ef7e09 | []
| no_license | liangyongg/Beam_Tools | a021ceb4187107508536c46726da5b9629ffd1cf | 21b5d06e660f058434e589ae4f672f96296b7540 | refs/heads/master | 2018-11-04T04:43:02.523654 | 2018-08-26T12:33:09 | 2018-08-26T12:33:09 | 115,005,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,953 | py | """Kraken - maths.color module.
Classes:
Color -- Color object.
"""
import random
import math
from rigging.beam.core.beam_system import bs
from math_object import MathObject
class Color(MathObject):
    """Color object with r, g, b and a channels."""
    def __init__(self, r=0.0, g=0.0, b=0.0, a=1.0):
        """Initializes r, g, b and a values for Color object."""
super(Color, self).__init__()
#if bs.getRTValTypeName(r) == 'Color':
# self._rtval = r
#else:
# self._rtval = bs.rtVal('Color')
# if isinstance(r, Color):
        #        self.set(r=r.r, g=r.g, b=r.b, a=r.a)
# else:
# self.set(r=r, g=g, b=b, a=a)
def __str__(self):
"""String representation of the Color object.
Returns:
str: String representation of the Color object."""
stringRep = "Color("
stringRep += str(self.r) + ","
stringRep += str(self.g) + ","
stringRep += str(self.b) + ","
stringRep += str(self.a) + ")"
return stringRep
@property
def r(self):
"""Gets red channel of this color.
Returns:
float: red channel of this color.
"""
return self._rtval.r.getSimpleType()
@r.setter
def r(self, value):
"""Sets red channel from the input channel.
Args:
channel (float): Value to set the red channel to.
Returns:
bool: True if successful.
"""
self._rtval.r = bs.rtVal('Scalar', value)
return True
@property
def g(self):
"""Gets green channel of this color.
Returns:
float: green channel of this color.
"""
return self._rtval.g.getSimpleType()
@g.setter
def g(self, value):
"""Sets green channel from the input channel.
Args:
channel (float): Value to set the green property as.
Returns:
bool: True if successful.
"""
self._rtval.g = bs.rtVal('Scalar', value)
return True
@property
def b(self):
"""Gets blue channel of this color.
Returns:
float: blue channel of this color.
"""
return self._rtval.b.getSimpleType()
@b.setter
def b(self, value):
"""Sets blue channel from the input channel.
Args:
channel (float): Value to set the blue property as.
Returns:
bool: True if successful.
"""
self._rtval.b = bs.rtVal('Scalar', value)
return True
@property
def a(self):
"""Gets alpha channel of this color.
Returns:
float: alpha channel of this color.
"""
return self._rtval.a.getSimpleType()
@a.setter
def a(self, value):
"""Sets a channel from the input channel.
Args:
channel (float): Value to set the a property as.
Returns:
bool: True if successful.
"""
        self._rtval.a = bs.rtVal('Scalar', value)
        return True
def __eq__(self, other):
return self.equal(other)
def __ne__(self, other):
return not self.equal(other)
def __add__(self, other):
return self.add(other)
def __sub__(self, other):
return self.subtract(other)
def __mul__(self, other):
return self.multiply(other)
def __div__(self, other):
return self.divide(other)
def clone(self):
"""Returns a clone of the Color.
Returns:
Color: The cloned Color
"""
        color = Color()
        color.r = self.r
        color.g = self.g
        color.b = self.b
        color.a = self.a
        return color
def set(self, r, g, b, a):
"""Sets the r, g, b, and a value from the input values.
Args:
r (float): Value to set the r channel to.
g (float): Value to set the g channel to.
b (float): Value to set the b channel to.
a (float): Value to set the a channel to.
Returns:
bool: True if successful.
"""
self._rtval.set('', bs.rtVal('Scalar', r), bs.rtVal('Scalar', g),
bs.rtVal('Scalar', b), bs.rtVal('Scalar', a))
return True
def equal(self, other):
"""Checks equality of this color with another.
Args:
other (Color): other color to check equality with.
Returns:
bool: True if equal.
"""
return self._rtval.equal('Boolean', other._rtval).getSimpleType()
def almostEqual(self, other, precision):
"""Checks almost equality of this Color with another.
Args:
other (Color): other value to check equality with.
precision (float): Precision value.
Returns:
bool: True if almost equal.
"""
return self._rtval.almostEqual('Boolean', other._rtval,
bs.rtVal('Scalar', precision)).getSimpleType()
def component(self, i ):
"""Gets the component of this Color by index.
Args:
i (int): index of the component to return.
Returns:
float: component of this Color.
"""
return self._rtval.component('Scalar', bs.rtVal('Size', i)).getSimpleType()
def setComponent(self, i, v ):
"""Sets the component of this Color by index.
Args:
i (int): index of the component to set.
v (float): Value to set component as.
Returns:
bool: True if successful.
"""
return self._rtval.setComponent('', bs.rtVal('Size', i),
bs.rtVal('Scalar', v))
def add(self, other):
"""Overload method for the add operator.
Args:
other (Color): other color to add to this one.
Returns:
Color: New Color of the sum of the two Color's.
"""
return Color(self._rtval.add('Color', other._rtval))
def subtract(self, other):
"""Overload method for the subtract operator.
Args:
other (Color): other color to subtract from this one.
Returns:
Color: New Color of the difference of the two Color's.
"""
return Color(self._rtval.subtract('Color', other._rtval))
def multiply(self, other):
"""Overload method for the multiply operator.
Args:
other (Color): other color to multiply from this one.
Returns:
Color: New Color of the product of the two Color's.
"""
return Color(self._rtval.multiply('Color', other._rtval))
def divide(self, other):
"""Divides this color and an other.
Args:
other (Color): other color to divide by.
Returns:
Color: Quotient of the division of this color by the other.
"""
return Color(self._rtval.divide('Color', other._rtval))
def multiplyScalar(self, other):
"""Product of this color and a scalar.
Args:
other (float): Scalar value to multiply this color by.
Returns:
Color: Product of the multiplication of the scalar and this color.
"""
return Color(self._rtval.multiplyScalar('Color', bs.rtVal('Scalar', other)))
def divideScalar(self, other):
"""Divides this color and a scalar.
Args:
other (float): Value to divide this color by.
Returns:
Color: Quotient of the division of the color by the scalar.
"""
return Color(self._rtval.divideScalar('Color', bs.rtVal('Scalar', other)))
def linearInterpolate(self, other, t):
"""Linearly interpolates this color with another one based on a scalar
blend value (0.0 to 1.0).
Args:
other (Color): color to blend to.
t (float): Blend value.
Returns:
Color: New color blended between this and the input color.
"""
return Color(self._rtval.linearInterpolate('Color', bs.rtVal('Color', other), bs.rtVal('Scalar', t)))
@classmethod
    def randomColor(cls, gammaAdjustment):
        """Generates a random color, optionally biased light or dark via a gamma adjustment.
        Example:
        # Generate a regular random color
        color = Color.randomColor(0.0)
        # Generate a light random color
        color = Color.randomColor(0.5)
        # Generate a dark random color
        color = Color.randomColor(-0.5)
        Args:
            gammaAdjustment (float): A gamma adjustment to offset the range of the generated color.
        Returns:
            Color: New random color.
        """
def lerp( val1, val2, t):
return val1 + ((val2 - val1) * t)
if(gammaAdjustment > 0.0001):
# Generate a light color with values between gammaAdjustment and 1.0
return Color(
lerp(gammaAdjustment, 1.0, random.random()),
lerp(gammaAdjustment, 1.0, random.random()),
lerp(gammaAdjustment, 1.0, random.random())
)
elif(gammaAdjustment < -0.0001):
# Generate a dark color with values between 0.0 and 1.0-gammaAdjustment
return Color(
lerp(0.0, 1.0+gammaAdjustment, random.random()),
lerp(0.0, 1.0+gammaAdjustment, random.random()),
lerp(0.0, 1.0+gammaAdjustment, random.random())
)
else:
            # No gamma adjustment requested, so return an unbiased random color.
return Color(
random.random(),
random.random(),
random.random()
)
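# --- Editor's illustrative sketch ---
# Intended usage, assuming the rigging runtime that provides `bs` is
# available (the RTVal-backed channels cannot work outside it):
#
# red = Color(1.0, 0.0, 0.0)
# blue = Color(0.0, 0.0, 1.0)
# purple = red.linearInterpolate(blue, 0.5)  # blend halfway between the two
# light = Color.randomColor(0.5)             # random color biased light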
b3659978c254246c6d5f9ff0bb961a8029d82c3e | 30e2a85fc560165a16813b0486a862317c7a486a | /tensorflow/test/misc/graph.py | f141c134d6cf1435e3b25c0f9515954553e7ee26 | []
| no_license | muryliang/python_prac | 2f65b6fdb86c3b3a44f0c6452a154cd497eb2d01 | 0301e8f523a2e31e417fd99a968ad8414e9a1e08 | refs/heads/master | 2021-01-21T11:03:48.397178 | 2017-09-18T04:13:27 | 2017-09-18T04:13:27 | 68,801,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import tensorflow as tf
g1 = tf.Graph()
with g1.as_default():
v = tf.get_variable(
"v", initializer=tf.zeros_initializer(shape=[1]))
g2 = tf.Graph()
with g2.as_default():
    v = tf.get_variable(
"v", initializer=tf.ones_initializer(shape=[1]))
with tf.Session(graph=g1) as sess:
tf.initialize_all_variables().run()
with tf.variable_scope("", reuse=True):
        print sess.run(tf.get_variable("v"))
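# --- Editor's illustrative sketch ---
# The same pattern reads the ones-initialized variable from the second graph;
# each tf.Session is bound to exactly one tf.Graph (old TF 0.x API assumed):
#
# with tf.Session(graph=g2) as sess:
#     tf.initialize_all_variables().run()
#     with tf.variable_scope("", reuse=True):
#         print sess.run(tf.get_variable("v"))  # -> [ 1.]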
2d28abd02b655286a0b2762b8b7f33ce1e3ce5c8 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/u19.py | 44f8d26c2e4a9b7cdc3d42f29e4fe1307c540b0c | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'u19':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
    main(sys.argv[1])
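# --- Editor's illustrative usage ---
# Given an input file containing the line:
#     u19 " Hello World "
# the tokens after 'u19' are ['"', 'Hello', 'World', '"'], so printFunction()
# strips the quote tokens and prints: Hello World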
ad62f9feb1c07f0e0d3a9a0db8defb334439b636 | a2fdcd5252741bdd3ad96f20944c07d80bd57dc8 | /class_sample.py | ca23e1669eeab4e7a15a44c5a304dc1c92735155 | []
| no_license | chaossky/Python2019 | 7cd11aab7cecf23acb42b7635f8bfb506555c856 | fd63563f6a175a6aef1f3248aefb3f754f6658e1 | refs/heads/master | 2021-07-31T09:15:14.430835 | 2019-08-16T12:13:45 | 2019-08-16T12:13:45 | 200,347,544 | 0 | 0 | null | 2019-08-05T21:54:10 | 2019-08-03T07:43:34 | Python | UTF-8 | Python | false | false | 365 | py | class Ball:
color=""
speed=0
def setSpeed(self,value):
self.speed=value
ball01=Ball()
ball01.color="Red"
ball01.setSpeed(10)
ball02=Ball()
ball02.color="Blue"
ball02.setSpeed(20)
print("Ball01 color:%s" %ball01.color)
print("Ball01 speed:%s" %ball01.speed)
print("Ball02 color:%s" %ball02.color)
print("Ball02 speed:%s" %ball02.speed)
38910cfa0d829421a6d14748e1081970a606abe0 | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/dossier/upgrades/20170307184059_reindex_searchable_text_for_dossier_templates/upgrade.py | 436c02f98340c2800590927a5f6bf366f0ad4ab2 | []
| no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 365 | py | from ftw.upgrade import UpgradeStep
class ReindexSearchableTextForDossierTemplates(UpgradeStep):
"""Reindex SearchableText for dossier templates.
"""
def __call__(self):
self.install_upgrade_profile()
self.catalog_reindex_objects(
{'portal_type': 'opengever.dossier.dossiertemplate'},
idxs=['SearchableText'])
0c9d8876bb93f2c786e18e37dc1213e7ef6c6c2d | 8e8260d109d6b3680e3ce966e9baaa540393db8b | /xadmintest/settings.py | ff250650fd023fd3bc87381d949c7e0e1b6833b0 | []
| no_license | loveguan/xadmintest | 4472c13bd68f4b4ae47479449f4319e6f50df4fc | f20093afe25216154861fd8f6f061bcfee7269f2 | refs/heads/master | 2020-09-04T23:24:06.094513 | 2019-11-06T05:57:04 | 2019-11-06T05:57:04 | 219,922,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | """
Django settings for xadmintest project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'goero%l-vd)wg%)1*5rt29kv8#=qo40=94_vvp(!+o(g#^^n%c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Xadmin.apps.XadminConfig',
'app01.apps.App01Config',
'app02.apps.App02Config',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'xadmintest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'xadmintest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
49f3e7d823cd9ee17a9c42cca9f6a42c42a6c33e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ENJTPoWCyEGgnXYjM_18.py | 35efe9177850a3a6a90e03c5f007d2d7ec0fb8cc | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py |
def percent_filled(box):
return str(round((''.join(box).count('o') / ((len(box[0]) - 2) * (len(box) - 2))) * 100)) + '%'
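# --- Editor's illustrative usage (assuming Python 3 division) ---
# Border cells are excluded from the denominator; 'o' marks a filled cell:
# box = ["####",
#        "#oo#",
#        "#..#",
#        "####"]
# percent_filled(box)  # 2 of the 4 interior cells filled -> '50%'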
445c2230f975dd0e1e6f4f7c980b766500609f3a | 6c37d1d2437a08e43b13d621d4a8da4da7135b3a | /yt_dlp/extractor/mirrativ.py | 0a8ee0c3a52eeff28f2d9e679e0ae5913bc34970 | [
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
]
| permissive | yt-dlp/yt-dlp | be040bde10cc40258c879c75ab30215686352824 | d3d81cc98f554d0adb87d24bfd6fabaaa803944d | refs/heads/master | 2023-09-05T21:15:21.050538 | 2023-09-05T20:35:23 | 2023-09-05T20:35:23 | 307,260,205 | 52,742 | 5,376 | Unlicense | 2023-09-14T05:22:08 | 2020-10-26T04:22:55 | Python | UTF-8 | Python | false | false | 4,880 | py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
dict_get,
traverse_obj,
try_get,
)
class MirrativBaseIE(InfoExtractor):
def assert_error(self, response):
error_message = traverse_obj(response, ('status', 'error'))
if error_message:
raise ExtractorError('Mirrativ says: %s' % error_message, expected=True)
class MirrativIE(MirrativBaseIE):
IE_NAME = 'mirrativ'
_VALID_URL = r'https?://(?:www\.)?mirrativ\.com/live/(?P<id>[^/?#&]+)'
    _TESTS = [{
'url': 'https://mirrativ.com/live/UQomuS7EMgHoxRHjEhNiHw',
'info_dict': {
'id': 'UQomuS7EMgHoxRHjEhNiHw',
'title': 'ねむいぃ、。『参加型』🔰jcが初めてやるCOD✨初見さん大歓迎💗',
'is_live': True,
'description': 'md5:bfcd8f77f2fab24c3c672e5620f3f16e',
'thumbnail': r're:https?://.+',
'uploader': '# あ ち ゅ 。💡',
'uploader_id': '118572165',
'duration': None,
'view_count': 1241,
'release_timestamp': 1646229192,
'timestamp': 1646229167,
'was_live': False,
},
'skip': 'livestream',
}, {
'url': 'https://mirrativ.com/live/POxyuG1KmW2982lqlDTuPw',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('https://www.mirrativ.com/live/%s' % video_id, video_id)
live_response = self._download_json(f'https://www.mirrativ.com/api/live/live?live_id={video_id}', video_id)
self.assert_error(live_response)
hls_url = dict_get(live_response, ('archive_url_hls', 'streaming_url_hls'))
is_live = bool(live_response.get('is_live'))
if not hls_url:
raise ExtractorError('Neither archive nor live is available.', expected=True)
formats = self._extract_m3u8_formats(
hls_url, video_id,
ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls', live=is_live)
return {
'id': video_id,
'title': self._og_search_title(webpage, default=None) or self._search_regex(
r'<title>\s*(.+?) - Mirrativ\s*</title>', webpage) or live_response.get('title'),
'is_live': is_live,
'description': live_response.get('description'),
'formats': formats,
'thumbnail': live_response.get('image_url'),
'uploader': traverse_obj(live_response, ('owner', 'name')),
'uploader_id': traverse_obj(live_response, ('owner', 'user_id')),
'duration': try_get(live_response, lambda x: x['ended_at'] - x['started_at']) if not is_live else None,
'view_count': live_response.get('total_viewer_num'),
'release_timestamp': live_response.get('started_at'),
'timestamp': live_response.get('created_at'),
'was_live': bool(live_response.get('is_archive')),
}
class MirrativUserIE(MirrativBaseIE):
IE_NAME = 'mirrativ:user'
_VALID_URL = r'https?://(?:www\.)?mirrativ\.com/user/(?P<id>\d+)'
_TESTS = [{
# Live archive is available up to 3 days
# see: https://helpfeel.com/mirrativ/%E9%8C%B2%E7%94%BB-5e26d3ad7b59ef0017fb49ac (Japanese)
'url': 'https://www.mirrativ.com/user/110943130',
'note': 'multiple archives available',
'only_matching': True,
}]
def _entries(self, user_id):
page = 1
while page is not None:
api_response = self._download_json(
f'https://www.mirrativ.com/api/live/live_history?user_id={user_id}&page={page}', user_id,
note=f'Downloading page {page}')
self.assert_error(api_response)
lives = api_response.get('lives')
if not lives:
break
for live in lives:
if not live.get('is_archive') and not live.get('is_live'):
# neither archive nor live is available, so skip it
# or the service will ban your IP address for a while
continue
live_id = live.get('live_id')
url = 'https://www.mirrativ.com/live/%s' % live_id
yield self.url_result(url, video_id=live_id, video_title=live.get('title'))
page = api_response.get('next_page')
def _real_extract(self, url):
user_id = self._match_id(url)
user_info = self._download_json(
f'https://www.mirrativ.com/api/user/profile?user_id={user_id}', user_id,
note='Downloading user info', fatal=False)
self.assert_error(user_info)
return self.playlist_result(
self._entries(user_id), user_id,
user_info.get('name'), user_info.get('description'))
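# --- Editor's note ---
# These extractor classes are dispatched by yt-dlp itself; from the command
# line a matching URL is enough, e.g.:
#     yt-dlp "https://mirrativ.com/live/UQomuS7EMgHoxRHjEhNiHw"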
7d50401e7f1cf6286d23132c3ea577467c6a556e | 84caee3f7b0e9811f91da65f59c93b08f76453f3 | /later/task.py | a201c2d1487a33d00572d896156b06435581c5c9 | [
"Apache-2.0"
]
| permissive | thatch/later | 541492ca2ebd77d4b5859c00ff46247847a1d1a5 | 29c614c8a14eb290555bd1708fafea6542365e60 | refs/heads/master | 2021-01-09T16:10:39.247170 | 2020-02-19T01:33:45 | 2020-02-19T01:36:01 | 242,367,315 | 0 | 0 | null | 2020-02-22T15:46:40 | 2020-02-22T15:46:39 | null | UTF-8 | Python | false | false | 11,199 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import contextvars
import logging
from contextlib import suppress
from functools import partial, wraps
from inspect import isawaitable
from types import TracebackType
from typing import (
Any,
Awaitable,
Callable,
Dict,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from unittest.mock import Mock
from .event import BiDirectionalEvent
FixerType = Callable[[asyncio.Task], Union[asyncio.Task, Awaitable[asyncio.Task]]]
logger = logging.getLogger(__name__)
F = TypeVar("F", bound=Callable[..., Awaitable[Any]])
__all__: Sequence[str] = ["Watcher", "START_TASK", "TaskSentinel", "cancel", "as_task"]
class TaskSentinel(asyncio.Task):
""" When you need a done task for typing """
def __init__(self):
fake = Mock()
asyncio.Future.__init__(self, loop=fake) # typing: ignore, don't create a loop
asyncio.Future.set_result(self, None)
async def cancel(fut: asyncio.Future) -> None:
"""
Cancel a future/task and await for it to cancel.
This method suppresses the CancelledError
"""
fut.cancel()
await asyncio.sleep(0) # let loop cycle
with suppress(asyncio.CancelledError):
await fut
def as_task(func: F) -> F:
"""
    Decorate a function, so that when called it is wrapped in a task
on the running loop.
"""
@wraps(func)
def create_task(*args, **kws):
loop = asyncio.get_running_loop()
return loop.create_task(func(*args, **kws))
return cast(F, create_task)
# Sentinel Task
START_TASK: asyncio.Task = TaskSentinel()
# ContextVar for Finding an existing Task Watcher
WATCHER_CONTEXT: contextvars.ContextVar[Watcher] = contextvars.ContextVar(
"WATCHER_CONTEXT"
)
class WatcherError(RuntimeError):
pass
class Watcher:
_tasks: Dict[asyncio.Future, Optional[FixerType]]
_scheduled: List[FixerType]
_tasks_changed: BiDirectionalEvent
_cancelled: asyncio.Event
_cancel_timeout: float
_preexit_callbacks: List[Callable[[], None]]
_shielded_tasks: Dict[asyncio.Task, asyncio.Future]
loop: asyncio.AbstractEventLoop
running: bool
@staticmethod
def get() -> Watcher:
return WATCHER_CONTEXT.get()
def __init__(self, *, cancel_timeout: float = 300, context: bool = False) -> None:
"""
cancel_timeout is the time in seconds we will wait after cancelling all
the tasks watched by this watcher.
        context is whether to expose this Watcher via contextvars now or at __aenter__
"""
if context:
WATCHER_CONTEXT.set(self)
self._cancel_timeout = cancel_timeout
self._tasks = {}
self._scheduled = []
self._tasks_changed = BiDirectionalEvent()
self._cancelled = asyncio.Event()
self._preexit_callbacks = []
self._shielded_tasks = {}
self.running = False
async def _run_scheduled(self) -> None:
scheduled = self._scheduled
while scheduled:
fixer = scheduled.pop()
task = fixer(START_TASK)
if not isinstance(task, asyncio.Task) and isawaitable(task):
task = await task
if isinstance(task, asyncio.Task):
self._tasks[task] = fixer
else:
raise TypeError(f"{fixer}(START_TASK) failed to return a task.")
async def unwatch(
self,
task: asyncio.Task = START_TASK,
fixer: Optional[FixerType] = None,
*,
shield: bool = False,
) -> bool:
"""
The ability to unwatch a task, by task or fixer
        This is a coroutine to ensure the watcher has re-watched the tasks list.
If the task was shielded then you need to specify here so we can find
the shield and remove it from the watch list.
When unwatching a fixer, if the returned task is not the same
as the one passed in we will cancel it, and await it.
"""
async def tasks_changed():
if self.running:
await self._tasks_changed.set()
if shield:
if task in self._shielded_tasks:
del self._tasks[self._shielded_tasks[task]]
del self._shielded_tasks[task]
await tasks_changed()
return True
elif fixer is not None:
for t, fix in tuple(self._tasks.items()):
if fix is fixer:
del self._tasks[t]
await tasks_changed()
if t is not task:
await cancel(t)
return True
elif task is not START_TASK:
if task in self._tasks:
del self._tasks[task]
await tasks_changed()
return True
return False
def watch(
self,
task: asyncio.Task = START_TASK,
fixer: Optional[FixerType] = None,
*,
shield: bool = False,
) -> None:
"""
Add a task to be watched by the watcher
You can also attach a fixer co-routine or function to be used to fix a
task that has died.
The fixer will be passed the failed task, and is expected to return a working
task, or raise if that is impossible.
You can also just pass in the fixer and we will use it to create the task
to be watched. The fixer will be passed a dummy task singleton:
`later.task.START_TASK`
shield argument lets you watch a task, but not cancel it in this watcher.
Useful for triggering on task failures, but not managing said task.
"""
# Watching a coro, leads to a confusing error deep in watcher
# so use runtime checks not just static types.
if not isinstance(task, asyncio.Task):
raise TypeError("only asyncio.Task objects can be watched.")
if task is START_TASK:
if not fixer:
raise ValueError("fixer must be specified when using START_TASK.")
self._scheduled.append(fixer)
elif shield:
if fixer:
raise ValueError("`fixer` can not be used with shield=True")
self._shielded_tasks[task] = asyncio.shield(task)
self._tasks[self._shielded_tasks[task]] = None
else:
self._tasks[task] = fixer
self._tasks_changed.set_nowait()
def cancel(self) -> None:
"""
Stop the watcher and cause it to cancel all the tasks in its care.
"""
self._cancelled.set()
def add_preexit_callback(self, callback: Callable[..., None], *args, **kws) -> None:
self._preexit_callbacks.append(partial(callback, *args, **kws))
def _run_preexit_callbacks(self) -> None:
for callback in self._preexit_callbacks:
try:
callback()
except Exception as e:
logger.exception(
f"ignoring exception from pre-exit callback {callback}: {e}"
)
async def __aenter__(self) -> "Watcher":
WATCHER_CONTEXT.set(self)
self.loop = asyncio.get_running_loop()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> bool:
cancel_task: asyncio.Task = self.loop.create_task(self._cancelled.wait())
changed_task: asyncio.Task = START_TASK
try:
self.running = True
while not self._cancelled.is_set():
if self._scheduled:
await self._run_scheduled()
if changed_task is START_TASK or changed_task.done():
changed_task = self.loop.create_task(self._tasks_changed.wait())
try:
if not self._tasks:
return False # There are no tasks just exit.
done, pending = await asyncio.wait(
[cancel_task, changed_task, *self._tasks.keys()],
return_when=asyncio.FIRST_COMPLETED,
)
if cancel_task in done:
break # Don't bother doing fixes just break out
for task in done:
task = cast(asyncio.Task, task)
if task is changed_task:
continue
else:
await self._fix_task(task)
except asyncio.CancelledError:
self.cancel()
finally:
self.running = False
self._run_preexit_callbacks()
await self._event_task_cleanup(cancel_task, changed_task)
await self._handle_cancel()
self._tasks.clear()
self._shielded_tasks.clear()
return False
async def _event_task_cleanup(self, *tasks):
for task in tasks:
if task is not START_TASK:
await cancel(task)
async def _fix_task(self, task: asyncio.Task) -> None:
# Insure we "retrieve" the result of failed tasks
exc = task.exception()
if exc is None:
task.result()
fixer = self._tasks[task]
if fixer is None:
raise RuntimeError(f"{task} finished and there is no fixer!") from exc
new_task = fixer(task)
if not isinstance(new_task, asyncio.Task) and isawaitable(new_task):
new_task = await new_task
if isinstance(new_task, asyncio.Task):
del self._tasks[task]
self._tasks[new_task] = fixer
else:
raise TypeError(
f"{fixer}(task) failed to return a task, returned:" f"{new_task}!"
) from exc
async def _handle_cancel(self):
tasks = [task for task in self._tasks if not task.done()]
if not tasks:
return
for task in tasks:
task.cancel()
done, pending = await asyncio.wait(tasks, timeout=self._cancel_timeout)
bad_tasks: List[asyncio.Task] = []
for task in done:
if task.cancelled():
continue
if task.exception() is not None:
bad_tasks.append(task)
bad_tasks.extend(pending)
if bad_tasks:
raise WatcherError(
"The following tasks didn't cancel cleanly or at all!", bad_tasks
)
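# --- Editor's illustrative sketch (API as defined in this file) ---
# A Watcher restarts watched tasks through their fixers; `worker` below is a
# hypothetical coroutine:
#
# async def supervise():
#     async with Watcher(cancel_timeout=5) as watcher:
#         def fixer(task: asyncio.Task) -> asyncio.Task:
#             # called with START_TASK first, then with any task that died
#             return asyncio.get_running_loop().create_task(worker())
#         watcher.watch(fixer=fixer)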
4403759cc3a6535b10eb3e09928d293cb9555aad | bb151500b0fc5bb9ef1b1a9e5bba98e485b4b34d | /problemSet/591C_Median_Smoothing.py | 9436f6108c5e3ab88ea40e68a7cd92378f7749a0 | []
| no_license | yamaton/codeforces | 47b98b23da0a3a8237d9021b0122eaa498d98628 | e0675fd010df852c94eadffdf8b801eeea7ad81b | refs/heads/master | 2021-01-10T01:22:02.338425 | 2018-11-28T02:45:04 | 2018-11-28T03:21:45 | 45,873,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | """
Codeforces Round #327 (Div. 2)
Problem 591 C. Median Smoothing
@author yamaton
@date 2015-11-06
"""
def reduce_consec(iterable):
"""
[1, 2, 3, 6, 7, 9, 10, 11, 12, 13, 20]
--> [(1, 3), (6, 2), (9, 5), (20, 1)]
Detect consecutive part and (starting_value, length) pair
:param xs: List of int
:return: List of pair of int
"""
stack = []
for x in iterable:
if stack:
# check if consective
if stack[-1] + 1 == x:
stack.append(x)
# if not consecutive, flush stack and start with new element
else:
yield (stack[0], len(stack))
stack = [x]
else:
# starting element
stack.append(x)
if stack:
yield (stack[0], len(stack))
def alternating_indices(xs):
for i, x in enumerate(xs):
if i == 0 or i == len(xs) - 1:
continue
if xs[i-1] != x and xs[i+1] != x:
yield i
def alternating_position_and_length(xs):
for x in xs:
pass
def solve(xs, n):
# zigzag = [] # alternating part
# for i, x in enumerate(xs):
# if i == 0 or i == n - 1:
# continue
# if xs[i-1] != x and xs[i+1] != x:
# zigzag.append(i)
zigzag = alternating_indices(xs)
zigzag_start_length_pairs = reduce_consec(zigzag)
count = 0
result = xs[:]
for (i, n) in zigzag_start_length_pairs:
n_half = n // 2
count = max(count, (n + 1) // 2)
if n % 2 == 0:
for j in range(i, i + n_half):
result[j] = xs[i-1]
for j in range(i + n_half, i + n):
result[j] = 1 - xs[i-1]
else:
for j in range(i, i + n):
result[j] = xs[i-1]
return count, result
def solve_bruteforce(xs, n):
def transform(ps):
result = []
for i in range(n):
if i == 0 or i == n-1:
result.append(ps[i])
else:
median = int(sum(ps[i-1:i+2]) >= 2)
result.append(median)
return tuple(result)
xs = tuple(xs)
seen = set()
seen.add(xs)
ys = transform(xs)
count = 0
while ys != xs:
# Actually, this system always ends up to a fixed point. No cycle exists.
if ys in seen:
return -1, xs
xs = ys
seen.add(xs)
count += 1
ys = transform(xs)
return count, xs
def main():
n = int(input())
xs = [int(i) for i in input().strip().split()]
count, seq = solve(xs, n)
print(count)
print(' '.join(str(n) for n in seq))
if __name__ == '__main__':
main()
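# --- Editor's illustrative check ---
# reduce_consec() groups consecutive runs into (start, length) pairs, as its
# docstring shows:
# list(reduce_consec([1, 2, 3, 6, 7, 9, 10, 11, 12, 13, 20]))
#   -> [(1, 3), (6, 2), (9, 5), (20, 1)]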
c8bca5286d0a9ad049f59155f5a9114d8f06dd8c | b92eee41d665314bc42043d1ff46c608af5ffdfd | /sesion_3/prog.4.py | eda17bf266e753571861d3d45fc42db362032da6 | []
| no_license | badillosoft/python-economics | 40efe8326558a8fb93f84fdbd2137428844ee5f3 | 82af43c7a47297ce186dc0e23e30620d46e6693a | refs/heads/master | 2021-01-11T18:55:15.762752 | 2017-05-09T01:15:59 | 2017-05-09T01:15:59 | 79,656,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from openpyxl import load_workbook
from geg import *
wb = load_workbook("puntos.xlsx")
ws = wb.active
puntos = automatic_load_data(ws, "A2")
def f(x, y):
return x**2 + y**2
for p in puntos:
x = p["X"]
y = p["Y"]
z = f(x, y)
    print "%f, %f, %f" % (x, y, z)
713e56b0dfc1b28ab55d67e75f8720cff692e593 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=49/params.py | fcbfbdfe45d4c06dbfe8c250d00b2d4aa9ae3364 | []
| no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.557024',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'RUN',
'trial': 49,
'utils': 'uni-medium-3'}
c0cf0962495662ae563a1a6b07d1ec6c2b8f5619 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/9549174/snippet.py | 312f68f442a31f1ee8acc642c7594905cdeb8ac0 | [
"MIT"
]
| permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 3,652 | py | import random
import sys
def print_grid(grid):
print ("\n%s\n" % "+".join([('-' * 4)] * 4)).join(
["|".join(["%4d" % item if item > 0 else " " * 4 for item in line]) for line in grid])
def get_available_cells(grid):
return [(y, x) for y in range(4) for x in range(4) if not grid[y][x]]
def insert_new_item(grid):
available_cells = get_available_cells(grid)
if len(available_cells) == 0:
return False
y, x = random.choice(available_cells)
grid[y][x] = 2 if random.random() < 0.9 else 4
return True
def is_legal_position(y, x):
return 0 <= y <= 3 and 0 <= x <= 3
def get_next_position(y, x, (y_offset, x_offset)):
return y + y_offset, x + x_offset
def get_next_nonzero_cell(grid, y, x, (y_offset, x_offset)):
next_y, next_x = get_next_position(y, x, (y_offset, x_offset))
if is_legal_position(next_y, next_x):
if grid[next_y][next_x]:
return next_y, next_x
else:
return get_next_nonzero_cell(grid, next_y, next_x, (y_offset, x_offset))
else:
return None, None
def merge_cells(grid, (write_y, write_x), (read_y, read_x), direction, virtual, winning=False):
if (write_y, write_x) == (read_y, read_x):
read_y, read_x = get_next_nonzero_cell(grid, read_y, read_x, direction)
if not is_legal_position(write_y, write_x) or not is_legal_position(read_y, read_x):
return winning if not virtual else False
if grid[write_y][write_x]:
if grid[read_y][read_x] == grid[write_y][write_x]:
if virtual:
return True
grid[write_y][write_x] *= 2
grid[read_y][read_x] = 0
return merge_cells(grid, get_next_position(write_y, write_x, direction),
get_next_nonzero_cell(grid, read_y, read_x, direction), direction, virtual,
winning or grid[write_y][write_x] > 1024)
else:
return merge_cells(grid, get_next_position(write_y, write_x, direction),
(read_y, read_x), direction, virtual, winning)
else:
if virtual:
return True
grid[write_y][write_x] = grid[read_y][read_x]
grid[read_y][read_x] = 0
return merge_cells(grid, (write_y, write_x),
get_next_nonzero_cell(grid, read_y, read_x, direction), direction, virtual, winning)
def get_movable_directions(grid):
return [direction for direction in ["a", "d", "w", "s"] if move(grid, direction, True)]
def move(grid, direction, virtual):
if direction == "a": #left
return any([merge_cells(grid, (i, 0), (i, 0), (0, 1), virtual) for i in range(4)])
elif direction == "d": #right
return any([merge_cells(grid, (i, 3), (i, 3), (0, -1), virtual) for i in range(4)])
elif direction == "w": #up
return any([merge_cells(grid, (0, i), (0, i), (1, 0), virtual) for i in range(4)])
elif direction == "s": #down
return any([merge_cells(grid, (3, i), (3, i), (-1, 0), virtual) for i in range(4)])
grid = [[0 for x in range(4)] for y in range(4)]
insert_new_item(grid)
while True:
insert_new_item(grid)
print_grid(grid)
movable_directions = get_movable_directions(grid)
if len(movable_directions) == 0:
print "You lose!"
break
direction_name = sys.stdin.readline().strip().lower()
while direction_name not in movable_directions:
print "Invalid direction."
direction_name = sys.stdin.readline().strip().lower()
if move(grid, direction_name, False):
print_grid(grid)
print "You win!"
        break
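# --- Editor's note ---
# merge_cells() walks one row/column: the write cursor is where the next tile
# lands and the read cursor scans ahead for the next non-zero tile; equal
# tiles merge (doubling the write cell), unequal tiles just compact. With
# virtual=True it only reports whether the move would change anything, which
# is how get_movable_directions() works.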
87481f971aab378f0cea55dabcddcedecfdce3f5 | 4c704c60dcd8bba658f4e0cdc85f299c01f2058e | /002/for1.py | 9727ffbd6c0fcc9cbb45013575fc2759408bb8fa | []
| no_license | steveq1/py2016 | acd6c80595637fb3be7f1f3378bbdca8d2dcf8cc | fb9b2708d49790efe03d84315442d7e93a7cc6d6 | refs/heads/master | 2021-01-17T13:00:25.787387 | 2016-07-18T16:28:07 | 2016-07-18T16:28:07 | 63,125,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | for x in range(0,10):
if x >=3:
is_break = False
break
print('x={0}'.format(x))
if is_break:
break
19a873e4e3896df4714cebbd65d8a78cd02da923 | 773aef0de494fde01ea5a444b0cfdf57deb88b10 | /puchowebapp/urls.py | 925ea082cf610643223dc59a8d2e26160968a8dc | []
| no_license | gk90731/Pucho_Web | 44c509f92950dc7f35cd5dfd6cf3e42fb6b2d720 | 041239934cd9303120e67d613b2ae90f23c17f20 | refs/heads/master | 2022-12-10T19:47:43.400760 | 2020-04-04T14:28:29 | 2020-04-04T14:28:29 | 253,017,818 | 0 | 0 | null | 2022-12-08T03:59:01 | 2020-04-04T14:26:52 | HTML | UTF-8 | Python | false | false | 383 | py | from django.urls import path,include
from . import views
urlpatterns = [
    path('', views.index, name="home"),
    path('what_we_do/', views.what_we_do, name="what_we_do"),
    path('about/', views.about, name="about"),
    path('protfolio/', views.protfolio, name="protfolio"),
    path('gallery/', views.gallery, name="gallery"),
    path('contact/', views.contact, name="contact"),
]
91f683f5ae10fa0d17fac5d8d2ed8efc7e5b63a8 | fc1c1e88a191b47f745625688d33555901fd8e9a | /meraki_sdk/models/universal_search_knowledge_base_search_enum.py | eeb90cdd62bbd16c19b2fcca21e1750437564fb5 | [
"MIT",
"Python-2.0"
]
| permissive | RaulCatalano/meraki-python-sdk | 9161673cfd715d147e0a6ddb556d9c9913e06580 | 9894089eb013318243ae48869cc5130eb37f80c0 | refs/heads/master | 2022-04-02T08:36:03.907147 | 2020-02-03T19:24:04 | 2020-02-03T19:24:04 | 416,889,849 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UniversalSearchKnowledgeBaseSearchEnum(object):
"""Implementation of the 'UniversalSearchKnowledgeBaseSearch' enum.
The universal search box always visible on Dashboard will, by default,
present results from the Meraki KB. This configures
whether these Meraki KB results should be returned. Can be one of
'default or inherit', 'hide' or 'show'.
Attributes:
        ENUM_DEFAULT_OR_INHERIT: TODO: type description here.
HIDE: TODO: type description here.
SHOW: TODO: type description here.
"""
ENUM_DEFAULT_OR_INHERIT = 'default or inherit'
HIDE = 'hide'
SHOW = 'show'
d0b8df90a505c6ce70739548052cf57d31f3c545 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/422/usersdata/328/89006/submittedfiles/lecker.py | 646555fee493b246ee37fbf271bb339645a2e877 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | n1=int(input('n1:'))
n2=int(input('n2:'))
n3=int(input('n3:'))
n4=int(input('n4:'))
if n1 >n2 and n4<n3:
print('S')
elif n2 >n1> n3 and n4<n3 :
print('S')
elif n3>n4>n2 and n1<n2:
print('S')
elif n4>n3 :
print('S')
else:
print('N') | [
"[email protected]"
]
| |
5f5b4e4172a9aafe394060657cf1b1bd9a055427 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5631572862566400_0/Python/ugo/c.py | fc210345694d8b61a3644358a93468fbce72a716 | []
| no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py |
def get_candidates(bffs):
ret = []
for i in range(len(bffs)):
for j in range(i+1, len(bffs)):
if bffs[i] == j and bffs[j] == i:
ret.append((i, j))
return ret
def longest(n, dontgo, edges):
print 'longest', n, dontgo
ret = 1
for nb in edges[n]:
if nb != dontgo:
ret = max(ret, longest(nb, dontgo, edges) + 1)
return ret
# def dfs(n, starting, visited, edges):
# next = edges[n]
# if starting in visited
f = open('c.small.in')
fout = open('c.out', 'w')
numCases = int(f.readline().strip())
for numCase in range(numCases):
print 'CASE: {}'.format(numCase+1)
N = int(f.readline().strip())
bffs = [None] * N
reverse_bffs = []
for i in range(N):
reverse_bffs.append([])
ss = f.readline().split()
for i in range(N):
bffs[i] = int(ss[i]) - 1
reverse_bffs[int(ss[i]) - 1].append(i)
# print bffs
# print reverse_bffs
#case 1
case1max = 0
candidates = get_candidates(bffs)
len_candidates = len(candidates)
for (c_x, c_y) in candidates:
# print c_x, c_y
print c_x
d1 = longest(c_x, c_y, reverse_bffs)
print c_y
d2 = longest(c_y, c_x, reverse_bffs)
case1max = max(case1max, d1+d2 + 2 * (len_candidates-1) )
print c_x, d1
print c_y, d2
print case1max
case2max = 0
for n in range(0, N):
if len(reverse_bffs[n]) == 0:
continue
cnt = 1
cur = n
visited = set()
visited.add(cur)
while True:
next = bffs[cur]
if next == n:
break
if next in visited:
cnt = 0
break
visited.add(next)
cur = next
cnt += 1
print 'cycle starting n:', n, cnt
case2max = max(case2max, cnt)
# visited = set()
# visited.add(n)
# d = dfs(n, n, visited, bffs)
# print n, d
# case2max = max(case2max, d)
#case 2
# for node in range(1, N+1):
# print ' '.join(result)
print 'case1max', case1max, 'case2max', case2max
fout.write('Case #{}: {}\n'.format(numCase+1, max(case1max, case2max)))
fout.close()
f59015df0fd96a8dc9105e2b9aec3f31d216ca8f | df7b40e95718ac0f6071a0ba571b42efc81cf6de | /configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py | 5305689d09b944f6e37aa85567ce3f29fc6974a7 | [
"Apache-2.0"
]
| permissive | shinianzhihou/ChangeDetection | 87fa2c498248e6124aeefb8f0ee8154bda36deee | 354e71234bef38b6e142b6ba02f23db958582844 | refs/heads/master | 2023-01-23T20:42:31.017006 | 2023-01-09T11:37:24 | 2023-01-09T11:37:24 | 218,001,748 | 162 | 29 | Apache-2.0 | 2022-11-03T04:11:00 | 2019-10-28T08:41:54 | Python | UTF-8 | Python | false | false | 249 | py | _base_ = [
'../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
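# Editor's note: in MMSegmentation-style configs the `_base_` files are merged
# in order, and the dict above overrides only the fields it names (here, the
# 150 ADE20K classes for both heads).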
08e64277223c06c607a305f6816878e91c7112f9 | 3b6b76aae93eb8a2c738a1364e923d3bad20e0a6 | /articles/wsgi-intro/twill-wsgi-example.py | c8d9450fb8f6a4c7610666f7a9687e7e5a2e8ccb | []
| no_license | ctb/ivory.idyll.org | 24e4a0f67fbbde399118aff3c27a03bac304aa8f | 88df5f33361e6e13eda248ee55f1e4e460b998d9 | refs/heads/master | 2020-04-10T10:42:00.111811 | 2018-12-08T19:54:05 | 2018-12-08T19:54:05 | 160,973,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #! /usr/bin/env python
import twill
def simple_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!\n']
if __name__ == '__main__':
print '*** installing WSGI intercept hook ***\n'
twill.add_wsgi_intercept('localhost', 80, lambda: simple_app)
twill.shell.main()
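# --- Editor's illustrative usage ---
# Inside the twill shell started by main(), the intercepted app can be
# exercised without opening a real network socket, e.g.:
#     go http://localhost:80/
#     show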
bd7f88508e67dbfcf5ecffbf0562f7a05eb1619b | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第1章 python基础/Python基础08/10-异常传递.py | a53af9d709038f15ce320e9490696f4377f4e232 | []
| no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 405 | py | def test1():
print("---test1-1---")
print(num)
print("---test1-2---")
def test2():
print("---test2-1---")
test1()
print("---test2-2---")
def test3():
try:
print("---test3-1---")
test1()
print("---test3-2---")
except Exception as result:
        print("Caught an exception, the message is: %s" % result)
test3()
print("--- fancy divider ---")
test2()
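# Editor's note: test3() catches the NameError raised inside test1() (num is
# undefined), so the program continues; test2() has no try/except, so the same
# NameError propagates up through test2() and terminates the program after the
# divider prints.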
f4c50198426a22de4657d97af5065df4920d777b | 4f111dfacab0acc93900e7746538f85e0b3d8d78 | /day3/01关系运算符.py | 7daa883448340f101de2cd7477971865e50ce034 | []
| no_license | ljxproject/basecode | 5541f25cfe90d5fad26eac0b6e72802aa1fad1f4 | 485e4b41593839bfc61e67261247fb88dc80cc1d | refs/heads/master | 2020-03-26T16:16:26.422617 | 2018-08-17T08:05:11 | 2018-08-17T08:05:11 | 145,091,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | '''
Relational operators and relational expressions
The relational operators are:
> < == != >= <=
Format: expression1 relational-operator expression2
Function: evaluates the values of expression1 and expression2,
Value: returns True if the relation holds, otherwise False
'''
num1 = 2
num2 = 5
num3 = num1 != num2
print(num3)
print(num1 != num2)
8e4afde0ad3d7cdf9500900a9d52568869e8ccec | b9d7194bb50a01e7e56d19ba2f3c048084af54b5 | /_OLD_/bottle.py | 8171ee3221df8251f6911cd57ccc179a1fc2edcf | []
| no_license | BernardoGO/TCC---2017 | 099e72d788974446b58fe5f409a2df25e3613cc5 | 75025e095956624470c22d8f3118441d5c28bdd7 | refs/heads/master | 2018-12-04T10:53:07.809161 | 2018-09-06T04:59:30 | 2018-09-06T04:59:30 | 64,803,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,571 | py | import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.models import save_model, load_model
# dimensions of our images.
img_width, img_height = 150, 150
top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 150000
nb_validation_samples = 24000
epochs = 50
batch_size = 16
def save_bottleneck_features():
datagen = ImageDataGenerator(rescale=1. / 255)
# build the VGG16 network
model = applications.VGG16(include_top=False, weights='imagenet')
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_train = model.predict_generator(
generator, nb_train_samples // batch_size)
np.save(open('bottleneck_features_train.npy', 'wb'),
bottleneck_features_train)
generator = datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_validation = model.predict_generator(
generator, nb_validation_samples // batch_size)
np.save(open('bottleneck_features_validation.npy', 'wb'),
bottleneck_features_validation)
def train_top_model():
train_data = np.load(open('bottleneck_features_train.npy', "rb"))
train_labels = np.array(
[0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
validation_data = np.load(open('bottleneck_features_validation.npy', "rb"))
validation_labels = np.array(
[0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(validation_data, validation_labels))
model.save_weights(top_model_weights_path)
#model.load_weights(top_model_weights_path)
#save_model(model, "model1111.h5")
save_bottleneck_features()
train_top_model()
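# Editor's note: shuffle=False matters above, because the labels are rebuilt
# positionally in train_top_model() -- the first half of each bottleneck array
# is assumed to be class 0 and the second half class 1.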
e7e3f19d55f167659b9939895e3c7c8b47ad52da | c6818c06aacb1eca1fffa8bbc51b6f3aac25c177 | /acre/asgi.py | 7a5ee240ac0ce6bd5657ed8a2e6ac3c7e5f609cc | []
| no_license | Acon94/ACRE | 2d0769780c9f81eba05085ffd8b0af225666d6de | 73622a6dc4ba0f30e8d3e90b02d23c8efd14a5e1 | refs/heads/master | 2022-08-02T02:07:53.004308 | 2020-05-29T15:25:50 | 2020-05-29T15:25:50 | 267,840,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for acre project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'acre.settings')
application = get_asgi_application()
def00a2abdbb12ba52b231da7124685b93516b93 | 23ef81cb94356fd321c07f06dab2877e04131b4d | /yiyuanduobao_shop/migrations/0058_item_proxy_sale_qr_code.py | da3d99780c61df4a84d1c939d53fdc4bb41fd205 | []
| no_license | dongshaohui/one_dolor | 0c688787d8cee42957bec087b74b5ea353cc80fc | 13dea458568152a3913c6f70ecd9a7e1f6e9514e | refs/heads/master | 2020-07-03T03:12:22.409542 | 2016-11-21T08:15:06 | 2016-11-21T08:15:06 | 74,202,604 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('yiyuanduobao_shop', '0057_item_winner_customer'),
]
operations = [
migrations.AddField(
model_name='item',
name='proxy_sale_qr_code',
            field=models.CharField(default=b'', max_length=500, verbose_name='\u672c\u671f\u4ee3\u5356\u4e8c\u7ef4\u7801'),  # verbose_name: "proxy-sale QR code for this round"
preserve_default=True,
),
]
0b231fcc73526c6bd8bb5185239f91dd9e68d7cf | 13edd8f1bc3b86fd881f85fbeafe94811392d7fc | /fourth_module/多线程多进程/new/多线程/01 开启线程的两种方式.py | cbdb541d1193f3e8f003cc5d85896cfbaa111812 | []
| no_license | ryan-yang-2049/oldboy_python_study | f4c90c9d8aac499e1d810a797ab368217f664bb1 | 6e1ab7f217d9bf9aa7801266dee7ab4d7a602b9f | refs/heads/master | 2022-07-22T23:49:28.520668 | 2019-06-11T13:26:25 | 2019-06-11T13:26:25 | 129,877,980 | 0 | 1 | null | 2022-07-18T17:12:54 | 2018-04-17T09:12:48 | HTML | UTF-8 | Python | false | false | 643 | py | # -*- coding: utf-8 -*-
"""
__title__ = '01 开启线程的两种方式.py'  (translation: "01 Two ways to start a thread.py")
__author__ = 'yangyang'
__mtime__ = '2018.02.07'
"""
from threading import Thread
import os,time
# def task(name):
# print("%s is running,PID: %s" % (name,os.getpid()))
#
# if __name__ == '__main__':
# p = Thread(target=task,args=('ryan',))
# p.start()
#     print("main thread, PID: %s" % os.getpid())
class MyThread(Thread):
def __init__(self,name):
super().__init__()
self.name = name
def run(self):
print("%s is running,PID: %s"%(self.name,os.getpid()))
if __name__ == '__main__':
obj = MyThread('ryan')
obj.start()
    print("main thread, PID: %s" % os.getpid())
00d288a2b6044bd45e41cb8a04842120a28cf531 | 90047daeb462598a924d76ddf4288e832e86417c | /chromecast/browser/DEPS | c273dc2c7d0751e9b9e547fd0285090933fa1b4b | [
"BSD-3-Clause"
]
| permissive | massbrowser/android | 99b8c21fa4552a13c06bbedd0f9c88dd4a4ad080 | a9c4371682c9443d6e1d66005d4db61a24a9617c | refs/heads/master | 2022-11-04T21:15:50.656802 | 2017-06-08T12:31:39 | 2017-06-08T12:31:39 | 93,747,579 | 2 | 2 | BSD-3-Clause | 2022-10-31T10:34:25 | 2017-06-08T12:36:07 | null | UTF-8 | Python | false | false | 990 | include_rules = [
"+cc/base/switches.h",
"+chromecast/common",
"+chromecast/graphics",
"+chromecast/app/grit/chromecast_settings.h",
"+chromecast/app/resources/grit/shell_resources.h",
"+chromecast/media",
"+chromecast/net",
"+chromecast/service",
"+components/cdm/browser",
"+components/crash",
"+components/network_hints/browser",
"+components/prefs",
"+components/proxy_config",
"+content/public/android",
"+content/public/browser",
"+content/public/common",
"+content/public/test",
"+device/geolocation",
"+gin/v8_initializer.h",
"+gpu/command_buffer/service/gpu_switches.h",
"+media/audio",
"+media/base",
"+media/mojo",
"+mojo/public",
"+net",
"+services/service_manager/public",
"+ui/aura",
"+ui/base",
"+ui/compositor",
"+ui/events",
"+ui/gfx",
"+ui/gl",
"+ui/display",
"+ui/ozone/platform/cast/overlay_manager_cast.h",
# TODO(sanfin): Remove this by fixing the crash handler on android.
"!chromecast/app",
]
| [
"[email protected]"
]
| ||
26e94e33c7d3dda0924333d6df8e6e72572d6ac1 | a842f224d1b0c2e74b2043e8d03f49e3298086df | /grep_scales.py | 2b83cfe14e04d138314104c9309a15a7056c7411 | []
| no_license | ronsengupta/grep-scales | 68f8037171cdfd3f43c02d3d77f4f633e4196856 | 5740902b4694ae8d1cdee04e213f41c3d99bc428 | refs/heads/master | 2020-06-12T23:00:48.071262 | 2016-04-10T08:48:04 | 2016-04-10T08:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | from shutit_module import ShutItModule
class grep_scales(ShutItModule):
def build(self, shutit):
afile = r'''THIS LINE IS THE 1ST UPPER CASE LINE IN THIS FILE.
this line is the 1st lower case line in this file.
This Line Has All Its First Character Of The Word With Upper Case.

Two lines above this line is empty.
And this is the last line.
'''
shutit.send_file('afile',afile)
shutit.send('alias grep=grep')
afile_message = '''I have created a file called 'afile' that looks like this:
BEGINS
''' + afile + '''
ENDS
'''
follow_on_context={'check_command':'ls','context':'docker'}
#shutit.challenge('move file afile to filename: 1',challenge_type='golf',expect='1',follow_on_context=follow_on_context)
shutit.challenge(afile_message + '''
For your first task, grep out the last line, ie the one that reads: 'And this is the last line.'.''','And this is the last line.',hints=['last','grep last afile'])
shutit.golf(afile_message + 'Return a count of the number of lines with "UPPER" in it (case sensitive)','1',hints=['-c','ask again to get answer','grep -c UPPER afile'])
shutit.golf(afile_message + 'Return a count of the number of lines with "UPPER" in it (case insensitive)','2',hints=['-c','-i','ask again to get answer','grep -c -i UPPER afile'])
shutit.golf(afile_message + 'Return lines that have the word "in" in it (case insensitive)','264200b0557e7c2e75cffc57778311f4',expect_type='md5sum',hints=['-w','-i','ask again to get answer','grep -w -i in afile'])
shutit.golf(afile_message + '''Return lines that DON'T have the word 'case' (case insensitive) in it.''','ca75d0d8558569109e342ac5e09c4d01',expect_type='md5sum',hints=['-v','-i','ask again to get answer','grep -v case afile'])
shutit.golf(afile_message + '''Return line with "UPPER" in it, along with the line number.''','cc9246de53156c4259be5bf05dacadf6',expect_type='md5sum',hints=['-n','ask again to get answer','grep -n UPPER afile'])
shutit.golf(afile_message + 'Print the line after the empty line.','63b6f5fd46648742a6f7aacff644dd92',expect_type='md5sum',hints=['-A','-A1','ask again to get answer','grep -A1 ^$ afile'])
shutit.golf(afile_message + 'Print the two lines that come before the first line with nothing in it.','444cc6679be200fc6579678b6afe19e9',expect_type='md5sum',hints=['-B','-B2','^$ to match the empty line','ask again to get answer','grep -B2 ^$ afile'])
shutit.golf(afile_message + 'Print the line before the empty line, the empty line, and the line after.','7ba4233c4599e0aefd11e93a66c4bf17',expect_type='md5sum',hints=['-C','-C1','ask again to get answer','grep -C1 ^$ afile'],congratulations='Well done, all done!')
#-o, --only-matching Print only the matched (non-empty) parts of a matching line, with each such part on a separate output line.
#-l, --files-with-matches Suppress normal output; instead print the name of each input file from which output would normally have been printed. The scanning will stop on the first match.
#-r
#-e
return True
def module():
return grep_scales(
'tk.shutit.grep_scales.grep_scales', 1845506479.0001,
description='Practice your grep scales!',
maintainer='[email protected]',
delivery_methods=['docker'],
depends=['shutit.tk.setup']
)
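# --- Hedged sketch (not part of the module): reproducing one md5sum answer
# locally. Assumes a POSIX shell with grep and md5sum on PATH, that the
# 'afile' written by build() is in the working directory, and that the
# challenge framework hashes raw command output the same way.
#
#   import subprocess
#   out = subprocess.check_output("grep -A1 ^$ afile | md5sum", shell=True)
#   print(out.split()[0])  # expected: 63b6f5fd46648742a6f7aacff644dd92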
| [
"[email protected]"
]
| |
f76d667d0fdea002d1fd512d3a7f98728174a0a4 | 2ece848b37f7fa6f13ce0e94ddfd0fbd46c72b8f | /backend/utils/crawl_mode.py | c0224bad7279761898d0187465fdb7edceb18649 | [
"Apache-2.0"
]
| permissive | rockeyzhu/eastmoney | 1a2d2db18bd658abe8e65875bf863f1cfcefd545 | c8aa33a69ebee54c64f22a8edbcf30ed0f29b293 | refs/heads/master | 2023-03-06T12:20:03.896607 | 2021-02-20T07:20:53 | 2021-02-20T07:20:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import configparser
def get_crawl_mode():
config = configparser.ConfigParser()
    config.read("config.ini")  # expects config.ini in the current working directory
return config['CRAWL_MODE']['crawl_mode']
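# --- Hedged usage sketch (not part of the original file) ---
# get_crawl_mode() expects a config.ini in the current working directory with
# a [CRAWL_MODE] section; the sample value below is an assumption.
#
#   # config.ini
#   # [CRAWL_MODE]
#   # crawl_mode = full
#
#   mode = get_crawl_mode()   # -> 'full'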
| [
"[email protected]"
]
| |
bc770a4a78f1a3e117c15af7a3ea4c7b4937bf1e | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC200~ABC299/ABC291/c.py | 468e2709c60f01b71d7144cca09a88563e9ae6c3 | []
| no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from collections import defaultdict
N = int(input())
S = input()
d = defaultdict(lambda: False)
d[(0, 0)] = True
nx, ny = 0, 0
ans = 'No'
for i in range(N):
s = S[i]
if s == 'R':
nx += 1
elif s == 'L':
nx -= 1
elif s == 'U':
ny += 1
else:
ny -= 1
if d[(nx, ny)]:
ans = 'Yes'
d[(nx, ny)] = True
print(ans)
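# Worked example (sketch): with N=4, S='RRLL' the walk visits (1,0), (2,0),
# then returns to (1,0); d[(1,0)] is already True, so the answer is 'Yes'.
# With N=2, S='RU' the points (1,0) and (1,1) are both new, so 'No'.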
| [
"[email protected]"
]
| |
860e1b6b4199edd993d0f6b16cdc645e43a2bee9 | 4cef505611a04383310ce6556fac7acb02dbc8a1 | /Unmapped content SE/Unmapped content SE/checking_unmapped_content_SE_api_new1.py | 9ba60d429c9bc2219b1ffb1ca9dea5b0474b5f8b | []
| no_license | Sayan8981/Projectx | 9d8727eec144da35f2acffc787f3c769beef02e1 | bcf93fe885e4cd68bb2c30c408a3b03e785965c3 | refs/heads/master | 2022-03-26T18:13:02.831185 | 2020-01-16T06:52:31 | 2020-01-16T06:52:31 | 187,637,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,452 | py | """writer:Saayan"""
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import MySQLdb
import collections
from pprint import pprint
import sys
import urllib2
import json
import os
from urllib2 import HTTPError
from urllib2 import URLError
import csv
import urllib
import os
import pymysql
import datetime
import httplib
import socket
import unidecode
sys.setrecursionlimit(2000)
import threading
def open_csv(start,name,end,id):
inputFile="unmapped_content_SE1"
f = open(os.getcwd()+'/'+inputFile+'.csv', 'rb')
reader = csv.reader(f)
fullist=list(reader)
result_sheet='/GuideBoxValidationTVSHowPreProd_PX_Saayan%d.csv'%id
if(os.path.isfile(os.getcwd()+result_sheet)):
os.remove(os.getcwd()+result_sheet)
csv.register_dialect('excel',lineterminator = '\n',skipinitialspace=True,escapechar='')
    w=open(os.getcwd()+result_sheet,"wb")
with w as mycsvfile:
fieldnames = ["Id","Title","TotalEpisodes","ReleaseYear","Gb_id","Gb_id_PX","Season Number","Episode Number","EpisodeTitle","OzoneOriginalEpisodeTitle","OzoneEpisodeTitle","OzoneRoviId","Scheme","Search","Match","AmazonLink","Amazon_Flag","StarzLink","Starz_Flag","NetflixLink","Netflix_flag","NBCLink","NBC_flag","CBSLink","CBS_flag","VUDULink","VUDU_flag","ITUNESLink","ITUNES_flag","Ott_flag","Result","Ozone_Series_id","Px_series_id","Rovi_id","Px_series_title","Px_episode_title","Px_release_year","Px_season_number","Px_episode_number","projectx_id","amazon_flag","starz_flag","netflix_flag","cbs_flag","vudu_flag","itunes_flag","amazon_flag_expired","vudu_flag_expired","starz_flag_expired","netflix_flag_expired","cbs_flag_expired","itunes_flag_expired","comment","Series_duplicate","Duplicate id","series_match","episode_title_match","title_match","Season_number_match","Episode_number_match","Release_year_match"]
writer = csv.DictWriter(mycsvfile,fieldnames=fieldnames,dialect="excel",lineterminator = '\n')
writer.writeheader()
total=0
Token='Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3'
Token1='Token token=0b4af23eaf275daaf41c7e57749532f128660ec3befa0ff3aee94636e86a43e7'
domain_name='http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com'
for r in range(start,end-1):
total=total+1
print ({"thread_name":name,"total":total})
source_amazon=[]
source_starz=[]
source_netflix=[]
source_cbs=[]
source_vudu=[]
source_itunes=[]
search_px_id=[]
search_px_id_=[]
search_px_id_filtered=[]
series_id_px=[]
arr_px=[]
arr_rovi=[]
arr_gb=[]
sec_arr=[]
s=0
t=0
u=0
v=0
w=0
x=0
Result=str(fullist[r][29])
if Result=="MAP FAIL":
Id=str(fullist[r][0])
Title=unicode(str(fullist[r][1]),'utf-8')
Title=unidecode.unidecode(Title)
TotalEpisodes=str(fullist[r][2])
ReleaseYear=str(fullist[r][3])
Gb_id=str(fullist[r][4])
Season_Number=str(fullist[r][5])
Episode_Number=str(fullist[r][6])
EpisodeTitle=unicode(str(fullist[r][7]),'utf-8')
EpisodeTitle=unidecode.unidecode(EpisodeTitle)
OzoneOriginalEpisodeTitle=str(fullist[r][8])
OzoneEpisodeTitle=str(fullist[r][9])
OzoneRoviId=str(fullist[r][10])
Scheme=str(fullist[r][11])
Search=str(fullist[r][12])
Match=str(fullist[r][13])
AmazonLink=str(fullist[r][14])
Amazon_Flag=str(fullist[r][15])
StarzLink=str(fullist[r][16])
Starz_Flag=str(fullist[r][17])
NetflixLink=str(fullist[r][18])
Netflix_flag=str(fullist[r][19])
NBCLink=str(fullist[r][20])
NBC_flag=str(fullist[r][21])
CBSLink=str(fullist[r][22])
CBS_flag=str(fullist[r][23])
VUDULink=str(fullist[r][24])
VUDU_flag=str(fullist[r][25])
ITUNESLink=str(fullist[r][26])
ITUNES_flag=str(fullist[r][27])
Ott_flag=str(fullist[r][28])
Result=str(fullist[r][29])
Ozone_Series_id=str(fullist[r][30])
print Result
print Gb_id
amazon_flag_expired=''
vudu_flag_expired=''
starz_flag_expired=''
netflix_flag_expired=''
cbs_flag_expired=''
itunes_flag_expired=''
try:
try:
if eval(AmazonLink):
source_amazon=[]
for oo in eval(AmazonLink):
source_amazon.append(oo)
for l in source_amazon:
if source_amazon.count(l)>1:
source_amazon.remove(l)
except SyntaxError:
source_amazon=[0]
try:
if eval(StarzLink):
source_starz=[]
for oo in eval(StarzLink):
source_starz.append(oo)
for l in source_starz:
if source_starz.count(l)>1:
source_starz.remove(l)
except SyntaxError:
source_starz=[0]
try:
if eval(NetflixLink):
source_netflix=[]
for oo in eval(NetflixLink):
source_netflix.append(oo)
for l in source_netflix:
if source_netflix.count(l)>1:
source_netflix.remove(l)
except SyntaxError:
source_netflix=[0]
try:
if eval(CBSLink):
source_cbs=[]
for oo in eval(CBSLink):
source_cbs.append(oo)
for l in source_cbs:
if source_cbs.count(l)>1:
source_cbs.remove(l)
except SyntaxError:
source_cbs=[0]
try:
if eval(VUDULink):
source_vudu=[]
for oo in eval(VUDULink):
source_vudu.append(oo)
for l in source_vudu:
if source_vudu.count(l)>1:
source_vudu.remove(l)
except SyntaxError:
source_vudu=[0]
try:
if eval(ITUNESLink):
source_itunes=[]
for oo in eval(ITUNESLink):
source_itunes.append(oo)
for l in source_itunes:
if source_itunes.count(l)>1:
source_itunes.remove(l)
except SyntaxError:
source_itunes=[0]
#import pdb;pdb.set_trace()
if source_amazon!=[0]:
url_amazon="http://34.231.212.186:81/projectx/%s/amazon/ottprojectx"%source_amazon[0]
response_amazon=urllib2.Request(url_amazon)
response_amazon.add_header('Authorization',Token)
resp_amazon=urllib2.urlopen(response_amazon)
data_amazon=resp_amazon.read()
data_resp_amazon=json.loads(data_amazon)
for ii in data_resp_amazon:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
s=len(sec_arr)
if len(sec_arr)>=1:
amazon_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=amazon"%source_amazon[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
amazon_flag_expired='False'
amazon_flag='False'
else:
amazon_flag_expired='True'
amazon_flag='False'
else:
amazon_flag=''
arr_px=[]
if source_starz!=[0]:
url_starz="http://34.231.212.186:81/projectx/%s/starz/ottprojectx"%source_starz[0]
response_starz=urllib2.Request(url_starz)
response_starz.add_header('Authorization',Token)
resp_starz=urllib2.urlopen(response_starz)
data_starz=resp_starz.read()
data_resp_starz=json.loads(data_starz)
for ii in data_resp_starz:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
t=len(sec_arr)
if len(sec_arr)>s:
starz_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=starz"%source_starz[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
starz_flag_expired='False'
starz_flag='False'
else:
starz_flag_expired='True'
starz_flag='False'
else:
starz_flag=''
arr_px=[]
if source_netflix!=[0]:
url_netflix="http://34.231.212.186:81/projectx/%s/netflixusa/ottprojectx"%source_netflix[0]
response_netflix=urllib2.Request(url_netflix)
response_netflix.add_header('Authorization',Token)
resp_netflix=urllib2.urlopen(response_netflix)
data_netflix=resp_netflix.read()
data_resp_netflix=json.loads(data_netflix)
for ii in data_resp_netflix:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
u=len(sec_arr)
if len(sec_arr)>t:
netflix_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=netflixusa"%source_netflix[0]
response_expired=urllib2.Request(expired_link)
                    response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
netflix_flag_expired='False'
netflix_flag='False'
else:
netflix_flag_expired='True'
netflix_flag='False'
else:
netflix_flag=''
arr_px=[]
if source_cbs!=[0]:
url_cbs="http://34.231.212.186:81/projectx/%s/cbs/ottprojectx"%source_cbs[0]
response_cbs=urllib2.Request(url_cbs)
response_cbs.add_header('Authorization',Token)
resp_cbs=urllib2.urlopen(response_cbs)
data_cbs=resp_cbs.read()
data_resp_cbs=json.loads(data_cbs)
for ii in data_resp_cbs:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
v=len(sec_arr)
if len(sec_arr)>u:
cbs_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=cbs"%source_cbs[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
cbs_flag_expired='False'
cbs_flag='False'
else:
cbs_flag_expired='True'
cbs_flag='False'
else:
cbs_flag=''
arr_px=[]
if source_vudu!=[0]:
url_vudu="http://34.231.212.186:81/projectx/%s/vudu/ottprojectx"%source_vudu[0]
response_vudu=urllib2.Request(url_vudu)
response_vudu.add_header('Authorization',Token)
resp_vudu=urllib2.urlopen(response_vudu)
data_vudu=resp_vudu.read()
data_resp_vudu=json.loads(data_vudu)
for ii in data_resp_vudu:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
w=len(sec_arr)
if len(sec_arr)>v:
vudu_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=netflixusa"%source_vudu[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
vudu_flag_expired='False'
vudu_flag='False'
else:
vudu_flag_expired='True'
vudu_flag='False'
else:
vudu_flag=''
arr_px=[]
if source_itunes!=[0]:
url_itune="http://34.231.212.186:81/projectx/%s/itunes/ottprojectx"%source_itunes[0]
response_itune=urllib2.Request(url_itune)
response_itune.add_header('Authorization',Token)
resp_itune=urllib2.urlopen(response_itune)
data_itune=resp_itune.read()
data_resp_itune=json.loads(data_itune)
for ii in data_resp_itune:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
x=len(sec_arr)
if len(sec_arr)>w:
itunes_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=netflixusa"%source_itunes[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
itunes_flag_expired='False'
itunes_flag='False'
else:
itunes_flag_expired='True'
itunes_flag='False'
else:
itunes_flag=''
for bb in sec_arr:
while sec_arr.count(bb)>1:
sec_arr.remove(bb)
while sec_arr.count(bb)>1:
sec_arr.remove(bb)
for bb in arr_rovi:
if arr_rovi.count(bb)>1:
arr_rovi.remove(bb)
if bb in arr_rovi:
if arr_rovi.count(bb)>1:
arr_rovi.remove(bb)
for bb in arr_gb:
if arr_gb.count(bb)>1:
arr_gb.remove(bb)
if bb in arr_gb:
if arr_gb.count(bb)>1:
arr_gb.remove(bb)
if amazon_flag=='True' or starz_flag=='True' or netflix_flag=='True' or cbs_flag=='True' or vudu_flag=='True' or itunes_flag=='True':
if len(sec_arr)==1:
url_px="http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com/programs/%d?&ott=true"%sec_arr[0]
response_px=urllib2.Request(url_px)
response_px.add_header('Authorization',Token)
resp_px=urllib2.urlopen(response_px)
data_px=resp_px.read()
data_resp_px=json.loads(data_px)
for kk in data_resp_px:
if kk.get("original_title")!='':
series_title=unicode(kk.get("original_title"))
series_title=unidecode.unidecode(series_title)
else:
series_title=unicode(kk.get("long_title"))
series_title=unidecode.unidecode(series_title)
if kk.get("original_episode_title")!='':
episode_title=unicode(kk.get("original_episode_title"))
episode_title=unidecode.unidecode(episode_title)
ratio_title=fuzz.ratio(episode_title.upper(),EpisodeTitle.upper())
if ratio_title >=70:
episode_title_match="Above"+'90%'
title_match='Pass'
else:
episode_title =unicode(kk.get("episode_title"))
episode_title=unidecode.unidecode(episode_title)
ratio_title=fuzz.ratio(episode_title.upper(),EpisodeTitle.upper())
if ratio_title >=70:
episode_title_match="Above"+'90%'
title_match='Pass'
else:
episode_title_match="Below"+'90%'
title_match='Fail'
else:
episode_title =unicode(kk.get("episode_title"))
episode_title=unidecode.unidecode(episode_title)
release_year=kk.get("release_year")
season_number=kk.get("episode_season_number")
episode_number=kk.get("episode_season_sequence")
series_id=str(kk.get("series_id"))
if Ozone_Series_id==series_id:
series_match='Pass'
else:
series_match='Fail/Not ingested'
ratio_title=fuzz.ratio(episode_title.upper(),EpisodeTitle.upper())
if ratio_title >=70:
episode_title_match="Above"+'90%'
title_match='Pass'
else:
episode_title_match="Below"+'90%'
title_match='Fail'
if str(season_number)==Season_Number:
Season_number_match="Pass"
else:
Season_number_match='Fail'
if str(episode_number)==Episode_Number:
Episode_number_match="Pass"
else:
Episode_number_match='Fail'
if str(release_year)==ReleaseYear:
Release_year_match="Pass"
else:
r_y=release_year
r_ys=release_year
r_y=r_y+1
if str(r_y)==ReleaseYear:
Release_year_match='Pass'
else:
r_ys=r_ys-1
if str(r_ys)==ReleaseYear:
Release_year_match='Pass'
else:
Release_year_match='Fail'
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"EpisodeTitle":EpisodeTitle,"Px_series_id":series_id,"Px_series_title":series_title,"Px_episode_title":episode_title,"Px_release_year":release_year,"Px_season_number":season_number,"Px_episode_number":episode_number,"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'All link or any of them is present in projectx API',"series_match":series_match,"episode_title_match":episode_title_match,"title_match":title_match,"Season_number_match":Season_number_match,"Episode_number_match":Episode_number_match,"Release_year_match":Release_year_match})
if len(sec_arr)>1:
arr_gb=[]
arr_rovi=[]
search_px_id__=[]
search_px_id1_=[]
duplicate=""
search_px_id1=[]
next_page_url=""
data_resp_search=dict()
px_link="http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com/programs?ids=%s&ott=true&aliases=true" %'{}'.format(",".join([str(i) for i in sec_arr]))
response_link=urllib2.Request(px_link)
response_link.add_header('Authorization',Token)
resp_link=urllib2.urlopen(response_link)
data_link=resp_link.read()
data_resp_link3=json.loads(data_link)
for kk in data_resp_link3:
series_id_px.append(kk.get("series_id"))
for ll in series_id_px:
while series_id_px.count(ll)>1:
series_id_px.remove(ll)
if len(series_id_px)>1:
search_api="http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com/v3/voice_search?q=%s&safe_search=false&credit_summary=true&credit_types=Actor&aliases=true&ott=true"%urllib2.quote(Title)
response_search=urllib2.Request(search_api)
response_search.add_header('User-Agent','Branch Fyra v1.0')
response_search.add_header('Authorization',Token)
resp_search=urllib2.urlopen(response_search)
data_search=resp_search.read()
data_resp_search=json.loads(data_search)
if data_resp_search.get("top_results"):
for ii in data_resp_search.get("top_results"):
if ii.get("action_type")=="ott_search" and ii.get("action_type")!="web_results" and ii.get("results"):
for jj in ii.get("results"):
if jj.get("object").get("show_type")=='SM':
search_px_id.append(jj.get("object").get("id"))
if search_px_id:
for mm in search_px_id:
if mm in series_id_px:
search_px_id_.append(mm)
else:
search_px_id_filtered.append(mm)
if len(search_px_id_)==1 or search_px_id_==[]:
try:
search_px_id1_.append(search_px_id_[0])
search_px_id_=[]
search_px_id=[]
duplicate='False'
except IndexError:
search_px_id_=[]
search_px_id=[]
duplicate='False'
else:
if search_px_id_!=search_px_id__:
search_px_id__=search_px_id__+search_px_id_
duplicate='True'
search_px_id=[]
else:
search_px_id__=search_px_id__
duplicate='True'
search_px_id=[]
if duplicate=='False':
while data_resp_search.get("results"):
for nn in data_resp_search.get("results"):
if nn.get("action_type")=="ott_search" and (nn.get("results")==[] or nn.get("results")):
next_page_url=nn.get("next_page_url")
if next_page_url is not None:
search_api1=domain_name+next_page_url.replace(' ',"%20")
if search_api1!=domain_name :
search_api=search_api1
response_search=urllib2.Request(search_api)
response_search.add_header('User-Agent','Branch Fyra v1.0')
response_search.add_header('Authorization',Token)
resp_search=urllib2.urlopen(response_search)
data_search=resp_search.read()
data_resp_search=json.loads(data_search)
else:
data_resp_search={"resilts":[]}
else:
data_resp_search={"resilts":[]}
if data_resp_search.get("results"):
for nn in data_resp_search.get('results'):
if nn.get("results"):
for jj in nn.get("results"):
if jj.get("object").get("show_type")=='SM':
search_px_id.append(jj.get("object").get("id"))
if search_px_id:
for mm in search_px_id:
if mm in series_id_px:
search_px_id_.append(mm)
else:
search_px_id_filtered.append(mm)
if len(search_px_id_)==1 or search_px_id_==[]:
try:
search_px_id1_.append(search_px_id_[0])
search_px_id_=[]
search_px_id=[]
duplicate='False'
except IndexError:
search_px_id_=[]
search_px_id=[]
duplicate='False'
else:
if search_px_id_!=search_px_id__:
search_px_id__=search_px_id__+search_px_id_
duplicate='True'
search_px_id=[]
else:
search_px_id__=search_px_id__
duplicate='True'
search_px_id=[]
if len(search_px_id__)>1 and duplicate=='True':
series_duplicate="True"
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'Multiple projectx ids found for series in search API',"Series_duplicate":series_duplicate,"Duplicate id":search_px_id__,"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
series_duplicate="False"
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'Multiple projectx ids found',"Series_duplicate":series_duplicate,"Duplicate id":[],"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
series_duplicate="False"
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'Multiple projectx ids found',"Series_duplicate":series_duplicate,"Duplicate id":[],"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
elif amazon_flag=='' and starz_flag=='' and netflix_flag=='' and cbs_flag=='' and vudu_flag=='' and itunes_flag=='':
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'this links not in the sheet',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
elif amazon_flag_expired=='False' and vudu_flag_expired=='False' and starz_flag_expired=='False' and netflix_flag_expired=='False' and cbs_flag_expired=='False' and itunes_flag_expired=='False':
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this links not expired',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
link=[]
link_present=''
gb_api="http://34.231.212.186:81/projectx/guideboxdata?sourceId=%d&showType=SE"%eval(Gb_id)
response_gb=urllib2.Request(gb_api)
response_gb.add_header('Authorization',Token)
resp_gb=urllib2.urlopen(response_gb)
data_gb=resp_gb.read()
data_resp_gb=json.loads(data_gb)
if data_resp_gb.get("tv_everywhere_web_sources") or data_resp_gb.get("subscription_web_sources") or data_resp_gb.get("free_web_sources") or data_resp_gb.get("purchase_web_sources") :
if data_resp_gb.get("tv_everywhere_web_sources"):
for aa in data_resp_gb.get("tv_everywhere_web_sources"):
link.append(aa.get('link'))
if data_resp_gb.get("subscription_web_sources"):
for aa in data_resp_gb.get("subscription_web_sources"):
link.append(aa.get('link'))
if data_resp_gb.get("free_web_sources"):
for aa in data_resp_gb.get("free_web_sources"):
link.append(aa.get('link'))
if data_resp_gb.get("purchase_web_sources"):
for aa in data_resp_gb.get("purchase_web_sources"):
link.append(aa.get('link'))
if source_amazon[0]==0:
source_amazon[0]=' '
if source_starz[0]==0:
source_starz[0]=' '
if source_netflix[0]==0:
source_netflix[0]=' '
if source_cbs[0]==0:
source_cbs[0]=' '
if source_vudu[0]==0:
source_vudu[0]=' '
if source_itunes[0]==0:
source_itunes[0]=' '
for bb in link:
if str(source_amazon[0]) in bb or str(source_starz[0]) in bb or str(source_netflix[0]) in bb or str(source_cbs[0]) in bb or str(source_vudu[0]) in bb or str(source_itunes[0]) in bb:
link_present='True'
break
else:
link_present='False'
if link_present=='True':
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this link not ingested but ott link present in db',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this link not ingested and not present in DB',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this link not ingested and not present in DB',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
print datetime.datetime.now()
except httplib.BadStatusLine:
print ("exception caught httplib.BadStatusLine..............................Retrying.............")
continue
except urllib2.HTTPError:
print ("exception caught HTTPError....................................Retrying.......")
continue
except socket.error:
print ("exception caught SocketError..........................Retrying.................")
continue
except URLError:
print ("exception caught URLError.....................Retrying......................")
continue
print datetime.datetime.now()
#open_csv()
t1 =threading.Thread(target=open_csv,args=(1,"thread - 1",6242,1))
t1.start()
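# --- Hedged sketch (not part of the original script) ---
# The episode-title comparison above uses fuzzywuzzy's simple ratio against a
# fixed threshold of 70. A minimal standalone illustration:
#
#   from fuzzywuzzy import fuzz
#   ratio = fuzz.ratio("PILOT", "Pilot".upper())   # identical strings -> 100
#   matched = ratio >= 70                          # -> True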
| [
"[email protected]"
]
| |
7fc78a96811a0f46faa2e7fdc489c6ccfdf5de20 | b7f1b4df5d350e0edf55521172091c81f02f639e | /components/arc/video_accelerator/DEPS | be1c9c99ce26a0e5b89f2611421f734fc2f70e77 | [
"BSD-3-Clause"
]
| permissive | blusno1/chromium-1 | f13b84547474da4d2702341228167328d8cd3083 | 9dd22fe142b48f14765a36f69344ed4dbc289eb3 | refs/heads/master | 2023-05-17T23:50:16.605396 | 2018-01-12T19:39:49 | 2018-01-12T19:39:49 | 117,339,342 | 4 | 2 | NOASSERTION | 2020-07-17T07:35:37 | 2018-01-13T11:48:57 | null | UTF-8 | Python | false | false | 296 | include_rules = [
"+components/arc/common",
"+gpu/command_buffer/service/gpu_preferences.h",
"+media/video",
"+media/base/video_frame.h",
"+media/base/video_types.h",
"+media/gpu",
"+mojo/edk/embedder",
"+services/service_manager/public/cpp",
"+ui/gfx",
"+ui/ozone/public",
]
| [
"[email protected]"
]
| ||
00728e4101b62fa2bf7ba2c3784d4576344c6cc3 | d5b3de6729e165bddcc17b8c3c285df808cd9fd0 | /application/modules/fonction/views_fct.py | 209fd03dd4976dbac54b11d2915ca69f51eb9231 | []
| no_license | wilrona/Gesacom | 907848d44d9fa1a285b5c7a452c647fc6cbbc2fa | 31ec26c78994030844f750039a89a43a66d61abf | refs/heads/master | 2020-04-06T15:00:36.522832 | 2016-09-08T13:19:06 | 2016-09-08T13:19:06 | 49,956,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | __author__ = 'Ronald'
from ...modules import *
from models_fct import Fonction
from forms_fct import FormFonction
# Flask-Cache (configured to use App Engine Memcache API)
cache = Cache(app)
prefix = Blueprint('fonction', __name__)
@prefix.route('/fonction')
@login_required
@roles_required([('super_admin', 'fonction')])
def index():
menu = 'societe'
submenu = 'entreprise'
context = 'fonction'
title_page = 'Parametre - Fonctions'
search = False
q = request.args.get('q')
if q:
search = True
try:
page = int(request.args.get('page', 1))
except ValueError:
page = 1
datas = Fonction.query()
pagination = Pagination(css_framework='bootstrap3', page=page, total=datas.count(), search=search, record_name='fonctions')
if datas.count() > 10:
if page == 1:
offset = 0
else:
page -= 1
offset = page * 10
datas = datas.fetch(limit=10, offset=offset)
return render_template('fonction/index.html', **locals())
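# --- Hedged sketch (not part of the original file): the page/offset math in
# index() above, as a standalone helper. The page size of 10 mirrors the view.
#
#   def page_to_offset(page, per_page=10):
#       # page 1 -> offset 0, page 2 -> offset 10, ...
#       return 0 if page <= 1 else (page - 1) * per_page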
@prefix.route('/fonction/edit', methods=['GET', 'POST'])
@prefix.route('/fonction/edit/<int:fonction_id>', methods=['GET', 'POST'])
@login_required
@roles_required([('super_admin', 'fonction')], ['edit'])
def edit(fonction_id=None):
if fonction_id:
grades = Fonction.get_by_id(fonction_id)
form = FormFonction(obj=grades)
else:
grades = Fonction()
form = FormFonction()
success = False
if form.validate_on_submit():
grades.libelle = form.libelle.data
grades.put()
        flash('Record saved successfully', 'success')
success = True
return render_template('fonction/edit.html', **locals())
@prefix.route('/fonction/delete/<int:fonction_id>')
@login_required
@roles_required([('super_admin', 'fonction')], ['edit'])
def delete(fonction_id):
fonctions = Fonction.get_by_id(fonction_id)
if not fonctions.count():
fonctions.key.delete()
        flash('Deletion successful', 'success')
else:
        flash('Cannot delete', 'danger')
return redirect(url_for('fonction.index')) | [
"[email protected]"
]
| |
da0c2a1cf4183a389e9a794b268a35920914e270 | 226be49a7396e7c6004ba4de567f6c22b5b245c0 | /packaging/fremantle/.py2deb_build_folder/gread/src/opt/GRead/views/basic/utils/toolbar.py | ce31b63c1a91f4abdca09d651a501e4d2d0b2425 | []
| no_license | twidi/GRead | 0e315c0c924fa169cb5d16e927c6b54e79e25bd9 | 51429189762b706fbe8ca1b927d89071a556d51e | refs/heads/master | 2021-01-10T19:54:43.098022 | 2010-11-23T00:41:17 | 2010-11-23T00:41:17 | 1,146,572 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,891 | py | # -*- coding: utf-8 -*-
"""
Lib to manage toolbars which appear on mousedown(maemo) or mousemove(not maem0)
and stay visible a few seconds
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import time
class ToolbarOwnerEventFilter(QObject):
def __init__(self, *args, **kwargs):
super(ToolbarOwnerEventFilter, self).__init__(*args, **kwargs)
def eventFilter(self, obj, e):
if e.type() == QEvent.Resize:
self.parent().replace_toolbars()
return False
class ToolbarManager(QObject):
def __init__(self, toolbars, event_target, *args, **kwargs):
super(ToolbarManager, self).__init__(*args, **kwargs)
parent = self.parent()
self.event_target = event_target
self.toolbars = toolbars
self.mode_opacity = False # don't know how to change opacity !
self.timer = QTimer()
self.delay = 0
self.max_delay = 1000.0 # ms (don't forget ".0")
parent.installEventFilter(self)
parent.installEventFilter(ToolbarOwnerEventFilter(parent=self))
QObject.connect(self.timer, SIGNAL("timeout()"), self.hide)
def add_toolbar(self, toolbar):
if toolbar not in self.toolbars:
self.toolbars.append(toolbar)
toolbar.action.triggered.connect(self.display)
def replace_toolbars(self):
for toolbar in self.toolbars:
toolbar.replace()
def display(self):
for toolbar in self.toolbars:
if self.mode_opacity:
toolbar.setStyleSheet("opacity:1")
toolbar.show()
self.timer.stop()
self.delay = self.max_delay
self.timer.start(self.max_delay)
def hide(self):
if not self.delay:
return
if self.mode_opacity:
self.delay = int(self.delay/20)*10
else:
self.delay = 0
if self.delay == 0:
self.timer.stop()
for toolbar in self.toolbars:
toolbar.hide()
else:
opacity = 255*self.delay/self.max_delay
for toolbar in self.toolbars:
toolbar.setStyleSheet("opacity:%f" % opacity)
self.timer.setInterval(self.delay)
def eventFilter(self, obj, e):
if e.type() == QEvent.HoverMove:
if (not self.delay) or self.delay < 500:
self.display()
return False
class Toolbar(QObject):
def __init__(self, text, tooltip, callback, x, y, *args, **kwargs):
super(Toolbar, self).__init__(*args, **kwargs)
parent = self.parent()
self.enabled = False
self.x = x
self.y = y
self.toolbar = QToolBar(parent)
self.toolbar.setAllowedAreas(Qt.NoToolBarArea)
parent.addToolBar(Qt.NoToolBarArea, self.toolbar)
self.action = QAction(text, parent)
self.action.setToolTip(tooltip)
self.toolbar.addAction(self.action)
self.button = self.toolbar.children()[-1]
self.toolbar.setContentsMargins(0, 0, 0, 0)
font = self.button.font()
font.setPointSizeF(font.pointSizeF() * 3)
self.button.setFont(font)
palette = self.toolbar.palette()
self.button.setStyleSheet(
"""
QToolButton {
border : none;
border-radius : %(border_radius)s;
background: transparent;
color: %(background_hover)s;
}
QToolButton:hover {
background: %(background_hover)s;
color: %(foreground_hover)s;
}
""" %
{
'border_radius': int(self.button.height()/2),
'background_hover': palette.color(palette.Highlight).name(),
'foreground_hover': palette.color(palette.HighlightedText).name(),
}
)
self.toolbar.setStyleSheet("border:none;background:transparent")
self.toolbar.resize(self.button.sizeHint())
self.move(x, y)
self.toolbar.setMovable(False)
self.toolbar.hide()
if callback:
self.action.triggered.connect(callback)
def set_tooltip(self, tooltip):
self.action.setToolTip(tooltip)
def replace(self):
self.move(self.x, self.y)
def move(self, x, y):
"""
        Move the toolbar to coordinates (x, y).
        If a coordinate c satisfies 0 < c <= 1, it is treated as a
        fraction of the parent's width or height.
"""
w_width = self.parent().width()
t_width = self.toolbar.width()
if not x or x < 0:
_x = 0
elif x > 1:
_x = x
else:
_x = int(x * (w_width - t_width))
if _x < 2:
_x = 2
elif _x > (w_width - t_width -2):
_x = (w_width - t_width -2)
w_height = self.parent().height()
t_height = self.toolbar.height()
if not y or y < 0:
_y = 0
elif y > 1:
_y = y
else:
_y = int(y * (w_height - t_height))
if _y < 2:
_y = 2
elif _y > (w_height - t_height -2):
_y = (w_height - t_height -2)
self.toolbar.move(_x, _y)
def move_x(self, x):
self.move(x, self.toolbar.y())
def move_y(self, y):
self.move(self.toolbar.x(), y)
def disable(self):
self.enabled = False
def enable(self):
self.enabled = True
def hide(self):
self.toolbar.hide()
def show(self):
if not self.enabled:
return
#self.toolbar.setStyleSheet("opacity:1")
self.toolbar.show()
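# --- Hedged usage sketch (not part of the original file) ---
# Wiring a Toolbar into a window; `window` (a QMainWindow) and `on_back` (a
# plain callable) are hypothetical names.
#
#   tb = Toolbar("<", "Go back", on_back, x=0.02, y=0.5, parent=window)
#   tb.enable()    # toolbars start disabled and never show otherwise
#   ToolbarManager([tb], event_target=window, parent=window)
#   # the toolbar now appears on hover moves and is hidden again after ~1s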
| [
"[email protected]"
]
|