blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 5-283) | content_id (string, len 40) | detected_licenses (sequence, len 0-41) | license_type (string, 2 classes) | repo_name (string, len 7-96) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable ⌀) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (string, 11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 43 classes) | src_encoding (string, 9 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (string, 30 classes) | content (string, len 7-5.88M) | authors (sequence, len 1) | author (string, len 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3dfd37b502e225e3f66840019c085a2442a476db | 2564d19ba0f40ecb0392cde6a28cb87dc70276db | /ADB/urls.py | 29b83b574dc92be8cb45195e4d40dd703ebdac8c | [] | no_license | Moochun/ADB_Project2_backend | 6476eb07506b55cda1ae92d8ce52bc11e69929a3 | b835c07c3c2fbf9fc537ea062fb7d052f845e5a8 | refs/heads/master | 2020-12-24T18:55:54.768338 | 2016-05-11T15:54:50 | 2016-05-11T15:54:50 | 58,558,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | """ADB URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^movies/$', "movie.views.movies"),
url(r'^movies/(?P<MID>[0-9]+)/$', 'movie.views.movie'),
url(r'^users/$', "movie.views.users"),
url(r'^login/$', "movie.views.login"),
url(r'^comments/$', "movie.views.comments"),
url(r'^comments/(?P<MID>[0-9]+)/$', 'movie.views.comment'),
url(r'^rates/$', "movie.views.rates"),
url(r'^collections/$', "movie.views.collections"),
url(r'^collections/delete/$', "movie.views.collections_delete"),
url(r'^collections/(?P<UID>[0-9]+)/$', 'movie.views.collection'),
url(r'^genres/$', 'movie.views.genres'),
url(r'^genres/(?P<GID>[0-9]+)/$', 'movie.views.genre')
]
| [
"[email protected]"
] | |
2c9c4fd5697467b536ce805d2da4f449ced0fadc | 5c9bc1e9eca2e020e8a459e53e66699d057eb03d | /run.py | b543b3b0b8436f76d78df794279f2f2cc20637b9 | [
"MIT"
] | permissive | alexomaset/PasswordLocker | aaa91581edd7b68991ceb26b8321908506743b17 | d74f12c97e7ff194d31333dd92d50482ade61dbf | refs/heads/master | 2020-05-05T13:30:27.481366 | 2019-04-08T11:28:53 | 2019-04-08T11:28:53 | 180,080,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,306 | py | from user import User
from credential import Credential
import random
import string
def create_account (account_name,user_name,user_password,confirmpassword):
"""
function to create a new account
"""
new_user = User(account_name,user_name,user_password,confirmpassword)
return new_user
def save_details(user):
"""
function to save save_details
"""
user.save_detail()
def display_all_details():
"""
function used to return all saved save_details
"""
return User.display_all_details()
def check_existing_user(username):
"""
a function that is used to check and return all exissting accounts
"""
return User.find_by_username(username)
def create_new_credential(account_name, account_password):
"""Function to create a new account and its credentials"""
new_credential = Credential(account_name, account_password)
return new_credential
def save_new_credential(credential):
"""Function to save the newly created account and password"""
    credential.save_credential()
def find_credential(account_name):
"""Function that finds credentials based on account_name given"""
return Credential.find_by_name(account_name)
def check_existing_credential(name):
"""Method that checks whether a particular account and its credentials exist based on searched account_name"""
return Credential.find_by_name(name)
def display_credential():
"""Function which displays all saved credentials"""
return Credential.display_credential()
def delete_credential(credential):
"""
Method that deletes credentials
"""
return Credential.delete_credential(credential)
def main():
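    # Top-level menu loop: 'cu' creates an account and opens the credentials
    # menu, 'lg' logs in with the built-in test account (testuser/12345),
    # 'ex' exits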
while True:
print("Welcome to PassWordLocker.")
print('\n')
print("Use these short codes to select an option: Create New User use 'cu': Login to your account use 'lg' or 'ex' to exit password locker")
short_code = input()
print('\n')
if short_code == 'cu':
print("Create a UserName")
created_user_name = input()
print("Select a Password")
created_user_password = input()
print("Confirm Your Password")
confirm_password = input()
while confirm_password != created_user_password:
print("Sorry your passwords did not match!")
print("Enter a password")
created_user_password = input()
print("Confirm Your Password")
confirm_password = input()
else:
print(f"Congratulations {created_user_name}! You have created your new account.")
print('\n')
print("Proceed to Log In to your Account")
print("Username")
entered_userName = input()
print("Your Password")
entered_password = input()
while entered_userName != created_user_name or entered_password != created_user_password:
print("You entered a wrong username or password")
print("Username")
entered_userName = input()
print("Your Password")
entered_password = input()
else:
print(f"Welcome: {entered_userName} to your Account")
print('\n')
print("Select an option below to continue: Enter 1, 2, 3, 4 or 5")
print('\n')
while True:
print("1: View Your saved credentials")
print("2: Add new credentials")
print("3: Remove credentials")
print("4: Search credentials")
print("5: Log Out")
option = input()
if option == '2':
while True:
print("Continue to add? y/n")
choice = input()
if choice == 'y':
print("Enter The Account Name")
account_name = input()
print("Enter a password")
                                    print(
                                        "Enter 'gp' to generate a password or 'n' to create your own")
                                    keyword = input()
                                    if keyword == 'gp':
                                        # generate a random 8-character alphanumeric password
                                        account_password = ''.join(
                                            random.choice(string.ascii_letters + string.digits)
                                            for _ in range(8))
                                        print(f"Account: {account_name}")
                                        print(f"Password: {account_password}")
                                        print('\n')
elif keyword == 'n':
print("Create your password")
account_password = input()
print(f"Account: {account_name}")
print(f"Password: {account_password}")
print('\n')
else:
print("Please enter a valid Code")
save_new_credential(create_new_credential(
account_name, account_password))
elif choice == 'n':
break
else:
print("Please use 'y' for yes or 'n' for no!")
elif option == '1':
while True:
print("Below is a list of all your credentials")
if display_credential():
for credential in display_credential():
print(f"ACCOUNT NAME:{credential.account_name}")
print(f"PASSWORD:{credential.account_password}")
else:
print('\n')
print("You don't seem to have any contacts yet")
print('\n')
print("Back to Menu? y/n")
back = input()
if back == 'y':
break
elif back == 'n':
continue
else:
print("Please Enter a valid code")
continue
elif option == '5':
print("WARNING! You will loose all your credentials if you log out. Are you sure? y/n")
logout = input()
if logout == 'y':
print("You have Successfully logged out")
break
elif logout == 'n':
continue
elif option == '3':
while True:
print("Search for credential to delete")
search_name = input()
if check_existing_credential(search_name):
search_credential = find_credential(search_name)
print(f"ACCOUNT NAME: {search_credential.account_name} \n PASSWORD: {search_credential.account_password}")
print("Delete? y/n")
sure = input()
if sure == 'y':
delete_credential(search_credential)
print("Account SUCCESSFULLY deleted")
break
elif sure == 'n':
continue
else:
print("That Contact Does not exist")
break
elif option == '4':
while True:
print("Continue? y/n")
option2 = input()
if option2 == 'y':
print("Enter an account name to find credentials")
search_name = input()
if check_existing_credential(search_name):
search_credential = find_credential(search_name)
print(f"ACCOUNT NAME: {search_credential.account_name} \n PASSWORD: {search_credential.account_password}")
else:
print("That Contact Does not exist")
elif option2 == 'n':
break
else:
print("Please enter a valid code")
else:
print("Please enter a valid code")
continue
elif short_code == 'lg':
print("WELCOME")
print("Enter UserName")
default_user_name = input()
print("Enter Your password")
default_user_password = input()
print('\n')
while default_user_name != 'testuser' or default_user_password != '12345':
print("Wrong userName or password. Username 'testuser' and password '12345'")
print("Enter UserName")
default_user_name = input()
print("Enter Your password")
default_user_password = input()
print('\n')
if default_user_name == 'testuser' and default_user_password == '12345':
print("YOU HAVE SUCCESSFULLY LOGGED IN!")
print('\n')
print("Select an option below to continue: Enter 1, 2, 3, 4 or 5")
print('\n')
while True:
print("1: saved credentials")
print("2: Add new credentials")
print("3: Remove credentials")
print("4: Search credentials")
print("5: Log Out")
option = input()
if option == '2':
while True:
print("Continue to add? y/n")
choice = input()
if choice == 'y':
print("Enter The Account Name")
account_name = input()
print("Enter a password")
                                print(
                                    "Enter 'gp' to generate a password or 'n' to create your own")
                                keyword = input()
                                if keyword == 'gp':
                                    # generate a random 8-character alphanumeric password
                                    account_password = ''.join(
                                        random.choice(string.ascii_letters + string.digits)
                                        for _ in range(8))
                                    print(f"Account: {account_name}")
                                    print(f"Password: {account_password}")
                                    print('\n')
elif keyword == 'n':
print("Create your password")
account_password = input()
print(f"Account: {account_name}")
print(f"Password: {account_password}")
print('\n')
else:
print("Please enter a valid Code")
save_new_credential(create_new_credential(
account_name, account_password))
elif choice == 'n':
break
else:
print("Please use 'y' for yes or 'n' for no!")
elif option == '1':
while True:
print("Below is a list of all your credentials")
if display_credential():
for credential in display_credential():
print(f"ACCOUNT NAME:{credential.account_name}")
print(f"PASSWORD:{credential.account_password}")
else:
print('\n')
print("You don't seem to have any contacts yet")
print('\n')
print("Back to Menu? y/n")
back = input()
if back == 'y':
break
elif back == 'n':
continue
else:
print("Please Enter a valid code")
# elif choice1 == 'n':
# break
# else:
# print("Please use y or n")
elif option == '5':
print("WARNING! You will loose all your credentials if you log out. Are you sure? y/n")
logout = input()
if logout == 'y':
print("You have Successfully logged out")
break
elif logout == 'n':
continue
elif option == '3':
while True:
print("Search for credential to delete")
search_name = input()
if check_existing_credential(search_name):
search_credential = find_credential(search_name)
print(f"ACCOUNT NAME: {search_credential.account_name} \n PASSWORD: {search_credential.account_password}")
print("Delete? y/n")
sure = input()
if sure == 'y':
delete_credential(search_credential)
print("Account SUCCESSFULLY deleted")
break
elif sure == 'n':
continue
else:
print("That Contact Does not exist")
break
elif option == '4':
while True:
print("Continue? y/n")
option2 = input()
if option2 == 'y':
print("Enter an account name to find credentials")
search_name = input()
if check_existing_credential(search_name):
search_credential = find_credential(search_name)
print(f"ACCOUNT NAME: {search_credential.account_name} \n PASSWORD: {search_credential.account_password}")
else:
print("That Contact Does not exist")
elif option2 == 'n':
break
else:
print("Please enter a valid code")
else:
print("Please enter a valid code")
elif short_code == 'ex':
break
else:
print("Please Enter a valid code to continue")
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
2d66de8b269ba8e5da83aedb44f710f028e31d44 | b1a7417caf4200557106821a0fe45d848b598467 | /python 101/files/files-write.py | 6d916edda92324b417762dd84371b64f4021a66e | [] | no_license | WonderLuc/brother-bootcamp | 3ab6a197160f71d17be837c0be2001b2abcc6c3c | 841285d516566730d35da7af9690c5d3b94051a4 | refs/heads/main | 2023-08-20T15:44:33.228640 | 2021-10-14T17:43:47 | 2021-10-14T17:43:47 | 404,047,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | try:
    with open('hello.txt', 'a') as f:
        f.write('\nI\'m fine')
    with open('hello.txt', 'r') as f:
        print(f.read())
    # Hello File!
    # How are you?
    # I'm fine
    with open('404.txt', 'w') as f:
        f.write('New File')
    with open('404.txt', 'r') as f:
        print(f.read())
    # New File
except OSError as err:
    print('Something went wrong:', err)
"[email protected]"
] | |
abb6d54a01cfb9eb330d64b5b58c4a9309b4d8e3 | 04b85710bbf73f807b7bf8506982fd019e3898e1 | /app/user/serializers.py | 20380f41ebbe29264af31739c0a3ada8d4fe7b1f | [
"MIT"
] | permissive | wamuntu/recipe-app-api | 774bd6d097b63a962474962161c422f46b833031 | 7976ff4fa13872097e1018e3305c00206c54fcf5 | refs/heads/master | 2022-12-01T06:45:45.046421 | 2020-07-28T22:25:29 | 2020-07-28T22:25:29 | 281,071,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
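        # pop the raw password so the base update() doesn't store it as plain
        # text; set_password below hashes it before saving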
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
| [
"[email protected]"
] | |
342d09cf61ccf5c4f339d5c86f0e7204551a0b93 | 368c8d27f42d2789e84b0463b09018017858e383 | /tests/test_scan.py | cb28934a266e8cae990bf7825374528ac27bd8e0 | [
"MIT"
] | permissive | ConnectBox/wifi-configurator | 6cce0ae5048a327fd9508f672a8359d178dd0c87 | 07031bdd2ea1f2a59b15cbff13149091642ea3f0 | refs/heads/master | 2023-08-27T10:13:58.107992 | 2023-08-15T21:22:06 | 2023-08-15T21:22:06 | 183,581,318 | 1 | 1 | MIT | 2022-12-26T20:47:21 | 2019-04-26T07:36:48 | Python | UTF-8 | Python | false | false | 8,756 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from wifi_configurator import scan
def freq_signal_dict_as_scan_output(cs_dict):
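    # build a fake scan-output text block ("BSS/freq/signal" lines) from
    # {freq: signal} pairs so tests can drive the parser without running iw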
scan_output = ""
for freq, signal in cs_dict.items():
scan_output = "%sBSS: blah\nfreq: %s\nsignal: %s\n" % \
(scan_output, freq, signal)
return scan_output
def test_get_country_count_populated(iw_dev_scan_0):
c = scan.get_country_count_from_iw_output(iw_dev_scan_0)
assert c.most_common(1)[0][0] == "TR"
assert len(list(c.elements())) == 4
assert c["AL"] == 1
def test_get_country_count_unpopulated(iw_dev_scan_1):
c = scan.get_country_count_from_iw_output(iw_dev_scan_1)
assert c.most_common(1) == []
assert not list(c.elements())
def test_get_country_count_empty(iw_dev_scan_2):
c = scan.get_country_count_from_iw_output(iw_dev_scan_2)
assert c.most_common(1) == []
assert not list(c.elements())
def test_get_country_count_populated2(iw_dev_scan_3):
c = scan.get_country_count_from_iw_output(iw_dev_scan_3)
assert c.most_common(1)[0][0] == "AU"
assert len(list(c.elements())) == 2
AU_REGDB = """\
country AU: DFS-ETSI
(2402.000 - 2482.000 @ 40.000), (20.00), (N/A)
(5170.000 - 5250.000 @ 80.000), (17.00), (N/A), AUTO-BW
(5250.000 - 5330.000 @ 80.000), (24.00), (N/A), DFS, AUTO-BW
(5490.000 - 5710.000 @ 160.000), (24.00), (N/A), DFS
(5735.000 - 5835.000 @ 80.000), (30.00), (N/A)"""
def test_get_country_rules_block_matching(regdb_lines):
block_lines = scan.get_country_rules_block("AU", regdb_lines)
assert block_lines == AU_REGDB.split("\n")
def test_au_freq_extraction(regdb_lines):
block_lines = scan.get_country_rules_block("AU", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
assert freq_blocks == [
(2402, 2482),
(5170, 5250),
(5250, 5330),
(5490, 5710),
(5735, 5835),
]
def test_flattening_of_au_freqs(regdb_lines):
block_lines = scan.get_country_rules_block("AU", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
freq_blocks = scan.flatten_frequency_blocks(freq_blocks)
assert scan.flatten_frequency_blocks(freq_blocks) == [
(2402, 2482),
(5170, 5330),
(5490, 5710),
(5735, 5835),
]
def test_channel_list_au(regdb_lines):
block_lines = scan.get_country_rules_block("AU", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
freq_blocks = scan.flatten_frequency_blocks(freq_blocks)
assert scan.get_channel_list_from_frequency_blocks(freq_blocks) == \
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UNSET_REGDB = """\
country 00: DFS-UNSET
(2402.000 - 2472.000 @ 40.000), (20.00), (N/A)
(2457.000 - 2482.000 @ 20.000), (20.00), (N/A), NO-IR, AUTO-BW
(2474.000 - 2494.000 @ 20.000), (20.00), (N/A), NO-OFDM, NO-IR
(5170.000 - 5250.000 @ 80.000), (20.00), (N/A), NO-IR, AUTO-BW
(5250.000 - 5330.000 @ 80.000), (20.00), (N/A), DFS, NO-IR, AUTO-BW
(5490.000 - 5730.000 @ 160.000), (20.00), (N/A), DFS, NO-IR
(5735.000 - 5835.000 @ 80.000), (20.00), (N/A), NO-IR
(57240.000 - 63720.000 @ 2160.000), (N/A), (N/A)"""
def test_get_country_rules_block_first(regdb_lines):
block_lines = scan.get_country_rules_block("00", regdb_lines)
assert block_lines == UNSET_REGDB.split("\n")
def test_unset_freq_extraction(regdb_lines):
block_lines = scan.get_country_rules_block("00", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
assert freq_blocks == [
(2402, 2472),
(2457, 2474),
(5170, 5250),
(5250, 5330),
(5490, 5730),
(5735, 5835),
(57240, 63720),
]
def test_flattening_of_unset_freqs(regdb_lines):
block_lines = scan.get_country_rules_block("00", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
assert scan.flatten_frequency_blocks(freq_blocks) == [
(2402, 2474),
(5170, 5330),
(5490, 5730),
(5735, 5835),
(57240, 63720),
]
def test_channel_list_unset(regdb_lines):
block_lines = scan.get_country_rules_block("00", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
freq_blocks = scan.flatten_frequency_blocks(freq_blocks)
assert scan.get_channel_list_from_frequency_blocks(freq_blocks) == \
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
JP_REGDB = """\
country JP: DFS-JP
(2402.000 - 2482.000 @ 40.000), (20.00), (N/A)
(2474.000 - 2494.000 @ 20.000), (20.00), (N/A), NO-OFDM
(4910.000 - 4990.000 @ 40.000), (23.00), (N/A)
(5030.000 - 5090.000 @ 40.000), (23.00), (N/A)
(5170.000 - 5250.000 @ 80.000), (20.00), (N/A), AUTO-BW
(5250.000 - 5330.000 @ 80.000), (20.00), (N/A), DFS, AUTO-BW
(5490.000 - 5710.000 @ 160.000), (23.00), (N/A), DFS
(59000.000 - 66000.000 @ 2160.000), (10.00), (N/A)"""
def test_get_country_rules_block_jp(regdb_lines):
block_lines = scan.get_country_rules_block("JP", regdb_lines)
assert block_lines == JP_REGDB.split("\n")
def test_jp_freq_extraction(regdb_lines):
block_lines = scan.get_country_rules_block("JP", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
assert freq_blocks == [
(2402, 2474),
(4910, 4990),
(5030, 5090),
(5170, 5250),
(5250, 5330),
(5490, 5710),
(59000, 66000),
]
def test_flattening_of_jp_freqs(regdb_lines):
block_lines = scan.get_country_rules_block("JP", regdb_lines)
freq_blocks = scan.get_frequency_blocks_from_country_block(block_lines)
assert scan.flatten_frequency_blocks(freq_blocks) == [
(2402, 2474),
(4910, 4990),
(5030, 5090),
(5170, 5330),
(5490, 5710),
(59000, 66000),
]
def test_get_country_rules_block_unmatched(regdb_lines):
block_lines = scan.get_country_rules_block("NOMATCH", regdb_lines)
assert not block_lines
def test_get_freq_signals_0(iw_dev_scan_0):
assert scan.get_freq_signal_tuples_from_iw_output(iw_dev_scan_0) == [
(2412, -48.0),
(2432, -84.0),
(2442, -42.0),
(2442, -83.0),
(2442, -82.0),
(2462, -85.0),
(5180, -89.0),
(2412, -84.0),
(2452, -88.0),
]
def test_get_freq_signals_1(iw_dev_scan_1):
assert scan.get_freq_signal_tuples_from_iw_output(iw_dev_scan_1) == [
(2412, -48.0),
]
def test_get_freq_signals_2(iw_dev_scan_2):
assert scan.get_freq_signal_tuples_from_iw_output(iw_dev_scan_2) == []
def test_get_freq_signals_3(iw_dev_scan_3):
assert scan.get_freq_signal_tuples_from_iw_output(iw_dev_scan_3) == [
(2412, -32.0),
(2442, -36.0),
(5180, -25.0),
]
def test_overlap_empty_full():
assert not scan.channel_overlaps_with_others(1, [])
assert scan.channel_overlaps_with_others(
1,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
)
def test_overlap_end_spectrum():
assert not scan.channel_overlaps_with_others(
1,
[5, 6, 7, 8, 9, 10, 11, 12, 13]
)
assert scan.channel_overlaps_with_others(1, [4])
assert not scan.channel_overlaps_with_others(
13,
[1, 2, 3, 4, 5, 6, 7, 8, 9]
)
assert scan.channel_overlaps_with_others(13, [10])
def test_overlap_mid_spectrum():
assert not scan.channel_overlaps_with_others(
5,
[1, 9, 10, 11, 12, 13]
)
assert scan.channel_overlaps_with_others(
5,
[1, 2, 9, 10, 11, 12, 13]
)
assert scan.channel_overlaps_with_others(
5,
[1, 8, 9, 10, 11, 12, 13]
)
assert not scan.channel_overlaps_with_others(
9,
[1, 2, 3, 4, 5, 13]
)
assert scan.channel_overlaps_with_others(
9,
[1, 2, 3, 4, 5, 6, 13]
)
assert scan.channel_overlaps_with_others(
9,
[1, 2, 3, 4, 5, 12, 13]
)
assert not scan.channel_overlaps_with_others(
9,
[5, 13]
)
assert scan.channel_overlaps_with_others(
9,
[6]
)
assert scan.channel_overlaps_with_others(
9,
[12]
)
def test_uncontested_channels():
# 3 knocks out 1-6, 9 knocks out 7-12, 13 knocks out 10-13
assert scan.get_available_uncontested_channel(
range(1, 14),
freq_signal_dict_as_scan_output(
{2422: -50, 2452: -50, 2472: -50}
)
) == scan.NO_CHANNEL
# 3 knocks out 1-5, 9 knocks out 7-12, leaving 13
assert scan.get_available_uncontested_channel(
range(1, 14),
freq_signal_dict_as_scan_output({2422: -50, 2452: -50})
) == 13
| [
"[email protected]"
] | |
d517c0304e985ead906169b47b976447619f523e | b54f9626c1b4fb8b930bf2ad73821cda24df73ee | /onnxmltools/convert/sparkml/operator_converters/word2vec.py | b66edf23938a6e5a08b2b5ace52b4f2eb84246e9 | [
"MIT"
] | permissive | vinitra/onnxmltools | 6e5e8100ce1914f8d265d32e7c44ba590fc1c2ce | dd843d392c5790320b3b597a0618c4910473b5a2 | refs/heads/master | 2021-06-25T00:11:12.705711 | 2020-10-12T03:29:10 | 2020-10-12T03:29:10 | 152,326,441 | 3 | 1 | MIT | 2018-11-17T00:07:20 | 2018-10-09T21:54:36 | Python | UTF-8 | Python | false | false | 4,079 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pandas
import numpy
from onnx import onnx_pb as onnx_proto
from ..utils import SparkMlConversionError
from ...common._apply_operation import apply_add, apply_mul, apply_sum
from ...common._registration import register_converter, register_shape_calculator
from ...common.data_types import StringTensorType, FloatTensorType
from ...common.utils import check_input_and_output_numbers, check_input_and_output_types
def convert_word2vec(scope, operator, container):
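    # ONNX has no single Word2Vec op, so the model is lowered to a small graph:
    # CategoryMapper turns each word into its vocabulary index, DynamicSlice
    # gathers that word's embedding row, Sum adds the vectors, and a final Mul
    # by 1/word_count averages them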
op = operator.raw_operator
vectors = op.getVectors().toPandas().vector.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)
cats_strings = op.getVectors().toPandas().word.values.tolist()
cats_int64s = [x for x in range(0, len(cats_strings))]
word_count = operator.inputs[0].type.shape[1]
vectors_tensor = scope.get_unique_variable_name('vectors_tensor')
container.add_initializer(vectors_tensor, onnx_proto.TensorProto.FLOAT, vectors.shape, vectors.flatten())
word_indices = scope.get_unique_variable_name('word_indices_tensor')
container.add_node('CategoryMapper', operator.input_full_names, word_indices,
op_domain='ai.onnx.ml',
cats_int64s=cats_int64s,
cats_strings=cats_strings,
default_int64=-1)
one = scope.get_unique_variable_name('one_tensor')
container.add_initializer(one, onnx_proto.TensorProto.INT64, [1], [1])
zero = scope.get_unique_variable_name('zero_tensor')
container.add_initializer(zero, onnx_proto.TensorProto.INT64, [1], [0])
word_count_tensor = scope.get_unique_variable_name('word_count_tensor')
container.add_initializer(word_count_tensor, onnx_proto.TensorProto.INT64, [1], [word_count])
sliced_outputs = []
for i in range(0, word_count):
index = scope.get_unique_variable_name('index_tensor')
container.add_initializer(index, onnx_proto.TensorProto.INT64, [1], [i])
selected_index = scope.get_unique_variable_name('selected_index_tensor')
container.add_node('ArrayFeatureExtractor', [word_indices, index], selected_index,
op_domain='ai.onnx.ml')
reshaped_index = scope.get_unique_variable_name('reshaped_tensor')
container.add_node('Reshape', [selected_index, one], reshaped_index,
op_version=5)
end_index = scope.get_unique_variable_name('end_index_tensor')
apply_add(scope, [one, reshaped_index], end_index, container, axis=0)
sliced_output = scope.get_unique_variable_name('sliced_tensor')
container.add_node('DynamicSlice', [vectors_tensor, reshaped_index, end_index, zero], sliced_output)
sliced_outputs.append(sliced_output)
sum_vector = scope.get_unique_variable_name('sum_tensor')
apply_sum(scope, sliced_outputs, sum_vector, container)
factor = scope.get_unique_variable_name('factor_tensor')
container.add_initializer(factor, onnx_proto.TensorProto.FLOAT, [1], [1/operator.inputs[0].type.shape[1]])
apply_mul(scope, [factor, sum_vector], operator.output_full_names, container)
register_converter('pyspark.ml.feature.Word2VecModel', convert_word2vec)
def calculate_word2vec_output_shapes(operator):
check_input_and_output_numbers(operator, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[StringTensorType])
N = operator.inputs[0].type.shape[0]
if N != 1:
raise SparkMlConversionError('Word2Vec converter cannot handle batch size of more than 1')
C = operator.raw_operator.getOrDefault('vectorSize')
operator.outputs[0].type = FloatTensorType([N, C])
register_shape_calculator('pyspark.ml.feature.Word2VecModel', calculate_word2vec_output_shapes)
| [
"[email protected]"
] | |
5c5014ac32e47fc41d41eaf0a112c52d610840dc | e9a322607be1740c4d1f69944f9bf4ac46c29f4d | /cyvlfeat/sift/sift.py | 46419daeeee11b4edf767bd224c35f3ccadd94ee | [
"BSD-2-Clause"
] | permissive | simmimourya/cyvlfeat | 71fc3ac92b1ca12cc08d796296fb379269f2e10f | efc83dca40d335658620c49017ca0c814ad77132 | refs/heads/master | 2022-01-14T10:39:37.630437 | 2019-06-16T16:28:28 | 2019-06-16T16:28:28 | 59,349,633 | 2 | 0 | BSD-2-Clause | 2019-06-16T16:28:29 | 2016-05-21T08:26:19 | Python | UTF-8 | Python | false | false | 5,888 | py | import numpy as np
from .cysift import cy_sift
def sift(image, n_octaves=None, n_levels=3, first_octave=0, peak_thresh=0,
edge_thresh=10, norm_thresh=None, magnification=3, window_size=2,
frames=None, force_orientations=False, float_descriptors=False,
compute_descriptor=False, verbose=False):
r"""
Extracts a set of SIFT features from ``image``. ``image`` must be
``float32`` and greyscale (either a single channel as the last axis, or no
channel). Each column of ``frames`` is a feature frame and has the format
``[Y, X, S, TH]``, where ``(Y, X)`` is the floating point center of the
keypoint, ``S`` is the scale and ``TH`` is the orientation (in radians).
If ``compute_descriptors=True``, computes the SIFT descriptors as well. Each
column of ``descriptors`` is the descriptor of the corresponding frame in
``frames``. A descriptor is a 128-dimensional vector of type ``uint8``.
Parameters
----------
image : [H, W] or [H, W, 1] `float32` `ndarray`
A single channel, greyscale, `float32` numpy array (ndarray)
representing the image to calculate descriptors for.
n_octaves : `int`, optional
The number of octaves of the DoG scale space. If ``None``, the maximum
number of octaves will be used.
n_levels : `int`, optional
The number of levels per octave of the DoG scale space.
first_octave : `int`, optional
The index of the first octave of the DoG scale space.
peak_thresh : `int`, optional
The peak selection threshold. The peak threshold filters peaks of the
DoG scale space that are too small (in absolute value).
edge_thresh : `int`, optional
The edge selection threshold. The edge threshold eliminates peaks of the
DoG scale space whose curvature is too small (such peaks yield badly
localized frames).
norm_thresh : `float`, optional
Set the minimum l2-norm of the descriptors before normalization.
Descriptors below the threshold are set to zero. If ``None``,
norm_thresh is ``-inf``.
magnification : `int`, optional
Set the descriptor magnification factor. The scale of the keypoint is
multiplied by this factor to obtain the width (in pixels) of the spatial
bins. For instance, if there are there are 4 spatial bins along each
spatial direction, the ``side`` of the descriptor is approximately ``4 *
magnification``.
window_size : `int`, optional
Set the variance of the Gaussian window that determines the
descriptor support. It is expressed in units of spatial bins.
frames : `[F, 4]` `float32` `ndarray`, optional
If specified, set the frames to use (bypass the detector). If frames are
not passed in order of increasing scale, they are re-orderded. A frame
is a vector of length 4 ``[Y, X, S, TH]``, representing a disk of center
f[:2], scale f[2] and orientation f[3].
force_orientations : `bool`, optional
If ``True``, compute the orientations of the frames, overriding the
orientation specified by the ``frames`` argument.
float_descriptors : `bool`, optional
If ``True``, the descriptor are returned in floating point rather than
integer format.
compute_descriptor : `bool`, optional
If ``True``, the descriptors are also returned, as well as the keypoints
(frames). This means that the output of calling this function changes
from a single value ``frames``, to a tuple of output values ``(frames,
descriptors)``.
verbose : `bool`, optional
If ``True``, be verbose.
Returns
-------
frames : `(F, 4)` `float32` `ndarray`
``F`` is the number of keypoints (frames) used. This is the center
of every dense SIFT descriptor that is extracted.
descriptors : `(F, 128)` `uint8` or `float32` `ndarray`, optional
``F`` is the number of keypoints (frames) used. The 128 length vectors
per keypoint extracted. ``uint8`` by default. Only returned if
``compute_descriptors=True``.
"""
# Remove last channel
if image.ndim == 3 and image.shape[-1] == 1:
image = image[..., 0]
# Validate image size
if image.ndim != 2:
raise ValueError('Only 2D arrays are supported')
if frames is not None:
if frames.ndim != 2 or frames.shape[-1] != 4:
raise ValueError('Frames should be a 2D array of size '
'(n_keypoints, 4)')
frames = np.require(frames, dtype=np.float32, requirements='C')
# Validate all the parameters
if n_octaves is not None and n_octaves < 0:
raise ValueError('n_octaves must be >= 0')
if n_octaves is None:
n_octaves = -1
if n_levels < 1:
raise ValueError('n_levels must be > 0')
if first_octave < 0:
raise ValueError('first_octave must be >= 0')
if edge_thresh < 1:
raise ValueError('edge_thresh must be > 0')
if peak_thresh < 0:
raise ValueError('peak_thresh must be >= 0')
if norm_thresh is not None and norm_thresh < 0:
raise ValueError('norm_thresh must be >= 0')
if norm_thresh is None:
norm_thresh = -1
if window_size < 0:
raise ValueError('window_size must be >= 0')
# Ensure types are correct before passing to Cython
image = np.require(image, dtype=np.float32, requirements='C')
result = cy_sift(image, n_octaves, n_levels,
first_octave, peak_thresh,
edge_thresh, norm_thresh, magnification,
window_size, frames, force_orientations,
float_descriptors, compute_descriptor,
verbose)
# May be a tuple or a single return of only the calculated frames
return result
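
# Example usage (a minimal sketch; the random image is a hypothetical stand-in,
# and the import path assumes the package re-exports sift from cyvlfeat.sift):
#   import numpy as np
#   from cyvlfeat.sift import sift
#   img = np.random.rand(64, 64).astype(np.float32)
#   frames, descriptors = sift(img, compute_descriptor=True)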
| [
"[email protected]"
] | |
1e9740a8e1b78a322a5f0a91333bbb1a7d3abc3c | 3d2901bb0dc06b24f40544b92b999b76256b2a5b | /05_django/01_django_intro_2/config/urls.py | 84d5a8a8031e1ad68880dfe1f48eda2d3b2088a9 | [] | no_license | kyunghee2/TIL | 3ac949000900cdd58c8b63685dbe45561a1523e4 | 8e3a27d99beabee6342da571700ad08b35e62bb9 | refs/heads/master | 2022-12-11T10:44:37.494808 | 2020-12-29T07:15:02 | 2020-12-29T07:15:02 | 216,502,861 | 0 | 4 | null | 2022-12-08T06:47:25 | 2019-10-21T07:21:19 | Python | UTF-8 | Python | false | false | 998 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from pages import views
urlpatterns = [
    # When a request comes in on a path starting with pages/,
    # hand it off to the urls.py inside the pages app
path('pages/',include('pages.urls')),
path('utilities/',include('utilities.urls')),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
08a4954ba693a54f34fe62d504239de31695ae34 | 5452fc2155808581ba7448de23a2879c27e411a1 | /accounts/models.py | edb676378a47f85bc1131b953e72d73d1b72e1a9 | [] | no_license | eferroni/django3-star_social | 9e991ea7f3a869b7b33f9b171ea620000b7a2aca | 266d3c1e1ac9596e6fdd3774a1609b3327841955 | refs/heads/main | 2023-04-24T08:18:47.627003 | 2021-05-05T23:33:08 | 2021-05-05T23:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from django.db import models
from django.contrib import auth
# Create your models here.
class User(auth.models.User,auth.models.PermissionsMixin):
def __str__(self):
return f'@{self.username}'
| [
"[email protected]"
] | |
c36b256ce526feca9b8fd7feb9a7ee6860c355db | 18d9fe191a7dcccaa989ac16d0c693526f9f43dc | /manage.py | c86a46ea1870ec971a072381f1188f5ad3f96561 | [] | no_license | david654100/messingwithdjango | 787fb6bda6ca0e3a237081b022e1c15d0ddd6dfd | 8086429645001c02607d4d241829e838f66c7d67 | refs/heads/master | 2020-05-30T00:11:33.439830 | 2019-05-30T17:25:27 | 2019-05-30T17:25:27 | 189,454,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'messingwithdjango.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5d2afa784fddf2b41a2109e47a38a43e9cbbc586 | 34cd68ebcb2d8c2cacc8c3c5550881b998de969a | /my_site/blog/forms.py | 7370321b58f6c732fc4aa6705a9598d23f9ad1c0 | [] | no_license | kenzli/waterloo-blog | 793bc8873ba544e55863faaaf82227d014c06a18 | 66a7c7897181751332e5a578cb14807d7ac4e8ec | refs/heads/master | 2020-12-14T06:43:02.500688 | 2020-01-18T20:44:16 | 2020-01-18T20:44:16 | 234,671,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class RegistrationForm(UserCreationForm):
email = forms.EmailField()
first_name = forms.CharField(label='First Name', max_length=50)
last_name = forms.CharField(label='Last Name', max_length=50)
class Meta:
model = User
fields = ['first_name', 'last_name', 'username', 'email', 'password1', 'password2']
class UserUpdate(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['first_name', 'last_name', 'username', 'email']
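
# Typical view usage (a minimal sketch, assuming a standard Django request):
#   form = RegistrationForm(request.POST)
#   if form.is_valid():
#       form.save()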
| [
"[email protected]"
] | |
069713aff4aa6e1329cdf6ccd1b28edd3cecf71f | 70f41a06d733e680af3bb1f00d8ff33574f4f4bb | /src/fh_tools/language_test/DeepLearningNotes/Note-2 RNN处理非线性回归/sonnet/examples/rnn_shakespeare.py | f53e8156f989b18659775bba702b787f26b79ac1 | [
"MIT"
] | permissive | mmmaaaggg/RefUtils | 209f7136acc63c880e60974c347e19adc4c7ac2e | f127658e75b5c52b4db105a22176ee0931ceacae | refs/heads/master | 2021-06-11T16:06:06.245275 | 2021-03-10T05:32:14 | 2021-03-10T05:32:14 | 139,413,962 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,434 | py | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script to train a stacked LSTM on the Tiny Shakespeare dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import sonnet as snt
from sonnet.examples import dataset_shakespeare
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("num_training_iterations", 10000,
"Number of iterations to train for.")
tf.flags.DEFINE_integer("report_interval", 1000,
"Iterations between reports (samples, valid loss).")
tf.flags.DEFINE_integer("reduce_learning_rate_interval", 2500,
"Iterations between learning rate reductions.")
tf.flags.DEFINE_integer("lstm_depth", 3, "Number of LSTM layers.")
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("num_embedding", 32, "Size of embedding layer.")
tf.flags.DEFINE_integer("num_hidden", 128, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("truncation_length", 64, "Sequence size for training.")
tf.flags.DEFINE_integer("sample_length", 1000, "Sequence size for sampling.")
tf.flags.DEFINE_float("max_grad_norm", 5, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 0.1, "Optimizer learning rate.")
tf.flags.DEFINE_float("reduce_learning_rate_multiplier", 0.1,
"Learning rate is multiplied by this when reduced.")
tf.flags.DEFINE_float("optimizer_epsilon", 0.01,
"Epsilon used for Adam optimizer.")
tf.flags.DEFINE_string("checkpoint_dir", "/tmp/tf/rnn_shakespeare",
"Checkpointing directory.")
tf.flags.DEFINE_integer("checkpoint_interval", 500,
"Checkpointing step interval.")
def _configure_saver(checkpoint_dir, checkpoint_interval):
"""Returns a tf.train.CheckpointSaverHook for autosaving checkpoints."""
saver = tf.train.Saver()
return tf.train.CheckpointSaverHook(
checkpoint_dir=checkpoint_dir,
save_steps=checkpoint_interval,
saver=saver)
def build_graph(lstm_depth=3, batch_size=32, num_embedding=32, num_hidden=128,
truncation_length=64, sample_length=1000, max_grad_norm=5,
initial_learning_rate=0.1, reduce_learning_rate_multiplier=0.1,
optimizer_epsilon=0.01):
"""Constructs the computation graph."""
# Get datasets.
dataset_train = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="train",
random=True,
name="shake_train")
dataset_valid = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="valid",
random=False,
name="shake_valid")
dataset_test = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="test",
random=False,
name="shake_test")
# Define model.
model = TextModel(
num_embedding=num_embedding,
num_hidden=num_hidden,
lstm_depth=lstm_depth,
output_size=dataset_valid.vocab_size,
use_dynamic_rnn=True,
use_skip_connections=True)
# Get the training loss.
train_input_sequence, train_target_sequence = dataset_train()
train_output_sequence_logits, train_final_state = model(train_input_sequence) # pylint: disable=not-callable
train_loss = dataset_train.cost(train_output_sequence_logits,
train_target_sequence)
# Get the validation loss.
valid_input_sequence, valid_target_sequence = dataset_valid()
valid_output_sequence_logits, _ = model(valid_input_sequence) # pylint: disable=not-callable
valid_loss = dataset_valid.cost(valid_output_sequence_logits,
valid_target_sequence)
# Get the test loss.
test_input_sequence, test_target_sequence = dataset_test()
test_output_sequence_logits, _ = model(test_input_sequence) # pylint: disable=not-callable
test_loss = dataset_test.cost(test_output_sequence_logits,
test_target_sequence)
# Build graph to sample some strings during training.
initial_logits = train_output_sequence_logits[truncation_length - 1]
train_generated_string = model.generate_string(
initial_logits=initial_logits,
initial_state=train_final_state,
sequence_length=sample_length)
# Set up global norm clipping of gradients.
trainable_variables = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(
tf.gradients(train_loss, trainable_variables), max_grad_norm)
# Get learning rate and define annealing.
learning_rate = tf.get_variable(
"learning_rate",
shape=[],
dtype=tf.float32,
initializer=tf.constant_initializer(initial_learning_rate),
trainable=False)
reduce_learning_rate = learning_rate.assign(
learning_rate * reduce_learning_rate_multiplier)
# Get training step counter.
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])
# Define optimizer and training step.
optimizer = tf.train.AdamOptimizer(
learning_rate, epsilon=optimizer_epsilon)
train_step = optimizer.apply_gradients(
zip(grads, trainable_variables),
global_step=global_step)
graph_tensors = {
"train_loss": train_loss,
"valid_loss": valid_loss,
"test_loss": test_loss,
"train_generated_string": train_generated_string,
"reduce_learning_rate": reduce_learning_rate,
"global_step": global_step,
"train_step": train_step
}
# Return dataset_train for translation to human readable text.
return graph_tensors, dataset_train
def train(num_training_iterations, report_interval,
reduce_learning_rate_interval):
"""Trains a deep LSTM model on the Tiny Shakespeare dataset."""
# Build the computation graph.
graph_tensors, dataset_train = build_graph(
lstm_depth=FLAGS.lstm_depth, batch_size=FLAGS.batch_size,
num_embedding=FLAGS.num_embedding, num_hidden=FLAGS.num_hidden,
truncation_length=FLAGS.truncation_length,
sample_length=FLAGS.sample_length, max_grad_norm=FLAGS.max_grad_norm,
initial_learning_rate=FLAGS.learning_rate,
reduce_learning_rate_multiplier=FLAGS.reduce_learning_rate_multiplier,
optimizer_epsilon=FLAGS.optimizer_epsilon)
# Configure a checkpoint saver.
saver_hook = _configure_saver(FLAGS.checkpoint_dir,
FLAGS.checkpoint_interval)
# Train the network.
with tf.train.SingularMonitoredSession(
hooks=[saver_hook], checkpoint_dir=FLAGS.checkpoint_dir) as sess:
start_iteration = sess.run(graph_tensors["global_step"])
for train_iteration in range(start_iteration, num_training_iterations):
if (train_iteration + 1) % report_interval == 0:
train_loss_v, valid_loss_v, _ = sess.run(
(graph_tensors["train_loss"],
graph_tensors["valid_loss"],
graph_tensors["train_step"]))
train_generated_string_v = sess.run(
graph_tensors["train_generated_string"])
train_generated_string_human = dataset_train.to_human_readable(
(train_generated_string_v, 0), indices=[0])
tf.logging.info("%d: Training loss %f. Validation loss %f. Sample = %s",
train_iteration,
train_loss_v,
valid_loss_v,
train_generated_string_human)
else:
train_loss_v, _ = sess.run((graph_tensors["train_loss"],
graph_tensors["train_step"]))
tf.logging.info("%d: Training loss %f.", train_iteration, train_loss_v)
if (train_iteration + 1) % reduce_learning_rate_interval == 0:
sess.run(graph_tensors["reduce_learning_rate"])
tf.logging.info("Reducing learning rate.")
test_loss = sess.run(graph_tensors["test_loss"])
tf.logging.info("Test loss %f", test_loss)
class TextModel(snt.AbstractModule):
"""A deep LSTM model, for use on the Tiny Shakespeare dataset."""
def __init__(self, num_embedding, num_hidden, lstm_depth, output_size,
use_dynamic_rnn=True, use_skip_connections=True,
name="text_model"):
"""Constructs a `TextModel`.
Args:
num_embedding: Size of embedding representation, used directly after the
one-hot encoded input.
num_hidden: Number of hidden units in each LSTM layer.
lstm_depth: Number of LSTM layers.
output_size: Size of the output layer on top of the DeepRNN.
use_dynamic_rnn: Whether to use dynamic RNN unrolling. If `False`, it uses
static unrolling. Default is `True`.
use_skip_connections: Whether to use skip connections in the
`snt.DeepRNN`. Default is `True`.
name: Name of the module.
"""
super(TextModel, self).__init__(name=name)
self._num_embedding = num_embedding
self._num_hidden = num_hidden
self._lstm_depth = lstm_depth
self._output_size = output_size
self._use_dynamic_rnn = use_dynamic_rnn
self._use_skip_connections = use_skip_connections
with self._enter_variable_scope():
self._embed_module = snt.Linear(self._num_embedding, name="linear_embed")
self._output_module = snt.Linear(self._output_size, name="linear_output")
self._lstms = [
snt.LSTM(self._num_hidden, name="lstm_{}".format(i))
for i in range(self._lstm_depth)
]
self._core = snt.DeepRNN(self._lstms,
skip_connections=self._use_skip_connections,
name="deep_lstm")
def _build(self, one_hot_input_sequence):
"""Builds the deep LSTM model sub-graph.
Args:
one_hot_input_sequence: A Tensor with the input sequence encoded as a
one-hot representation. Its dimensions should be `[truncation_length,
batch_size, output_size]`.
Returns:
Tuple of the Tensor of output logits for the batch, with dimensions
`[truncation_length, batch_size, output_size]`, and the
final state of the unrolled core,.
"""
input_shape = one_hot_input_sequence.get_shape()
batch_size = input_shape[1]
batch_embed_module = snt.BatchApply(self._embed_module)
input_sequence = batch_embed_module(one_hot_input_sequence)
input_sequence = tf.nn.relu(input_sequence)
initial_state = self._core.initial_state(batch_size)
if self._use_dynamic_rnn:
output_sequence, final_state = tf.nn.dynamic_rnn(
cell=self._core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
else:
rnn_input_sequence = tf.unstack(input_sequence)
output, final_state = tf.contrib.rnn.static_rnn(
cell=self._core,
inputs=rnn_input_sequence,
initial_state=initial_state)
output_sequence = tf.stack(output)
batch_output_module = snt.BatchApply(self._output_module)
output_sequence_logits = batch_output_module(output_sequence)
return output_sequence_logits, final_state
@snt.experimental.reuse_vars
def generate_string(self, initial_logits, initial_state, sequence_length):
"""Builds sub-graph to generate a string, sampled from the model.
Args:
initial_logits: Starting logits to sampling from.
initial_state: Starting state for the RNN core.
sequence_length: Number of characters to sample.
Returns:
A Tensor of characters, with dimensions `[sequence_length, batch_size,
output_size]`.
"""
current_logits = initial_logits
current_state = initial_state
generated_letters = []
for _ in range(sequence_length):
# Sample a character index from distribution.
char_index = tf.squeeze(tf.multinomial(current_logits, 1))
char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)
generated_letters.append(char_one_hot)
# Feed character back into the deep_lstm.
gen_out_seq, current_state = self._core(
tf.nn.relu(self._embed_module(char_one_hot)),
current_state)
current_logits = self._output_module(gen_out_seq)
generated_string = tf.stack(generated_letters)
return generated_string
def main(unused_argv):
train(
num_training_iterations=FLAGS.num_training_iterations,
report_interval=FLAGS.report_interval,
reduce_learning_rate_interval=FLAGS.reduce_learning_rate_interval)
if __name__ == "__main__":
tf.app.run()
| [
"[email protected]"
] | |
60b4941d89cf2d18278c8cc3cc21bb76ed4d4ee1 | 2b2350241420638b2ea0e7068648c801e24c908c | /notchlist/notchlistApi/models/cocktail_ingredient.py | 09669015acdc1697a30aa82b052cd8f97505811f | [] | no_license | RockMurdock/Notch-List-API | a2133fd5179fca7901efe5debc2fe62af7a2c12d | 6907992357b08c0c34710dab96fdad1daf183e9d | refs/heads/master | 2023-08-02T09:54:02.883186 | 2020-07-01T05:05:14 | 2020-07-01T05:05:14 | 272,433,980 | 0 | 0 | null | 2021-09-22T19:15:11 | 2020-06-15T12:34:07 | Python | UTF-8 | Python | false | false | 400 | py | from django.db import models
from .cocktail import Cocktail
from .ingredient import Ingredient
class Cocktail_Ingredient(models.Model):
cocktail = models.ForeignKey(Cocktail, on_delete=models.CASCADE)
ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE)
class Meta:
verbose_name = "cocktail ingredient"
        verbose_name_plural = "cocktail ingredients"
| [
"[email protected]"
] | |
4537ab6a765731e146c3aed202b3496d609ec0f6 | cfee5e56465060cec89cb779f8c5196aeae29730 | /bot.py | 221c27e8a8446474a189514afe027fe96170f666 | [] | no_license | r3dinforesearcher/ircbot | 5b43fdb5cb29924858e407fde1feec75a6cd5d5f | 76461457948409225b15ff8631915e0559291201 | refs/heads/master | 2021-01-10T04:47:43.839499 | 2015-10-04T13:48:32 | 2015-10-04T13:48:32 | 43,637,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,072 | py | # Author: r3dinfoguy
# Source code integrated from below mentioned links
# http://wiki.shellium.org/w/Writing_an_IRC_bot_in_Python
# http://www.primalsecurity.net/0xc-python-tutorial-python-malware/
import socket
import getpass
import os
import time
import random
import re
# Set up our commands function
def commands(nick,channel,message):
if message.find(botnick+': shellium')!=-1:
ircsock.send('PRIVMSG %s :%s: Shellium is awesome!\r\n' % (channel,nick))
elif message.find(botnick+': help')!=-1:
ircsock.send('PRIVMSG %s :%s: My other command is shellium.\r\n' % (channel,nick))
# Some basic variables used to configure the bot
server = "irc.freenode.net" # Server
channel = "#r3dinfo" # Channel
botnick = socket.gethostname()+'-'+getpass.getuser() # Your bots nick
def ping(): # This is our first function! It will respond to server Pings.
ircsock.send("PONG :pingis\n")
def sendmsg(chan , msg): # This is the send message function, it simply sends messages to the channel.
ircsock.send("PRIVMSG "+ chan +" :"+ msg +"\n")
def joinchan(chan): # This function is used to join channels.
ircsock.send("JOIN "+ chan +"\n")
def hello(): # This function responds to a user that inputs "Hello Mybot"
ircsock.send("PRIVMSG "+ channel +" :Hello!\n")
def executecommand():
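    # run the shell command that follows 'YOYO @' in the triggering message and
    # relay each line of output to the channel, pausing 1s per line to avoid flooding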
command = ircmsg.split(' @')[1]
channel = "#r3dinfo" # Channel
output = os.popen(command).read()
lines = re.split('\n',output)
for line in lines:
ircsock.send("PRIVMSG "+ channel +" :%s\n" % line)
time.sleep(1)
ircsock.send("PRIVMSG "+ channel +" :------ Command Completed --------\n")
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((server, 6667)) # Here we connect to the server using the port 6667
ircsock.send("USER "+ botnick +" "+ botnick +" "+ botnick +" :This bot is a result of a tutorial covered on http://shellium.org/wiki.\n") # user authentication
ircsock.send("NICK "+ botnick +"\n") # here we actually assign the nick to the bot
joinchan(channel) # Join the channel using the functions we previously defined
#ircsock.send("Host connected: "+socket.gethostname()+"\n")
#ircsock.send("Current User: "+getpass.getuser()+"\n")
while 1: # Be careful with these! it might send you to an infinite loop
ircmsg = ircsock.recv(2048) # receive data from the server
ircmsg = ircmsg.strip('\n\r') # removing any unnecessary linebreaks.
print(ircmsg) # Here we print what's coming from the server
hello = "welcome Sir"
ircsock.send("PONG %s\r\n" % hello)
#ircsock.send('PRIVMSG %s :%s: My other command is shellium.\r\n' % (channel,nick))
if ircmsg.find(' PRIVMSG ')!=-1:
nick=ircmsg.split('!')[0][1:]
channel=ircmsg.split(' PRIVMSG ')[-1].split(' :')[0]
commands(nick,channel,ircmsg)
if ircmsg.find(":Hello "+ botnick) != -1: # If we can find "Hello Mybot" it will call the function hello()
hello()
if ircmsg.find("PING :") != -1: # if the server pings us then we've got to respond!
ping()
if ircmsg.find("YOYO @") !=-1:
executecommand()
| [
"[email protected]"
] | |
5577785d91f8ed0b861ba5b46a164fa8802bcbd5 | 951c392e439e4a3576ce3f4469aedae7e4167cd9 | /modules/site_watcher.py | f1bd75fc7a473bce6b4bcf7c6b6e07ea09e27a35 | [] | no_license | DonoA/Thompson-Discord | bfbe524f0b6bd0cc3d565b626a5ed41be1647659 | 7a04497887bbb6355e8ded39e7e0a18a53738598 | refs/heads/master | 2021-01-19T09:35:50.553880 | 2017-04-17T04:34:39 | 2017-04-17T04:34:39 | 87,770,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from threading import Thread
import asyncio

from splinter import Browser

import discord_bot


class SiteWatch(Thread):
    def __init__(self, url, xpath, timeout, total_time, logger, channel):
        super().__init__()  # initialize the Thread base class
        self.url = url
        self.xpath = xpath
        self.timeout = timeout
        self.total_time = total_time
        self.logger = logger.log
        self.channel = channel
        self.message = None

    def run(self):
        # send_message/edit_message are coroutines, so they can't be awaited from
        # this worker thread; schedule them on the bot's event loop instead
        # (assumes discord_bot.discord_bot is the client and exposes its .loop)
        self.message = asyncio.run_coroutine_threadsafe(
            discord_bot.discord_bot.send_message(self.channel, "Fetching..."),
            discord_bot.discord_bot.loop).result()
        self.fetch()

    def fetch(self):
        with Browser() as browser:
            self.logger("Screen ready")
            browser.visit(self.url)
            val = browser.find_by_xpath(self.xpath).first["text"]
            asyncio.run_coroutine_threadsafe(
                discord_bot.discord_bot.edit_message(self.message, val),
                discord_bot.discord_bot.loop).result()
| [
"[email protected]"
] | |
49ce8a859130b9c6410b6506e684134bc91d97bb | cadb30d66cfb3352d333ecf131356eb67d602399 | /AstroObject.py | 22a294c6a05df5efc23d9e7acda968fd9011502e | [] | no_license | LoganBenham/SpaceGame | 4ae37f12899c279cd6f5fc591644e6e5492abc88 | 9b4bcd5847c9a48eeaedef48398b9f32c581119e | refs/heads/master | 2021-01-11T15:57:56.467364 | 2017-01-25T00:22:54 | 2017-01-25T00:22:54 | 79,968,509 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,434 | py | from UnitNumber import *
import random as rand
class StationaryObject: # Newtonian object, mainly for centers of galaxies
def __init__(self, mass=UNum(0, suns=1), position=UVector2D(value=UNum(0, ly=1), angle=0.)):
self.mass = mass
if type(position)!=UVector2D:
raise TypeError('Position must be UVector2D')
self.real_position = position
self.orbiters = []
self.type_name = 'Newtonian Object'
self.orbiting = False
def get_pos(self, time):
return self.real_position
class Orbit:
def __init__(self, focus, semimajor_axis, eccentricity, periapsis=None, angle=None, time=UNum(0, yrs=1), clockwise=False):
if type(semimajor_axis)!=UNum:
raise TypeError('Semimajor Axis has no units')
if eccentricity >= 1 or eccentricity < 0:
raise ValueError('Eccentricity can only be from 0-0.999...', eccentricity)
self.focus = focus
self.semimajor_axis = semimajor_axis
self.eccentricity = eccentricity
self.start_time = time
self.clockwise = clockwise
if periapsis is not None:
self.periapsis = periapsis
else:
self.periapsis = 2 * math.pi * rand.random() # angle
if angle is not None:
self.initial_angle = angle
else:
self.initial_angle = 2 * math.pi * rand.random()
self.mu = Physics.G_myunits * self.focus.mass
self.period = UNum(0, yrs=1)
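        # Kepler's third law: T = 2*pi*sqrt(a^3 / mu)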
if not self.mu.number==0:
self.period = (((self.semimajor_axis**3) / self.mu)**0.5) * 2 * math.pi
return
# noinspection PyPep8Naming
def get_pos(self, time, debug=False):
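        # Kepler propagation: mean anomaly -> eccentric anomaly (inversekepler)
        # -> true anomaly -> radius from the conic equation r = a(1-e^2)/(1+e*cos(nu))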
if self.semimajor_axis.number==0.:
return self.focus.real_position
elif self.focus.mass.number==0:
raise ValueError('Orbiting something with no mass')
mean_anomaly = (self.period**-1 * (time - self.start_time - self.period) * 2 * math.pi).number
eccentric_anomaly = self.inversekepler(mean_anomaly)
#print('mean anomaly:', mean_anomaly)
#print('eccentric anomaly:', eccentric_anomaly)
cos_E = math.cos(eccentric_anomaly)
sin_E = math.sin(eccentric_anomaly)
e = self.eccentricity
cos_true = (cos_E - e) / (1 - e * cos_E)
sin_true = math.sqrt(1 - e**2) * sin_E / (1 - e * cos_E)
true_anomaly = math.atan2(sin_true, cos_true)
if self.clockwise:
true_anomaly = -true_anomaly
distance = self.semimajor_axis * (1 - e**2) / (1 + (e * math.cos(true_anomaly)))
return UVector2D(value=distance,
angle=true_anomaly + self.initial_angle + self.periapsis) + self.focus.real_position
    def get_vel(self, time):
        # Direction from a small central difference of the position; speed from
        # the vis-viva relation v^2 = mu * (2/r - 1/a).
        x = self.get_pos(time, debug=True) - self.focus.get_pos(time)
        dx = self.get_pos(time + self.period/100) - self.get_pos(time - self.period/100)
        angle = dx.angle
        term1 = x.value**-1
        term2 = self.semimajor_axis**-1
        speed = (self.mu*(term1*2 - term2))**0.5
        return UVector2D(value=speed, angle=angle)
    def orbital_position(self, time):
        # Position relative to the focus, rotated into the orbit's own frame.
        # The focus exposes real_position (there is no .position attribute).
        return (self.get_pos(time) - self.focus.real_position).rotate(-self.periapsis)
def orbit_points(self, position_unit, n=30):
points = []
for i in range(n):
eccentric_anomaly = 2 * math.pi * i / n
cos_E = math.cos(eccentric_anomaly)
sin_E = math.sin(eccentric_anomaly)
e = self.eccentricity
cos_true = (cos_E - e) / (1 - e * cos_E)
sin_true = math.sqrt(1 - e**2) * sin_E / (1 - e * cos_E)
true_anomaly = math.atan2(sin_true, cos_true)
if self.clockwise:
true_anomaly = -true_anomaly
distance = self.semimajor_axis * (1 - self.eccentricity**2) / (1 + (self.eccentricity * math.cos(true_anomaly)))
vec = UVector2D(value=distance, angle=true_anomaly + self.periapsis + self.initial_angle)
vec = (vec + self.focus.real_position).convert([position_unit])
points.append(Vector2D(x=vec.x.number, y=vec.y.number))
return points
    def inversekepler(self, mean_anom):
        # Solve Kepler's equation M = E - e*sin(E) for the eccentric anomaly E
        # by fixed-point iteration E <- M + e*sin(E), which converges for e < 1.
        ecc_anom = mean_anom
        for i in range(35):
            ecc_anom = mean_anom + self.eccentricity * math.sin(ecc_anom)
        return ecc_anom
def print(self):
#print('SM-Axis:', self.semimajor_axis)
print('e:', self.eccentricity)
print('periapsis:', self.periapsis)
#print('period:', self.period)
print('start time:', self.start_time)
print('start angle:', self.initial_angle)
def get_system(self):
focus = self.focus
while focus.type_name!='Star System':
focus = focus.orbit.focus
return focus
def __getattr__(self, attr):
if attr == 'apoapsis_dist':
return self.semimajor_axis * (1+self.eccentricity)
elif attr == 'periapsis_dist':
return self.semimajor_axis * (1-self.eccentricity)
else:
raise AttributeError("%r object has no attribute %r" % (self.__class__, attr))
class OrbitFromVectors(Orbit):
def __init__(self, focus, position, velocity, time=UNum(0, yrs=1)):
pos = position - focus.get_pos(time)
vel = velocity
angular_momentum = pos.value * vel.value * math.sin(vel.angle - pos.angle) #in 3rd dim
mu = Physics.G.convert(['AU', 'yrs']) * focus.mass
term1 = pos * (vel.value**2 - mu/pos.value)
term2 = vel * pos.dot(vel)
e_vec = ((term1 - term2) / mu)
e_vec = e_vec.number()
if angular_momentum.number >= 0:
term1_2 = Vector2D(value=(vel.value*angular_momentum/mu).number, angle=vel.angle-math.pi/2)
else:
term1_2 = Vector2D(value=(vel.value*angular_momentum/mu).number, angle=vel.angle+math.pi/2)
term2_2 = Vector2D(value=1, angle=pos.angle)
e_vec_2 = term1_2 - term2_2
e = e_vec.value
        # Specific orbital energy eps = v^2/2 - mu/r, and a = -mu/(2*eps).
        mech_energy = (vel.value**2)/2 - mu/pos.value
        semimajor_axis = mu / (mech_energy * -2)
periapsis = e_vec.angle
if angular_momentum.number < 0:
periapsis = 2*math.pi - periapsis
while periapsis > 2*math.pi:
periapsis -= 2*math.pi
true_anomaly = pos.angle - periapsis
super().__init__(focus, semimajor_axis, e, periapsis=periapsis, angle=true_anomaly, time=time)
self.print()
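# A minimal usage sketch of the classes above (illustrative only; the "AU" unit
# keyword is an assumption inferred from Physics.G.convert(['AU', 'yrs'])):
def _example_orbit():
    center = StationaryObject(mass=UNum(1, suns=1))
    planet = Orbiter(center, semimajor_axis=UNum(1, AU=1), eccentricity=0.2)
    t = UNum(0.5, yrs=1)
    planet.set_pos(t)  # advance to t = 0.5 yr and cache the position
    return planet.real_position, planet.get_vel(t)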
class Orbiter:
def __init__(self, focus, semimajor_axis, mass=UNum(0, kg=1), eccentricity=0., clockwise=False):
if not hasattr(self, 'type_name'):
self.type_name = 'Generic Orbiter'
if not hasattr(self, 'name'):
self.name = 'noname'
self.color = 'orange'
self.draw_radius = 2
self.mass = mass
self.orbiting = True
self.orbit = Orbit(focus, semimajor_axis, eccentricity, clockwise=clockwise)
if self.orbit.period.number==0.:
self.moving = False
self.real_position = self.orbit.focus.real_position
else:
self.moving = True
def get_pos(self, time):
return self.orbit.get_pos(time)
def get_vel(self, time):
return self.orbit.get_vel(time)
def set_pos(self, time):
self.real_position = self.get_pos(time)
def __str__(self):
return self.name + ' - ' + self.type_name | [
"[email protected]"
] | |
ff4167eca5c1c06c214655bc62f72432e12d13f8 | 35bb03caac708f73497a88c1a81cef6f5f481197 | /WeightofEvidence.py | c0abe497e7fa5ad0a1324be8b3bfe22a68897703 | [] | no_license | aleespa/Equipo-3 | c70c7a3b6458aa0c14571036ea2634895314d7f0 | 5a28b3fde05d3eca6c32570c79dd4b0e054753df | refs/heads/master | 2020-04-06T07:09:05.047232 | 2018-05-16T20:38:35 | 2018-05-16T20:38:35 | 124,180,706 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,844 | py | from __future__ import division
import pandas as pd
import numpy as np
from math import log
class WoE:
def __init__(self, disc=None, cont=None):
self.maps = None
self.disc = disc
self.cont = cont
self.IV = None
def fit(self, Z, y,bins=None):
X = Z.copy()
self.IV = pd.DataFrame([np.zeros(len(X.columns))],columns = X.columns)
self.maps = pd.DataFrame()
cols = X.columns
X['var'] = y
X['ID'] = range(len(X))
for col in self.disc:
a = X.pivot_table(aggfunc='count',columns='var',fill_value=0, index=col,values='ID').reset_index()
a.loc[-1] =["TOTAL", sum(a[0]), sum(a[1])]
lis = []
for y in set(X[col].values):
g = int(a[a[col]==y][1])/int(a[a[col]=='TOTAL'][1])
b = int(a[a[col]==y][0])/int(a[a[col]=='TOTAL'][0])
if g*b == 0 :
d = log((g+0.5)/(b+0.5))
else:
d = log(g/b)
self.IV[col] += float((g-b)*d)
lis.append((y,d))
lis1 = pd.DataFrame(columns=[col])
lis1[col] = lis
self.maps = pd.concat([self.maps, lis1],axis=1)
for col in self.cont:
IV = []
for i in bins:
IV.append(0)
X[col] = pd.cut(Z[col], bins = i)
a = X.pivot_table(aggfunc='count',columns='var',fill_value=0, index=col,values='ID').reset_index()
a.loc[-1] =["TOTAL", sum(a[0]), sum(a[1])]
for y in set(X[col].values):
goods = float(int(a[a[col]==y][1])/int(a[a[col]=='TOTAL'][1]))
bads = float(a[a[col]==y][0]/int(a[a[col]=='TOTAL'][0]))
if (bads != 0)&(goods !=0):
d = log(bads/goods)
IV[-1] += float((bads-goods)*d)
else:
IV[-1] += -np.inf
IV = np.array(IV)
armax = np.argmax(IV[IV <np.inf])
M = int(bins[armax])
y1 = min(Z[col])
y2 = max(Z[col])
B = [-np.inf]+[y1 + n*(y2-y1)/M for n in range(1,M)]+[np.inf]
X[col] = pd.cut(Z[col], bins = M,include_lowest=True,right=True,labels= [x for x in range(1,M+1)])
a = X.pivot_table(aggfunc='count',columns='var',fill_value=0, index=col,values='ID').reset_index()
a.loc[-1] =["TOTAL", sum(a[0]), sum(a[1])]
lis = []
for y in set(X[col].values):
g = int(a[a[col]==y][1])/int(a[a[col]=='TOTAL'][1])
b = int(a[a[col]==y][0])/int(a[a[col]=='TOTAL'][0])
if g*b == 0 :
d = log((g+0.5)/(b+0.5))
else:
d = log(g/b)
self.IV[col] += float((g-b)*d)
lis.append((B[y-1],B[y],d))
lis1 = pd.DataFrame(columns=[col])
lis1[col] = lis
self.maps = pd.concat([self.maps, lis1],axis=1)
def transform(self, W):
Z = W.copy()
for col in self.disc:
for value in Z[col].values:
Aux = [x for x in self.maps[col] if type(x)==tuple]
if value in [x[0] for x in Aux]:
aux = [x[1] for x in Aux if x[0]==value]
Z[col].replace(value,aux[0]*100,inplace=True)
else:
                    print(str(value) + " was not observed in the original variable " + str(col))
for col in self.cont:
for pairs in [x for x in self.maps[col] if type(x)==tuple ]:
for value in Z[col].values:
if (pairs[0]<= value) & (value<= pairs[1]):
Z[col].replace(value,pairs[2]*100,inplace=True)
return Z
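# The quantities computed in fit() follow the usual weight-of-evidence
# definitions (treating label 1 as "good" and label 0 as "bad"):
#   WoE(bin) = ln( P(bin | good) / P(bin | bad) )
#   IV       = sum over bins of (P(bin|good) - P(bin|bad)) * WoE(bin)
# A hand-checkable sketch with illustrative numbers (not from any dataset):
def _woe_example():
    g, b = 0.30, 0.10                 # 30% of goods, 10% of bads fall in this bin
    woe = log(g / b)                  # ~1.0986
    iv_contribution = (g - b) * woe   # ~0.2197
    return woe, iv_contribution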
| [
"[email protected]"
] | |
7fa2c95485f833a66f82eeeb2eac977db70d395a | d31d744f62c09cb298022f42bcaf9de03ad9791c | /lingvo/lingvo/core/generic_input_test.py | f9ce2cc34f92681e17e8c56079cff145b69a067b | [
"Apache-2.0"
] | permissive | yuhuofei/TensorFlow-1 | b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0 | 36eb6994d36674604973a06159e73187087f51c6 | refs/heads/master | 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,217 | py | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generic_input_op."""
import collections
import os
import pickle
import unittest
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import generic_input
from lingvo.core import py_utils
from lingvo.core import test_utils
import numpy as np
def get_test_input(path, bucket_batch_limit=8, **kwargs):
return generic_input.GenericInput(
file_pattern='tfrecord:' + path,
file_random_seed=0,
file_buffer_size=32,
file_parallelism=4,
bucket_batch_limit=[bucket_batch_limit],
**kwargs)
def run_basic_graph(use_nested_map,
bucket_fn=lambda x: 1,
bucket_batch_limit=8):
# Generate a test file w/ 100 records.
tmp = os.path.join(tf.test.get_temp_dir(), 'basic')
with tf.python_io.TFRecordWriter(tmp) as w:
for i in range(100):
w.write(('%08d' % i).encode('utf-8'))
# A simple string parsing routine. Just convert a string to a
# number.
def str_to_num(s):
return np.array(float(s), dtype=np.float32)
# A record processor written in TF graph.
def _process(source_id, record):
num, = tf.py_func(str_to_num, [record], [tf.float32])
num = tf.stack([num, tf.square(num)])
if use_nested_map:
return py_utils.NestedMap(
source_id=source_id, record=record, num=num), bucket_fn(num)
else:
return [source_id, record, num], bucket_fn(num)
# Samples random records from the data files and processes them
# to generate batches.
inputs, _ = get_test_input(
tmp,
bucket_batch_limit=bucket_batch_limit,
bucket_upper_bound=[1],
processor=_process)
if use_nested_map:
return inputs
else:
src_ids, strs, vals = inputs
return py_utils.NestedMap(source_id=src_ids, record=strs, num=vals)
class GenericInputOpTest(test_utils.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('OutputList', False, 8),
('OutputNestedMap', True, 8),
('OutputNestedMap_Batch1', True, 1))
def testBasic(self, use_nested_map, bucket_batch_limit):
input_batch = run_basic_graph(
use_nested_map=use_nested_map, bucket_batch_limit=bucket_batch_limit)
with self.session():
record_seen = set()
for i in range(100):
ans_input_batch = self.evaluate(input_batch)
for s in ans_input_batch.record:
record_seen.add(s)
self.assertEqual(ans_input_batch.source_id.shape, (bucket_batch_limit,))
self.assertEqual(ans_input_batch.record.shape, (bucket_batch_limit,))
self.assertEqual(ans_input_batch.num.shape, (bucket_batch_limit, 2))
ans_vals = ans_input_batch.num
self.assertAllEqual(np.square(ans_vals[:, 0]), ans_vals[:, 1])
for i in range(100):
self.assertIn(('%08d' % i).encode('utf-8'), record_seen)
def testPadding(self):
# Generate a test file w/ 50 records of different lengths.
tmp = os.path.join(tf.test.get_temp_dir(), 'basic')
with tf.python_io.TFRecordWriter(tmp) as w:
for n in range(1, 50):
w.write(pickle.dumps(np.full([n, 3, 3], n, np.int32)))
g = tf.Graph()
with g.as_default():
# A record processor written in TF graph.
def _process(record):
num = tf.py_func(pickle.loads, [record], tf.int32)
bucket_key = tf.shape(num)[0]
return [num, tf.transpose(num, [1, 0, 2])], bucket_key
# Samples random records from the data files and processes them
# to generate batches.
(vals_t, transposed_vals_t), _ = get_test_input(
tmp,
bucket_upper_bound=[10],
processor=_process,
dynamic_padding_dimensions=[0, 1],
dynamic_padding_constants=[0] * 2)
with self.session(graph=g):
for _ in range(10):
vals, transposed_vals = self.evaluate([vals_t, transposed_vals_t])
print(vals, np.transpose(transposed_vals, [0, 2, 1, 3]))
self.assertEqual(vals.shape[0], 8)
self.assertEqual(vals.shape[2], 3)
self.assertEqual(vals.shape[3], 3)
largest = np.amax(vals)
self.assertLessEqual(largest, 10)
self.assertEqual(vals.shape[1], largest)
for j in range(8):
n = vals[j, 0, 0, 0]
self.assertTrue(np.all(vals[j, :n] == n))
self.assertTrue(np.all(vals[j, n:] == 0))
self.assertAllEqual(vals, np.transpose(transposed_vals, [0, 2, 1, 3]))
def testDropRecordIfNegativeBucketKey(self):
def bucket_fn(num):
# Drops record if num[0] is odd.
return tf.cond(
tf.equal(tf.math.floormod(num[0], 2), 0), lambda: 1,
lambda: -tf.cast(num[0], tf.int32))
input_batch = run_basic_graph(use_nested_map=False, bucket_fn=bucket_fn)
with self.session():
record_seen = set()
for i in range(100):
ans_input_batch = self.evaluate(input_batch)
for s in ans_input_batch.record:
record_seen.add(s)
for i in range(100):
if i % 2 == 0:
self.assertIn(('%08d' % i).encode('utf-8'), record_seen)
else:
self.assertNotIn(('%08d' % i).encode('utf-8'), record_seen)
def testWithinBatchMixing(self):
    # Generate a couple of test files.
def generate_test_data(tag, cnt):
tmp = os.path.join(tf.test.get_temp_dir(), tag)
with tf.python_io.TFRecordWriter(tmp) as w:
for i in range(cnt):
w.write(('%s:%08d' % (tag, i)).encode('utf-8'))
return tmp
path1 = generate_test_data('input1', 100)
path2 = generate_test_data('input2', 200)
path3 = generate_test_data('input3', 10)
g = tf.Graph()
with g.as_default():
# A record processor written in TF graph.
def _process(source_id, record):
return py_utils.NestedMap(source_id=source_id, record=record), 1
# Samples random records from the data files and processes them
# to generate batches.
input_batch, buckets = generic_input.GenericInput(
file_pattern=','.join(
['tfrecord:' + path1, 'tfrecord:' + path2, 'tfrecord:' + path3]),
input_source_weights=[0.2, 0.3, 0.5],
file_random_seed=0,
file_buffer_size=32,
file_parallelism=4,
bucket_batch_limit=[8],
bucket_upper_bound=[1],
processor=_process)
with self.session(graph=g):
source_id_count = collections.defaultdict(int)
tags_count = collections.defaultdict(int)
total_count = 10000
for _ in range(total_count):
ans_input_batch, ans_buckets = self.evaluate([input_batch, buckets])
for s in ans_input_batch.source_id:
source_id_count[s] += 1
for s in ans_input_batch.record:
tags_count[s.split(b':')[0]] += 1
self.assertEqual(ans_input_batch.source_id.shape, (8,))
self.assertEqual(ans_input_batch.record.shape, (8,))
self.assertAllEqual(ans_buckets, [1] * 8)
self.assertEqual(sum(source_id_count.values()), total_count * 8)
self.assertEqual(sum(tags_count.values()), total_count * 8)
num_records = 8. * total_count
self.assertAlmostEqual(
tags_count[b'input1'] / num_records, 0.2, delta=0.01)
self.assertAlmostEqual(
tags_count[b'input2'] / num_records, 0.3, delta=0.01)
self.assertAlmostEqual(
tags_count[b'input3'] / num_records, 0.5, delta=0.01)
self.assertAlmostEqual(source_id_count[0] / num_records, 0.2, delta=0.01)
self.assertAlmostEqual(source_id_count[1] / num_records, 0.3, delta=0.01)
self.assertAlmostEqual(source_id_count[2] / num_records, 0.5, delta=0.01)
def testBoolDType(self):
tmp = os.path.join(tf.test.get_temp_dir(), 'bool')
with tf.python_io.TFRecordWriter(tmp) as w:
for i in range(50):
w.write(pickle.dumps(True if i % 2 == 0 else False))
g = tf.Graph()
with g.as_default():
# A record processor written in TF graph.
def _process(record):
bucket_key = 1
num, = tf.py_func(pickle.loads, [record], [tf.bool])
return [num], bucket_key
# Samples random records from the data files and processes them
# to generate batches.
inputs, _ = get_test_input(
tmp, bucket_upper_bound=[1], processor=_process)
with self.session(graph=g):
for _ in range(10):
inputs_vals = self.evaluate(inputs)[0]
self.assertEqual(inputs_vals.dtype, bool)
def testExtraArgs(self):
def _parse_record(record):
del record
example = py_utils.NestedMap(t=tf.convert_to_tensor(0))
bucketing_key = 1
return example, bucketing_key
def _parse_record_stateful(record):
del record
extra = tf.Variable(0)
example = py_utils.NestedMap(t=extra.value())
bucketing_key = 1
return example, bucketing_key
generic_input.GenericInput(
_parse_record,
file_pattern='',
bucket_upper_bound=[1],
bucket_batch_limit=[1])
with self.assertRaisesRegex(AssertionError, 'is not pure: extra_args='):
generic_input.GenericInput(
_parse_record_stateful,
file_pattern='',
bucket_upper_bound=[1],
bucket_batch_limit=[1])
def testTfData(self):
"""Checks that GenericInput can be invoked from a tf.data.Dataset."""
def _input_batch():
return run_basic_graph(use_nested_map=True)
# Trick to create dataset from tensor coming from custom op.
dummy_dataset = tf.data.Dataset.from_tensors(0).repeat()
dataset = dummy_dataset.map(lambda _: _input_batch())
with self.session(use_gpu=False) as sess:
it = tf.compat.v1.data.make_initializable_iterator(dataset)
sess.run(it.initializer)
batch = it.get_next()
for _ in range(10): # Read 10 batches.
print(sess.run(batch))
@unittest.skip('This test is expected to crash.')
def testFatalErrors(self):
tmp = os.path.join(tf.test.get_temp_dir(), 'fatal')
with tf.python_io.TFRecordWriter(tmp) as w:
for i in range(50):
w.write(str((i % 2) * 2**33))
def _parse_record(record):
# tf.strings.to_number raises error on overflow.
i = tf.strings.to_number(record, tf.int32)
example = py_utils.NestedMap(record=i)
bucketing_key = 1
return example, bucketing_key
with self.session():
# Without specifying fatal_errors all records not 0 are skipped.
input_batch, _ = generic_input.GenericInput(
_parse_record,
file_pattern=f'tfrecord:{tmp}',
bucket_upper_bound=[1],
bucket_batch_limit=[1])
for i in range(25):
ans_input_batch = self.evaluate(input_batch)
self.assertEqual(ans_input_batch.record[0], 0)
# With fatal_errors it dies instead.
input_batch, _ = generic_input.GenericInput(
_parse_record,
file_pattern=f'tfrecord:{tmp}',
bucket_upper_bound=[1],
bucket_batch_limit=[1],
fatal_errors=[tf.errors.INVALID_ARGUMENT])
# NOTE: There is no way to catch LOG(FATAL) from python side, so running
# this test will cause a crash.
for i in range(10):
self.evaluate(input_batch)
class GenericInputOpBenchmark(tf.test.Benchmark):
def benchmark_basic(self):
input_batch = run_basic_graph(use_nested_map=True)
with tf.Session() as sess:
print(self.run_op_benchmark(sess, input_batch, min_iters=10))
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
683a5c5baaba469274e36b437dc1f159aad7593e | 572f63126e81fd12c3eaa3c9293190a04c4808ba | /text_file.py | 51fdea75a5b60348dd4ef3f7438dde20f6a53f27 | [] | no_license | ibbocus/file_handling | caa32718f41634e71b42a728f26e70fb4582b522 | b4528a886b2c7c8c1b95508eadf35aa4c9c89dfe | refs/heads/master | 2022-11-14T09:37:33.579256 | 2020-07-08T18:00:57 | 2020-07-08T18:00:57 | 278,157,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | class TextFileHandling:
def __init__(self, file_path, text_storage = None):
self.file_path = file_path
self.text_storage = text_storage
# Going to read in two ways and write in two ways
def readtextfile(self):
#open file
#read the file
#close the file
try:
            file = open(self.file_path, 'r')  # try: wraps code that may raise an error
        except Exception as e:  # except: catches the raised exception
            print(e)
        else:  # runs only if no exception was raised; a finally clause would run regardless of whether an exception occurred
# self.text_storage = file.read(3) # this reads 3 characters in the text file
self.text_storage = file.readline()
self.text_storage = file.readline()
            print(file.tell())  # prints the current file offset; each readline() advances it past the text just read
            file.seek(0)  # seek(offset, whence) moves the offset; whence: 0 = from start (default), 1 = from current position, 2 = from end
self.text_storage = file.readlines()
file.close()
return self.text_storage
def writetextfile(self):
file = open("writer.txt", 'w')
file.write("my first python created file\n")
file.close()
file = open("writer.txt", "a+") # a+ means append and read
file.write("adding to txt file")
file.seek(0)
self.text_storage = file.read()
file.close()
return self.text_storage
def readtextfilesusingwith(self):
# reduces the overhead of closing files
# just opens it and closes it
# automatically closes the file and also closes it during the times of execution
with open("order.txt", "r") as file:
self.text_storage = file.read()
return self.text_storage
def writetextfilesusingwith(self):
with open("writer.txt", "w+") as file:
file.write("using writer with with")
file.seek(0)
self.text_storage = file.read()
return self.text_storage
def playingwithpythonosmodule(self):
import os
        print(os.getcwd(), "is the current folder")  # cwd = current working directory
        # os.remove("writer.txt")
        print(os.listdir())  # with no argument, lists the entries of the current working directory
# os.chdir("C:\Users\Ib_Bo\PycharmProjects\Com_1")
# os.mkdir("Ayb")
os.rmdir("Ayb") # this will remove the directory stated
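# A short usage sketch of the class above (file names are illustrative; the
# read methods assume the referenced files already exist):
if __name__ == '__main__':
    handler = TextFileHandling("order.txt")
    print(handler.writetextfilesusingwith())
    print(handler.readtextfilesusingwith())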
| [
"[email protected]"
] | |
1d7ed26d68903d80c4ab5c736012326121c90ce6 | 806f10d2b56a3f4b79f3d90e8ac6d0addd96c16a | /pyictacp/record/input.py | 7f6db8cb1e1699fb78cca4f0d4e54ae9c48aea19 | [] | no_license | HexF/pyictacp | 3f41e348d4c84d45876725e092c3967f08328b43 | a742e92dee0f2666c30069aed4427da981cee5fb | refs/heads/main | 2023-08-12T00:10:49.567906 | 2021-10-06T13:26:19 | 2021-10-06T13:26:19 | 412,511,915 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | from pyictacp.connection import Connection
from pyictacp.record import Record
from pyictacp.packet.data import InputStatusDataPacketData
from pyictacp.packet.command import PermanentInputBypassCommandPacket, RemoveInputBypassCommandPacket, RequestInputStatusCommandPacket, TemporaryInputBypassCommandPacket
class Input(Record,
data_class = InputStatusDataPacketData,
data_request_command = RequestInputStatusCommandPacket,
data_index_match = lambda rec, id: rec.input_index == id):
def __init__(self, connection: Connection, record_id: int):
super().__init__(connection, record_id)
self.input_state = None
self.bypassed = None
self.bypassed_latched = None
self.siren_lockout = None
def _update(self, data: InputStatusDataPacketData):
self.input_state = data.input_state
self.bypassed = data.bypassed
self.bypassed_latched = data.bypassed_latched
self.siren_lockout = data.siren_lockout
def remove_bypass(self):
self.connection.execute_command(
RemoveInputBypassCommandPacket(self.record_id)
)
def bypass(self, temporary: bool=False):
cmd_type = TemporaryInputBypassCommandPacket if temporary else PermanentInputBypassCommandPacket
self.connection.execute_command(
cmd_type(self.record_id)
)
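# A brief usage sketch (assumes an established Connection from
# pyictacp.connection and an input record with ID 1):
#   inp = Input(connection, record_id=1)
#   inp.bypass(temporary=True)   # sends TemporaryInputBypassCommandPacket
#   inp.remove_bypass()          # sends RemoveInputBypassCommandPacket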
| [
"[email protected]"
] | |
053a6d747f2455a605247829fbc91cd44c08ac2b | 0907a339c27471b1b0da4207229d245eec75c1ca | /src/utils/timeget.py | 3a90bd0d0e5f2f68738ddd3764a34c5b932e0d5a | [
"MIT"
] | permissive | Useems/TrydRPC | 2f142b0d54469229a15abbc794d47183f535a2d1 | f149779ecb147999a339399e85a195f024698eca | refs/heads/main | 2023-04-17T04:48:53.028499 | 2021-04-24T14:34:42 | 2021-04-24T14:34:42 | 341,267,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | timeOrder = ["acabou de abrir a plataforma", "há {0} minutos", "há {0} horas", "há {0} dias"]
from datetime import datetime  # required by datetime.now() below
import math                    # required by math.floor() below
# English templates per time bucket; this rebinding translates the Portuguese
# list above ("just opened the platform", "... minutes/hours/days ago").
timeOrder = ["just opened the platform", "{0} minutes ago", "{0} hours ago", "{0} days ago"]
def getTime(time):
    global timeOrder
    diff = (datetime.now() - time).total_seconds()
    index = 0
    # Promote seconds -> minutes -> hours -> days while the next unit fits.
    for i in [60, 60, 24]:
        if diff >= i and index + 1 < len(timeOrder):
            diff = diff/i
            index += 1
        else:
            break
    diff = math.floor(diff)
    message = timeOrder[index].format(diff)  # actually substitute the {0} placeholder
    if diff == 1:
        # singularize the unit: "1 minutes ago" -> "1 minute ago"
        message = message.replace("s ago", " ago")
return message | [
"[email protected]"
] | |
0158367327597b0ac63d733ad9deb79ba2213d1e | 7c1a0ee337c4407768c343518ebee7f8a1b540ef | /env/lib/python3.6/sre_constants.py | 91beca82ad32721c6b411f90126001a8780c8fc3 | [] | no_license | spmarisa/flask_demo | 8dcce02b43664da0b6afe97975e70a675425e22f | e5be66cfe1ebddc130875fb9fddc294d81085a0e | refs/heads/master | 2020-03-23T19:58:39.017626 | 2018-07-23T12:51:14 | 2018-07-23T12:51:14 | 142,013,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | /home/phaneendra/anaconda3/lib/python3.6/sre_constants.py | [
"[email protected]"
] | |
00659e30f98681b77ccbae801214198d8020a229 | 6e08e7057b4e8ac04aa031cea19518ff6e479b62 | /TSIS1-informatics/3752-1.py | 207dbbd58d826b5a32392cb98a6f035b6834dc5a | [] | no_license | Arujazzz/Python | 1e313cab12f11e506844e969d5e5072635cfd27c | 1ea44e9bdc830f60bf627958c2a5defad54af823 | refs/heads/main | 2023-05-02T23:39:28.053098 | 2021-05-20T05:56:15 | 2021-05-20T05:56:15 | 369,092,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | s = list(map(int, input().split()))
nset = set()
for i in s:
    # print YES if the value appeared earlier in the input, NO otherwise
    print('YES') if i in nset else print('NO')
nset.add(i) | [
"[email protected]"
] | |
71766bce5bf64747b94fe0451bda8719bec805b1 | edf354a966b107d784580591ea6d649e9e513378 | /django_chuck_conf.py | 582494f8fbd5b652d1186c41241e9ca170258e1c | [
"BSD-2-Clause"
] | permissive | cambieri/django-chuck | bc57da06c2892c082876e3ad15fdd10f23d9bfc8 | 43980e054c450038e5143d1b08edbcb0b331369f | refs/heads/master | 2020-12-25T12:07:54.365015 | 2013-06-24T11:33:49 | 2013-06-24T11:33:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # Where to put virtualenvs?
virtualenv_basedir="/home/workspace-django/virtualenvs"
# Where to put project dirs?
project_basedir="/home/workspace-django/projects"
# Comma separated list of dirs where Chuck should look for modules.
# . will be replaced with the Django Chuck modules dir
#module_basedirs = ["."]
module_basedirs = ["./modules"]
# comma separated list of modules that should always be installed
default_modules=["core", "south"]
# comma separated list of apps that should additionally get installed
#default_additional_apps = ["south"]
# use virtualenvwrapper?
use_virtualenvwrapper = False
# default django settings module to use
# project_name will be automatically prepended
django_settings = "settings.dev"
# requirements file to install in virtualenv
# default: requirements_local.txt
requirements_file = "requirements_local.txt"
# version control system
# possible values: git, svn, cvs, hg
# default: git
version_control_system = "git"
# the branch you want to checkout / clone
# default is ""
branch = ""
# Python version to use by default
# If not set version of local python interpreter will be used
python_version = "2.7"
# Where to find virtualenvs on your server?
server_virtualenv_basedir = "/opt/django/virtualenvs"
# Where to put projects on your server?
server_project_basedir = "/opt/django/sites"
# What is your email domain?
email_domain = "cambieri.it"
# module aliases are a list of modules under a single name
module_aliases = {
"test": ["unittest", "jenkins"],
"oscar": ["fabric", "jquery", "nginx", "pil", "postgres", "twitter-bootstrap", "uwsgi"],
}
# Run in debug mode
debug = False
# Don't delete project after failure?
# delete_project_on_failure=False
# Module to use as template engine
# Default: django_chuck.template.notch_interactive.engine
template_engine = "django_chuck.template.notch_interactive.engine"
| [
"[email protected]"
] | |
e6336016dcbe4e89b2a155b8b3de83bb815764d9 | 8b553f336d01a28c6e84d7c864963775073b892c | /perceptron.py | 95d698ae73dcc818c55c0acf75b6e0342af2b31f | [] | no_license | wangshuaibupt/rock-climbing-cnn | c3e8fad4e97104c2492ba4fe59cbbad35ec2e8f6 | aab26eb4fe16fe5ca1568efbd4d636aaf726a635 | refs/heads/master | 2022-12-05T22:22:15.362925 | 2020-07-26T13:58:38 | 2020-07-26T13:58:38 | 282,428,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,885 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2020 wangshuaibupt. All Rights Reserved
#
################################################################################
"""
perceptron.py
Perceptron
Authors: wangshuaibupt([email protected])
Date: 2020/07/25 14:57:06
"""
import random
import logging
import functools
import numpy as np
def activation(x):
"""激活函数"""
# return x
return 1 if x > 0 else 0
def activation_list(x_list):
"""列表每个元素都使用激活函数"""
out = []
for i in range(0, len(x_list)):
out.append(activation(x_list[i]))
return out
class VectorOP(object):
"""向量操作"""
@staticmethod
def dot_product(x, y, bia=None):
"""带偏置项的点积"""
if bia is None:
bia = 0.0
if len(x) == 0 or len(y) == 0:
return 0.0
return functools.reduce(lambda a, b: a + b, VectorOP.vector_multiply(x, y), bia)
    @staticmethod
    def vector_add(x, y):
        """Element-wise addition of two vectors."""
        return list(map(lambda x_y: x_y[0] + x_y[1], zip(x, y)))
    @staticmethod
    def vector_subtraction(x, y):
        """Element-wise subtraction of two vectors."""
        return list(map(lambda x_y: x_y[0] - x_y[1], zip(x, y)))
@staticmethod
def scala_multiply(v, s):
"""将向量v中的每个元素和标量s相乘"""
return map(lambda e: e * s, v)
@staticmethod
def vector_multiply(x, y):
"""向量对应元素相乘"""
return list(map(lambda multiply: multiply[0] * multiply[1], zip(x, y)))
class Perceptron(object):
"""感知机函数"""
def __init__(self, features, lables, iterations, learning_rate, activation):
"""初始化
:param features 特征个数
:param lables 标签
:param activation 激活函数
"""
self.features = features
self.lables = lables
self.activation = activation
self.input_parm_num = 0
        # maximum number of training iterations (epochs)
self.iterations = iterations
        # learning rate
self.learning_rate = learning_rate
if len(self.features) > 0:
self.input_parm_num = len(self.features[0])
        # weight vector
self.w = [0.0] * self.input_parm_num
        # bias
self.bia = 0.0
def one_iteration(self):
"""单次迭代将所有数据过一遍"""
samples = zip(self.features, self.lables)
for feature, lable in samples:
sub = self.predict(feature) - lable
delta_w = VectorOP.scala_multiply(feature, self.learning_rate * sub)
self.w = list(map(lambda a, b: a - b, self.w, delta_w))
self.bia = self.bia - self.learning_rate * sub * 1.0
def train(self):
"""函数训练"""
for i in range(0, self.iterations):
logging.info("iterations num is %s", i)
self.one_iteration()
def predict(self, x):
"""预测"""
return self.activation(VectorOP.dot_product(self.w, x, self.bia))
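# The update in one_iteration is the classic perceptron learning rule,
#   w <- w - lr * (y_hat - y) * x,    bia <- bia - lr * (y_hat - y),
# so parameters only move on misclassified samples. A minimal sketch reusing
# the class above to learn an OR gate (illustrative data, not from this file):
def _or_gate_demo():
    features = [[0, 0], [0, 1], [1, 0], [1, 1]]
    lables = [0, 1, 1, 1]
    p = Perceptron(features, lables, iterations=100, learning_rate=0.1, activation=activation)
    p.train()
    return [p.predict(f) for f in features]  # expected: [0, 1, 1, 1]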
if __name__ == '__main__':
features = [[1, 1, 1], [1, 1, 0], [1, 0, 1], [0, 1, 1], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 0]]
lables = [1, 0, 0, 0, 0, 0, 0, 0]
iterations = 1000
learning_rate = 0.1
p_obj = Perceptron(features, lables, iterations, learning_rate, activation)
p_obj.train()
print ("权重矩阵预测值:{w}".format(w=p_obj.w))
print ("偏置量预测值:{bia}".format(bia=p_obj.bia))
print (p_obj.predict([1.0, 1.0, 1.0]))
print (p_obj.predict([1.0, 0.0, 1.0]))
print (p_obj.predict([0.0, 1.0, 1.0]))
| [
"[email protected]"
] | |
3c614fad0d1969fd449d602b813487baaeec15fc | b39530cc6370acf017f8136ae2888bceaff412e1 | /TangoProject/wsgi.py | a20a940848b4cac39d94378de2e9301542b014f9 | [] | no_license | smallcrustation/TangoProject | 375a328dd664121d32e94ea7417cd7453e6c7b87 | 5969f97af69971665ef95a46856252931ad4ee0b | refs/heads/master | 2022-12-14T09:47:16.632714 | 2019-12-10T19:42:29 | 2019-12-10T20:15:59 | 70,802,056 | 0 | 0 | null | 2022-11-22T01:18:38 | 2016-10-13T12:01:16 | Python | UTF-8 | Python | false | false | 1,146 | py | """
WSGI config for TangoProject project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TangoProject.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"[email protected]"
] | |
eae2af2918fa99c6dbb9f80d51f087a4a62e8429 | d0581b344128cf838c9e7912bf922f8e2d37f2b3 | /08-Functions/01-BasicFunctions.py | 2db751135f260de39c5b4f10cfeb22dd5581938f | [] | no_license | ravi4all/PythonFeb_9-30 | fe60d240db3e0922332072ee3ae74cea7fab29ef | 8c660e826328c411ec7dd91564ee457e9c758aa5 | refs/heads/master | 2021-05-03T08:34:59.428138 | 2018-03-24T11:38:11 | 2018-03-24T11:38:11 | 120,567,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | x = 12
y = 13
def add():
## x = 12
## y = 13
z = x + y
print("Sum is",z)
def sub():
## x = 12
## y = 13
z = x - y if x > y else y - x
print("Difference is",z)
def mul():
## x = 12
## y = 13
z = x * y
print("Multiplication is",z)
def div():
## x = 12
## y = 13
z = x / y
print("Divison is",z)
add()
sub()
div()
mul()
| [
"[email protected]"
] | |
3e15473e0ceef6ee8b0f3a681787454452ed8b62 | 39beeca8b6862adfb7f1a55b9f5308b20cd64395 | /delivery_history/models/__init__.py | 89a0441e27055814dfc92edc0722e69a16f1c3a6 | [] | no_license | Ibrahimmardini/texmar | 98c34469618b72648686c3252b6053f1dd07e9a0 | d31416df8feb87b93d757b1451be5f870d3ca867 | refs/heads/master | 2023-08-15T20:20:57.520164 | 2021-02-02T12:02:17 | 2021-02-02T12:02:17 | 255,095,450 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | # -*- coding: utf-8 -*-
from . import sale_order_line_changes
from . import sale_delivery_date_history | [
"[email protected]"
] | |
6afe39dc1e07f76c1385e9d30b2ae28b9529fbab | 738f2cb3642da73eb8aa2c617f2ae006279c96dc | /五期周末作业/start.py | d692f71898ae296f0668bda0e980d289f61e7f10 | [] | no_license | betakenname/used_modules_homework | 4c0a08b3a37da1fd8b49ee3f68c28de956047eb5 | 81814ede5fd5b48903cc5e9e7f8a91ce907f4e14 | refs/heads/master | 2020-04-10T16:12:35.273402 | 2018-12-07T09:42:02 | 2018-12-07T09:42:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | import os,sys
sys.path.append(os.path.dirname(__file__))
import core
if __name__ == '__main__':
    # dispatch table mapping menu choices to functions
    funcs = {"1": core.login, "2": core.register}
    while True:
        print("""
        1. Log in
        2. Register
        """)
        res = input("Choose an option (q to quit): ")
        if res == "q":  # entering q exits
            print("Goodbye!")
            break
        if res in funcs:
            funcs[res]()
        else:
            print("Invalid input, please try again!")
| [
"[email protected]"
] | |
e9cf1f988cf4a51913aa29d3fd87ab9904ae843e | 49db7f622d670f08be3f48b12e40dd6e76898af2 | /readstdout.py | 2412c331f88adaeadbcfbdfefb41b30e87632b09 | [] | no_license | PlutusApp/MachineLearning | 65da8f9a37fdf5014dc889f0bc3da38602422213 | 3c310266b4916f315c0eca9c2317b28c028c6dcf | refs/heads/master | 2021-01-24T18:58:13.026393 | 2017-03-26T15:50:06 | 2017-03-26T15:50:06 | 86,162,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | import subprocess
proc = subprocess.Popen(['python3','productList.py'],stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if line != b'':
#the real code does filtering here
print("test:",line.rstrip())
else:
break
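# Note: readline() returns b'' only at EOF, i.e. once the child process closes
# its stdout, so this loop drains the subprocess output line by line and then
# stops.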
| [
"[email protected]"
] | |
d64ad6136478df14a748d71546ab31ce4efffa36 | 863c77a136ed5383bd4dc9240ef91fd06bb2037c | /app/__init__.py | 91596db6e6d34c7aa3a48ad27cd59798436b6cdf | [] | no_license | VianneyMI/morpion | 2b84863a80ae609330545336460e44e410557b53 | 5a4a13a137c68ea32eb12410d42d5828e3c1e291 | refs/heads/master | 2023-04-11T19:14:00.641608 | 2021-05-16T20:46:36 | 2021-05-16T20:46:36 | 367,982,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | import connexion
from app import extensions
def create_app():
# Setup connexion
connexion_app = connexion.FlaskApp(__name__)
connexion_app.add_api('api.yaml')
flask_app = connexion_app.app
# Flask setup
extensions.init_app(flask_app)
# Database setup
extensions.create_db(flask_app)
return flask_app
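# create_app() is the usual application-factory pattern: constructing the app
# inside a function (rather than at import time) lets tests and WSGI servers
# build independently configured instances.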
| [
"[email protected]"
] | |
abe34e472f3eaa6b544688f444409cdee257634c | fbfcccbbf045c271ce6156113c14c40abf3cbeb6 | /homework/casadi_gen.py | c32efd0dcf40e2b578e66fc7a2a5645aa38b0004 | [
"BSD-3-Clause"
] | permissive | jmpark418/aae497-f19 | e6e4182e5068976ccb398bb0bda5c3586ba1cdb1 | adca263c4c553dc3afe634fde390fe8dfefb5fe3 | refs/heads/master | 2020-07-08T12:27:39.662266 | 2019-10-31T01:11:00 | 2019-10-31T01:11:00 | 203,671,932 | 0 | 0 | null | 2019-08-21T22:14:45 | 2019-08-21T22:14:44 | null | UTF-8 | Python | false | false | 239 | py | import casadi as ca
x = ca.SX.sym('x')  # scalar symbolic variable
y = 2*x
f = ca.Function('double_this', [x], [y], ['x'], ['y'])  # function with named inputs/outputs
gen = ca.CodeGenerator('casadi_gen.c', {'main': False, 'mex': False, 'with_header': True, 'with_mem': True})
gen.add(f)
gen.generate()  # emits casadi_gen.c (plus casadi_gen.h, since with_header is True)
| [
"[email protected]"
] | |
2f78fce0ae82499ddc9b90df99fdd48b92a01f86 | 6d80ce7a1f44ddf5741fd190ddfe0d9be8e5f162 | /model/detection_model/maskscoring_rcnn/demo/webcam.py | b4176584dda2025fc3284894978afd67f5f831c9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dun933/FudanOCR | dd8830ca4b8ebb08acd31326fcf5aa3c961886a0 | fd79b679044ea23fd9eb30691453ed0805d2e98b | refs/heads/master | 2021-04-03T19:50:47.646099 | 2020-03-16T08:43:59 | 2020-03-16T08:43:59 | 248,391,401 | 1 | 0 | MIT | 2020-03-19T02:23:11 | 2020-03-19T02:23:10 | null | UTF-8 | Python | false | false | 2,705 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import time
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for the prediction to be shown",
)
parser.add_argument(
"--min-image-size",
type=int,
default=224,
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"--show-mask-heatmaps",
dest="show_mask_heatmaps",
help="Show a heatmap probability for the top masks-per-dim masks",
action="store_true",
)
parser.add_argument(
"--masks-per-dim",
type=int,
default=2,
help="Number of heatmaps per dimension to show",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=args.confidence_threshold,
show_mask_heatmaps=args.show_mask_heatmaps,
masks_per_dim=args.masks_per_dim,
min_image_size=args.min_image_size,
)
# cam = cv2.VideoCapture(0)
# while True:
# start_time = time.time()
# ret_val, img = cam.read()
# composite = coco_demo.run_on_opencv_image(img)
# print("Time: {:.2f} s / img".format(time.time() - start_time))
# cv2.imshow("COCO detections", composite)
# if cv2.waitKey(1) == 27:
# break # esc to quit
# cv2.destroyAllWindows()
start_time = time.time()
import os
root = "/workspace/mnt/group/ocr/qiutairu/code/maskrcnn-benchmark/demo"
img = cv2.imread(os.path.join(root, "test.jpg"))
composite = coco_demo.run_on_opencv_image(img)
print("Time: {:.2f} s / img".format(time.time() - start_time))
cv2.imwrite(os.path.join(root, "test_result.jpg"), composite)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7ece072e393552c38e72e4d6a0bb231595d98128 | 6a6984544a4782e131510a81ed32cc0c545ab89c | /src/trigger-sim/resources/scripts/print_trigger_configuration.py | 0b21c3f90486a4f57234d8f37117ef7f27cd4609 | [] | no_license | wardVD/IceSimV05 | f342c035c900c0555fb301a501059c37057b5269 | 6ade23a2fd990694df4e81bed91f8d1fa1287d1f | refs/heads/master | 2020-11-27T21:41:05.707538 | 2016-09-02T09:45:50 | 2016-09-02T09:45:50 | 67,210,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | #!/usr/bin/env python
from optparse import OptionParser
usage = """ %prog -g [GCD file]
This script pulls the trigger configuration
information from the input GCD file.
"""
parser = OptionParser(usage=usage)
parser.add_option("-g","--gcd",
dest="GCD_FILENAME",
help="Name of the input GCD file.")
(options, args) = parser.parse_args()
from icecube import dataclasses, dataio
f = dataio.I3File(options.GCD_FILENAME)
frame = f.pop_frame()
while f.more() and "I3DetectorStatus" not in frame:
frame = f.pop_frame()
detector_status = frame["I3DetectorStatus"]
trigger_status_map = detector_status.trigger_status
for key, config in trigger_status_map.iteritems() :
print("TriggerKey : %s" % str(key))
print(" %s" % config.trigger_name)
for name, setting in config.trigger_settings.iteritems():
print(" %s = %s" % (name,setting))
| [
"[email protected]"
] | |
50214a889c1615e1112e87bcafae8b641515c699 | 864d9402cc77f97ab8c015fa0557667a567785f4 | /assign5.1.py | 4d54afcdca802e3fad42c69d6c3e1763bd9c591d | [] | no_license | fkxodls/python | 6068e453c132a83468960175b9d100c13370db4c | 89200102f4bb60e35ecfc60aec6fb8674fd0a838 | refs/heads/master | 2020-07-21T15:53:04.895707 | 2019-12-07T06:23:38 | 2019-12-07T06:23:38 | 206,913,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | lnum = None
snum = None
while True:
num = input("enter a number ")
if num == 'done':
break
try:
num = int(num)
    except ValueError:
        print('invalid value')
        continue
    if lnum is None:
lnum = num
snum = num
elif num > lnum:
lnum = num
elif num < snum:
snum = num
print("Maximum is",lnum)
print("Minimum is",snum)
| [
"[email protected]"
] | |
6ed89e3db6fe588de88fde566e4c388509b50cbe | 68be706be391fce3667bbcb9ebb9a7298196149c | /setup.py | 82768fc5110675b35f69199e0a4fdab3425ee5ae | [] | no_license | chaoflow/example.packagerepo | 764ecaa46f59f756b758b2b60debe41be1c3d2f7 | 657a39ba5ee1229046dedf208c879333f01c9439 | refs/heads/master | 2021-01-22T19:44:57.367420 | 2009-10-29T02:02:48 | 2009-10-29T02:02:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | from setuptools import setup
name = 'example.packagerepo'
version = 0.1
# derive every parent namespace package from the dotted name: 'a.b.c' -> ['a', 'a.b', 'a.b.c']
packages = [name.rsplit('.',x)[0] for x in reversed(range(len(name.split('.'))))]
setup(name=name,
version=version,
description="Example package repository, containing one egg",
#long_description=open("README.txt").read() + "\n" +
# open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers = [],
keywords='',
author='Florian Friesdorf',
author_email='[email protected]',
url='',
license='',
packages = packages,
package_dir = {'': 'src'},
# all except the last are treated as namespace_packages
namespace_packages=packages[:-1],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| [
"[email protected]"
] | |
cf9dad94cae2dbc0f9a4b994ff370ceeb7fc8021 | 346e3729151491493c6a61f958ba9ddd8028c3aa | /ICDARRecTs_task2/ICDARRecTs_2Train.py | 69d40541eea710ccab4318a9cca5b000304e1702 | [] | no_license | ResearchingDexter/ICDAR2019RecTS | 16687db52b38efcdd44083e8235c2a276c19276c | 274978435455671dc22ca18e4d9b1ec8b8bb80e9 | refs/heads/master | 2020-05-16T08:35:41.612158 | 2019-08-31T13:22:14 | 2019-08-31T13:22:14 | 182,917,144 | 11 | 5 | null | null | null | null | UTF-8 | Python | false | false | 6,686 | py | import torch
from torch.autograd import Variable
from datetime import datetime
from torch.utils.data import DataLoader
from torch.nn import CTCLoss
from torch.optim import Adam,Adadelta
import json
from torchvision import transforms
from IPython.display import clear_output
from ICDARRecTs_2DataSet import ICDARRecTs_2DataSet
from ICDARRecTs_2NN import DenseLSTM,VGGLSTM,DenseCNN,VGGFC,ResNetLSTM
import pdb
import os
import sys
sys.path.append('../')
from Logging import *
torch.backends.cudnn.benchmark = True
os.environ['CUDA_VISIBLE_DEVICES']='0'
DEVICE='cuda'
BATCH_SIZE=8
EPOCH=10000
PATH=r'E:\Files\ICDAR2019RecTs\ReCTS\\'
DICTIONARY_NAME='RecTs2dictionary.json'
IMAGE_PATH=r'E:\Files\ICDAR2019RecTs\ReCTS\task2_cropped_img_less_30\\'
MODEL_PATH=r'E:\Files\ICDAR2019RecTs\ReCTS\\'
MODEL_NAME='DenseCNN.pkl'
PRETRAIN=False
NUM_CLASS=4134+1
LR=0.001
MAX_ACCURACY=0
def train(pretrain=PRETRAIN):
logging.debug('pretrain:{}'.format(pretrain))
if DEVICE=='cuda':
if torch.cuda.is_available()==False:
logging.error("can't find a GPU device")
pdb.set_trace()
#model=DenseLSTM(NUM_CLASS)
#model=VGGLSTM(NUM_CLASS)
#model=DenseCNN(NUM_CLASS)
#model=VGGFC(NUM_CLASS)
model=ResNetLSTM(NUM_CLASS)
if os.path.exists(MODEL_PATH)==False:
os.makedirs(MODEL_PATH)
if os.path.exists(PATH+DICTIONARY_NAME)==False:
logging.error("can't find the dictionary")
pdb.set_trace()
with open(PATH+DICTIONARY_NAME,'r') as f:
dictionary=json.load(f)
if pretrain==True:
model.load_state_dict(torch.load(MODEL_PATH+MODEL_NAME,map_location=DEVICE))
model.to(DEVICE).train()
model.register_backward_hook(backward_hook)#transforms.Resize((32,400))
dataset=ICDARRecTs_2DataSet(IMAGE_PATH,dictionary,BATCH_SIZE,img_transform=transforms.Compose([transforms.ColorJitter(brightness=0.5,contrast=0.5,saturation=0.5,hue=0.3),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]))
dataloader=DataLoader(dataset,batch_size=BATCH_SIZE,shuffle=True,num_workers=4,drop_last=False)#collate_fn=dataset.collate
#optimizer=Adam(model.parameters(),lr=LR,betas=(0.9,0.999),weight_decay=0)
optimizer=Adadelta(model.parameters(),lr=0.01,rho=0.9,weight_decay=0)
criterion=CTCLoss(blank=0)
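    # torch.nn.CTCLoss expects log-probabilities shaped (T, N, C) together with
    # per-sample prediction lengths and label lengths; blank=0 reserves class
    # index 0 as the CTC blank, presumably why NUM_CLASS above is 4134 + 1.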
length=len(dataloader)
max_accuracy=0
if os.path.exists('max_accuracy.txt')==True:
with open('max_accuracy.txt','r') as f:
max_accuracy=float(f.read())
for epoch in range(EPOCH):
epoch_time=datetime.now()
epoch_correct=0
epoch_loss=0
min_loss=100
for step,data in enumerate(dataloader):
step_time=datetime.now()
imgs,names,label_size,img_name=data
#print(names,label_size)
logging.debug("imgs' size:{}".format(imgs.size()))
imgs=Variable(imgs,requires_grad=True).to(DEVICE)
label,batch_label=dataset.transform_label(batch_name=names)
label=Variable(label).to(DEVICE)
label_size=Variable(label_size).to(DEVICE)
preds=model(imgs)
logging.debug("preds size:{}".format(preds.size()))
preds_size=Variable(torch.LongTensor([preds.size(0)]*BATCH_SIZE)).to(DEVICE)
loss=criterion(preds,label,preds_size,label_size)
epoch_loss+=loss.item()
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step()
if min_loss>loss.item():
min_loss=loss.item()
torch.save(model.state_dict(),MODEL_PATH+MODEL_NAME)
num_same=if_same(preds.cpu().data,batch_label)
epoch_correct+=num_same
logging.debug("Epoch:{}|length:{}|step:{}|num_same:{}|loss:{:.4f}|min loss:{:.4f}".format(epoch,length,step,num_same,loss.item(),min_loss))
logging.debug("the time of one step:{}".format(datetime.now()-step_time))
if step%100==0:
clear_output(wait=True)
accuracy=epoch_correct/(length)*BATCH_SIZE
if accuracy>max_accuracy:
max_accuracy=accuracy
with open('max_accuracy.txt','w') as f:
f.write(str(max_accuracy))
torch.save(model.state_dict(),MODEL_PATH+MODEL_NAME)
torch.save(model.state_dict(),MODEL_PATH+'optimal'+str(max_accuracy)+MODEL_NAME)
mean_loss=epoch_loss/length
logging.info('Epoch:{}|accuracy:{}|mean loss:{}|the time of one epoch:{}|max accuracy:{}'.format(epoch,accuracy,mean_loss,datetime.now()-epoch_time,max_accuracy))
with open('accuracy.txt','a+') as f:
f.write('Epoch:{}|accuracy:{}|mean loss:{}|the time of one epoch:{}|max accuracy:{}\n'.format(epoch,accuracy,mean_loss,datetime.now()-epoch_time,max_accuracy))
def backward_hook(module, grad_input, grad_output):
    for g in grad_input:
        #print('g:{}'.format(g))
        if g is not None:  # backward hooks may receive None for some inputs
            g[g != g] = 0  # NaN != NaN, so this zeroes every NaN entry in the gradient
def if_same(preds,batch_label):
#print(batch_label)
t,b,n_class=preds.size()
preds=preds.permute(1,0,2)
_,preds=preds.max(2)
count=0
def condense(pred):
result=[]
original_pred=[]
for i,p in enumerate(pred):
original_pred.append(p.item())
if p!=0 and (not(i>0 and pred[i-1]==pred[i])):
result.append(p.item())
return result,original_pred
for pred,label in zip(preds,batch_label):
flag=0
pred,original_pred=condense(pred)
label,_=condense(label)
if(len(pred)==len(label)):
for i,p in enumerate(pred):
if(p!=label[i]):
flag=1
break
if(flag==0 and len(pred)==len(label)):
count+=1
"""if(count==1):
print('label:{}'.format(label))
print('pred:{}'.format(pred))
print('original pred:{}'.format(original_pred))"""
print('label:{}'.format(label))
print('pred:{}'.format(pred))
if(len(pred)==0):
pass
#return (0,1)
return count
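# Note: condense() above performs greedy CTC decoding — argmax per time step,
# collapse consecutive repeats, drop the blank symbol (index 0); e.g.
# [0, 3, 3, 0, 3, 5] -> [3, 3, 5].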
if __name__=='__main__':
train(PRETRAIN)
"""
temp=PATH + DICTIONARY_NAME
with open(temp,'r') as f:#train_ReCTS_019633.12.jpg,¡
a=json.load(f)
i=len(a)
a['¡']=i
print(len(a))
with open(PATH+'1'+DICTIONARY_NAME,'w') as f:
json.dump(a,f)
#print(a.get('¡'))"""
| [
"[email protected]"
] | |
b57f96130b2ac339bfe0abfe7a141f41ec8080a2 | c122dcd3eedccfbdd3e087076fb67dc7cfc77282 | /seg_models/models/deeplab.py | 5adaaf909a1935faa8963032b4dc4f92c8b6eb67 | [
"MIT"
] | permissive | daniel-zeng/SegSort | 4bba641ce4ce376ad9e9fff05b561fc868637132 | 7a50e6253df23a7719f962b34acff2626c916354 | refs/heads/master | 2023-06-29T18:43:47.981883 | 2021-08-01T20:11:41 | 2021-08-01T20:11:41 | 298,406,985 | 0 | 0 | MIT | 2020-09-24T22:15:55 | 2020-09-24T22:15:55 | null | UTF-8 | Python | false | false | 3,398 | py | import tensorflow as tf
from network.common.resnet_v1 import resnet_v1_101
import network.common.layers as nn
def _deeplab_builder(x,
name,
cnn_fn,
num_classes,
is_training,
use_global_status,
reuse=False):
"""Helper function to build Deeplab v2 model for semantic segmentation.
The Deeplab v2 model is composed of one base network (ResNet101) and
one ASPP module (4 Atrous Convolutional layers of different size). The
segmentation prediction is the summation of 4 outputs of the ASPP module.
Args:
x: A tensor of size [batch_size, height_in, width_in, channels].
name: The prefix of tensorflow variables defined in this network.
cnn_fn: A function which builds the base network (ResNet101).
num_classes: Number of predicted classes for classification tasks.
is_training: If the tensorflow variables defined in this network
would be used for training.
use_global_status: enable/disable use_global_status for batch
normalization. If True, moving mean and moving variance are updated
by exponential decay.
reuse: enable/disable reuse for reusing tensorflow variables. It is
useful for sharing weight parameters across two identical networks.
Returns:
A tensor of size [batch_size, height_in/8, width_in/8, num_classes].
"""
# Build the base network.
x = cnn_fn(x, name, is_training, use_global_status, reuse)
with tf.variable_scope(name, reuse=reuse) as scope:
# Build the ASPP module.
aspp = []
for i,dilation in enumerate([6, 12, 18, 24]):
score = nn.atrous_conv(
x,
name='fc1_c{:d}'.format(i),
filters=num_classes,
kernel_size=3,
dilation=dilation,
padding='SAME',
relu=False,
biased=True,
bn=False,
is_training=is_training)
aspp.append(score)
score = tf.add_n(aspp, name='fc1_sum')
return score
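# Note: the four nn.atrous_conv branches above differ only in dilation
# (6/12/18/24). A 3x3 kernel with dilation d spans an effective window of
# (2d+1) x (2d+1) pixels, so summing the branches mixes context at four scales.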
def deeplab_resnet101(x,
num_classes,
is_training,
use_global_status,
reuse=False):
"""Builds Deeplab v2 based on ResNet101.
Args:
x: A tensor of size [batch_size, height_in, width_in, channels].
name: The prefix of tensorflow variables defined in this network.
num_classes: Number of predicted classes for classification tasks.
is_training: If the tensorflow variables defined in this network
would be used for training.
use_global_status: enable/disable use_global_status for batch
normalization. If True, moving mean and moving variance are updated
by exponential decay.
reuse: enable/disable reuse for reusing tensorflow variables. It is
useful for sharing weight parameters across two identical networks.
Returns:
A tensor of size [batch_size, height_in/8, width_in/8, num_classes].
"""
h, w = x.get_shape().as_list()[1:3] # NxHxWxC
scores = []
for i,scale in enumerate([1]):
with tf.name_scope('scale_{:d}'.format(i)) as scope:
x_in = x
score = _deeplab_builder(
x_in,
'resnet_v1_101',
resnet_v1_101,
num_classes,
is_training,
use_global_status,
reuse=reuse)
scores.append(score)
return scores
| [
"[email protected]"
] | |
797b90c5844bc3a338d7aeb8c53eab43e5b3da5c | 5ae650f569d9929140dd61f86b7dd69187125dde | /resorce/page_costome.py | 5040f298ce9c004ce29c16205f2c44dd7e90b2df | [] | no_license | liuxingyuxx/LXY | 17bc641b9ff029cab2fd313ea908a47ecfdb5c6f | c43e3a78ab99a8f5923d0e3d4eb6d9ccba2ffaad | refs/heads/master | 2020-03-19T07:34:16.058725 | 2018-06-05T05:40:23 | 2018-06-05T05:40:23 | 135,989,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #--*-- coding:utf-8 --*--
'''
Written while studying the Old Boy Python full-stack course; used for a home-made pagination feature.
'''
class PageInfo(object):
def __init__(self, current_page,all_data_num, per_page_num):
try:
self.current_page = int(current_page)
        except (TypeError, ValueError):
self.current_page = 1
self.per_page_num = per_page_num
        self.all_page_num = all_data_num/per_page_num
def start(self):
        return (self.current_page-1)*self.per_page_num
def end(self):
        return (self.current_page)*self.per_page_num
def pager(self):
        # intended to show the 3 pages on either side of the current page
page_list = []
def costom(request):
current_page = request.GET.get('page')
    page_info = PageInfo(current_page, 100, 10)  # 100 = illustrative total row count; PageInfo requires all three arguments
    user_list = models.UserInfo.objects.all()[page_info.start():page_info.end()]  # models is assumed imported by the Django app using this helper
| [
"[email protected]"
] | |
9355c2e7458a18116cb2e9ed51a955260f7fbaaa | 81d9e83bbcfcd98836f6df6f0aba89a55c5c9adf | /store/migrations/0005_auto_20201108_0234.py | 821ffead7487267c75d149cbb33334dcb7029034 | [] | no_license | AnikaTahsin06/PeriwinkleRose | 3405b47391d9544a67f940df9231753aeae52167 | f4bf1c98b0645243aa45a45d4c5d1f66fe9cab37 | refs/heads/main | 2023-02-03T23:48:06.205837 | 2020-12-27T18:52:48 | 2020-12-27T18:52:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # Generated by Django 3.1.2 on 2020-11-07 20:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0004_auto_20201107_0258'),
]
operations = [
migrations.RenameField(
model_name='customer',
old_name='lasr_name',
new_name='last_name',
),
]
| [
"[email protected]"
] | |
b852e1a665b8052d9d91c5710556b67df67ddb7f | fa2a2cd5e0089f216a0df15defa6a3abf09be083 | /mlp/prediction/debug_utils.py | 1c3a160e8b47a734001f3605c4b5c60e87a5b303 | [] | no_license | dominikheinisch/neural_network | d2a925ea77ea13090da9c89da439541438be09ec | 4705f9c74fdfc33757f7e9e9d5ea5ad2d97aaaa6 | refs/heads/master | 2020-08-08T06:55:39.349768 | 2019-11-27T19:08:38 | 2019-11-27T19:08:38 | 213,767,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,708 | py | import numpy as np
from loader.loader import load
from loader.mnist_loader import load_data_wrapper
from saver.saver import save
# from prediction.network import calc_prediction_accuracy
from prediction.activation_function import SIGMOID, RELU
from utils.timer import elapsed_timer
# def print_result(filename, test_data):
# activation_func = np.vectorize(SIGMOID[0])
# print(filename)
# with elapsed_timer() as timer:
# te_in, te_out = test_data
# weights = load(filename=filename)['weights']
# for i in range(len(weights)):
# print(f'{i} {calc_prediction_accuracy(activation_func, *weights[i], te_in, te_out)}')
# print(f'timer: {timer():.2f}')
def prepare(filename):
    # patch a saved result dict to record its activation, then derive a new
    # file name with "sigmoid_" inserted after "_simulation_" (the save itself
    # is left commented out below)
    data = load(filename)
data['activation'] = 'sigmoid'
str_to_find = '_simulation_'
index = filename.find(str_to_find) + len(str_to_find)
new_filename = f'{filename[:index]}sigmoid_{filename[index:]}'
# data = load(filename)
# hidden_neurones = 'hidden_neurones'
# data[hidden_neurones] = 50
# str_to_find = '_draw_range_'
# index = filename.find(str_to_find) + len(str_to_find) + 4
# new_filename = f'{filename[:index]}hidden_neurones_50_{filename[index:]}'
# save(data=data, filename=new_filename)
print("'" + new_filename + "',")
if __name__ == "__main__":
_, validation_data , test_data = load_data_wrapper("../data")
# print_result('test_weights.pkl', test_data)
# filename = 'test_alpha_0.04_batch_100_draw_range_1.0_hidden_neurones_50.pkl'
# print_result(filename=filename, test_data=test_data)
# file = '2_test_alpha_0.01_batch_100_draw_range_0.05_hidden_neurones_25.pkl'
# file = 'once_sigmoid_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_100_res_0.9739.pkl'
# test_data = test_data[0], test_data[1]
# print_result(filename=file, test_data=test_data)
# from saver.saver import save
files = [
# 'draw_range_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_24.4_times_5.pkl',
# 'draw_range_simulation_alpha_0.04_batch_100_draw_range_0.4_hidden_neurones_50_avg_epochs_24.2_times_5.pkl',
# 'draw_range_simulation_alpha_0.04_batch_100_draw_range_0.6_hidden_neurones_50_avg_epochs_25.0_times_5.pkl',
# 'draw_range_simulation_alpha_0.04_batch_100_draw_range_0.8_hidden_neurones_50_avg_epochs_23.8_times_5.pkl',
# 'draw_range_simulation_alpha_0.04_batch_100_draw_range_1.0_hidden_neurones_50_avg_epochs_27.6_times_5.pkl',
# 'alpha_simulation_alpha_0.005_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_51.4_times_5.pkl',
# 'alpha_simulation_alpha_0.01_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_54.0_times_5.pkl',
# 'alpha_simulation_alpha_0.02_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_31.8_times_5.pkl',
# 'alpha_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_24.4_times_5.pkl',
# 'alpha_simulation_alpha_0.08_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_19.6_times_5.pkl',
# 'batch_simulation_alpha_0.04_batch_10_draw_range_0.2_hidden_neurones_50_avg_epochs_22.4_times_5.pkl',
# 'batch_simulation_alpha_0.04_batch_25_draw_range_0.2_hidden_neurones_50_avg_epochs_25.8_times_5.pkl',
# 'batch_simulation_alpha_0.04_batch_50_draw_range_0.2_hidden_neurones_50_avg_epochs_23.2_times_5.pkl',
# 'batch_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_24.4_times_5.pkl',
# 'batch_simulation_alpha_0.04_batch_200_draw_range_0.2_hidden_neurones_50_avg_epochs_24.2_times_5.pkl','
# 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_15_avg_epochs_22.6_times_5.pkl',
# 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_25_avg_epochs_20.6_times_5.pkl',
# 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_50_avg_epochs_24.4_times_5.pkl',
# 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_75_avg_epochs_26.0_times_5.pkl',
# 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_100_avg_epochs_31.2_times_5.pkl',
# 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_150_avg_epochs_38.0_times_2.pkl',
]
#
# # goodF = [
# # 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_15_avg_epochs_22.6_times_5.pkl',
# # 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_25_avg_epochs_20.6_times_5.pkl',
# # 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_75_avg_epochs_26.0_times_5.pkl',
# # 'hidden_neurones_simulation_alpha_0.04_batch_100_draw_range_0.2_hidden_neurones_100_avg_epochs_31.2_times_5.pkl',
# # ]
# #
#
# for f in files:
# prepare(f)
# print(load('once_relu_alpha_0.003_batch_10_draw_range_0.2_hidden_neurones_50.pkl'))
print(load('hidden_neurones4_simulation_relu_alpha_0.01_batch_5_draw_range_0.2_hidden_neurones_25_avg_epochs_16.0_times_5.pkl'))
# def calc_avg_times(input):
# max_len = max(len(sub_list) for sub_list in input)
# result_acc = [0] * max_len
# divider_acc = [0] * max_len
# for sub_list in input:
# for i in range(len(sub_list)):
# result_acc[i] += sub_list[i]
# divider_acc[i] += 1
# return [result_acc[i] / divider_acc[i] for i in range(max_len)]
#
# a = [[1, 3], [2, 4, 6, 9], [], [-1]]
# print(calc_avg_times(a))
| [
"[email protected]"
] | |
ab4e452b493ea04924e42fbd382980755e7f27c5 | 7bcd811a6e575f2bbc658510c7c64e3cf78a0aa5 | /ui/settingsitems/numbox.py | b06982684d73f627efbf43fc5568b57a56d6b379 | [] | no_license | alang321/Shellmania | 1e32ae2ae36767b842ad2f3a03021628dd989e10 | fc489ffda77cec2c1e0376e481b1cf3907eaf472 | refs/heads/master | 2023-01-24T11:49:22.354544 | 2020-12-08T22:58:57 | 2020-12-08T22:58:57 | 268,876,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,432 | py | import pygame
class numbox:
def __init__(self, keydict, key, isint, hasfocus, font, pos, w, h, bordercolor, bordercolorhover, backgroundcoloractive, backgroundcolorinactive, minvalue, maxvalue, lostfocusfunction, textcolor=pygame.color.THECOLORS["black"], maxtextlength=5):
self.isint = isint
if self.isint:
self.convert = int
else:
self.convert = float
self.keydict = keydict
# key for dict values
self.key = key
        #convert the stored value to the specified data type
self.value = self.convert(self.keydict[self.key])
self.text = str(self.value)
self.font = font
self.textcolor = textcolor
self.maxtextlength = maxtextlength
self.minval = self.convert(minvalue)
self.maxval = self.convert(maxvalue)
self.rendertext(self.text)
self.key = key
#text box
self.pos = pos
self.textbox = pygame.Surface((w, h))
self.rect = self.textbox.get_rect()
self.rect.center = self.pos
self.borderrect = pygame.Rect((0, 0), (w-1, h-1))
#colors
self.bordercolor = bordercolor
        self.bordercolordefault = bordercolor
self.bordercolorhover = bordercolorhover
self.inactivecolorbackground = backgroundcolorinactive
self.activecolorbackground = backgroundcoloractive
#function that is called when button is pressed
self.lostfocusfunction = lostfocusfunction
self.delete = False
self.hasfocus = hasfocus
self.hovering = False
        # set on mouse-button-down; the action fires only if button one is released while still over the widget
self.clicked = False
def rendertext(self, text):
self.textsurface = self.font.render(text, True, self.textcolor)
self.textsurfacerect = self.textsurface.get_rect()
def _converttext(self, text):
try:
value = self.convert(text)
if value <= self.maxval:
self.value = value
return True
else:
return False
except:
return False
def _changefocus(self, value):
if self.hasfocus != value:
self.hasfocus = value
if not self.hasfocus:
if not self.minval <= self.value <= self.maxval:
self.value = self.minval
self.text = str(self.value)
self.rendertext(self.text)
if self.lostfocusfunction != None:
self.lostfocusfunction(self)
#keydown event handler
def eventhandler(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
self._changefocus(self.rect.collidepoint(event.pos))
        # if the text box has focus, handle key input
if event.type == pygame.KEYDOWN and self.hasfocus:
if event.key == pygame.K_RETURN:
self._changefocus(False)
elif event.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
self._converttext(self.text)
else:
if len(self.text) < self.maxtextlength:
text = self.text
text += event.unicode
if self._converttext(text):
self.text += event.unicode
self.rendertext(self.text)
def update(self):
mousepos = pygame.mouse.get_pos()
if self.rect.collidepoint(mousepos):
self.bordercolor = self.bordercolorhover
else:
            self.bordercolor = self.bordercolordefault
return
def draw(self, screen):
#switch color to current state color, clicked before hovering
if self.hasfocus:
color = self.activecolorbackground
else:
color = self.inactivecolorbackground
#draw button rect
self.textbox.fill(color)
pygame.draw.rect(self.textbox, self.bordercolor, self.borderrect, 2)
screen.blit(self.textbox, (self.pos[0]-self.rect.w/2, self.pos[1]-self.rect.h/2))
#draw text
screen.blit(self.textsurface, (self.pos[0]-self.textsurfacerect.w/2, self.pos[1]-self.textsurfacerect.h/2))
def valuefromfile(self):
self.value = self.convert(self.keydict[self.key])
self.text = str(self.value)
self.rendertext(self.text)
return
| [
"[email protected]"
] | |
fc9fee71cf034ce62cc202be2f52f1ff8a766382 | 7fe646e7d425bc3e1e0f2bac03c4df0f33dcf4ec | /chamado/apps.py | 73571d3c357d5521e84ff34963e3d71378a5a17c | [] | no_license | equeirozdenoronha/tickets | d77d13da08cf2f96ef6d6d2a28c8919719981b69 | 560b0f3dce2e2058b6875ba5f93ef175ab7afcb7 | refs/heads/master | 2022-12-11T19:05:18.367194 | 2018-04-22T19:25:19 | 2018-04-22T19:25:19 | 130,600,070 | 1 | 0 | null | 2022-12-07T23:51:09 | 2018-04-22T19:22:27 | Python | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class ChamadoConfig(AppConfig):
name = 'chamado'
| [
"[email protected]"
] | |
9281930d21b644e11996b40a59b910ca73f0d356 | d4b701776eb649dc28e9c892fd83a8d3f1351fb4 | /schooltogether/wsgi.py | c3d02c316fac0e2b7ff33bff0edac25885de46c4 | [] | no_license | jouanneaur/Projet-Applicatif | 4d7788ef45d500ff33c221e4d397f41da2469cb8 | d8b6f3575d1f7027142e02d4229f3bae5482252d | refs/heads/master | 2020-08-11T22:10:35.371909 | 2020-01-20T17:37:00 | 2020-01-20T17:37:00 | 214,637,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for schooltogether project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'schooltogether.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
a2ab682b9768d771d61598f9a4775200e11716dd | e520ddd5000de801d670b28132b7bee2c6925ffa | /estudo_portatil/views/correctionViewSet.py | 40bee06bc72b6b119e4c528ffde3653924ef363d | [] | no_license | salvachz/estudoPortatil | 7fba16b4b5555c402e29784ffa4d2d35abc0b199 | 74e285541f6f4b99dc8b09a4d6460a9784b6a3aa | refs/heads/master | 2020-06-19T17:38:04.004987 | 2017-04-14T01:50:16 | 2017-04-14T01:50:16 | 74,843,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | from rest_framework.response import Response
from rest_framework import authentication, permissions, viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from django.utils import timezone
from estudo_portatil.models import UserProfile, Correction, Wording, CorrectionItem
from estudo_portatil.serializers import CorrectionSerializer, CorrectionItemSerializer
class CorrectionViewSet(viewsets.ModelViewSet):
authentication_classes = (SessionAuthentication, BasicAuthentication)
permission_classes = (permissions.IsAuthenticated,)
#permission_classes = ()
queryset = Correction.objects.all()
serializer_class = CorrectionSerializer
def create(self, request, format = None):
data = request.data
print 'data:',data
if data.get('wording_id',None):
wording = Wording.objects.get(id=data['wording_id'])
profile = UserProfile.objects.get(id=request.user.id)
print profile
correction, created = Correction.objects.get_or_create(wording=wording, corrected_by=profile)
correction.score = data.get('score',0)
for item_id in xrange(1,10):
item_in = data.get(str(item_id), None)
if item_in:
correctionItem = CorrectionItem.objects.create(correction=correction,number=item_id, item_text=item_in)
correctionItem.save()
correction.save()
serializer = CorrectionSerializer(correction, many=False)
return Response(serializer.data)
    def retrieve(self, request, pk=None):
profile = UserProfile.objects.get(id=request.user.id)
wording = Wording.objects.get(id=pk)
queryset = Correction.objects.filter(wording=wording, corrected_by=profile)
serializer = CorrectionSerializer(queryset, many=True)
if serializer.data:
return Response(serializer.data[0])
return Response([])
| [
"[email protected]"
] | |
ea9c40dce4b1d666e29be1c4368b84744ddb28b7 | dbbcc80f4468db1d08d8e44dcb02b086fe2debfe | /manage.py | b8c9ad11decd1e19114c6a4ce32283abb95a5afb | [] | no_license | drsherlock/movienalyse | ee2b04b5f2b6a35e1496681720c191f4430585c8 | 1f1d01858bcd590f29cbb817aa31d0295ea20afe | refs/heads/master | 2021-01-10T12:21:19.010722 | 2016-03-15T17:41:58 | 2016-03-15T17:41:58 | 50,301,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "movienalyse.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
d90002bacce97e271e447cada97bfd8130957162 | c3367f9919cfc13702224ba248d72030e477f872 | /drift correction.py | 1650a05b9166edf1baecdaeeed6d42185a7e44a9 | [] | no_license | jaianthv/Nanoparticles_py | a88d9db9288c3721da014389148a2d96be6c19e2 | 5b2e5ac730e572e0375f5aa32dbdeb82b686b316 | refs/heads/main | 2023-04-25T11:14:26.534222 | 2021-05-17T16:18:27 | 2021-05-17T16:18:27 | 368,249,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | import peempy.imageproc as imp
import matplotlib.pyplot as plt
from skimage.data import astronaut
from skimage.color import rgb2grey
from scipy.ndimage import shift
import numpy as np
# n images
n_images = 20
base_img = rgb2grey(astronaut())
print (np.shape(base_img))
# Generate randomised shift vectors
rand_scale = 2
mean_drift_scale = 1
mean_drift_vec = np.random.random(2) * mean_drift_scale + np.array([1, 1])
insert_vec = np.random.random((n_images, 2)) * rand_scale
ind = np.arange(n_images).reshape((n_images, 1))
drift = ind * mean_drift_vec + insert_vec
drift -= drift[4]
print (np.shape(drift))
# Create the drifted image set
dataset = []
for i in range(n_images):
drifted = shift(base_img, drift[i])
dataset.append(drifted)
dataset = np.asarray(dataset)
print (np.shape(dataset))
# Construct corrector object
cor = imp.DriftCorrector(dataset, 4, ((411, 34), (325, 112)))
cor.calc_drifts()
cor.super_sample = 4
#%
# Plot
plt.subplot(211)
plt.plot(drift[:, 0], drift[:, 1], 'r-x', label="Image drift")
plt.plot(-cor.drifts[:, 0], -cor.drifts[:, 1], 'b-x', label="detected")
plt.legend()
plt.subplot(212)
plt.plot(-cor.drifts[:, 0] - drift[:, 0],
-cor.drifts[:, 1] - drift[:, 1],
".-",
label="Error")
plt.legend()
plt.show()
| [
"[email protected]"
] | |
e324c70afaed5d9a592520cfd6f9d8ce868489b1 | e1e08ca2df1caadc30b5b62263fa1e769d4904d8 | /stream/models/utils.py | ee41104f14bd821b66dd2e63eaca0fbc99d93663 | [
"LicenseRef-scancode-public-domain"
] | permissive | tiench189/ClassbookStore | 509cedad5cc4109b8fb126ad59e25b922dfae6be | 4fff9bc6119d9ec922861cbecf23a3f676551485 | refs/heads/master | 2020-12-02T07:48:26.575023 | 2017-07-10T02:45:09 | 2017-07-10T02:45:09 | 96,728,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,077 | py | #@author: hant
# Those functions are inserted in many service.
from datetime import *
from contrib.pbkdf2 import *
import os
import sys
import traceback
import usercp
import fs.path
import StringIO
import urllib2
sys.path.append('/home/pylibs/pdflib')
ERR_TIME_OUT = CB_0011
ERR_TOKEN = CB_0012
SUCCES = CB_0000
db_RQ_FAILD = CB_0003
MAXIMUM_ALLOWABLE_DEVICE_ACTIVE = CB_0017
MAXIMUM_ALLOWABLE_DEVICE_IN_USE = CB_0020
MAXIMUM_ALLOWABLE_TIME_SET_DEVICE_IN_USE = CB_0021
#DOMAIN_VDC = "123.30.179.205"
DOMAIN_VDC = "classbook.vn"
def timeOut(name, t):
if not name or not t:
return CB_0019 #NULL_ARGUMENT
table = 'clsb_user'
# name = request.args(0)
# t = request.args(1)
row = db(db[table].username==name).select(db[table].user_token, db[table].lastLoginTime).first()
if not row:
return USER_NAME_NOT_EXIST
if row['user_token'] != None and t != None and row['user_token'] == t:
#if datetime.now() < row['lastLoginTime'] + TIME_OUT:
db(db[table].username == name).update(lastLoginTime = datetime.now())
return SUCCES
#else:
# return ERR_TIME_OUT
else:
#db(db[table].username == name).update(user_token = None, lastLoginTime = datetime.now())
return ERR_TOKEN
def checkTimeOut(name, t):
return SUCCES
if not name or not t:
return CB_0019 #NULL_ARGUMENT
table = 'clsb_user'
# name = request.args(0)
# t = request.args(1)
row = db(db[table].username==name).select(db[table].user_token, db[table].lastLoginTime).first()
if not row:
return USER_NAME_NOT_EXIST
#if datetime.now() < row['lastLoginTime'] + TIME_OUT:
if row['user_token'] != None and t != None and row['user_token'] == t:
db(db[table].username == name).update(lastLoginTime = datetime.now())
return SUCCES
else:
return ERR_TOKEN
#else:
# db(db[table].username == name).update(user_token = None, lastLoginTime = datetime.now())
# return ERR_TIME_OUT
def updateLogTime(name):
table = 'clsb_user'
try:
db(db[table].username == name).update(lastLoginTime = datetime.now())
return SUCCES
except Exception as e:
return db_RQ_FAILD
# get a user's number-devices-active. Params: user_id, url to disable device to have the required nb of device. maxNbD=3
def verifyNbDevice(userid):
table = 'clsb_device'
try:
devices_active = db(db[table].user_id == userid)(db[table].status==True).select()
if devices_active and len(devices_active) >= MAX_D_ACTIVE:
return MAXIMUM_ALLOWABLE_DEVICE_ACTIVE
else:
return SUCCES
except Exception as e:
return db_RQ_FAILD
# get a user's number-devices-in-use. Params: user_id, url to disable device to have the required nb of device. maxNbD=1
def verifyInUseDevice(userid):
table = 'clsb_device'
try:
        devices_active = db(db[table].user_id == userid)(db[table].in_use==True).select()
if devices_active and len(devices_active) != MAX_D_IN_USE:
return MAXIMUM_ALLOWABLE_DEVICE_IN_USE
else:
return SUCCES
except Exception as e:
return db_RQ_FAILD
def verifyValidTime(userid):
table = 'clsb_user'
try:
valid_time = db(db[table]._id == userid).select(db[table].valid_time).as_list()
valid_time = valid_time[0]['valid_time']
if valid_time == None:
            return CB_0007  # data error, e.g. the value is None
if valid_time < MAX_VALID_TIME_SET_DEFAULT:
return SUCCES
else:
return MAXIMUM_ALLOWABLE_TIME_SET_DEVICE_IN_USE
except Exception as e:
return db_RQ_FAILD
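# A sketch (hypothetical helper name) of how the three checks above are
# presumably chained when a user tries to activate a device.
def can_activate_device(userid):
    for check in (verifyNbDevice, verifyInUseDevice, verifyValidTime):
        result = check(userid)
        if result != SUCCES:
            return result
    return SUCCES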
#add log for store 2.0
def log_20(params, insert):
try:
db.clsb_user_log.insert(user_id=params['userID'], user_action='DOWNLOAD',
date_created=datetime.now,
search_text=params['searchTxt'],
product_code=params['pcode'],
ip_address=params['clientIP'],
)
product = db(db.clsb_product.product_code == params['pcode']).select(db.clsb_product._id)
if len(product) > 0:
try:
product_id = product.first()['id']
device_serial = params['dserial']
except_device = db(db.clsb20_device_exception.device_serial == device_serial).select(db.clsb20_device_exception.device_serial).as_list()
if len(except_device) > 0:
price = 0
else:
price = params['price']
if insert:
new_log = db.clsb_download_archieve.insert(
user_id=params['userID'],
product_id=product_id,
price=price,
download_time=datetime.now(),
purchase_type=params['purchase_type'],
rom_version=params['rom_version'],
device_serial=params['dserial'],
status=params['status']
)
#return id from log
return new_log['id']
else:
#update log by id
log_data = db((db.clsb_download_archieve.id == params['log_id']) & (db.clsb_download_archieve.status.like("Inprogress")))
if len(log_data.select()) > 0:
db(db.clsb_download_archieve.id == params['log_id']).update(price=price, status=params['status'])
return "OK"
else:
return False
except Exception as e:
print e
return False
except Exception as e:
print e
return False
def log(params, insert):
# STATUS = 'Completed'
try:
db.clsb_user_log.insert(user_id=params['userID'], user_action='DOWNLOAD',
date_created=datetime.now,
search_text=params['searchTxt'],
product_code=params['pcode'],
ip_address=params['clientIP'],
)
mytable = db.clsb_download_archieve
prodID = None
try:
prodID = db(db.clsb_product.product_code==params['pcode']).select(db.clsb_product._id).as_list()
prodID = prodID[0]['id']
if prodID :
# and not db(mytable.user_id==params['userID'] and
# mytable.device_serial==params['dserial'] and
# mytable.product_id==prodID).select():
if insert:
db.clsb_download_archieve.insert(user_id=params['userID'],
product_id=prodID,
price=params['price'],#add price for download_archieve
download_time=datetime.now(),
purchase_type=params['purchase_type'],
rom_version=params['rom_version'],
device_serial=params['dserial'],
status=params['status'],)
else:
download_id = db(db.clsb_download_archieve.user_id == params['userID'])\
(db.clsb_download_archieve.product_id == prodID)\
(db.clsb_download_archieve.device_serial == params['dserial'])\
(~db.clsb_download_archieve.status.like("Completed")).select(orderby=db.clsb_download_archieve.download_time).as_list()
download_id = download_id[-1]['id']
db(db.clsb_download_archieve.id == download_id).update(price=params['price'], status=params['status'])
return SUCCES
except Exception as e:
if not prodID:
return PRODUCT_CODE_NOT_EXIST
return db_RQ_FAILD + str(e)
except Exception as e:
return db_RQ_FAILD + str(e)
def pay(username, total, product_id, oldCBM):
import applications.cbs.modules.transaction as transaction
try:
#message content to send user
message = 'Tài khoản của bạn đã bị khóa, vui lòng liên hệ với quản trị viên để biết thêm chi tiết !'
subject = 'Tài khoản ClassBook bị khóa'
user_id = db(db.clsb_user.username == username).select(db.clsb_user.id).as_list()[0]['id']
if not user_id:
return CB_0010 # Tên đăng nhập không tồn tại
# total = request.args(1)
# username = request.args(0)
user_cash = db(db.clsb_user.username == username).select(db.clsb_user.fund, db.clsb_user.data_sum).as_list()
user_cash = user_cash[0]['fund']
# remove check user_cash /TanBM 03/01/201
# if user_cash < total or user_cash < 0:
# return dict(error=CB_0023)
#if db.clsb_user.data_sum != transaction.encrypt(db, user_cash, username):
#db(db.clsb_user.username == username).update(status=False)
# send mail to user
# get user email
#user_email = db(db.clsb_user.username == username).select(db.clsb_user.email).as_list()
#user_email = user_email[0]['email']
#try:
# mail.send(to=[user_email], subject=subject, message=message)
# return dict(item=CB_0000)
#except Exception as e:
# print str(e)
# return dict(error=CB_0006)
#return CB_0006
# check new purchase
new_fund = user_cash
query = db(db["clsb_product"].id == product_id)
query = query(db["clsb20_product_purchase_item"].product_code == db["clsb_product"].product_code)
query = query(db["clsb20_purchase_item"].id == db["clsb20_product_purchase_item"].purchase_item)
query = query(db["clsb20_purchase_type"].id == db["clsb20_purchase_item"].purchase_type)
product_purchases = query.select(
# db["clsb20_product_purchase_item"].discount,
db["clsb20_purchase_type"].name,
# db["clsb20_purchase_type"].name, db["clsb20_purchase_item"].times,
db["clsb20_purchase_item"].duration, db['clsb20_purchase_item'].id)
if len(product_purchases) == 0:
rows = db((db.clsb_download_archieve.user_id == user_id) & (db.clsb_download_archieve.product_id == product_id) & (db.clsb_download_archieve.status.like("Completed") | db.clsb_download_archieve.status.like("TestSuccess"))).select(db.clsb_download_archieve.status)
if len(rows) > 0:
pass
else:
if oldCBM:
new_fund -= total / 2
else:
new_fund -= total
else:
from datetime import datetime
from datetime import timedelta
product_purchase = product_purchases.first()
if product_purchase.clsb20_purchase_type.name.upper() != "FREE":
if product_purchase.clsb20_purchase_type.name.upper() != "NONCONSUMABLE":
query = db(db["clsb20_user_purchase_item"].user_id == user_id)
query = query(db["clsb20_user_purchase_item"].purchase_id == product_purchase.clsb20_purchase_item.id)
user_purchases = query.select(db["clsb20_user_purchase_item"].id,
# db["clsb20_user_purchase_item"].times,
db["clsb20_user_purchase_item"].day_end)
is_expired_date_or_time = False
if len(user_purchases) > 0:
user_purchase = user_purchases.first()
# if user_purchase.times > 0:
# db(db["clsb20_user_purchase_item"].id == user_purchase.id).update(times=user_purchase.times-1)
# elif user_purchase.times < 0 or user_purchase.day_end > datetime.today():
# pass
# else:
# if oldCBM:
# new_fund -= total / 2
# else:
# new_fund -= total
# day_end = datetime.today() + timedelta(days=product_purchase.clsb20_purchase_item.duration)
# query = db(db["clsb20_user_purchase_item"].id == user_purchase.id)
# query.update(day_end=day_end, times=product_purchase.clsb20_purchase_item.times)
# db["clsb20_purchase_renew_history"].insert(user_id=user_id, product_id=product_id, date_do_renew=datetime.today())
if oldCBM:
new_fund -= total / 2
else:
new_fund -= total
day_end = datetime.today() + timedelta(days=product_purchase.clsb20_purchase_item.duration)
query = db(db["clsb20_user_purchase_item"].id == user_purchase.id)
query.update(day_end=day_end)
# , times=product_purchase.clsb20_purchase_item.times)
db["clsb20_purchase_renew_history"].insert(user_id=user_id, product_id=product_id, date_do_renew=datetime.today())
else:
if oldCBM:
new_fund -= total / 2
else:
new_fund -= total
# if product_purchase.clsb20_purchase_item.times == 0:
# day_end = datetime.today() + timedelta(days=product_purchase.clsb20_purchase_item.duration)
# db["clsb20_user_purchase_item"].insert(user_id=user_id, product_id=product_id,
# times=product_purchase.clsb20_purchase_item.times,
# day_end=day_end)
# db["clsb20_purchase_renew_history"].insert(user_id=user_id, product_id=product_id,
# date_do_renew=datetime.today())
day_end = datetime.today() + timedelta(days=product_purchase.clsb20_purchase_item.duration)
db["clsb20_user_purchase_item"].insert(user_id=user_id, purchase_id=product_purchase.clsb20_purchase_item.id,
# times=product_purchase.clsb20_purchase_item.times,
day_end=day_end)
db["clsb20_purchase_renew_history"].insert(user_id=user_id, product_id=product_id,
date_do_renew=datetime.today())
else:
change_time_first = db(db.clsb20_product_price_history.product_id == product_id)\
(db.clsb20_product_price_history.purchase_item == product_purchase.clsb20_purchase_item.id).select(orderby=db.clsb20_product_price_history.changing_time)
if len(change_time_first) > 0:
change_time_first = change_time_first.first()
rows = db(db.clsb_download_archieve.user_id == user_id)(db.clsb_download_archieve.product_id == product_id)\
(db.clsb_download_archieve.download_time >= change_time_first.changing_time).select(db.clsb_download_archieve.status)
if len(rows) > 0:
pass
else:
if oldCBM:
new_fund -= total / 2
else:
new_fund -= total
else:
rows = db((db.clsb_download_archieve.user_id == user_id) & (db.clsb_download_archieve.product_id == product_id) & (db.clsb_download_archieve.status.like("Completed") | db.clsb_download_archieve.status.like("TestSuccess"))).select(db.clsb_download_archieve.status)
if len(rows) > 0:
pass
else:
if oldCBM:
new_fund -= total / 2
else:
new_fund -= total
data_sum = transaction.encrypt(new_fund, username)
db(db.clsb_user.username == username).update(fund=new_fund, data_sum=data_sum)
data = dict(record_id=user_id, table_name='clsb_user', key_unique='username')
insert_to_log_temp(data)
# url_update_fund = URL(host=DOMAIN_VDC, a='cbs20', c="sync2vdc", f="update_user_fund",
# vars=dict(fund=new_fund, data_sum=data_sum, username=username))
# print(url_update_fund)
# urllib2.urlopen(url_update_fund)
return CB_0000 # SUCCESS
except:
import traceback
traceback.print_exc()
# print "Error at pay() in modules/transaction.py: " + str(e) +" on line: "+str(sys.exc_traceback.tb_lineno)
return str(sys.exc_traceback.tb_lineno) # db_RQ_FAILD
# Temporal fct to add cash for user account when user add a new device
def fund(username):
# username = request.args(0)
CASH = 200000
if db(db.clsb_user.username == username).update(fund=db.clsb_user.fund + CASH):
user_id = db(db.clsb_user.username == username).select().first()['id']
data = dict(record_id=user_id, table_name='clsb_user', key_unique='username')
insert_to_log_temp(data)
# url_update_fund = URL(host=DOMAIN_VDC, a='cbs20', c="sync2vdc", f="update_user_fund",
# vars=dict(fund=db.clsb_user.fund + CASH, data_sum="cash", username=username))
# print(url_update_fund)
# urllib2.urlopen(url_update_fund)
return CB_0000 #SUCCESS
else:
return CB_0006 #Faillure
def str2price(value):
i = 0
price = ''
for index in range(len(value) - 1, -1, -1):
i += 1
price = value[index] + price
if i == 3:
price = '.' + price
i = 0
if price[0] == '.':
price = price[1:]
return u'Không thu phí' if price == '0' else price + '₫'
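# Examples of the formatting above (values are illustrative):
#   str2price('1234567') -> u'1.234.567₫'
#   str2price('0')       -> u'Không thu phí'  (no charge)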
#add check product for old version
def check_product_for_old_version(product_code):
query = db(db["clsb_product"].product_code == product_code)
query = query(db["clsb20_product_purchase_item"].product_code == db["clsb_product"].product_code)
query = query(db["clsb20_purchase_item"].id == db["clsb20_product_purchase_item"].purchase_item)
query = query(db["clsb20_purchase_type"].id == db["clsb20_purchase_item"].purchase_type)
product_purchases = query.select(
db["clsb_product"].product_price,
db["clsb20_purchase_type"].name,
db["clsb20_purchase_item"].duration, db['clsb20_purchase_item'].id
)
purchase = False
if len(product_purchases) > 0:
product_purchase = product_purchases.first()
if (product_purchase.clsb20_purchase_type.name.upper() != "NONCONSUMABLE") & (product_purchase.clsb20_purchase_type.name.upper() != "FREE"):
purchase = True
type_name = db((db.clsb_product.product_code == product_code) & (db.clsb_product.product_price > 0))\
(db.clsb_product.product_category == db.clsb_category.id)\
(db.clsb_category.category_type == db.clsb_product_type.id)\
(db.clsb_product_type.type_name.like("Application") | db.clsb_product_type.type_name.like("Exercise")).select()
if len(type_name) > 0:
purchase = True
return purchase
def get_purchase_description(code):
purchase = db(db.clsb20_product_purchase_item.product_code == code)\
(db.clsb20_purchase_item.id == db.clsb20_product_purchase_item.purchase_item)\
(db.clsb20_purchase_type.id == db.clsb20_purchase_item.purchase_type).select()
if len(purchase) > 0:
purchase = purchase.first()["clsb20_purchase_type"]["description"]
else:
purchase = "Thanh toán cho lần đầu tiên tải về"
return purchase
#add check ota_update
def check_ota_update(code):
data = db(db.clsb_ota_version.software.like(code)).select()
if len(data) > 0:
return True
return False
def pay_to_log(user, product, classbook_device, end_buy=False):
if classbook_device and check_free_for_classbook(product['clsb_category']['id']):
return True
if not check_free_for_classbook(product['clsb_category']['id']):
downloaded = db(db.clsb_download_archieve.product_id == product['clsb_product']['id'])(db.clsb_download_archieve.status.like("Completed"))(db.clsb_download_archieve.user_id == user['id']).select()
if len(downloaded) > 0:
return True
"""
    Purchases for a Classbook device at the textbook (SGK) price are not logged
"""
import applications.cbs20.modules.transaction as transaction
check_buy = db(db.clsb30_product_history.product_id == product['clsb_product']['id'])(db.clsb30_product_history.user_id == user['id']).select()
if len(check_buy) > 0:
return True
if not end_buy:
user_cash = db(db.clsb_user.id == user['id']).select(db.clsb_user.fund, db.clsb_user.data_sum).as_list()
user_cash = user_cash[0]['fund']
new_fund = user_cash - product['clsb_product']['product_price']
if new_fund < 0:
return dict(error='Tiền trong tài khoản không đủ')
data_sum = transaction.encrypt(new_fund, user['username'])
db(db.clsb_user.username == user['username']).update(fund=new_fund, data_sum=data_sum)
user_id = db(db.clsb_user.username == user['username']).select().first()['id']
data = dict(record_id=user_id, table_name='clsb_user', key_unique='username')
insert_to_log_temp(data)
# url_update_fund = URL(host=DOMAIN_VDC, a='cbs20', c="sync2vdc", f="update_user_fund",
# vars=dict(fund=new_fund, data_sum=data_sum, username=user['username']))
# print(url_update_fund)
# urllib2.urlopen(url_update_fund)
if len(db(db.clsb30_product_history.product_id == product['clsb_product']['id'])(db.clsb30_product_history.user_id == user['id']).select()) <= 0:
insert_buy = db.clsb30_product_history.insert(
product_title=product['clsb_product']['product_title'],
product_id=product['clsb_product']['id'],
user_id=user['id'],
category_id=product['clsb_category']['id'],
product_price=product['clsb_product']['product_price']
)
data = dict(record_id=str(insert_buy), table_name='clsb30_product_history', key_unique='user_id.product_id')
insert_to_log_temp(data)
# url_sign = URL(host=DOMAIN_VDC, a='cbs20', c="sync2vdc", f="sign_buy_product", args=["Product",
# product['clsb_product']['id'],
# user['username'],
# product['clsb_category']['id'],
# pay])
# print(url_sign)
# urllib2.urlopen(url_sign)
return True
def pay_to_log_divide(user, product, classbook_device, isMedia, pay, end_buy=False):
# if classbook_device and check_free_for_classbook(product['clsb_category']['id']):
# print('return1')
# return "True 1"
if isMedia.lower() == 'false':
if not check_free_for_classbook(product['clsb_category']['id']):
downloaded = db(db.clsb_download_archieve.product_id == product['clsb_product']['id'])(db.clsb_download_archieve.status.like("Completed"))(db.clsb_download_archieve.user_id == user['id']).select()
if len(downloaded) > 0:
print('return2')
return "True 2"
"""
    Purchases for a Classbook device at the textbook (SGK) price are not logged
"""
import applications.cbs20.modules.transaction as transaction
if isMedia.lower() == 'true':
check_buy = db(db.clsb30_media_history.product_id == product['clsb_product']['id'])(db.clsb30_media_history.user_id == user['id']).select()
print(check_buy)
if len(check_buy) > 0:
print('return3')
return "True 3"
else:
check_buy = db(db.clsb30_product_history.product_id == product['clsb_product']['id'])(db.clsb30_product_history.user_id == user['id']).select()
print(check_buy)
if len(check_buy) > 0:
print('return4')
return "True 4"
if not end_buy:
user_cash = db(db.clsb_user.id == user['id']).select(db.clsb_user.fund, db.clsb_user.data_sum).as_list()
user_cash = user_cash[0]['fund']
new_fund = int(user_cash) - int(pay)
if new_fund < 0:
print('return5')
return dict(error='Tiền trong tài khoản không đủ')
print('tiench new_fund: ' + str(new_fund))
data_sum = transaction.encrypt(new_fund, user['username'])
db(db.clsb_user.username == user['username']).update(fund=new_fund, data_sum=data_sum)
user_id = db(db.clsb_user.username == user['username']).select().first()['id']
data = dict(record_id=user_id, table_name='clsb_user', key_unique='username')
insert_to_log_temp(data)
# url_update_fund = URL(host=DOMAIN_VDC, a='cbs20', c="sync2vdc", f="update_user_fund",
# vars=dict(fund=new_fund, data_sum=data_sum, username=user['username']))
# print(url_update_fund)
# update_result = urllib2.urlopen(url_update_fund)
# print(update_result.read())
if isMedia.lower() == 'true':
print("tiench insert media: " + str(isMedia))
if len(db(db.clsb30_media_history.product_id == product['clsb_product']['id'])(db.clsb30_media_history.user_id == user['id']).select()) <= 0:
media_insert = db.clsb30_media_history.insert(
product_title=product['clsb_product']['product_title'],
product_id=product['clsb_product']['id'],
user_id=user['id'],
category_id=product['clsb_category']['id'],
product_price=pay
)
params = {'searchTxt': 'ND',
'clientIP': '',
'dserial': "",
'pcode': product['clsb_product']['product_code'],
'purchase_type': 'WEB_PAY',
'rom_version': "CLASSBOOK.APP",
'userID': user_id,
'price': int(pay),
'status': 'Completed'}
log_20(params, True)
db.clsb30_payment_log.insert(user_id=user_id, product_id=product['clsb_product']['id'],
product_type='MEDIA', pay=int(pay))
data = dict(record_id=str(media_insert), table_name='clsb30_media_history', key_unique='user_id.product_id')
insert_to_log_temp(data)
# url_sign = URL(host=DOMAIN_VDC, a='cbs20', c="sync2vdc", f="sign_buy_media", args=["Product",
# product['clsb_product']['id'],
# user['username'],
# product['clsb_category']['id'],
# pay])
# print(url_sign)
# urllib2.urlopen(url_sign)
else:
print("tiench insert product: " + str(isMedia))
try:
if len(db(db.clsb30_product_history.product_id == product['clsb_product']['id'])(db.clsb30_product_history.user_id == user['id']).select()) <= 0:
product_insert = db.clsb30_product_history.insert(
product_title=product['clsb_product']['product_title'],
product_id=product['clsb_product']['id'],
user_id=user['id'],
category_id=product['clsb_category']['id'],
product_price=pay
)
params = {'searchTxt': 'ND',
'clientIP': '',
'dserial': "",
'pcode': product['clsb_product']['product_code'],
'purchase_type': 'WEB_PAY',
'rom_version': "CLASSBOOK.APP",
'userID': user_id,
'price': int(pay),
'status': 'Completed'}
log_20(params, True)
db.clsb30_payment_log.insert(user_id=user_id, product_id=product['clsb_product']['id'],
product_type='PRODUCT', pay=int(pay))
data = dict(record_id=str(product_insert), table_name='clsb30_product_history', key_unique='user_id.product_id')
insert_to_log_temp(data)
# url_sign = URL(host=DOMAIN_VDC, a='cbs20', c="sync2vdc", f="sign_buy_product", args=["Product",
# product['clsb_product']['id'],
# user['username'],
# product['clsb_category']['id'],
# pay])
# print(url_sign)
# urllib2.urlopen(url_sign)
except Exception as err:
print("Error: " + err)
return dict(error=str(err))
return "True final"
def check_free_for_classbook(category_id):
try:
parent_id = db(db.clsb_category.id == category_id).select().first()['category_parent']
list = db((db.clsb30_category_classbook_device.product_category == category_id) | (db.clsb30_category_classbook_device.product_category == parent_id)).select()
if len(list) <= 0:
return False
return True
except Exception as ex:
print ex.message + " on line: "+str(sys.exc_traceback.tb_lineno)
return False
def make_zip_nomedia(path, code, file):
path_in = os.path.join(path, file)
path_out = os.path.join(path, file+".nomedia")
if os.path.exists(path_out):
pass
else:
import zipfile
z_in = zipfile.ZipFile(settings.home_dir+path_in, "r")
z_out = zipfile.ZipFile(settings.home_dir+path_out, "w")
try:
z_out.writestr(code+"/book_config/config.xml", z_in.read(code+"/book_config/config.xml"))
z_out.writestr(code+"/book_config/.nomedia", z_in.read(code+"/book_config/.nomedia"))
z_out.writestr(code+"/book_config/cover.clsbi21", z_in.read(code+"/book_config/cover.clsbi21"))
z_out.writestr(code+"/book_config/cover.clsbi21", z_in.read(code+"/book_config/cover.clsbi20"))
except:
pass
z_in.close()
z_out.close()
return path_out
## remove Vietnamese diacritics
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import unicodedata
def remove_viet_accents(str):
''' Helper function: Remove Vietnamese accent for string '''
nkfd_form = unicodedata.normalize('NFKD', unicode(str, 'utf-8'))
return u"".join([c for c in nkfd_form if not unicodedata.combining(c)]).replace(u'\u0111','d').replace(u'\u0110', 'D')
def check_media(product_code):#params: product_code
try:
response.generic_patterns = ['*']
# product_code = request.args(0)
check_cp = db(db.clsb_product.product_code.like(product_code))(db.clsb20_product_cp.product_code.like(product_code)).select()
import Image
if len(check_cp) > 0:
cpid = usercp.user_get_id_cp(check_cp.first()['clsb20_product_cp']['created_by'], db)
path = fs.path.pathjoin(settings.cp_dir, "CP%s" % cpid, 'published', product_code)
else:
path = fs.path.pathjoin(product_code)
product_files = osFileServer.listdir(path=path, wildcard=product_code + ".[Zz][Ii][Pp]", files_only=True)
if len(product_files) == 0:
return dict(check=False)
else:
check = check_media_in_zip(fs.path.pathjoin(settings.home_dir, path, product_files[0]), product_code)
print('check' + str(check))
return dict(check=check)
except Exception as ex:
print('tiench' + str(ex))
return dict(check=False)
def check_media_in_zip(path, product_code):
    try:
        import zipfile
        zip_file = zipfile.ZipFile(path, "r")
        # the product has media only if some member sits under <CODE>/media/
        for name in [member.filename for member in zip_file.infolist()]:
            if name.startswith(product_code.upper() + "/media/"):
                zip_file.close()
                return True
        zip_file.close()
        return False
    except Exception as err:
        print('tiench' + str(err))
        return False
def server_version():
    return settings.server_ver
def check_version_mp(app_ver):
if 'ios' not in app_ver.lower():
return False
version = app_ver.split('_')[len(app_ver.split('_')) - 1]
print(version)
check = db(db.clsb30_fake_ios.fake_name == 'ios').select()
if len(check) == 0:
return False
if int(check[0]['fake_value']) == 0:
return False
elif int(check[0]['fake_value']) == 1:
return True
else:
try:
data = parsr_data_istore("942940905")
if int(data['resultCount']) > 0:
if version > data['results'][0]['version']:
return True
else:
return False
else:
return True
except Exception as err:
return True
def parsr_data_istore(id):
import urllib, json
url = "https://itunes.apple.com/lookup?id=" + id
response = urllib.urlopen(url)
data = json.loads(response.read())
return data
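# Example use of the iTunes lookup endpoint above; "942940905" is the app id
# already hard-coded in check_version_mp:
#   data = parsr_data_istore("942940905")
#   if int(data['resultCount']) > 0:
#       latest_version = data['results'][0]['version']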
#########tiench insert to log temp###################
INIT = "init"
import time
from datetime import datetime
def write_log(file_name, content):
try:
log_file = open("/home/www-data/web2py/applications/" + file_name + ".txt", 'a+')
log_file.write(content + " " + str(datetime.now()) + "\n")
log_file.close()
return True
except Exception as err:
return str(err) + " on line: "+str(sys.exc_traceback.tb_lineno)
def sync_a_record():
try:
write_log("sync_log", "0")
data_log = db(db.clsb30_sync_temp.status == INIT).select()
if len(data_log) == 0:
return dict(result=False, code="FINISH")
write_log("sync_log", "1")
data_log = data_log.first()
get_data = get_data_sync(data_log['record_id'], data_log['table_name'], data_log['key_unique'])
write_log("sync_log", "2")
get_data['table_name'] = data_log['table_name']
print("get_data: " + str(get_data))
result = sync_data_to_db(get_data)
write_log("sync_log", "3")
db(db.clsb30_sync_temp.id == data_log['id']).delete()
write_log("sync_log", str(result))
if result['result']:
return True
else:
db.clsb30_sync_temp.insert(record_id=data_log['record_id'], table_name=data_log['table_name'],
status=INIT, key_unique=data_log['key_unique'])
return False
return dict(result=True)
except Exception as err:
write_log("sync_log", "4" + str(err) + " on line: "+str(sys.exc_traceback.tb_lineno))
return dict(result=False, code="ERROR", error=str(err) + " on line: "+str(sys.exc_traceback.tb_lineno))
def insert_to_log_temp(data):
try:
record_id = data['record_id']
table_name = data['table_name']
key_unique = data['key_unique']
check_exist = db(db.clsb30_sync_temp.record_id == str(data['record_id']))\
(db.clsb30_sync_temp.table_name == str(data['table_name']))\
(db.clsb30_sync_temp.status == "init").select()
if len(check_exist) == 0:
try:
#get_data = get_data_sync(record_id, table_name, key_unique)
#get_data['table_name'] = table_name
#print("get_data: " + str(get_data))
#result = sync_data_to_db(get_data)
#if not result['result']:
db.clsb30_sync_temp.insert(record_id=record_id, table_name=table_name,
status=INIT, key_unique=key_unique)
#return result
except Exception as err:
print(err)
except Exception as e:
print(e)
return False
def sync_data_to_db(get_data):
db_sync = connect_db_sync()
query_bug = ""
try:
unique_data = get_data['unique']
data = get_data['data']
table_name = get_data['table_name']
if 'username' in unique_data:
users = db_sync.executesql("SELECT * FROM clsb_user WHERE username" + "='" + unique_data['username'] + "'")
if len(users) == 0 and table_name != 'clsb_user':
return dict(result=False, error=CB_0001)
if len(users) > 0:
user = users[0]
if 'user_id' in data:
data['user_id'] = user[0]
unique_data['user_id'] = user[0]
if 'username' not in data:
del unique_data['username']
# return dict(data=data)
query_unique = ""
for key in unique_data.keys():
if table_name != 'clsb_device' or key != 'user_id':
if query_unique != "":
query_unique += " AND "
query_unique += str(key) + "='" + str(unique_data[key]) + "'"
check_exist = False
query_bug = "SELECT * FROM " + table_name + " WHERE " + query_unique
if len(unique_data) > 0:
check_data = db_sync.executesql("SELECT * FROM " + table_name + " WHERE " + query_unique)
if len(check_data) > 0:
check_exist = True
print("exist: " + str(check_exist))
if check_exist:
query_data_update = "UPDATE " + table_name + " SET "
data_update = ""
for key in data.keys():
if key not in unique_data:
print(key)
if data_update != "":
data_update += ","
if data[key] == None:
data_update += str(key) + "=null"
else:
try:
data_update += str(key) + "='" + data[key].encode('utf-8') + "'"
except Exception as err:
try:
data_update += str(key) + "='" + str(data[key]) + "'"
except Exception as e:
print("ERR: " + str(e))
print(data_update)
query_data_update += data_update
query_data_update += " WHERE " + query_unique
print(query_data_update)
query_bug = query_data_update
try:
db_sync.executesql(query_data_update)
return dict(result=True)
except Exception as err:
print('err sql: ' + str(err))
return dict(result=False, error=str(err) + " on line: "+str(sys.exc_traceback.tb_lineno))
else:
str_field = ""
str_value = ""
for key in data.keys():
if data.keys().index(key) > 0:
str_field += ","
str_value += ","
str_field += str(key)
print("data " + str(key) + ": ")
if data[key] == None:
str_value += "null"
else:
try:
str_value += "'" + data[str(key)].encode("utf-8") + "'"
except Exception as err:
print(err)
str_value += "'" + str(data[str(key)]) + "'"
query_data_insert = "INSERT INTO " + table_name + "(" + str_field + ") VALUES (" + str_value + ")"
print(query_data_insert)
query_bug = query_data_insert
try:
db_sync.executesql(query_data_insert)
return dict(result=True)
except Exception as err:
return dict(result=False, error=str(err) + " on line: "+str(sys.exc_traceback.tb_lineno))
except Exception as err:
return dict(result=False, error=str(err) + " - " + query_bug + " on line: "+str(sys.exc_traceback.tb_lineno) + ":" + str(get_data))
def get_data_sync(record_id, table_name, key_unique): #record_id, table_name, key_unique
try:
data_result = db.executesql("SELECT * FROM " + table_name + " WHERE id =" + str(record_id), as_dict=True)
if len(data_result) == 0:
return dict(result=False, err="no record")
print(data_result[0])
data = data_result[0]
del data['id']
unique_dict = dict()
for unique in key_unique.split('.'):
if unique == 'user_id':
user = db(db.clsb_user.id == data['user_id']).select().first()
unique_dict['username'] = user['username']
else:
unique_dict[unique] = data[unique]
return dict(data=data, unique=unique_dict)
except Exception as err:
return dict(result=False, err=str(err)+" on line: "+str(sys.exc_traceback.tb_lineno))
return dict(result=False, err="UNKNOWN")
def connect_db_sync():
return DAL(settings.database_sync,
pool_size=1, check_reserved=['all'],
migrate_enabled=settings.migrate, decode_credentials=True, db_codec='UTF-8') | [
"[email protected]"
] | |
304196f5503465038ec51dcf77bc133495327bf5 | 08f1cd2ba1f5c441c609e44219148b49ecc27f97 | /商城优惠券系统.py | 20a6a0aeb071370db93b448316aa421465a1178b | [] | no_license | 919074006/first | fe05537ff4506fb3dcfa7b4b19b05fe282afdc26 | 8baee6b072c4ada8cb02d1d24b0af3f32cf16a51 | refs/heads/master | 2023-06-15T05:00:36.012607 | 2021-06-09T06:16:46 | 2021-06-09T06:16:46 | 373,369,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | shop=[
["联想电脑",6000],
["Iphone 16x plus",15000],
["PS5游戏机",3500],
["老干妈",7.5],
["老于妈",5.5],
["卫龙辣条",10],
["HUA WEI watch",1200],
["MAC PC",15000]
]
coupon=[
["联想电脑",6000],
["卫龙辣条",10]
]
mycart=[]
salary=input("请输入您的余额:")
salary=int(salary)
M=salary
import random
S = random.choice(coupon)  # random.choice picks one sub-list from the coupon list at random
while salary < 0:
    print("对不起,您的输入有误,请重新输入!")
    salary = int(input("请输入您的余额:"))
    M = salary
print("恭喜您获得", S[0], "优惠券")
while True:
for index, value in enumerate(shop):
print(index, value)
choes = input("请输入商品序号:")
    if choes.isdigit():
        choes = int(choes)
        if choes < len(shop):
            if salary >= shop[choes][1]:
                mycart.append(shop[choes])
                salary = salary - shop[choes][1]
                print("恭喜,添加成功!您的余额还剩:¥", salary)
                print("已有", mycart)
            else:
                print("穷鬼,钱不够,请选择其他商品!")
        else:
            print("对不起,您的输入有误,请重新输入!")
    elif choes == 'Q' or choes == 'q':
        print("欢迎下次光临,再见!")
        break
    else:
        print("对不起,您的输入有误,请重新输入!")
some=mycart.count(["联想电脑",6000])
same=mycart.count(["卫龙辣条",10])
J=(M-salary)/10
J=int(J)
print("您可获得积分",J)
if S==["联想电脑",6000]:
salary = salary + some * 3000
elif S==["卫龙辣条",10] and same>3:
salary=salary+300
print("优惠券优惠后","您剩余",salary)
| [
"[email protected]"
] | |
ac1ec3210e1bc44987c279340c27129430ee2fad | 8fd472f7f5e7c8868fbda91ef9bff4d9fb53823c | /bjoj_2446.py | 8e69870ea3cfbcf98943aa50fb7e3841cee01220 | [] | no_license | basekim14/BJOJ_py | 13810537bda4663e59e00cf0398468dd1366cb46 | 986f8394f9883f64f31961ba68f9639636ccd17f | refs/heads/master | 2023-07-18T16:10:13.467785 | 2021-09-19T14:07:44 | 2021-09-19T14:07:44 | 270,268,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | """
ㄱㄱㅊ <[email protected]>, 20-06-18
Baekjoon Online Judge Study - 2446(print * - 9)
"""
from sys import stdin
N = int(stdin.readline())
for i in range(N):
print(" " * i + "*" * (2 * (N-i) - 1))
for i in range(N-1, 0, -1):
print(" " * (i-1) + "*" * (2 * (N-i+1) - 1))
| [
"[email protected]"
] | |
3834aad9ce09ccfbb6453c057e638122965eb47e | 0c9e39df287b55bb5088ed95df88d8a14cfdf381 | /test.py | 618fd02249f0b31002869d3a6c48d6e5b5f1c292 | [
"Apache-2.0",
"CC0-1.0"
] | permissive | afcarl/accelerator-gzutil | 9e599d0313dbe4c96539a0b1ea8eec65aaf60801 | 7ea2b9ca48bcdd1c395b4eb3649a075e99bb463f | refs/heads/master | 2020-03-18T07:19:29.281153 | 2018-04-20T20:37:15 | 2018-04-20T20:37:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,496 | py | #!/usr/bin/env python
############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
# Verify general operation and a few corner cases.
from __future__ import division, print_function, unicode_literals
from datetime import datetime, date, time
from sys import version_info
from itertools import compress
import gzutil
TMP_FN = "_tmp_test.gz"
inf, ninf = float("inf"), float("-inf")
if version_info[0] > 2:
l = lambda i: i
else:
l = long
# The Bits types don't accept floats, the others Int types do.
# This wasn't really intentional, but the right thing.
dttm0 = datetime(1789, 7, 14, 12, 42, 1, 82933)
dttm1 = datetime(2500, 12, 31, 23, 59, 59, 999999)
dttm2 = datetime(2015, 1, 1, 0, 0, 0, 0)
dt0 = date(1985, 7, 10)
tm0 = time(0, 0, 0, 0)
tm1 = time(2, 42, 0, 3)
tm2 = time(23, 59, 59, 999999)
for name, data, bad_cnt, res_data in (
("Float64" , ["0", float, 0 , 4.2, -0.01, 1e42, inf, ninf, None], 2, [0.0, 4.2, -0.01, 1e42, inf, ninf, None]),
("Float32" , ["0", float, l(0), 4.2, -0.01, 1e42, inf, ninf, None], 2, [0.0, 4.199999809265137, -0.009999999776482582, inf , inf, ninf, None]),
("Int64" , ["0", int, 0x8000000000000000, -0x8000000000000000, 0.1, 0x7fffffffffffffff, l(-5), None], 4, [0, 0x7fffffffffffffff, -5, None]),
("Bits64" , ["0", int, None, l(-5), -5, 0.1, 0x8000000000000000, 0x7fffffffffffffff, l(0x8000000000000000)], 6, [0x8000000000000000, 0x7fffffffffffffff, 0x8000000000000000]),
("Int32" , ["0", int, 0x80000000, -0x80000000, 0.1, 0x7fffffff, l(-5), None], 4, [0, 0x7fffffff, -5, None]),
("Bits32" , ["0", int, None, l(-5), -5, 0.1, 0x80000000, 0x7fffffff, l(0x80000000)], 6, [0x80000000, 0x7fffffff, 0x80000000]),
("Number" , ["0", int, 1 << 1007, -(1 << 1007), 1, l(0), -1, 0.5, 0x8000000000000000, -0x800000000000000, 1 << 340, (1 << 1007) - 1, -(1 << 1007) + 1, None], 4, [1, 0, -1, 0.5, 0x8000000000000000, -0x800000000000000, 1 << 340, (1 << 1007) - 1, -(1 << 1007) + 1, None]),
("Bool" , ["0", bool, 0.0, True, False, 0, l(1), None], 2, [False, True, False, False, True, None]),
("BytesLines" , [42, str, b"\n", u"a", b"a", b"foo bar baz", None], 4, [b"a", b"foo bar baz", None]),
("AsciiLines" , [42, str, b"\n", u"foo\xe4", b"foo\xe4", u"a", b"foo bar baz", None], 5, [str("a"), str("foo bar baz"), None]),
("UnicodeLines" , [42, str, u"\n", b"a", u"a", u"foo bar baz", None], 4, [u"a", u"foo bar baz", None]),
("DateTime" , [42, "now", tm0, dttm0, dttm1, dttm2, None], 3, [dttm0, dttm1, dttm2, None]),
("Date" , [42, "now", tm0, dttm0, dttm1, dttm2, dt0, None], 3, [dttm0.date(), dttm1.date(), dttm2.date(), dt0, None]),
("Time" , [42, "now", dttm0, tm0, tm1, tm2, None], 3, [tm0, tm1, tm2, None]),
("ParsedFloat64" , [float, "1 thing", "", "0", " 4.2", -0.01, "1e42 ", " inf", "-inf ", None], 3, [0.0, 4.2, -0.01, 1e42, inf, ninf, None]),
("ParsedFloat32" , [float, "1 thing", "", "0", " 4.2", -0.01, "1e42 ", " inf", "-inf ", None], 3, [0.0, 4.199999809265137, -0.009999999776482582, inf , inf, ninf, None]),
("ParsedNumber" , [int, "", str(1 << 1007), str(-(1 << 1007)), "0.0", 1, 0.0, "-1", "9223372036854775809", -0x800000000000000, str(1 << 340), str((1 << 1007) - 1), str(-(1 << 1007) + 1), None, "1e25"], 4, [0.0, 1, 0, -1, 0x8000000000000001, -0x800000000000000, 1 << 340, (1 << 1007) - 1, -(1 << 1007) + 1, None, 1e25]),
("ParsedInt64" , [int, "", "9223372036854775808", -0x8000000000000000, "0.1", 1, 0.1, "9223372036854775807", " -5 ", None], 5, [1, 0, 0x7fffffffffffffff, -5, None]),
("ParsedBits64" , [int, "", None, l(-5), "-5", 0.1, " 9223372036854775808", "9223372036854775807 ", "0", 1], 5, [0, 0x8000000000000000, 0x7fffffffffffffff, 0, 1]),
("ParsedInt32" , [int, "", 0x80000000, -0x80000000, "0.1", 0.1, "-7", "-0", "2147483647", " -5 ", None, 1], 5, [0, -7, 0, 0x7fffffff, -5, None, 1]),
("ParsedBits32" , [int, "", None, l(-5), -5, 0.1, "2147483648", "2147483647", l(0x80000000), 1], 5, [0, 0x80000000, 0x7fffffff, 0x80000000, 1]),
):
print(name)
r_name = "Gz" + name[6:] if name.startswith("Parsed") else "Gz" + name
r_typ = getattr(gzutil, r_name)
w_typ = getattr(gzutil, "GzWrite" + name)
	# verify that failures in init are handled reasonably.
for typ in (r_typ, w_typ,):
try:
typ("/NONEXISTENT")
raise Exception("%r does not give IOError for /NONEXISTENT" % (typ,))
except IOError:
pass
try:
typ("/NONEXISTENT", nonexistent_keyword="test")
raise Exception("%r does not give TypeError for bad keyword argument" % (typ,))
except TypeError:
pass
# test that the right data fails to write
with w_typ(TMP_FN) as fh:
count = 0
for ix, value in enumerate(data):
try:
fh.write(value)
count += 1
assert ix >= bad_cnt, repr(value)
except (ValueError, TypeError, OverflowError):
assert ix < bad_cnt, repr(value)
assert fh.count == count, "%s: %d lines written, claims %d" % (name, count, fh.count,)
if "Lines" not in name:
want_min = min(filter(lambda x: x is not None, res_data))
want_max = max(filter(lambda x: x is not None, res_data))
assert fh.min == want_min, "%s: claims min %r, not %r" % (name, fh.min, want_min,)
assert fh.max == want_max, "%s: claims max %r, not %r" % (name, fh.max, want_max,)
# Okay, errors look good
with r_typ(TMP_FN) as fh:
res = list(fh)
assert res == res_data, res
# Data comes back as expected.
if name.endswith("Lines"):
continue # no default support
for ix, default in enumerate(data):
# Verify that defaults are accepted where expected
try:
with w_typ(TMP_FN, default=default) as fh:
pass
assert ix >= bad_cnt, repr(default)
except AssertionError:
raise
except Exception:
assert ix < bad_cnt, repr(default)
if ix >= bad_cnt:
with w_typ(TMP_FN, default=default) as fh:
count = 0
for value in data:
try:
fh.write(value)
count += 1
except (ValueError, TypeError, OverflowError):
assert 0, "No default: %r" % (value,)
assert fh.count == count, "%s: %d lines written, claims %d" % (name, count, fh.count,)
# No errors when there is a default
with r_typ(TMP_FN) as fh:
res = list(fh)
assert res == [res_data[ix - bad_cnt]] * bad_cnt + res_data, res
# Great, all default values came out right in the file!
# Verify hashing and slicing
def slice_test(slices, spread_None):
res = []
sliced_res = []
total_count = 0
for sliceno in range(slices):
with w_typ(TMP_FN, hashfilter=(sliceno, slices, spread_None)) as fh:
count = 0
for ix, value in enumerate(data):
try:
wrote = fh.write(value)
count += wrote
assert ix >= bad_cnt, repr(value)
assert fh.hashcheck(value) == wrote or (spread_None and value is None), "Hashcheck disagrees with write"
except (ValueError, TypeError, OverflowError):
assert ix < bad_cnt, repr(value)
assert fh.count == count, "%s (%d, %d): %d lines written, claims %d" % (name, sliceno, slices, count, fh.count,)
if "Lines" not in name:
got_min, got_max = fh.min, fh.max
total_count += count
with r_typ(TMP_FN) as fh:
tmp = list(fh)
assert len(tmp) == count, "%s (%d, %d): %d lines written, claims %d" % (name, sliceno, slices, len(tmp), count,)
for v in tmp:
assert (spread_None and v is None) or w_typ.hash(v) % slices == sliceno, "Bad hash for %r" % (v,)
if "Bits" not in name or v < 0x8000000000000000:
assert w_typ.hash(v) == gzutil.hash(v), "Inconsistent hash for %r" % (v,)
res.extend(tmp)
sliced_res.append(tmp)
if "Lines" not in name:
tmp = list(filter(lambda x: x is not None, tmp))
if tmp:
want_min = min(tmp)
want_max = max(tmp)
assert got_min == want_min, "%s (%d, %d): claims min %r, not %r" % (name, sliceno, slices, got_min, want_min,)
assert got_max == want_max, "%s (%d, %d): claims max %r, not %r" % (name, sliceno, slices, got_max, want_max,)
else:
assert got_min is None and got_max is None
assert len(res) == total_count, "%s (%d): %d lines written, claims %d" % (name, slices, len(res), total_count,)
assert len(res) == len(res_data), "%s (%d): %d lines written, should be %d" % (name, slices, len(res), len(res_data),)
assert set(res) == set(res_data), "%s (%d): Wrong data: %r != %r" % (name, slices, res, res_data,)
# verify reading back with hashfilter gives the same as writing with it
with w_typ(TMP_FN) as fh:
for value in data[bad_cnt:]:
fh.write(value)
for sliceno in range(slices):
with r_typ(TMP_FN, hashfilter=(sliceno, slices, spread_None)) as fh:
slice_values = list(compress(res_data, fh))
assert slice_values == sliced_res[sliceno], "Bad reader hashfilter: slice %d of %d gave %r instead of %r" % (sliceno, slices, slice_values, sliced_res[sliceno],)
for slices in range(1, 24):
slice_test(slices, False)
slice_test(slices, True)
# and a simple check to verify that None actually gets spread too
if "Bits" not in name:
with w_typ(TMP_FN, hashfilter=(slices - 1, slices, True)) as fh:
for _ in range(slices * 3):
fh.write(None)
with r_typ(TMP_FN) as fh:
tmp = list(fh)
assert tmp == [None, None, None], "Bad spread_None for %d slices" % (slices,)
print("Hash testing, false things")
for v in (None, "", b"", 0, 0.0, False,):
assert gzutil.hash(v) == 0, "%r doesn't hash to 0" % (v,)
print("Hash testing, strings")
for v in ("", "a", "0", "foo", "a slightly longer string", "\0", "a\0b",):
u = gzutil.GzWriteUnicodeLines.hash(v)
a = gzutil.GzWriteAsciiLines.hash(v)
b = gzutil.GzWriteBytesLines.hash(v.encode("utf-8"))
assert u == a == b, "%r doesn't hash the same" % (v,)
assert gzutil.hash(b"\xe4") != gzutil.hash("\xe4"), "Unicode hash fail"
assert gzutil.GzWriteBytesLines.hash(b"\xe4") != gzutil.GzWriteUnicodeLines.hash("\xe4"), "Unicode hash fail"
try:
gzutil.GzWriteAsciiLines.hash(b"\xe4")
raise Exception("Ascii.hash acceptet non-ascii")
except ValueError:
pass
print("Hash testing, numbers")
for v in (0, 1, 2, 9007199254740991, -42):
assert gzutil.GzWriteInt64.hash(v) == gzutil.GzWriteFloat64.hash(float(v)), "%d doesn't hash the same" % (v,)
assert gzutil.GzWriteInt64.hash(v) == gzutil.GzWriteNumber.hash(v), "%d doesn't hash the same" % (v,)
print("BOM test")
def test_read_bom(num, prefix=""):
with gzutil.GzBytesLines(TMP_FN) as fh:
data = list(fh)
assert data == [prefix.encode("utf-8") + b"\xef\xbb\xbfa", b"\xef\xbb\xbfb"], (num, data)
with gzutil.GzBytesLines(TMP_FN, strip_bom=True) as fh:
data = list(fh)
assert data == [prefix.encode("utf-8") + b"a", b"\xef\xbb\xbfb"], (num, data)
with gzutil.GzUnicodeLines(TMP_FN) as fh:
data = list(fh)
assert data == [prefix + "\ufeffa", "\ufeffb"], (num, data)
with gzutil.GzUnicodeLines(TMP_FN, strip_bom=True) as fh:
data = list(fh)
assert data == [prefix + "a", "\ufeffb"], (num, data)
with gzutil.GzUnicodeLines(TMP_FN, "latin-1") as fh:
data = list(fh)
assert data == [prefix.encode("utf-8").decode("latin-1") + u"\xef\xbb\xbfa", u"\xef\xbb\xbfb"], (num, data)
with gzutil.GzUnicodeLines(TMP_FN, "latin-1", strip_bom=True) as fh:
data = list(fh)
assert data == [prefix.encode("utf-8").decode("latin-1") + u"a", u"\xef\xbb\xbfb"], (num, data)
with gzutil.GzUnicodeLines(TMP_FN, "ascii", "ignore") as fh:
data = list(fh)
assert data == ["a", "b"], (num, data)
if version_info[0] > 2:
with gzutil.GzAsciiLines(TMP_FN) as fh:
try:
next(fh)
raise Exception("GzAsciiLines allowed non-ascii in python3")
except ValueError:
pass
with open(TMP_FN, "wb") as fh:
fh.write(b"\xef\xbb\xbfa\n\xef\xbb\xbfb")
test_read_bom(0)
with gzutil.GzWriteUnicodeLines(TMP_FN, write_bom=True) as fh:
fh.write("a")
fh.write("\ufeffb")
test_read_bom(1)
with gzutil.GzWriteUnicodeLines(TMP_FN, write_bom=True) as fh:
fh.write("\ufeffa")
fh.write("\ufeffb")
test_read_bom(2, "\ufeff")
with gzutil.GzWriteUnicodeLines(TMP_FN) as fh:
fh.write("a")
assert next(gzutil.GzBytesLines(TMP_FN)) == b"a", "GzWriteUnicodeLines writes BOM when not requested"
print("Append test")
# And finally verify appending works as expected.
with gzutil.GzWriteInt64(TMP_FN) as fh:
fh.write(42)
with gzutil.GzWriteInt64(TMP_FN, mode="a") as fh:
fh.write(18)
with gzutil.GzInt64(TMP_FN) as fh:
assert list(fh) == [42, 18]
print("Untyped writer test")
with gzutil.GzWrite(TMP_FN) as fh:
class SubString(bytes): pass
for v in (b"apa", "beta", 42, None, SubString(b"\n"), b"foo"):
try:
fh.write(v)
assert isinstance(v, bytes), "GzWrite accepted %r" % (type(v),)
except ValueError:
assert not isinstance(v, bytes), "GzWrite doesn't accept %r" % (type(v),)
with gzutil.GzAsciiLines(TMP_FN) as fh:
res = list(fh)
assert res == ["apa", "foo"], "Failed to read back GzWrite written stuff: %r" % (res,)
print("Line boundary test")
Z = 128 * 1024 # the internal buffer size in gzutil
a = [
"x" * (Z - 2) + "a", # \n at end of buffer
"X" * (Z - 1) + "A", # \n at start of 2nd buffer
"y" * (Z - 4) + "b", # leave one char in 1st buffer
"Y" * (Z * 2 - 1) + "B", # \n at start of 3rd buffer
"12345" * Z + "z" * (Z - 1), # \n at end of 6th buffer
"Z",
]
with gzutil.GzWriteAsciiLines(TMP_FN) as fh:
for v in a:
fh.write(v)
with gzutil.GzAsciiLines(TMP_FN) as fh:
b = list(fh)
assert a == b, b
print("Number boundary test")
with gzutil.GzWriteNumber(TMP_FN) as fh:
todo = Z - 100
while todo > 0:
fh.write(42)
todo -= 9
# v goes over a block boundary.
v = 0x2e6465726f6220657261206577202c6567617373656d20676e6f6c207974746572702061207369207374696220646e6173756f6874206120796c6c6175746341203f7468676972202c6c6c657720736120746867696d206577202c65726568206567617373656d2074726f68732061206576616820732774656c20796548
want = [42] * fh.count + [v]
fh.write(v)
with gzutil.GzNumber(TMP_FN) as fh:
assert want == list(fh)
print("Number max_count large end test")
with gzutil.GzWriteNumber(TMP_FN) as fh:
fh.write(2 ** 1000)
fh.write(7)
with gzutil.GzNumber(TMP_FN, max_count=1) as fh:
assert [2 ** 1000] == list(fh)
print("Callback tests")
with gzutil.GzWriteNumber(TMP_FN) as fh:
for n in range(1000):
fh.write(n)
def callback(num_lines):
global cb_count
cb_count += 1
if cb_interval > 1:
assert num_lines in good_num_lines or num_lines == 1000 + cb_offset
for cb_interval, max_count, expected_cb_count in (
(300, -1, (3,)),
(250, 300, (1,)),
(250, 200, (0,)),
(1, -1, (999, 1000,)),
(5, -1, (199, 200,)),
(5, 12, (2,)),
(10000, -1, (0,)),
):
for cb_offset in (0, 50000000, -10000):
cb_count = 0
good_num_lines = range(cb_interval + cb_offset, (1000 if max_count == -1 else max_count) + cb_offset, cb_interval)
with gzutil.GzNumber(TMP_FN, max_count=max_count, callback=callback, callback_interval=cb_interval, callback_offset=cb_offset) as fh:
lst = list(fh)
assert len(lst) == 1000 if max_count == -1 else max_count
assert cb_count in expected_cb_count
def callback2(num_lines):
raise StopIteration
with gzutil.GzNumber(TMP_FN, callback=callback2, callback_interval=1) as fh:
lst = list(fh)
assert lst == [0]
def callback3(num_lines):
1 / 0
with gzutil.GzNumber(TMP_FN, callback=callback3, callback_interval=1) as fh:
good = False
try:
lst = list(fh)
except ZeroDivisionError:
good = True
assert good
| [
"[email protected]"
] | |
235cf824591304d36567de455c94e8ee24396e90 | ee51d8f7180ee75bc3c2e84872ae60bc8edeacd1 | /evalfun.py | 8bd5a20a411c235d970c903800dd91d9de35a719 | [] | no_license | cboin1996/Chesster | d2a822936aad2fca6355fd0e66be5930c11e35ad | c7dfdc9bcbfcc69bef9856d94cb08d7ac8810101 | refs/heads/master | 2022-03-01T17:24:21.342397 | 2021-09-15T13:08:09 | 2021-09-15T13:08:09 | 244,189,301 | 2 | 0 | null | 2022-02-09T23:43:42 | 2020-03-01T17:05:15 | Python | UTF-8 | Python | false | false | 567 | py | import math
import random
import itertools
import os
import chess
def start(config: Config):
return EvaluateWorker(config).start()
"""
^ creates a new function to start the evaluation function.
This "Config" is from the utils package which may need to be downloaded
using pip install. If this doesn't work, will probably sub with
configparser instead.
"""
class evalFunc(someGameState):
def intialize(agent, config: Config):
self.config = config
self.play_config = config.eval.play_config
self.current
| [
"[email protected]"
] | |
ecb83b8c2c02fcc3dd01c5da3e6b777228dae06e | 32c0c21fc97cb2429b92a488663a34dddf360008 | /src/resources/api.py | 5be37f581e69a59d8f0aaf0c603d502ca1881402 | [] | no_license | fuse-sujan/Basic-Flask-Setup | 5a2038317668ec604e3b90fb90d9a14950defd96 | 32b7f2c7ed180a0a450da44ee242871d7648c587 | refs/heads/master | 2021-06-27T18:08:49.813756 | 2019-12-04T09:48:09 | 2019-12-04T09:48:09 | 223,720,475 | 0 | 0 | null | 2021-03-20T02:22:18 | 2019-11-24T09:31:05 | Python | UTF-8 | Python | false | false | 109 | py | from flask_restplus import Api
api = Api(version='1.0', title='Todo API', description='A simple Todo App')
| [
"[email protected]"
] | |
3eb0594982efc90f12c71373cf28806e98a9a2c1 | f9bb9466f575166c578167f7fff0c3a210ab409a | /settings/base.py | 8acd839dcba05e11c4831b3b7f1021f66026ad21 | [
"MIT"
] | permissive | jameelhamdan/mlfaati | 77edaa4230298f8807029c446d61643b68ae69ae | 12c0dcbe0389c2c1da0bde80509fb3374955e293 | refs/heads/master | 2023-04-20T12:32:17.811344 | 2021-05-23T13:45:10 | 2021-05-23T13:45:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | import os
import dj_database_url
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
ADMINS = (
('Jameel', '[email protected]'),
)
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'qsessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# Plugins
'rest_framework',
'crispy_forms',
'menu_generator',
# Apps
'frontend',
'users',
'core',
'processing',
'cdn',
'console',
'api',
'docs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'qsessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'app.context_processors.config',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ASGI_APPLICATION = 'app.server.appplication'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DEFAULT_DATABASE_PATH = os.path.join(BASE_DIR, 'db.sqlite3')
DATABASE_URL = os.getenv('DATABASE_URL', f'sqlite:///{DEFAULT_DATABASE_PATH}')
DATABASES = {
'default': dj_database_url.parse(DATABASE_URL, conn_max_age=600),
}
DEFAULT_REDIS_URL = os.getenv('DEFAULT_REDIS_URL', 'redis://localhost:6379/0')
CELERY_REDIS_URL = os.getenv('CELERY_REDIS_URL', DEFAULT_REDIS_URL)
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': DEFAULT_REDIS_URL,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
},
},
'celery': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': CELERY_REDIS_URL,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
},
},
}
AUTH_USER_MODEL = 'users.User'
SESSION_ENGINE = 'qsessions.backends.cached_db'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
APPEND_SLASH = False
LOGIN_URL = '/auth/login'
LOGOUT_REDIRECT_URL = LOGIN_URL
LOGIN_REDIRECT_URL = '/console/'
# Email setup
EMAIL_USE_TLS = True
EMAIL_PORT = os.getenv('EMAIL_PORT', 587)
EMAIL_HOST = os.getenv('EMAIL_HOST', '')
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD', '')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Rest Framework
# https://www.django-rest-framework.org/
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'api.authentication.TokenAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
]
}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-ca'
TIME_ZONE = 'UTC'
USE_TZ = True
USE_I18N = False
USE_L10N = False
# Best datetime format
DATETIME_FORMAT = 'Y-m-d H:i:s'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i:s'
# Best date format
DATE_FORMAT = 'Y-m-d'
SHORT_DATE_FORMAT = 'Y-m-d'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
STATICFILES_STORAGE = 'app.staticfiles.StaticFilesStorage'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '.static')
STATICFILES_DIRS = []
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Celery Settings
CELERY_TIMEZONE = 'UTC'
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_TIME_LIMIT = 30 * 60
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_BROKER_URL = CELERY_REDIS_URL
CELERY_RESULT_BACKEND = CELERY_REDIS_URL
ENABLE_TRANSFORMATIONS = True
ENABLE_ASYNC = True
IMAGE_CLASSIFY_MODEL = 'image_classify'
# Override this in your conf.py to set custom image classification model file
# To download these files you can use ./scripts/download_models.sh script
MODEL_OPTIONS = {
IMAGE_CLASSIFY_MODEL: {
'path': os.path.join(BASE_DIR, 'bin', 'models', 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5'),
}
}
| [
"[email protected]"
] | |
a38096882f016407f2967c5cc1d40ac61dadb377 | 83ad61aee2c828978a8e4a5db5c4e8a6bf7574c6 | /setup.py | af15176d6fdba12593d114f09124549cfe1f8e78 | [
"Apache-2.0"
] | permissive | zengchen1024/mindinsight | 24b2877af1cc32da5f3cfab8af36c26416b8dcd5 | 228a448b46707e889efc1fb23502158e27ab56ca | refs/heads/master | 2021-05-19T05:16:26.548635 | 2020-03-30T12:01:47 | 2020-03-30T12:01:47 | 251,543,681 | 0 | 0 | Apache-2.0 | 2020-03-31T08:30:33 | 2020-03-31T08:30:32 | null | UTF-8 | Python | false | false | 5,606 | py | # Copyright 2019 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup."""
import sys
import os
import shutil
import stat
import platform
import shlex
import subprocess
import types
from importlib import import_module
from setuptools import setup
from setuptools.command.egg_info import egg_info
from setuptools.command.build_py import build_py
from setuptools.command.install import install
def get_version():
"""Get version."""
machinery = import_module('importlib.machinery')
version_path = os.path.join(os.path.dirname(__file__), 'mindinsight', '_version.py')
module_name = '__mindinsightversion__'
version_module = types.ModuleType(module_name)
loader = machinery.SourceFileLoader(module_name, version_path)
loader.exec_module(version_module)
return version_module.VERSION
def get_os():
"""Get OS."""
os_system = platform.system().lower()
return os_system
def get_description():
"""Get description."""
os_info = get_os()
cpu_info = platform.machine()
cmd = "git log --format='[sha1]:%h, [branch]:%d' -1"
process = subprocess.Popen(
shlex.split(cmd),
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, _ = process.communicate()
if not process.returncode:
git_version = stdout.decode()
return 'mindinsight platform: %s, cpu: %s, git version: %s' % (os_info, cpu_info, git_version)
return 'mindinsight platform: %s, cpu: %s' % (os_info, cpu_info)
def get_install_requires():
"""Get install requirements."""
with open('requirements.txt') as file:
return file.read().splitlines()
def update_permissions(path):
"""
Update permissions.
Args:
path (str): Target directory path.
"""
for dirpath, dirnames, filenames in os.walk(path):
for dirname in dirnames:
dir_fullpath = os.path.join(dirpath, dirname)
os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP)
for filename in filenames:
file_fullpath = os.path.join(dirpath, filename)
os.chmod(file_fullpath, stat.S_IREAD)
def run_script(script):
"""
Run script.
Args:
script (str): Target script file path.
"""
cmd = '/bin/bash {}'.format(script)
process = subprocess.Popen(
shlex.split(cmd),
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
while True:
line = process.stdout.readline()
if not line and process.poll() is not None:
break
if line:
sys.stdout.write(line.decode())
if process.returncode:
sys.exit(1)
class EggInfo(egg_info):
"""Egg info."""
def run(self):
self.build_dependencies()
egg_info_dir = os.path.join(os.path.dirname(__file__), 'mindinsight.egg-info')
shutil.rmtree(egg_info_dir, ignore_errors=True)
super().run()
update_permissions(egg_info_dir)
def build_dependencies(self):
build_dir = os.path.join(os.path.dirname(__file__), 'build')
sys.stdout.write('building crc32 ...\n')
crc32_script = os.path.join(build_dir, 'scripts', 'crc32.sh')
run_script(crc32_script)
sys.stdout.write('building ui ...\n')
ui_script = os.path.join(build_dir, 'scripts', 'ui.sh')
run_script(ui_script)
class BuildPy(build_py):
"""Build py files."""
def run(self):
mindinsight_lib_dir = os.path.join(os.path.dirname(__file__), 'build', 'lib', 'mindinsight')
shutil.rmtree(mindinsight_lib_dir, ignore_errors=True)
super().run()
update_permissions(mindinsight_lib_dir)
class Install(install):
"""Install."""
def run(self):
super().run()
if sys.argv[-1] == 'install':
pip = import_module('pip')
mindinsight_dir = os.path.join(os.path.dirname(pip.__path__[0]), 'mindinsight')
update_permissions(mindinsight_dir)
if __name__ == '__main__':
version_info = sys.version_info
if (version_info.major, version_info.minor) < (3, 7):
sys.stderr.write('Python version should be at least 3.7\r\n')
sys.exit(1)
setup(name='mindinsight',
version=get_version(),
author='MindInsight Team',
description=get_description(),
license='Apache 2.0',
keywords='mindinsight',
install_requires=get_install_requires(),
packages=['mindinsight'],
platforms=[get_os()],
include_package_data=True,
cmdclass={
'egg_info': EggInfo,
'build_py': BuildPy,
'install': Install,
},
entry_points={
'console_scripts': [
'mindinsight=mindinsight.utils.command:main',
],
})
| [
"[email protected]"
] | |
ac80b72867b0693300017bf2be9315b5d866cc98 | aafdea5ae489a951818851610f1096bd6ba6c2e0 | /Symphony/exitpnl.py | 28d1f9702ee60d320c2f3ae62a587ce2eb4a7731 | [] | no_license | ChetanKoranga/RMS_Trade | 5edd8b673e2654b9d6e147d376601911f9f4fb8b | 6aadd005463bda87549a359af0455af0480c6274 | refs/heads/master | 2022-11-18T23:17:51.606122 | 2020-07-20T14:53:51 | 2020-07-20T14:53:51 | 232,064,690 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,589 | py | from pymongo import MongoClient
import datetime
from time import sleep
date = datetime.date.today()
new_collec = f'finalpnl_{date}'
try:
client = MongoClient()
db = client['newTotalPnl']
collec = f'newTotalPnl_{date}'
db.create_collection(collec)
print(f"created new collection '{collec}'")
except Exception as e:
print(e)
try:
new_client = MongoClient()
exitpnl_db = new_client['finalpnl']
exitpnl_db[new_collec].drop()
print('finalpnl Collec Deleted')
except:
pass
try:
new_client = MongoClient()
new_db = new_client['finalpnl']
new_db.create_collection(new_collec)
print(f"created new collection '{new_collec}'")
except Exception as e:
print(e)
# algoname_unique = db[collec].find().distinct("algoname")
# print(algoname_unique)
# ClientID_unique = db[collec].find().distinct("clientID")
# print(ClientID_unique)
# l = []
# for x in algoname_unique:
# for y in ClientID_unique:
# conca = x + y
# l.append(conca)
# print(l)
while True:
check = db[collec].find()
li = []
for z in check:
conca = z["clientID"]+ z["algoname"]
if conca in li:
continue
else:
li.append(conca)
match = new_db[new_collec].find_one({ "$and" : [{"algoname": z['algoname']},{"clientID": z['clientID']}] })
if match:
try:
new_db[new_collec].update({'_id' : match['_id']}, {"$set": {"strategywise_pnl": z['strategywise_pnl']}})
except Exception:
print("Waiting for PnL")
pass
else:
try:
post={"algoname":z['algoname'], "clientID":z['clientID'],"strategywise_pnl":z['strategywise_pnl']}
new_db[new_collec].insert_one(post)
except Exception:
print("Waiting for PnL")
                    pass
    sleep(1)  # throttle the polling loop (sleep is imported above but was never used)
# check = db[collec].find()
# l = []
# for x in check:
# conca = x["clientID"]+ x["algoname"]
# if conca in l:
# continue
# else:
# l.append(conca)
# new_db[new_collec].insert(db[collec].find({},{ "_id": 0, "algoname": 1, "clientID": 1, "strategywise_pnl": 1 }))
| [
"[email protected]"
] | |
2239f04929b2dc1222a6a620b16e4b17090d4853 | 79b7e662e9010f7675e0c2406e4eb5105fa864cf | /weatherApp/weather.py | f677bd2d19de6f0d8787426a512a56caf39d7ea3 | [] | no_license | kav98/cloud2 | 7bd27cac7e2dbe794cfcbb90ccd4ce73257f1759 | 1d59a564a3d93c0726e1d5329328b4e13ee261b9 | refs/heads/main | 2023-03-07T11:22:06.735152 | 2021-03-01T20:32:05 | 2021-03-01T20:32:05 | 340,958,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from datetime import datetime
import os
import pytz
import requests
import math
API_KEY = '2447efb69513604bd845ca8e0a73fb81'
API_URL = ('http://api.openweathermap.org/data/2.5/weather?q={}&mode=json&units=metric&appid={}')
def query_api(city):
try:
print(API_URL.format(city, API_KEY))
data = requests.get(API_URL.format(city, API_KEY)).json()
#print(data)
except Exception as exc:
print(exc)
data = None
return data
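# Usage sketch (added note): query_api("London") returns the parsed
# OpenWeatherMap JSON on success -- e.g. data["main"]["temp"] (Celsius, since
# the URL requests units=metric) -- or None if the request raised an error.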
| [
"[email protected]"
] | |
995fe38e4e3c1c437b481edebf11ccf292bc0490 | 9626164f8c550cf1529fe4909494defa114218be | /news_ie/views.py | 7a6d1eb8156ba8f284fa9f6e9875e44c99b12208 | [] | no_license | Anmeet/News-Information-Extraction-and-Visualization | 85e2f1bbf9e924f1ca9e3ba5319e9d424d1c63a0 | 34a48458df0126928fdf5f52ae63ef181ecdb492 | refs/heads/master | 2021-07-03T09:28:04.647042 | 2020-02-23T01:06:02 | 2020-02-23T01:06:02 | 242,434,860 | 0 | 0 | null | 2021-06-10T22:36:01 | 2020-02-23T00:57:04 | Python | UTF-8 | Python | false | false | 8,805 | py | import datetime
import string
import sys
from django.contrib.gis.geos import GEOSGeometry, Point, fromstr
from django.http import HttpResponse
from django.shortcuts import render
from world.models import WayPoint
from .extraction.getdate import extract_date
from .extraction.getday import get_day
from .extraction.getdeathinjury import *
from .extraction.getnewlocation import geotraverseTree
from .extraction.ner import getlocation
from .extraction.vehicle_no import vehicle_no
from .forms import NameForm
from .geocoder import *
from .models import News
from .sentoken import sentences
from .up import rep
# Create your views here.
def index(request):
now = datetime.datetime.now()
return render(request, 'news_ie/index.html', {'date': now})
def get_news(request):
if request.method == 'POST':
form = NameForm(request.POST)
# To display waypoints on the maps
waypoints = WayPoint.objects.order_by('name')
if form.is_valid():
data = form.cleaned_data
# extract_items(data['news_text'])
story = News()
story.body = data['news_text']
#data['news'] = rep(data['news'])
# print("Befor Splitting \n")
# print(data['news_text'])
#data['news_text'] = rep(data['news_text'])
# Split the news into sentences [pre-processing]
# Create Sentence Object
sentclass = sentences()
sentlist = sentclass.split_into_sentences(data['news_text'])
splited_sen = []
# # print each sentences
# # print("\n" + "After Spliting " + "\n")
for sent in sentlist:
splited_sen.append(sent)
# # print(sent + "\n")
#
sentences_dic = dict((i, splited_sen[i]) for i in range(0, len(splited_sen)))
# # print(sentences_dic)
#
# # Get the vehicle no. Here number_plate is the dictionary
number_plate = vehicle_no(splited_sen)
print(number_plate)
story.vehicle_no = number_plate
# Get death count and injury count
death = death_no(splited_sen)
if death == "None":
actualdeath = death
deathNo = 0
else:
actualdeath = remove_date(death)
deathNo = convertNum(death)
print("Death No: ")
print(death, actualdeath, deathNo)
story.death = death
injury = injury_no(splited_sen)
if injury == "None":
actualinjury = "None"
injuryNo = 0
else:
actualinjury = remove_date(injury)
injuryNo = convertNum(injury)
print("Injury No:")
print(injury, actualinjury, injuryNo)
story.injury = injury
extdate = extract_date(sentlist)
print("Date:", extdate)
s = extdate[0]
story.date = datetime.datetime.strptime(s, "%Y-%m-%d").date()
# Get location from 1st sentences list
# from the classifier
location = geotraverseTree(splited_sen[0])
print(location)
story.location = location
# Get day from the total sentence list
day = get_day(sentlist)
print(day)
story.day = day
# from standford, dont forget to use ' '.join(location)
# location = getlocation(splited_sen[0])
# print(' '.join(location))
# story.location = ' '.join(location)
# location_coordinates = find_lat_lng(location)
try:
location_coordinates = find_lat_lng(location)
except Exception:
location_coordinates = [0.0, 0.0]
# print(location_coordinates[0])
# print(location_coordinates[1])
# Save the Coordinate of the location to Database as WayPoint
lat = str(location_coordinates[0])
lng = str(location_coordinates[1])
#gem = "POINT(" + str(lat) + ' ' + str(lng) + ")"
gem = GEOSGeometry('POINT(%s %s)' % (lng, lat))
my_long_lat = lat + " " + lng
gem = fromstr('POINT(' + my_long_lat + ')')
WayPoint(name=' '.join(location), geometry=gem).save()
# Now save the story
# story.save()
save_story(story, data)
return render(request, 'news_ie/index.html', {'waypoints': waypoints, 'form': form, 'date': extdate, 'day': day, 'sentences_dic': sentences_dic, 'death': actualdeath, "deathnum": deathNo, 'injury': actualinjury, 'injurynum': injuryNo, 'number_plate': number_plate, 'location': location,'lat':lat,'lng':lng, 'coordintae': location_coordinates})
else:
form = NameForm()
return render(request, 'news_ie/index.html', {'form': form})
def extract_items(n):
# print(n)
story = News()
story.body = n
#data['news'] = rep(data['news'])
# print("Befor Splitting \n")
# print(data['news_text'])
#data['news_text'] = rep(data['news_text'])
# Split the news into sentences [pre-processing]
# Create Sentence Object
sentclass = sentences()
sentlist = sentclass.split_into_sentences(n)
splited_sen = []
# print each sentences
# print("\n" + "After Spliting " + "\n")
for sent in sentlist:
splited_sen.append(sent)
# print(sent + "\n")
sentences_dic = dict((i, splited_sen[i]) for i in range(0, len(splited_sen)))
# print(sentences_dic)
# Get the vehicle no. Here number_plate is the dictionary
number_plate = vehicle_no(splited_sen)
print(number_plate)
story.vehicle_no = number_plate
# Get death count and injury count
death = death_no(splited_sen)
if death == "None":
actualdeath = death
deathNo = 0
else:
actualdeath = remove_date(death)
deathNo = convertNum(death)
print("Death No: ")
# print(death, actualdeath, deathNo)
story.death = actualdeath
story.death_no = deathNo
injury = injury_no(splited_sen)
if injury == "None":
actualinjury = "None"
injuryNo = 0
else:
actualinjury = remove_date(injury)
injuryNo = convertNum(injury)
print("Injury No:")
# print(injury, actualinjury, injuryNo)
story.injury = actualinjury
story.injury_no = injuryNo
extdate = extract_date(splited_sen)
print("Date:", extdate)
s = extdate[0]
story.date = datetime.datetime.strptime(s, "%Y-%m-%d").date()
# Get location from 1st sentences list
# from the classifier
location = geotraverseTree(splited_sen[0])
# print(location)
story.location = location
# Get day from the total sentence list
day = get_day(sentlist)
# print(day)
story.day = day
# from standford, dont forget to use ' '.join(location)
# location = getlocation(splited_sen[0])
# print(' '.join(location))
# story.location = ' '.join(location)
# location_coordinates = find_lat_lng(location)
try:
location_coordinates = find_lat_lng(location)
except Exception:
location_coordinates = [0.0, 0.0]
print(location_coordinates[0])
print(location_coordinates[1])
# Save the Coordinate of the location to Database as WayPoint
# lat = str(location_coordinates[0])
# lng = str(location_coordinates[1])
#gem = "POINT(" + str(lat) + ' ' + str(lng) + ")"
# gem = GEOSGeometry('POINT(%s %s)' % (lng, lat))
# my_long_lat = lat + " " + lng
# gem = fromstr('POINT(' + my_long_lat + ')')
# WayPoint(name=' '.join(location), geometry=gem).save()
#
# # Now save the story
story.save()
# save_story(story, data)
#
return story
# Try Jaccard coefficient
def similar_story(news1, news2):
doc1 = set(news1.split())
doc2 = set(news2.split())
# find union
union = list(doc1 | doc2)
intersec = list(doc2.intersection(doc1))
#intersection = list(set(doc1) - (set(doc1) - set(doc2)))
jacc_coef = float(len(intersec)) / len(union)
return jacc_coef
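# Worked example (added note): for news1 = "bus crash in kathmandu" and
# news2 = "bus crash near pokhara", the word sets share 2 of 6 distinct words
# ("bus", "crash"), so similar_story returns 2 / 6 ~= 0.33 -- well below the
# 0.90 duplicate threshold used in save_story below.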
# Save the story from the data
def save_story(story, data):
sim = []
# get all the saved story
savedStory = News.objects.all()
for s in savedStory:
        coefficient = similar_story(data['news_text'], s.body)
        sim.append(coefficient)
    # print(sim)
    jacc_max = max(sim) if sim else 0.0  # guard: no stories saved yet
# print(jacc_max)
# set the threshold value to identify Duplicate
    threshold = .90
    if jacc_max < threshold:
s = story.save()
print("Save Successful:")
else:
print("Duplicate News Exists:")
| [
"[email protected]"
] | |
bce010e018cf7d38188930cbfbdc14ff095e91d4 | 11eea5de39fcdb28ee928c252c93eadd82cc19d6 | /robotics/build/robot_description/catkin_generated/pkg.installspace.context.pc.py | 19d0f2067ceba31d43929362359cb61e2647b4ef | [] | no_license | eandualem/Robotics-Final-Project | f5df467ca0f9892921b5506f830f6f4715fd1b1e | f6ca90dfd5dcf945c61758a779d5a1f4af5ac5c2 | refs/heads/master | 2020-12-24T03:58:48.423089 | 2020-01-31T08:06:56 | 2020-01-31T08:06:56 | 237,374,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robot_description"
PROJECT_SPACE_DIR = "/home/elias/Desktop/ws/src/robotics/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
2594c37f8a6e7f0892fb7ed620e30d9339c1c69e | e4e44097320d056f3768eb3a53f28f4c19cdc7ce | /recoverTree.py | b06eb61e5a0f4c3bae3d675badf8f4208cced976 | [] | no_license | amisyy/leetcode | 0640e009c02956778f402eb89b74c98c36882d44 | ba8ab343a246aa3eead75a23dc69b5a76680d290 | refs/heads/master | 2021-06-03T06:27:38.216035 | 2020-11-08T06:59:40 | 2020-11-08T06:59:40 | 103,757,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def recoverTree(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
pre_cur = None
pre = None
p1 = None
p2 = None
cur = root
found = False
while cur is not None:
if cur.left is None:
                if pre_cur is not None and pre_cur.val > cur.val:
                    if not found:
                        found = True
                        p1 = pre_cur
                    p2 = cur  # keep updating: the second swapped node is the last violator
pre_cur = cur
cur = cur.right
else:
pre = cur.left
while pre.right is not None and not pre.right == cur:
pre = pre.right
if pre.right is None:
pre.right = cur
cur = cur.left
else:
                    if pre_cur is not None and pre_cur.val > cur.val:
                        if not found:
                            found = True
                            p1 = pre_cur
                        p2 = cur
pre_cur = cur
pre.right = None
cur = cur.right
if p1 is not None and p2 is not None:
temp = p1.val
p1.val = p2.val
p2.val = temp
if __name__ == '__main__':
    # sanity check: the values 1 and 3 are swapped in this BST (root 2,
    # left 3, right 1); after recovery the in-order walk is 1 2 3 again
    root = TreeNode(2)
    root.left = TreeNode(3)
    root.right = TreeNode(1)
    Solution().recoverTree(root)
    print(root.left.val, root.val, root.right.val)  # expected: 1 2 3
| [
"[email protected]"
] | |
2478f96af98a0a8eb5353f436c45ba8ffa79ba4f | f3d86b5f622a407dc30233c4a609a410dc048920 | /profile_app/urls.py | b06c6239840a4fbe76df0861ae2695880bb6fbcb | [] | no_license | KozlovKV/django-votings | 1adb350919ea1ff0a3bb4f450cf6a43fe7b779e8 | 9d917c9c4d8719ae6ce326a46cc6bb6a5e837414 | refs/heads/master | 2023-03-02T02:43:17.414803 | 2021-02-08T17:48:06 | 2021-02-08T17:48:06 | 337,081,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | from django.contrib.auth import views
from django.urls import path
import profile_app.view_subclasses as reg_subclasses
import menu_app.view_subclasses as menu_subclasses
urlpatterns = [
path('login/', reg_subclasses.LoginViewDetailed.as_view(), name='login'),
path('logout/', views.LogoutView.as_view(), name='logout'),
# path('password_change/', views.PasswordChangeView.as_view(), name='password_change'),
# path('password_change/done/', views.PasswordChangeDoneView.as_view(), name='password_change_done'),
path('password_reset/', reg_subclasses.PasswordResetViewDetailed.as_view(), name='password_reset'),
path('password_reset/done/', menu_subclasses.TemplateViewWithMenu.as_view(
template_name='registration/password_reset_done.html'), name='password_reset_done'),
path('reset/<uidb64>/<token>/', reg_subclasses.PasswordResetConfirmViewDetailed.as_view(),
name='password_reset_confirm'),
path('reset/done/', menu_subclasses.TemplateViewWithMenu.as_view(
template_name='registration/password_reset_complete.html'), name='password_reset_complete'),
]
reg_patterns = [
path(
"activate/complete/",
menu_subclasses.TemplateViewWithMenu.as_view(
template_name="django_registration/activation_complete.html",
),
name="django_registration_activation_complete",
),
path(
"activate/<str:activation_key>/",
reg_subclasses.ActivationViewDetailed.as_view(),
name="django_registration_activate",
),
path(
"register/",
reg_subclasses.RegistrationViewDetailed.as_view(),
name="django_registration_register",
),
path(
"register/complete/",
menu_subclasses.TemplateViewWithMenu.as_view(
template_name="django_registration/registration_complete.html",
),
name="django_registration_complete",
),
path(
"register/closed/",
menu_subclasses.TemplateViewWithMenu.as_view(
template_name="django_registration/registration_closed.html",
),
name="django_registration_disallowed",
),
]
urlpatterns += reg_patterns
| [
"[email protected]"
] | |
d5986cb730eac15b8464e6d259d06074a79643ef | 0b0a947c10038152fc56efbdde13eef3330adb34 | /hackerrank-problem-solving-solutions/39. Find Angle MBC.py | c17a91d5e4e612e6dddf54c97dc3dfea98b81da3 | [] | no_license | swapnanildutta/Python-programs | 9c382eb8c823571e4f098fff263d126665fbc575 | d47e2e3c4d648e0cc0ae1b89b83ce4f99db89f63 | refs/heads/master | 2021-11-18T22:16:57.276910 | 2021-09-04T13:07:36 | 2021-09-04T13:07:36 | 197,773,723 | 1 | 26 | null | 2023-04-09T10:51:57 | 2019-07-19T13:02:26 | Python | UTF-8 | Python | false | false | 307 | py | # Author Aman Shekhar
import math
ab = float(input())
bc = float(input())
ac = math.sqrt((ab*ab)+(bc*bc))
bm = ac / 2.0
mc = bm
b = mc
c = bm
a = bc
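# Why acos(a / (2*b)) works (added note): M is the midpoint of hypotenuse AC,
# so BM = MC = AC/2 and triangle MBC is isosceles; hence angle MBC equals
# angle MCB = acos(BC / AC) = acos(a / (2*b)).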
angle_b_radian = math.acos(a / (2*b))
angle_b_degree = int(round((180 * angle_b_radian) / math.pi))
output_str = str(angle_b_degree)+'°'
print(output_str) | [
"Aman Shekhar"
] | Aman Shekhar |
fc2b1670dc0d8c57a7933598d24f307c9bf42883 | ab3d5455b4644e643679a8cc6263b17b1d616a64 | /custom.py | 824c9ad6a2bc48cff7c24a7b6a3ada6beaeec775 | [] | no_license | ayrton22/IA_Final_Project_DH | 3ee1aa3aa09d229707ce3dd4391ac3f0f3aaabc7 | 08a03380eadebc80a37dd669fa6f2497bcc1ea5b | refs/heads/master | 2023-03-05T02:25:12.312710 | 2021-02-11T20:11:15 | 2021-02-11T20:11:15 | 336,387,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | '''
Custom cce, plate_acc and acc for plate recognition using CNN
'''
from tensorflow.keras import backend as K
import tensorflow as tf
# Custom Metrics
def cat_acc(y_true, y_pred):
y_true = K.reshape(y_true, shape=(-1, 7, 37))
y_pred = K.reshape(y_pred, shape=(-1, 7, 37))
return K.mean(tf.keras.metrics.categorical_accuracy(y_true, y_pred))
def plate_acc(y_true, y_pred):
'''
How many plates were correctly classified
If Ground Truth is ABC 123
Then prediction ABC 123 would score 1
else ABD 123 would score 0
Avg these results (1 + 0) / 2 -> Gives .5 accuracy
(Half of the plates were completely corrected classified)
'''
y_true = K.reshape(y_true, shape=(-1, 7, 37))
y_pred = K.reshape(y_pred, shape=(-1, 7, 37))
et = K.equal(K.argmax(y_true), K.argmax(y_pred))
return K.mean(
K.cast(K.all(et, axis=-1, keepdims=False), dtype='float32')
)
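# Usage sketch (added; assumes TensorFlow 2.x eager mode): two 7-character
# plates over the 37-symbol alphabet, the second with one wrong character,
# give plate_acc == 0.5 exactly as the docstring describes:
#   perfect = tf.one_hot(tf.zeros((2, 7), dtype=tf.int32), 37)
#   noisy = tf.concat([perfect[:1],
#                      tf.one_hot([[1, 0, 0, 0, 0, 0, 0]], 37)], axis=0)
#   plate_acc(tf.reshape(perfect, (2, -1)), tf.reshape(noisy, (2, -1)))  # 0.5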
def top_3_k(y_true, y_pred):
# Reshape into 2-d
y_true = K.reshape(y_true, (-1, 37))
y_pred = K.reshape(y_pred, (-1, 37))
return K.mean(
tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
)
# Custom loss
def cce(y_true, y_pred):
y_true = K.reshape(y_true, shape=(-1, 37))
y_pred = K.reshape(y_pred, shape=(-1, 37))
return K.mean(
tf.keras.losses.categorical_crossentropy(
y_true, y_pred, from_logits=False, label_smoothing=0.2
)
)
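# Design note (added): label_smoothing=0.2 mixes each one-hot target with a
# uniform distribution (0.8 * one_hot + 0.2 / 37 per class), which discourages
# over-confident per-character predictions.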
| [
"[email protected]"
] | |
30fe5f0a451f6fe0b398ee8ef69538f3765c29b4 | b2eed268ec55b0e0b7299a042d1cbf2e39212ed7 | /lista_videos.py | 669092621a9dedd8046ccd7d517da493e23cfa65 | [] | no_license | jul21unac/codigo | 5772f2706e493b1020268fbdaff59e6b5d7676e2 | 3c24b439c083cb3d2da21f3ab2f69da02ecd50f6 | refs/heads/master | 2020-04-28T04:50:59.884582 | 2019-05-23T09:25:27 | 2019-05-23T09:25:27 | 174,996,541 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time
import random
driver = webdriver.Firefox()
driver.get("https://www.youtube.com/user/CirculosPodemos/videos")
ultima_medida = driver.execute_script("return document.documentElement.scrollHeight;")
i=0
while True:
    # loop to scroll down little by little
while i <= ultima_medida:
cadena_medida = "window.scrollTo(0," + str(i) + ");"
driver.execute_script(cadena_medida)
        # randomize the increment so the scrolling does not look automated
incremento = random.randint(50,100)
i = i + incremento
        # randomize the wait time as well
tiempo_espera = random.uniform(0.4,0.5)
time.sleep(tiempo_espera)
nueva_medida = driver.execute_script("return document.documentElement.scrollHeight;")
if nueva_medida == ultima_medida:
break
ultima_medida = nueva_medida
videos = driver.find_elements_by_xpath("//*[@id='video-title']")
f = open('links_podemos.txt','w')
# we could run the comment-collection function here instead of saving the video links
for link in videos:
print(link.get_attribute("href"),file = f)
print("-"*80,file = f)
| [
"[email protected]"
] | |
2eb0b07281848c75fdfbbebad5323ee7aad041b6 | b064696e34a31d2f23eb5da4f364a09542428b44 | /tf_agents/environments/test_envs.py | 6a387d171858712c9f4f239a123767929ea08075 | [
"Apache-2.0"
] | permissive | vraoresearch/agents | affead659efd3b5ac232d3d9ff60a1fabe74250e | 58ffe1eec6e38a2cddcf34834d795b37e3b8843b | refs/heads/master | 2022-11-19T10:01:54.906271 | 2022-10-27T14:41:56 | 2022-10-27T14:42:23 | 293,401,771 | 0 | 1 | Apache-2.0 | 2020-09-07T02:23:54 | 2020-09-07T02:23:53 | null | UTF-8 | Python | false | false | 6,095 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of simple environments useful for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
from tf_agents import specs
from tf_agents.environments import py_environment
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
# TODO(b/156832202) Replace with EpisodeCountingEnv
@gin.configurable
class CountingEnv(py_environment.PyEnvironment):
"""Counts up in the observation as steps are taken.
  Step observation values are of the form (10 * episodes + current_step), so
if steps_per_episode is greater than 10 then on reset the value of the
observation count may go down.
"""
def __init__(self, steps_per_episode: types.Int = 10, dtype=np.int32):
self._steps_per_episode = steps_per_episode
self._dtype = np.dtype(dtype)
self._episodes = 0
self._current_step = np.array(0, dtype=self._dtype)
super(CountingEnv, self).__init__(handle_auto_reset=True)
def observation_spec(self) -> types.NestedArraySpec:
return specs.BoundedArraySpec((), dtype=self._dtype)
def action_spec(self) -> types.NestedArraySpec:
return specs.BoundedArraySpec((), dtype=self._dtype, minimum=0, maximum=1)
def _step(self, action):
del action # Unused.
self._current_step = np.array(1 + self._current_step,
dtype=self._dtype)
if self._current_step < self._steps_per_episode:
return ts.transition(self._get_observation(), 0) # pytype: disable=wrong-arg-types
return ts.termination(self._get_observation(), 1) # pytype: disable=wrong-arg-types
def _get_observation(self):
if self._episodes:
return np.array(10 * self._episodes + self._current_step,
dtype=self._dtype)
return self._current_step
def _reset(self):
if self._current_time_step and self._current_time_step.is_last():
self._episodes += 1
self._current_step = np.array(0, dtype=self._dtype)
return ts.restart(self._get_observation())
def get_info(self):
return {}
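# Usage sketch (added note): with CountingEnv(steps_per_episode=3) the
# observations run reset() -> 0, then 1, 2, 3 (terminal); the next step()
# auto-resets and observes 10 (episode 1, step 0), then 11, 12, 13, ...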
@gin.configurable
class EpisodeCountingEnv(py_environment.PyEnvironment):
"""Counts up in the observation as steps are taken.
Step observation values are of the form (episodes, self._current_step)
"""
def __init__(self, steps_per_episode=10):
self._steps_per_episode = steps_per_episode
self._episodes = 0
self._steps = 0
super(EpisodeCountingEnv, self).__init__(handle_auto_reset=True)
def observation_spec(self):
return (specs.BoundedArraySpec((), dtype=np.int32),
specs.BoundedArraySpec((), dtype=np.int32))
def action_spec(self):
return specs.BoundedArraySpec((), dtype=np.int32, minimum=0, maximum=1)
def _step(self, action):
del action # Unused.
self._steps += 1
if self._steps < self._steps_per_episode:
return ts.transition(self._get_observation(), 0) # pytype: disable=wrong-arg-types
return ts.termination(self._get_observation(), 1) # pytype: disable=wrong-arg-types
def _get_observation(self):
return (np.array(self._episodes, dtype=np.int32),
np.array(self._steps, dtype=np.int32))
def _reset(self):
if self._current_time_step and self._current_time_step.is_last():
self._episodes += 1
self._steps = 0
return ts.restart(self._get_observation())
@gin.configurable
class NestedCountingEnv(py_environment.PyEnvironment):
"""Counts up in the observation as steps are taken.
Step observation values are of the form
{
'total_steps': (10 ** episodes + self._current_step),
'current_steps': (self._current_step)
}
if steps_per_episode is greater than 10 then on reset the value of the
observation count may go down.
"""
def __init__(self, steps_per_episode: types.Int = 10, nested_action=False):
self._steps_per_episode = steps_per_episode
self._episodes = 0
self._current_step = np.array(0, dtype=np.int32)
self._nested_action = nested_action
super(NestedCountingEnv, self).__init__(handle_auto_reset=True)
def observation_spec(self) -> types.NestedArraySpec:
return {
'total_steps': specs.BoundedArraySpec((), dtype=np.int32),
'current_steps': specs.BoundedArraySpec((), dtype=np.int32)
}
def action_spec(self) -> types.NestedArraySpec:
if self._nested_action:
return {
'foo':
specs.BoundedArraySpec((), dtype=np.int32, minimum=0, maximum=1),
'bar':
specs.BoundedArraySpec((), dtype=np.int32, minimum=0, maximum=1)
}
else:
return specs.BoundedArraySpec((), dtype=np.int32, minimum=0, maximum=1)
def _step(self, action):
del action # Unused.
self._current_step = np.array(1 + self._current_step, dtype=np.int32)
if self._current_step < self._steps_per_episode:
return ts.transition(self._get_observation(), 0) # pytype: disable=wrong-arg-types
return ts.termination(self._get_observation(), 1) # pytype: disable=wrong-arg-types
def _get_observation(self):
return {
'total_steps':
np.array(10 * self._episodes + self._current_step, dtype=np.int32),
'current_steps':
self._current_step
}
def _reset(self):
if self._current_time_step and self._current_time_step.is_last():
self._episodes += 1
self._current_step = np.array(0, dtype=np.int32)
return ts.restart(self._get_observation())
| [
"[email protected]"
] | |
145fa56faeef37542e22de8faa7b5458191b40fd | 4139b937d2710e809a3883554f8717271277b5a0 | /ytube/mainapp/urls.py | 97b68c554252e5162218c69ab690e68bb20160e7 | [] | no_license | debaghosh/Youtube-Data-API-Project | 5a24ebd8e05665b31f3451cc620b14d29446eb49 | 0f4c79b7c03e222e4978327d310fd5f95b53aebc | refs/heads/main | 2023-04-18T00:34:53.734390 | 2021-04-28T19:11:21 | 2021-04-28T19:11:21 | 305,747,146 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.home,name='home'),
path('video/',views.video,name="video"),
path('channel/',views.channel,name="channel"),
]
| [
"[email protected]"
] | |
de1fb4c2e52ad648d612c61f606df65673435378 | 422c7b6f117bb78483fffdc58bc4dce97ece3a06 | /weatherapp/app/views.py | 090c18fa303cc6319572324faab9a7cdd487b756 | [] | no_license | xiasuke/WeatherApp | 300e076ab0868f23b7cb2329befd2412de4c007a | edffd0b581955a41b0419722fe8dc8327d8bd693 | refs/heads/master | 2021-06-30T00:42:03.245952 | 2017-09-18T13:32:04 | 2017-09-18T13:32:04 | 103,694,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,525 | py | # -*- coding: utf-8 -*-
import json
from flask import render_template, url_for, redirect, flash, request
from app import app
from .forms import CityInputForm, SelectCityForm
from .models import Cities, CurrentWeather
from controller import InputHandler
from common.constants import JAVASCRIPT_PATH
@app.route('/', methods=['GET', 'POST'])
def index():
form = CityInputForm()
if form.validate_on_submit():
city_name = form.city.data
return redirect(url_for('find_city', city_name=city_name))
return render_template("home.html",
title='Home',
searchform=form,
entries=Cities().get_all_cities())
@app.route('/find_city/<city_name>', methods=['GET', 'POST'])
def find_city(city_name):
search_form = CityInputForm()
duplicate_city_form = SelectCityForm()
if duplicate_city_form.is_submitted() and not search_form.validate_on_submit():
print "go here"
value = duplicate_city_form.city_list.data
if value == "None":
return render_template("home.html",
title="Invalid Entry",
searchform=search_form,
entries=Cities().get_all_cities())
value = json.loads(duplicate_city_form.city_list.data)
print "value_id: {}, {}".format(value, type(value))
return redirect(url_for('current_weather', city_id=value.get("id")))
# print city_name
if search_form.validate_on_submit():
new_city_name = search_form.city.data
return redirect(url_for('find_city', city_name=new_city_name))
city_info = Cities().get_city_info(city_name)
new_city_info = InputHandler().get_requested_city_state(city_name, city_info)
if new_city_info is not None and len(new_city_info) != 1:
duplicate_city_form.city_list.choices = [(json.dumps(city), ", ".join((city_name, city.get("state"), city.get("country")))) for city in new_city_info]
return render_template("home.html",
title=city_name,
searchform=search_form,
cityinfo=duplicate_city_form,
numcityinfo=len(new_city_info),
entries=Cities().get_all_cities())
elif new_city_info is not None and len(new_city_info) == 1:
print new_city_info[0].get("id")
return redirect(url_for('current_weather', city_id=new_city_info[0].get("id")))
flash("%s is not a city" % (city_name))
return redirect(url_for("index"))
@app.route('/current_weather/<city_id>', methods=['GET', 'POST'])
def current_weather(city_id):
cur_weather_result = InputHandler().get_city_cur_weather(city_id)
cur_response_builder = CurrentWeather(cur_weather_result)
cur_response = cur_response_builder.build_current_weather_response()
forecast_result = InputHandler().get_city_forecast(city_id)
return render_template("current_weather.html",
title=cur_response_builder.city_name + ", " + cur_response_builder.city_country,
weatherdescrip=cur_response_builder.weather_description,
temp=cur_response_builder.current_temp,
curweather = cur_response,
curweathericon=cur_weather_result['weather'][0]["icon"],
forecast_info=forecast_result["list"])
| [
"[email protected]"
] | |
ff42e4c1e25395242bc420cf6c2299df49eeed88 | 662207b37b6d77b43c164b9feacdbf928790bf17 | /day5/csv_file_reader-4.py | 71ef476d3185e56ed6b74d536cdfcf95ce22baa0 | [] | no_license | shamanthaka/mypython_work | fc26983488059eb59462f8ab966aaad2ab93532a | 410f655939387b709cefa4d452aa323aede1c4d7 | refs/heads/master | 2020-07-10T03:12:59.246227 | 2019-08-24T12:34:34 | 2019-08-24T12:34:34 | 204,152,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | import csv
from datetime import datetime
file = open("google_stock_data-1.csv", newline='')
reader = csv.reader(file)
header = next(reader) #The first line is the header
data = []
for row in reader:
#row = [Date, Open, High, Low, Close, Volume, Adj.Close
date = datetime.strptime(row[0], "%m/%d/%Y")
open_price = float(row[1]) #'open' is builtin function
high = float(row[2])
low = float(row[3])
close = float(row[4])
volume = int(row[5])
adj_close = float(row[6])
data.append([date, open_price, high, low, close, volume, adj_close])
print(data[0])
#compute and store daily stock returns
return_path = "google_returns.csv"
file = open(return_path, 'w')
writer1 = csv.writer(file)
writer1.writerow(["Date", "Returns"])
for i in range(len(data) - 1):
todays_row = data[i]
todays_date = todays_row[0]
todays_price = todays_row[-1]
yesterdays_row = data[i+1]
yesterdays_price = yesterdays_row[-1]
daily_return = (todays_price - yesterdays_price) / yesterdays_price
formatted_date = todays_date.strftime('%m/%d/%Y')
    writer1.writerow([formatted_date, daily_return])
file.close()  # flush the buffered rows of google_returns.csv to disk
print(header)
print(data[0])
| [
"[email protected]"
] | |
d4d251c3b3289e5e59f6f69637eadebeaa3d7d65 | f286fa74dc20579274cc61281326446fa05e0e45 | /python_bale_bot/models/messages/base_message.py | 524159d0e63e0f952aac44dd6aec8cc98853c726 | [] | no_license | mmdaz/Bot | 91b39d8ff0cce16834ca2d80e77232020b31bd90 | fb7b207dc8c07e4e7d766ced61c0dce6eccf11b7 | refs/heads/master | 2021-06-13T10:32:15.215663 | 2018-12-25T02:52:57 | 2018-12-25T02:52:57 | 145,236,800 | 0 | 0 | null | 2021-03-31T18:47:50 | 2018-08-18T16:49:58 | Python | UTF-8 | Python | false | false | 105 | py | from python_bale_bot.models.base_models.jsonable import Jsonable
class BaseMessage(Jsonable):
pass
| [
"[email protected]"
] | |
a742a33db13716c957bb8912fdcb12eeb8c67fc9 | e4ceb24675f326ee985271761ceea1f7499c008f | /shredder.py | 106463d860d699b6eeb4dc91b0c6d9019f394391 | [] | no_license | vdugar/Instagram-Challenge | c15b0584bb6cbf87be4c856b2ac4e92418848b35 | fe5ce6d79e91235d967d998ea2a268f2600e2fcf | refs/heads/master | 2016-09-06T09:43:12.606514 | 2012-01-08T05:43:56 | 2012-01-08T05:43:56 | 2,802,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | """
Usage: python shredder.py source_image NO_OF_SHREDS
"""
import sys
from PIL import Image
from random import shuffle
def shredder():
image = Image.open(sys.argv[1])
SHREDS = int(sys.argv[2])
shredded = Image.new('RGBA', image.size)
width, height = image.size
shred_width = width/SHREDS
sequence = range(0, SHREDS)
shuffle(sequence)
for i, shred_index in enumerate(sequence):
shred_x1, shred_y1 = shred_width * shred_index, 0
shred_x2, shred_y2 = shred_x1 + shred_width, height
region =image.crop((shred_x1, shred_y1, shred_x2, shred_y2))
shredded.paste(region, (shred_width * i, 0))
shredded.save('shredded.jpg')
if __name__ == '__main__':
shredder() | [
"[email protected]"
] | |
d8c7395042d0f2082aff13bcb11066c45b458b79 | ddca79e37a1731104e7e469d349797c6c46cfa9b | /egs/thchs30/steps/data/augment_data_dir.py | 432b136e3b1f54793a1e224686852689243038b9 | [
"Apache-2.0"
] | permissive | zyzisyz/tf-kaldi-speaker | 9608302d9430fae6916cb0367a79e227e06a3795 | 320ed4d6255addda4e86f9d4c2911c1fec21ba42 | refs/heads/master | 2020-08-11T14:21:13.280003 | 2019-10-12T14:22:16 | 2019-10-12T14:22:16 | 214,579,180 | 0 | 0 | Apache-2.0 | 2019-10-12T04:46:17 | 2019-10-12T04:46:17 | null | UTF-8 | Python | false | false | 9,158 | py | #!/usr/bin/env python3
# Copyright 2017 David Snyder
# 2017 Ye Bai
# Apache 2.0
#
# This script generates augmented data. It is based on
# steps/data/reverberate_data_dir.py but doesn't handle reverberation.
# It is designed to be somewhat simpler and more flexible for augmenting with
# additive noise.
from __future__ import print_function
import sys, random, argparse, os, imp
sys.path.append("steps/data/")
from reverberate_data_dir import ParseFileToDict
from reverberate_data_dir import WriteDictToFile
data_lib = imp.load_source('dml', 'steps/data/data_dir_manipulation_lib.py')
def GetArgs():
parser = argparse.ArgumentParser(description="Augment the data directory with additive noises. "
"Noises are separated into background and foreground noises which are added together or "
"separately. Background noises are added to the entire recording, and repeated as necessary "
"to cover the full length. Multiple overlapping background noises can be added, to simulate "
"babble, for example. Foreground noises are added sequentially, according to a specified "
"interval. See also steps/data/reverberate_data_dir.py "
"Usage: augment_data_dir.py [options...] <in-data-dir> <out-data-dir> "
"E.g., steps/data/augment_data_dir.py --utt-suffix aug --fg-snrs 20:10:5:0 --bg-snrs 20:15:10 "
"--num-bg-noise 1:2:3 --fg-interval 3 --fg-noise-dir data/musan_noise --bg-noise-dir "
"data/musan_music data/train data/train_aug", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fg-snrs', type=str, dest = "fg_snr_str", default = '20:10:0',
help='When foreground noises are being added, the script will iterate through these SNRs.')
parser.add_argument('--bg-snrs', type=str, dest = "bg_snr_str", default = '20:10:0',
help='When background noises are being added, the script will iterate through these SNRs.')
parser.add_argument('--num-bg-noises', type=str, dest = "num_bg_noises", default = '1',
help='Number of overlapping background noises that we iterate over. For example, if the input is "1:2:3" then the output wavs will have either 1, 2, or 3 randomly chosen background noises overlapping the entire recording')
parser.add_argument('--fg-interval', type=int, dest = "fg_interval", default = 0,
help='Number of seconds between the end of one foreground noise and the beginning of the next.')
parser.add_argument('--utt-suffix', type=str, dest = "utt_suffix", default = "aug", help='Suffix added to utterance IDs.')
parser.add_argument('--random-seed', type=int, dest = "random_seed", default = 123, help='Random seed.')
parser.add_argument("--bg-noise-dir", type=str, dest="bg_noise_dir",
help="Background noise data directory")
parser.add_argument("--fg-noise-dir", type=str, dest="fg_noise_dir",
help="Foreground noise data directory")
parser.add_argument("input_dir", help="Input data directory")
parser.add_argument("output_dir", help="Output data directory")
print(' '.join(sys.argv))
args = parser.parse_args()
args = CheckArgs(args)
return args
def CheckArgs(args):
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if not args.fg_interval >= 0:
raise Exception("--fg-interval must be 0 or greater")
if args.bg_noise_dir is None and args.fg_noise_dir is None:
raise Exception("Either --fg-noise-dir or --bg-noise-dir must be specified")
return args
def GetNoiseList(noise_wav_scp_filename):
noise_wav_scp_file = open(noise_wav_scp_filename, 'r').readlines()
noise_wavs = {}
noise_utts = []
for line in noise_wav_scp_file:
toks=line.split(" ")
wav = " ".join(toks[1:])
noise_utts.append(toks[0])
noise_wavs[toks[0]] = wav.rstrip()
return noise_utts, noise_wavs
def AugmentWav(utt, wav, dur, fg_snr_opts, bg_snr_opts, fg_noise_utts, \
bg_noise_utts, noise_wavs, noise2dur, interval, num_opts):
# This section is common to both foreground and background noises
new_wav = ""
dur_str = str(dur)
noise_dur = 0
tot_noise_dur = 0
snrs=[]
noises=[]
start_times=[]
# Now handle the background noises
if len(bg_noise_utts) > 0:
num = random.choice(num_opts)
for i in range(0, num):
noise_utt = random.choice(bg_noise_utts)
noise = "wav-reverberate --duration=" \
+ dur_str + " \"" + noise_wavs[noise_utt] + "\" - |"
snr = random.choice(bg_snr_opts)
snrs.append(snr)
start_times.append(0)
noises.append(noise)
# Now handle the foreground noises
if len(fg_noise_utts) > 0:
while tot_noise_dur < dur:
noise_utt = random.choice(fg_noise_utts)
noise = noise_wavs[noise_utt]
snr = random.choice(fg_snr_opts)
snrs.append(snr)
noise_dur = noise2dur[noise_utt]
start_times.append(tot_noise_dur)
tot_noise_dur += noise_dur + interval
noises.append(noise)
start_times_str = "--start-times='" + ",".join(list(map(str,start_times))) + "'"
snrs_str = "--snrs='" + ",".join(list(map(str,snrs))) + "'"
noises_str = "--additive-signals='" + ",".join(noises).strip() + "'"
# If the wav is just a file
if wav.strip()[-1] != "|":
new_wav = "wav-reverberate --shift-output=true " + noises_str + " " \
+ start_times_str + " " + snrs_str + " " + wav + " - |"
# Else if the wav is in a pipe
else:
new_wav = wav + " wav-reverberate --shift-output=true " + noises_str + " " \
+ start_times_str + " " + snrs_str + " - - |"
return new_wav
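# A minimal sketch of what AugmentWav produces (file names and durations below are
# hypothetical): the function only builds a wav-reverberate pipeline string, so it
# can be exercised without Kaldi binaries installed.
def _augment_wav_example():
    noise_wavs = {"noise1": "noise1.wav"}
    new_wav = AugmentWav("utt1", "utt1.wav", 7.2, [5], [10], [], ["noise1"],
                         noise_wavs, {"noise1": 3.0}, 0, [1])
    # e.g. "wav-reverberate --shift-output=true --additive-signals='...' --start-times='0' --snrs='10' utt1.wav - |"
    return new_wav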
def CopyFileIfExists(utt_suffix, filename, input_dir, output_dir):
if os.path.isfile(input_dir + "/" + filename):
dict = ParseFileToDict(input_dir + "/" + filename,
value_processor = lambda x: " ".join(x))
if len(utt_suffix) > 0:
new_dict = {}
for key in dict.keys():
new_dict[key + "-" + utt_suffix] = dict[key]
dict = new_dict
WriteDictToFile(dict, output_dir + "/" + filename)
def main():
args = GetArgs()
fg_snrs = list(map(int, args.fg_snr_str.split(":")))
bg_snrs = list(map(int, args.bg_snr_str.split(":")))
input_dir = args.input_dir
output_dir = args.output_dir
num_bg_noises = list(map(int, args.num_bg_noises.split(":")))
reco2dur = ParseFileToDict(input_dir + "/reco2dur",
value_processor = lambda x: float(x[0]))
wav_scp_file = open(input_dir + "/wav.scp", 'r').readlines()
noise_wavs = {}
noise_reco2dur = {}
bg_noise_utts = []
fg_noise_utts = []
# Load background noises
if args.bg_noise_dir:
bg_noise_wav_filename = args.bg_noise_dir + "/wav.scp"
bg_noise_utts, bg_noise_wavs = GetNoiseList(bg_noise_wav_filename)
bg_noise_reco2dur = ParseFileToDict(args.bg_noise_dir + "/reco2dur",
value_processor = lambda x: float(x[0]))
noise_wavs.update(bg_noise_wavs)
noise_reco2dur.update(bg_noise_reco2dur)
    # Load foreground noises
if args.fg_noise_dir:
fg_noise_wav_filename = args.fg_noise_dir + "/wav.scp"
fg_noise_reco2dur_filename = args.fg_noise_dir + "/reco2dur"
fg_noise_utts, fg_noise_wavs = GetNoiseList(fg_noise_wav_filename)
fg_noise_reco2dur = ParseFileToDict(args.fg_noise_dir + "/reco2dur",
value_processor = lambda x: float(x[0]))
noise_wavs.update(fg_noise_wavs)
noise_reco2dur.update(fg_noise_reco2dur)
random.seed(args.random_seed)
new_utt2wav = {}
new_utt2spk = {}
# Augment each line in the wav file
for line in wav_scp_file:
toks = line.rstrip().split(" ")
utt = toks[0]
wav = " ".join(toks[1:])
dur = reco2dur[utt]
new_wav = AugmentWav(utt, wav, dur, fg_snrs, bg_snrs, fg_noise_utts,
bg_noise_utts, noise_wavs, noise_reco2dur, args.fg_interval,
num_bg_noises)
new_utt = utt + "-" + args.utt_suffix
new_utt2wav[new_utt] = new_wav
if not os.path.exists(output_dir):
os.makedirs(output_dir)
WriteDictToFile(new_utt2wav, output_dir + "/wav.scp")
CopyFileIfExists(args.utt_suffix, "reco2dur", input_dir, output_dir)
CopyFileIfExists(args.utt_suffix, "utt2dur", input_dir, output_dir)
CopyFileIfExists(args.utt_suffix, "utt2spk", input_dir, output_dir)
CopyFileIfExists(args.utt_suffix, "utt2lang", input_dir, output_dir)
CopyFileIfExists(args.utt_suffix, "text", input_dir, output_dir)
CopyFileIfExists(args.utt_suffix, "utt2spk", input_dir, output_dir)
CopyFileIfExists(args.utt_suffix, "vad.scp", input_dir, output_dir)
CopyFileIfExists("", "spk2gender", input_dir, output_dir)
data_lib.RunKaldiCommand("utils/fix_data_dir.sh {output_dir}".format(output_dir = output_dir))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
54261356c34affbb8adc4b8e18917f0b93ecbf45 | 3bdc9855c32ed0a7e1b433f1538f57264ae2e4dd | /Day-4/Hands-on/MultiNetworkExample/RGM/utils.py | bfbc401a5a5a51a897363163829527eacd302c6e | [] | no_license | SSDS-Croatia/SSDS-2020 | b184cef79b38f1973cd04f42063ef7de22585ed8 | fd3b18ed36aa33a31c24e45d5562aa7b371eb760 | refs/heads/master | 2022-12-18T14:35:52.898610 | 2020-09-23T11:45:14 | 2020-09-23T11:45:14 | 279,528,225 | 4 | 6 | null | 2020-09-06T18:41:28 | 2020-07-14T08:31:35 | null | UTF-8 | Python | false | false | 2,780 | py | import numpy as np, networkx as nx
import scipy.io as sio
from config import *
import scipy.sparse as sp
from scipy.sparse import coo_matrix
dataset_lookup = {"mutag" : "MUTAG",
"nci" : "NCI1",
"ptc" : "PTC_MR",
"imdb-b": "IMDB-BINARY",
"imdb-m" : "IMDB-MULTI",
"collab": "COLLAB"}
#Input: list of n embs of shape m_1 x d, m_2 x d, ...m_n x d
#Output: combined matrix of shape (sum_i = 1 to n m_i) x d
def combine_embs(embs):
combined_embs = embs[0]
for i in range(1, len(embs)):
combined_embs = np.vstack((combined_embs, embs[i]))
return combined_embs
#Combine multiple graphs into one big block diagonal graph
#Handles sparse graphs
def create_combined_graph(graphs, emb_method):
dim_starts = [0] #where to start new graph
for g in graphs:
dim_starts.append(g.N + dim_starts[-1])
combined_row = np.asarray([])
combined_col = np.asarray([])
combined_data = np.asarray([])
combined_node_labels = None
combined_edge_labels = None
combined_edgelabel_row = np.asarray([])
combined_edgelabel_col = np.asarray([])
combined_edgelabel_data = np.asarray([])
for i in range(len(graphs)):
adj = graphs[i].adj.tocoo()
combined_row = np.concatenate((combined_row, adj.row + dim_starts[i]))
combined_col = np.concatenate((combined_col, adj.col + dim_starts[i]))
combined_data = np.concatenate((combined_data, adj.data))
if graphs[i].edge_labels is not None:
#add edge labels
edge_labels = graphs[i].edge_labels.tocoo()
combined_edgelabel_row = np.concatenate((combined_edgelabel_row, edge_labels.row + dim_starts[i]))
combined_edgelabel_col = np.concatenate((combined_edgelabel_col, edge_labels.col + dim_starts[i]))
combined_edgelabel_data = np.concatenate((combined_edgelabel_data, edge_labels.data))
#add node label data
if graphs[i].node_labels is not None:
if combined_node_labels is None:
combined_node_labels = graphs[i].node_labels
else:
combined_node_labels = np.concatenate((combined_node_labels, graphs[i].node_labels))
combined_shape = (dim_starts[-1], dim_starts[-1])
combined_adj = coo_matrix((combined_data, (combined_row, combined_col)), shape = combined_shape).tocsr()
if combined_edgelabel_data.size > 0: #we have edge labels
combined_edge_labels = coo_matrix((combined_edgelabel_data, (combined_edgelabel_row, combined_edgelabel_col)), shape = combined_shape).tocsr()
#use node label as attribute
combined_graph = Graph(combined_adj, node_labels = combined_node_labels, edge_labels = combined_edge_labels, node_attributes = combined_node_labels)
return combined_graph, dim_starts
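# A minimal, self-contained sketch of the same offset trick with plain scipy
# (the project's Graph wrapper from config is intentionally not used here):
# each graph's row/col indices are shifted by the running node count, which
# yields a block-diagonal adjacency matrix.
def _block_diag_example():
    a = coo_matrix(np.array([[0, 1], [1, 0]]))
    b = coo_matrix(np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]))
    row = np.concatenate((a.row, b.row + a.shape[0]))
    col = np.concatenate((a.col, b.col + a.shape[0]))
    data = np.concatenate((a.data, b.data))
    n = a.shape[0] + b.shape[0]
    return coo_matrix((data, (row, col)), shape=(n, n)).tocsr()  # 5x5 block-diagonal matrix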
#Input list of graphs
#Output: array of combined node labels
def combine_labels(graphs):
combined_labels = list()
for graph in graphs:
combined_labels += graph.node_labels
return combined_labels
| [
"[email protected]"
] | |
122be19a61a0639cc28993bfa38839b6a35b7df6 | 2821a9e144d27e70aacebed3bc4e58137dc7c191 | /app/auth/forms.py | 387f678d01142d6847f971ab03d0ca788f6e0d75 | [] | no_license | bluesnoblue/BluesServer | 296fd3b281616b82d0773902ba7a06c089740935 | 96db1d10d6ce565db46c0bdb72e493fc0dbdd23b | refs/heads/master | 2022-12-13T18:47:17.140086 | 2019-03-12T07:28:40 | 2019-03-12T07:28:40 | 156,386,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Email, ValidationError, EqualTo
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
class ResetPassWordRequestForm(FlaskForm):
    email = StringField('Email', validators=[DataRequired(), Email()])
submit = SubmitField('Request Password Reset')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Request Password Reset') | [
"[email protected]"
] | |
eaf3251ef24f1745953ed74171cac3e1b9abf7d3 | df93e960b8be38d82d76cc2656a16f7b441d320d | /obfuscate.py | 18e5e3ef1f2d578017b810761247397e821a4009 | [] | no_license | kabads/obfuscate | ce8ecfbf67cef8cbd31b934b432d040ae5839428 | 88005a1d28557c9b5e2defb03095c5f4ce736e7b | refs/heads/master | 2023-04-07T04:27:30.966242 | 2021-03-31T07:38:17 | 2021-03-31T07:38:17 | 353,115,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,248 | py | import csv
import random
import argparse
LOWER_VOWEL_LIST = ['a', 'e', 'i', 'o', 'u']
UPPER_VOWEL_LIST = ['A', 'E', 'I', 'O', 'U']
LOWER_CONSONANT_LIST = ['b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'p', 'q', 'r', 's', 't', 'v', 'w', 'x',
                        'y', 'z']
UPPER_CONSONANT_LIST = ['B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X',
                        'Y', 'Z']
DIGIT_LIST = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
def obfuscate(string):
new_string = ''
for i in string:
if i in LOWER_VOWEL_LIST:
i = random.choice(LOWER_VOWEL_LIST)
elif i in UPPER_VOWEL_LIST:
i = random.choice(UPPER_VOWEL_LIST)
elif i in LOWER_CONSONANT_LIST:
i = random.choice(LOWER_CONSONANT_LIST)
elif i in UPPER_CONSONANT_LIST:
i = random.choice(UPPER_CONSONANT_LIST)
elif i in DIGIT_LIST:
i = random.choice(DIGIT_LIST)
new_string = new_string + i
return new_string
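# Example (output varies per run since replacements are random): obfuscate("Ab3-x")
# preserves case, character class and punctuation, e.g. it might return "Ej7-q".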
def read_file(file, columns, outfilename=None):
# Prepare the output file:
outfilename = ""
print(outfilename)
if outfilename == "":
outfilename = file.name + str("-bak.csv")
# Outfile is going to be where we write the file to.
outfile = (open(str(outfilename), "w"))
csv_writer = csv.writer(outfile, delimiter=',', lineterminator='\n')
# Open the file to obfuscate
with open(str(file.name), newline='') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
        # The header row (rowcount == 0) is detected below and copied through unchanged
rowcount = 0 # keep a count of which row we are on
# Iterate over the rows in the csv file
for row in csv_reader:
# Iterate through all the columns, but if a column matches something in columns,
# then it should be obfuscated
newrow = [] # an array to hold our new row as we read the columns.
if rowcount != 0: # if this is zero, then we are at the header
# Iterate over the columns in the row keeping columncount as a counter
columncount = 0
for column in row:
newcell = "" # This will hold the contents of the new cell
                if str(columncount) in columns:  # Check if this is one of the columns that needs obfuscation:
for i in column: # Loop through each character and change it
i = obfuscate(i) # This is the character being passed to obfuscate and returned
newcell = newcell + i # Collect all the random characters in newcell:
newrow.append(newcell) # Append the newcell to the new row
else: # This column is not obfuscated
newrow.append(column) # Append the newcell to the new row
columncount = columncount + 1
# If the cell doesn't need obfuscating, then just carry on
            else:
                # This must be the header as this is row 0, so let's add it to the new outfile:
                csv_writer.writerow(row)
            # OK - now we will increase our rowcount by one.
            rowcount = rowcount + 1
            # And write the newrow to the file (data rows only; the header was already written):
            if newrow:
                csv_writer.writerow(newrow)
    # We've finished all the rows - so close the file:
    outfile.close()
def open_file(filename):
file = open(filename)
return file
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--file", required=True, help="Which file do you want to read in?", dest="infile")
parser.add_argument("-o", "--outfile", help="Which file do you want to write out to?")
parser.add_argument("-r", "--rows", required=True, help="Which rows do you want to be obfuscated (in ascending"
" order)?", dest="rows", nargs='+')
args = parser.parse_args()
file = open_file(args.infile)
# print("Args: ", args.infile)
# print("file opened successfully.")
read_file(file, args.rows, args.outfile)
file.close()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5a376adc61df0a82a554729b16161f47b55484e0 | cf72581c792fa74f959c74550c167f9e18def815 | /dominio_ag_tsp.py | 2acc9f158e926e78aaa485d4b23213164fd2583e | [
"Apache-2.0"
] | permissive | ITCRStevenLPZ/Proyecto3-Analisis-de-Algoritmos | 295319f918bb14d48e651c3c7b28feebb92091fc | 86d73834da5f4ca7cd94671dc665bd576ed32e4b | refs/heads/master | 2023-08-14T15:45:23.249396 | 2021-10-07T06:04:22 | 2021-10-07T06:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,224 | py | from dominio_ag import DominioAG
from dominio_tsp import DominioTSP
from random import random
class DominioAGTSP(DominioAG, DominioTSP):
"""
Representa el objeto de dominio que conoce los detalles de implementación y modelamiento
del problema del vendedor viajero para ser resuelto con algoritmos genéticos.
Las soluciones se modelan como listas de enteros, donde cada número representa
una ciudad específica. Si el grafo contiene n ciudades, la lista siempre contiene
(n-1) elementos. La lista nunca contiene elementos repetidos y nunca contiene la
ciudad de inicio y fin del circuito.
Métodos:
generar(n)
Construye aleatoriamente una lista de listas que representa n
posibles soluciones al problema.
cruzar(sol_a, sol_b)
Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro.
mutar(sol)
Produce una nueva solución aplicando un ligero cambio a la solución dada por
parámetro.
"""
def __init__(self, ciudades_rutacsv, ciudad_inicio):
"""Construye un objeto de modelo de dominio para una instancia
específica del problema del vendedor viajero para ser resuelto
con algoritmos genéticos.
Entradas:
ciudades_rutacsv (str)
Ruta al archivo csv que contiene la matriz de pesos entre las ciudades
para las que se quiere resolver el problema del vendedor viajero.
ciudad_inicio (str)
Nombre de la ciudad que será el inicio y fin del circuito a calcular.
Salidas:
Una instancia de DominioAGTSP correctamente inicializada.
"""
        super().__init__(ciudades_rutacsv, ciudad_inicio)
def generar_n(self, n):
"""Construye aleatoriamente una lista de listas que representa n
posibles soluciones al problema.
Entradas:
n (int)
Número de soluciones aleatorias a generar.
Salidas:
(sols) Lista que contiene n listas, cada una representando
una posible solución al problema modelado por el objeto de dominio.
"""
sols = []
for x in range(n):
nuevo = self.generar()
valido = self.validar(nuevo)
while(not valido):
nuevo = self.generar()
valido = self.validar(nuevo)
sols.append(nuevo)
return sols
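    # Illustrative trace of cruzar below (hypothetical parents and cut points 1..3):
    #   P1 = [3, 1, 4, 2], P2 = [2, 4, 1, 3]
    #   the child starts as [-1, 1, 4, -1] (middle slice copied from P1),
    #   then the remaining cities are filled in P2's order, giving [2, 1, 4, 3].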
def cruzar(self, sol_a, sol_b):
"""Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro.
Entradas:
sol_a (estructura de datos)
Estructura de datos que modela la solución antecesora A que será cruzada con la B
sol_b (estructura de datos)
Estructura de datos que modela la solución antecesora B que será cruzada con la A
Salidas:
(H1) Una nueva solución producto del cruzamiento entre las soluciones A y B
"""
P1 = sol_a
P2 = sol_b
lenOri = len(sol_a)
H1 = [-1] * lenOri
geneA = 0
geneB = 0
        while (geneA == geneB):  # so the copied range is not empty
geneA = int(random() * lenOri)
geneB = int(random() * lenOri)
startGene = min(geneA, geneB)
endGene = max(geneA, geneB)
for i in range(startGene, endGene):
H1[i] = P1[i]
        rec = 0  # position in the parent being scanned
        recorrido = 0  # position where the next new element goes
while (rec < lenOri):
while (H1[recorrido] != -1 and recorrido + 1 < lenOri):
recorrido += 1
if (P2[rec] not in H1):
H1[recorrido] = P2[rec]
rec += 1
return H1
def mutar(self, sol):
"""Produce una nueva solución aplicando un ligero cambio a la solución dada por
parámetro.
Entradas:
sol (estructura de datos)
La solución a mutar.
Salidas:
(vecino(sol)) Una nueva solución que refleja un ligero cambio con respecto
a la solución dada por parámetro utilizando el método de vecino del SIM ANNEALING
"""
return super().vecino(sol) | [
"[email protected]"
] | |
a936dc2e461b068adb1d7e0afb575895d7118f90 | 59ac4d6d130fa83a9a38e0838c4ff52ed6a944d2 | /tourism survey multi-tasking/ILS_master_2.py | edb75cfddf590aa10936e3404f5feb54ed10c87a | [] | no_license | GaryGrimes/Multi_Tasking_Main | f97c3aad02c95dde259ef148350ef230d6ed5347 | fd3924ae9e64f1163e5d9fc23a6c61a9d9fbb7aa | refs/heads/master | 2020-11-27T02:32:27.591003 | 2019-12-20T14:17:46 | 2019-12-20T14:17:46 | 229,273,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,873 | py | import numpy as np
import pickle
class IlsUtility(object):
def __init__(self, NodeNum, alpha, beta, phi, UtilMatrix, TimeMatrix, CostMatrix, DwellArray):
if len(alpha) != 2:
raise ValueError('alpha should be a 1*2 array!')
if len(beta) != 3:
raise ValueError('beta should be a 1*3 array!')
self.NodeNum = NodeNum
self.alpha, self.beta, self.phi = alpha, beta, phi
self.utilmatrix = UtilMatrix
self.timematrix = TimeMatrix
self.costmatrix = CostMatrix
self.dwellarray = DwellArray
def modify_travel_time(self):
timematrix = self.timematrix
for i in range(timematrix.shape[0]):
for j in range(timematrix.shape[1]):
min_cost = timematrix[i][j]
for k in range((timematrix.shape[1])):
cost = timematrix[i][k] + timematrix[k][j]
min_cost = cost if cost < min_cost else min_cost
timematrix[i][j] = min(min_cost, timematrix[i][j])
timematrix = (timematrix + timematrix.T) / 2
return timematrix
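    # Note: the loop above is a single relaxation sweep over intermediate nodes k
    # (a full Floyd-Warshall shortest-time computation would place k in the outermost
    # loop or repeat until no entry changes); the result is then symmetrized by
    # averaging the matrix with its transpose.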
def eval_util(self, route): # use array as input
util, AcumUtil = 0, np.zeros([3])
if len(route) <= 2:
return 0
else:
for k in range(1, len(route) - 1):
# arc and node utility
util += self.arc_util_callback(route[k - 1], route[k]) + self.node_util_callback(route[k], Pref,
AcumUtil)
AcumUtil += self.exp_util_callback(route[k], AcumUtil) # Accumulated utility; travel history
util += self.arc_util_callback(route[k], route[k + 1])
return util
def cost_change(self, n1, n2, n3, n4):
cost_matrix = self.costmatrix
return cost_matrix[n1][n3] + cost_matrix[n2][n4] - cost_matrix[n1][n2] - cost_matrix[n3][n4]
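    # Example: for a tour segment ...-n1-n2-...-n3-n4-..., reversing the part between
    # n2 and n3 swaps arcs (n1, n2) and (n3, n4) for (n1, n3) and (n2, n4); a negative
    # return value therefore means the reversal reduces total cost.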
def time_callback(self, route):
DwellArray = self.dwellarray
if len(route) <= 2:
return 0
else:
time = 0
for k in range(1, len(route) - 1):
time += self.travel_time_callback(route[k - 1], route[k]) + DwellArray[route[k]]
time += self.travel_time_callback(route[k], route[k + 1])
return time
def travel_time_callback(self, from_node, to_node):
return self.timematrix[from_node][to_node]
def arc_util_callback(self, from_node, to_node):
alpha1, alpha2, phi = self.alpha[0], self.alpha[1], self.phi
return alpha1 * self.timematrix[from_node][to_node] + alpha2 * phi * self.costmatrix[from_node][to_node]
def exp_util_callback(self, to_node, AcumUtil):
beta2 = self.beta[1]
return self.utilmatrix[to_node] * np.exp(-beta2 * AcumUtil)
def node_util_callback(self, to_node, Pref, AcumUtil):
[beta1, beta2, beta3] = self.beta
return beta1 * np.dot(Pref, self.utilmatrix[to_node] * np.exp(-beta2 * AcumUtil)) + beta3 * self.dwellarray[
to_node]
def util_max_insert(self, o, d, tmax, must_node=None):
if must_node:
_path = [o, must_node, d]
else:
_path = [o, d]
# construct new path into cur_nop
distance, benefit = [], []
for _i in range(self.NodeNum):
cost = self.timematrix[o][_i] + self.timematrix[_i][d] + self.dwellarray[_i]
distance.append(cost)
bene = np.dot(Pref, self.utilmatrix[_i]) / cost
benefit.append(bene)
# index is sorted such that the first entry has smallest benefit for insertion (from o to d)
index = list(np.argsort(benefit))
# except for node_j
if must_node in index:
index.remove(must_node)
# check time limitation
available_nodes = [x for x in index if distance[x] <= tmax][::-1] # nodes with higher benefits at front
while available_nodes:
# try all available nodes, even if current node cannot be inserted due to big cost
cur_node = available_nodes.pop(0)
min_cost = 999999
for k in range(1, len(_path)): # available positions for insertion
# try all possible insertions
newpath = _path[:k] + [cur_node] + _path[k:]
newcost = self.time_callback(newpath)
if newcost < tmax and newcost < min_cost:
min_cost, bespos = newcost, k
nochange = min_cost == 999999
if not nochange:
_path = _path[:bespos] + [cur_node] + _path[bespos:]
return _path
def initialization(self, tmax, o, d):
# for the points within ellipse, insert onto paths with cheapest insertion cost while ignoring the scores.
distance = []
for _node in range(self.NodeNum):
distance.append(self.timematrix[o][_node] + self.timematrix[_node][d] + self.dwellarray[_node])
# index is sorted such that the first entry has smallest benefit for insertion (from o to d)
index = np.argsort(distance)
# check time limitation
available_nodes = [x for x in index if distance[x] <= tmax]
L = min(10, len(available_nodes))
# find L nodes with largest distance from start and end
if L < 1:
return None
# index is node indices with distances from smallest to largest
# build solutions. Reference: a fast and efficient heuristic for... Chao et al
solutions = []
path_op_set, path_nop_set = [], []
for l in range(L):
paths = [] # to store available paths (available nodes have to be on one of the paths)
# construct 1st path
cur_node_set = list(available_nodes)
cur_path = [o, cur_node_set.pop(-(l + 1)), d] # insert l-th largest node into the first path
no_improvement = 0 # either path full (time limit exceeded) or no available nodes to be inserted
while not no_improvement:
cur_cost = self.time_callback(cur_path) # regarding distance not score
best_node, best_pos, best_cost = -1, -1, 999999
for idx, node in enumerate(cur_node_set):
for pos in range(1, len(cur_path)): # check all positions on current path for insertion
_path = cur_path[:pos] + [node] + cur_path[pos:]
_cost = self.time_callback(_path) - cur_cost
if self.time_callback(_path) < tmax and _cost < best_cost:
best_node_idx, best_pos, best_cost = idx, pos, _cost
no_improvement = best_cost == 999999
if not no_improvement:
cur_path = cur_path[:best_pos] + [cur_node_set.pop(best_node_idx)] + cur_path[best_pos:]
paths.append(cur_path)
# other paths
# assign nodes to all paths
while cur_node_set:
cur_path = [o, cur_node_set.pop(0),
d] # cur_node_set is already sorted, the first node is with smallest distance from o to d
no_improvement = 0
while not no_improvement:
                    cur_cost = self.time_callback(cur_path)  # regarding distance not score
best_node, best_pos, best_cost = -1, -1, 999999
for idx, node in enumerate(cur_node_set):
for pos in range(1, len(cur_path)): # check all positions on current path for insertion
_path = cur_path[:pos] + [node] + cur_path[pos:]
                            _cost = self.time_callback(_path) - cur_cost
                            if self.time_callback(_path) < tmax and _cost < best_cost:
best_node_idx, best_pos, best_cost = idx, pos, _cost
no_improvement = best_cost == 999999
if not no_improvement:
cur_path = cur_path[:best_pos] + [cur_node_set.pop(best_node_idx)] + cur_path[best_pos:]
paths.append(cur_path)
# decide the solution path by choosing a path with largest total score among the paths
            score, solution = [self.eval_util(_path) for _path in paths], []
if score:
solution = paths.pop(np.argsort(score)[-1])
path_op_set.append(solution)
path_nop_set.append(paths)
# return best path_op and its path_nop set
best_op, best_nop = [], []
if path_op_set:
            score = [self.eval_util(_path) for _path in path_op_set]
best_op, best_nop = path_op_set[np.argsort(score)[-1]], path_nop_set[np.argsort(score)[-1]]
return best_op, best_nop
def two_point_exchange(self, path_op, path_nop, tmax):
TimeMatrix = self.timematrix
cur_op, cur_nop = list(path_op), list(path_nop)
a_loop_nodes = list(cur_op[1:-1])
# A loop
for idx, node_j in enumerate(a_loop_nodes): # first to the last point in path_op (except for o and d)
for debugger in cur_nop:
                if self.time_callback(debugger) > tmax:
raise LookupError('Path time over limit.')
cur_op_backup = cur_op.copy() # the point remain in current position if exchange results in a bad score
_ = cur_op[1:-1]
_.remove(node_j)
            cur_op = [cur_op[0]] + _ + [
                cur_op[-1]]  # o + attractions + d. Sometimes the origin or destination will also exist in attractions
# node_j is removed from cur_op
length = self.time_callback(cur_op)
found = 0 # Flag to indicate whether a candidate exchange leading to a higher total score is found.
# If found, the exchange is performed immediately, and all other exchanges are ignored.
            # B loop TODO: track the best path so it can still be revised within the deviation when no improving solution is found
# b_loop_records = []
# b_loop_scores = []
exchange_flag = 0
            best_path_idx, best_path, best_node, best_pos, best_score = -999999, [], -1, -1, -999999
for _path_idx, _path in enumerate(cur_nop):
if found == 1:
break
for index in range(1, len(_path) - 1):
node_i = _path[index]
# skip node_j and duplicate node
if node_i == node_j or node_i in cur_op: # avoid duplicate
continue
for pos in range(1, len(cur_op)):
# feasibility check
                        if TimeMatrix[cur_op[pos - 1]][node_i] + self.dwellarray[node_i] + TimeMatrix[node_i][cur_op[pos]] - \
TimeMatrix[cur_op[pos - 1]][cur_op[pos]] + length < tmax:
test_path = cur_op[:pos] + [node_i] + cur_op[pos:]
test_score = self.eval_util(test_path)
# find best insert position
if test_score >= best_score:
best_path_idx, best_path, best_node, best_pos, best_score = _path_idx, _path, node_i, pos, test_score
# do the exchange
if best_path: # found an insertion location indeed
# total score increase check
if best_score > record:
found = 1 # found an exchange that leads to a higher score
# exchange
cur_op = cur_op[:best_pos] + [best_node] + cur_op[best_pos:]
best_path.pop(index)
exchange_flag = 1
break
# b_loop_records.append([best_node, best_pos])
# b_loop_scores.append(best_score)
# b_loop ends
# if found no exchange, try exchanges between record and (record - deviation)
if found == 0:
if best_path:
test_path = cur_op[:best_pos] + [best_node] + cur_op[best_pos:]
                    test_score = self.eval_util(test_path)
else:
test_path, test_score = [], 0
if test_score >= record - deviation:
# exchange
# insert node_i onto cur_op
cur_op = cur_op[:best_pos] + [best_node] + cur_op[best_pos:]
# remove node_i from the best_path in path_nop
visits = list(best_path[1:-1])
visits.remove(best_node)
cur_nop[best_path_idx] = [best_path[0]] + visits + [best_path[-1]]
exchange_flag = 1
pass
# if found no exchange, cur_op remains the same
if not exchange_flag:
cur_op = cur_op_backup
# no removing nodes from path_nop
continue
# put node_j back into cur_nop
# criteria: minimum insertion cost
best_path_idx, best_path, best_pos, best_score = 999999, [], -1, 999999
for bp_idx, _path in enumerate(cur_nop):
if node_j in _path[1:-1]: # skip nodes that serve as origin or destination
raise LookupError('Duplicate nodes are not supposed to present! Debug please.')
# continue # avoid repetitive existence
for pos in range(1, len(_path)):
                    length = self.time_callback(_path)
# feasibility check
                    if TimeMatrix[_path[pos - 1]][node_j] + self.dwellarray[node_j] + TimeMatrix[node_j][_path[pos]] - \
                            TimeMatrix[_path[pos - 1]][_path[pos]] + length < tmax:
test_path = _path[:pos] + [node_j] + _path[pos:]
                        test_score = self.time_callback(test_path) - length
# find best insert position
if test_score <= best_score:
best_path_idx, best_path, best_pos, best_score = bp_idx, _path, pos, test_score
# do the exchange
if not best_score == 999999: # found an insertion location indeed
# TODO check if change is made inplace
cur_nop[best_path_idx] = best_path[:best_pos] + [node_j] + best_path[best_pos:]
else:
# construct new path into cur_nop
new_path = [path_op[0], node_j, path_op[-1]]
cur_nop.append(new_path)
# pick up best from both path_op and path_nop
solutions = [cur_op] + cur_nop
# DEBUG
best_score, best_path, best_index = -999999, [], -999999
for index, solution in enumerate(solutions):
if len(set(solution[1:-1])) < len(solution[1:-1]):
raise LookupError('Duplicate nodes in a path')
            cur_score = self.eval_util(solution)
if cur_score > best_score:
best_path, best_score, best_index = solution, cur_score, index
p_op = solutions.pop(best_index)
p_nop = solutions
return p_op, p_nop
def one_point_movement(path_op, path_nop):
# calculate points that are within ellipse
o, d = path_op[0], path_op[-1]
distance = []
for _node in range(NodeNum):
distance.append(TimeMatrix[o][_node] + TimeMatrix[_node][d] + DwellArray[_node])
# index is sorted such that the first entry has smallest benefit for insertion (from o to d)
index = np.argsort(distance)
# check time limitation
available_nodes = [x for x in index if distance[x] <= Tmax]
    paths = [path_op] + path_nop  # note: entries are shared list references, so changes here propagate to the source variables
for _node in available_nodes:
# pick out the current path that the node is on:
path_q = []
for _i, _path in enumerate(paths):
if _node in _path[1:-1]:
path_q = paths.pop(_i)
break
# movement
movement = 0
best_path_index, best_pos, best_score = -999999, -1, -999999
for path_index, _path in enumerate(paths):
for pos in range(1, len(_path)):
test_path = _path[:pos] + [_node] + _path[pos:]
# check feasibility:
                if solver.time_callback(test_path) < Tmax:  # solver is the IlsUtility instance created in __main__
                    test_score = solver.eval_util(test_path)
# check total score increase
                    # if test_score > eval_util(_path):  # TODO: does "total score" here mean each path's own score or the global record?
if test_score > record:
paths[path_index] = test_path # do movement
_ = path_q[1:-1]
_.remove(_node)
if len(_) == len(path_q[1:-1]):
raise LookupError('Remove not successful... not found node in current path?')
path_q, movement = [path_q[0]] + _ + [path_q[-1]], 1
                            # TODO: this may be an in-place reference; wrapping with list() before putting it back into paths should make it safe
paths = [list(path_q)] + paths
break
#
else:
if test_score > best_score:
best_path_index, best_pos, best_score = path_index, pos, test_score
if movement:
break
if movement == 0:
# check if the score of the best movement >= record - deviation
if best_score >= record - deviation:
# make movement
paths[best_path_index] = paths[best_path_index][:best_pos] + [_node] + paths[best_path_index][
best_pos:]
# delete current node on path_q
_ = path_q[1:-1]
_.remove(_node)
if len(_) == len(path_q[1:-1]):
raise LookupError('Remove not successful... not found node in current path?')
path_q, movement = [path_q[0]] + _ + [path_q[-1]], 1
paths = [list(path_q)] + paths
else:
paths = [list(path_q)] + paths # put path_q back if no movement
score = []
for _path in paths:
        score.append(solver.eval_util(_path))
path_op = paths.pop(np.argsort(score)[-1])
return path_op, paths
def two_opt(path_op):
best = list(path_op)
    _score = solver.eval_util(best)  # solver is the IlsUtility instance created in __main__
improved = True
while improved:
improved = False
for _i in range(1, len(path_op) - 2):
for j in range(_i + 1, len(path_op)):
if j - _i == 1:
continue
                # 2-opt gain: change in total travel time from reversing the segment best[_i:j]
                if TimeMatrix[best[_i - 1]][best[j - 1]] + TimeMatrix[best[_i]][best[j]] \
                        - TimeMatrix[best[_i - 1]][best[_i]] - TimeMatrix[best[j - 1]][best[j]] < -0.1:
best[_i:j] = best[j - 1:_i - 1:-1]
improved = True
    return best if solver.eval_util(best) > _score else path_op  # check improvement of utility
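# Slice-reversal example for two_opt above: with best = [0, 3, 1, 2, 4], _i = 1, j = 4,
# the assignment best[1:4] = best[3:0:-1] reverses the middle segment, giving [0, 2, 1, 3, 4].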
def reinitialization(path_op, path_nop, k):
if k < 1:
return path_op, path_nop
ratio = []
# visited = path_op[1:-1]
for _idx in range(1, len(path_op) - 1):
# si/costi
        gain = solver.node_util_callback(path_op[_idx], Pref, np.zeros([3]))
cost = TimeMatrix[path_op[_idx - 1]][path_op[_idx]] + DwellArray[path_op[_idx]] + TimeMatrix[path_op[_idx]][
path_op[_idx + 1]] - TimeMatrix[path_op[_idx - 1]][path_op[_idx + 1]]
ratio.append(gain / cost)
# ratio is benefit/insertion cost
nodes_sorted = np.argsort(ratio) # smaller nodes are assigned in front
remove_indices = nodes_sorted[:k]
# for _i, _node in enumerate(path_op):
path_op_new = [path_op[x] for x in range(len(path_op)) if x - 1 not in remove_indices]
for _k in range(k):
remove_node_idx = remove_indices[_k] + 1
# remove node from path_op
        node_j = path_op[remove_node_idx]  # TODO: path_op keeps shrinking, so popping by the original index would be unsafe
# put node_j back into path_nop
# criteria: minimum insertion cost
best_path_idx, best_path, best_pos, best_score = 999999, [], -1, 999999
for bp_idx, _path in enumerate(path_nop):
if node_j in _path[1:-1]: # skip nodes that serve as origin or destination
raise LookupError('Duplicate nodes are not supposed to present! Debug please.')
# continue # avoid repetitive existence
for pos in range(1, len(_path)):
            length = solver.time_callback(_path)
# feasibility check
if TimeMatrix[_path[pos - 1]][node_j] + DwellArray[node_j] + TimeMatrix[node_j][_path[pos]] - \
TimeMatrix[_path[pos - 1]][_path[pos]] + length < Tmax:
test_path = _path[:pos] + [node_j] + _path[pos:]
test_score = time_callback(test_path) - length
# find best insert position
if test_score <= best_score:
best_path_idx, best_path, best_pos, best_score = bp_idx, _path, pos, test_score
# do the exchange
if not best_score == 999999: # found an insertion location indeed
path_nop[best_path_idx] = best_path[:best_pos] + [node_j] + best_path[best_pos:]
else:
# construct new path into cur_nop
new_path = [path_op[0], node_j, path_op[-1]]
path_nop.append(new_path)
path_op = path_op_new
return path_op, path_nop
if __name__ == '__main__':
# %% Solver Setup
NodeNum = 37 # number of attractions. Origin and destination are excluded.
# UtilMatrix = 10 * np.random.rand(NodeNum, 3)
# UtilMatrix[0] = [0, 0, 0]
#
# TimeMatrix = 100 * np.random.rand(NodeNum, NodeNum)
# np.fill_diagonal(TimeMatrix, 0)
#
# CostMatrix = 20 * np.random.rand(NodeNum, NodeNum)
# np.fill_diagonal(CostMatrix, 0)
#
# DwellArray = 60 * np.random.rand(NodeNum)
alpha1, alpha2 = -0.05, -0.05
    beta1, beta2, beta3 = 1, 0.03, 0.08  # TODO: how should beta2 be determined?
phi = 0.1
Tmax = 500 # person specific time constraints
Origin, Destination = 0, 0
# %% save data
# pickle.dump(UtilMatrix, open('UtilMatrix.txt', 'wb'))
# pickle.dump(TimeMatrix, open('TimeMatrix.txt', 'wb'))
# pickle.dump(CostMatrix, open('CostMatrix.txt', 'wb'))
# pickle.dump(DwellArray, open('DwellArray.txt', 'wb'))
UtilMatrix = pickle.load(open('UtilMatrix.txt', 'rb'))
TimeMatrix = pickle.load(open('TimeMatrix.txt', 'rb'))
CostMatrix = pickle.load(open('CostMatrix.txt', 'rb'))
DwellArray = pickle.load(open('DwellArray.txt', 'rb'))
    solver = IlsUtility(NodeNum, [alpha1, alpha2], [beta1, beta2, beta3], phi,
                        UtilMatrix, TimeMatrix, CostMatrix, DwellArray)
    TimeMatrix = solver.timematrix = solver.modify_travel_time()
# %% start solver
# warning: total utility of a path must >= 0
Pref = np.array([0.5, 0.3, 0.2])
route = [0, 2, 4, 0]
    print('test %.2f \n' % solver.eval_util(route))
# initialization
    PathOp, PathNop = solver.initialization(Tmax, Origin, Destination)
print('Scores after initial insertion: \n')
    print('Optimal path score: {}, time: {}'.format(solver.eval_util(PathOp), solver.time_callback(PathOp)))
print(PathOp)
for i in PathNop:
        print('Non-optimal path score: {}, time: {}'.format(solver.eval_util(i), solver.time_callback(i)))
print(i)
    record, p = solver.eval_util(PathOp), 0.1
deviation = p * record
best_solution = PathOp.copy()
K = 3
for _K in range(K):
print('\nCurrent K loop number: {}'.format(_K))
for itr in range(4):
print('\nCurrent iteration: {}'.format(itr))
# two-point exchange
            Path_op, Path_nop = solver.two_point_exchange(PathOp, PathNop, Tmax)
visited = []
print('\nScores after two-point exchange: \n')
            score = solver.eval_util(Path_op)
            print('Optimal path score: {}, time: {}'.format(score, solver.time_callback(Path_op)))
print(Path_op)
visited.extend(Path_op[1:-1])
for i, path in enumerate(Path_nop):
visited.extend(path[1:-1])
                print('Current path number: {}, score as {}, time: {}'.format(i, solver.eval_util(path), solver.time_callback(path)))
print(path)
print('Number of attractions visited: {}, duplicate nodes: {}.'.format(len(visited),
len(visited) - len(set(visited))))
if score > record:
best_solution, record = list(Path_op), score
deviation = p * record
# one-point movement
Path_op, Path_nop = one_point_movement(Path_op, Path_nop)
visited = []
print('\nScores after one-point movement: \n')
            score = solver.eval_util(Path_op)
            print('Optimal path score: {}, time: {}'.format(score, solver.time_callback(Path_op)))
print(Path_op)
visited.extend(Path_op[1:-1])
if score > record:
best_solution, record = list(Path_op), score
deviation = p * record
for i, path in enumerate(Path_nop):
visited.extend(path[1:-1])
                print('Current path number: {}, score as {}, time: {}'.format(i, solver.eval_util(path), solver.time_callback(path)))
print(path)
print('Number of attractions visited: {}, duplicate nodes: {}.'.format(len(visited),
len(visited) - len(set(visited))))
# 2-opt (clean-up)
            print('\nPath length before 2-opt: {}, with score: {}'.format(solver.time_callback(Path_op), solver.eval_util(Path_op)))
Path_op_2 = two_opt(Path_op)
            cost_2_opt = solver.eval_util(Path_op_2)
            print('Path length after 2-opt: {}, with score: {}'.format(solver.time_callback(Path_op_2), cost_2_opt))
PathOp, PathNop = Path_op_2, Path_nop
# if no movement has been made, end I loop
if Path_op_2 == best_solution:
break
# if a new better solution has been obtained, then set new record and new deviation
if cost_2_opt > record:
best_solution, record = list(Path_op_2), cost_2_opt
deviation = p * record
# perform reinitialization
PathOp, PathNop = reinitialization(PathOp, PathNop, 3)
    print('\nBest solution score: {}, time: {} \nSolution: {}'.format(record, solver.time_callback(best_solution),
                                                                      best_solution))
| [
"[email protected]"
] | |
e859c41c360c09031092ca20ba7456f1a4d99789 | 63168b3cc1a8019583b331ebc8c4ec58c241753c | /ngraph/python/tests/test_onnx/test_zoo_models.py | dd2534600445cc7195cdfa606889c5cf8b590520 | [
"Apache-2.0"
] | permissive | generalova-kate/openvino | 2e14552ab9b1196fe35af63b5751a96d0138587a | 72fb7d207cb61fd5b9bb630ee8785881cc656b72 | refs/heads/master | 2023-08-09T20:39:03.377258 | 2021-09-07T09:43:33 | 2021-09-07T09:43:33 | 300,206,718 | 0 | 0 | Apache-2.0 | 2020-10-01T08:35:46 | 2020-10-01T08:35:45 | null | UTF-8 | Python | false | false | 9,766 | py | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import tests
from operator import itemgetter
from pathlib import Path
from typing import Sequence, Any
import numpy as np
from tests.test_onnx.utils import OpenVinoOnnxBackend
from tests.test_onnx.utils.model_importer import ModelImportRunner
from tests import (
xfail_issue_38701,
xfail_issue_43742,
xfail_issue_45457,
xfail_issue_37957,
xfail_issue_38084,
xfail_issue_39669,
xfail_issue_38726,
xfail_issue_37973,
xfail_issue_47430,
xfail_issue_47495,
xfail_issue_48145,
xfail_issue_48190,
xfail_issue_58676,
xfail_issue_onnx_models_140)
MODELS_ROOT_DIR = tests.MODEL_ZOO_DIR
def yolov3_post_processing(outputs : Sequence[Any]) -> Sequence[Any]:
concat_out_index = 2
# remove all elements with value -1 from yolonms_layer_1/concat_2:0 output
concat_out = outputs[concat_out_index][outputs[concat_out_index] != -1]
concat_out = np.expand_dims(concat_out, axis=0)
outputs[concat_out_index] = concat_out
return outputs
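# A minimal sketch of the -1 filtering used above, with a hypothetical padded
# NMS output: boolean masking flattens and drops the padding entries, and the
# result is then re-wrapped with a leading batch axis.
def _filter_padding_example():
    padded = np.array([[1, 0, 5], [-1, -1, -1]])
    kept = padded[padded != -1]          # -> array([1, 0, 5])
    return np.expand_dims(kept, axis=0)  # -> shape (1, 3)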
def tinyyolov3_post_processing(outputs : Sequence[Any]) -> Sequence[Any]:
concat_out_index = 2
# remove all elements with value -1 from yolonms_layer_1:1 output
concat_out = outputs[concat_out_index][outputs[concat_out_index] != -1]
concat_out = concat_out.reshape((outputs[concat_out_index].shape[0], -1, 3))
outputs[concat_out_index] = concat_out
return outputs
post_processing = {
"yolov3" : {"post_processing" : yolov3_post_processing},
"tinyyolov3" : {"post_processing" : tinyyolov3_post_processing},
"tiny-yolov3-11": {"post_processing": tinyyolov3_post_processing},
}
tolerance_map = {
"arcface_lresnet100e_opset8": {"atol": 0.001, "rtol": 0.001},
"fp16_inception_v1": {"atol": 0.001, "rtol": 0.001},
"mobilenet_opset7": {"atol": 0.001, "rtol": 0.001},
"resnet50_v2_opset7": {"atol": 0.001, "rtol": 0.001},
"test_mobilenetv2-1.0": {"atol": 0.001, "rtol": 0.001},
"test_resnet101v2": {"atol": 0.001, "rtol": 0.001},
"test_resnet18v2": {"atol": 0.001, "rtol": 0.001},
"test_resnet34v2": {"atol": 0.001, "rtol": 0.001},
"test_resnet50v2": {"atol": 0.001, "rtol": 0.001},
"mosaic": {"atol": 0.001, "rtol": 0.001},
"pointilism": {"atol": 0.001, "rtol": 0.001},
"rain_princess": {"atol": 0.001, "rtol": 0.001},
"udnie": {"atol": 0.001, "rtol": 0.001},
"candy": {"atol": 0.003, "rtol": 0.003},
"densenet-3": {"atol": 1e-7, "rtol": 0.0011},
"arcfaceresnet100-8": {"atol": 0.001, "rtol": 0.001},
"mobilenetv2-7": {"atol": 0.001, "rtol": 0.001},
"resnet101-v1-7": {"atol": 0.001, "rtol": 0.001},
"resnet101-v2-7": {"atol": 0.001, "rtol": 0.001},
"resnet152-v1-7": {"atol": 1e-7, "rtol": 0.003},
"resnet152-v2-7": {"atol": 0.001, "rtol": 0.001},
"resnet18-v1-7": {"atol": 0.001, "rtol": 0.001},
"resnet18-v2-7": {"atol": 0.001, "rtol": 0.001},
"resnet34-v2-7": {"atol": 0.001, "rtol": 0.001},
"vgg16-7": {"atol": 0.001, "rtol": 0.001},
"vgg19-bn-7": {"atol": 0.001, "rtol": 0.001},
"tinyyolov2-7": {"atol": 0.001, "rtol": 0.001},
"tinyyolov2-8": {"atol": 0.001, "rtol": 0.001},
"candy-8": {"atol": 0.001, "rtol": 0.001},
"candy-9": {"atol": 0.007, "rtol": 0.001},
"mosaic-8": {"atol": 0.003, "rtol": 0.001},
"mosaic-9": {"atol": 0.001, "rtol": 0.001},
"pointilism-8": {"atol": 0.001, "rtol": 0.001},
"pointilism-9": {"atol": 0.001, "rtol": 0.001},
"rain-princess-8": {"atol": 0.001, "rtol": 0.001},
"rain-princess-9": {"atol": 0.001, "rtol": 0.001},
"udnie-8": {"atol": 0.001, "rtol": 0.001},
"udnie-9": {"atol": 0.001, "rtol": 0.001},
"mxnet_arcface": {"atol": 1.5e-5, "rtol": 0.001},
"resnet100": {"atol": 1.5e-5, "rtol": 0.001},
"densenet121": {"atol": 1e-7, "rtol": 0.0011},
"resnet152v1": {"atol": 1e-7, "rtol": 0.003},
"test_shufflenetv2": {"atol": 1e-05, "rtol": 0.001},
"tiny_yolov2": {"atol": 1e-05, "rtol": 0.001},
"mobilenetv2-1": {"atol": 1e-04, "rtol": 0.001},
"resnet101v1": {"atol": 1e-04, "rtol": 0.001},
"resnet101v2": {"atol": 1e-06, "rtol": 0.001},
"resnet152v2": {"atol": 1e-05, "rtol": 0.001},
"resnet18v2": {"atol": 1e-05, "rtol": 0.001},
"resnet34v2": {"atol": 1e-05, "rtol": 0.001},
"vgg16": {"atol": 1e-05, "rtol": 0.001},
"vgg19-bn": {"atol": 1e-05, "rtol": 0.001},
"test_tiny_yolov2": {"atol": 1e-05, "rtol": 0.001},
"test_resnet152v2": {"atol": 1e-04, "rtol": 0.001},
"test_mobilenetv2-1": {"atol": 1e-04, "rtol": 0.001},
"yolov3": {"atol": 0.001, "rtol": 0.001},
"yolov4": {"atol": 1e-04, "rtol": 0.001},
"tinyyolov3": {"atol": 1e-04, "rtol": 0.001},
"tiny-yolov3-11": {"atol": 1e-04, "rtol": 0.001},
"GPT2": {"atol": 5e-06, "rtol": 0.01},
"GPT-2-LM-HEAD": {"atol": 4e-06},
"test_retinanet_resnet101": {"atol": 1.3e-06},
}
zoo_models = []
# rglob doesn't work for symlinks, so models have to be physically somewhere inside "MODELS_ROOT_DIR"
for path in Path(MODELS_ROOT_DIR).rglob("*.onnx"):
mdir = path.parent
file_name = path.name
if path.is_file() and not file_name.startswith("."):
model = {"model_name": path, "model_file": file_name, "dir": mdir}
basedir = mdir.stem
if basedir in tolerance_map:
            # the updated model entry now looks like:
# {"model_name": path, "model_file": file, "dir": mdir, "atol": ..., "rtol": ...}
model.update(tolerance_map[basedir])
if basedir in post_processing:
model.update(post_processing[basedir])
zoo_models.append(model)
if len(zoo_models) > 0:
zoo_models = sorted(zoo_models, key=itemgetter("model_name"))
# Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones.
OpenVinoOnnxBackend.backend_name = tests.BACKEND_NAME
# import all test cases at global scope to make them visible to pytest
backend_test = ModelImportRunner(OpenVinoOnnxBackend, zoo_models, __name__, MODELS_ROOT_DIR)
test_cases = backend_test.test_cases["OnnxBackendModelImportTest"]
# flake8: noqa: E501
if tests.MODEL_ZOO_XFAIL:
import_xfail_list = [
# ONNX Model Zoo
(xfail_issue_38701, "test_onnx_model_zoo_text_machine_comprehension_bidirectional_attention_flow_model_bidaf_9_bidaf_bidaf_cpu"),
(xfail_issue_43742, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"),
(xfail_issue_38726, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_decoder_with_lm_head_12_t5_decoder_with_lm_head_cpu"),
# Model MSFT
(xfail_issue_43742, "test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"),
(xfail_issue_37957, "test_MSFT_opset10_mask_rcnn_keras_mask_rcnn_keras_cpu"),
]
for test_case in import_xfail_list:
xfail, test_name = test_case
xfail(getattr(test_cases, test_name))
del test_cases
test_cases = backend_test.test_cases["OnnxBackendModelExecutionTest"]
if tests.MODEL_ZOO_XFAIL:
execution_xfail_list = [
# ONNX Model Zoo
(xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_encoder_12_t5_encoder_cpu"),
(xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_mask_rcnn_model_MaskRCNN_10_mask_rcnn_R_50_FPN_1x_cpu"),
(xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"),
(xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet50_11_fcn_resnet50_11_model_cpu"),
(xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet101_11_fcn_resnet101_11_model_cpu"),
(xfail_issue_48145, "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_8_download_sample_8_bertsquad8_cpu"),
(xfail_issue_48190, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"),
(xfail_issue_onnx_models_140, "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_ResNet101_DUC_HDC_cpu"),
# Model MSFT
(xfail_issue_37973, "test_MSFT_opset7_tf_inception_v2_model_cpu"),
(xfail_issue_37973, "test_MSFT_opset8_tf_inception_v2_model_cpu"),
(xfail_issue_37973, "test_MSFT_opset9_tf_inception_v2_model_cpu"),
(xfail_issue_37973, "test_MSFT_opset11_tf_inception_v2_model_cpu"),
(xfail_issue_37973, "test_MSFT_opset10_tf_inception_v2_model_cpu"),
(xfail_issue_58676, "test_MSFT_opset7_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"),
(xfail_issue_58676, "test_MSFT_opset8_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"),
(xfail_issue_38084, "test_MSFT_opset10_mask_rcnn_mask_rcnn_R_50_FPN_1x_cpu"),
(xfail_issue_38084, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"),
(xfail_issue_39669, "test_MSFT_opset9_cgan_cgan_cpu"),
(xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"),
(xfail_issue_45457, "test_MSFT_opset10_mlperf_ssd_resnet34_1200_ssd_resnet34_mAP_20.2_cpu"),
]
for test_case in import_xfail_list + execution_xfail_list:
xfail, test_name = test_case
xfail(getattr(test_cases, test_name))
del test_cases
globals().update(backend_test.enable_report().test_cases)
| [
"[email protected]"
] | |
8c06bb5e519ec1686a774be59d66a0b85a8aa078 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/azure/ai/ml/entities/_compute/synapsespark_compute.py | 91246ce9f13fa7101e3c320b4f62ab2e90df2b91 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 9,495 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import Dict, Optional
from azure.ai.ml._restclient.v2022_10_01_preview.models import (
AutoPauseProperties,
AutoScaleProperties,
ComputeResource,
SynapseSpark,
)
from azure.ai.ml._schema.compute.synapsespark_compute import SynapseSparkComputeSchema
from azure.ai.ml._utils._experimental import experimental
from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, TYPE
from azure.ai.ml.constants._compute import ComputeType
from azure.ai.ml.entities import Compute
from azure.ai.ml.entities._credentials import IdentityConfiguration
from azure.ai.ml.entities._util import load_from_dict
class AutoScaleSettings:
"""Auto-scale settings for Synapse Spark compute.
:keyword min_node_count: The minimum compute node count.
:paramtype min_node_count: Optional[int]
:keyword max_node_count: The maximum compute node count.
:paramtype max_node_count: Optional[int]
:keyword enabled: Specifies if auto-scale is enabled.
:paramtype enabled: Optional[bool]
.. admonition:: Example:
.. literalinclude:: ../../../../../samples/ml_samples_spark_configurations.py
:start-after: [START synapse_spark_compute_configuration]
:end-before: [END synapse_spark_compute_configuration]
:language: python
:dedent: 8
:caption: Configuring AutoScaleSettings on SynapseSparkCompute.
"""
def __init__(
self,
*,
min_node_count: Optional[int] = None,
max_node_count: Optional[int] = None,
enabled: Optional[bool] = None,
) -> None:
self.min_node_count = min_node_count
self.max_node_count = max_node_count
self.auto_scale_enabled = enabled
def _to_auto_scale_settings(self) -> AutoScaleProperties:
return AutoScaleProperties(
min_node_count=self.min_node_count,
max_node_count=self.max_node_count,
auto_scale_enabled=self.auto_scale_enabled,
)
@classmethod
def _from_auto_scale_settings(cls, autoscaleprops: AutoScaleProperties) -> "AutoScaleSettings":
return cls(
min_node_count=autoscaleprops.min_node_count,
max_node_count=autoscaleprops.max_node_count,
enabled=autoscaleprops.enabled,
)
class AutoPauseSettings:
"""Auto pause settings for Synapse Spark compute.
:keyword delay_in_minutes: The time delay in minutes before pausing cluster.
:paramtype delay_in_minutes: Optional[int]
:keyword enabled: Specifies if auto-pause is enabled.
:paramtype enabled: Optional[bool]
.. admonition:: Example:
.. literalinclude:: ../../../../../samples/ml_samples_spark_configurations.py
:start-after: [START synapse_spark_compute_configuration]
:end-before: [END synapse_spark_compute_configuration]
:language: python
:dedent: 8
:caption: Configuring AutoPauseSettings on SynapseSparkCompute.
"""
def __init__(self, *, delay_in_minutes: Optional[int] = None, enabled: Optional[bool] = None) -> None:
self.delay_in_minutes = delay_in_minutes
self.auto_pause_enabled = enabled
def _to_auto_pause_settings(self) -> AutoPauseProperties:
return AutoPauseProperties(
delay_in_minutes=self.delay_in_minutes,
auto_pause_enabled=self.auto_pause_enabled,
)
@classmethod
def _from_auto_pause_settings(cls, autopauseprops: AutoPauseProperties) -> "AutoPauseSettings":
return cls(
delay_in_minutes=autopauseprops.delay_in_minutes,
enabled=autopauseprops.enabled,
)
@experimental
class SynapseSparkCompute(Compute):
"""SynapseSpark Compute resource.
:keyword name: The name of the compute.
:paramtype name: str
:keyword description: The description of the resource. Defaults to None.
:paramtype description: Optional[str]
:keyword tags: The set of resource tags defined as key/value pairs. Defaults to None.
:paramtype tags: Optional[[dict[str, str]]
:keyword node_count: The number of nodes in the compute.
:paramtype node_count: Optional[int]
:keyword node_family: The node family of the compute.
:paramtype node_family: Optional[str]
:keyword node_size: The size of the node.
:paramtype node_size: Optional[str]
:keyword spark_version: The version of Spark to use.
:paramtype spark_version: Optional[str]
:keyword identity: The configuration of identities that are associated with the compute cluster.
:paramtype identity: Optional[~azure.ai.ml.entities.IdentityConfiguration]
:keyword scale_settings: The scale settings for the compute.
:paramtype scale_settings: Optional[~azure.ai.ml.entities.AutoScaleSettings]
:keyword auto_pause_settings: The auto pause settings for the compute.
:paramtype auto_pause_settings: Optional[~azure.ai.ml.entities.AutoPauseSettings]
:keyword kwargs: Additional keyword arguments passed to the parent class.
:paramtype kwargs: Optional[dict]
.. admonition:: Example:
.. literalinclude:: ../../../../../samples/ml_samples_spark_configurations.py
:start-after: [START synapse_spark_compute_configuration]
:end-before: [END synapse_spark_compute_configuration]
:language: python
:dedent: 8
:caption: Creating Synapse Spark compute.
"""
def __init__(
self,
*,
name: str,
description: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
node_count: Optional[int] = None,
node_family: Optional[str] = None,
node_size: Optional[str] = None,
spark_version: Optional[str] = None,
identity: Optional[IdentityConfiguration] = None,
scale_settings: Optional[AutoScaleSettings] = None,
auto_pause_settings: Optional[AutoPauseSettings] = None,
**kwargs,
) -> None:
kwargs[TYPE] = ComputeType.SYNAPSESPARK
super().__init__(name=name, description=description, location=kwargs.pop("location", None), tags=tags, **kwargs)
self.identity = identity
self.node_count = node_count
self.node_family = node_family
self.node_size = node_size
self.spark_version = spark_version
self.scale_settings = scale_settings
self.auto_pause_settings = auto_pause_settings
@classmethod
def _load_from_rest(cls, rest_obj: ComputeResource) -> "SynapseSparkCompute":
prop = rest_obj.properties
scale_settings = (
# pylint: disable=protected-access
AutoScaleSettings._from_auto_scale_settings(prop.properties.auto_scale_properties)
if prop.properties.auto_scale_properties
else None
)
auto_pause_settings = (
# pylint: disable=protected-access
AutoPauseSettings._from_auto_pause_settings(prop.properties.auto_pause_properties)
if prop.properties.auto_pause_properties
else None
)
return SynapseSparkCompute(
name=rest_obj.name,
id=rest_obj.id,
description=prop.description,
location=rest_obj.location,
resource_id=prop.resource_id,
tags=rest_obj.tags if rest_obj.tags else None,
created_on=prop.created_on if prop.properties else None,
node_count=prop.properties.node_count if prop.properties else None,
node_family=prop.properties.node_size_family if prop.properties else None,
node_size=prop.properties.node_size if prop.properties else None,
spark_version=prop.properties.spark_version if prop.properties else None,
# pylint: disable=protected-access
identity=IdentityConfiguration._from_compute_rest_object(rest_obj.identity) if rest_obj.identity else None,
scale_settings=scale_settings,
auto_pause_settings=auto_pause_settings,
provisioning_state=prop.provisioning_state,
provisioning_errors=prop.provisioning_errors[0].error.code
if (prop.provisioning_errors and len(prop.provisioning_errors) > 0)
else None,
)
def _to_dict(self) -> Dict:
# pylint: disable=no-member
return SynapseSparkComputeSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
@classmethod
def _load_from_dict(cls, data: Dict, context: Dict, **kwargs) -> "SynapseSparkCompute":
loaded_data = load_from_dict(SynapseSparkComputeSchema, data, context, **kwargs)
return SynapseSparkCompute(**loaded_data)
def _to_rest_object(self) -> ComputeResource:
synapsespark_comp = SynapseSpark(
name=self.name,
compute_type=self.type,
resource_id=self.resource_id,
description=self.description,
)
return ComputeResource(
location=self.location,
properties=synapsespark_comp,
name=self.name,
identity=(
# pylint: disable=protected-access
self.identity._to_compute_rest_object()
if self.identity
else None
),
tags=self.tags,
)
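
# Hedged usage sketch (not from the SDK docs; the values below are assumptions,
# only the keyword names come from the __init__ signature above):
# compute = SynapseSparkCompute(
#     name="my-synapse-spark",
#     node_count=3,
#     node_family="MemoryOptimized",
#     node_size="Small",
#     spark_version="3.2",
# )
# ml_client.compute.begin_create_or_update(compute)  # assumes an authenticated MLClient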

# --- repo: yaena1223/BAEKJOON- | path: /for문/for문 ) 빠른 A+B.py | license: none ---
import sys
# BOJ "fast A+B": sys.stdin.readline() is used instead of input() for speed.
sums = []
num = int(input())
for i in range(num):
    a, b = map(int, sys.stdin.readline().split())
    sums.append(a+b)
for i in range(num):
    print(sums[i])
"[email protected]"
] | |
378fed1855d570a576642186d5941fdcf0477fcc | bd0959ad5d0d6cf57acaaf3785cf78e92fcf36af | /ScriptingLab/PyCol/changeString.py | 023b9e9de4289b4cfccf55044efa11260289a160 | [] | no_license | sach999/ScriptingLab | 3b39080d89c2feeddd6db7cd98e0290fef3e3d47 | 4ff340b65337e4c228ed9cb906605e8860a6dd47 | refs/heads/master | 2020-07-20T07:14:20.635313 | 2019-12-22T16:50:30 | 2019-12-22T16:50:30 | 206,596,385 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | def ChangeStr(str1):
    # shift each lowercase letter one place forward in the alphabet ('z' wraps to 'a')
temp=""
temp1=""
l=len(str1)
for i in range(0,l):
ch=str1[i]
if ch>="a" and ch<="z":
if(ch=='z'):
temp=temp+"a"
else:
ch1=ord(ch)
ch1=ch1+1
temp=temp+chr(ch1)
else:
temp=temp+ch
print(temp)
for i in range(0,l):
ch2=temp[i]
if (ch2=="a" or ch2=="e" or ch2=="i" or ch2=="o" or ch2=="u"):
s=ord(ch2)
s=s-32
temp1=temp1+chr(s)
else:
temp1=temp1+ch2
    print(temp1)

ChangeStr(input("Enter the string:"))
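# Worked example (hypothetical run): input "hello" -> shifted to "ifmmp",
# then vowels uppercased -> "Ifmmp"; the function prints both strings.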

# --- repo: TomekPulkiewicz/pywebby | path: /setup.py | license: MIT ---
from setuptools import setup, find_packages
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
LONG_DESCRIPTION = "\n" + fh.read()
VERSION = '0.0.2'
DESCRIPTION = 'An open-source python WebFramework.'
# Setting up
setup(
name="pywebby",
version=VERSION,
author="Tomek Pulkiewicz",
author_email="[email protected]",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[],
keywords=['python', 'web', 'web framework','sockets'],
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)

# --- repo: kunwardeepak/2020-interns | path: /feature_1_2.py | license: none ---
# Python 3.7.4
from tkinter import *
import json
import datetime
import turtle
import urllib.request
import random
def reset():
pass
c = (0,0,0)
def drawBar(t, height,old,date):
""" Get turtle t to draw one bar, of height. """
global c
try:
if c[0]<220 and c[1]<220 and c[2]<220:
r, g, b = c
r += 30
g += 30
b += 30
c = (r, g, b)
else:
try:
c = (random.randint(0,220),random.randint(0,220),random.randint(0,220))
except:
pass
except:
pass
# start drawing this shape
t.begin_fill()
t.right(180)
t.backward(12)
t.color('black')
t.write(date,font=("Arial", 8, "normal"),align='left')
t.color(c)
t.forward(10)
t.left(90)
t.backward(0)
t.left(90)
t.left(90)
t.color("black")
t.forward((old))
t.color('black')
t.forward(((height-old)))
t.right(90)
t.color('blue')
t.pensize(1)
t.forward(13)
t.color("red")
t.forward(13)
t.color('blue')
t.write(format(height), font=("Arial", 6, "normal"),align='center')
t.color('blue')
t.forward(13)
t.color(c)
t.right(90)
t.forward((height))
t.left(90)
def plot():
if month.get()=='select month':
print('Please select month')
return
if cur.get()=='select':
print('Please select currency')
return
if len(year_entry.get())>4 or int(year_entry.get())<2009:
print('year is not valid')
return
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July','August', 'September', 'October', 'November', 'December']
try:
yy=year_entry.get()
mm=months.index(month.get())+1
if int(mm)<10:
mm=str('0'+str(mm))
ss='1'
if int(mm)==2:
ee='26'
if int(mm)%2!=0 or int(mm)==1:
ee=('31')
if int(mm)%2==0 and int(mm)!=2:
ee=('30')
startdate=(str(yy)+'-'+str(mm)+'-'+ss)
enddate=(str(yy)+'-'+str(mm)+'-'+ee)
info['text']='Please wait fetching data from site....'
url='https://api.exchangeratesapi.io/history?start_at='+str(startdate)+'&end_at='+str(enddate)
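        # Assumed response shape (inferred from the parsing below, not from the
        # API docs): {"rates": {"2019-01-02": {"USD": 1.154, "INR": 79.65, ...},
        #             ...}, "start_at": "...", "base": "EUR", "end_at": "..."}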
try:
print('Please wait fetching data from site....')
x = urllib.request.urlopen(url)
            print('Data received successfully')
            info['text']='Data received successfully'
data = json.loads(x.read())
except:
data=json.load(open('data.json'))
d={}
d1=1
dates=[]
rupees=[]
while d1<10:
for k,v in data.items():
for k1 in v:
if k1==str(yy)+'-'+str(mm)+'-0'+str(d1):
dates.append(k1)
d=v[k1]
for (k2,v2) in d.items():
if k2==str(cur.get()):
rupees.append(v2)
d1+=1
while d1<32:
for p_id, p_info in data.items():
for key in p_info:
if key==str(yy)+'-'+str(mm)+'-'+str(d1):
dates.append(key)
d=p_info[key]
for (k,v) in d.items():
if k==str(cur.get()):
rupees.append(v)
d1+=1
date1=[]
for x in dates:
a=x.split('-')
y=a[0]
m=a[1]
dates=a[2]
date1.append(int(dates))
date=date1
xs=rupees
print(date)
print(xs)
try:
turtle.reset()
except:
pass
try:
turtle.clear()
except:
pass
maxheight = max(xs)
numbars = len(xs)
border = 10
wn = turtle.Screen() # Set up the window and its attributes
wn.setworldcoordinates(0-border, 0-border, 40*numbars+border, maxheight+border)
wn.bgcolor("white")
tess = turtle.Turtle()
tess.speed('fastest')
turtle.colormode(255)
tess.pensize(3)
tess.sety(-0.0125)
tess.color("green")
m=max(xs)
tess.left(90)
tess.backward(3)
tess.write(' x,y')
tess.forward(3)
tess.forward(m+3)
tess.write(cur.get()+' exchange rate against EUR')
tess.backward(m+3)
tess.right(90)
tess.backward(20)
tess.forward(20)
tess.forward((len(xs)*39)+30)
tess.right(90)
tess.color('white')
tess.forward(3)
tess.color('green')
tess.write('Date',align='right')
tess.color('white')
tess.backward(3)
tess.color('green')
tess.left(90)
tess.forward(40)
tess.backward((len(xs)*39)+70)
tess.forward(((len(xs)*39)-100)/2)
tess.right(90)
tess.color("white")
tess.forward(5)
tess.color("blue")
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July','August', 'September', 'October', 'November', 'December']
mm=months.index(month.get())+1
if int(mm)<10:
mm=str('0'+str(mm))
ss='1'
if int(mm)==2:
ee='26'
if int(mm)%2!=0 or int(mm)==1:
ee=('31')
if int(mm)%2==0 and int(mm)!=2:
ee=('30')
tess.write(" 1 "+month.get()+" "+year_entry.get()+" to "+ee+ " " +month.get()+" "+year_entry.get(),font=('Arial',10,'normal'))
tess.color("white")
tess.backward(5)
tess.color("green")
tess.left(90)
tess.backward(((len(xs)*39)-100)/2)
old=0
i=0
for a in xs:
drawBar(tess, a,old,str(date[i]))
old=a
i+=1
wn.exitonclick()
except Exception as e:
print(e)
        info['text']='Oops! Try again (check internet connection / no data found)'
app = Tk()
app.title('Stats')
app.geometry('900x550')
info = Label(app, text='Data source (https://api.exchangeratesapi.io)', font=('bold', 10),pady=20)
info.grid(row=50, column=2)
start_txt = StringVar()
year_txt = StringVar()
year_label = Label(app, text='Year', font=('bold', 12),pady=20)
year_label.grid(row=0, column=1)
year_label1 = Label(app, text='(above 2008)', font=('Arial', 8),pady=20)
year_label1.grid(row=0, column=3)
year_entry =Entry(app,textvariable =year_txt)
year_entry.grid(row =0 ,column=2)
month = StringVar()
month.set('select month')
month_list = OptionMenu(app ,month, 'January', 'February', 'March', 'April', 'May', 'June', 'July','August', 'September', 'October', 'November', 'December')
month_list.grid(row =0 ,column=0)
select_label = Label(app, text=' Select currency : ', font=('bold', 12),pady=20)
select_label.grid(row=10 , column=0)
cur = StringVar()
cur.set('select')
cur_list = OptionMenu(app ,cur, 'CAD','HKD','ISK','PHP','DKD','HUF','CZK','AUD','RON','SEK','IDR','INR','BRL','RUB','HRK','JPY','THB','CHF','SGD','PLN','BGN','TRY','CNY','NOK','NZD','ZAR','USD','MXN','ILS','GBP','KRW','MYR')
cur_list.grid(row =10 ,column=1)
submit_btn = Button(app, text='Submit',width = 12,command =plot)
submit_btn.grid(row=10,column=2)
reset_btn = Button(app, text='Reset',width = 12,command =reset)
reset_btn.grid(row=10,column=3)
app.mainloop()

# --- repo: ezosa/topic-aware-moderation | path: /models/LSTMEnsembleEncoder.py | license: none ---
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from models.MLP import MLP
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LSTMEncoder(nn.Module):
def __init__(self, lstm_args, mlp_args):
super(LSTMEncoder, self).__init__()
# embedding layer
self.embedding_dim = lstm_args['emb_dim']
self.embedding = nn.Embedding(lstm_args['vocab_size'], self.embedding_dim)
# initialize with pretrained word emb if provided
if 'pretrained_emb' in lstm_args:
self.embedding.weight.data.copy_(lstm_args['pretrained_emb'])
# bi-LSTM layer
self.hidden_dim = lstm_args['hidden_dim']
self.input_dim = lstm_args['emb_dim'] + lstm_args['num_topics']
self.lstm = nn.LSTM(input_size=self.input_dim,
hidden_size=self.hidden_dim,
num_layers=1,
batch_first=True,
bidirectional=True)
self.drop = nn.Dropout(p=0.5)
# MLP classifier
mlp_input_size = int(self.hidden_dim * 2)
self.mlp = MLP(mlp_input_size,
mlp_args['hidden_size'])
def forward(self, text, text_len, topics):
text_emb = self.embedding(text)
doc_size = text_emb.shape[1]
topic_input = topics.unsqueeze(1).repeat(1, doc_size, 1)
lstm_input_emb = torch.cat((text_emb, topic_input), dim=2)
lstm_input_len = text_len
# packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
packed_input = pack_padded_sequence(lstm_input_emb, lstm_input_len, batch_first=True, enforce_sorted=False)
packed_output, _ = self.lstm(packed_input)
output, _ = pad_packed_sequence(packed_output, batch_first=True)
out_forward = output[range(len(output)), text_len - 1, :self.hidden_dim]
out_reverse = output[:, 0, self.hidden_dim:]
out_reduced = torch.cat((out_forward, out_reverse), 1)
text_fea = self.drop(out_reduced)
mlp_output = self.mlp(text_fea)
return mlp_output
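# Hedged usage sketch (shapes and values below are assumptions, not from the repo;
# only the dict keys match those read in __init__ above):
# lstm_args = {"vocab_size": 30000, "emb_dim": 300, "hidden_dim": 256, "num_topics": 50}
# mlp_args = {"hidden_size": 128}
# model = LSTMEncoder(lstm_args, mlp_args).to(device)
# logits = model(text_ids, text_lengths, topic_vectors)  # topics: (batch, num_topics)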

# --- repo: marmiksp/First-Git-Based-Project | path: /file_python1.py | license: none ---
# This program copies files (.txt, .jpg, etc.) from a source
# directory to a destination directory, skipping sub-folders.
import os
import shutil
src = 'C:/Users/MSP/Desktop/A'
dest = 'C:/Users/MSP/Desktop/B'
filess = os.listdir(src)
print("A -> ",filess)
filesd = os.listdir(dest)
print("B -> ",filesd)
os.chdir(src)
#for fil in filess:              # copying blindly fails on sub-folders,
#    shutil.copy(fil, dest)      # so filter with os.path.isfile() instead:
for fil in filess:
if os.path.isfile(fil):
        shutil.copy(fil, dest)

# --- repo: srikanthch95/MYAPI | path: /SrikanthFTL/SrikanthFTL/wsgi.py | license: none ---
"""
WSGI config for SrikanthFTL project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SrikanthFTL.settings')
application = get_wsgi_application()
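# Typical deployment sketch (assumption, not part of this repo): point any WSGI
# server at this module, e.g. `gunicorn SrikanthFTL.wsgi:application`.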

# --- repo: ArtemMonk/Homework2 | path: /Exercize3.py | license: none ---
# 3. The user enters a month as an integer from 1 to 12.
# Report which season the month belongs to (winter, spring, summer, autumn).
# Write one solution using a list and one using a dict.
seasons = ((1, 'Winter'), (2, 'Winter'), (3, 'Spring'), (4, 'Spring'), (5, 'Spring'), (6, 'Summer'), (7, 'Summer'), (8, 'Summer'), (9, 'Autumn'), (10, 'Autumn'), (11, 'Autumn'), (12, 'Winter'))
seasons_dict = dict(seasons)
month = int(input("Enter the month number: "))
if month in seasons_dict:
    print("This month belongs to", seasons_dict[month])
else:
    print('No such month')
#print(seasons_dict)

# --- repo: Hemie143/ZenPacks.community.bridge | path: /ZenPacks/community/bridge/BridgeDevice.py | license: none ---
########################################################################################################################
# BridgeDevice
########################################################################################################################
from Globals import InitializeClass
from Products.ZenRelations.RelSchema import *
from Products.ZenModel.Device import Device
# from Products.ZenModel.ZenossSecurity import ZEN_VIEW
from copy import deepcopy
class BridgeDevice(Device):
# Bridge Device
_relations = Device._relations + (
('BridgeInt', ToManyCont(ToOne, 'ZenPacks.community.bridge.BridgeInterface', 'BridgeDev')),
)
factory_type_information = deepcopy(Device.factory_type_information)
def __init__(self, *args, **kw):
Device.__init__(self, *args, **kw)
self.buildRelations()
InitializeClass(BridgeDevice)

# --- repo: sameem420/iProperty | path: /propertylisting/proplistapp/models.py | license: MIT ---
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
phone_number = models.CharField(max_length=20, blank=True, default='')
profile_picture = models.ImageField(upload_to="profile_images/", blank=True, default='profile_images//default.png')
city = models.CharField(max_length=100, default='', blank=True)
country = models.CharField(max_length=100, default='', blank=True)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.userprofile.save()
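# Net effect of the two receivers above: creating a User (e.g. via
# User.objects.create_user(...)) transparently creates and saves a matching
# UserProfile row, so views never have to manage profiles explicitly.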
class PostAd(models.Model):
address = models.CharField(max_length=50)
rooms = models.IntegerField()
bathrooms = models.IntegerField()
house_images = models.ImageField(upload_to='images/')
uploaded_at = models.DateTimeField(auto_now_add=True)

# --- repo: kmandrus/fibonacci-examples | path: /seed_packing.py | license: none ---
import plotly.graph_objects as go
import math
# Golden-ratio conjugate: each new seed is rotated by this fraction of a full turn.
phi = (math.sqrt(5) - 1) / 2
# sqrt(n) radial spacing keeps the seed density roughly uniform (Vogel's model).
radii = [math.sqrt(x) for x in range(100)]
angles = [x * phi * 360 for x in range(100)]
fig = go.Figure(data=
go.Scatterpolar(
r=radii,
theta=angles,
mode = 'markers',
))
fig.update_layout(showlegend=False)
fig.show() | [
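# Note: stepping by phi of a turn (~222.5°) traces the same spiral, traversed
# the other way, as the familiar ~137.5° "golden angle" (360° - 222.5°).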
"[email protected]"
] | |
2f60fcf9674263e0fe4727a8818e310e48a2e8e3 | 55f43ddf80f995f17123f34ac69b9360d9a0e4cc | /tiktok-master/base_user/models.py | 443c0d9c5f942a105af0ce4d71ddd9f8b18fe841 | [] | no_license | Orxan014/Tik_Tok | 36d237e6462185e8c1251acde4f27044f4c2aab9 | 08969fb0ce9b8deadfbe3fc11aeebeb03daa4b53 | refs/heads/master | 2022-02-10T03:15:49.863792 | 2019-06-29T12:25:46 | 2019-06-29T12:25:46 | 194,402,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,375 | py | from django.db import models
from django.utils import timezone
from django.conf import settings
from django.core import validators
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from base_user.tools.common import get_user_profile_photo_file_name, GENDER
from oscar.apps.wishlists.models import WishList, Line
USER_MODEL = settings.AUTH_USER_MODEL
# Customize User model
class MyUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=100, unique=True,
help_text=_('Tələb olunur. 75 simvol və ya az. Hərflər, Rəqəmlər və '
'@/./+/-/_ simvollar.'),
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _('Düzgün istifadəçi adı daxil edin.'),
'yanlışdır')
])
first_name = models.CharField(_('first name'), max_length=255, blank=True)
last_name = models.CharField(_('last name'), max_length=255, blank=True)
email = models.EmailField(_('email address'), max_length=255)
profile_picture = models.ImageField(upload_to=get_user_profile_photo_file_name, null=True, blank=True)
gender = models.IntegerField(choices=GENDER, verbose_name="cinsi", null=True, blank=True)
place = models.IntegerField(default=0)
is_play = models.BooleanField(default=False)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
"""
Important non-field stuff
"""
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = 'İstifadəçi'
verbose_name_plural = 'İstifadəçilər'
def get_wishlist_count(self):
wish = WishList.objects.filter(owner=self).last()
if wish:
return Line.objects.filter(wishlist=wish).count()
else:
return 0
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""
Returns the short name for the user.
"""
return self.first_name
def get_avatar(self):
if self.profile_picture:
return self.profile_picture.url
else:
return "https://graph.facebook.com/%s/picture?type=large" % '100002461198950'
| [
"[email protected]"
] | |
16dd280bdb3aeae7c20e3fc516cfa46616cc7c11 | ecd1efa7d985f9f168cb71a8b385621269d5a2a5 | /threading_test.py | 01667dd9d6ff0e92c706c563195b6849a790e199 | [] | no_license | dtward/pylddmm | 90c1f1b7537831d0dce57918895116ab0a0350d4 | 41da878fd0447bb60d19e54775e08f32bc5537a4 | refs/heads/master | 2021-08-23T03:01:31.195450 | 2017-12-02T19:45:39 | 2017-12-02T19:45:39 | 109,031,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 28 08:56:54 2017
@author: dtward
"""
from multiprocessing import Pool
from multiprocessing import Process
import os
import time
def f(x):
return x*x
data = [1,2,3,4,5]
n = len(data)
p = Pool(n)
out = p.map(f,data)
print(out)
def info(title):
print title
print 'module name:', __name__
if hasattr(os, 'getppid'): # only available on Unix
print 'parent process:', os.getppid()
print 'process id:', os.getpid()
def f(name):
info('function f')
print 'hello', name
info('main line')
p = Process(target=f, args=('bob',))
p.start()
p.join()
def do_nothing(x,test=1):
print('hi')
return x
def multiply(x,y):
x = do_nothing(x)
return x*y
def multiply_tuple(x):
return multiply(*x)
p = Pool(2)
out = p.map(multiply_tuple,((5.0,6.0),(7,8),(8,9)))
print(out)
workers = p._pool
print(workers)
print([w.is_alive() for w in workers])
p.close()
p.join()
p = None
DELTA = 0.1
time.sleep(DELTA*2)
print(workers)
print([w.is_alive() for w in workers])
p = Pool(2)
out = p.map(multiply_tuple,((5.0,6.0),(7,8),(8,9)))
| [
"[email protected]"
] | |
158d0868e5c8cc7e84b71ac90477cfc0929d2c76 | 8538d36b6aae027aa4f2a6eb5bacb33927f3ae3f | /Main.py | 95eec8672ba40ffac23557ff743645d3e292f071 | [] | no_license | jordi1307/exam_extra_iPY | 1b14971efcd2f9dab90114fa9a252a3824887a45 | d1b95da47c9d703178491f9670ff493c0741f834 | refs/heads/master | 2016-09-06T17:59:03.019642 | 2014-06-13T16:45:39 | 2014-06-13T16:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,162 | py | __author__ = 'jor'
import datetime
import re
def main():
#nif=input("Cliente Identifiquese ,con su nif")
nif="47179315J"
if nif=="":
print("se creara un nuebo cliente:")
nombre=input("Nombre: ")
apellido=input("Apellido: ")
nifcli=input("Nif: ")
clien=cliente(nombre,apellido,nifcli)
clien.seif()
else:
cli=""
print(nif)
with open('clientes.txt', mode='r',encoding ='utf-8') as clientes:
aux2=clientes.read()
aux=aux2.split(";")
clien=cliente(aux[0],aux[1],aux[2])
# print("buscan")
#print(clien.getNif())
#if clien.getNif() ==nif.strip():
# print("hola")
cli=clien
cli.Alquilar()
class cliente:
nombre=""
apellido=""
nif=""
def __init__(self,nombre,apellido,nif):
self.nombre=nombre
self.apellido=apellido
self.nif=nif
def getNif(self):
return self.nif
def getNombre(self):
return self.nombre
def getApellido(self):
return self.apellido
def Alquilar(self):#1.2.1
matricula=input("inserte matricula del coche a alquilar")
with open('veiculos.txt', mode='r',encoding ='utf-8') as veiculos:
for veiculo in veiculos:
print(veiculo)
veic=veiculo.split(";", 5)
for a in veic:
print(a)
cochet=coche(veic[0],veic[1],veic[2],veic[3],veic[4])
print(cochet.getDisponible(matricula))
if cochet.getDisponible(matricula)==str(True):
fecha_debolucion=input("que dia debolbera el beiculo?")
num=input("numero de dias")
tramite=alquiler(matricula,self.getNif(),datetime.datetime.now().strftime('%d/%m/%Y'),fecha_debolucion,cochet.getPrecio_dia(),False)
tramite.seif()
cochet.setDisponible("False")
cochet.seif()
def SercarCochesDisponibles(self):
with open('veiculos.txt', mode='w',encoding ='utf-8') as veiculos:#1.2.2
for veiculo in veiculos:
veic=veiculo.split(";")
cochet=Coche.coche(veic[0],veic[1],veic[2],veic[3],veic[4],veic[5])
if cochet.getDisponible()==True:
print("Coche:\n"
"\tMatricula: % \n"
"\tMarca: %\n"
"\tModelo: %\n"
"\tPrecio por dia: %\n"
"\tDisponible: %\n"
% cochet.getMatricula(),
cochet.getMarca(),
cochet.getModelo(),
cochet.getPrecio_dia())
def SercarCochesEnLloger(self):#1.2.3
with open('veiculos.txt', mode='w',encoding ='utf-8') as veiculos:
for veiculo in veiculos:
veic=veiculo.split(";")
cochet=Coche.coche(veic[0],veic[1],veic[2],veic[3],veic[4],veic[5])
if cochet.getDisponible()==False:
print("Coche:\n"
"\tMatricula: % \n"
"\tMarca: %\n"
"\tModelo: %\n"
"\tPrecio por dia: %\n"
"\tDisponible: %\n"
% cochet.getMatricula(),
cochet.getMarca(),
cochet.getModelo(),
cochet.getPrecio_dia())
def ToString(self):
return self.getNombre()+";"+self.getApellido()+";"+self.getNif()
def seif(self):
with open('clientes.txt', mode='a',encoding ='utf-8') as clientes:
clientes.write("\n"+self.ToString())
class coche:
matricula=""
marca=""
modelo=""
precio_dia=""
disponible = False
def __init__(self,matricula,marca,modelo,precio_dia,disponible):
self.matricula=matricula
self.marca=marca
self.modelo=modelo
self.precio_dia=precio_dia
self.disponible = disponible
def getMatricula(self):
return self.matricula
def getMarca(self):
return self.marca
def getModelo(self):
return self.modelo
def getPrecio_dia(self):
return self.precio_dia
def getDisponible(self,matricula):
if matricula==self.matricula:
return self.disponible
def setDisponible(self,disponible):
self.disponible=disponible
self.seif()
def setPrecio_dia(self,precio_dia):
self.precio_dia=precio_dia
self.seif()
def ToString(self):
return str(self.matricula)+";"+str(self.marca)+";"+str(self.modelo)+";"+str(self.precio_dia)+";"+str(self.disponible)
def seif(self):
with open('alquiler.txt', mode='r+',encoding ='utf-8') as f_alquiler:
if re.search(f_alquiler.read(),self.matricula):
f_alquiler.write(self.ToString())
def addCocha(self):#1.2.4
with open('alquiler.txt', mode='a',encoding ='utf-8') as f_alquiler:
if re.search(f_alquiler.read(),self.matricula):
f_alquiler.write("\n"+self.ToString())
class alquiler:
matricula=""
nif=""
fecha_alquiler=""
fecha_debolucion=""
importe=""
completada=False
def __init__(self,matricula,nif, fecha_alquiler,fecha_debolucion,importe,completada):
self.matricula=matricula
self.nif=nif
self.fecha_alquiler=fecha_alquiler
self.fecha_debolucion=fecha_debolucion
self.importe=importe
self.completada=completada
def getMatricula(self):
return self.matricula
def getNif(self):
return self.nif
def getFecha_alquiler(self):
return self.fecha_alquiler
def getFecha_debolucion(self):
return self.fecha_debolucion
def getImporte(self):
return self.importe
def getCompletada(self):
return self.completada
def setMatricula(self,matricula):
self.matricula=matricula
def setNif(self,nif):
self.nif=nif
def setFecha_alquiler(self,fecha_alquiler):
self.fecha_alquiler=fecha_alquiler
def setFecha_debolucion(self,fecha_debolucion):
self.fecha_debolucion=fecha_debolucion
def setImporte(self,importe):
self.importe=importe
def setCompletada(self,completada):
self.completada=completada
def ToString(self):
return str(self.matricula)+";"+str(self.nif)+";"+str(self.fecha_alquiler)+";"+str(self.fecha_debolucion)+";"+str(self.importe)+";"+str(self.completada)
def seif(self):
print("seif alquiler")
with open('alquiler.txt', mode='a',encoding ='utf-8') as f_alquiler:
f_alquiler.write(self.ToString())
main()

# --- repo: yangxiangtao/biji | path: /1-pbase/day05/exmple/for_for.py | license: none ---
# Exercise:
# Read an integer and print a square of numbers, e.g. for 5:
# 1 2 3 4 5
# 1 2 3 4 5
# 1 2 3 4 5
# 1 2 3 4 5
# 1 2 3 4 5
n = int(input('Enter a number: '))
for x in range(1, n+1):
    # the original inner loop reused x and printed x..x+4 regardless of n;
    # printing 1..n per row matches the exercise stated above
    for y in range(1, n+1):
        print(y, end=" ")
    print()
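# Sample run (hypothetical), n = 3:
# 1 2 3
# 1 2 3
# 1 2 3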

# --- repo: gchandel6/algolove | path: /algolove/urls/snippets.py | license: none ---
from algolove.views import Algo_snippets,Coding_snippets
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.shortcuts import render , get_object_or_404
from algolove.models import Algo_snippet,Coding_snippet
query1 = Algo_snippet.objects.all()
query2 = Coding_snippet.objects.all()
urlpatterns = [
# URL for list of all views -- (Generic View)
url(r'^$',
ListView.as_view(queryset=query1,
paginate_by=20,
),
name="algo_codes_list"
),
url(r'^$',
ListView.as_view(queryset=query2,
paginate_by=20,
),
name="compete_codes_list"
),
url(r'^(?P<snippet_id>\d+)/$' ,
Algo_snippets.algo_snippet_page,
name="code_detail"
),
]
| [
"[email protected]"
] | |
2a36aaf9a1504c34d23ddb693137eba315b58ed4 | 6f394a5f520ba50671d95aa18b3e594e427a4d21 | /flytekit/sdk/exceptions.py | 1e9ea62b88aff3b6ccd64e4a2be7395bf752621b | [
"Apache-2.0"
] | permissive | jbrambleDC/flytekit | 844cf2216954eecfe8243e1bd9ca733a2802304c | 2eb9ce7aacaab6e49c1fc901c14c7b0d6b479523 | refs/heads/master | 2022-04-15T10:30:22.433669 | 2020-04-13T17:09:15 | 2020-04-13T17:09:15 | 255,514,936 | 1 | 0 | Apache-2.0 | 2020-04-14T05:04:46 | 2020-04-14T05:04:45 | null | UTF-8 | Python | false | false | 458 | py | from __future__ import absolute_import
from flytekit.common.exceptions import user as _user
class RecoverableException(_user.FlyteRecoverableException):
"""
Raise an exception of this type if user code detects an error and would like to force a retry of the entire task.
Any exception raised from user code other than RecoverableException will NOT be considered retryable and the task
will fail without additional retries.
"""
pass
| [
"[email protected]"
] | |
0813c7423d195ac6c50cc117503fb58455f94fd3 | 6f080f12c45b995ef1fb3edc48758980cb947137 | /backend/controller/rest/main_handler.py | b1317d82e23e236501b55b82d6490e89c22d34ae | [] | no_license | MrWhiski/neinzehnaruafn | e3303d8702bffddc5ce7b5d984da9979aef2342f | 219133d4d1d2e2f10eac18721735880f9c07de6a | refs/heads/main | 2023-05-09T10:45:34.766758 | 2021-05-25T20:58:14 | 2021-05-25T20:58:14 | 367,586,150 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from abc import ABC
from typing import Any
import tornado.web
import tornado.httputil
class MainHandler(tornado.web.RequestHandler):
def __init__(
self,
application: "Application",
request: tornado.httputil.HTTPServerRequest,
**kwargs: Any
):
super().__init__(application, request)
def get(self):
self.write("HelloWorld")
| [
"[email protected]"
] | |
07d1754c6d472cb4a99f4e501f68369ae85c632d | 5caa7bca9872109c3cbde72b937272b4ecf77555 | /cart/migrations/0005_cart_ordered.py | 7a9bc238a15596efa00677e66d4aa270f13258ba | [] | no_license | Prejudice182/prej-milestone-four | 292ea493b6486479b328f82ca2bc77117052e241 | b05ac9ad90807a2f8b075d96548dced2116d6ced | refs/heads/master | 2022-11-13T07:19:29.986343 | 2022-11-07T22:52:09 | 2022-11-07T22:52:09 | 231,970,672 | 0 | 1 | null | 2022-08-27T00:30:59 | 2020-01-05T20:17:31 | Python | UTF-8 | Python | false | false | 381 | py | # Generated by Django 3.0.2 on 2020-01-11 00:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cart', '0004_cartitem_ordered'),
]
operations = [
migrations.AddField(
model_name='cart',
name='ordered',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
8ebd76df680408aac13b53b494f35a9d9ff7fca6 | 554b840d833b4fe9a6735521a180bf95ceaa1665 | /centro/historialesmedicos/apps.py | 675c029f22defe4a8da33ea7e32c65473ec83021 | [] | no_license | jppenuela/centro_medico | 4889718b07c35295e38ae0d755fa00e6dcaec9d8 | 439aab4ae2e2c9697339b11450eb068b0fe5252d | refs/heads/master | 2022-12-27T04:54:19.649475 | 2020-10-15T22:28:47 | 2020-10-15T22:28:47 | 304,086,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from django.apps import AppConfig
class HistorialesmedicosConfig(AppConfig):
name = 'historialesmedicos'
| [
"[email protected]"
] | |
818e80a9624f26542880063b7539c3636f076b3e | 76881c25248b35d925669feff7079c70b5a71344 | /nose/test_profile.py | ba1b21cbd5df00c3fce15705414bf116859b5ad6 | [] | no_license | abatten/pynbody | 4919a3e4a3ddd0b2846477b0f50466321cb25dc7 | bb4ee1e32d1674295d12f7dddb0d680802ffe086 | refs/heads/master | 2021-04-29T19:36:49.240663 | 2018-08-13T06:05:48 | 2018-08-13T06:05:48 | 121,580,496 | 0 | 0 | null | 2018-02-15T01:23:10 | 2018-02-15T01:23:10 | null | UTF-8 | Python | false | false | 1,053 | py | import pynbody
import numpy as np
np.random.seed(1)
def make_fake_bar(npart=100000, max=1, min=-1, barlength=.8, barwidth=0.05, phi=0, fraction=0.2):
x = np.random.sample(int(npart*fraction))*(max-min) + min
y = np.random.sample(int(npart*fraction))*(max-min) + min
xbar = np.random.sample(npart, )*(barlength/2+barlength/2) - barlength/2
ybar = np.random.sample(npart)*(barwidth/2+barwidth/2) - barwidth/2
x = np.concatenate([x,xbar])
y = np.concatenate([y,ybar])
good = np.where((x**2 + y**2 < 1))[0]
s = pynbody.snapshot.new(len(good))
s['x'] = x[good]
s['y'] = y[good]
s['pos'].units = 'kpc'
s['mass'] = 1.0
s['mass'].units = 'Msol'
s['vel'] = 1.0
s['vel'].units = 'km s^-1'
s.rotate_z(phi)
return s
def test_fourier_profile():
bar = make_fake_bar(phi=45)
p = pynbody.analysis.profile.Profile(bar, nbins=50)
assert(np.all(p['fourier']['amp'][2,4:20] > 0.1))
assert(np.allclose(np.abs(p['fourier']['phi'][2,4:20]/2), np.pi/4.0, rtol=0.05))
| [
"[email protected]"
] | |
da65bbdb35886a9cf93a682e86d6cdd0b982bc5a | ba1754030b7bf3f20a8bc826cfa797640470f721 | /observation/otf_IRC_radec_2021.py | e3d85e9fab58d7ca9ab84d780220cafe8d3f7874 | [] | no_license | 1p85m/necst-1p85m2019 | 46b52e59cb980e802d974a7101c672040211119f | d20724bd2cd23d6ad3524630c5755c571b619624 | refs/heads/master | 2021-06-25T15:01:32.984872 | 2021-02-09T10:47:52 | 2021-02-09T10:47:52 | 197,499,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,828 | py | #!/usr/bin/env python3
import sys
import time
import numpy
import math
import os
import datetime
sys.path.append("/home/exito/ros/src/necst-telescope/scripts")
import telescope_controller
sys.path.append("/home/exito/ros/src/necst-core/scripts")
import core_controller
sys.path.append("/home/exito/ros/src/necst-1p85m2019/scripts")
import controller_1p85m2019
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import String
###############parameter###################
name = "otf_IRC10216_2021"
param = {}
#IRC+10216
param["on_x"] = (9 + 47/60 + 57.4063/3600)*15 #deg
param["on_y"] = 13 + 16/60 + 43.5648/3600 #deg
param["on_frame"] = "fk5"
param["on_offset_x"] = 0 #deg
param["on_offset_y"] = 0 #deg
param["num_x"] = 120
param["num_y"] = 120
param["delta_x"] = 10/3600
param["delta_y"] = 10/3600
param["delta_t"] = 0.5
param["ramp"] = 2
param["off_x"] = (9 + 50/60 + 14/3600)*15
param["off_y"] = 13 + 35/60 + 40/3600
param["off_frame"] = "fk5"
param["off_integ"] = 10 #sec
param["hot_time"] = 10 #sec
param["hot_interval"] = 5 #min
param["direction"] = "V"
param["target"] = "IRC+10216"
param["dcos"] = 1
###################START OBSERVATION##########################
class otf_observation(object):
last_timestamp = 0.
interval = 10
regist_time = 0
def __init__(self):
self.logger = core_controller.logger()
self.antenna = telescope_controller.antenna()
self.load = controller_1p85m2019.load()
self.obsstatus = rospy.Publisher('/otf/status', String, queue_size=1)
self.target = rospy.Publisher('/otf/target', String, queue_size=1)
self.otfparam_on = rospy.Publisher('/otf/param/on', Float64MultiArray, queue_size=1)
self.otfparam_scan = rospy.Publisher('/otf/param/scan', Float64MultiArray, queue_size=1)
self.otfparam_off = rospy.Publisher('/otf/param/off', Float64MultiArray, queue_size=1)
self.otfparam_hot = rospy.Publisher('/otf/param/hot', Float64MultiArray, queue_size=1)
self.otfparam_direc = rospy.Publisher('/otf/param/direction', String, queue_size=1)
def hot_obs(self,hot_time):
self.load.move_hot()
self.load.check_hot()
self.obsstatus.publish("{0:9}".format('hot start'))
time.sleep(hot_time)
self.obsstatus.publish("{0:9}".format('hot end'))
self.load.move_sky()
self.load.check_hot()
time.sleep(0.01)
pass
def off_obs(self,off_x,off_y,off_frame,off_integ):
self.antenna.move_wcs(off_x,off_y,frame=off_frame)
self.antenna.tracking_check()
self.obsstatus.publish("{0:9}".format('off start'))
time.sleep(off_integ)
self.obsstatus.publish("{0:9}".format('off end'))
time.sleep(0.01)
pass
def timer_regist(self,t):
self.interval = t*60 #min->sec
self.regist_time = time.time()
pass
def timer_check(self):
now = time.time()
if self.interval <= (now - self.regist_time):
self.last_timestamp = now
return True
return False
def pub_scan_param(self,param):
on = Float64MultiArray()
off = Float64MultiArray()
scan = Float64MultiArray()
hot = Float64MultiArray()
target = String()
direc = String()
on.data = [param["on_x"],param["on_y"],param["on_offset_x"],param["on_offset_y"] ]
scan.data = [param["num_x"],param["num_y"],param["delta_x"],param["delta_y"],param["delta_t"],param["ramp"]]
off.data = [param["off_x"],param["off_y"],param["off_integ"]]
hot.data = [param["hot_time"],param["hot_interval"]]
target.data = param["target"]
direc.data = param["direction"]
time.sleep(0.01)
self.otfparam_on.publish(on)
time.sleep(0.01)
self.otfparam_scan.publish(scan)
time.sleep(0.01)
self.otfparam_off.publish(off)
time.sleep(0.01)
self.otfparam_hot.publish(hot)
time.sleep(0.01)
self.target.publish(target)
time.sleep(0.01)
self.otfparam_direc.publish(direc)
time.sleep(0.01)
def start(self,param):
name = "otf_IRC+10216_2021"
date = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
file_name = name + '/' + date + '.necstdb'
print(file_name)
hot_time = param["hot_time"]
hot_interval = param["hot_interval"]
if param["direction"] == "H":
total_scan = param["num_y"]
x = param["on_x"]
y = param["on_y"]
dx = param["delta_x"]
dy = param["delta_y"]
frame = param["on_frame"]
ramp = param["ramp"]
num_x = param["num_x"]
num_y = param["num_y"]
dt = param["delta_t"]
off_x = param["off_x"]
off_y = param["off_y"]
off_frame = param["off_frame"]
off_integ = param["off_integ"]
on_offset_x = param["on_offset_x"]
on_offset_y = param["on_offset_y"]
elif param["direction"] == "V":
total_scan = param["num_x"]
x = param["on_x"]
y = param["on_y"]
dx = param["delta_x"]
dy = param["delta_y"]
frame = param["on_frame"]
ramp = param["ramp"]
num_x = param["num_x"]
num_y = param["num_y"]
dt = param["delta_t"]
off_x = param["off_x"]
off_y = param["off_y"]
off_frame = param["off_frame"]
off_integ = param["off_integ"]
on_offset_x = param["on_offset_x"]
on_offset_y = param["on_offset_y"]
self.logger.start(file_name)
time.sleep(0.3)
self.pub_scan_param(param)
for scan_num in range(total_scan):
self.obsstatus.publish("{0:9}".format('otf line '+str(scan_num)))
time.sleep(0.1)
#################HOT##############
if self.timer_check():
print("hot")
self.antenna.move_wcs(off_x,off_y,frame=off_frame)
self.load.move_hot()
self.antenna.tracking_check()
self.hot_obs(hot_time)
self.timer_regist(hot_interval)
else:
pass
#################OFF##############
print("off")
self.off_obs(off_x,off_y,off_frame,off_integ)
#################ON##############
if param["direction"] == "H":
_lx = dx * (num_x+1)
_ly = dy * (num_y)
lx = _lx + dx/dt*ramp
ly = 0
ctr_x = x + on_offset_x
ctr_y = y + on_offset_y
sx = ctr_x - _lx/2 - dx/dt*ramp
sy = ctr_y - _ly/2 + dy*scan_num
scan_t = dt*(num_x+1) + ramp
elif param["direction"] == "V":
_lx = dx * (num_x)
_ly = dy * (num_y+1)
lx = 0
ly = _ly + dy/dt*ramp
ctr_x = x + on_offset_x
ctr_y = y + on_offset_y
sx = ctr_x - _lx/2 + dx*scan_num
sy = ctr_y - _ly/2 - dy/dt*ramp
scan_t = dt*(num_y+1) + ramp
pass
self.obsstatus.publish("{0:9}".format('on start'))
print("scan "+str(scan_num))
self.antenna.move_raster_wcs(sx,sy,lx,ly,scan_t,l_unit="deg",frame=frame)
self.obsstatus.publish("{0:9}".format('on finish'))
time.sleep(0.1)
self.antenna.finalize()
self.logger.stop()
return
if __name__ == "__main__":
rospy.init_node(name)
otf = otf_observation()
otf.start(param)