blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a6cb9926ae305eeb61a1736944a2a2d904e853d9 | dd6385ef2b1ab63c7142d0a6c317c57aa1865b24 | /Estimating_the_root.py | 6820d4a098c5d9f08327d3f34e859cf7bdd3cfe2 | []
| no_license | sam505/Roots-of-all-Positive-Numbers | 1d43cbaf8cf2fc0bfd7276f7b0abf449b8f4c0e6 | ff99fa1f63912883ed7d90958f5baf0187c42105 | refs/heads/master | 2022-07-15T21:12:41.658937 | 2020-05-13T10:50:35 | 2020-05-13T10:50:35 | 263,601,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | number = input("Enter the number to get the square root: ")
root = input("Enter the root you want to get: ")
number = int(number)
root = int(root)
# Phase 1 — bracket search: walk estimate = 1, 2, 3, ... until
# f(x) = x**root - number changes sign between `estimate` and `estimate + 1`
# (or hits zero exactly), giving an integer starting point near the true root.
estimate = 0
solution = 1
solution_one = 1
while (solution <= 0 and solution_one <= 0) or (solution >= 0 and solution_one >= 0):
    estimate += 1
    solution = ((estimate**root) - number)
    solution_one = (((estimate + 1)**root) - number)
    if solution == 0:
        break
print("Estimate used is: " + str(estimate))
# Phase 2 — Newton-Raphson: x_{k+1} = x_k - f(x_k)/f'(x_k) with
# f(x) = x**root - number and f'(x) = root * x**(root-1).
soln = (estimate**root) - number
soln_one = root*(estimate**(root-1))
square_root_one = estimate
square_root = estimate - (soln/soln_one)
# Rounding to 10 decimal places makes the equality test below terminate
# once two successive iterates agree to that precision.
square_root = round(square_root, 10)
square_root_one = round(square_root_one, 10)
while square_root != square_root_one:
    square_root_one = square_root
    soln = (square_root ** root) - number
    soln_one = root*(square_root_one**(root-1))
    square_root = square_root - (soln / soln_one)
    square_root = round(square_root, 10)
    print (square_root)
print("The square root is: " + str(square_root))
| [
"[email protected]"
]
| |
6f15b015cc568d504c63e07a6f239f77a6181e82 | 9127f61210d9dfc011cdd7159d1a61573d1c5f0f | /BUCEALoginner/bucea_loginner.py | e55b2459ebea19847a30cab0e5943cc813c88455 | []
| no_license | YuhangJi/daily | 1a69c97736fb8d060e7a992701a5f25cbbe41564 | 23a3f761e2293e7ac77140a7e7b31b6274fee866 | refs/heads/master | 2023-02-06T05:16:46.582895 | 2020-04-19T22:45:50 | 2020-04-19T22:45:50 | 256,724,517 | 0 | 0 | null | 2021-01-02T01:48:30 | 2020-04-18T10:24:24 | Python | UTF-8 | Python | false | false | 7,802 | py | # -*- coding: utf-8 -*-
import time
import random
import argparse
import urllib.error
import urllib.parse
import urllib.request
import http.cookiejar
class BuceaLoginner:
    """Auto-login client for the BUCEA campus-network web portal.

    (Original docstring was only a date: 2020-04-18.)
    """
    # Portal endpoints: `get_url` fetches the login page (to obtain session
    # cookies), `post_url` receives the credential form submission.
    url_dict = {"get_url": "http://10.1.1.131:903/srun_portal_pc.php?ac_id=1&",
                "post_url": "http://10.1.1.131:903/srun_portal_pc.php?ac_id=1&url=www.msftconnecttest.com"}
    # Pool of desktop-browser User-Agent strings; one is picked per attempt.
    ua_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/78.0.3904.108 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/79.0.3945.88 Safari/537.36 ",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363"]
    # Class-level cookie jar and opener: cookies are shared by ALL instances.
    cj = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    def __init__(self, user_name=None, pass_word=None,
                 start_time=1, end_time=5):
        # start_time/end_time are hours (0-23) bounding the "quiet" window in
        # which down_time() reports True and the caller should not log in.
        self.__ps = {"user_name": user_name,  # ps <=> parameters dictionary
                     "pass_word": pass_word,
                     "start_time": start_time,
                     "end_time": end_time}
    @staticmethod
    def _get_time():
        # Current local time formatted for log lines, e.g. "2020-04-18 10:24:24".
        return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    @staticmethod
    def _get_content_length(data):
        # Length of the urlencoded body: one '=' per pair plus '&' separators
        # (len(keys)*2 - 1) plus the raw key/value characters.
        # NOTE(review): this undercounts if any value needs percent-encoding —
        # confirm the portal tolerates a mismatched Content-Length.
        length = len(data.keys()) * 2 - 1
        total = ''.join(list(data.keys()) + list(data.values()))
        length += len(total)
        return length
    def _get_ua(self):
        # Picks a random User-Agent. NOTE(review): shuffles the shared
        # class-level list in place as a side effect.
        random.shuffle(self.ua_list)
        return self.ua_list[0]
    def down_time(self):
        """Return True while the current hour lies in [start_time, end_time)."""
        hour = int(self._get_time().split(" ")[-1].split(":")[0])
        if self.__ps["start_time"] <= hour < self.__ps["end_time"]:
            return True
        else:
            return False
    def bucea_login(self):
        """Attempt one portal login: GET the page for cookies, then POST credentials.

        Returns 200 on success, 400 on a caught network error, 0 otherwise.
        """
        state_code = {200: "Success to connect!", 400: "Connection has been failed!", 0: "unknown"}
        ua = self._get_ua()  # user-agent
        get_headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
                      'application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': '10.1.1.131:903',
            'Referer': 'http://10.1.1.131/index_1.html',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': ua}
        # Priming GET: failures here are only logged, the POST is tried anyway.
        try:
            req = urllib.request.Request(url=self.url_dict["get_url"],
                                         headers=get_headers)
            self.opener.open(req)
        except TimeoutError:
            print(self._get_time(), "Program raised a TimeoutError with the long waiting time.")
        except urllib.error.HTTPError as e:
            print(self._get_time(), "HTTPError:{}".format(e.code))
        except urllib.error.URLError as e:
            print(self._get_time(), e.reason)
        # Form fields expected by the srun portal; blank values are accepted.
        data_dict = {
            'action': 'login',
            'ac_id': '1',
            'user_ip': '',
            'nas_ip': '',
            'user_mac': '',
            'url': '',
            'username': self.__ps["user_name"],
            'password': self.__ps["pass_word"]}
        content_length = str(self._get_content_length(data_dict))  # content_length
        post_headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
                      'application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Content-Length': content_length,
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': '10.1.1.131:903',
            'Origin': 'http://10.1.1.131:903',
            'Referer': 'http://10.1.1.131:903/srun_portal_pc.php?ac_id=1&url=www.msftconnecttest.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': ua}
        form_data = urllib.parse.urlencode(data_dict).encode(encoding='utf-8')  # encode post data
        code = 0
        try:
            request = urllib.request.Request(url=self.url_dict["post_url"],
                                             data=form_data,
                                             headers=post_headers)
            response = self.opener.open(request)
            if response.getcode() == 200:
                code = response.getcode()
        except TimeoutError:
            print(self._get_time(), "TimeoutError")
            code = 400
        except urllib.error.HTTPError as e:
            print(self._get_time(), "HTTPError:{}".format(e.code))
            code = 400
        except urllib.error.URLError as e:
            print(self._get_time(), e.reason)
            code = 400
        finally:
            # NOTE(review): `return` inside `finally` also swallows any
            # exception type not caught above — confirm this is intended.
            print(self._get_time(), state_code[code])
            return code
def main():
    """Parse command-line options, validate them, and run the login loop forever.

    Options:
      --username   account id, ASCII digits only (required)
      --password   account password (required)
      --delaytime  seconds slept after each login cycle (default 7200)
      --starttime / --endtime  hour range (0-23) during which logins pause
      --resttime   seconds slept while paused or after a failed attempt (default 300)

    Raises:
      ValueError: if username/password are missing or malformed.
    """
    # >>> load parameters from command line
    parser = argparse.ArgumentParser(prog="bucealoginner")
    parser.add_argument('--username', default=None)
    parser.add_argument('--password', default=None)
    parser.add_argument('--delaytime', default=7200)
    parser.add_argument('--starttime', default=1)
    parser.add_argument('--endtime', default=5)
    parser.add_argument('--resttime', default=300)
    __args = parser.parse_args()
    # <<<
    # >>> checking parameters
    __user_name = __args.username
    if isinstance(__user_name, str):
        # The account id must consist solely of ASCII digits.
        if any(ch not in "0123456789" for ch in __user_name):
            raise ValueError("Non-number included in parameter.[{}]".format("username"))
    elif __user_name is None:
        raise ValueError("Please input the important parameter .[{}]".format("username"))
    elif isinstance(__user_name, int):
        __user_name = str(__user_name)
    else:
        raise ValueError("Unknown parameter .[{}]".format("username"))
    __pass_word = __args.password
    if isinstance(__pass_word, str):
        pass
    elif isinstance(__pass_word, int):
        __pass_word = str(__pass_word)
    elif __pass_word is None:
        raise ValueError("Please input the important parameter .[{}]".format("password"))
    else:
        raise ValueError("Unknown parameter .[{}]".format("password"))

    def _to_int(value):
        # argparse hands options back as strings; accept "7200" or "7200.5"
        # (float first, then truncate) exactly like the original coercion.
        if isinstance(value, str):
            value = float(value)
        return int(value)

    __delay_time = _to_int(__args.delaytime)
    __start_time = _to_int(__args.starttime)
    __end_time = _to_int(__args.endtime)
    if __end_time < __start_time:
        # Normalize so that start <= end.
        __start_time, __end_time = __end_time, __start_time
    __rest_time = _to_int(__args.resttime)
    # <<<
    # >>> login loop: retry forever, idling through the quiet-hours window
    loginner = BuceaLoginner(user_name=__user_name,
                             pass_word=__pass_word,
                             start_time=__start_time,
                             end_time=__end_time)
    while True:
        if loginner.down_time():
            time.sleep(__rest_time)
            continue
        try:
            loginner.bucea_login()
        except Exception:
            # Transient failure: wait, then retry once before the normal delay.
            time.sleep(__rest_time)
            loginner.bucea_login()
        finally:
            time.sleep(__delay_time)
    # <<<
# <<<
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
47dd4b0d0b97967cfa1f6829d045d33383c9b932 | 96796bca1f00c5af89c695ff51691e977fda262c | /myEnvironments/multipleApps/multipleApps/urls.py | 1e1daa59868f00d10f30a34bb8adb6c29c2d563a | []
| no_license | LexiPearl/Python-Projects | 5be7ecb11ff7e332daf7b92d23e183511b67444c | c76ce5611d8abd8dfcdea24051cbdfe705a98ffd | refs/heads/master | 2021-01-19T11:35:50.624237 | 2017-04-28T04:13:13 | 2017-04-28T04:13:13 | 87,978,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | """multipleApps URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Root prefix: delegated to the login/registration app.
    url(r'^', include('apps.loginregistration.urls')),
    # The more specific 'courses/users_courses/' prefix is listed before the
    # broader 'courses/' prefix so its routes take precedence.
    url(r'^courses/users_courses/', include('apps.courses_users.urls')),
    url(r'^courses/', include('apps.courses.urls')),
]
| [
"[email protected]"
]
| |
48193b8eaa353cbae3a3b247b5561548f10fecb5 | 0d8ad19fb9e980647833e7947ae2e854381006ed | /banknote/course/migrations/0015_auto_20200625_1234.py | 30d78e4839138c4f5f4d1ad89492e4e520a0fa32 | []
| no_license | amgad165/Banknote-Academy | 2fa7bcfca2f24738aa57b021d785957cf0d04192 | 5cf4070af600287b713458116f8d9b1eec343e0f | refs/heads/master | 2022-11-11T07:57:46.548623 | 2020-06-28T14:42:18 | 2020-06-28T14:42:18 | 275,605,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # Generated by Django 2.2.5 on 2020-06-25 12:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Course.Start_date as a plain
    # models.DateField(). Follows directly after 0014_auto_20200625_1232.

    dependencies = [
        ('course', '0014_auto_20200625_1232'),
    ]
    operations = [
        migrations.AlterField(
            model_name='course',
            name='Start_date',
            field=models.DateField(),
        ),
    ]
| [
"[email protected]"
]
| |
f9c962a39baa75c624eed77ea4bb3ed83b1d85ba | 4851d160a423b4a65e81a75d5b4de5218de958ee | /Number Format.py | 63d6f6c7b330bb5a08ff5f80773c51da98bf8514 | []
| no_license | LarisaOvchinnikova/python_codewars | 519508e5626303dcead5ecb839c6d9b53cb3c764 | 5399f4be17e4972e61be74831703a82ce9badffd | refs/heads/master | 2023-05-05T14:52:02.100435 | 2021-05-25T18:36:51 | 2021-05-25T18:36:51 | 319,399,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # https://www.codewars.com/kata/565c4e1303a0a006d7000127
def number_format(n):
sign = "" if n >=0 else "-"
n = str(abs(n))
if len(n) <= 3: return sign+n
s = []
while len(n)>0:
s.append(n[-3:])
n = n[:-3]
return sign+",".join(s[::-1]) | [
"[email protected]"
]
| |
9b6ac406306c33ebbc4d0ea38fad4b5bfaeb2bce | a77a8272879ccd1a4e33b97d50e66e5486f38ed6 | /random_player.py | 4f63d6cecc8359534778a0768ae91cdcd7ca91c5 | []
| no_license | LeSphax/BattleshipAI | 8a150f9f59f952a726418ef4f185f76a6ebd4830 | bf21894f544ce22959dd126e7d860a228c445c1d | refs/heads/master | 2020-04-17T14:10:30.805717 | 2019-01-20T10:39:01 | 2019-01-20T10:43:15 | 166,646,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | import random
class RandomPlayer(object):
    """Baseline Battleship agent that fires at uniformly random cells."""

    def __init__(self, rows, columns):
        # Board dimensions; they bound the random shot coordinates below.
        self.rows = rows
        self.columns = columns

    def act(self, board, epsilon):
        """Return a random (row, column) target; `board` and `epsilon` are ignored."""
        row_shot = random.randint(0, self.rows - 1)
        col_shot = random.randint(0, self.columns - 1)
        return row_shot, col_shot

    def train(self, boards, actions, rewards, batch_size, learning_rate):
        """No-op: a random player does not learn from experience."""
        pass
| [
"[email protected]"
]
| |
eaeecb735041bbbe5891d953465fba1e4783f1c7 | 43b9eb11e90dbf984f950e4885085c83daa719b2 | /migrations/versions/339a6b145e56_user_status.py | f81e899bdc79017e6803c52bc8c09c0dbee04e15 | [
"Apache-2.0"
]
| permissive | dpdi-unifor/thorn | 8ec7982812fe07906567514ad6628154ea99f620 | 37695c66607f60b29afd25ac512c0242079e1342 | refs/heads/master | 2023-01-02T19:48:27.409446 | 2020-09-09T14:31:51 | 2020-09-09T14:31:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | """User status
Revision ID: 339a6b145e56
Revises: 9f52309f0d44
Create Date: 2020-03-26 11:53:32.044767
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '339a6b145e56'
down_revision = '9f52309f0d44'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add the non-nullable `status` enum column to the `user` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): a NOT NULL column added without a server_default will fail
    # on databases where `user` already has rows — confirm before deploying.
    op.add_column('user', sa.Column('status', sa.Enum('ENABLED', 'DELETED', 'PENDING_APPROVAL', name='UserStatusEnumType'), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the `status` column from the `user` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'status')
    # ### end Alembic commands ###
| [
"[email protected]"
]
| |
4ebd8d9c83528e4b8f6961352320a0c777136e22 | dc4713228b15ca4b6f262203df7908c617cd3915 | /tests/test_pitch.py | e5eaa786dbbd3476e2c3a7010e5c417186e8bce5 | []
| no_license | Nelvinom/60sec-pitch | 983826d312dcb28f9646f554d4bfb0117d402670 | 29d43258c92d62da32eef13ea9a8d6dc0e7bbb96 | refs/heads/master | 2022-06-11T23:53:14.275694 | 2020-05-08T10:19:06 | 2020-05-08T10:19:06 | 262,290,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | import unittest
from app.models import User, Role
class PitchModelTest(unittest.TestCase):
    """Unit tests for the Pitch model (construction, save, and id lookup)."""

    def setUp(self):
        """Create a user and an unsaved pitch fixture before each test."""
        self.user_test = User(username='Daudi', password='potato', email='[email protected]')
        # Bug fix: the original passed user=self.user_James, an attribute that
        # was never defined (setUp would raise AttributeError); the fixture
        # user is self.user_test, as the assertions below expect.
        self.new_pitch = Pitch(id=1, pitch_title='Test',
                               pitch_content='This is a test pitch',
                               category="interview", user=self.user_test,
                               likes=0, dislikes=0)

    def tearDown(self):
        """Remove all pitches and users so each test starts from a clean table."""
        Pitch.query.delete()
        User.query.delete()

    def test_check_instance_variables(self):
        # assertEquals is a deprecated alias; assertEqual is the supported name.
        self.assertEqual(self.new_pitch.pitch_title, 'Test')
        self.assertEqual(self.new_pitch.pitch_content, 'This is a test pitch')
        self.assertEqual(self.new_pitch.category, "interview")
        self.assertEqual(self.new_pitch.user, self.user_test)

    def test_save_pitch(self):
        self.new_pitch.save_pitch()
        self.assertTrue(len(Pitch.query.all()) > 0)

    def test_get_pitch_by_id(self):
        self.new_pitch.save_pitch()
        got_pitch = Pitch.get_pitch(1)
        self.assertTrue(got_pitch is not None)
"[email protected]"
]
| |
3014d12db736cc4a036c7172f8e025c97af34d2f | aee7a6cca6a2674f044d7a1cacf7c72d7438b8b1 | /cup_skills/stats/average_rewardgp_reset_good_7.py | 2458b18354d006e66a6d8060bf125fd8d3f27459 | []
| no_license | lagrassa/rl-erase | efd302526504c1157fa5810e886caccba8570f1b | 0df5c8ce4835c4641a2303d11095e9c27307f754 | refs/heads/master | 2021-05-13T13:36:12.901945 | 2019-08-01T02:13:15 | 2019-08-01T02:13:15 | 116,709,555 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | 52.72727272727272,45.45454545454545,35.45454545454545,38.18181818181819,19.090909090909093,44.54545454545455,20.909090909090907,52.72727272727272,51.81818181818182,50.0,-100.0,28.18181818181818,-100.0,28.18181818181818,39.09090909090909,49.09090909090909,35.45454545454545,38.18181818181819,31.818181818181817,52.72727272727272,22.727272727272727,31.818181818181817,26.36363636363636,18.181818181818183,40.909090909090914,23.636363636363637,48.18181818181818,29.09090909090909,15.454545454545453,52.72727272727272,47.27272727272727,37.27272727272727,30.0,26.36363636363636,31.818181818181817,50.90909090909091,36.36363636363637,33.63636363636363,51.81818181818182,45.45454545454545,26.36363636363636,19.090909090909093,20.0,32.72727272727273,20.909090909090907,24.545454545454547, | [
"[email protected]"
]
| |
17c60f274d555ceefc3a340a5ea260e332aa47df | d0ec1b69f19363c893b5cfb91d6ee0b77d382d3a | /.config/ranger/commands_full.py | 8b9742d65f2988682d2226f60a0bee7118e89d2d | []
| no_license | sdsaati/home | 4cf179e920b23f69cfad3662235c08856b047ea1 | c9d7c32546c29921608517d8885ec8eeb554b8bc | refs/heads/master | 2023-07-14T03:52:57.880238 | 2021-08-29T07:39:01 | 2021-08-29T07:39:01 | 380,321,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62,117 | py | # -*- coding: utf-8 -*-
# This file is part of ranger, the console file manager.
# This configuration file is licensed under the same terms as ranger.
# ===================================================================
#
# NOTE: If you copied this file to /etc/ranger/commands_full.py or
# ~/.config/ranger/commands_full.py, then it will NOT be loaded by ranger,
# and only serve as a reference.
#
# ===================================================================
# This file contains ranger's commands.
# It's all in python; lines beginning with # are comments.
#
# Note that additional commands are automatically generated from the methods
# of the class ranger.core.actions.Actions.
#
# You can customize commands in the files /etc/ranger/commands.py (system-wide)
# and ~/.config/ranger/commands.py (per user).
# They have the same syntax as this file. In fact, you can just copy this
# file to ~/.config/ranger/commands_full.py with
# `ranger --copy-config=commands_full' and make your modifications, don't
# forget to rename it to commands.py. You can also use
# `ranger --copy-config=commands' to copy a short sample commands.py that
# has everything you need to get started.
# But make sure you update your configs when you update ranger.
#
# ===================================================================
# Every class defined here which is a subclass of `Command' will be used as a
# command in ranger. Several methods are defined to interface with ranger:
# execute(): called when the command is executed.
# cancel(): called when closing the console.
# tab(tabnum): called when <TAB> is pressed.
# quick(): called after each keypress.
#
# tab() argument tabnum is 1 for <TAB> and -1 for <S-TAB> by default
#
# The return values for tab() can be either:
# None: There is no tab completion
# A string: Change the console to this string
# A list/tuple/generator: cycle through every item in it
#
# The return value for quick() can be:
# False: Nothing happens
# True: Execute the command afterwards
#
# The return value for execute() and cancel() doesn't matter.
#
# ===================================================================
# Commands have certain attributes and methods that facilitate parsing of
# the arguments:
#
# self.line: The whole line that was written in the console.
# self.args: A list of all (space-separated) arguments to the command.
# self.quantifier: If this command was mapped to the key "X" and
# the user pressed 6X, self.quantifier will be 6.
# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
# self.rest(n): The n-th argument plus everything that followed. For example,
# if the command was "search foo bar a b c", rest(2) will be "bar a b c"
# self.start(n): Anything before the n-th argument. For example, if the
# command was "search foo bar a b c", start(2) will be "search foo"
#
# ===================================================================
# And this is a little reference for common ranger functions and objects:
#
# self.fm: A reference to the "fm" object which contains most information
# about ranger.
# self.fm.notify(string): Print the given string on the screen.
# self.fm.notify(string, bad=True): Print the given string in RED.
# self.fm.reload_cwd(): Reload the current working directory.
# self.fm.thisdir: The current working directory. (A File object.)
# self.fm.thisfile: The current file. (A File object too.)
# self.fm.thistab.get_selection(): A list of all selected files.
# self.fm.execute_console(string): Execute the string as a ranger command.
# self.fm.open_console(string): Open the console with the given string
# already typed in for you.
# self.fm.move(direction): Moves the cursor in the given direction, which
# can be something like down=3, up=5, right=1, left=1, to=6, ...
#
# File objects (for example self.fm.thisfile) have these useful attributes and
# methods:
#
# tfile.path: The path to the file.
# tfile.basename: The base name only.
# tfile.load_content(): Force a loading of the directories content (which
# obviously works with directories only)
# tfile.is_directory: True/False depending on whether it's a directory.
#
# For advanced commands it is unavoidable to dive a bit into the source code
# of ranger.
# ===================================================================
from __future__ import (absolute_import, division, print_function)
from collections import deque
import os
import re
import sys
from ranger.api.commands import Command
class alias(Command):
    """:alias <newcommand> <oldcommand>

    Copies the oldcommand as newcommand.
    """

    context = 'browser'
    # Macros are NOT expanded here, so the aliased command line is stored
    # verbatim and expanded only when the alias itself is executed.
    resolve_macros = False

    def execute(self):
        if not self.arg(1) or not self.arg(2):
            self.fm.notify('Syntax: alias <newcommand> <oldcommand>', bad=True)
            return
        self.fm.commands.alias(self.arg(1), self.rest(2))
class echo(Command):
    """:echo <text>

    Display the text in the statusbar.
    """

    def execute(self):
        message = self.rest(1)
        self.fm.notify(message)
class cd(Command):
    """:cd [-r] <path>

    The cd command changes the directory.
    If the path is a file, selects that file.
    The command 'cd -' is equivalent to typing ``.
    Using the option "-r" will get you to the real path.
    """

    def execute(self):
        if self.arg(1) == '-r':
            # -r: resolve symlinks; a file target is selected, not entered.
            self.shift()
            destination = os.path.realpath(self.rest(1))
            if os.path.isfile(destination):
                self.fm.select_file(destination)
                return
        else:
            destination = self.rest(1)
        if not destination:
            destination = '~'
        if destination == '-':
            # 'cd -' jumps to the previous directory (the ` bookmark).
            self.fm.enter_bookmark('`')
        else:
            self.fm.cd(destination)

    def _tab_args(self):
        """Split the console line into (prefix, expanded dest, absolute dest, trailing-sep?)."""
        # dest must be rest because path could contain spaces
        if self.arg(1) == '-r':
            start = self.start(2)
            dest = self.rest(2)
        else:
            start = self.start(1)
            dest = self.rest(1)
        if dest:
            head, tail = os.path.split(os.path.expanduser(dest))
            if head:
                dest_exp = os.path.join(os.path.normpath(head), tail)
            else:
                dest_exp = tail
        else:
            dest_exp = ''
        return (start, dest_exp, os.path.join(self.fm.thisdir.path, dest_exp),
                dest.endswith(os.path.sep))

    @staticmethod
    def _tab_paths(dest, dest_abs, ends_with_sep):
        """Handle the empty / trailing-separator completion cases.

        Returns (None, None) to signal that the caller should fall back to
        normal or fuzzy completion.
        """
        if not dest:
            try:
                return next(os.walk(dest_abs))[1], dest_abs
            except (OSError, StopIteration):
                return [], ''
        if ends_with_sep:
            try:
                return [os.path.join(dest, path) for path in next(os.walk(dest_abs))[1]], ''
            except (OSError, StopIteration):
                return [], ''
        return None, None

    def _tab_match(self, path_user, path_file):
        """Prefix-match honoring the cd_tab_case setting (sensitive/insensitive/smart)."""
        if self.fm.settings.cd_tab_case == 'insensitive':
            path_user = path_user.lower()
            path_file = path_file.lower()
        elif self.fm.settings.cd_tab_case == 'smart' and path_user.islower():
            # Smart case: a lowercase query matches case-insensitively.
            path_file = path_file.lower()
        return path_file.startswith(path_user)

    def _tab_normal(self, dest, dest_abs):
        """Complete the last path component against its parent directory's subdirs."""
        dest_dir = os.path.dirname(dest)
        dest_base = os.path.basename(dest)
        try:
            dirnames = next(os.walk(os.path.dirname(dest_abs)))[1]
        except (OSError, StopIteration):
            return [], ''
        return [os.path.join(dest_dir, d) for d in dirnames if self._tab_match(dest_base, d)], ''

    def _tab_fuzzy_match(self, basepath, tokens):
        """ Find directories matching tokens recursively """
        if not tokens:
            tokens = ['']
        paths = [basepath]
        # Consume one token per level, expanding every current match's subdirs.
        while True:
            token = tokens.pop()
            matches = []
            for path in paths:
                try:
                    directories = next(os.walk(path))[1]
                except (OSError, StopIteration):
                    continue
                matches += [os.path.join(path, d) for d in directories
                            if self._tab_match(token, d)]
            if not tokens or not matches:
                return matches
            paths = matches
        return None

    def _tab_fuzzy(self, dest, dest_abs):
        """Fuzzy completion: peel non-existing components off dest, then match them level by level."""
        tokens = []
        basepath = dest_abs
        while True:
            basepath_old = basepath
            basepath, token = os.path.split(basepath)
            if basepath == basepath_old:
                break
            if os.path.isdir(basepath_old) and not token.startswith('.'):
                # Stop at the deepest existing (non-hidden) directory.
                basepath = basepath_old
                break
            tokens.append(token)
        paths = self._tab_fuzzy_match(basepath, tokens)
        if not os.path.isabs(dest):
            # Present relative suggestions relative to the current directory.
            paths_rel = self.fm.thisdir.path
            paths = [os.path.relpath(os.path.join(basepath, path), paths_rel)
                     for path in paths]
        else:
            paths_rel = ''
        return paths, paths_rel

    def tab(self, tabnum):
        from os.path import sep
        start, dest, dest_abs, ends_with_sep = self._tab_args()
        paths, paths_rel = self._tab_paths(dest, dest_abs, ends_with_sep)
        if paths is None:
            # Neither special case applied: do normal or fuzzy completion.
            if self.fm.settings.cd_tab_fuzzy:
                paths, paths_rel = self._tab_fuzzy(dest, dest_abs)
            else:
                paths, paths_rel = self._tab_normal(dest, dest_abs)
        paths.sort()
        if self.fm.settings.cd_bookmarks:
            # Prepend bookmarked directories that lie under any suggested path.
            paths[0:0] = [
                os.path.relpath(v.path, paths_rel) if paths_rel else v.path
                for v in self.fm.bookmarks.dct.values() for path in paths
                if v.path.startswith(os.path.join(paths_rel, path) + sep)
            ]
        if not paths:
            return None
        if len(paths) == 1:
            return start + paths[0] + sep
        return [start + dirname + sep for dirname in paths]
class chain(Command):
    """:chain <command1>; <command2>; ...

    Calls multiple commands at once, separated by semicolons.
    """

    resolve_macros = False

    def execute(self):
        line = self.rest(1)
        if not line.strip():
            self.fm.notify('Syntax: chain <command1>; <command2>; ...', bad=True)
            return
        # Run each semicolon-separated piece in order, stripped of whitespace.
        for piece in line.split(";"):
            self.fm.execute_console(piece.strip())
class shell(Command):
    # :shell [-<flags>] <command> — run a shell command; a leading "-xyz"
    # argument is treated as runner flags, not part of the command.
    escape_macros_for_shell = True

    def execute(self):
        if self.arg(1) and self.arg(1)[0] == '-':
            flags = self.arg(1)[1:]
            command = self.rest(2)
        else:
            flags = ''
            command = self.rest(1)
        if command:
            self.fm.execute_command(command, flags=flags)

    def tab(self, tabnum):
        from ranger.ext.get_executables import get_executables
        if self.arg(1) and self.arg(1)[0] == '-':
            command = self.rest(2)
        else:
            command = self.rest(1)
        start = self.line[0:len(self.line) - len(command)]
        try:
            position_of_last_space = command.rindex(" ")
        except ValueError:
            # First word: complete against executables on $PATH.
            return (start + program + ' ' for program
                    in get_executables() if program.startswith(command))
        if position_of_last_space == len(command) - 1:
            # Cursor right after a space: insert the selection (or %s macro).
            selection = self.fm.thistab.get_selection()
            if len(selection) == 1:
                return self.line + selection[0].shell_escaped_basename + ' '
            return self.line + '%s '
        # Otherwise complete the partial word against files in the cwd.
        before_word, start_of_word = self.line.rsplit(' ', 1)
        return (before_word + ' ' + file.shell_escaped_basename
                for file in self.fm.thisdir.files or []
                if file.shell_escaped_basename.startswith(start_of_word))
class open_with(Command):
    # :open_with [<app>] [<flags>] [<mode>] — open the selection with a
    # specific program, runner flags, and/or mode number.
    def execute(self):
        app, flags, mode = self._get_app_flags_mode(self.rest(1))
        self.fm.execute_file(
            files=[f for f in self.fm.thistab.get_selection()],
            app=app,
            flags=flags,
            mode=mode)

    def tab(self, tabnum):
        return self._tab_through_executables()

    def _get_app_flags_mode(self, string):  # pylint: disable=too-many-branches,too-many-statements
        """Extracts the application, flags and mode from a string.

        examples:
        "mplayer f 1" => ("mplayer", "f", 1)
        "atool 4" => ("atool", "", 4)
        "p" => ("", "p", 0)
        "" => None
        """
        app = ''
        flags = ''
        mode = 0
        split = string.split()
        # Classify each whitespace-separated part; order of the checks matters
        # because _is_app is defined as "neither flags nor a number".
        if len(split) == 1:
            part = split[0]
            if self._is_app(part):
                app = part
            elif self._is_flags(part):
                flags = part
            elif self._is_mode(part):
                mode = part
        elif len(split) == 2:
            part0 = split[0]
            part1 = split[1]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                elif self._is_mode(part1):
                    mode = part1
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        elif len(split) >= 3:
            part0 = split[0]
            part1 = split[1]
            part2 = split[2]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                    if self._is_mode(part2):
                        mode = part2
                elif self._is_mode(part1):
                    mode = part1
                    if self._is_flags(part2):
                        flags = part2
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        return app, flags, int(mode)

    def _is_app(self, arg):
        # Anything that is neither a pure flags string nor a pure number.
        return not self._is_flags(arg) and not arg.isdigit()

    @staticmethod
    def _is_flags(arg):
        from ranger.core.runner import ALLOWED_FLAGS
        return all(x in ALLOWED_FLAGS for x in arg)

    @staticmethod
    def _is_mode(arg):
        # A mode is a string of decimal digits (later coerced with int()).
        return all(x in '0123456789' for x in arg)
class set_(Command):
    """:set <option name>=<python expression>

    Gives an option a new value.

    Use `:set <option>!` to toggle or cycle it, e.g. `:set flush_input!`
    """
    name = 'set'  # don't override the builtin set class

    def execute(self):
        # NOTE(review): this first assignment is immediately overwritten by
        # parse_setting_line_v2() below — it looks redundant; confirm.
        name = self.arg(1)
        name, value, _, toggle = self.parse_setting_line_v2()
        if toggle:
            self.fm.toggle_option(name)
        else:
            self.fm.set_option_from_string(name, value)

    def tab(self, tabnum):  # pylint: disable=too-many-return-statements
        from ranger.gui.colorscheme import get_all_colorschemes
        name, value, name_done = self.parse_setting_line()
        settings = self.fm.settings
        # No option name typed yet: offer every setting.
        if not name:
            return sorted(self.firstpart + setting for setting in settings)
        # Partial option name: offer matching settings.
        if not value and not name_done:
            return sorted(self.firstpart + setting for setting in settings
                          if setting.startswith(name))
        # Option name complete, no value yet: offer the current/possible values.
        if not value:
            value_completers = {
                "colorscheme":
                # Cycle through colorschemes when name, but no value is specified
                lambda: sorted(self.firstpart + colorscheme for colorscheme
                               in get_all_colorschemes(self.fm)),
                "column_ratios":
                lambda: self.firstpart + ",".join(map(str, settings[name])),
            }

            def default_value_completer():
                return self.firstpart + str(settings[name])

            return value_completers.get(name, default_value_completer)()
        # Boolean options complete to True/False from any prefix.
        if bool in settings.types_of(name):
            if 'true'.startswith(value.lower()):
                return self.firstpart + 'True'
            if 'false'.startswith(value.lower()):
                return self.firstpart + 'False'
        # Tab complete colorscheme values if incomplete value is present
        if name == "colorscheme":
            return sorted(self.firstpart + colorscheme for colorscheme
                          in get_all_colorschemes(self.fm) if colorscheme.startswith(value))
        return None
class setlocal(set_):
    """:setlocal path=<regular expression> <option name>=<python expression>

    Gives an option a new value.
    """
    # The path= argument may be double-quoted, single-quoted, or unquoted;
    # each form is tried in turn by execute().
    PATH_RE_DQUOTED = re.compile(r'^setlocal\s+path="(.*?)"')
    PATH_RE_SQUOTED = re.compile(r"^setlocal\s+path='(.*?)'")
    PATH_RE_UNQUOTED = re.compile(r'^path=(.*?)$')

    def _re_shift(self, match):
        """Consume the matched path= tokens from the argument list; return the path or None."""
        if not match:
            return None
        path = os.path.expanduser(match.group(1))
        # Shift once per whitespace-separated token the path occupied.
        for _ in range(len(path.split())):
            self.shift()
        return path

    def execute(self):
        path = self._re_shift(self.PATH_RE_DQUOTED.match(self.line))
        if path is None:
            path = self._re_shift(self.PATH_RE_SQUOTED.match(self.line))
        if path is None:
            path = self._re_shift(self.PATH_RE_UNQUOTED.match(self.arg(1)))
        if path is None and self.fm.thisdir:
            # No path= given: default to the current directory.
            path = self.fm.thisdir.path
        if not path:
            return
        name, value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(name, value, localpath=path)
class setintag(set_):
    """:setintag <tag or tags> <option name>=<option value>

    Sets an option for directories that are tagged with a specific tag.
    """

    def execute(self):
        # First argument is the tag character(s); shift it away so the
        # remaining line parses as a normal name=value setting.
        tags = self.arg(1)
        self.shift()
        name, value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(name, value, tags=tags)
class default_linemode(Command):
    # :default_linemode [path=<regexp> | tag=<tag(s)>] <linemode>
    # Sets the default line mode, optionally restricted by path regex or tag.
    def execute(self):
        from ranger.container.fsobject import FileSystemObject
        if len(self.args) < 2:
            # NOTE(review): no `return` after this usage message — execution
            # falls through and still appends an entry; confirm intended.
            self.fm.notify(
                "Usage: default_linemode [path=<regexp> | tag=<tag(s)>] <linemode>", bad=True)
        # Extract options like "path=..." or "tag=..." from the command line
        arg1 = self.arg(1)
        method = "always"
        argument = None
        if arg1.startswith("path="):
            method = "path"
            argument = re.compile(arg1[5:])
            self.shift()
        elif arg1.startswith("tag="):
            method = "tag"
            argument = arg1[4:]
            self.shift()
        # Extract and validate the line mode from the command line
        lmode = self.rest(1)
        if lmode not in FileSystemObject.linemode_dict:
            self.fm.notify(
                "Invalid linemode: %s; should be %s" % (
                    lmode, "/".join(FileSystemObject.linemode_dict)),
                bad=True,
            )
        # Add the prepared entry to the fm.default_linemodes
        entry = [method, argument, lmode]
        self.fm.default_linemodes.appendleft(entry)
        # Redraw the columns
        if self.fm.ui.browser:
            for col in self.fm.ui.browser.columns:
                col.need_redraw = True

    def tab(self, tabnum):
        return (self.arg(0) + " " + lmode
                for lmode in self.fm.thisfile.linemode_dict.keys()
                if lmode.startswith(self.arg(1)))
class quit(Command):  # pylint: disable=redefined-builtin
    """:quit
    Closes the current tab, if there's more than one tab.
    Otherwise quits if there are no tasks in progress.
    """

    def _exit_no_work(self):
        # Refuse to quit while the loader still has queued or running tasks.
        if not self.fm.loader.has_work():
            self.fm.exit()
        else:
            self.fm.notify('Not quitting: Tasks in progress: Use `quit!` to force quit')

    def execute(self):
        # With several tabs open, ":quit" only closes the current one.
        if len(self.fm.tabs) < 2:
            self._exit_no_work()
        else:
            self.fm.tab_close()
class quit_bang(Command):
    """:quit!
    Closes the current tab, if there's more than one tab.
    Otherwise force quits immediately.
    """
    name = 'quit!'
    allow_abbrev = False

    def execute(self):
        # With several tabs: just close this one. Otherwise leave ranger
        # unconditionally, even if background tasks are still running.
        if len(self.fm.tabs) < 2:
            self.fm.exit()
        else:
            self.fm.tab_close()
class quitall(Command):
    """:quitall
    Quits if there are no tasks in progress.
    """

    def _exit_no_work(self):
        # Only exit when the loader queue is empty; otherwise tell the
        # user how to force the matter.
        if not self.fm.loader.has_work():
            self.fm.exit()
        else:
            self.fm.notify('Not quitting: Tasks in progress: Use `quitall!` to force quit')

    def execute(self):
        self._exit_no_work()
class quitall_bang(Command):
    """:quitall!
    Force quits immediately.
    """
    # Explicit command name; the trailing "!" can't be derived from the
    # class name.
    name = 'quitall!'
    # Require the full name so a force-quit is never hit by abbreviation.
    allow_abbrev = False
    def execute(self):
        # Unconditional exit: ignores tabs and any tasks in progress.
        self.fm.exit()
class terminal(Command):
    """:terminal
    Spawns an "x-terminal-emulator" starting in the current directory.
    """

    def execute(self):
        from ranger.ext.get_executables import get_term
        emulator = get_term()
        # 'f' = fork: run the terminal detached so ranger keeps running.
        self.fm.run(emulator, flags='f')
class delete(Command):
    """:delete
    Tries to delete the selection or the files passed in arguments (if any).
    The arguments use a shell-like escaping.
    "Selection" is defined as all the "marked files" (by default, you
    can mark files with space or v). If there are no marked files,
    use the "current file" (where the cursor is)
    When attempting to delete non-empty directories or multiple
    marked files, it will require a confirmation.
    """
    allow_abbrev = False
    escape_macros_for_shell = True
    def execute(self):
        import shlex
        from functools import partial
        def is_directory_with_files(path):
            # True for a real (non-symlink) directory that contains entries.
            return os.path.isdir(path) and not os.path.islink(path) and len(os.listdir(path)) > 0
        if self.rest(1):
            # Explicit arguments were given: split with shell quoting rules.
            files = shlex.split(self.rest(1))
            many_files = (len(files) > 1 or is_directory_with_files(files[0]))
        else:
            # No arguments: operate on the marked files / the cursor file.
            cwd = self.fm.thisdir
            tfile = self.fm.thisfile
            if not cwd or not tfile:
                self.fm.notify("Error: no file selected for deletion!", bad=True)
                return
            # relative_path used for a user-friendly output in the confirmation.
            files = [f.relative_path for f in self.fm.thistab.get_selection()]
            many_files = (cwd.marked_items or is_directory_with_files(tfile.path))
        # confirm_on_delete is one of 'never'/'multiple'/anything else
        # (ask always); 'multiple' only asks for risky (many_files) cases.
        confirm = self.fm.settings.confirm_on_delete
        if confirm != 'never' and (confirm != 'multiple' or many_files):
            # The actual deletion happens asynchronously in the callback.
            self.fm.ui.console.ask(
                "Confirm deletion of: %s (y/N)" % ', '.join(files),
                partial(self._question_callback, files),
                ('n', 'N', 'y', 'Y'),
            )
        else:
            # no need for a confirmation, just delete
            self.fm.delete(files)
    def tab(self, tabnum):
        return self._tab_directory_content()
    def _question_callback(self, files, answer):
        # Only an explicit 'y'/'Y' deletes; anything else aborts.
        if answer == 'y' or answer == 'Y':
            self.fm.delete(files)
class trash(Command):
    """:trash
    Tries to move the selection or the files passed in arguments (if any) to
    the trash, using rifle rules with label "trash".
    The arguments use a shell-like escaping.
    "Selection" is defined as all the "marked files" (by default, you
    can mark files with space or v). If there are no marked files,
    use the "current file" (where the cursor is)
    When attempting to trash non-empty directories or multiple
    marked files, it will require a confirmation.
    """
    allow_abbrev = False
    escape_macros_for_shell = True
    def execute(self):
        import shlex
        from functools import partial
        def is_directory_with_files(path):
            # True for a real (non-symlink) directory that contains entries.
            return os.path.isdir(path) and not os.path.islink(path) and len(os.listdir(path)) > 0
        if self.rest(1):
            files = shlex.split(self.rest(1))
            many_files = (len(files) > 1 or is_directory_with_files(files[0]))
        else:
            cwd = self.fm.thisdir
            tfile = self.fm.thisfile
            if not cwd or not tfile:
                # NOTE(review): message says "deletion" although files are
                # trashed — looks like copy-paste from :delete; confirm
                # before changing this user-facing text.
                self.fm.notify("Error: no file selected for deletion!", bad=True)
                return
            # relative_path used for a user-friendly output in the confirmation.
            files = [f.relative_path for f in self.fm.thistab.get_selection()]
            many_files = (cwd.marked_items or is_directory_with_files(tfile.path))
        # Reuses the delete-confirmation setting; same semantics as :delete.
        confirm = self.fm.settings.confirm_on_delete
        if confirm != 'never' and (confirm != 'multiple' or many_files):
            self.fm.ui.console.ask(
                "Confirm deletion of: %s (y/N)" % ', '.join(files),
                partial(self._question_callback, files),
                ('n', 'N', 'y', 'Y'),
            )
        else:
            # no need for a confirmation, just delete
            self.fm.execute_file(files, label='trash')
    def tab(self, tabnum):
        return self._tab_directory_content()
    def _question_callback(self, files, answer):
        # Only an explicit 'y'/'Y' trashes; anything else aborts.
        if answer == 'y' or answer == 'Y':
            self.fm.execute_file(files, label='trash')
class jump_non(Command):
    """:jump_non [-FLAGS...]
    Jumps to first non-directory if highlighted file is a directory and vice versa.
    Flags:
    -r Jump in reverse order
    -w Wrap around if reaching end of filelist
    """
    def __init__(self, *args, **kwargs):
        super(jump_non, self).__init__(*args, **kwargs)
        # Flags are parsed once, at construction time.
        flags, _ = self.parse_flags()
        self._flag_reverse = 'r' in flags
        self._flag_wrap = 'w' in flags
    @staticmethod
    def _non(fobj, is_directory):
        # True when fobj is of the opposite kind (dir vs. non-dir).
        return fobj.is_directory if not is_directory else not fobj.is_directory
    def execute(self):
        tfile = self.fm.thisfile
        # Scan the file list once, remembering the first opposite-kind
        # file before the cursor (for wrapping) and after it.
        passed = False
        found_before = None
        found_after = None
        for fobj in self.fm.thisdir.files[::-1] if self._flag_reverse else self.fm.thisdir.files:
            if fobj.path == tfile.path:
                # Reached the cursor file; matches from now on are "after".
                passed = True
                continue
            if passed:
                if self._non(fobj, tfile.is_directory):
                    found_after = fobj.path
                    break
            elif not found_before and self._non(fobj, tfile.is_directory):
                found_before = fobj.path
        if found_after:
            self.fm.select_file(found_after)
        elif self._flag_wrap and found_before:
            # Nothing after the cursor: wrap to the first match before it.
            self.fm.select_file(found_before)
class mark_tag(Command):
    """:mark_tag [<tags>]
    Mark all tags that are tagged with either of the given tags.
    When leaving out the tag argument, all tagged files are marked.
    """
    # Subclasses flip this to unmark instead of mark.
    do_mark = True

    def execute(self):
        directory = self.fm.thisdir
        wanted = self.rest(1).replace(" ", "")
        if not (self.fm.tags and directory.files):
            return
        tag_table = self.fm.tags.tags
        for entry in directory.files:
            if entry.realpath not in tag_table:
                continue
            # An empty "wanted" string matches every tagged file.
            if wanted and tag_table[entry.realpath] not in wanted:
                continue
            directory.mark_item(entry, val=self.do_mark)
        self.fm.ui.status.need_redraw = True
        self.fm.ui.need_redraw = True
class console(Command):
    """:console <command>
    Open the console with the given command.
    """

    def execute(self):
        # An optional "-p<N>" first argument places the cursor at column N.
        cursor_pos = None
        first = self.arg(1)
        if first.startswith('-p'):
            try:
                cursor_pos = int(first[2:])
            except ValueError:
                pass  # not a valid position; keep "-p..." as command text
            else:
                self.shift()
        self.fm.open_console(self.rest(1), position=cursor_pos)
class load_copy_buffer(Command):
    """:load_copy_buffer
    Load the copy buffer from datadir/copy_buffer
    """
    copy_buffer_filename = 'copy_buffer'
    def execute(self):
        import sys
        from ranger.container.file import File
        from os.path import exists
        fname = self.fm.datapath(self.copy_buffer_filename)
        # Python 2 raises IOError on I/O failure, Python 3 raises OSError.
        unreadable = IOError if sys.version_info[0] < 3 else OSError
        try:
            # with-statement closes the file even when read() fails
            # (the original leaked the descriptor in that case).
            with open(fname, 'r') as fobj:
                # Keep only paths that still exist on disk.
                self.fm.copy_buffer = set(
                    File(g) for g in fobj.read().split("\n") if exists(g))
        except unreadable:
            return self.fm.notify(
                "Cannot open %s" % (fname or self.copy_buffer_filename), bad=True)
        self.fm.ui.redraw_main_column()
        return None
class save_copy_buffer(Command):
    """:save_copy_buffer
    Save the copy buffer to datadir/copy_buffer
    """
    copy_buffer_filename = 'copy_buffer'
    def execute(self):
        import sys
        fname = self.fm.datapath(self.copy_buffer_filename)
        # Python 2 raises IOError on I/O failure, Python 3 raises OSError.
        unwritable = IOError if sys.version_info[0] < 3 else OSError
        try:
            # with-statement guarantees the file is closed even when the
            # write fails; also avoids the original's `fobj` shadowing
            # (file handle vs. generator variable) by using a fresh name.
            with open(fname, 'w') as fobj:
                fobj.write("\n".join(
                    fentry.path for fentry in self.fm.copy_buffer))
        except unwritable:
            return self.fm.notify("Cannot open %s" %
                                  (fname or self.copy_buffer_filename), bad=True)
        return None
class unmark_tag(mark_tag):
    """:unmark_tag [<tags>]
    Unmark all tags that are tagged with either of the given tags.
    When leaving out the tag argument, all tagged files are unmarked.
    """
    # Reuses mark_tag.execute; only the marking direction differs.
    do_mark = False
class mkdir(Command):
    """:mkdir <dirname>
    Creates a directory with the name <dirname>.
    """

    def execute(self):
        from os.path import join, expanduser, lexists
        from os import makedirs
        # Resolve the argument against the current directory, expanding
        # a leading "~". Intermediate directories are created as needed.
        target = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if lexists(target):
            self.fm.notify("file/directory exists!", bad=True)
        else:
            makedirs(target)

    def tab(self, tabnum):
        return self._tab_directory_content()
class touch(Command):
    """:touch <fname>
    Creates a file with the name <fname>.
    """

    def execute(self):
        from os.path import join, expanduser, lexists
        target = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if lexists(target):
            self.fm.notify("file/directory exists!", bad=True)
        else:
            # Append mode creates the file without truncating anything.
            open(target, 'a').close()

    def tab(self, tabnum):
        return self._tab_directory_content()
class edit(Command):
    """:edit <filename>
    Opens the specified file in vim
    """

    def execute(self):
        # Without an argument, edit the file under the cursor.
        target = self.rest(1) or self.fm.thisfile.path
        self.fm.edit_file(target)

    def tab(self, tabnum):
        return self._tab_directory_content()
class eval_(Command):
    """:eval [-q] <python code>
    Evaluates the python code.
    `fm' is a reference to the FM instance.
    To display text, use the function `p'.
    Examples:
    :eval fm
    :eval len(fm.directories)
    :eval p("Hello World!")
    """
    name = 'eval'
    # Keep "%"-style macros in the code untouched.
    resolve_macros = False
    def execute(self):
        # The import is needed so eval() can access the ranger module
        import ranger  # NOQA pylint: disable=unused-import,unused-variable
        # "-q" suppresses printing of the expression's result.
        if self.arg(1) == '-q':
            code = self.rest(2)
            quiet = True
        else:
            code = self.rest(1)
            quiet = False
        # Expose convenience names to the evaluated code via this
        # module's globals.
        global cmd, fm, p, quantifier  # pylint: disable=invalid-name,global-variable-undefined
        fm = self.fm
        cmd = self.fm.execute_console
        p = fm.notify
        quantifier = self.quantifier
        try:
            try:
                # Try the code as an expression first ...
                result = eval(code)  # pylint: disable=eval-used
            except SyntaxError:
                # ... and fall back to executing it as statements.
                exec(code)  # pylint: disable=exec-used
            else:
                if result and not quiet:
                    p(result)
        except Exception as err:  # pylint: disable=broad-except
            fm.notify("The error `%s` was caused by evaluating the "
                      "following code: `%s`" % (err, code), bad=True)
class rename(Command):
    """:rename <newname>
    Changes the name of the currently highlighted file to <newname>
    """
    def execute(self):
        from ranger.container.file import File
        from os import access
        new_name = self.rest(1)
        if not new_name:
            return self.fm.notify('Syntax: rename <newname>', bad=True)
        # Renaming to the same name is a no-op.
        if new_name == self.fm.thisfile.relative_path:
            return None
        # Never clobber an existing file.
        if access(new_name, os.F_OK):
            return self.fm.notify("Can't rename: file already exists!", bad=True)
        if self.fm.rename(self.fm.thisfile, new_name):
            file_new = File(new_name)
            # NOTE: self.fm.thisfile still refers to the pre-rename object
            # here, so .path below is the OLD path — the ordering matters.
            self.fm.bookmarks.update_path(self.fm.thisfile.path, file_new)
            self.fm.tags.update_path(self.fm.thisfile.path, file_new.path)
            # Keep the cursor on the renamed file.
            self.fm.thisdir.pointed_obj = file_new
            self.fm.thisfile = file_new
        return None
    def tab(self, tabnum):
        return self._tab_directory_content()
class rename_append(Command):
    """:rename_append [-FLAGS...]
    Opens the console with ":rename <current file>" with the cursor positioned
    before the file extension.
    Flags:
    -a Position before all extensions
    -r Remove everything before extensions
    """
    def __init__(self, *args, **kwargs):
        super(rename_append, self).__init__(*args, **kwargs)
        # Flags are parsed once, at construction time.
        flags, _ = self.parse_flags()
        self._flag_ext_all = 'a' in flags
        self._flag_remove = 'r' in flags
    def execute(self):
        from ranger import MACRO_DELIMITER, MACRO_DELIMITER_ESC
        tfile = self.fm.thisfile
        # Escape macro delimiters so the filename survives macro expansion
        # in the console line.
        relpath = tfile.relative_path.replace(MACRO_DELIMITER, MACRO_DELIMITER_ESC)
        basename = tfile.basename.replace(MACRO_DELIMITER, MACRO_DELIMITER_ESC)
        # No extension (or hidden-file leading dot only, or a directory):
        # open the console with the cursor at the end of the line.
        if basename.find('.') <= 0 or os.path.isdir(relpath):
            self.fm.open_console('rename ' + relpath)
            return
        if self._flag_ext_all:
            # Cursor before the first dot, i.e. before ALL extensions.
            pos_ext = re.search(r'[^.]+', basename).end(0)
        else:
            # Cursor before the last dot (final extension only).
            pos_ext = basename.rindex('.')
        pos = len(relpath) - len(basename) + pos_ext
        if self._flag_remove:
            # Drop the stem, keeping only the extension part.
            relpath = relpath[:-len(basename)] + basename[pos_ext:]
            pos -= pos_ext
        # 7 == len('rename '): offset the cursor past the command name.
        self.fm.open_console('rename ' + relpath, position=(7 + pos))
class chmod(Command):
    """:chmod <octal number>
    Sets the permissions of the selection to the octal number.
    The octal number is between 0 and 777. The digits specify the
    permissions for the user, the group and others.
    A 1 permits execution, a 2 permits writing, a 4 permits reading.
    Add those numbers to combine them. So a 7 permits everything.
    """

    def execute(self):
        mode_str = self.rest(1)
        if not mode_str:
            # Fall back to the numeric prefix typed before the command.
            if self.quantifier is None:
                self.fm.notify("Syntax: chmod <octal number> "
                               "or specify a quantifier", bad=True)
                return
            mode_str = str(self.quantifier)
        try:
            mode = int(mode_str, 8)
        except ValueError:
            mode = -1  # forces the range check below to fail
        if not 0 <= mode <= 0o777:
            self.fm.notify("Need an octal number between 0 and 777!", bad=True)
            return
        for fobj in self.fm.thistab.get_selection():
            try:
                os.chmod(fobj.path, mode)
            except OSError as ex:
                self.fm.notify(ex)
        # Reload the directory so the new permissions become visible.
        # (Reloading only the selected files would be sufficient.)
        self.fm.thisdir.content_outdated = True
class bulkrename(Command):
    """:bulkrename
    This command opens a list of selected files in an external editor.
    After you edit and save the file, it will generate a shell script
    which does bulk renaming according to the changes you did in the file.
    This shell script is opened in an editor for you to review.
    After you close it, it will be executed.
    """
    def execute(self):
        # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        import sys
        import tempfile
        from ranger.container.file import File
        from ranger.ext.shell_escape import shell_escape as esc
        py3 = sys.version_info[0] >= 3
        # Create and edit the file list
        filenames = [f.relative_path for f in self.fm.thistab.get_selection()]
        with tempfile.NamedTemporaryFile(delete=False) as listfile:
            listpath = listfile.name
            if py3:
                listfile.write("\n".join(filenames).encode(
                    encoding="utf-8", errors="surrogateescape"))
            else:
                listfile.write("\n".join(filenames))
        self.fm.execute_file([File(listpath)], app='editor')
        with (open(listpath, 'r', encoding="utf-8", errors="surrogateescape") if
              py3 else open(listpath, 'r')) as listfile:
            new_filenames = listfile.read().split("\n")
        os.unlink(listpath)
        if all(a == b for a, b in zip(filenames, new_filenames)):
            self.fm.notify("No renaming to be done!")
            return
        # Generate script
        with tempfile.NamedTemporaryFile() as cmdfile:
            script_lines = []
            script_lines.append("# This file will be executed when you close"
                                " the editor.")
            script_lines.append("# Please double-check everything, clear the"
                                " file to abort.")
            new_dirs = []
            for old, new in zip(filenames, new_filenames):
                if old != new:
                    basepath, _ = os.path.split(new)
                    # Create target directories once, before the first
                    # rename that needs them.
                    if (basepath and basepath not in new_dirs
                            and not os.path.isdir(basepath)):
                        script_lines.append("mkdir -vp -- {dir}".format(
                            dir=esc(basepath)))
                        new_dirs.append(basepath)
                    script_lines.append("mv -vi -- {old} {new}".format(
                        old=esc(old), new=esc(new)))
            # Make sure not to forget the ending newline
            script_content = "\n".join(script_lines) + "\n"
            if py3:
                cmdfile.write(script_content.encode(encoding="utf-8",
                                                    errors="surrogateescape"))
            else:
                cmdfile.write(script_content)
            cmdfile.flush()
            # Open the script and let the user review it, then check if the
            # script was modified by the user
            self.fm.execute_file([File(cmdfile.name)], app='editor')
            cmdfile.seek(0)
            # bugfix: cmdfile.read() yields bytes on py3; comparing it to
            # the str script_content was always unequal, so the retagging
            # below could never run. Decode before comparing.
            if py3:
                script_was_edited = (script_content != cmdfile.read().decode(
                    encoding="utf-8", errors="surrogateescape"))
            else:
                script_was_edited = (script_content != cmdfile.read())
            # Do the renaming
            self.fm.run(['/bin/sh', cmdfile.name], flags='w')
        # Retag the files, but only if the script wasn't changed during review,
        # because only then we know which are the source and destination files.
        if not script_was_edited:
            tags_changed = False
            for old, new in zip(filenames, new_filenames):
                if old != new:
                    oldpath = self.fm.thisdir.path + '/' + old
                    newpath = self.fm.thisdir.path + '/' + new
                    if oldpath in self.fm.tags:
                        old_tag = self.fm.tags.tags[oldpath]
                        self.fm.tags.remove(oldpath)
                        self.fm.tags.tags[newpath] = old_tag
                        tags_changed = True
            if tags_changed:
                self.fm.tags.dump()
        else:
            # bugfix: was `fm.notify(...)` — `fm` is not defined here
            # (it is only injected as a global by :eval).
            self.fm.notify("files have not been retagged")
class relink(Command):
    """:relink <newpath>
    Changes the linked path of the currently highlighted symlink to <newpath>
    """
    def execute(self):
        new_path = self.rest(1)
        tfile = self.fm.thisfile
        if not new_path:
            return self.fm.notify('Syntax: relink <newpath>', bad=True)
        if not tfile.is_link:
            return self.fm.notify('%s is not a symlink!' % tfile.relative_path, bad=True)
        # Nothing to do if the link already points at the requested target.
        if new_path == os.readlink(tfile.path):
            return None
        try:
            # Symlinks cannot be retargeted in place: remove and recreate.
            os.remove(tfile.path)
            os.symlink(new_path, tfile.path)
        except OSError as err:
            self.fm.notify(err)
        self.fm.reset()
        # Keep the cursor on the relinked file after the reset.
        self.fm.thisdir.pointed_obj = tfile
        self.fm.thisfile = tfile
        return None
    def tab(self, tabnum):
        # With no argument yet, complete with the link's current target.
        if not self.rest(1):
            return self.line + os.readlink(self.fm.thisfile.path)
        return self._tab_directory_content()
class help_(Command):
    """:help
    Display ranger's manual page.
    """
    name = 'help'

    def execute(self):
        def callback(answer):
            # Dispatch on the pressed key; 'q' (or anything unknown) aborts.
            actions = {
                "m": self.fm.display_help,
                "c": self.fm.dump_commands,
                "k": self.fm.dump_keybindings,
                "s": self.fm.dump_settings,
            }
            if answer in actions:
                actions[answer]()

        self.fm.ui.console.ask(
            "View [m]an page, [k]ey bindings, [c]ommands or [s]ettings? (press q to abort)",
            callback,
            list("mqkcs")
        )
class copymap(Command):
    """:copymap <keys> <newkeys1> [<newkeys2>...]
    Copies a "browser" keybinding from <keys> to <newkeys>
    """
    context = 'browser'

    def execute(self):
        if not (self.arg(1) and self.arg(2)):
            return self.fm.notify("Not enough arguments", bad=True)
        source_keys = self.arg(1)
        # Every remaining argument becomes a copy of the source binding.
        for target_keys in self.args[2:]:
            self.fm.ui.keymaps.copy(self.context, source_keys, target_keys)
        return None
class copypmap(copymap):
    """:copypmap <keys> <newkeys1> [<newkeys2>...]
    Copies a "pager" keybinding from <keys> to <newkeys>
    """
    # Same logic as copymap; only the keymap context differs.
    context = 'pager'
class copycmap(copymap):
    """:copycmap <keys> <newkeys1> [<newkeys2>...]
    Copies a "console" keybinding from <keys> to <newkeys>
    """
    # Same logic as copymap; only the keymap context differs.
    context = 'console'
class copytmap(copymap):
    """:copytmap <keys> <newkeys1> [<newkeys2>...]
    Copies a "taskview" keybinding from <keys> to <newkeys>
    """
    # Same logic as copymap; only the keymap context differs.
    context = 'taskview'
class unmap(Command):
    """:unmap <keys> [<keys2>, ...]
    Remove the given "browser" mappings
    """
    context = 'browser'

    def execute(self):
        # Unbind every key sequence given on the command line.
        for keyseq in self.args[1:]:
            self.fm.ui.keymaps.unbind(self.context, keyseq)
class uncmap(unmap):
    """:uncmap <keys> [<keys2>, ...]
    Remove the given "console" mappings
    """
    # Same logic as unmap; only the keymap context differs.
    context = 'console'
class cunmap(uncmap):
    """:cunmap <keys> [<keys2>, ...]
    Remove the given "console" mappings
    DEPRECATED in favor of uncmap.
    """
    def execute(self):
        # Warn about the deprecated name, then behave exactly like uncmap.
        self.fm.notify("cunmap is deprecated in favor of uncmap!")
        super(cunmap, self).execute()
class unpmap(unmap):
    """:unpmap <keys> [<keys2>, ...]
    Remove the given "pager" mappings
    """
    # Same logic as unmap; only the keymap context differs.
    context = 'pager'
class punmap(unpmap):
    """:punmap <keys> [<keys2>, ...]
    Remove the given "pager" mappings
    DEPRECATED in favor of unpmap.
    """
    def execute(self):
        # Warn about the deprecated name, then behave exactly like unpmap.
        self.fm.notify("punmap is deprecated in favor of unpmap!")
        super(punmap, self).execute()
class untmap(unmap):
    """:untmap <keys> [<keys2>, ...]
    Remove the given "taskview" mappings
    """
    # Same logic as unmap; only the keymap context differs.
    context = 'taskview'
class tunmap(untmap):
    """:tunmap <keys> [<keys2>, ...]
    Remove the given "taskview" mappings
    DEPRECATED in favor of untmap.
    """
    def execute(self):
        # Warn about the deprecated name, then behave exactly like untmap.
        self.fm.notify("tunmap is deprecated in favor of untmap!")
        super(tunmap, self).execute()
class map_(Command):
    """:map <keysequence> <command>
    Maps a command to a keysequence in the "browser" context.
    Example:
    map j move down
    map J move down 10
    """
    name = 'map'
    context = 'browser'
    # Macros are expanded when the mapping fires, not when it is defined.
    resolve_macros = False

    def execute(self):
        if self.arg(1) and self.arg(2):
            self.fm.ui.keymaps.bind(self.context, self.arg(1), self.rest(2))
        else:
            self.fm.notify("Syntax: {0} <keysequence> <command>".format(self.get_name()), bad=True)
class cmap(map_):
    """:cmap <keysequence> <command>
    Maps a command to a keysequence in the "console" context.
    Example:
    cmap <ESC> console_close
    cmap <C-x> console_type test
    """
    # Same logic as map; only the keymap context differs.
    context = 'console'
class tmap(map_):
    """:tmap <keysequence> <command>
    Maps a command to a keysequence in the "taskview" context.
    """
    # Same logic as map; only the keymap context differs.
    context = 'taskview'
class pmap(map_):
    """:pmap <keysequence> <command>
    Maps a command to a keysequence in the "pager" context.
    """
    # Same logic as map; only the keymap context differs.
    context = 'pager'
class scout(Command):
    """:scout [-FLAGS...] <pattern>
    Swiss army knife command for searching, traveling and filtering files.
    Flags:
    -a Automatically open a file on unambiguous match
    -e Open the selected file when pressing enter
    -f Filter files that match the current search pattern
    -g Interpret pattern as a glob pattern
    -i Ignore the letter case of the files
    -k Keep the console open when changing a directory with the command
    -l Letter skipping; e.g. allow "rdme" to match the file "readme"
    -m Mark the matching files after pressing enter
    -M Unmark the matching files after pressing enter
    -p Permanent filter: hide non-matching files after pressing enter
    -r Interpret pattern as a regular expression pattern
    -s Smart case; like -i unless pattern contains upper case letters
    -t Apply filter and search pattern as you type
    -v Inverts the match
    Multiple flags can be combined. For example, ":scout -gpt" would create
    a :filter-like command using globbing.
    """
    # One-letter flag constants; each corresponds to a flag documented above.
    # pylint: disable=bad-whitespace
    AUTO_OPEN = 'a'
    OPEN_ON_ENTER = 'e'
    FILTER = 'f'
    SM_GLOB = 'g'
    IGNORE_CASE = 'i'
    KEEP_OPEN = 'k'
    SM_LETTERSKIP = 'l'
    MARK = 'm'
    UNMARK = 'M'
    PERM_FILTER = 'p'
    SM_REGEX = 'r'
    SMART_CASE = 's'
    AS_YOU_TYPE = 't'
    INVERT = 'v'
    # pylint: enable=bad-whitespace
    def __init__(self, *args, **kwargs):
        super(scout, self).__init__(*args, **kwargs)
        # Compiled regex cache; built lazily by _build_regex().
        self._regex = None
        self.flags, self.pattern = self.parse_flags()
    def execute(self):  # pylint: disable=too-many-branches
        # Runs when the console line is confirmed with enter.
        thisdir = self.fm.thisdir
        flags = self.flags
        pattern = self.pattern
        regex = self._build_regex()
        count = self._count(move=True)
        self.fm.thistab.last_search = regex
        self.fm.set_search_method(order="search")
        if (self.MARK in flags or self.UNMARK in flags) and thisdir.files:
            # If both -m and -M are given, the one appearing later wins.
            value = flags.find(self.MARK) > flags.find(self.UNMARK)
            if self.FILTER in flags:
                # With -f, everything still displayed already matches.
                for fobj in thisdir.files:
                    thisdir.mark_item(fobj, value)
            else:
                for fobj in thisdir.files:
                    if regex.search(fobj.relative_path):
                        thisdir.mark_item(fobj, value)
        if self.PERM_FILTER in flags:
            # An empty pattern clears the permanent filter.
            thisdir.filter = regex if pattern else None
        # clean up:
        self.cancel()
        if self.OPEN_ON_ENTER in flags or \
                (self.AUTO_OPEN in flags and count == 1):
            if pattern == '..':
                self.fm.cd(pattern)
            else:
                self.fm.move(right=1)
                if self.quickly_executed:
                    self.fm.block_input(0.5)
        if self.KEEP_OPEN in flags and thisdir != self.fm.thisdir:
            # reopen the console:
            if not pattern:
                self.fm.open_console(self.line)
            else:
                self.fm.open_console(self.line[0:-len(pattern)])
        if self.quickly_executed and thisdir != self.fm.thisdir and pattern != "..":
            self.fm.block_input(0.5)
    def cancel(self):
        # Drop the as-you-type filter when the console is closed/aborted.
        self.fm.thisdir.temporary_filter = None
        self.fm.thisdir.refilter()
    def quick(self):
        # Runs on every keystroke while typing in the console.
        asyoutype = self.AS_YOU_TYPE in self.flags
        if self.FILTER in self.flags:
            self.fm.thisdir.temporary_filter = self._build_regex()
        if self.PERM_FILTER in self.flags and asyoutype:
            self.fm.thisdir.filter = self._build_regex()
        if self.FILTER in self.flags or self.PERM_FILTER in self.flags:
            self.fm.thisdir.refilter()
        # Returning True closes the console (unambiguous match + -a).
        if self._count(move=asyoutype) == 1 and self.AUTO_OPEN in self.flags:
            return True
        return False
    def tab(self, tabnum):
        # Tab cycles the cursor through the matches.
        self._count(move=True, offset=tabnum)
    def _build_regex(self):
        # Translate pattern+flags into a compiled regex; cached after the
        # first call.
        if self._regex is not None:
            return self._regex
        frmat = "%s"
        flags = self.flags
        pattern = self.pattern
        if pattern == ".":
            return re.compile("")
        # Handle carets at start and dollar signs at end separately
        if pattern.startswith('^'):
            pattern = pattern[1:]
            frmat = "^" + frmat
        if pattern.endswith('$'):
            pattern = pattern[:-1]
            frmat += "$"
        # Apply one of the search methods
        if self.SM_REGEX in flags:
            regex = pattern
        elif self.SM_GLOB in flags:
            regex = re.escape(pattern).replace("\\*", ".*").replace("\\?", ".")
        elif self.SM_LETTERSKIP in flags:
            regex = ".*".join(re.escape(c) for c in pattern)
        else:
            regex = re.escape(pattern)
        regex = frmat % regex
        # Invert regular expression if necessary
        if self.INVERT in flags:
            regex = "^(?:(?!%s).)*$" % regex
        # Compile Regular Expression
        # pylint: disable=no-member
        options = re.UNICODE
        if self.IGNORE_CASE in flags or self.SMART_CASE in flags and \
                pattern.islower():
            options |= re.IGNORECASE
        # pylint: enable=no-member
        try:
            self._regex = re.compile(regex, options)
        except re.error:
            # Invalid user regex: fall back to a match-everything pattern.
            self._regex = re.compile("")
        return self._regex
    def _count(self, move=False, offset=0):
        # Count matches starting at the cursor (rotated view); optionally
        # move the cursor to the first match. Stops early once a second
        # match is found, since callers only distinguish 0 / 1 / "many".
        count = 0
        cwd = self.fm.thisdir
        pattern = self.pattern
        if not pattern or not cwd.files:
            return 0
        if pattern == '.':
            return 0
        if pattern == '..':
            return 1
        deq = deque(cwd.files)
        deq.rotate(-cwd.pointer - offset)
        i = offset
        regex = self._build_regex()
        for fsobj in deq:
            if regex.search(fsobj.relative_path):
                count += 1
                if move and count == 1:
                    cwd.move(to=(cwd.pointer + i) % len(cwd.files))
                    self.fm.thisfile = cwd.pointed_obj
            if count > 1:
                return count
            i += 1
        # NOTE(review): returns the bool `count == 1` (True/False) rather
        # than the int count here; callers compare against 1, for which
        # True == 1 holds — confirm before changing.
        return count == 1
class narrow(Command):
    """
    :narrow
    Show only the files selected right now. If no files are selected,
    disable narrowing.
    """

    def execute(self):
        thisdir = self.fm.thisdir
        if not thisdir.marked_items:
            # Nothing selected: turn narrowing off.
            thisdir.narrow_filter = None
        else:
            thisdir.narrow_filter = [
                fobj.basename for fobj in self.fm.thistab.get_selection()]
        thisdir.refilter()
class filter_inode_type(Command):
    """
    :filter_inode_type [dfl]
    Displays only the files of specified inode type. Parameters
    can be combined.
    d display directories
    f display files
    l display links
    """

    def execute(self):
        # An empty argument clears the inode-type filter.
        self.fm.thisdir.inode_type_filter = self.arg(1) or ""
        self.fm.thisdir.refilter()
class filter_stack(Command):
    """
    :filter_stack ...
    Manages the filter stack.
    filter_stack add FILTER_TYPE ARGS...
    filter_stack pop
    filter_stack decompose
    filter_stack rotate [N=1]
    filter_stack clear
    filter_stack show
    """
    def execute(self):
        from ranger.core.filter_stack import SIMPLE_FILTERS, FILTER_COMBINATORS
        subcommand = self.arg(1)
        if subcommand == "add":
            try:
                # Simple filter: instantiate it with the rest of the line.
                self.fm.thisdir.filter_stack.append(
                    SIMPLE_FILTERS[self.arg(2)](self.rest(3))
                )
            except KeyError:
                # Not a simple filter name: treat arg(2) as a combinator
                # that rewrites the stack in place.
                FILTER_COMBINATORS[self.arg(2)](self.fm.thisdir.filter_stack)
        elif subcommand == "pop":
            self.fm.thisdir.filter_stack.pop()
        elif subcommand == "decompose":
            # Replace the top filter with its component filters, if any.
            inner_filters = self.fm.thisdir.filter_stack.pop().decompose()
            if inner_filters:
                self.fm.thisdir.filter_stack.extend(inner_filters)
        elif subcommand == "clear":
            self.fm.thisdir.filter_stack = []
        elif subcommand == "rotate":
            # Rotation count: explicit argument, else quantifier, else 1.
            rotate_by = int(self.arg(2) or self.quantifier or 1)
            self.fm.thisdir.filter_stack = (
                self.fm.thisdir.filter_stack[-rotate_by:]
                + self.fm.thisdir.filter_stack[:-rotate_by]
            )
        elif subcommand == "show":
            # Display-only subcommand: skip the refilter below.
            stack = list(map(str, self.fm.thisdir.filter_stack))
            pager = self.fm.ui.open_pager()
            pager.set_source(["Filter stack: "] + stack)
            pager.move(to=100, percentage=True)
            return
        else:
            self.fm.notify(
                "Unknown subcommand: {}".format(subcommand),
                bad=True
            )
            return
        self.fm.thisdir.refilter()
class grep(Command):
    """:grep <string>
    Looks for a string in all marked files or directories
    """

    def execute(self):
        pattern = self.rest(1)
        if not pattern:
            return
        # -e protects patterns beginning with a dash; -r recurses into
        # selected directories; flag 'p' pipes the output into the pager.
        command = ['grep', '--line-number', '-e', pattern, '-r']
        command += [fobj.path for fobj in self.fm.thistab.get_selection()]
        self.fm.execute_command(command, flags='p')
class flat(Command):
    """
    :flat <level>
    Flattens the directory view up to the specified level.
    -1 fully flattened
    0 remove flattened view
    """
    def execute(self):
        try:
            level_str = self.rest(1)
            level = int(level_str)
        except ValueError:
            # No numeric argument: fall back to the quantifier prefix.
            level = self.quantifier
        if level is None:
            self.fm.notify("Syntax: flat <level>", bad=True)
            return
        if level < -1:
            self.fm.notify("Need an integer number (-1, 0, 1, ...)", bad=True)
            return  # bugfix: previously fell through and applied the bad level
        self.fm.thisdir.unload()
        self.fm.thisdir.flat = level
        self.fm.thisdir.load_content()
class reset_previews(Command):
    """:reset_previews
    Reset the file previews.
    """
    def execute(self):
        # Drop every cached preview and force a full redraw.
        self.fm.previews = {}
        self.fm.ui.need_redraw = True
# Version control commands
# --------------------------------
class stage(Command):
    """
    :stage
    Stage selected files for the corresponding version control system
    """
    def execute(self):
        from ranger.ext.vcs import VcsError
        # Only meaningful inside a tracked working copy.
        if self.fm.thisdir.vcs and self.fm.thisdir.vcs.track:
            filelist = [f.path for f in self.fm.thistab.get_selection()]
            try:
                self.fm.thisdir.vcs.action_add(filelist)
            except VcsError as ex:
                self.fm.notify('Unable to stage files: {0}'.format(ex))
            # Refresh the VCS status display for this directory.
            self.fm.ui.vcsthread.process(self.fm.thisdir)
        else:
            self.fm.notify('Unable to stage files: Not in repository')
class unstage(Command):
    """
    :unstage
    Unstage selected files for the corresponding version control system
    """
    def execute(self):
        from ranger.ext.vcs import VcsError
        # Only meaningful inside a tracked working copy.
        if self.fm.thisdir.vcs and self.fm.thisdir.vcs.track:
            filelist = [f.path for f in self.fm.thistab.get_selection()]
            try:
                self.fm.thisdir.vcs.action_reset(filelist)
            except VcsError as ex:
                self.fm.notify('Unable to unstage files: {0}'.format(ex))
            # Refresh the VCS status display for this directory.
            self.fm.ui.vcsthread.process(self.fm.thisdir)
        else:
            self.fm.notify('Unable to unstage files: Not in repository')
# Metadata commands
# --------------------------------
class prompt_metadata(Command):
    """
    :prompt_metadata <key1> [<key2> [<key3> ...]]
    Prompt the user to input metadata for multiple keys in a row.
    """
    # Name of the command placed in the console for each key.
    _command_name = "meta"
    # Class-level (shared) queue of keys still to be prompted for; the
    # chain survives across the separate :meta invocations.
    _console_chain = None
    def execute(self):
        prompt_metadata._console_chain = self.args[1:]
        self._process_command_stack()
    def _process_command_stack(self):
        if prompt_metadata._console_chain:
            # pop() takes from the end, so keys are prompted in reverse
            # of the order they were given.
            key = prompt_metadata._console_chain.pop()
            self._fill_console(key)
        else:
            # All keys handled: repaint the browser columns.
            for col in self.fm.ui.browser.columns:
                col.need_redraw = True
    def _fill_console(self, key):
        # Pre-fill the console with ":meta <key> <existing value>" and
        # place the cursor at the end of the line.
        metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
        if key in metadata and metadata[key]:
            existing_value = metadata[key]
        else:
            existing_value = ""
        text = "%s %s %s" % (self._command_name, key, existing_value)
        self.fm.open_console(text, position=len(text))
class meta(prompt_metadata):
    """
    :meta <key> [<value>]
    Change metadata of a file. Deletes the key if value is empty.
    """
    def execute(self):
        key = self.arg(1)
        update_dict = dict()
        update_dict[key] = self.rest(2)
        # Apply the update to every selected file.
        selection = self.fm.thistab.get_selection()
        for fobj in selection:
            self.fm.metadata.set_metadata(fobj.path, update_dict)
        # Continue a pending :prompt_metadata chain, if one is active.
        self._process_command_stack()
    def tab(self, tabnum):
        key = self.arg(1)
        metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
        # A known key with a value completes to that value; otherwise
        # complete the key name itself.
        if key in metadata and metadata[key]:
            return [" ".join([self.arg(0), self.arg(1), metadata[key]])]
        return [self.arg(0) + " " + k for k in sorted(metadata)
                if k.startswith(self.arg(1))]
class linemode(default_linemode):
    """
    :linemode <mode>
    Change what is displayed as a filename.
    - "mode" may be any of the defined linemodes (see: ranger.core.linemode).
      "normal" is mapped to "filename".
    """

    def execute(self):
        requested = self.arg(1)
        if requested == "normal":
            # "normal" is an alias for the default linemode.
            from ranger.core.linemode import DEFAULT_LINEMODE
            requested = DEFAULT_LINEMODE
        if requested not in self.fm.thisfile.linemode_dict:
            self.fm.notify("Unhandled linemode: `%s'" % requested, bad=True)
            return
        self.fm.thisdir.set_linemode_of_children(requested)
        # Tell every browser column to repaint with the new linemode.
        for column in self.fm.ui.browser.columns:
            column.need_redraw = True
class yank(Command):
    """:yank [name|dir|path]

    Copies the file's name (default), directory or path into both the primary X
    selection and the clipboard.
    """

    # Maps the command argument to the file-object attribute to copy;
    # '' is the default when no argument is given.
    modes = {
        '': 'basename',
        'name_without_extension': 'basename_without_extension',
        'name': 'basename',
        'dir': 'dirname',
        'path': 'path',
    }

    def execute(self):
        import subprocess

        def clipboards():
            # Return the command lines for the first available clipboard
            # manager; X11 tools get two commands (primary selection +
            # clipboard), the others only one.
            from ranger.ext.get_executables import get_executables
            clipboard_managers = {
                'xclip': [
                    ['xclip'],
                    ['xclip', '-selection', 'clipboard'],
                ],
                'xsel': [
                    ['xsel'],
                    ['xsel', '-b'],
                ],
                'wl-copy': [
                    ['wl-copy'],
                ],
                'pbcopy': [
                    ['pbcopy'],
                ],
            }
            # Preference order: macOS, Wayland, then the X11 tools.
            ordered_managers = ['pbcopy', 'wl-copy', 'xclip', 'xsel']
            executables = get_executables()
            for manager in ordered_managers:
                if manager in executables:
                    return clipboard_managers[manager]
            return []

        clipboard_commands = clipboards()

        mode = self.modes[self.arg(1)]
        selection = self.get_selection_attr(mode)

        # One line per selected file.
        new_clipboard_contents = "\n".join(selection)
        for command in clipboard_commands:
            # Feed the text on stdin of the clipboard tool.
            process = subprocess.Popen(command, universal_newlines=True,
                                       stdin=subprocess.PIPE)
            process.communicate(input=new_clipboard_contents)

    def get_selection_attr(self, attr):
        # Collect the requested attribute from every selected file object.
        return [getattr(item, attr) for item in
                self.fm.thistab.get_selection()]

    def tab(self, tabnum):
        # Offer the non-default modes as tab completions.
        return (
            self.start(1) + mode for mode
            in sorted(self.modes.keys())
            if mode
        )
class paste_ext(Command):
    """
    :paste_ext

    Like paste but tries to rename conflicting files so that the
    file extension stays intact (e.g. file_.ext).
    """

    @staticmethod
    def make_safe_path(dst):
        """Return *dst*, or a non-existing variant of it.

        A trailing underscore is appended to the stem first; if that is
        also taken, a counter is inserted before the extension.
        """
        if not os.path.exists(dst):
            return dst

        stem, ext = os.path.splitext(dst)
        if not stem.endswith("_"):
            stem += "_"
        if not os.path.exists(stem + ext):
            return stem + ext

        counter = 0
        while True:
            candidate = stem + str(counter) + ext
            if not os.path.exists(candidate):
                return candidate
            counter += 1

    def execute(self):
        return self.fm.paste(make_safe_path=paste_ext.make_safe_path)
| [
"[email protected]"
]
| |
2e1be97e37cbeb8818aca9d446194858ec44726e | ef67a6bdf962db62b6d7b11431760ff04cfcd3d3 | /pulucia_web/migrations/0023_email_info.py | c41c6c4e2700084a54b0af59282479217c4b877f | []
| no_license | Mperonneau/Editionspulucia | c9f6493c15cdd07856222cf945afb1576f3661ab | 8d48519c37cbe49db21650455f8a64a080a1884e | refs/heads/master | 2023-04-16T16:44:00.312152 | 2021-04-19T20:39:47 | 2021-04-19T20:39:47 | 322,133,606 | 1 | 0 | null | 2021-04-08T03:03:35 | 2020-12-17T00:10:56 | JavaScript | UTF-8 | Python | false | false | 582 | py | # Generated by Django 2.2 on 2021-02-14 11:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the ``email_info`` table with an
    # auto "id" primary key, a creation timestamp set automatically on
    # insert, and an email address column.

    dependencies = [
        ('pulucia_web', '0022_livre_achat'),
    ]

    operations = [
        migrations.CreateModel(
            name='email_info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
f7c737ab5e9d924e1b8a4a35509a8b3f94a95ce0 | faf757527d9ba073da7bceeb74055eac2e980295 | /APKHelper.py | 3a761e2a9837600eb6fa362495ca1d8b6c9f97be | []
| no_license | bx-lr/android_static_dynamic_apk_test | e86832e7ab46c56361344ba316fb77da69e6d81e | 98017e470f9d7f97e4c0ce82c91a03a76d67e14d | refs/heads/master | 2022-04-26T07:54:42.756345 | 2020-04-22T16:16:21 | 2020-04-22T16:16:21 | 257,955,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | #!/usr/bin/python
'''
Load APK into Database
'''
from DatabaseHelper import DBHelper
from StringIO import StringIO
import zipfile
import base64
import os
import traceback
class APKHelper():
    """Load an APK and store it, plus every member file, in a database.

    Each row written is ``[name, depth, base64_payload, flag, is_apk]`` --
    presumably matching ``DBHelper.scheme``; verify against DatabaseHelper.
    (Python 2 code: uses StringIO and base64.encodestring.)
    """

    def __init__(self, path):
        self.path = path
        self.db = None
        self.emu = None
        self.adb = None
        self.load()

    def load(self):
        """Read the APK, open it as a zip and insert all entries into the DB."""
        # "with" guarantees the file handle is closed even if read() fails
        # (the original leaked the handle on error).
        with open(self.path, 'rb') as fd:
            apk = fd.read()

        # zipfile can read directly from an in-memory buffer.
        myzipfile = zipfile.ZipFile(StringIO(apk))

        # Database name is derived from the APK's file name.
        db_path = 'output/%s.db' % self.path.split("/")[-1]
        self.db = DBHelper(db_name=db_path)

        # First row: the APK container itself (depth 0, is_apk flag set).
        data = [""] * len(self.db.scheme)
        data[0] = "apk"
        data[1] = 0
        data[2] = base64.encodestring(apk)
        data[3] = 1
        data[4] = 1
        self.db.addrow(self.db.curs, self.db.table_name, data)

        # Remaining rows: every file inside the archive (depth 1).
        data[4] = 0
        for name in myzipfile.namelist():
            # NOTE(review): encode('ascii') raises on non-ASCII entry names,
            # matching the original behaviour; quotes are stripped for SQL.
            data[0] = name.decode('utf-8').encode('ascii').replace("'", "")
            data[1] = 1
            data[2] = base64.encodestring(myzipfile.open(name).read())
            data[3] = 1
            try:
                self.db.addrow(self.db.curs, self.db.table_name, data)
            except Exception:
                # Remove the partially written database, print the REAL
                # traceback (print_exc takes no exception argument -- the
                # original passed Exception as the "limit" parameter), and
                # re-raise the original exception instead of a bare one.
                os.remove(db_path)
                traceback.print_exc()
                raise
if __name__ == "__main__":
    # Ad-hoc smoke test with a hard-coded sample APK path.
    APKHelper('/home/udev/analysis/amazon/mShop/com.amazon.mShop.android-1.apk')
| [
"[email protected]"
]
| |
17f868dc8151d4b9c9596d87a5f3bf32c01b1111 | b537246cd2f2f11ae83b95c19ce63609c40f3f30 | /apps/campaign.py | 9e672f97e9b2c3b8ad9f1dc6ec91e625cce7bc43 | []
| no_license | bij1/verkiezingen | 29f600acbc608211d8a1ec618e826f9a01b453c4 | 4955ef7f2faa7ff7555325faa43cc2df6601a9bc | refs/heads/main | 2023-07-17T00:01:38.442570 | 2021-04-06T07:43:54 | 2021-04-06T07:43:54 | 345,665,841 | 0 | 1 | null | 2021-04-06T07:43:55 | 2021-03-08T13:29:25 | Python | UTF-8 | Python | false | false | 102 | py | from app import app
import dash_html_components as html
layout = html.Div([html.H4("nothing yet")]) | [
"[email protected]"
]
| |
486a90dfe4e88e27f735a7910cba4c3fdc0a40d2 | ed4ab53110ed7d8118dd4efbc87e3e2ecdec1d89 | /ciphers/base32.py | 2ac29f441e94afe81d10f400696dc80cc17c23bc | [
"MIT"
]
| permissive | agk79/Python | 19842317dd99433f94cea147e499c9542ecf014f | 0b7a66b2084896dcd11ba2737ca3e0c507385962 | refs/heads/master | 2021-07-13T02:56:55.245139 | 2020-08-07T18:36:38 | 2020-08-07T18:36:38 | 194,342,735 | 3 | 0 | MIT | 2019-06-28T23:07:10 | 2019-06-28T23:07:10 | null | UTF-8 | Python | false | false | 334 | py | import base64
def main():
    """Prompt for a string, Base32-encode it, then decode it back."""
    text = input('->')
    # b32encode operates on bytes, so encode the user input first.
    raw_bytes = text.encode('utf-8')
    encoded = base64.b32encode(raw_bytes)
    print(encoded)
    # Round-trip: decoding must reproduce the original text.
    print(base64.b32decode(encoded).decode('utf-8'))
if __name__ == '__main__':
    # Run the interactive demo only when executed directly.
    main()
| [
"[email protected]"
]
| |
e51325d061f759541e44b00c7d91d9ff587dd11d | d69c9de28f606faed826a5c5c8f2eb8e7ec6b586 | /FreePie/ClodPIE11-generic.py | da3bed4f8013784fcaf573287dc71ee61323450f | [
"MIT"
]
| permissive | bhugh/LinuxTrackFG | 3ea5dd42f7e8d602cad85d7da9cadb72382897ee | 5a268f134dff9594d03e0b2554afbf1ee8cdd516 | refs/heads/master | 2021-01-21T10:19:30.527659 | 2017-02-28T08:30:46 | 2017-02-28T08:30:46 | 83,405,800 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,941 | py | ############################################################################
##ClodPIE: Cliffs of Dover/EDTracker View & Movement Enhancement with FreePIE
##by Brent Hugh
##
##Version 1.1
##
##[email protected] - or contact via the forums at http://twcclan.com - flug
##
##See this file, included in the distribution, for detailed instructions,
##including installation, use, and customization:
##
## README-CLOD-FreePIE-EDTracker-vJoy.txt
##
##See numerous important user customization options below.
##
##Copyright (c) 2016 Brent Hugh
##
##The MIT License (MIT)
##
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included in
##all copies or substantial portions of the Software.
##
##
import random, time
from System import Int16
import ctypes # An included library with Python install.
# FreePIE re-runs this script every frame; "global" keeps these names
# shared between runs (at module level this is otherwise a no-op).
global yaw
global pitch
global roll
global lastYaw
global x
global y
global z
global edtracker, joystickinput, jipov, enabled
global xmove, ymove, zmove, savezmove, saveymove, savezmovetime
if starting:
    # One-time setup: FreePIE sets "starting" only on the script's first run.
    system.setThreadTiming(TimingTypes.HighresSystemTimer)
    #system.setThreadTiming(TimingTypes.ThreadYield)
    system.threadExecutionInterval = 10   # re-run roughly every 10 ms
    enabled = 1
###########START OF USER CUSTOMIZATION OPTIONS####################
##
##
############USER CUSTOMIZATION: JOYSTICK/EDTRACKER CONFIGURATION
##
##This is the most important customization section. You must enter the exact
##name of your devices as shown in the "Game Controllers" section in your
##system settings. "EDTracker Pro" is probably the same for every EDTracker.
##But you will need to look up any other Joystick you want to use as input
##and enter its name in place of "Sidewinder Force Feedback 2 Joystick"
##
##Alternatively, you can simply use integers 0,1,2 etc to specify the
##joystick. In that case you are at the mercy of Windows, which decides
##which is numbered 0,1,2,3 etc.
##
##If you don't have an input joystick, you can still use the program--
##you just won't have access to the joystick buttons or hat for input.
##You can use the keyboard instead.
##
##If you don't have an EDTracker you can still use the program--you will
##still have access to the key and joystick button movement features, just
##none of the special EDTracker movement features
##
##Note that EDTracker is considered internally by Windows to be a type
##of joystick. This means you could specify any other joystick here
##and use it as a pseudo headtracker
##
# Device names exactly as shown in Windows "Game Controllers" (an integer
# index is also accepted -- see the commented examples below).
inputtracker="EDTracker Pro" #edtracker to use. Enter name as found in "Game Controllers" system settings. Note that windows sees EDTracker as a "joystick"
inputjoystick="SideWinder Force Feedback 2 Joystick" #joystick to use for input (use for button input etc). Enter name as found in "Game Controllers" system settings
#inputtracker=0 #Example of specifying inputtracker as an integer index
#inputjoystick=0 #Example of specifying input joystick as an integer index
############USER CUSTOMIZATION: KEYBOARD SHORTCUTS
##
##You can change keys below--for example, the current Reset Key is F1,
##which is shown in the code this way:
##
## return keyboard.getKeyDown(Key.F1)
##
## If you'd like to use F2 instead, simply type "F2" in place of F1, like this:
## return keyboard.getKeyDown(Key.F2)
##
##A full list of available inputs, including the names for all keyboard keys
##and joystick buttons, is online here:
##
## https://github.com/AndersMalmgren/FreePIE/wiki/Reference
##
## If you want to completely disable a function--so that it isn't associated with
## any keyboard key or joystick button--just return the value 0, like this:
##
## PreventLeantoKey(): return 0
##
#Note that you can use a keyboard key, a joystick button, OR both/either
##to trigger. See examples below under JOYSTICK/KEYBOARD SHORTCUTS
##
# Keyboard shortcut bindings (see the customization notes above).
def ResetKey(): return keyboard.getKeyDown(Key.F1) #resets all modes (except ME109 mode); I set this to be the same as my Opentrack reset key
def LeantoKey(): return keyboard.getKeyDown(Key.A)
def PreventLeantoKey(): return keyboard.getKeyDown(Key.S)
def PreventLeantoToggleKey(): return keyboard.getPressed(Key.LeftShift)
def VibrateLeantoToggleKey(): return keyboard.getPressed(Key.Z)
def ME109ToggleKey(): return ( keyboard.getKeyDown(Key.LeftControl) and keyboard.getPressed(Key.D9)) #Ctrl-9 for 109 mode
############USER CUSTOMIZATION: JOYSTICK/KEYBOARD SHORTCUTS
##
##joystickinput.getPressed(0) gets joystick button 1, etc.
##
##Note use of this programming idiom: (joystickinput and joystickinput.getPressed(0))
##Reason: If no joystick exists we set joystickinput=0 so that the joystickinput.getPressed(0) is never accessed.
##This prevents runtime errors when no joystick is present.
##
##jipov is "joystick input pov" and ranges from 0 to 35999. 0=up,9000=right, 18000=down, 27000=left, etc.
##If the joystick hat is not in use, or no joystick exists, jipov is set to -1
##
##Key.D1 is the digit '1' on the top row of the keyboard. Key.NumberPad1 is
##the '1' key on the number pad. Full list at
##
## https://github.com/AndersMalmgren/FreePIE/wiki/Reference
##
##Combine keys/buttons simply using logical 'or', as below. Scripting is in python. More info at:
##
## https://github.com/AndersMalmgren/FreePIE/wiki
##
# Joystick/keyboard combo bindings.  jipov is the hat angle in 1/100 degree
# (0 = up, 9000 = right, 18000 = down, 27000 = left; -1 when centred or no
# joystick), so each direction below accepts a 90-degree arc of the hat.
def LeantoToggleKey(): return keyboard.getPressed(Key.CapsLock) or (joystickinput and joystickinput.getPressed(2))
def UpKey(): return keyboard.getKeyDown(Key.D1) or jipov >= 31500 or ( jipov <= 4500 and jipov >=0 )
def DownKey(): return keyboard.getKeyDown(Key.D2) or (jipov >= 13500 and jipov <= 22500)
def LeftKey(): return keyboard.getKeyDown(Key.Q) or (jipov >= 22500 and jipov <= 31500)
def RightKey(): return keyboard.getKeyDown(Key.W) or (jipov >= 4500 and jipov <= 13500)
############USER CUSTOMIZATION: ME109 MODE
##
##ME109 mode moves your viewport slightly rightwards, so that
##the pilot is aligned with the ME109 gunsight. You can adjust the
##relevant parameters here:
##
# ME109 mode shifts the viewpoint slightly right so the pilot lines up
# with the ME109 gunsight (see the customization notes above).
ME109Toggle = 0 #ME109 mode is off initially (0). To turn ME109 mode ON by default, just change 0 to 1.
ME109xcenter = -100 #amount to shift right in ME109 leaned-back position
ME109xmove = -250 #amount to shift right in ME109 leaned-in-to-gunsight position
############USER CUSTOMIZATION: ADVANCED OPTIONS
##
##Most users won't need to change the options below, but they are easy
##to adjust if you would like.
##
##These options adjust how far the movement is in up/down/left/right/forward
##/back directions, where the centerpoint is located, how far the
##ME109 right-offset is, what angles various left/right/up/down/etc movements
##trigger at, what multiplier is used to translate edtracker to vJoy,
##and so on.
##
##If you would like to use a different input device than EDTracker, a
##different output devices than vJoy (combined with Opentrack), or other
##more complex mods, you can probably set all that up in this section,
##or a combination of changes here and in the main code below.
##
# Movement ranges for each axis, in vJoy output units.
xmove = 1500 #amount to shift in R/L direction; varies -xmove to xmove
ymove = 600 #amount to shift in up/down direction; varies -ymove to ymove
zmove = 3000 #amount to shift in forward/back direction; varies 0 to -zmove
VibrateLeantoFramerate = 0.05 #1/60 seconds is same as target frame rate for most CLOD users/most monitors. Use a decimal number here as a fraction like 1/20 will round to 0.
#center position
def xcenter():
    # In ME109 mode the resting view is kept slightly offset to the right.
    if (ME109Toggle): return ME109xcenter
    else: return 0
def ycenter(): return 0
def zcenter(): return 0
# Saved state for the "vibrate" effect (last z/y position and timestamp).
savezmove = zcenter()
saveymove = ycenter()
savezmovetime = 0
edtrackerScaleFactor = 16 #Scale factor required when transferring from EdTracker to vJoy inputs. Probably just 8 bit vs 16 bit values.
# Head-angle thresholds (degrees) that trigger the extra view shifts.
def LeftTrigger(): return (trackIR.yaw < -60)
def RightTrigger(): return (trackIR.yaw > 60)
#We trigger up at a certain point in lean into gunsight so that we can see over the nose a bit better
def UpTrigger(): return (trackIR.pitch < -10)
#def UpTrigger(): return 0 # Use this to disable UpTrigger
def DownTrigger(): return 0
#def LeantoForwardTrigger(): return ( (trackIR.pitch < -1.2) ) # lean forward into gunsite
def LeantoForwardTrigger(): return 0 # disabling this for now because the joystick button works so much better!
def LeantoBackwardTrigger(): return ( abs(trackIR.yaw) > 150 ) # look backward - we can see around the backplate better if we lean forward while looking back
def PreventLeantoTrigger(): return ( abs(trackIR.yaw)>60 and abs(trackIR.yaw) < 150) # same as Left/RightTriggers on the low side & LeantoBackwardTrigger yaw on the high side
# Mode toggles, flipped by the corresponding *ToggleKey() bindings above.
LeantoToggle = 0
PreventLeantoToggle = 0
VibrateLeantoToggle = 0
###########END OF USER CUSTOMIZATION OPTIONS####################
##
##You are welcome to customize the code below, but warning! All the
##easy stuff is above this line!
##
# Look up the configured EDTracker and input joystick.  If either is
# missing, tell the user via a message box and fall back gracefully so
# the script still runs with whatever hardware is present.
try:
    edtracker = joystick[inputtracker]
except:
    ctypes.windll.user32.MessageBoxW(0, "No EDTracker was found. You can edit the .py file near line 67 to identify your EDTracker, or simply proceed without an EDTracker", "No EDTracker", 1)
    edtracker = 0
try:
    joystickinput = joystick [inputjoystick]
except:
    try:
        # Named joystick not found: fall back to device index 0.
        joystickinput = joystick [0] #Use default joystick
        ctypes.windll.user32.MessageBoxW(0, "Using the default joystick as the input joystick. To change this, edit the .py file near line 63.", "Default Input Joystick", 1)
    except:
        ctypes.windll.user32.MessageBoxW(0, "No input joystick was found. You can edit the .py file near line 67 to identify your joystick, or simply proceed without joystick input", "No joystick", 1)
        joystickinput=0
#diagnostics.watch(LeftTrigger())
#diagnostics.watch(RightTrigger())
#diagnostics.watch(ME109Toggle)
#diagnostics.watch(LeantoToggle)
#diagnostics.watch(PreventLeantoToggle)
#diagnostics.watch(VibrateLeantoToggle)
#diagnostics.watch(savezmove)
#diagnostics.watch(savezmovetime)
#diagnostics.watch(trackIR.x)
#diagnostics.watch(trackIR.y)
#diagnostics.watch(trackIR.z)
#diagnostics.watch(trackIR.yaw)
#diagnostics.watch(trackIR.pitch)
#diagnostics.watch(trackIR.roll)
#diagnostics.watch(edtracker.x)
#diagnostics.watch(edtracker.y)
#diagnostics.watch(edtracker.z)
#diagnostics.watch(vJoy[0].x)
#diagnostics.watch(vJoy[0].y)
#diagnostics.watch(vJoy[0].z)
#diagnostics.watch(vJoy[0].rx)
#diagnostics.watch(vJoy[0].ry)
#diagnostics.watch(vJoy[0].rz)
# ---- Per-frame section: executed on every FreePIE tick ----

# EDTRACKER TO to vJoy
if edtracker:
    vJoy[0].x = edtracker.x * edtrackerScaleFactor
    vJoy[0].y = edtracker.y * edtrackerScaleFactor
    vJoy[0].z = edtracker.z * edtrackerScaleFactor

#pov values: U 0, 4500, R 9000, 13500, D 18000, 22500, L 27000, 31500, 0
#if input joystick doesn't exist, we set joystickinput=0, so we test for that first & if =0, just return -1 always
if (joystickinput): jipov = joystickinput.pov[0]
else: jipov=-1
#diagnostics.watch(jipov)

#move left/right
vjrx = xcenter()
if (LeftKey() or LeftTrigger() and not RightKey()): vjrx = xmove
elif (RightKey() or (RightTrigger() and not LeftKey()) ): vjrx = -xmove

vjry = ycenter()
#up/down
if (UpKey()): vjry = ymove
if (UpTrigger() and not LeantoToggle): vjry = ymove #UpTrigger is separate from UpKey so can adjust upward move during lean-in here separately if necessary
if (DownKey() or DownTrigger()): vjry = -ymove

#lean to gunsight
# lean to gunsight is activated by tab OR leaning in and negated by leftshift. Also if 1 is pressed (to move up) lean in is suppressed, bec. leaning in gets your head stuck on the roof, keeping it from moving up
# Lean to gunsight also restricts from moving R or L to keep centered on the gunsights
if (LeantoKey() or LeantoToggle or ( ((LeantoForwardTrigger() and not UpKey()) or LeantoBackwardTrigger()) and not PreventLeantoTrigger() ) and not PreventLeantoKey() and not PreventLeantoToggle):
    vjrz = -zmove
    if (not DownKey() and vjry < ycenter() ): vjry = ycenter() #lean into gunsight & duck don't really work together
    if (not LeftKey() and not RightKey() and not LeantoBackwardTrigger()): #when we lean to gunsight, we want to stay centered, but if user is pressing keys that takes precedence
        if (ME109Toggle): vjrx = ME109xmove;
        else: vjrx = xcenter()
else: vjrz = zcenter()

#vibrate - idea is to emulate the "transparent" quality narrow window dividers etc have when viewed with two eyes (vs "solid" when viewed with one eye)
if VibrateLeantoToggle:
    currtime=time.time()
    #diagnostics.watch(currtime)
    # Alternate between leaned-in/down and centered/up once per
    # VibrateLeantoFramerate seconds; otherwise hold the saved position.
    if ((currtime - savezmovetime) > VibrateLeantoFramerate):
        if (abs(savezmove) >= abs(zmove)):
            vjrz = zcenter()
            vjry = ymove
        else:
            vjrz = -zmove
            vjry = -ymove
        savezmove=vjrz
        saveymove=vjry
        savezmovetime=currtime
    else:
        vjrz=savezmove
        vjry=saveymove

#prevents lean to gunsight/leanforward
#(There isn't any point in actually moving backwards as there is no room to do so. Instead, we just prevent forward movement,
#which is useful in some situations, because normal TrackIR/EdTracker movements lean us forward sometimes when we don't want it.
if (PreventLeantoKey() or PreventLeantoToggle): vjrz = zcenter()

# Publish the computed offsets to the virtual joystick.
vJoy[0].rx = vjrx
vJoy[0].ry = vjry
vJoy[0].rz = vjrz

#toggle ME109 mode on or off
#this does a slight right lean when leaning to gunsight to line up with the ME109 gunsights
if ME109ToggleKey(): ME109Toggle = not ME109Toggle

#toggle LeantoGunsight on or off
if LeantoToggleKey():
    LeantoToggle = not LeantoToggle
    if (LeantoToggle):
        PreventLeantoToggle=0 #can't have them both toggled simultaneously
        VibrateLeantoToggle = 0

#toggle PreventLeantoGunsight on or off
if PreventLeantoToggleKey():
    PreventLeantoToggle = not PreventLeantoToggle
    if (PreventLeantoToggle):
        LeantoToggle=0 #can't have them both toggled simultaneously
        VibrateLeantoToggle = 0

#toggle VibrateLeanto on or off
if VibrateLeantoToggleKey():
    VibrateLeantoToggle = not VibrateLeantoToggle
    if (VibrateLeantoToggle):
        LeantoToggle=0 #can't have them both toggled simultaneously
        PreventLeantoToggle = 0

if ResetKey():
    PreventLeantoToggle = 0
    LeantoToggle = 0
    VibrateLeantoToggle = 0
    #whatever the current state of the joystick is, Opentrack will consider to be the "center" when
    #the reset key is pressed. So we make sure this is 0,0,0 and not something else
    #If we don't do this, strange bugs ensue
    vJoy[0].rx = xcenter()
    vJoy[0].ry = ycenter()
    vJoy[0].rz = zcenter()
| [
"[email protected]"
]
| |
ce9fb84011732967d2451774fe713d1f2132913f | 8e6caec03ebe31ae30c056d6550840dca77801ea | /Python/algorithms/resheto_eratosfena.py | f860c288a4805ab6938ee08ded7e89d378ceb880 | []
| no_license | leff9f/pytest | c951bca3556db6496d72085d2d0836ea965f348a | c40f7239b9ea1e5772b2e347b48b91d7f735bcbf | refs/heads/master | 2020-03-19T13:56:06.463322 | 2020-02-19T15:10:52 | 2020-02-19T15:10:52 | 136,601,822 | 2 | 1 | null | 2019-02-18T15:23:11 | 2018-06-08T10:02:08 | Python | UTF-8 | Python | false | false | 192 | py | N = 50000000
A = [True]*N
A[0] = A[1] = False
for k in range(2, N):
if A[k]:
for m in range(k*2, N, k):
A[m] = False
for k in range(N):
if A[k]:
print(k)
| [
"[email protected]"
]
| |
9fa6656765c702e8f6936b48d78042e07de88013 | 3856dbedcf846f9845290e9b2efa4d18e300623d | /test/test_user_report.py | 25a240a1bb120206353d2d97b51104617f0cbe91 | []
| no_license | Valandur/webapi-client-python | 5b314da41803f5b55a5c6cce62d2384b86d0fa37 | 8502726bf3facb17c6fa681faf0f600207eb61ae | refs/heads/master | 2022-02-04T21:45:37.686703 | 2019-07-23T12:11:47 | 2019-07-23T12:11:47 | 113,748,693 | 2 | 0 | null | 2019-01-09T16:07:31 | 2017-12-10T12:38:14 | Python | UTF-8 | Python | false | false | 3,145 | py | # coding: utf-8
"""
Web-API
Access Sponge powered Minecraft servers through a WebAPI # Introduction This is the documentation of the various API routes offered by the WebAPI plugin. This documentation assumes that you are familiar with the basic concepts of Web API's, such as `GET`, `PUT`, `POST` and `DELETE` methods, request `HEADERS` and `RESPONSE CODES` and `JSON` data. By default this documentation can be found at http:/localhost:8080 (while your minecraft server is running) and the various routes start with http:/localhost:8080/api/v5... As a quick test try reaching the route http:/localhost:8080/api/v5/info (remember that you can only access \\\"localhost\\\" routes on the server on which you are running minecraft). This route should show you basic information about your server, like the motd and player count. # List endpoints Lots of objects offer an endpoint to list all objects (e.g. `GET: /world` to get all worlds). These endpoints return only the properties marked 'required' by default, because the list might be quite large. If you want to return ALL data for a list endpoint add the query parameter `details`, (e.g. `GET: /world?details`). > Remember that in this case the data returned by the endpoint might be quite large. # Debugging endpoints Apart from the `?details` flag you can also pass some other flags for debugging purposes. Remember that you must include the first query parameter with `?`, and further ones with `&`: `details`: Includes details for list endpoints `accept=[json/xml]`: Manually set the accept content type. This is good for browser testing, **BUT DON'T USE THIS IN PRODUCTION, YOU CAN SUPPLY THE `Accepts` HEADER FOR THAT** `pretty`: Pretty prints the data, also good for debugging in the browser. 
An example request might look like this: `http://localhost:8080/api/v5/world?details&accpet=json&pretty&key=MY-API-KEY` # Additional data Certain endpoints (such as `/player`, `/entity` and `/tile-entity` have additional properties which are not documented here, because the data depends on the concrete object type (eg. `Sheep` have a wool color, others do not) and on the other plugins/mods that are running on your server which might add additional data. You can also find more information in the github docs (https:/github.com/Valandur/Web-API/tree/master/docs/DATA.md) # noqa: E501
OpenAPI spec version: 5.4.2-S7.1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.user_report import UserReport # noqa: E501
from swagger_client.rest import ApiException
class TestUserReport(unittest.TestCase):
    """UserReport unit test stubs (auto-generated by swagger-codegen)."""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        pass

    def testUserReport(self):
        """Test UserReport"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.user_report.UserReport()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this module's tests directly.
    unittest.main()
| [
"[email protected]"
]
| |
6d2b8a6b52fcfa85775cb6e350914e11786547cf | 71e1db21359e8e2aad16d9cf29148dceb409be6f | /python-BerryIMUv3-SPI/IMU.py | 8d2868e2dd324f68dc40fa9d37d3bd828452f4e9 | []
| no_license | FedericoLV/BerryIMU | 2db66ed0fb26cb43ac6616ce205022f2f66d8998 | 9dd77e65e0be0bba81513af3960c09759da8fc6d | refs/heads/master | 2023-06-24T15:09:47.708788 | 2021-07-23T11:34:54 | 2021-07-23T11:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | from LSM6DSL import *
import spidev
import time
READ_FLAG = 0x80          # OR'ed into the register address for SPI reads
__MULTIPLE_READ = 0x40    # multi-byte read flag -- unused in this file
BerryIMUversion = 99      # 99 = not detected; set to 3 by detectIMU()
spi = spidev.SpiDev()     # shared SPI handle, opened by bus_open()
def bus_open( ):
    # Open SPI bus 0, device (chip-select) 0, at a 10 MHz clock.
    spi.open(0,0 )
    spi.max_speed_hz = 10000000
def readReg( reg_address):
    """Read one byte from *reg_address* over SPI.

    The first byte sent is the address with the read flag set; the second
    is a dummy byte clocked out while the reply comes back.
    """
    response = spi.xfer2([reg_address | READ_FLAG, 0x00])
    return response[1]
def writeReg(reg_address, data):
    """Write the byte *data* to *reg_address* over SPI; return the raw reply."""
    reply = spi.xfer2([reg_address, data])
    return reply
def detectIMU():
    """Probe for the LSM6DSL (BerryIMUv3) and set BerryIMUversion on success.

    The accelerometer/gyroscope on the BerryIMUv3 is a LSM6DSL; reading
    its WHO_AM_I register must return 0x6A.
    """
    global BerryIMUversion
    bus_open()
    try:
        #Check for LSM6DSL on the BerryIMUv3
        LSM6DSL_WHO_AM_I_response = readReg(LSM6DSL_WHO_AM_I)
    except IOError as f:
        # A bus error here would otherwise abort the program.  The original
        # printed an empty string; report the error instead of hiding it so
        # wiring/SPI problems are visible to the user.
        print('Could not read WHO_AM_I over SPI: %s' % f)
    else:
        if (LSM6DSL_WHO_AM_I_response == 0x6A) :
            print("Found BerryIMUv3 (LSM6DSL)")
            BerryIMUversion = 3
    time.sleep(1)
def writeByte(device_address,register,value):
    # NOTE(review): `bus` is never defined in this SPI port -- this looks
    # like leftover code from the I2C version; calling this function raises
    # NameError.  TODO: remove it or reimplement via writeReg().
    bus.write_byte_data(device_address, register, value)
def readACCx():
    """Return the signed 16-bit X-axis accelerometer sample."""
    low = readReg(LSM6DSL_OUTX_L_XL)
    high = readReg(LSM6DSL_OUTX_H_XL)
    raw = (high << 8) | low
    # Interpret the unsigned 16-bit value as two's-complement.
    return raw - 65536 if raw & 0x8000 else raw
def readACCy():
    """Return the signed 16-bit Y-axis accelerometer sample."""
    low = readReg(LSM6DSL_OUTY_L_XL)
    high = readReg(LSM6DSL_OUTY_H_XL)
    raw = (high << 8) | low
    # Interpret the unsigned 16-bit value as two's-complement.
    return raw - 65536 if raw & 0x8000 else raw
def readACCz():
    """Return the signed 16-bit Z-axis accelerometer sample."""
    low = readReg(LSM6DSL_OUTZ_L_XL)
    high = readReg(LSM6DSL_OUTZ_H_XL)
    raw = (high << 8) | low
    # Interpret the unsigned 16-bit value as two's-complement.
    return raw - 65536 if raw & 0x8000 else raw
def readGYRx():
    """Return the signed 16-bit X-axis gyroscope sample."""
    low = readReg(LSM6DSL_OUTX_L_G)
    high = readReg(LSM6DSL_OUTX_H_G)
    raw = (high << 8) | low
    # Interpret the unsigned 16-bit value as two's-complement.
    return raw - 65536 if raw & 0x8000 else raw
def readGYRy():
    """Return the signed 16-bit Y-axis gyroscope sample."""
    low = readReg(LSM6DSL_OUTY_L_G)
    high = readReg(LSM6DSL_OUTY_H_G)
    raw = (high << 8) | low
    # Interpret the unsigned 16-bit value as two's-complement.
    return raw - 65536 if raw & 0x8000 else raw
def readGYRz():
    """Return the signed 16-bit Z-axis gyroscope sample."""
    low = readReg(LSM6DSL_OUTZ_L_G)
    high = readReg(LSM6DSL_OUTZ_H_G)
    raw = (high << 8) | low
    # Interpret the unsigned 16-bit value as two's-complement.
    return raw - 65536 if raw & 0x8000 else raw
def initIMU():
    # Configure the accelerometer and gyroscope; the register bit patterns
    # are explained by the per-line comments below.
    #initialise the accelerometer
    writeReg(LSM6DSL_CTRL1_XL,0b10011111) #ODR 3.33 kHz, +/- 8g , BW = 400hz
    writeReg(LSM6DSL_CTRL8_XL,0b11001000) #Low pass filter enabled, BW9, composite filter
    writeReg(LSM6DSL_CTRL3_C,0b01000100) #Enable Block Data update, increment during multi byte read
    #initialise the gyroscope
    writeReg(LSM6DSL_CTRL2_G,0b10011100) #ODR 3.3 kHz, 2000 dps
| [
"[email protected]"
]
| |
9ffa69d0309b69ed9051f6a8715c88e92b57ca70 | 001323724016038e3da88630304e58017c83b7ad | /custcount_venv/bin/django-admin.py | 5bce0adac8be4eb11bc0b9395a8166cc01c9228a | []
| no_license | afieqhamieza/CustCount | 36319ea2ad44d8bd223f0e48b4c88ce0178ac0c6 | 8bd50572db7c9e0ae7d36f810a005b6998846d98 | refs/heads/main | 2023-05-31T05:06:24.770029 | 2021-06-16T23:19:34 | 2021-06-16T23:19:34 | 377,316,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | #!/Users/afieqhamieza/Documents/github repo/CustCount/custcount_venv/bin/python3.9
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"[email protected]"
]
| |
dab7a1b6a64e14e0306300e83625d8fd77fdcf89 | 29fda4e5cc9e1cf277a21a49d317c9529a239f77 | /env/bin/isort | aed39421a222bed7361e764a4ba74e7484ba8863 | []
| no_license | happy-cutman/django-jobfinder | 8f631a8f8d0c26f334cd4f1128f1123eeb189db7 | 9269dea86fde2f4c0a1d9ed4cc3828a4bf9dc20d | refs/heads/master | 2022-12-19T03:02:51.836480 | 2019-09-16T14:27:21 | 2019-09-16T14:27:21 | 208,819,206 | 0 | 0 | null | 2022-12-08T06:10:02 | 2019-09-16T14:22:34 | JavaScript | UTF-8 | Python | false | false | 244 | #!/home/cutman/Desktop/django-bs/env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
    # Strip a "-script.pyw" / ".exe" suffix from argv[0] (Windows console
    # entry points add these) before handing control to isort.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
]
| ||
60e03ed0c3f8968a0b036e1daf76ac590ab8bd62 | 674e6043a505a8147ceaadaebb7319ae355cc524 | /line_chart.py | 58dfb491d6477930e184b98354572f700338d6e8 | []
| no_license | szamani20/python-TA-session | d3dac47c4840549015415cdfec55e4888d69c266 | 93055d689ae0f5f209472b71fbfe8f958ade85af | refs/heads/master | 2021-09-01T07:10:08.650372 | 2017-12-25T15:37:37 | 2017-12-25T15:37:37 | 115,347,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | import matplotlib.pyplot as plt
# Demo: plot two labelled line series with axis titles and a legend.

# Data
x = [1, 2, 3]
y = [4, 5, 6]
# second data
x2 = [1, 2, 3]
y2 = [10, 12, 14]
# Create Plot (label= is the text shown in the legend)
plt.plot(x, y, label='First Line')
# Create second plot
plt.plot(x2, y2, label='Second Line')
# Label for x axis
plt.xlabel('X Label')
# Label for y axis
plt.ylabel('Y Label')
# title of graph ("\n" puts the subtitle on a second line)
plt.title('Good Graph\nSubtitle')
# Add legend (collects the label= of every plot call)
plt.legend()
# show graph
plt.show()
| [
"[email protected]"
]
| |
c10868cbdcc7ac694c6e4e2040eaa4f16fd73722 | 0e3f8bca9ea26b58dfe1dbb103f5a08fa384f41d | /j2.py | 9dfec787b7324e4c275737a44b134f263d5e60a8 | []
| no_license | zhanglulu15/python-machine-study | be5b7153dd851c8dacc9db3eeba7576e7e852a91 | 31f01255ca79fdc406913bb380070257e16fdbd5 | refs/heads/master | 2021-01-21T14:33:17.316770 | 2017-06-24T13:54:49 | 2017-06-24T13:54:49 | 95,299,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | #层次聚类:更多的是为层次话化的可视化提供支持,在我们比较陌生的数据层次时有帮助
#在klearn.cluster库中提供了一种AgglomerativeClustering的分类算法:基于层次的聚类方法
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
#从磁盘读取城市经度纬度表
import csv
# Accumulate [longitude, latitude] pairs read from the city table on disk.
x = []
f = csv.reader(open('/home/lulu/code/shuju/city.csv', encoding='utf-8'))
for v in f:
    jd = v[2]  # longitude column
    wd = v[3]  # latitude column
    # Skip rows with a missing coordinate. Compare with `==`, not `is`:
    # identity comparison against '' only works by accident of CPython
    # string interning and is a SyntaxWarning on Python >= 3.8.
    if jd == '' or wd == '':
        continue
    x.append([float(jd), float(wd)])
# Convert to a numpy array so boolean-mask indexing works below.
x = np.array(x)
# Number of clusters to extract.
n_clusters = 5
# Hierarchical (agglomerative) clustering, merging clusters so as to
# minimise the within-cluster variance ('ward' linkage).
cls = AgglomerativeClustering(linkage='ward', n_clusters=n_clusters).fit(x)
# cls.labels_ holds the cluster index assigned to each row of x.
markers = ['^', 'x', 'o', '*', '+']
for i in range(n_clusters):
    # Boolean mask selecting the points assigned to cluster i.
    members = cls.labels_ == i
    # One marker shape per cluster; alpha keeps dense regions readable.
    plt.scatter(x[members, 0], x[members, 1], s=60, marker=markers[i], c='b', alpha=0.5)
plt.title('')
plt.show()
"[email protected]"
]
| |
a0aca621170bb1b7eca6916195c45407cd8bc932 | 3eb7be8ddedc446634e3d45972ce2ff293819b38 | /AnyQuantMini/python/data/StockData.py | 271b6cbabd5b00ed6d49c994bf02a33067847126 | []
| no_license | OptimusPrime1997/AnyQuant | bc7f74860796fb43ae73486b243cc2c92de986b9 | 29b6b5b82b4ffa09d3eed3f45170ad548dc9e6de | refs/heads/master | 2021-01-13T13:43:10.729447 | 2016-12-27T15:14:39 | 2016-12-27T15:14:39 | 76,332,760 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | '''
# Created on
# id:stock
# start:start time
# end:end time
# exchange:exchange place
# @author: 1
'''
import urllib2
import json
# str='http://121.41.106.89:8010/api/stock/sh600000/?start=2014-10-10&end=2015-10-10&fields=open+high+low+close+adj_price+volume+turnover'
# str='http://121.41.106.89:8010/api/stock/sh600000/?start=2014-10-10&end=2015-10-10&fields=pe+pb'# error
# str='http://121.41.106.89:8010/api/benchmark/all'
# str='http://121.41.106.89:8010/api/benchmark/hs300?start=2015-01-01&end=2015-01-30&fields=open+high+low+close+adj_price+volume+turnover'
def print_lol(collection):
    """Print every leaf element of an arbitrarily nested list, one per line."""
    for item in collection:
        if isinstance(item, list):
            # Recurse into sub-lists so any nesting depth is handled.
            print_lol(item)
        else:
            print(item)
def getDataById(id, start, end, exchange):
    """Fetch and dump daily trading data for one stock from the AnyQuant API.

    Builds the request URL from `exchange` + `id` (e.g. 'sh' + '600000'),
    the `start`/`end` dates ('YYYY-MM-DD') and a fixed field list, performs
    the HTTP request with the auth header, decodes the GB2312 response and
    prints the parsed 'trading_info' records via print_lol.

    NOTE(review): uses `urllib2`, so this is Python 2 code; `id` shadows
    the builtin of the same name. Returns None; results are only printed.
    """
    url = 'http://121.41.106.89:8010/api/stock/' # the url to visit
    head = {'X-Auth-Code': '44895fc229fefbd07cc009a787d554c5'} # my opencode , you can replace it with yours
    st = '/?start='
    en = '&end='
    fd = '&fields=open+high+low+close+adj_price+volume+turnover'
    checkurl = url + exchange + id + st + start + en + end + fd
    print(checkurl)
    try:
        request = urllib2.Request(checkurl, headers=head) # remember to add the head
        print(type(request))
        response = urllib2.urlopen(request) # open the url
        print(type(response))
        # Read the whole body; the payload arrives GB2312-encoded.
        data = [i for i in response]
        info=data[0].decode("GB2312");
        jsonData=json.loads(info,encoding="GB2312")
        print(type(jsonData))
        # print(type(data))
        # print_lol(data)
        print(data[0])
        temp=jsonData['data']
        print(jsonData['data'])
        print(type(temp))
        print(temp['trading_info'])
        stockList=temp['trading_info']
        print_lol(stockList)
        # dict={"123":"23","ag":"543"}
        # print(dict)
    except urllib2.URLError as e:
        # NOTE(review): a plain URLError (e.g. connection refused) has no
        # `.code` attribute — only HTTPError does — so this handler can
        # itself raise AttributeError; confirm intended error handling.
        print(e.code)
        print(e.reason)
    return
# Module import side effect: immediately fetches one month of data for
# Shanghai-listed stock 600000.
getDataById('600000', '2015-01-01', '2015-01-30', 'sh')
| [
"[email protected]"
]
| |
57260f6b5d40f289986b5d8fb601c421eafeae75 | 35c1a591ce5ea045b72a1d9f89fc0d8f46cdd78b | /rice/deps/prompt_toolkit/application/dummy.py | ebe2f334fe11c7f8340bb99e392d2af6fc46a457 | [
"MIT"
]
| permissive | jimhester/rice | 0a0aef48ccab3d6b2d7f700cc311977e8c4a3740 | 61cafc717d9398a57ecd2afb2a086afe1c676e30 | refs/heads/master | 2021-07-07T21:37:00.826756 | 2017-09-27T14:02:49 | 2017-09-27T14:02:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | from __future__ import unicode_literals
from .application import Application
from prompt_toolkit.input import DummyInput
from prompt_toolkit.output import DummyOutput
# Public API of this module.
__all__ = (
    'DummyApplication',
)
class DummyApplication(Application):
    """
    Placeholder `Application` used when no real application is running:
    `prompt_toolkit.application.current.get_app` falls back to an instance
    of this class. It wires up dummy I/O and refuses to actually run.
    """
    def __init__(self):
        # Dummy I/O objects: nothing is read from or written to a terminal.
        dummy_output = DummyOutput()
        dummy_input = DummyInput()
        super(DummyApplication, self).__init__(output=dummy_output, input=dummy_input)

    def run(self):
        # Running the placeholder is always a programming error.
        raise NotImplementedError('A DummyApplication is not supposed to run.')

    def run_async(self):
        raise NotImplementedError('A DummyApplication is not supposed to run.')

    def run_in_terminal(self):
        raise NotImplementedError

    def run_coroutine_in_terminal(self):
        raise NotImplementedError

    def run_system_command(self):
        raise NotImplementedError

    def suspend_to_background(self):
        raise NotImplementedError
| [
"[email protected]"
]
| |
2bd40a80b828137202059058e88f7504df2e6470 | 8613ec7f381a6683ae24b54fb2fb2ac24556ad0b | /boot/hard/2017.py | 36601afabce20178c45edae2db36c8014b9864eb | []
| no_license | Forest-Y/AtCoder | 787aa3c7dc4d999a71661465349428ba60eb2f16 | f97209da3743026920fb4a89fc0e4d42b3d5e277 | refs/heads/master | 2023-08-25T13:31:46.062197 | 2021-10-29T12:54:24 | 2021-10-29T12:54:24 | 301,642,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | q = int(input())
# Read q queries; query i asks about the inclusive range [l[i], r[i]]
# (the loops below step by 2, so endpoints are presumably odd — per the
# problem constraints; TODO confirm).
l, r = [0] * q, [0] * q
for i in range(q):
    l[i], r[i] = map(int, input().split())
# Overall bounds of all query endpoints, used to size the tables below.
mini = min(min(l), min(r))
maxi = max(max(l), max(r))
# ans[i]: prefix count (over odd i) of numbers satisfying the condition.
ans = [0] * (maxi + 1)
# prime[i]: truthy iff i is prime (filled below for the needed range).
prime = [0] * (maxi + 1)
def judge_prime(n):
    """Return True iff n is prime, by trial division up to sqrt(n).

    Fix: the original returned True for n <= 0 because the loop body never
    ran and only n == 1 was special-cased; 0, 1 and negatives are not prime.
    """
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True
# Mark primality for every value that can appear as n or as (n + 1) / 2.
for i in range((mini + 1) // 2, maxi + 1):
    prime[i] = judge_prime(i)
# Prefix counts over odd n: n qualifies when both n and (n + 1) / 2 are
# prime. NOTE: when mini == 1 the first iteration reads ans[-1] (the last
# element); it is still 0 at that point, so the result is unaffected.
for i in range(mini, maxi + 1, 2):
    ans[i] = ans[i - 2] + 1 if prime[i] and prime[(i + 1) // 2] else ans[i - 2]
    #print(i, ans[i], ans[i - 2])
#print(ans[1:])
# Answer each query as a difference of prefix counts; max(0, ...) guards
# the l[i] - 2 index for the smallest endpoints.
for i in range(q):
    #print(ans[r[i]], ans[l[i] - 2], ans[l[i] - 1])
    print(ans[r[i]] - ans[max(0, l[i] - 2)])
| [
"[email protected]"
]
| |
9ac6a88696d7b8f4a08d632b8636f601fec6d3c9 | d6f1bd13b8418f6176a4157860bcaa09cd969278 | /Codevita/staircase.py | c15f5d065f5033f9bb7abad62ac1d47706751133 | []
| no_license | GokulSoman/scripts | 83a9da4ff539dc4aeac22208a71357baafb2aa0a | 6969b212b014ccf03b4ac3264e3c0102b7966708 | refs/heads/master | 2022-12-24T02:32:49.141606 | 2020-09-25T14:39:32 | 2020-09-25T14:39:32 | 298,593,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | n= int(input("Enter Number of stairs: "))
def jump(steps):
    """Count the distinct ways to climb `steps` stairs taking 1 or 2 at a time.

    Iterative dynamic programming (the Fibonacci recurrence) replaces the
    original naive double recursion, which recomputed the same subproblems
    and took exponential time. Negative inputs yield 0, as before.
    """
    if steps < 0:
        return 0
    # ways: ways to reach the current step; prev: ways to reach the one before.
    ways, prev = 1, 0
    for _ in range(steps):
        ways, prev = ways + prev, ways
    return ways
print("Ways: {}".format(jump(n))) | [
"[email protected]"
]
| |
80cd0aba9e9547c3b95b9c1bdf832592f7c25791 | 65bf7311c871a24ac3d50972d734ddfb9e489846 | /DjangoFirst/DjangoFirst/urls.py | 71db8df67f2bd4791d3eccfa851a21b0c0cb16b9 | [
"MIT"
]
| permissive | FromF/GrayScaleDjango | d2a3d3468c976fe901d8e617b3b97a2dc8e640f9 | 538af606d75ace4a873295a792e523ade10287bc | refs/heads/master | 2020-05-15T21:58:26.747126 | 2019-04-21T09:36:46 | 2019-04-21T09:36:46 | 182,514,956 | 0 | 0 | MIT | 2019-04-21T09:36:47 | 2019-04-21T09:35:16 | Python | UTF-8 | Python | false | false | 1,075 | py | """DjangoFirst URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import settings
from django.contrib.staticfiles.urls import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# URL routes for the project: the Django admin plus the GrayScale app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('grayscale/', include('GrayScale.urls')),
]
# Serve static files and user-uploaded media from Django itself (a
# development convenience). NOTE(review): `static` is imported from
# `django.contrib.staticfiles.urls` above — it normally lives in
# `django.conf.urls.static`; confirm that import actually resolves.
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
]
| |
94224b8eb2d7c9d25fd29141ae439ffb36b3255e | 252618a833de48b8da1dd71fa05653691f20fdff | /z-demos/yemian.py | a51379243e64b2beb63a5b22a159fc59e8fa4a97 | []
| no_license | xiebohust/DemoAPI | 5a5e3237d5a1f5c138bcdd5e76450d8d20105ae6 | 2ae10361c47a43d57941a4ed5b0184d1700c8092 | refs/heads/master | 2023-02-01T21:01:59.794835 | 2020-12-18T01:37:24 | 2020-12-18T01:37:24 | 261,943,427 | 0 | 0 | null | 2020-05-07T03:47:17 | 2020-05-07T03:47:17 | null | UTF-8 | Python | false | false | 278 | py |
# import requests
#
# url = 'https://lanhuapp.com/web/#/item/project/board?pid=7c3320e7-e58c-40ef-a118-2f373a6513d1'
# r = requests.get(url)
# print(r.text)
def y(start=2, stop=10):
    """Yield the integers start, start + 1, ..., stop - 1 (defaults: 2..9).

    Generalised from the original hard-coded range(2, 10); the
    zero-argument calls made elsewhere in this module behave exactly as
    before.
    """
    yield from range(start, stop)
# Pull the first four values from the generator; each next() resumes it
# where it left off, so this prints 2, 3, 4 and 5.
r= y()
print(next(r))
print(next(r))
print(next(r))
print(next(r))
"[email protected]"
]
| |
6018b78f698286b8dcc5c68df4f3473b415eb318 | bf8870d923adca9877d4b4dacef67f0a454727a8 | /_other/decorator.py | 23fb94a26518ba636f1e500656ad6d7d4a7a468e | []
| no_license | artkpv/code-dojo | 6f35a785ee5ef826e0c2188b752134fb197b3082 | 0c9d37841e7fc206a2481e4640e1a024977c04c4 | refs/heads/master | 2023-02-08T22:55:07.393522 | 2023-01-26T16:43:33 | 2023-01-26T16:43:33 | 158,388,327 | 1 | 0 | null | 2023-01-26T08:39:46 | 2018-11-20T12:45:44 | C# | UTF-8 | Python | false | false | 922 | py | import threading
def persistant_caller(max_calls=None, timeout_ms=None):
def actual(function):
def persistant_function(*args, **kwargs):
count = 0
while True:
try:
count += 1
return function(*args, **kwargs)
except Exception as e:
if count > max_calls:
# report exception
raise e
# report exception
if timeout_ms:
threading.sleep(timeout_ms)
return persistant_function
return actual
count = 0
@persistant_caller(max_calls=2, timeout_ms=100)
def printer(arg1, key1=None, key2=None):
global count
if count < 0:
count += 1
raise Exception('first exception')
print('printer', arg1, key1, key2)
printer(1, key1='key1val', key2='key2val')
| [
"[email protected]"
]
| |
8c696a09239d19efe57f853c3d56c8c5cfad8cd3 | 5d1733f223efaf66399fc1e136e112c39d08bed1 | /Python/Dictionary_Exercises/Exercise2_Nested_Dictionaries.py | b9629130446f70be3cc61d8a98fbed8e1900e0f4 | []
| no_license | GregoryMNeal/Digital-Crafts-Class-Exercises | 3facb6937cb589bc7ae6b16d642e0bb777f53201 | 0a8e9f2f6c486f4329ae7350e1bd86b7b211158e | refs/heads/master | 2021-08-14T20:47:13.745243 | 2017-11-16T18:31:33 | 2017-11-16T18:31:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | # Functional Spec's:
# 1. Write a Python expression that gets the email address of Ramit.
# 2. Write a Python expression that gets the first of Ramit's interest.
# 3. Write a Python expression that gets the email address of Jasmine.
# 4. Write a Python expression that gets the second of Jan's two interests.
# Imports
# Functions
# Main
if __name__ == "__main__":
ramit = {
'name': 'Ramit',
'email': '[email protected]',
'interests': ['movies', 'tennis'],
'friends': [{
'name': 'Jasmine',
'email': '[email protected]',
'interests': ['photography', 'tennis']
},
{
'name': 'Jan',
'email': '[email protected]',
'interests': ['movies', 'tv']
}]
}
# Satisfy Functional Spec #1
ramit_email = ramit['email']
print(ramit_email)
# Satisfy Functional Spec #2
ramit_interest1 = ramit['interests'][0]
print(ramit_interest1)
# Satisfy Functional Spec #3
jasmines_email = ramit['friends'][0]['email']
print(jasmines_email)
# Satisfy Functional Spec #4
jans_2nd_interest = ramit['friends'][1]['interests'][1]
print(jans_2nd_interest)
| [
"[email protected]"
]
| |
b8a40d70f7b2b31cc7ecfccfbb62848da1de90b6 | a3dc62852462e9c13978c5a7c24e63ab778b4b33 | /jobs/migrations/0006_cover.py | 18ac93f80c6c045785319949d1249b1f8db49133 | []
| no_license | milemik/django_portfolio | eced1e8a5430a81c4904fd391004b11f0f768369 | 86c44a35fb2279214ba128a25823924938d8c54c | refs/heads/master | 2023-08-16T14:11:08.982466 | 2023-07-19T11:59:38 | 2023-07-19T11:59:38 | 229,927,059 | 0 | 0 | null | 2023-08-20T08:40:53 | 2019-12-24T11:10:18 | Python | UTF-8 | Python | false | false | 728 | py | # Generated by Django 4.0.4 on 2022-04-23 08:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `Cover` model — a titled cover
    image with a `use` flag marking whether it is the active cover."""
    dependencies = [
        ("jobs", "0005_auto_20200709_0545"),
    ]
    operations = [
        migrations.CreateModel(
            name="Cover",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("cover_title", models.CharField(max_length=50)),
                # Uploaded files land under MEDIA_ROOT/images/cover.
                ("cover_image", models.ImageField(upload_to="images/cover")),
                ("use", models.BooleanField(default=False)),
            ],
            options={
                "ordering": ["id"],
            },
        ),
    ]
| [
"[email protected]"
]
| |
be9b9888e2d1355614e0a6634a6914900f7d02af | e5c4a7acd6505cd1f6ae5dae5342380c177ddfe9 | /sea-level-predictor/sea_level_predictor.py | 6566ab605fadec9e019125867b72e58ea5e4b537 | []
| no_license | esinkarahan/data_analysis_with_python | 9cc878602d7a3a80dbffe18bb4e7e3b68d568949 | 19c6faf64b6a53c0bf69782fdf773eba9765e8bc | refs/heads/main | 2023-08-10T17:50:37.840195 | 2021-10-06T13:41:31 | 2021-10-06T13:41:31 | 402,902,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import linregress
def draw_plot():
    """Plot EPA sea-level data with two linear trends and return the axes.

    Reads 'epa-sea-level.csv' from the working directory, scatters the raw
    CSIRO measurements, fits one regression over the full record and one
    over years >= 2000, extrapolates both to 2050, and saves the figure to
    'sea_level_plot.png'.
    """
    # Read data from file
    df = pd.read_csv('epa-sea-level.csv')
    # Create scatter plot
    fig,ax=plt.subplots()
    ax.scatter(df['Year'],df['CSIRO Adjusted Sea Level'], s=5,c='steelblue',alpha=0.7)
    # First line of best fit: least-squares over the entire record,
    # extrapolated through 2050. The slope is a NumPy scalar, so
    # multiplying it by the range broadcasts elementwise into an array.
    result1 = linregress(df['Year'],df['CSIRO Adjusted Sea Level'])
    x1 = range(1880,2051)
    ypred1 = result1.slope*x1 + result1.intercept
    ax.plot(x1,ypred1,c='r',label = 'prediction since 1880')
    # Second line of best fit: only the recent (year >= 2000) data, to
    # capture the steeper modern trend.
    result2 = linregress(df.loc[df['Year']>=2000,'Year'],df.loc[df['Year']>=2000,'CSIRO Adjusted Sea Level'])
    x2 = range(2000,2051)
    ypred2 = result2.slope*x2 + result2.intercept
    ax.plot(x2,ypred2,c='m',label = 'prediction since 2000')
    # Add labels and title
    ax.set(xlabel='Year',
           ylabel='Sea Level (inches)', title='Rise in Sea Level');
    ax.legend();
    # Save plot and return data for testing (DO NOT MODIFY)
    plt.savefig('sea_level_plot.png')
    return plt.gca()
"[email protected]"
]
| |
f89f710127b08733d585ce9f7723e52d63a4ff3e | 5a12738ceacc894e481b69d4bcfc56745aaedac1 | /onmt/models/__init__.py | 30439ebb4b0b7c7f6ba704d8bfce2e6d4b15185d | [
"MIT"
]
| permissive | zdou0830/DDA | 3521b27f201e2a2c0f9de9acf2a45bd622016e13 | ddf3db1161f47269b90c07a52a569a8557897ed0 | refs/heads/master | 2020-07-30T14:10:01.413640 | 2019-09-23T03:58:21 | 2019-09-23T03:58:21 | 210,258,915 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | """Module defining models."""
from onmt.models.model_saver import build_model_saver, ModelSaver
from onmt.models.model import NMTModel, LMModel
# Public API re-exported via `from onmt.models import *`.
# NOTE(review): "check_sru_requirement" is listed here but never imported
# above, so a star-import would raise AttributeError for it — confirm
# where that name is supposed to come from.
__all__ = ["build_model_saver", "ModelSaver",
           "NMTModel", "LMModel", "check_sru_requirement"]
| [
"[email protected]"
]
| |
fec405b40dffaf94db579791ad2b4b3996177b9b | a3ffda670cc680c13f62105cf74a0847f9de083c | /Code/aud/start.py | 4e4683e8139c67e95c6cfbf7fab324cae5088349 | []
| no_license | chionhkw/S15-15-112-Term-Project-Duality- | eba972fe4cebd8b380b25e180d00b1d4dff522a4 | c7a1ff59a790cd6be8a25133c188df2e050b866b | refs/heads/master | 2016-08-11T17:03:47.227638 | 2015-11-15T18:15:28 | 2015-11-15T18:15:28 | 45,872,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | import winsound
import os
path = os.path.dirname(__file__)
winsound.PlaySound(path + os.sep + "start.wav", winsound.SND_FILENAME) | [
"[email protected]"
]
| |
168058f661863c9ce796c298d6f27afc1555518b | 395481c91da2299fc09adf14e842b6bcaaab476d | /AiStuff/Ai.py | c53a4eb31f603dd8261ff5893448213b89cb0a5d | []
| no_license | Vyom88/Connect4-Ai-And-Gui | 1f6f13ba5d56938afdc727dd26886863dee67aaa | 00b916cb0b0a5392897373ab4cc84c0108ab82ab | refs/heads/main | 2023-03-23T12:17:38.050964 | 2021-03-09T23:39:33 | 2021-03-09T23:39:33 | 346,174,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,123 | py | import random
# Did they tie?
def didTie(board):
for i in board[0]:
if i == "o":
return False
return True
#checks rows if someone won
def checkRows(board, color):
for row in board:
currentStreak = 0
for col in row:
if col == color:
currentStreak += 1
if currentStreak == 4:
return True
else:
currentStreak = 0
return False
#checks col if someone won
def checkColumns (board, color):
for i in range(7):
currentStreak = 0
for l in range(6):
if board[l][i] == color:
currentStreak += 1
else:
currentStreak = 0
if currentStreak == 4:
return True
return False
#checks diagonals if someone won
def checkDiagonals(board, color):
for i in list(range(3)):
for l in list(range(4)):
if board[i][l] == color:
if board[i + 1][l + 1] == color:
if board[i + 2][l + 2] == color:
if board[i + 3][l + 3] == color:
return True
for i in list(range(3)):
for l in list(range(3, 7)):
if board[i][l] == color:
if board[i + 1][l - 1] == color:
if board[i + 2][l - 2] == color:
if board[i + 3][l - 3] == color:
return True
return False
#calls the other 3 winning functions (checkrow, checkcol, and checkDiagnol) and returns True if someone won
def hasWon(board, color):
# print(checkRows(board, color))
# print(checkColumns(board, color))
# print(checkDiagonals(board, color))
if checkRows(board, color) or checkColumns(board, color) or checkDiagonals(board, color):
#print("ygckvbukgvbkuyghvkuyghvkyugvk")
return True
else:
return False
#places the peice in whatever col they input
def placePiece(board, colour, col):
col -= 1
if board[0][col] == "o":
counter = 0
for i in board:
if not i[col] == "o":
board[counter - 1][col] = colour
return True
counter += 1
if board[5][col] == "o":
board[5][col] = colour
else:
return False
# check to see if it is a vaild location to put things
def vaildLocation(board, col):
col -= 1
if board[0][col] == "o":
counter = 0
for i in board:
if i[col] == "o":
return True
counter += 1
else:
return False
# Check for all of the 3 pairs of a color
def checkThree(board, piece):
three_count = 0
rowAmount = len(board)
colAmount = len(board[0])
emptyspaceChar = Ai.emptyspaceChar
for r in range(rowAmount-1):
for c in range(colAmount-1):
if c < colAmount-3:
# check horizontal right
if board[r][c] == board[r][c+1] == board[r][c+2] == piece and board[r][c+3] == emptyspaceChar:
three_count += 1
if r < rowAmount-3:
# check diagnoal right
if board[r][c] == board[r+1][c+1] == board[r+2][c+2] == piece and board[r+3][c+3] == emptyspaceChar:
three_count += 1
if c >= 3:
# check horizontal left
if board[r][c] == board[r][c-1] == board[r][c-2] == piece and board[r][c-3] == emptyspaceChar:
three_count += 1
if r < rowAmount-3:
# check diagnoal left
if board[r][c] == board[r+1][c-1] == board[r+2][c-2] == piece and board[r+3][c-3] == emptyspaceChar:
three_count += 1
if r < rowAmount-3:
# chek vertical
if (board[r][c] == board[r+1][c] == board[r+2][c] == piece and board[r+3][c] == emptyspaceChar) or (board[r+1][c] == board[r+2][c] == board[r+3][c] == piece and board[r][c] == emptyspaceChar):
three_count += 1
return three_count
# Check for all of the 2 pairs of a color
def checkTwo(board, piece):
twoCount = 0
rowAmount = len(board)
colAmount = len(board[0])
emptyspaceChar = Ai.emptyspaceChar
for r in range(rowAmount-1):
for c in range(colAmount-1):
if c < colAmount-3:
# check horizontal right
if board[r][c] == board[r][c+1] == piece and board[r][c+2] == board[r][c+3] == emptyspaceChar:
twoCount += 1
if r < rowAmount-3:
# check diagnoal right
if board[r][c] == board[r+1][c+1] == piece and board[r+2][c+2] == board[r+3][c+3] == emptyspaceChar:
twoCount += 1
if c >= 3:
# check horizontal left
if board[r][c] == board[r][c-1] == piece and board[r][c-2] == board[r][c-3] == emptyspaceChar:
twoCount += 1
if r < rowAmount-3:
# check diagnoal left
if board[r][c] == board[r+1][c-1] == piece and board[r+2][c-2] == board[r+3][c-3] == emptyspaceChar:
twoCount += 1
if r < rowAmount-3:
# chek vertical
if (board[r][c] == board[r+1][c] == piece and board[r+2][c] == board[r+3][c] == emptyspaceChar) or (board[r+2][c] == board[r+3][c] == piece and board[r][c] == board[r+1][c] == emptyspaceChar):
twoCount += 1
return twoCount
# Pick the best move to take
def pickBestMove(board, piece, AIpiece, PLAYERpiece):
playableLocations = findPlayableLocations(board)
bestScore = float("-inf")
bestCol = random.choice(playableLocations)
for col in playableLocations:
temp_board = board
placePiece(temp_board, piece, col)
if hasWon(temp_board, AIpiece):
score = Ai.returnValue
else:
score = calculate_score(board, AIpiece, PLAYERpiece)
if score > bestScore:
bestScore = score
bestCol = col
return bestCol
# Find all playable colums
def findPlayableLocations(board):
playableLocations = []
colAmount = len(board[0])
for col in range(colAmount-1):
if vaildLocation(board, col+1):
playableLocations.append(col+1)
return playableLocations
# Calculate a score
def calculate_score(board, AIpiece, PLAYERpiece):
threeScore = checkThree(board, AIpiece)*4
twoScore = checkTwo(board, AIpiece)*2
AiWon = hasWon(board, AIpiece)
playerThreeScore = checkThree(board, PLAYERpiece)*6
playerTwoScore = checkTwo(board, PLAYERpiece)*2
playerWon = hasWon(board, PLAYERpiece)
temp1 = threeScore + twoScore + (1000 if AiWon else 0)
temp2 = playerThreeScore + playerTwoScore + (1000 if playerWon else 0)
score = temp1 - temp2
return score
class Ai:
returnValue = 1000000000000
emptyspaceChar = "o"
def __init__(self, aiColor="r", playerColor="y"):
self.color = aiColor
self.aiColor = aiColor
self.playerColor = playerColor
self.Pinfinity = float("inf")
self.Ninfinity = float("-inf")
self.lastCol = None
self.start = True
def makeBoardCopy(self, board):
temp_board = []
for row in board:
temp = []
for column in row:
temp.append(column)
temp_board.append(temp)
return temp_board
def minMax(self, board, depth, alpha, beta, maximizingPlayer):
if self.start is True:
self.lastCol = None
self.start = False
playableLocations = findPlayableLocations(board)
hasWonAi = hasWon(board, self.aiColor)
hasWonPlayer = hasWon(board, self.playerColor)
isTie = didTie(board)
terminal = True if hasWonAi or hasWonPlayer or isTie else False
if depth == 0 or terminal:
if terminal:
if hasWonAi:
return (self.lastCol, float("inf"))
elif hasWonPlayer:
return (self.lastCol, float("-inf"))
else:
return (self.lastCol, 0)
else:
return (self.lastCol, calculate_score(board, self.aiColor, self.playerColor))
# Ai's turn
if maximizingPlayer:
value = self.Ninfinity
column = random.choice(playableLocations)
for col in playableLocations:
temp_board = self.makeBoardCopy(board)
placePiece(temp_board, self.aiColor, col)
newScore = self.minMax(temp_board, depth-1, alpha, beta, False)[1]
if newScore > value:
value = newScore
column = col
self.lastCol = column
alpha = max(alpha, value)
if alpha >= beta:
break
return (column, value)
# Player's turn
else:
value = self.Pinfinity
column = random.choice(playableLocations)
for col in playableLocations:
temp_board = self.makeBoardCopy(board)
placePiece(temp_board, self.playerColor, col)
newScore = self.minMax(temp_board, depth-1, alpha, beta, True)[1]
if newScore < value:
value = newScore
column = col
self.lastCol = column
# if playerWon:
# value = float("inf")
# print(beta)
alpha = min(alpha, value)
if alpha >= beta:
break
return (column, value)
| [
"[email protected]"
]
| |
e0de689cfe67d9012183a5933cadf8c4b9bc0315 | cf8cd28b091fa4378d90ead4a7c620646e50a051 | /2016/14_solution.py | ec277e3e649dea467bc75da1ca614337cb409b49 | [
"MIT"
]
| permissive | kng/AoC2019 | 55a1f4fe24a243c9f1e6aa78e5563104faf35fb3 | 0d40053a876580a78e277be9c047c631762d6ea7 | refs/heads/master | 2021-12-19T10:05:22.922939 | 2021-11-26T15:36:39 | 2021-11-26T15:36:39 | 226,349,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | # --- Day 14: One-Time Pad ---
# https://adventofcode.com/2016/day/14
import time
import hashlib
# Toggle between the worked example from the puzzle text and the real input.
simple = False
# Debug verbosity: values > 1 print the intermediate triple/index lists.
verbose = 0
if simple:
    data = 'abc'
    iterations = 25000
else:
    # NOTE(review): the handle is never closed — harmless for a one-shot
    # script, but a `with open(...)` block would be cleaner.
    file = open('14_input.txt', 'r')
    data = file.read().strip()
    iterations = 50000 # somewhere between 25k and 50k should suffice, ymmv :P
# better solution would be to use a generator, but this completed in less than 2min
def main():
    """Solve 2016 day 14: find the index producing the 64th one-time-pad key.

    Three phases, each timed: (1) precompute `iterations` MD5 hashes of
    salt+index (part 2 additionally re-hashes each 2016 times); (2) record,
    for each hash, the first character that appears three times in a row;
    (3) confirm a key by finding that character five-in-a-row within the
    next 1000 hashes, then print the 64th confirmed index.
    """
    start_time = time.time()
    print('generating {} hashes with salt: {}'.format(iterations, data))
    hashlist = []
    part = 1 # set this to 1 or 2 change the puzzle part
    for j in range(iterations):
        s = data + str(j)
        m = hashlib.md5(s.encode()).hexdigest()
        if part == 2 and not simple:
            # Part 2 "stretches" each hash with 2016 extra MD5 rounds.
            for i in range(2016):
                m = hashlib.md5(m.encode()).hexdigest()
        hashlist.append(m)
    print("time elapsed: {:.2f}".format((time.time() - start_time)))
    start_time = time.time()
    # print('first {}'.format(hashlist[0])) # part 2: a107ff... part 1: 577571...
    print('search for threes')
    # trip holds [index, char] for the FIRST triple in each hash (break
    # stops after the first, per the puzzle rules).
    trip = []
    for i in range(len(hashlist)):
        h = hashlist[i] # seems to be faster to assign to a variable first
        for j in range(len(h) - 2):
            if h[j] == h[j + 1] and h[j] == h[j + 2]:
                trip.append([i, h[j]])
                break
    if verbose > 1:
        print(trip)
    print("time elapsed: {:.2f}".format((time.time() - start_time)))
    start_time = time.time()
    print('search for fives')
    key = []
    while trip:
        idx, h = trip.pop(0) # yeah, collections.deque is better, but not critical here
        # A candidate becomes a key when its triple character appears five
        # times in a row somewhere in the next 1000 hashes.
        hs = str(h * 5)
        if any(hs in w for w in hashlist[idx + 1:idx + 1000]):
            key.append(idx)
            if verbose > 1:
                print('index found {} in hash {}'.format(idx, hs))
    print('keys found: {}'.format(len(key)))
    if len(key) > 63:
        print('part {}, key at pos 64: {}'.format(part, key[63]))
    print("time elapsed: {:.2f}".format((time.time() - start_time)))
if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
eacb9522092aa5e0ceb98aa10b3504cb2ba0ef10 | aceaf99df06081676f33145ff104009fcf30e922 | /core/permissions.py | 709d90b2de6a29e2b5eb15e460ad2a721747fd68 | []
| no_license | jonatasoli/process_monitoring | 2222d692b5255cbdf1e982940bf4f8a749257295 | 6ba2b3cf68b8bf8be6e1a4547b98c09e08d91794 | refs/heads/master | 2021-01-24T01:59:50.231667 | 2018-02-27T20:00:52 | 2018-02-27T20:00:52 | 122,831,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Object-level permission: a user may only modify their own profile."""

    def has_object_permission(self, request, view, obj):
        """Grant read access to anyone; write access only to the owner."""
        # Safe methods (GET/HEAD/OPTIONS) never modify data.
        if request.method not in permissions.SAFE_METHODS:
            return obj.id == request.user.id
        return True
class PostOwnStatus(permissions.BasePermission):
    """Object-level permission: a status may only be changed by its author."""

    def has_object_permission(self, request, view, obj):
        """Allow safe (read-only) methods for everyone; writes only for the owner."""
        if request.method not in permissions.SAFE_METHODS:
            # The status records its author via `user_profile`.
            return obj.user_profile.id == request.user.id
        return True
| [
"[email protected]"
]
| |
a26082e5f6cee13c70b4c407e791e4715556b070 | 7c724008d9de61e11c86f47aff239edc8970e350 | /tests/get_test_requests_session.py | 334ef99a2ae9924636eec03ef5d54618a5a47c9c | [
"MIT"
]
| permissive | Virtual-Shard/tcp-tls-tunnel-py | fc6c406f5b6076b986b73aaa9479664043ed7420 | c2e16c4f629128b5840ada4aaf9d98a0864b8bdb | refs/heads/master | 2023-07-16T13:18:52.792361 | 2021-08-28T16:34:07 | 2021-08-28T16:34:07 | 382,931,584 | 2 | 1 | MIT | 2021-08-28T14:41:31 | 2021-07-04T19:20:34 | Python | UTF-8 | Python | false | false | 702 | py | from typing import Union
import urllib3
from tests import test_settings
from hyper.contrib import HTTP20Adapter
from requests import Session
from requests.adapters import BaseAdapter
def get_test_requests_session(adapter: Union[BaseAdapter, HTTP20Adapter] = None) -> Session:
    """Build a `requests.Session` preconfigured for the test suite.

    The session carries keep-alive / no-cache / accept-encoding headers
    plus the test user agent; when `adapter` is given it is mounted for
    both the http:// and https:// schemes.
    """
    session = Session()
    session.headers.update(
        urllib3.make_headers(
            keep_alive=True,
            disable_cache=True,
            accept_encoding=True,
            user_agent=test_settings.USER_AGENT,
        )
    )
    if adapter:
        # Route every request through the supplied transport adapter
        # (e.g. hyper's HTTP/2 adapter).
        session.mount("http://", adapter=adapter)
        session.mount("https://", adapter=adapter)
    return session
"[email protected]"
]
| |
840e9b3ea6c66f476d8bacfb8a6cf053ead20b8c | 34247780ca65e6b71d706912251e29dc0103e8ed | /servo_control.py | 49c26768d6f58811d7f61b5fc43a6ee5e55e4bb1 | []
| no_license | ViktorBash/PID-Control-Loop | 74174e7cdf429c7ccd1de698ee42b643acdd7420 | fcc2fd2547eadbe9809c94906fd78482a2088baf | refs/heads/master | 2023-01-23T15:49:58.144613 | 2020-12-09T01:52:50 | 2020-12-09T01:52:50 | 275,858,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | """
Two functions to control the two servo motors for the project
"""
from gpiozero import Servo, AngularServo
from gpiozero.tools import sin_values
import time
from time import sleep
# Servo 1 on GPIO 12, driven by angle in degrees over the full ±90° range.
# servo1 = Servo(12)
servo1 = AngularServo(12, min_angle=-90, max_angle=90)
# Servo 2 on GPIO 19, configured identically.
# servo2 = Servo(19)
servo2 = AngularServo(19, min_angle=-90, max_angle=90)
# Software travel limits (degrees): the move functions clamp commands to
# this window even though the hardware is configured for ±90°.
top_limit = 15
bottom_limit = -15
def move_servo_1(angle):
    """Drive servo 1 to `angle`, clamped into [bottom_limit, top_limit]."""
    # Clamp rather than reject so out-of-range requests still move the
    # servo as far as the software limits allow.
    servo1.angle = min(max(angle, bottom_limit), top_limit)
def move_servo_2(angle):
    """Drive servo 2 to `angle`, clamped into [bottom_limit, top_limit]."""
    # Same clamping policy as move_servo_1.
    servo2.angle = min(max(angle, bottom_limit), top_limit)
# Manual smoke test: oscillate both servos in opposition once per second.
# Note the ±20° requests are clamped to the ±15° limits by the move
# functions above.
if __name__ == "__main__":
    while True:
        move_servo_1(20)
        move_servo_2(-20)
        time.sleep(1)
        move_servo_1(-20)
        move_servo_2(20)
        time.sleep(1)
"[email protected]"
]
| |
f205541796b3344d92b1153f7b05f33739dfcaf1 | 900c8b1233e11ad83d96722146cab6345bb2df9e | /HW4/code/nndl/layer_utils.py | 6dac4b037167e23029214d7caa839f590659acc7 | []
| no_license | yetian28/UCLA-EE239 | 7c4ad66dd5eeec32f0bbc6d7827aa56bc52c4a27 | 0dd6dfabd91bb7d2dbf97b555b1afa4e9e1ad524 | refs/heads/master | 2020-04-07T22:08:33.924372 | 2018-03-04T00:19:58 | 2018-03-04T00:19:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,672 | py | from .layers import *
from .fast_layers import *
"""
This code was originally written for CS 231n at Stanford University
(cs231n.stanford.edu). It has been modified in various areas for use in the
ECE 239AS class at UCLA. This includes the descriptions of what code to
implement as well as some slight potential changes in variable names to be
consistent with class nomenclature. We thank Justin Johnson & Serena Yeung for
permission to use this code. To see the original version, please visit
cs231n.stanford.edu.
"""
def affine_relu_forward(x, w, b):
    """
    Convenience layer: affine transform immediately followed by a ReLU.

    Inputs:
    - x: Input to the affine layer
    - w, b: Weights for the affine layer

    Returns a tuple of:
    - out: Output from the ReLU
    - cache: (fc_cache, relu_cache) for the backward pass
    """
    pre_activation, fc_cache = affine_forward(x, w, b)
    out, relu_cache = relu_forward(pre_activation)
    return out, (fc_cache, relu_cache)
def affine_relu_forward_batchnorm(x, w, b, gamma, beta, bn_param):
    """Affine -> batchnorm -> ReLU convenience layer.

    Returns the ReLU output and a cache tuple
    (fc_cache, bn_cache, relu_cache) for the matching backward pass.
    """
    pre_bn, fc_cache = affine_forward(x, w, b)
    normalized, bn_cache = batchnorm_forward(pre_bn, gamma, beta, bn_param)
    out, relu_cache = relu_forward(normalized)
    return out, (fc_cache, bn_cache, relu_cache)
def affine_relu_backward(dout, cache):
    """
    Backward pass for the affine-relu convenience layer.

    Inputs:
    - dout: Upstream derivatives
    - cache: (fc_cache, relu_cache) as produced by affine_relu_forward

    Returns dx, dw, db: gradients w.r.t. the affine layer's input, weights
    and bias.
    """
    # Unpack both sub-caches in one step (the original indexed the tuple
    # element-by-element and left debug prints behind).
    fc_cache, relu_cache = cache
    da = relu_backward(dout, relu_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db
def affine_relu_backward_batchnorm(dout, cache):
    """Backward pass for affine_relu_forward_batchnorm.

    `cache` is the (fc_cache, bn_cache, relu_cache) tuple from the forward
    pass; returns (dx, dw, db, dgamma, dbeta).
    """
    fc_cache, bn_cache, relu_cache = cache
    # Walk the layers in reverse order: ReLU, then batchnorm, then affine.
    grad = relu_backward(dout, relu_cache)
    grad, dgamma, dbeta = batchnorm_backward(grad, bn_cache)
    dx, dw, db = affine_backward(grad, fc_cache)
    return dx, dw, db, dgamma, dbeta
def affine_batchnorm_relu_forward(x, w, b, gamma, beta, bn_params):
    """
    Affine transform -> batch normalization -> ReLU.

    Returns the ReLU output and a cache tuple ordered
    (forward_cache, relu_cache, batchnorm_cache) — this ordering is what
    affine_batchnorm_relu_backward unpacks, so it must not change.
    """
    pre_bn, forward_cache = affine_forward(x, w, b)
    normalized, batchnorm_cache = batchnorm_forward(pre_bn, gamma, beta, bn_params)
    out, relu_cache = relu_forward(normalized)
    return out, (forward_cache, relu_cache, batchnorm_cache)
def affine_batchnorm_relu_backward(dout, cache):
    """
    Backward pass matching affine_batchnorm_relu_forward.

    Unpacks the (forward_cache, relu_cache, batchnorm_cache) tuple and
    returns (dx, dw, db, dgamma, dbeta).
    """
    forward_cache, relu_cache, batchnorm_cache = cache
    grad = relu_backward(dout, relu_cache)
    grad, dgamma, dbeta = batchnorm_backward(grad, batchnorm_cache)
    dx, dw, db = affine_backward(grad, forward_cache)
    return dx, dw, db, dgamma, dbeta
"""
Functions for conv net without batchnorm
"""
def conv_relu_forward(x, w, b, conv_param):
    """Convolution (im2col) followed by ReLU; returns output and caches."""
    # conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}
    conv_out, conv_cache = conv_forward_im2col(x, w, b, conv_param)
    out, relu_cache = relu_forward(conv_out)
    return out, (conv_cache, relu_cache)
def conv_relu_backward(dout, cache):
    """Backward pass for the conv-relu convenience layer."""
    conv_cache, relu_cache = cache
    grad = relu_backward(dout, relu_cache)
    dx, dw, db = conv_backward_im2col(grad, conv_cache)
    return dx, dw, db
#apply pooling
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
    """Convolution -> ReLU -> max pool.

    Returns the pooled output and a (conv_cache, relu_cache, pool_cache)
    tuple for conv_relu_pool_backward.
    """
    out_conv_forward, conv_cache = conv_forward_im2col(x, w, b, conv_param)
    out_relu_forward, relu_cache = relu_forward(out_conv_forward)
    # BUG FIX: the pooling parameters were passed as the undefined name
    # `pool_Param`, which raised NameError on every call; use `pool_param`.
    out, pool_cache = max_pool_forward_fast(out_relu_forward, pool_param)
    cache = (conv_cache, relu_cache, pool_cache)
    return out, cache
def conv_relu_pool_backward(dout, cache):
    """Backward pass for conv -> ReLU -> max pool, in reverse order."""
    conv_cache, relu_cache, pool_cache = cache
    grad = max_pool_backward_fast(dout, pool_cache)
    grad = relu_backward(grad, relu_cache)
    dx, dw, db = conv_backward_im2col(grad, conv_cache)
    return dx, dw, db
"""
Functions with batchnorm
"""
def conv_relu_forward_batchnorm(x, w, b, conv_param, gamma, beta, bn_param):
    """Convolution -> spatial batchnorm -> ReLU; returns output and caches."""
    z, conv_cache = conv_forward_im2col(x, w, b, conv_param)
    z, bn_cache = spatial_batchnorm_forward(z, gamma, beta, bn_param)
    out, relu_cache = relu_forward(z)
    return out, (conv_cache, bn_cache, relu_cache)
def conv_relu_backward_batchnorm(dout, cache):
    """Backward pass: relu back -> spatial batchnorm back -> conv back."""
    conv_cache, bn_cache, relu_cache = cache
    grad = relu_backward(dout, relu_cache)
    grad, dgamma, dbeta = spatial_batchnorm_backward(grad, bn_cache)
    dx, dw, db = conv_backward_im2col(grad, conv_cache)
    return dx, dw, db, dgamma, dbeta
def conv_relu_pool_forward_batchnorm(x, w, b, conv_param, pool_param, gamma, beta, bn_param):
    """Convolution -> spatial batchnorm -> ReLU -> max pool.

    Returns the pooled output and the cache tuple
    (conv_cache, bn_cache, relu_cache, pool_cache).
    """
    z, conv_cache = conv_forward_im2col(x, w, b, conv_param)
    z, bn_cache = spatial_batchnorm_forward(z, gamma, beta, bn_param)
    z, relu_cache = relu_forward(z)
    out, pool_cache = max_pool_forward_fast(z, pool_param)
    return out, (conv_cache, bn_cache, relu_cache, pool_cache)
def conv_relu_pool_backward_batchnorm(dout, cache):
    """Backward pass for conv -> spatial batchnorm -> ReLU -> max pool.

    Routes dout through pool -> relu -> batchnorm -> conv, the reverse of
    the forward pass, and returns (dx, dw, db, dgamma, dbeta).
    """
    conv_cache, bn_cache, relu_cache, pool_cache = cache
    # BUG FIX: the original called max_pool_forward_fast here; the backward
    # pass must send dout through max_pool_backward_fast.
    dpool = max_pool_backward_fast(dout, pool_cache)
    drelu = relu_backward(dpool, relu_cache)
    dbn, dgamma, dbeta = spatial_batchnorm_backward(drelu, bn_cache)
    dx, dw, db = conv_backward_im2col(dbn, conv_cache)
    grads = (dx, dw, db, dgamma, dbeta)
    return grads
| [
"[email protected]"
]
| |
0094d69ad917355ea2a470b92bfadd5756d51fcf | dbb9ed7fdef8f210d6e9786efddd77e943116958 | /src/unittest/python/util/common/CollectionUtil_test.py | 05cc772461fc2896600f895584d3d30f4327b53c | []
| no_license | HuBing958878/ugc.aggregator | 658ff60cadc888d64423e2d595db176f1ade7f06 | 5c838826fc75bbfa51011f438f16876fe04dd9e2 | refs/heads/master | 2021-01-22T17:42:50.235387 | 2016-04-24T08:33:55 | 2016-04-24T08:33:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # -*- coding:utf-8 -*-
import unittest
from util.common.CollectionUtil import CollectionUtil
class CollectionUtilTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print "setUpClass..."
@classmethod
def tearDownClass(cls):
print "tearDownClass..."
def test_chunksBySize(self):
array=range(10)
chunks=CollectionUtil().chunksBySize(array,5)
self.assertEqual(chunks,[[0, 1, 2, 3, 4], [5, 6, 7, 8,9]])
# Allow running this test module directly from the command line.
if __name__=="__main__":
    unittest.main()
"[email protected]"
]
| |
027136e555fc8b01d48d55e3b5206ffec7249008 | 111330f20804d90a766f808c464fdf5b1de1579a | /examples/pieris/preprocessing/runSailfish.py | 58fc94daa8701700db7975267ad2faddeb509a44 | [
"MIT",
"LicenseRef-scancode-public-domain"
]
| permissive | NCBI-Hackathons/arvos | 53134550e0ebfbc3999a344e1e92f24c64200965 | 1cbea3e93a6659a334183d8119c034f7095d1a5d | refs/heads/master | 2021-01-22T02:08:18.916841 | 2017-05-30T18:54:22 | 2017-05-30T18:54:22 | 92,335,502 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,896 | py | #!/usr/bin/python
"""
Run STAR to align reads to transcriptome
"""
import os,sys,re,shutil,getopt,itertools
import HTSeq
from htsint import run_subprocess
## locations
homeDir = os.path.join(os.path.expanduser("~"),"sequencing","pieris")
sailfishDir = os.path.join(homeDir,'sailfish')
readsDir = os.path.join(homeDir,'reads')
genomePath = os.path.realpath(os.path.join(homeDir,'dn-trinity','Trinity.fasta'))
#gff3Path = os.path.realpath(os.path.join(".","Xentr7_2_Stable.gff3"))
#gtfPath = os.path.realpath(os.path.join(sailfishDir,"Xentr7_2_Stable.gtf"))
sailfishPath = "/usr/src/Sailfish-0.6.3-Linux_x86-64/bin/sailfish"
def get_reads(sampleList):
    """Return (allLeft, allRight): trimmed paired-read file paths, one pair per sample.

    Raises Exception when either mate file is missing for any sample, so the
    downstream sailfish commands never receive a None path.
    """
    def get_trimmed_files(sample):
        # Scan readsDir for this sample's left/right paired fastq files,
        # skipping 'unpaired' outputs; missing files stay None.
        leftFile, rightFile = None, None
        for fileName in os.listdir(readsDir):
            if re.search("unpaired", fileName):
                continue
            if re.search("^%s.*\.fq$" % sample, fileName) and re.search("left_paired", fileName):
                leftFile = os.path.realpath(os.path.join(readsDir, fileName))
            elif re.search("^%s.*.fq$" % sample, fileName) and re.search("right_paired", fileName):
                rightFile = os.path.realpath(os.path.join(readsDir, fileName))
        return leftFile, rightFile

    allLeft = []
    allRight = []
    for sample in sampleList:
        left, right = get_trimmed_files(sample)
        allLeft.append(left)
        allRight.append(right)

    ## check
    # BUG FIX: both lists always contain len(sampleList) entries because None
    # is appended for missing files, so the original length comparison could
    # never fail; count the files that were actually found instead.
    if sum(f is not None for f in allLeft) != len(sampleList):
        raise Exception("Invalid number of left sequences")
    if sum(f is not None for f in allRight) != len(sampleList):
        raise Exception("Invalid number of right sequences")

    return allLeft, allRight
if __name__ == "__main__":
    # biological samples (libraries) to quantify with sailfish
    sampleList = ["17", "18", "33", "46", "56", "61", "DL47", "DL61", "D163", "D178", "D185", "D239"]

    ## export sutff
    # environment variables needed by the sailfish binary
    # NOTE(review): these commands are printed, not executed -- presumably meant
    # to be pasted into a shell by the user; confirm before relying on them.
    print("export LD_LIBRARY_PATH=/usr/src/Sailfish-0.6.3-Linux_x86-64/lib:$LD_LIBRARY_PATH")
    print("export PATH=/usr/src/Sailfish-0.6.3-Linux_x86-64/bin:$PATH")

    ## copy genome to star dir
    cmd1 = "cp %s %s"%(genomePath,os.path.join(sailfishDir,'genome.fa'))
    print("\n...copy genome to sailfish dir")
    print(cmd1)

    ## create the index
    # k-mer size 20 for the sailfish transcript index
    cmd2 = "%s index -t %s -o %s -k 20"%(sailfishPath,os.path.join(sailfishDir,'genome.fa'),sailfishDir)
    print("\n...index the genome")
    print(cmd2)

    ## align reads
    # build one long '&&'-chained shell command with a quant call per sample
    print("\n...align reads")
    allLeft,allRight = get_reads(sampleList)
    cmd3 = ""
    for s,sample in enumerate(sampleList):
        outDir = os.path.join(homeDir,'features',sample)
        if not os.path.exists(outDir):
            os.mkdir(outDir)
        # only the last command in the chain omits the trailing ' && '
        if s != len(sampleList) -1:
            _cmd = '%s quant -i %s -o %s -l "T=PE:O=><:S=SA" '%(sailfishPath,sailfishDir,outDir)+\
                   '-1 %s -2 %s && '%(allLeft[s],allRight[s])
        else:
            _cmd = '%s quant -i %s -o %s -l "T=PE:O=><:S=SA" '%(sailfishPath,sailfishDir,outDir)+\
                   '-1 %s -2 %s'%(allLeft[s],allRight[s])
        cmd3 += _cmd
    print cmd3

    overwrite = False
    # NOTE(review): everything below this exit is dead code -- sys.exit() returns
    # before the sam/bam conversion and merge steps run.
    sys.exit()

    ## convert the sam files to coordinate-sorted bam and files
    print("\n...sam/bam conversions and sort")
    for s,sample in enumerate(sampleList):
        samFile = os.path.join(sailfishDir,"%s_Aligned.out.sam"%(sample))
        bamFile = os.path.join(readsDir,"%s_aligned.bam"%(sample))
        sbamFile = os.path.join(readsDir,"%s_aligned_sorted.bam"%(sample))
        ssamFile = os.path.join(readsDir,"%s_aligned_sorted.sam"%(sample))
        if not os.path.exists(samFile):
            raise Exception("cannot find sam file %s"%(samFile))
        if os.path.exists(ssamFile) and not overwrite:
            print("skipping sam to bam, align, bam to sam")
        else:
            # sam -> bam, name-sort, then back to sam with header
            cmd = "/usr/bin/samtools view -b -S %s > %s && "%(samFile,bamFile)
            cmd += "/usr/bin/samtools sort -n %s %s && "%(bamFile,sbamFile[:-4])
            cmd += "/usr/bin/samtools view -h %s > %s"%(sbamFile,ssamFile)
            print cmd
            run_subprocess(cmd)

    ## concat sam files and sort by coordinates
    # merge the per-sample sorted bams into one file, then coordinate-sort it
    print("\n...make single sorted bam file..")
    outBam = os.path.join(homeDir,"star_all_reads.bam")
    outSbam = os.path.join(homeDir,"star_all_reads_sorted.bam")
    cmdMerge = "/usr/bin/samtools merge %s"%(outBam)
    cmdSort = "/usr/bin/samtools sort %s %s"%(outBam,outSbam[:-4])
    for s,sample in enumerate(sampleList):
        sbamFile = os.path.join(readsDir,"%s_aligned_sorted.bam"%(sample))
        cmdMerge += " %s"%(sbamFile)
    cmdMergeSort = cmdMerge + " && %s"%(cmdSort)
    if os.path.exists(outSbam) and not overwrite:
        print("skipping concat bam files and sort")
    else:
        # remove stale outputs before regenerating
        if os.path.exists(outSbam):
            os.remove(outSbam)
        if os.path.exists(outBam):
            os.remove(outBam)
        print cmdMergeSort
        run_subprocess(cmdMergeSort)
    print("\n")
| [
"[email protected]"
]
| |
c1bcf930fde9e46b78d8ed6438271171d70ccbd9 | 5390a9625fe2ac6a00f0460d367bacd156f5aaa7 | /2.4.1.py | 443e2e62c7bb40e11ed422476aefd967d6ab66f8 | []
| no_license | BorisovaNatalia/stepik---auto-tests-course | 82860241dc3dbb52b8eae2ac4d8cafe41e5226c6 | 80dcd9d967e58c279833da7deaf8adadf80d7657 | refs/heads/master | 2023-02-08T13:36:17.123620 | 2021-01-01T13:03:13 | 2021-01-01T13:03:13 | 295,004,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | from selenium import webdriver
import time
import math
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def calc(x):
    """Compute the captcha answer ln(|12 * sin(x)|) and return it as a string."""
    value = math.log(abs(12 * math.sin(int(x))))
    return str(value)
# Solve the suninjuly explicit-wait exercise: wait for the price to change,
# click the booking button, answer the math captcha, and submit.
try:
    link = "http://suninjuly.github.io/explicit_wait2.html"
    browser = webdriver.Chrome()
    browser.get(link)
    button = browser.find_element_by_id("book")
    # explicit wait: block up to 20 s until the price element shows "$100"
    WebDriverWait(browser, 20).until(
        EC.text_to_be_present_in_element((By.ID, "price"),"$100")
    )
    button.click()
    # read the captcha operand and type the computed answer
    x = int(browser.find_element_by_id("input_value").text)
    browser.find_element_by_id("answer").send_keys(calc(x))
    #browser.f
    # e
    # ййind_element_by_id("robotCheckbox").click()
    #browser.find_element_by_id("robotsRule").click()
    browser.find_element_by_css_selector("[type='submit']").click()
finally:
    # pause so the result of the script can be inspected visually
    time.sleep(10)
    # close the browser after all interactions
    browser.quit()
"[email protected]"
]
| |
e0bf1dde490aa1972c660674bcf50d43a21840c7 | 96c2adda2c76255dd0310ce031a25d622ff51365 | /python3-voikko/app.py | 28ad459ae624fc337e353f06f7836d603f54643f | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | janik6n/docker-demos | 534165a2c5db28c0363430730780ce916504e633 | 30348c0d633d7c6d2120c52af4e119e27ff7f1c9 | refs/heads/master | 2021-09-18T22:10:57.989652 | 2018-07-20T08:46:48 | 2018-07-20T08:46:48 | 102,210,769 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | #!/usr/bin/env python
import sys
from libvoikko import Voikko
# Morphological-analysis demo: analyse each command-line word with Voikko.
print('Analysoidaan annetut sanat:\n')
# initialise the analyzer for Finnish ("fi")
v = Voikko("fi")
# Pass the 1st argument as it is the app name itself.
for a in sys.argv[1:]:
    print(f'Sanan {a} analyysi:')
    print(v.analyze(a))
print('Annetut sanat analysoitu.')
"[email protected]"
]
| |
e58daf251c090d72da5375a591a8f869ab34ae2c | 6c9ef163a8cd14ab5e9343504640dc2fb0a4ff0b | /mountain_car/main.py | 37222db978cbb8a7ef64f029732bb34aab429e0a | [
"MIT"
]
| permissive | praxidike97/reinforcement-learning | 365e1fc14a2d4782321bbf4bc6b3819b6dae93c7 | c6af501939fe2d6ab32ce7d0405e0753797bf6cd | refs/heads/master | 2021-05-21T20:08:17.813755 | 2020-04-10T14:09:35 | 2020-04-10T14:09:35 | 252,783,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,729 | py | import gym
import time
from collections import deque
import matplotlib.pyplot as plt
import random
import argparse
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.optimizers import Adam, SGD
class DQNSolver():
    """Deep Q-Network agent for the gym MountainCar-v0 environment.

    Keeps an experience-replay buffer, a small Keras Q-network, and an
    epsilon-greedy exploration schedule.
    """

    def __init__(self, batch_size=20):
        # replay buffer of (old_state, action, reward, state, done) tuples
        self.memory = deque(maxlen=100000)
        self.env = gym.make('MountainCar-v0')
        # epsilon-greedy schedule: start fully random, decay toward the minimum
        self.exploration_rate = 1.0
        self.exploration_rate_min = 0.01
        self.exploration_rate_decay = 0.9999
        # MountainCar's three discrete actions (push left / no push / push right)
        self.action_space = [0, 1, 2]
        # discount factor for future rewards
        self.gamma = 0.95
        self.learning_rate = 0.001
        self.model = self.create_model()
        self.batch_size = batch_size

    def create_initial_population(self, size=100, threshold=30.):
        """Seed the replay buffer with random rollouts whose peak car
        position exceeds `threshold`; shows a histogram of peak heights."""
        print("Create initial population...")
        total_rewards, samples = list(), list()
        max_heights = list()
        while len(self.memory) < size:
            self.env.reset()
            total_reward = 0
            done = False
            sample = list()
            old_state = None
            max_height = -10
            while not done:
                # Perform a random action
                action = self.env.action_space.sample()
                state, reward, done, info = self.env.step(action)
                # track the highest x-position reached in this episode
                if state[0] > max_height:
                    max_height = state[0]
                #if not old_state is None and not done:
                if not old_state is None:
                    sample.append((old_state, action, reward, state, done))
                old_state = state
                total_reward += reward
            print("Max height: %f" % max_height)
            max_heights.append(max_height)
            # Only add the samples to the initial population where the reward exceeds the threshold
            if max_height > threshold:
                self.memory += sample
                print(total_reward)
                total_rewards.append(total_reward)
        print("Finished creating initial population!")
        plt.hist(max_heights)
        plt.show()
        # NOTE(review): `samples` is never appended to, so this returns [].
        return samples

    def create_model(self, input_size=2, output_size=3):
        """Build the Q-network: state (2 floats) -> 24 relu units -> 3 Q-values."""
        model = Sequential()
        model.add(Dense(24, input_shape=(input_size,), activation="relu"))
        model.add(Dense(output_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
        return model

    def get_next_action(self, state):
        """Epsilon-greedy action: random with prob exploration_rate, else argmax Q."""
        if random.random() < self.exploration_rate:
            return random.choice(self.action_space)
        else:
            q_values = self.model.predict(state)
            return np.argmax(q_values[0])

    def experience_replay(self, runs):
        """Train on one random minibatch from memory and decay exploration."""
        # linear-decay step size (currently unused; see commented code below)
        reduction = (1.0 - self.exploration_rate_min) / (runs)
        if len(self.memory) < self.batch_size:
            return
        batch = random.sample(self.memory, self.batch_size)
        training_input, target_output = list(), list()
        for old_state, action, reward, state, done in batch:
            old_state_Q_values = self.model.predict(np.array([old_state]))
            new_state_Q_values = self.model.predict(np.array([state]))
            #old_state_Q_update = reward
            # Bellman update: terminal success (x >= 0.5 is the flag) gets the
            # raw reward; otherwise bootstrap from the best next-state Q-value.
            if done and state[0] >= 0.5:
                old_state_Q_values[0][action] = reward #+ 10.
            else:
                old_state_Q_values[0][action] = reward + self.gamma * np.amax(new_state_Q_values[0])
            #old_state_Q_values[0][action] = old_state_Q_update
            #training_input = np.array([old_state])
            #target_output = np.array(old_state_Q_values)
            #self.model.fit(training_input, target_output, verbose=0)
            training_input.append(old_state)
            target_output.append(old_state_Q_values[0])
        # fit the whole minibatch at once
        self.model.fit(np.asarray(training_input), np.asarray(target_output), verbose=0)
        # multiplicative epsilon decay, clamped at the minimum
        self.exploration_rate *= self.exploration_rate_decay
        self.exploration_rate = max(self.exploration_rate_min, self.exploration_rate)
        #if self.exploration_rate > self.exploration_rate_min:
        #    self.exploration_rate -= reduction
def test_model(model):
    """Roll out a trained policy in MountainCar-v0 and report the total reward."""
    env = gym.make('MountainCar-v0')
    state = env.reset()
    total_reward = 0
    for step in range(500):
        env.render()
        # greedy action from the trained Q-network
        action = np.argmax(model.predict(np.array([state])))
        print(action)
        state, reward, done, info = env.step(action)
        total_reward += reward
        #time.sleep(0.05)
        if done:
            print("Episode finished after {} timesteps".format(step + 1))
            break
    print("Total reward: %f" % total_reward)
    env.close()
if __name__ == "__main__":
    # command-line switches: build replay seed, train, and/or test a saved model
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--runs', type=int, default=200)
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--initial_population', action='store_true')
    FLAGS, unparsed = parser.parse_known_args()

    dqnSolver = DQNSolver()

    if FLAGS.initial_population:
        # pre-fill memory with random rollouts whose peak position beats -0.25
        dqnSolver.create_initial_population(size=50000, threshold=-0.25)

    run = 0
    top_score = 0

    if FLAGS.train:
        while run < FLAGS.runs:
            run += 1
            done = False
            old_state = dqnSolver.env.reset()
            old_state = np.reshape(old_state, [1, 2])
            step = 0
            total_reward = 0
            max_height = -10.
            while not done:
                action = dqnSolver.get_next_action(old_state)
                state, reward, done, info = dqnSolver.env.step(action)
                total_reward += reward
                #reward = reward if not done else -reward
                state = np.reshape(state, [1, 2])
                # store the transition for experience replay
                dqnSolver.memory.append((old_state[0], action, reward, state[0], done))
                if state[0][0] > max_height:
                    max_height = state[0][0]
                # render every 20th episode only, to keep training fast
                if run%20 == 0:
                    dqnSolver.env.render()
                old_state = state
                if done:
                    print("Run: " + str(run) + ", exploration: " + str(dqnSolver.exploration_rate) + ", total reward: " + str(total_reward))
                    #if step > top_score:
                    #top_score = step
                    # checkpoint models that solve the episode early (< 200 steps)
                    if total_reward > -200.:
                        dqnSolver.model.save("./models/best_model-%i.h5" % run)
                    break
                step += 1
                # one minibatch of training per environment step
                dqnSolver.experience_replay(runs=FLAGS.runs)
            print("Max height: %f" % max_height)
            #dqnSolver.model.save("./model.h5")

    if FLAGS.test:
        # load a saved checkpoint and run the greedy policy
        model = load_model("models/best_model-999.h5")
        test_model(model)
"[email protected]"
]
| |
c1bcd65d34b7a3e59e2d47a48b25316f3ee6c058 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SOLOv1/mmdet/models/mask_heads/fcn_mask_head.py | 26cb3c0ff0c362870863dc2fddb5f9a2379cb87e | [
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,012 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from mmdet.core import auto_fp16, force_fp32, mask_target
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule
@HEADS.register_module
class FCNMaskHead(nn.Module):
    """Fully-convolutional mask head: a stack of convs, an optional upsample
    layer, and a 1x1 conv producing per-class (or class-agnostic) mask logits.
    """

    def __init__(self,
                 num_convs=4,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 upsample_method='deconv',
                 upsample_ratio=2,
                 num_classes=81,
                 class_agnostic=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 loss_mask=dict(
                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
        super(FCNMaskHead, self).__init__()
        # validate upsampling mode up front; None disables upsampling entirely
        if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']:
            raise ValueError(
                'Invalid upsample method {}, accepted methods '
                'are "deconv", "nearest", "bilinear"'.format(upsample_method))
        self.num_convs = num_convs
        # WARN: roi_feat_size is reserved and not used
        self.roi_feat_size = _pair(roi_feat_size)
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_out_channels = conv_out_channels
        self.upsample_method = upsample_method
        self.upsample_ratio = upsample_ratio
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self.loss_mask = build_loss(loss_mask)

        # conv tower: first conv maps in_channels, the rest keep conv_out_channels
        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    padding=padding,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg))
        upsample_in_channels = (
            self.conv_out_channels if self.num_convs > 0 else in_channels)
        if self.upsample_method is None:
            self.upsample = None
        elif self.upsample_method == 'deconv':
            # learned upsampling via transposed convolution
            self.upsample = nn.ConvTranspose2d(
                upsample_in_channels,
                self.conv_out_channels,
                self.upsample_ratio,
                stride=self.upsample_ratio)
        else:
            # fixed interpolation (nearest / bilinear)
            self.upsample = nn.Upsample(
                scale_factor=self.upsample_ratio, mode=self.upsample_method)

        # final 1x1 conv emits one logit map per class (or a single map)
        out_channels = 1 if self.class_agnostic else self.num_classes
        logits_in_channel = (
            self.conv_out_channels
            if self.upsample_method == 'deconv' else upsample_in_channels)
        self.conv_logits = nn.Conv2d(logits_in_channel, out_channels, 1)
        self.relu = nn.ReLU(inplace=True)
        self.debug_imgs = None

    def init_weights(self):
        """Kaiming-initialize the upsample (if learned) and logits layers."""
        for m in [self.upsample, self.conv_logits]:
            if m is None:
                continue
            nn.init.kaiming_normal_(
                m.weight, mode='fan_out', nonlinearity='relu')
            nn.init.constant_(m.bias, 0)

    @auto_fp16()
    def forward(self, x):
        """Run conv tower -> optional upsample (+ relu for deconv) -> logits."""
        for conv in self.convs:
            x = conv(x)
        if self.upsample is not None:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
        mask_pred = self.conv_logits(x)
        return mask_pred

    def get_target(self, sampling_results, gt_masks, rcnn_train_cfg):
        """Build mask training targets for the positive proposals."""
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, rcnn_train_cfg)
        return mask_targets

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, mask_targets, labels):
        """Compute the mask loss; class-agnostic heads use label 0 for all rois."""
        loss = dict()
        if self.class_agnostic:
            loss_mask = self.loss_mask(mask_pred, mask_targets,
                                       torch.zeros_like(labels))
        else:
            loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
        loss['loss_mask'] = loss_mask
        return loss

    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class+1, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            img_shape (Tensor): shape (3, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape: original image size

        Returns:
            list[list]: encoded masks
        """
        if isinstance(mask_pred, torch.Tensor):
            mask_pred = mask_pred.sigmoid().cpu().numpy()
        assert isinstance(mask_pred, np.ndarray)
        # when enabling mixed precision training, mask_pred may be float16
        # numpy array
        mask_pred = mask_pred.astype(np.float32)

        # one result list per foreground class (background excluded)
        cls_segms = [[] for _ in range(self.num_classes - 1)]
        bboxes = det_bboxes.cpu().numpy()[:, :4]
        labels = det_labels.cpu().numpy() + 1

        if rescale:
            img_h, img_w = ori_shape[:2]
        else:
            # paste masks on the scaled image instead of the original one
            img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
            img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
            scale_factor = 1.0

        for i in range(bboxes.shape[0]):
            if not isinstance(scale_factor, (float, np.ndarray)):
                scale_factor = scale_factor.cpu().numpy()
            bbox = (bboxes[i, :] / scale_factor).astype(np.int32)
            label = labels[i]
            w = max(bbox[2] - bbox[0] + 1, 1)
            h = max(bbox[3] - bbox[1] + 1, 1)

            if not self.class_agnostic:
                mask_pred_ = mask_pred[i, label, :, :]
            else:
                mask_pred_ = mask_pred[i, 0, :, :]

            # resize the predicted mask to the box size, then binarize
            bbox_mask = mmcv.imresize(mask_pred_, (w, h))
            bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype(
                np.uint8)

            if rcnn_test_cfg.get('crop_mask', False):
                im_mask = bbox_mask
            else:
                # paste the box-sized mask into a full-image canvas
                im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
                im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask

            if rcnn_test_cfg.get('rle_mask_encode', True):
                # COCO-style run-length encoding (Fortran order required)
                rle = mask_util.encode(
                    np.array(im_mask[:, :, np.newaxis], order='F'))[0]
                cls_segms[label - 1].append(rle)
            else:
                cls_segms[label - 1].append(im_mask)

        return cls_segms
| [
"[email protected]"
]
| |
f12ad10f04ebad7fca901b3f4958f768e9b2773f | ea5691c2632a30d4209f0222008b9067a44a3fe7 | /dcptree/zero_one_loss/tests/test_mip.py | c07f03419592cd26e37a6133607f4db282698841 | [
"BSD-3-Clause"
]
| permissive | ustunb/dcptree | 1369168f651ec4a35b7decbdef133dcc7941e767 | 40ce36e5b2bb6b8d5de3310e0d29d19a2d46782a | refs/heads/master | 2020-05-24T02:23:39.262609 | 2019-06-05T23:49:58 | 2019-06-05T23:49:58 | 187,052,932 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,982 | py | from dcptree.zero_one_loss.tests.testing_helper_functions import *
from dcptree.zero_one_loss.mip import *
#### Test Setup ####
data_name = 'breastcancer'
format_label = 'envyfree'
selected_groups = []
test_settings = {
'fit_intercept': False,
'compress_data': True,
'standardize_data': True,
'margin': 0.0001,
'total_l1_norm': 1.00,
'add_l1_penalty': False,
'add_constraints_for_conflicted_pairs': True,
'add_coefficient_sign_constraints': True,
'use_cplex_indicators_for_mistakes': True,
'use_cplex_indicators_for_signs': True,
}
time_limit = 10
random_seed = 2338
# load / process data data
data_file = '%s%s_%s_processed.pickle' % (data_dir, data_name, format_label)
data, cvindices = load_processed_data(data_file)
#data = filter_data_to_fold(data, cvindices, fold_id = 'K05N01', fold_num = 0, include_validation = True)
# remove selected groups
data, groups = split_groups_from_data(data = data, group_names = selected_groups)
data = convert_remaining_groups_to_rules(data)
data = cast_numeric_fields(data)
# subsample data
#data = sample_test_data(data, max_features = 2, n_pos = 25, n_neg = 25, n_conflict = 5, remove_duplicates = True)
#data = sample_test_data(data, max_features = 4, n_pos = 100, n_neg = 100, n_conflict = 40, remove_duplicates = True)
#data = sample_test_data(data, max_features = 5, n_pos = 20, n_neg = 20, n_conflict = 5, remove_duplicates = True)
# solve MIP
mip = ZeroOneLossMIP(data = data, settings = test_settings)
mip.solve(time_limit = time_limit)
indices = mip.indices
mip_data = mip.data
mip_settings = mip.settings
mip_info = mip.mip_info
#### Setup Variables ####
# data related components
theta_pos = np.array(mip.solution.get_values(indices['theta_pos']))
theta_neg = np.array(mip.solution.get_values(indices['theta_neg']))
mistakes_pos = np.array(mip.solution.get_values(indices['mistakes_pos']), dtype = np.bool_)
mistakes_neg = np.array(mip.solution.get_values(indices['mistakes_neg']), dtype = np.bool_)
total_mistakes_pos = np.array(mip.solution.get_values(indices['total_mistakes_pos']))
total_mistakes_neg = np.array(mip.solution.get_values(indices['total_mistakes_neg']))
total_mistakes = np.array(mip.solution.get_values(indices['total_mistakes']))
theta = theta_pos + theta_neg
# data related components
if test_settings['fit_intercept']:
data = add_intercept(data)
else:
data = remove_intercept(data)
X = np.array(data['X'])
if mip_settings['standardize_data']:
X = (X - mip_data['X_shift'])/(mip_data['X_scale'])
Y = mip_data['Y']
n_variables = X.shape[1]
U_pos = mip_data['U_pos']
x_to_u_pos_idx = mip_data['x_to_u_pos_idx']
u_neg_to_x_neg_idx = mip_data['u_neg_to_x_neg_idx']
n_counts_pos = mip_data['n_counts_pos']
n_points_pos = len(n_counts_pos)
y_pos = Y[x_to_u_pos_idx]
U_neg = mip_data['U_neg']
x_to_u_neg_idx = mip_data['x_to_u_neg_idx']
u_pos_to_x_pos_idx = mip_data['u_pos_to_x_pos_idx']
n_counts_neg = mip_data['n_counts_neg']
n_points_neg = len(n_counts_neg)
y_neg = Y[x_to_u_neg_idx]
conflicted_pairs = mip_data['conflicted_pairs']
# Expected Values
scores = X.dot(theta)
scores_pos = scores[x_to_u_pos_idx]
yhat_pos = np.sign(scores_pos)
expected_mistakes_pos = np.not_equal(yhat_pos, y_pos)
scores_neg = scores[x_to_u_neg_idx]
yhat_neg = np.sign(scores_neg)
expected_mistakes_neg = np.not_equal(yhat_neg, y_neg)
#### Data Tests
def test_compression_pos():
    """Compressed positive points must reconstruct the rows of X with Y == +1."""
    positive_rows = data['Y'] == 1
    total_pos = np.sum(positive_rows)
    assert np.sum(np.sum(n_counts_pos) == total_pos)
    assert np.all(y_pos == 1.0)
    assert np.all(U_pos == X[x_to_u_pos_idx, :])
    assert np.all(X[positive_rows, :] == U_pos[u_pos_to_x_pos_idx, :])
def test_compression_neg():
    """Compressed negative points must reconstruct the rows of X with Y == -1."""
    negative_rows = data['Y'] == -1
    total_neg = np.sum(negative_rows)
    assert np.sum(np.sum(n_counts_neg) == total_neg)
    assert np.all(y_neg == -1.0)
    assert np.all(U_neg == X[x_to_u_neg_idx, :])
    assert np.all(X[negative_rows, :] == U_neg[u_neg_to_x_neg_idx, :])
def test_conflicted_pairs():
    """Each conflicted pair must be a duplicate feature vector with opposite labels."""
    for pos_i, neg_i in tuple(conflicted_pairs):
        assert np.all(U_pos[pos_i, :] == U_neg[neg_i, :])
        assert y_pos[pos_i] == 1
        assert y_neg[neg_i] == -1
#### Basic MIP Tests
def test_mip_settings():
    """Every requested setting must be echoed back by the solved MIP."""
    solved_settings = mip.settings
    for name, expected in test_settings.items():
        found = solved_settings[name]
        assert expected == found, \
            'setting mismatch (%s): expected %r\n found %r' % (name, expected, found)
def test_mip_data():
    """mip.data must match an independent run of process_mip_data."""
    reference = process_mip_data(
        data = data,
        fit_intercept = test_settings['fit_intercept'],
        standardize = test_settings['standardize_data'],
        compress = test_settings['compress_data'])
    for key, value in reference.items():
        assert np.all(value == mip_data[key])
def test_mip_intercept():
    """Intercept column and coefficient count must match the fit_intercept setting."""
    intercept_idx = mip_data['intercept_idx']
    if test_settings['fit_intercept']:
        # an intercept column exists and is identically one in both classes
        assert intercept_idx >= 0
        assert np.all(mip_data['U_pos'][:, intercept_idx] == 1.0)
        assert np.all(mip_data['U_neg'][:, intercept_idx] == 1.0)
        expected_n_coefs = data['X'].shape[1] if has_intercept(data) else data['X'].shape[1] + 1
    else:
        assert intercept_idx == -1
        expected_n_coefs = data['X'].shape[1] - 1 if has_intercept(data) else data['X'].shape[1]
    assert len(theta) == expected_n_coefs
def test_mip_data_normalization():
    """Shift/scale vectors exist, leave the intercept untouched, and scales are positive."""
    for key in ('intercept_idx', 'coefficient_idx', 'X_shift', 'X_scale'):
        assert key in mip_data

    shift = np.array(mip_data['X_shift']).flatten()
    scale = np.array(mip_data['X_scale']).flatten()
    intercept_idx = mip_data['intercept_idx']

    if intercept_idx > 0:
        # the intercept column must never be shifted or rescaled
        assert shift[intercept_idx] == 0.0
        assert scale[intercept_idx] == 1.0

    assert np.all(np.greater(scale, 0.0))
def test_mip_variable_bounds():
    """Every solution value must lie within its declared variable bounds."""
    for name in mip_info['var_names']:
        values = mip.solution.get_values(indices[name])
        assert np.all(mip_info['lower_bounds'][name] <= values)
        assert np.all(mip_info['upper_bounds'][name] >= values)
def test_mip_variable_types():
    """Every solution value must satisfy its declared CPLEX variable type."""
    for name in mip_info['var_names']:
        values = mip.solution.get_values(indices[name])
        types = mip_info['var_types'][name]
        if isinstance(values, list):
            checks = [check_variable_type(v, t) for v, t in zip(values, types)]
            assert np.all(np.array(checks))
        else:
            assert check_variable_type(values, types)
def test_mip_variable_indices():
    """Index dict must be correctly typed/sized and cover all MIP variables
    with distinct, consecutive indices."""
    indices = mip.indices

    # check types
    assert isinstance(indices, dict)
    for index_list in indices.values():
        assert isinstance(index_list, list)

    # check lengths
    assert len(indices['theta_pos']) == n_variables
    assert len(indices['theta_neg']) == n_variables
    assert len(indices['mistakes_pos']) == n_points_pos
    assert len(indices['mistakes_neg']) == n_points_neg
    assert len(indices['total_mistakes_pos']) == 1
    assert len(indices['total_mistakes_neg']) == 1
    assert len(indices['total_mistakes']) == 1

    if mip_info['add_coefficient_sign_constraints']:
        assert 'theta_sign' in indices
        sign_idx = np.array(mip.solution.get_values(indices['theta_sign']), dtype = np.bool_)
        assert len(sign_idx) == n_variables

    flat_indices = np.sort([i for index_list in indices.values() for i in index_list])
    mip_variable_indices = mip.variables.get_indices(mip.variables.get_names())

    assert np.array_equal(flat_indices, mip_variable_indices), \
        'indices are not distinct'
    assert np.array_equal(flat_indices, np.arange(start = flat_indices[0], stop = flat_indices[-1] + 1)), \
        'indices are not consecutive'
    assert np.array_equal(flat_indices, mip_variable_indices), \
        'indices do not contain all variables in MIP object'
#### Key Attributes
def test_coefficients():
    """mip.coefficients() must equal theta_pos + theta_neg from the solution."""
    solution_indices = mip.indices
    pos_part = np.array(mip.solution.get_values(solution_indices['theta_pos']))
    neg_part = np.array(mip.solution.get_values(solution_indices['theta_neg']))
    assert np.array_equal(pos_part + neg_part, mip.coefficients())
def test_model():
    """The returned classifier must be valid and reproduce the MIP's predictions."""
    classifier = mip.get_classifier()
    assert isinstance(classifier, ClassificationModel)
    assert classifier.check_rep()
    from_model = classifier.predict(data['X'])
    from_mip = np.sign(X.dot(mip.coefficients()))
    assert np.array_equal(from_model, from_mip)
#### Tests on Coefficients
def test_theta_pos_or_theta_neg():
    """At most one of theta_pos[j], theta_neg[j] may be non-zero."""
    # check that either theta_pos > 0 or theta_neg < 0 not both
    assert ~np.any(np.logical_and(theta_pos, theta_neg)), "theta_pos[j] != 0 and theta_neg[j] != 0"
def test_sign_indicators():
    """When sign constraints are active, the sign indicator variables must
    agree with the split coefficients."""
    if mip_info['add_coefficient_sign_constraints']:
        sign_idx = np.array(mip.solution.get_values(indices['theta_sign']), dtype = np.bool_)
        assert np.all(theta_pos[sign_idx] >= 0.0)
        assert np.all(theta_neg[sign_idx] <= 0.0)
def test_L1_norm():
    """The recovered coefficients' L1-norm must match the norm fixed in the MIP."""
    assert np.isclose(np.sum(abs(theta)), mip_info['total_l1_norm'])
    # Diagnostic only: report (but do not fail on) tiny numerical drift
    # between the exact and the recovered L1-norm.
    if np.sum(abs(theta)) != mip_info['total_l1_norm']:
        msg = [
            # BUG FIX: a missing comma here used to concatenate the first
            # two strings into one malformed message line.
            'numerical issue in L1-norm,',
            'expected value: %1.6f' % mip_info['total_l1_norm'],
            'computed value: %1.6f' % np.sum(abs(theta))
        ]
        print('\n'.join(msg))
#### Parameters for Indicator Constraints
def test_margin_pos():
    """Positive-class margins must equal |margin| and be non-negative."""
    assert np.all(mip_info['margin_pos'] == abs(mip_settings['margin']))
    assert np.all(mip_info['margin_pos'] >= 0.0)
def test_margin_neg():
    """Negative-class margins must equal |margin| and be non-negative."""
    assert np.all(mip_info['margin_neg'] == abs(mip_settings['margin']))
    assert np.all(mip_info['margin_neg'] >= 0.0)
# Big-M constants only exist when CPLEX indicator constraints are not used
# for the mistake variables, so these tests are defined conditionally.
if not mip_settings['use_cplex_indicators_for_mistakes']:
    def test_big_m_constant_pos():
        assert np.all(np.greater_equal(mip_info['M_pos'], scores_pos))
    def test_big_m_constant_neg():
        assert np.all(np.greater_equal(mip_info['M_neg'], scores_neg))
#### Classifier Scores
def test_score_is_within_margin_pos():
    """No positive-class score may fall strictly inside (0, margin_pos)."""
    margin_idx = (scores_pos > 0.0) & (scores_pos < mip_info['margin_pos'])
    bug_idx = np.where(margin_idx)[0]
    error_msg = "score is within margin for %d points\n" % len(bug_idx)
    error_msg += print_score_error(bug_idx, scores_pos, theta, U_pos, y_pos, mistakes_pos, point_type = "pos")
    # BUG FIX: the assertion was missing, so this test could never fail.
    assert len(bug_idx) == 0, error_msg
def test_score_is_within_margin_neg():
    """No negative-class score may fall strictly inside (0, margin_neg)."""
    margin_idx = (scores_neg > 0.0) & (scores_neg < mip_info['margin_neg'])
    bug_idx = np.where(margin_idx)[0]
    error_msg = "score is within margin for %d points\n" % len(bug_idx)
    # BUG FIX: the assertion was missing, and the error report used the
    # positive-class arrays; report the negative-class data this test checks.
    error_msg += print_score_error(bug_idx, scores_neg, theta, U_neg, y_neg, mistakes_neg, point_type = "neg")
    assert len(bug_idx) == 0, error_msg
def test_score_is_exactly_zero_pos():
    """No positive-class point may have a score of exactly zero."""
    bug_idx = np.where(scores_pos == 0.0)[0]
    error_msg = "score is exactly = 0.0 for %d points\n" % len(bug_idx)
    error_msg += print_score_error(bug_idx, scores_pos, theta, U_pos, y_pos, mistakes_pos, point_type = "pos")
    assert len(bug_idx) == 0, error_msg
def test_score_is_exactly_zero_neg():
    """No negative-class point may have a score of exactly zero."""
    bug_idx = np.where(scores_neg == 0.0)[0]
    error_msg = "score is exactly = 0.0 for %d points\n" % len(bug_idx)
    error_msg += print_score_error(bug_idx, scores_neg, theta, U_neg, y_neg, mistakes_neg, point_type = "neg")
    assert len(bug_idx) == 0, error_msg
def test_score_is_small_pos():
    """No positive-class score may be merely close to zero (exact zeros are
    covered by the test above)."""
    exact_idx = np.where(scores_pos == 0.0)[0]
    close_idx = np.flatnonzero(np.isclose(scores_pos, 0.0, rtol = 1e-10, atol = 1e-10))
    bug_idx = np.setdiff1d(close_idx, exact_idx, assume_unique = True)
    error_msg = "score is close to 0.0 for %d points\n" % len(bug_idx)
    error_msg += print_score_error(bug_idx, scores_pos, theta, U_pos, y_pos, mistakes_pos, point_type = "pos")
    assert len(bug_idx) == 0, error_msg
def test_score_is_small_neg():
    """No negative-class score may be merely close to zero (exact zeros are
    covered by test_score_is_exactly_zero_neg)."""
    exact_idx = np.where(scores_neg == 0.0)[0]
    close_idx = np.flatnonzero(np.isclose(scores_neg, 0.0, rtol = 1e-10, atol = 1e-10))
    bug_idx = np.setdiff1d(close_idx, exact_idx, assume_unique = True)
    error_msg = "score is close to 0.0 for %d points\n" % len(bug_idx)
    # BUG FIX: the error report was labelled with point_type = "pos"
    # (copy-paste from the positive-class test); this checks negatives.
    error_msg += print_score_error(bug_idx, scores_neg, theta, U_neg, y_neg, mistakes_neg, point_type = "neg")
    assert len(bug_idx) == 0, error_msg
#### Mistakes
def test_mistakes_pos():
    """Solved positive-class mistake indicators must match the expected ones."""
    bug_idx = np.flatnonzero(np.not_equal(mistakes_pos, expected_mistakes_pos))
    error_msg = "incorrect mistake variables for %d points\n" % len(bug_idx)
    error_msg += print_score_error(bug_idx, scores_pos, theta, U_pos, y_pos, mistakes_pos, point_type = "pos")
    assert len(bug_idx) == 0, error_msg
def test_mistakes_neg():
    """Solved negative-class mistake indicators must match the expected ones."""
    bug_idx = np.flatnonzero(np.not_equal(mistakes_neg, expected_mistakes_neg))
    error_msg = "incorrect mistake variables for %d points\n" % len(bug_idx)
    error_msg += print_score_error(bug_idx, scores_neg, theta, U_neg, y_neg, mistakes_neg, point_type = "neg")
    assert len(bug_idx) == 0, error_msg
def test_mistakes_on_conflicts():
    """For each conflicting pair exactly one of the two points must be
    marked as a mistake."""
    for (p, n) in tuple(conflicted_pairs):
        error_msg = "found conflicting pair such that (l_pos[%d], l_neg[%d]) = (%d, %d)" % \
                    (p,n, mistakes_pos[p], mistakes_neg[n])
        assert mistakes_pos[p] + mistakes_neg[n] == 1, error_msg
#### Auxiliary Variables
def test_total_mistakes_internal():
    """Totals must be consistent with the per-point indicators weighted by
    the point counts."""
    assert total_mistakes_neg == mistakes_neg.dot(n_counts_neg)
    assert total_mistakes_pos == mistakes_pos.dot(n_counts_pos)
    assert total_mistakes == total_mistakes_neg + total_mistakes_pos
def test_total_mistakes_external():
    """Totals must also match the externally computed expected mistakes."""
    expected_total_mistakes_pos = expected_mistakes_pos.dot(n_counts_pos)
    expected_total_mistakes_neg = expected_mistakes_neg.dot(n_counts_neg)
    assert total_mistakes_pos == expected_total_mistakes_pos
    assert total_mistakes_neg == expected_total_mistakes_neg
    assert total_mistakes == expected_total_mistakes_pos + expected_total_mistakes_neg
| [
"[email protected]"
]
| |
bc0104807d36c2d7c6def7e3549370848dea517d | 46d36c47d5991ac3f05f143c79c4fa7e56f9193b | /peri.py | 432664ef6468ccacd62cba76747d988bfa325e92 | [
"MIT"
]
| permissive | jenwich/practicum_game_project | 1aab8d699cc494b0951fea5c853179ae985c212c | 3619b808bb8e0779a3d6581c904421576cf39c9f | refs/heads/master | 2021-01-10T09:01:40.863526 | 2015-12-10T18:41:38 | 2015-12-10T18:41:38 | 46,523,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | from practicum import McuBoard
# USB control-request codes understood by the peripheral board firmware.
RQ_SET_LED = 0
RQ_GET_SWITCH = 1
RQ_GET_LIGHT1 = 2
RQ_GET_LIGHT2 = 3
RQ_GET_LIGHT3 = 4
RQ_GET_SOUND = 5
####################################
class PeriBoard(McuBoard):
    """High-level interface to the peripheral board's LEDs and sensors."""

    ################################
    def setLed(self, led_no, led_val):
        '''
        Set status of LED led_no on peripheral board to led_val
        '''
        self.usbWrite(request=RQ_SET_LED, index=led_no, value=led_val)

    ################################
    def setLedValue(self, value):
        '''
        Display value's 3 LSBs on peripheral board's LEDs
        '''
        # Floor division keeps `value` an int on both Python 2 and 3;
        # the original `/=` would produce a float on Python 3.
        for bit in range(3):
            self.setLed(bit, value % 2)
            value //= 2

    ################################
    def getSwitch(self):
        '''
        Return a boolean value indicating whether the switch on the peripheral
        board is currently pressed
        '''
        data = self.usbRead(request=RQ_GET_SWITCH, length=1)
        return bool(data[0])

    ################################
    def getLight(self, i):
        '''
        Return the current reading of light sensor i (0-2) on the peripheral
        board; returns 0 for an invalid sensor index.
        '''
        requests = (RQ_GET_LIGHT1, RQ_GET_LIGHT2, RQ_GET_LIGHT3)
        if not 0 <= i < len(requests):
            return 0
        data = self.usbRead(request=requests[i], length=2)
        # Two bytes, little-endian.
        return data[1] * 256 + data[0]

    def getSound(self):
        '''Return the current sound level as a 16-bit little-endian reading.'''
        data = self.usbRead(request=RQ_GET_SOUND, length=2)
        return data[1] * 256 + data[0]
| [
"[email protected]"
]
| |
1c664c2d6941a617b5c8f4784a6866f94995ad87 | 0f0f7fec08a4169262066efcecd22149fc046417 | /contest/elliptic_encryption.py | da2f2fd8df4febca050175370d7fd0cda048fcef | []
| no_license | TurtlePU/ecc | ca08be2a6ca6b9d901c86c3b05fe1b56e6a0111f | 4a8ae2dfc786db01bd1fbe321dc8029f088eb67a | refs/heads/main | 2023-07-27T08:18:36.259367 | 2021-09-11T07:43:10 | 2021-09-11T07:43:10 | 388,795,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,716 | py | from typing import Generic, List, Optional, Tuple, TypeVar
from dataclasses import dataclass
from random import randrange
T = TypeVar('T')
class Group(Generic[T]):
    """Abstract multiplicative group over elements of type T.

    Subclasses must implement eq/order/mul/unit/inv; into() may normalise
    an element into a canonical representative.
    """
    def eq(self, x: T, y: T) -> bool:
        raise NotImplementedError

    def order(self) -> int:
        raise NotImplementedError

    def into(self, x: T) -> T:
        # Default: elements are already canonical.
        return x

    def mul(self, x: T, y: T) -> T:
        raise NotImplementedError

    def unit(self) -> T:
        raise NotImplementedError

    def inv(self, x: T) -> T:
        raise NotImplementedError

    def true_div(self, x: T, y: T) -> T:
        """Return x * y^-1."""
        return self.mul(x, self.inv(y))

    def pow(self, x: T, ord: int) -> T:
        """Exponentiation by squaring; supports negative exponents."""
        if ord == 0:
            return self.unit()
        elif ord < 0:
            return self.pow(self.inv(x), -ord)
        elif ord % 2 == 0:
            # BUG FIX: must be integer (floor) division. `ord / 2` yields a
            # float in Python 3 and silently corrupts large (crypto-sized)
            # exponents once they exceed 2**53.
            return self.pow(self.mul(x, x), ord // 2)
        else:
            return self.mul(self.pow(x, ord - 1), x)
class Field(Group[T]):
    """A Group extended with addition, an additive identity and negation."""
    def add(self, x: T, y: T) -> T:
        raise NotImplementedError
    def zero(self) -> T:
        raise NotImplementedError
    def is_zero(self, x: T) -> bool:
        """True iff x equals the additive identity."""
        return self.eq(x, self.zero())
    def neg(self, x: T) -> T:
        raise NotImplementedError
    def sub(self, x: T, y: T) -> T:
        """Subtraction, defined as x + (-y)."""
        minus_y = self.neg(y)
        return self.add(x, minus_y)
class SqrtField(Field[T]):
    """A field that can take square roots (None when no root exists)."""
    def sqrt(self, x: T) -> Optional[T]:
        raise NotImplementedError
@dataclass
class Zn(SqrtField[int]):
    """Modular arithmetic in Z/NZ.

    sqrt() only supports moduli with N % 4 == 3, where x**((N+1)/4) is a
    square root of any quadratic residue x.
    """
    N: int
    def order(self):
        return self.N
    def into(self, x):
        # Canonical representative in [0, N).
        return x % self.N
    def eq(self, x, y):
        return (x - y) % self.N == 0
    def mul(self, x, y):
        return (x * y) % self.N
    def unit(self):
        return 1
    def inv(self, x):
        # Modular inverse via three-argument pow (Python 3.8+).
        return int(pow(x, -1, self.N))
    def pow(self, x, ord):
        return int(pow(x, ord, self.N))
    def add(self, x, y):
        return (x + y) % self.N
    def zero(self):
        return 0
    def neg(self, x):
        # BUG FIX: was `self.N - x % self.N`, which returned N (a
        # non-canonical representative) for x ≡ 0; (-x) % N is always
        # in [0, N).
        return (-x) % self.N
    def sqrt(self, x):
        assert self.N % 4 == 3
        sqrt = self.pow(x, (self.N + 1) // 4)
        # Verify the candidate; non-residues have no square root.
        if self.into(sqrt ** 2) == self.into(x):
            return sqrt
        else:
            return None
T = TypeVar('T')
class Encoder(Generic[T]):
    """Abstract two-way codec between a text and a list of T values."""
    def encode(self, text: str) -> List[T]:
        raise NotImplementedError
    def decode(self, code: List[T]) -> str:
        raise NotImplementedError
def chunks(it, n):
    """Yield consecutive length-*n* slices of *it*; the last may be shorter."""
    total = len(it)
    for start in range(0, total, n):
        yield it[start:start + n]
def dec_char(number):
    """Map a base-64 digit value (0..63) to its character.

    0-9 -> '0'-'9', 10-35 -> 'A'-'Z', 36-61 -> 'a'-'z', 62 -> '_', 63 -> '.'.
    Raises ValueError for values outside 0..63 (previously a bare Exception).
    """
    if 0 <= number <= 9:
        return chr(48 + number)
    if 10 <= number <= 35:
        return chr(55 + number)
    if 36 <= number <= 61:
        return chr(61 + number)
    if number == 62:
        return '_'
    if number == 63:
        return '.'
    raise ValueError("not a base-64 digit value: %r" % (number,))
def enc_char(ch):
    """Inverse of dec_char: map a character to its base-64 digit value (0..63).

    Raises ValueError for characters outside the alphabet (previously a
    bare Exception).
    """
    symbol = ord(ch)
    if 48 <= symbol <= 57:      # '0'-'9'
        return symbol - 48
    if 65 <= symbol <= 90:      # 'A'-'Z'
        return symbol - 55
    if 97 <= symbol <= 122:     # 'a'-'z'
        return symbol - 61
    if symbol == ord('_'):
        return 62
    # Use ord('.') for consistency with the branch above (was the magic 46).
    if symbol == ord('.'):
        return 63
    raise ValueError("character not in the base-64 alphabet: %r" % (ch,))
def encode_base64(text):
    """Pack *text* into an integer, 6 bits per character, with the first
    character stored in the lowest bits."""
    bigint = 0
    for position, character in enumerate(text):
        bigint += enc_char(character) * 64 ** position
    return bigint
def decode_base64(code):
    """Unpack an integer produced by encode_base64 back into its string."""
    characters = []
    while code:
        characters.append(dec_char(code % 64))
        code //= 64
    return ''.join(characters)
class LineEncoder(Encoder[int]):
    """Encodes each line of a text as one base-64-packed integer."""
    def encode(self, text):
        return list(map(encode_base64, text.split('\n')))
    def decode(self, code):
        return '\n'.join(map(decode_base64, code))
@dataclass
class Point:
    """A projective point (x : y : z); z == 0 marks the point at infinity."""
    x: int
    y: int
    z: int
    def unpack(self) -> Tuple[int, int, int]:
        """Return the coordinates as an (x, y, z) tuple."""
        return (self.x, self.y, self.z)
class Zero(Exception):
    """Raised when the point at infinity has no affine representation."""
    pass
@dataclass
class Curve:
    """A short-Weierstrass curve y^2*z = x^3 + a*x*z^2 + b*z^3 over *field*,
    with points kept in projective coordinates."""
    field: Field[int]
    a: int
    b: int
    def point(self, x: int, y: int, z: int = 1) -> Point:
        """Build a point with field-reduced coordinates, asserting that it
        lies on the curve."""
        p = Point(self.field.into(x), self.field.into(y), self.field.into(z))
        assert self.check(p)
        return p
    def rhs(self, x: int, z: int = 1) -> int:
        """Right-hand side of the projective curve equation (not reduced)."""
        return x ** 3 + self.a * x * z ** 2 + self.b * z ** 3
    def check(self, p: Point) -> bool:
        """True iff p satisfies the projective curve equation."""
        x, y, z = p.unpack()
        return self.field.eq(y ** 2 * z, self.rhs(x, z))
    def intern_opt(self, p: Point) -> Optional[Tuple[int, int]]:
        """Return affine (x, y) for p, or None for the point at infinity."""
        if p.z == 0:
            return None
        else:
            z = self.field.inv(p.z)
            return self.field.mul(p.x, z), self.field.mul(p.y, z)
    def intern(self, p: Point) -> Tuple[int, int]:
        """Return affine (x, y) for p; raises Zero for the point at infinity."""
        res = self.intern_opt(p)
        if res is None:
            raise Zero
        else:
            return res
class NotOnCurve(Exception):
    """Raised when an x-coordinate has no corresponding point on the curve."""
    pass
class SolvableCurve(Curve):
    """A curve over a field with square roots, so a point can be recovered
    from its x-coordinate alone."""
    def __init__(self, field: SqrtField[int], a: int, b: int):
        super().__init__(field, a, b)
    def solve_for_x(self, x: int) -> Optional[Point]:
        """Return a point with the given x-coordinate, or None when rhs(x)
        is not a quadratic residue."""
        y_sq = self.field.into(self.rhs(x))
        y = self.field.sqrt(y_sq)
        if y is not None:
            return self.point(x, y)
        else:
            return None
    def asserted(self, x: int) -> Point:
        """Like solve_for_x, but raises NotOnCurve instead of returning None."""
        p = self.solve_for_x(x)
        if p is None:
            raise NotOnCurve
        else:
            return p
@dataclass
class E(Group[Point]):
    """The group of points of *curve* (written multiplicatively), using
    inversion-free projective addition/doubling formulas."""
    ord: int
    curve: Curve
    def order(self):
        return self.ord
    def into(self, a):
        # Rebuilding through curve.point() re-validates the coordinates.
        return self.curve.point(*a.unpack())
    def unit(self):
        # The point at infinity (z == 0) is the group identity.
        return Point(0, 1, 0)
    def is_unit(self, a: Point) -> bool:
        return a.z == 0
    def inv(self, a):
        # The inverse of (x : y : z) is its reflection (x : -y : z).
        return Point(a.x, self.curve.field.neg(a.y), a.z)
    def eq(self, a, b):
        # Projective points are equal when their coordinates agree up to a
        # common scale factor (compared via cross-multiplication).
        eq = self.curve.field.eq
        return eq(a.x * b.z, a.z * b.x) and eq(a.y * b.z, a.z * b.y)
    def mul(self, a, b):
        """Group operation: elliptic-curve point addition of a and b."""
        if self.is_unit(a):
            return b
        elif self.is_unit(b):
            return a
        elif self.eq(a, self.inv(b)):
            # a + (-a) = identity
            return self.unit()
        elif self.eq(a, b):
            # Point doubling (projective formulas, no field inversion).
            x, y, z = a.unpack()
            q = 2 * y * z
            n = 3 * x ** 2 + self.curve.a * z ** 2
            p = 4 * x * y ** 2 * z
            u = n ** 2 - 2 * p
            new_x = u * q
            new_z = q ** 3
            new_y = n * (p - u) - 8 * y ** 4 * z ** 2
            return self.curve.point(new_x, new_y, new_z)
        else:
            # General point addition (projective formulas).
            u = a.z * b.y - a.y * b.z
            v = a.z * b.x - a.x * b.z
            w = u ** 2 * a.z * b.z - v ** 3 - 2 * v ** 2 * a.x * b.z
            new_x = v * w
            new_y = u * (v ** 2 * a.x * b.z - w) - v ** 3 * a.y * b.z
            new_z = v ** 3 * a.z * b.z
            return self.curve.point(new_x, new_y, new_z)
@dataclass
class PartialEncoder(Encoder[Point]):
    """Encodes text by lifting each integer produced by *x_enc* onto *curve*
    as an x-coordinate (raises NotOnCurve when no matching point exists)."""
    x_enc: Encoder[int]
    curve: SolvableCurve
    def encode(self, text):
        return [self.curve.asserted(x) for x in self.x_enc.encode(text)]
    def decode(self, code):
        # Only the affine x-coordinate carries the payload.
        return self.x_enc.decode([self.curve.intern(p)[0] for p in code])
T = TypeVar('T')
class ElGamal(Generic[T]):
    """Textbook ElGamal over an arbitrary group: one (c1, c2) pair per
    encoded message element."""
    def __init__(self, group: Group[T], generator: T, encoder: Encoder[T]):
        self.group = group
        self.generator = generator
        self.encoder = encoder
    def encrypt(self, public_key: T, message: str) -> List[Tuple[T, T]]:
        return [self.encrypt_one(public_key, m) for m in self.encoder.encode(message)]
    def decrypt(self, private_key: int, cipher: List[Tuple[T, T]]) -> str:
        return self.encoder.decode([self.decrypt_one(private_key, c) for c in cipher])
    def encrypt_one(self, h: T, m: T) -> Tuple[T, T]:
        # Fresh ephemeral exponent per element. NOTE(review): `random` is not
        # cryptographically secure -- acceptable for an exercise only.
        y = randrange(0, self.group.order())
        return self.group.pow(self.generator, y), self.group.mul(self.group.pow(h, y), m)
    def decrypt_one(self, x: int, c: Tuple[T, T]) -> T:
        c1, c2 = c
        # m = c2 / c1^x
        return self.group.true_div(c2, self.group.pow(c1, x))
# NIST P-256 (secp256r1) domain parameters: prime field modulus, curve
# coefficients, generator coordinates and group order.
p = 2 ** 256 - 2 ** 224 + 2 ** 192 + 2 ** 96 - 1
a = -3
b = 41058363725152142129326129780047268409114441015993725554835256314039467401291
gx = 48439561293906451759052585252797914202762949526041747995844080717082404635286
gy = 36134250956749795798585127919587881956611106672985015071877198253568414405109
order = 115792089210356248762697446949407573529996955224135760342422259061068512044369
curve = SolvableCurve(Zn(p), a, b)
g = curve.point(gx, gy)
group = E(order, curve)
gamal = ElGamal(group, g, PartialEncoder(LineEncoder(), curve))
# Public key point, read as "x y" from the first input line.
k = curve.point(*map(int, input().split(' ')))
# NOTE(review): sys is imported here but never used.
import sys
# Read n lines of plaintext and join them into a single message.
n = int(input())
inp = [ input() for _ in range(n) ]
message = '\n'.join(inp).rstrip('\n')
def print_point(p):
    # Prints "Z" for the point at infinity, otherwise the affine "x y".
    res = curve.intern_opt(p)
    if res is None:
        print('Z')
    else:
        print(*res)
# Emit each ciphertext pair as two lines.
for c1, c2 in gamal.encrypt(k, message):
    print_point(c1)
    print_point(c2)
| [
"[email protected]"
]
| |
663c65c642e041fe9f4fcde9c464ac55686e5397 | 37cd089a4b865ee8e5928799372ab30b169bd0c8 | /src/python-bindings/scripts/classad_eval.py | 4d2209fa2383fb1b2d89df4dbe150363e2a41fe8 | [
"DOC",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | ashishadscft/htcondor | 96f6d3b512d30be445382f42c36d4419f9cf90a5 | c93abb7847bfd4d4ebd7c0b95bd2a287350c5e1c | refs/heads/master | 2020-09-12T18:35:49.815344 | 2019-11-18T17:16:20 | 2019-11-18T17:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | #!/usr/bin/env python
import sys
import classad
import htcondor
def evaluateAndPrint(argv):
    """Parse a ClassAd (from argv[1], or from a file with '-file <file>') and
    print the repr of each remaining argument evaluated as an expression
    against it.  Returns 0 on success, -1 on bad usage."""
    if len(argv) < 3:
        print("Usage: {0} <classad> <expr>[ <expr>]*".format(argv[0]))
        print("  You may substitute '-file <file>' for '<classad>'.");
        return -1
    adString = argv[1]
    if adString == "-file":
        # Shift the argument list so argv[1] becomes the file name.
        argv.pop(1)
        adString = open(argv[1]).read()
    for i in range(2,len(argv)):
        exprString = argv[i]
        # The ad is re-parsed for every expression so each evaluation starts
        # from a fresh copy.
        ad = classad.parseOne(adString)
        expr = classad.ExprTree(exprString)
        result = expr.simplify(ad)
        print(repr(result))
    return 0
def main():
    """Script entry point; returns the process exit status."""
    return evaluateAndPrint(sys.argv)

if __name__ == "__main__":
    # Delegate through main() so the two entry points cannot drift apart
    # (previously the guard called evaluateAndPrint(sys.argv) directly,
    # leaving main() dead code).
    exit(main())
| [
"[email protected]"
]
| |
fb30dd275caddb83372fc2f304fb8788cbcf7c23 | 9111dead6c8340b854bfaa5869bf6fd821b0eee0 | /docs/_ext/exts.py | a46fc3d825ca4cfb88047cfe6665fb1c142edfb7 | [
"BSD-2-Clause"
]
| permissive | starenka/django-password-policies | d88f4a57526f3af1a05e40a27b70036441e71401 | 201a158b82189457b4f9ce347001e18c7235cc67 | refs/heads/master | 2021-01-15T23:06:39.722732 | 2013-08-19T20:53:31 | 2013-08-19T20:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,571 | py | import inspect
from django.utils.html import strip_tags
from django.utils.encoding import force_unicode
from fields import model_fields
from fields import model_meta_fields
def process_docstring(app, what, name, obj, options, lines):
    """Sphinx ``autodoc-process-docstring`` hook.

    For Django model classes, appends one ``.. attribute::`` entry per model
    field (type, required/auto flags, help text and non-default options) plus
    the ``Meta`` options; for Django form classes, appends one entry per form
    field with its keyword arguments and widget.

    NOTE(review): written against Python 2 / old Django APIs
    (``dict.iterkeys``, ``force_unicode``, ``_meta._fields()``).
    """
    # This causes import errors if left outside the function
    from django.db import models
    from django import forms
    # Only look at objects that inherit from Django's base model class
    if inspect.isclass(obj) and issubclass(obj, models.Model):
        # Grab the field list from the meta class
        fields = obj._meta._fields()
        lines.append(u'')
        for field in fields:
            # Do not document AutoFields
            if type(field).__name__ == 'AutoField' and field.primary_key:
                continue
            # NOTE(review): `k` is assigned but never used.
            k = type(field).__name__
            # Decode and strip any html out of the field's help text
            help_text = strip_tags(force_unicode(field.help_text))
            # Decode and capitalize the verbose name, for use if there isn't
            # any help text
            # NOTE(review): `verbose_name` is computed but never used below.
            verbose_name = force_unicode(field.verbose_name).capitalize()
            lines.append(u'.. attribute:: %s' % field.name)
            lines.append(u' ')
            # Add the field's type to the docstring
            if isinstance(field, models.ForeignKey):
                to = field.rel.to
                l = u' %s(\':class:`~%s.%s`\')' % (type(field).__name__,
                                                   to.__module__,
                                                   to.__name__)
            elif isinstance(field, models.OneToOneField):
                to = field.rel.to
                l = u' %s(\':class:`~%s.%s`\')' % (type(field).__name__,
                                                   to.__module__,
                                                   to.__name__)
            else:
                l = u' %s' % type(field).__name__
            if not field.blank:
                l = l + ' (Required)'
            if hasattr(field, 'auto_now') and field.auto_now:
                l = l + ' (Automatically set when updated)'
            if hasattr(field, 'auto_now_add') and field.auto_now_add:
                l = l + ' (Automatically set when created)'
            lines.append(l)
            if help_text:
                lines.append(u'')
                # Add the model field to the end of the docstring as a param
                # using the help text as the description
                lines.append(u' %s' % help_text)
            lines.append(u' ')
            # Emit every field option whose value differs from the default
            # recorded in the `model_fields` table.
            f = model_fields[type(field).__name__]
            for key in sorted(f.iterkeys()):
                if hasattr(field, key) and getattr(field, key) != f[key] and getattr(field, key):
                    attr = getattr(field, key)
                    if key == 'error_messages':
                        error_dict = {}
                        for i in sorted(attr.iterkeys()):
                            error_dict[i] = force_unicode(attr[i])
                        attr = error_dict
                    if key == 'validators':
                        v = []
                        for i in sorted(attr):
                            n = ':class:`~%s.%s`' % (type(i).__module__,
                                                     type(i).__name__)
                            v.append(n)
                        attr = v
                    lines.append(u' :param %s: %s' % (key, attr))
        lines.append(u'')
        # Document any non-default Meta options.
        lines.append(u'.. attribute:: Meta')
        lines.append(u'')
        for key in sorted(model_meta_fields.iterkeys()):
            if hasattr(obj._meta, key) and getattr(obj._meta, key) != model_meta_fields[key]:
                lines.append(u' %s = %s' % (key, getattr(obj._meta, key)))
        lines.append(u'')
    # Only look at objects that inherit from Django's base model class
    if inspect.isclass(obj):
        if issubclass(obj, forms.Form) or issubclass(obj, forms.ModelForm):
            # Grab the field list from the meta class
            fields = obj.base_fields
            lines.append(u'')
            for field in fields:
                f = obj.base_fields[field]
                # Decode and strip any html out of the field's help text
                # NOTE(review): if `f` has no help_text attribute this keeps
                # the previous loop iteration's value, and the unguarded
                # `f.help_text` test below would raise -- verify intent.
                if hasattr(f, 'help_text'):
                    help_text = strip_tags(force_unicode(f.help_text))
                # Decode and capitalize the verbose name, for use if there isn't
                # any help text
                # NOTE(review): `label` is computed but never used below.
                label = force_unicode(f.label).capitalize()
                lines.append(u'.. attribute:: %s' % field)
                lines.append(u'')
                # Add the field's type to the docstring
                field_inst = obj.base_fields[field]
                l = u' :class:`~%s.%s`' % (type(field_inst).__module__,
                                           type(field_inst).__name__)
                if field_inst.required:
                    l = l + ' (Required)'
                lines.append(l)
                lines.append(u'')
                if hasattr(f, 'error_messages') and f.error_messages:
                    msgs = {}
                    for key, value in f.error_messages.items():
                        msgs[key] = force_unicode(value)
                    lines.append(u':kwarg error_messages: %s' % msgs)
                if f.help_text:
                    # Add the model field to the end of the docstring as a param
                    # using the help text as the description
                    lines.append(u':kwarg help_text: %s' % help_text)
                if hasattr(f, 'initial') and f.initial:
                    lines.append(u':kwarg initial: %s' % f.initial)
                if hasattr(f, 'localize'):
                    lines.append(u':kwarg localize: %s' % f.localize)
                if hasattr(f, 'validators') and f.validators:
                    l = []
                    for v in f.validators:
                        l.append(':class:`~%s.%s`' % (type(v).__module__,
                                                      type(v).__name__))
                    lines.append(u':kwarg validators: %s' % l)
                lines.append(u':kwarg widget: %s' % type(f.widget).__name__)
                lines.append(u'')
    # Return the extended docstring
    return lines
def setup(app):
    """Sphinx extension entry point.

    Hooks the Django-aware docstring processor into autodoc and registers a
    cross-reference directive/role pair for each Django-specific object type.
    """
    # Register the docstring processor with sphinx
    app.connect('autodoc-process-docstring', process_docstring)
    # Every name gets an identical directive/role/index-template triple, so
    # register them in a loop instead of fourteen copy-pasted calls.
    for name in (
        "admin", "command", "context_processors", "form", "formfield",
        "manager", "middleware", "model", "setting", "settings", "signal",
        "token", "validator", "view",
    ):
        app.add_crossref_type(
            directivename = name,
            rolename = name,
            # e.g. "pair: %s; model" -- the %s is filled in by Sphinx.
            indextemplate = "pair: %s; " + name,
        )
"[email protected]"
]
| |
566c95167a311915ae2e5fcd0fddadd39b9811a8 | 502e84ec9014adac3df026c66106055cf90a8825 | /apps/core/admin.py | 5becff817befcb68859ffca0246455cce8dada64 | [
"BSD-3-Clause"
]
| permissive | Hagalas/sis | a80f53c9640871bcbb84a6f9ac5643edd1423109 | d5333a263a590fe7f9f88dd298b6dbe047f4c0a5 | refs/heads/master | 2021-06-02T12:25:21.191941 | 2015-05-26T14:08:34 | 2015-05-26T14:08:34 | 34,321,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
from suit.widgets import SuitDateWidget, SuitTimeWidget, SuitSplitDateTimeWidget
from django.db import models
class BaseModelAdmin(admin.ModelAdmin):
    """Project-wide ModelAdmin base: Suit/admin widget defaults plus a helper
    for rendering a queryset as an HTML link list."""
    formfield_overrides = {
        models.ManyToManyField: {'widget': FilteredSelectMultiple('', is_stacked=False)},
        models.DateField: {'widget': SuitDateWidget()},
        models.TimeField: {'widget': SuitTimeWidget()},
        models.DateTimeField: {'widget': SuitSplitDateTimeWidget()},
    }
    def _list_field(self, qs, item_func):
        """Render *qs* as an HTML <ul> of links; item_func(item) must return
        a (url, label) pair.  Returns '-' for an empty queryset.

        NOTE(review): values are interpolated without HTML escaping; callers
        must ensure item_func output is safe.
        """
        if len(qs) == 0:
            return '-'
        items = []
        for item in qs:
            result = item_func(item)
            items.append('<li> <a href="%s">%s</a></li>' % result)
        return '<ul>%s</ul>' % ''.join(items)
| [
"[email protected]"
]
| |
044933ceef9d864c4c6bbdae99fddac39245cf68 | 89e23b8d0c00a5255de9e7f066b6ca58224bc9da | /mysite/polls/views.py | 6a1bd9dcf657d5f32fe8cde326473ad2f865efbb | []
| no_license | Mholliday6611/firstproject | 6da49d64d119fd9b7a38bf9ffa0ca5b451bb64f9 | 2d7d977fa6d9fa8641dd3a067475cfe8e65b6e52 | refs/heads/master | 2021-01-12T16:41:06.441651 | 2016-12-08T04:43:36 | 2016-12-08T04:43:36 | 71,424,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Choice, Question
class IndexView(generic.ListView):
    """Landing page listing the five most recently published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        """Return the last five published questions, excluding any whose
        pub_date lies in the future."""
        return Question.objects.filter(
            pub_date__lte=timezone.now()
        ).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Voting form for one question; unpublished questions 404."""
    model = Question
    template_name = 'polls/detail.html'
    def get_queryset(self):
        """Exclude questions that aren't published yet."""
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Vote tallies for one question."""
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for the choice posted in request.POST['choice'].

    Re-renders the detail page with an error when no valid choice was
    selected; otherwise increments the tally and redirects to the results
    page (redirect-after-POST prevents double votes on reload).
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # BUG FIX: `votes += 1` is a read-modify-write that loses votes under
        # concurrent requests; F() pushes the increment into the database.
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        return HttpResponseRedirect(reverse('polls:results', args=(question_id,)))
"[email protected]"
]
| |
f529cabe1b39c2b9c90469b2edc8da3230bb3ca1 | baad209266154504218fdceda1a88939bb7df361 | /sandbox/apps/python/multigrid/NAS-PB-MG-3.2/main.py | 216d4f43feaebfd86691d7f0f2db679c9b99242b | [
"Apache-2.0"
]
| permissive | bollu/polymage | c104efab65181d10b55695c7fe94edb513708e7e | 517657142cc3ae74e9daff3b41a0257d6a4ce2b6 | refs/heads/master | 2021-01-09T20:41:53.647747 | 2016-06-24T05:27:43 | 2016-06-24T05:27:43 | 63,394,845 | 12 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | import numpy as np
import time
import sys
from __init__ import *
from init import init_all, init_norm
from verify import verify_norm
from builder import create_lib, build_resid, build_mg3p
from exec_mg import multigrid
from printer import print_line, print_header, print_config
# Human-readable benchmark identifier (printed in the header/config).
app = 'nas-pb-mg-3.2'
def main():
    """Drive the NAS-PB MG benchmark: initialise data, build the compiled
    libraries, run the multigrid V-cycles and verify the residual norm."""
    print_header()
    app_data = {}
    app_data['ROOT'] = ROOT
    # init all the required data
    init_all(app_data)
    print_config(app_data)
    app_name = "nas_mg_class_"+app_data['prob_class']
    app_data['app'] = app_name
    if app_data['mode'] == 'tune':
        # Autotuning mode is not implemented.
        #app_tune(app_data)
        pass
    else:
        #-------------------------------------------------------------------
        # setting up residual norm computation
        create_lib(None, "norm", app_data)
        # setting up multigrid v-cycle computation
        create_lib(build_mg3p, app_name, app_data)
        # setting up standalone version of residual computation
        create_lib(build_resid, "resid", app_data)
        #-------------------------------------------------------------------
        init_norm(app_data)
        multigrid(app_data)
        verify_norm(app_data)
        #-------------------------------------------------------------------
    return
# Runs unconditionally: this module is meant to be executed as a script.
main()
| [
"[email protected]"
]
| |
f488fb1b598893609ff4510a7ee334fda84ad105 | 212724dd876c15ef801fb781e907b1c7dd08f4ae | /skyline/webapp/gunicorn.py | 50d0f6817e3af53960712284da80d1324840b628 | [
"MIT"
]
| permissive | wfloutier/skyline | b9e769cddccdefeeb7c7cc258524bbf489f9d5eb | b12758dc11564de93c7ad76c1f8ed3327db78aa4 | refs/heads/master | 2020-08-08T03:19:40.283298 | 2019-10-09T11:05:13 | 2019-10-09T11:05:13 | 213,693,601 | 0 | 0 | NOASSERTION | 2019-10-08T16:20:15 | 2019-10-08T16:20:15 | null | UTF-8 | Python | false | false | 1,378 | py | import sys
import os.path
import logging
# import multiprocessing
# import traceback
from logging.handlers import TimedRotatingFileHandler, MemoryHandler
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
import settings
bind = '%s:%s' % (settings.WEBAPP_IP, str(settings.WEBAPP_PORT))
# workers = multiprocessing.cpu_count() * 2 + 1
workers = 2
backlog = 10
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logger = logging.getLogger(skyline_app_logger)
pidfile = '%s/%s.pid' % (settings.PID_PATH, skyline_app)
accesslog = '%s/webapp.access.log' % (settings.LOG_PATH)
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
errorlog = '%s/webapp.log' % (settings.LOG_PATH)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler = logging.handlers.TimedRotatingFileHandler(
logfile,
when="midnight",
interval=1,
backupCount=5)
memory_handler = logging.handlers.MemoryHandler(100,
flushLevel=logging.DEBUG,
target=handler)
handler.setFormatter(formatter)
logger.addHandler(memory_handler)
| [
"[email protected]"
]
| |
a286590d7c089d392082b4cfc43cbd3de3c36947 | d738a9f24693d6c8a1be01f0710696f91bfa478d | /gage.py | 63ea3d093ced471c4cbcea53b00f37dbc5b840b9 | []
| no_license | PaulSchrum/SMSbridge2PostGreSQL | d784aa97bf416557586bf49179521d3c497bcd02 | 68a05a595b5241f602685cdfeb91b22b7f0ffc3d | refs/heads/master | 2020-03-07T12:26:32.036989 | 2018-05-11T00:29:58 | 2018-05-11T00:29:58 | 127,478,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,009 | py | '''
Gage class represents a a gage or sensor.
All data is expected to come from a json entity which was originally created
via a MongoDB json dump from FRF's SMS database.
'''
import os, json
from frfObjectBase import FrfObjectBase as B
# (field name, type, max length) triples describing the attributes every
# Gage row must provide; exposed via Gage.getRequiredFieldsTuples().
requiredFields = [('mongo_id', 'STRING', '31'),
                  ('stationId', 'STRING', '31'),
                  ('gageNumber', 'STRING', '31'),
                  ('gageName', 'STRING', '31'),
                  ('gageType', 'STRING', '63'),
                  ('lat', 'STRING', '31'),
                  ('lon', 'STRING', '31'),
                  ('owner', 'STRING', '63'),
                  ('serialNumber', 'STRING', '63'),
                  ('barCode', 'STRING', '31'),
                  ('manufacturer', 'STRING', '31'),
                  ('model', 'STRING', '31'),
                  ('firmwareVersion', 'STRING', '31'),
                  ('depth', 'STRING', '31'),
                  ('description', 'STRING', '255'),
                  ('createdAt', 'STRING', '31'),
                  ('createdBy', 'STRING', '31'),
                  ]
class Gage(B):
    """One gage/sensor record built from a single JSON row of the FRF SMS
    MongoDB dump."""
    def __init__(self, rowStr):
        aDict = json.loads(rowStr)
        self.mongo_id = aDict[B._map('mongo_id')]
        self.stationId = aDict.get('stationId', None)
        self.gageNumber = aDict.get('gageNumber', None)
        self.gageName = aDict.get('gageName', None)
        self.gageType = aDict.get('gageType', None)
        self.lat = float(aDict.get('lat', '0.0'))
        self.lon = float(aDict.get('lon', '0.0'))
        self.owner = aDict.get('owner', None)
        self.serialNumber = aDict.get('serialNumber', None)
        # BUG FIX: the lookup key used to be '' (empty string), so barCode
        # was always None.
        self.barCode = aDict.get('barCode', None)
        self.manufacturer = aDict.get('manufacturer', None)
        self.model = aDict.get('model', None)
        self.firmwareVersion = aDict.get('firmwareVersion', None)
        self.depth = aDict.get('depth', None)
        if self.depth is not None and isinstance(self.depth, dict): # it is another json object, so unpack
            # BUG FIX: json.loads() was called on an already-parsed dict,
            # which always raised and silently discarded the reading; take
            # the single value directly instead.
            try:
                self.depth = list(self.depth.values())[0]
            except Exception:
                self.depth = None
        self.description = aDict.get('description', None)
        self.createdAt = aDict.get('createdAt', None)
        self.createdBy = aDict.get('createdBy', None)
        self._cleanUpNAs()
        if self.createdAt is not None:
            # Mongo extended-JSON stores dates as {"$date": ...}.
            self.createdAt = self.createdAt['$date']

    @staticmethod
    def getRequiredFieldsTuples():
        """Return the (name, type, length) schema triples for a Gage row."""
        return requiredFields

    @staticmethod
    def getRequiredFieldNames():
        """Return the field-name list, with the geometry token first."""
        retList = ['SHAPE@XY']
        retList.extend([val[0] for val in requiredFields])
        return retList

    def createOrUpdateRow(self):
        '''
        :return: List containing all values of this gage to be
        used in creating a new gage row.
        '''
        retList = [(self.lon, self.lat)] # corresponds to 'SHAPE@XY'
        for fieldName in Gage.getRequiredFieldNames()[1:]:
            aValue = self.__dict__[fieldName]
            retList.append(aValue)
        return retList
def GetAllGagesDict(pathFN):
    """Read a JSON-lines gage dump and return {mongo_id: Gage}."""
    returnDict = {}
    with open(pathFN, 'r') as theFile:
        allLines = theFile.readlines()
        for aRow in allLines:
            aStation = Gage(aRow)
            # BUG FIX: Gage has no `id` attribute (AttributeError on every
            # row); key on the record's Mongo id instead.
            returnDict[aStation.mongo_id] = aStation
    return returnDict
# def getGagesFieldList():
# '''
# Retrieves the list of fields to be updated for a Gage Row.
# Using this every time ensures that the order of fields is always the same.
# '''
# retList = [val[0] for val in requiredFields]
# return retList
if __name__ == '__main__':
testDir = 'Data/FRFdata'
cwd = os.getcwd()
testFullPath = os.path.join(cwd, testDir)
stationsFile = os.path.join(testFullPath, 'gages.json')
allGagesDict = GetAllGagesDict(stationsFile)
for gageKey, aGage in allGagesDict.iteritems():
print gageKey
aGage.prettyPrint()
print
| [
"[email protected]"
]
| |
f619abde284052fd0d7c5f49dc880743a48e4667 | 9fae63ebb022b117041b3ece348e7375de161505 | /ems_server/emp/urls.py | a23ab508912f10a52d2695f60ce63077b37252b5 | []
| no_license | Hz5212/Python2010-drf_day5 | 2f174d8a2f85677e7e511885178f6c1f13b06082 | 79e906af8b94d959ae611ede49d257e7687ab203 | refs/heads/master | 2023-04-02T20:37:30.383078 | 2021-04-06T00:07:45 | 2021-04-06T00:07:45 | 355,005,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from django.urls import path
from emp import views
# Route both the collection URL ("employees/") and the single-record URL
# ("employees/<pk>/") to the same APIView; the view presumably branches on
# whether a pk was captured -- confirm in emp.views.EmployeeAPIView.
urlpatterns = [
    path("employees/", views.EmployeeAPIView.as_view()),
    path("employees/<str:pk>/", views.EmployeeAPIView.as_view()),
]
| [
"[email protected]"
]
| |
f96b6739d30de98f438bfc15e544eb95f5523574 | 4a7a6f629e4dd16b5ba3db23a6b6369dbb19c10d | /a038- 數字翻轉.py | f4df8c96273b40ed14b01a7ac42e057c5349a0a8 | []
| no_license | jlhung/ZeroJudge-Python | 1170fc70ffc6a1a577d035cd70289529d2bbc07e | 356381363891ba05302736746c698ea85668af50 | refs/heads/master | 2022-12-08T06:42:12.160731 | 2020-09-03T12:04:49 | 2020-09-03T12:04:49 | 282,219,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | '''
20200723 v1.0 jlhung: a lone "0" must still be printed; trailing zeros must be stripped before reversing
'''
# Read one integer per line until EOF; print each value's digits reversed.
while True:
    try:
        n = int(input())
        if n == 0:
            # A lone zero reverses to itself, and also terminates the loop.
            print(0)
            break
        # Strip trailing zeros first (e.g. 1200 -> 12) so the reversal does
        # not emit leading zeros ("0021" would be wrong; "21" is printed).
        while n % 10 == 0:
            n //= 10
        # Reverse the remaining digits with a backwards string slice.
        print(str(n)[::-1])
    except(EOFError):
        # No more input lines.
        break | [
"[email protected]"
]
| |
3bdbd3cfdd89c89ececba6bd06fdd7af1e184e39 | 6e68584f2819351abe628b659c01184f51fec976 | /Centre_College/CSC_339_SP2015/vindiniumAI/pybrain/optimization/memetic/inversememetic.py | f38eec205b10fd0408828968956d69f6cf8c206b | [
"WTFPL"
]
| permissive | DanSGraham/code | 0a16a2bfe51cebb62819cd510c7717ae24b12d1b | fc54b6d50360ae12f207385b5d25adf72bfa8121 | refs/heads/master | 2020-03-29T21:09:18.974467 | 2017-06-14T04:04:48 | 2017-06-14T04:04:48 | 36,774,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | __author__ = 'Tom Schaul, [email protected]'
from memetic import MemeticSearch
class InverseMemeticSearch(MemeticSearch):
    """ Interleaving local search with topology search (inverse of memetic search) """
    def _learnStep(self):
        # Swap the roles of the two mutation sets (switchMutations is
        # inherited from MemeticSearch -- not shown here), run one ordinary
        # memetic learning step under the swapped roles, then swap back so
        # the object is left in its original configuration.
        self.switchMutations()
        MemeticSearch._learnStep(self)
        self.switchMutations()
| [
"[email protected]"
]
| |
2f6eee826dd2db1a3b0bfd02c9320d14e04010f1 | 945929aca88ab7614e16284756a118c71ca87259 | /requests/Requests/test_api_08_auth.py | 5f5deafd346fe8bb8983ea51633986b4dfb23410 | []
| no_license | XIAOQUANHE/pythontest | 344c221b688f80e0ef5a3d3d71ffa48759e09ac1 | 8a02508fd66b13c6c41a689a65adac53aeb7361a | refs/heads/master | 2023-01-24T22:25:57.278411 | 2020-11-30T05:43:31 | 2020-11-30T05:43:31 | 290,959,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | '''
Login via a GET request with username/password is rare in practice; logins are almost always POST requests.
r = rs.get(url=url,auth=(username,password))
''' | [
"[email protected]"
]
| |
f03e352509aa4b80b654d443e20eafe28072fae0 | 56ced24af9276acbb846cf3ff1eeb47b3442a264 | /zigzag.py | da9e45916d46379f49e82f331cd5b2c541765ca4 | []
| no_license | Yorktsc/LeetCode | 9b614ce670d5f97dace1000f49983ac9d4c7892d | 44e457d4b803c57c6c2af7973da84f830e148ec7 | refs/heads/master | 2020-06-28T09:46:45.052953 | 2019-11-08T08:34:24 | 2019-11-08T08:34:24 | 200,202,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 103: binary tree zigzag level order traversal."""

    def zigzagLevelOrder(self, root):
        """Return the zigzag (spiral) level-order traversal of a binary tree:
        level 0 left-to-right, level 1 right-to-left, and so on.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        levels = []

        def visit(node, depth):
            # Pre-order depth-first walk; each node drops its value into the
            # bucket for its level, creating the bucket on first arrival.
            if not node:
                return
            if depth == len(levels):
                levels.append([])
            if depth % 2:
                levels[depth].insert(0, node.val)  # right-to-left level
            else:
                levels[depth].append(node.val)     # left-to-right level
            visit(node.left, depth + 1)
            visit(node.right, depth + 1)

        visit(root, 0)
        return levels
| [
"[email protected]"
]
| |
c6f623c161bda067f97a2de450fbd681059f6f60 | 750f4897e103b29a23088dd564d69f9ad27c9d97 | /portfolio/migrations/0002_remove_customer_email.py | d033bc685cab7a79dfa98043b5dc148b4e730daa | []
| no_license | hippili/Efs2 | f45ab07a24a060de70abe4ca01d3b709f124cdfa | 39980c081a4c74156f4485dc5b7ce0fae55fef6e | refs/heads/master | 2023-03-06T19:21:09.883134 | 2021-02-24T05:37:05 | 2021-02-24T05:37:05 | 341,789,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # Generated by Django 3.0.7 on 2021-02-22 00:00
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the ``email`` field from the
    ``customer`` model (follows portfolio.0001_initial)."""
    dependencies = [
        ('portfolio', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='customer',
            name='email',
        ),
    ]
| [
"[email protected]"
]
| |
9c31bb49454d9c7290279507c98e14594f9f9512 | 020204c8c01171d4f85ad158b0d43f6c4b20d3b0 | /management/admin.py | 554f0176b16192ae2e18160c11befbb35b8c6b7d | []
| no_license | yksmbkkr/tnp_archive | 28795636548670f37e1f329f97b11d38f9c358be | ac39174c69b3da09ddcdf4cecabecb16baf1b799 | refs/heads/master | 2022-09-27T06:23:49.474581 | 2020-06-04T10:47:11 | 2020-06-04T10:47:11 | 269,297,334 | 0 | 1 | null | 2020-06-04T08:48:39 | 2020-06-04T08:02:45 | HTML | UTF-8 | Python | false | false | 1,212 | py | from django.contrib import admin
from management.models import *
# Register your models here.
# Simple lookup models exposed with the default ModelAdmin (no options).
admin.site.register(branches)
admin.site.register(branch_type)
admin.site.register(managers)
admin.site.register(company_grade)
admin.site.register(current_batch_year)
@admin.register(company)
class company_admin(admin.ModelAdmin):
    """Company changelist: name/ctc/grade columns, searchable by name,
    filterable by allowed branch and grade."""
    list_display = ('name', 'ctc', 'grade')
    list_filter = ('branch_allowed', 'grade')
    search_fields = ('name', )
@admin.register(student_email_db)
class student_email_db_admin(admin.ModelAdmin):
    """Student e-mail records: searchable by roll number or e-mail,
    filterable by batch year."""
    list_display = ('rollno','email')
    list_filter = ('batch',)
    search_fields = ('rollno','email')
@admin.register(ban)
class ban_admin(admin.ModelAdmin):
    """Banned students: shows who banned the roll number and when."""
    list_display = ('rollno', 'banned_by', 'banned_on')
    search_fields = ('rollno',)
@admin.register(profile_set)
class profile_set_admin(admin.ModelAdmin):
    """Profile-completion records, searchable via the related user's
    username (note the '__' related-field lookup)."""
    list_display = ('user',)
    search_fields = ('user__username',)
@admin.register(sms_logs)
class sms_logs_admin(admin.ModelAdmin):
    """SMS log changelist; the first column is the computed
    "sender - receiver" pair rendered by get_from_to below."""
    list_display = ('get_from_to','message_body','timestamp')
    search_fields = ('sender','receiver','message_body')
    def get_from_to(self,obj):
        # Combine both usernames for the changelist column; assumes sender
        # and receiver are non-null user relations -- confirm on the model.
        return obj.sender.username + ' - ' + obj.receiver.username | [
"[email protected]"
]
| |
a87c9047c98e2aca8c3c3fa0f4081072d0318a75 | 5f69a130169d3316f4e0cfdd7c6bb801bb289d51 | /path2png(Mine).py | 0b348efa4eb02c74ff1874637b2f954471514b0b | []
| no_license | tushaR18k/Diagrams-data | dee330025ab13e4bcd0fc8010c07ab0953ffdf36 | 3ff00c749834fa4a51b8c3f0a15013f133a562cb | refs/heads/main | 2023-07-04T17:36:13.392144 | 2021-08-10T04:54:59 | 2021-08-10T04:54:59 | 385,915,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,512 | py | """
Demo skript: Turn Drawings into PNGs
------------------------------------
Walk through the drawings of a page and join path rectangles, which "touch"
each other.
Create high resolution PNG images for each of the resulting rectangles.
License & Copyright
-------------------
License AGPL 3.0
Copyright (c) 2021, Jorj McKie
"""
import fitz
import os
paperdetails = []
def findid(text, fig):
    """Return the index in *text* of the first figure-reference phrase found.

    Phrases are tried in a fixed priority order (generic lead-ins first, then
    phrases anchored on the figure label *fig*, e.g. "figure 3 shows the").
    The position of the first phrase present in *text* is returned, or -1 if
    none of them occurs.

    Improved: the original built a dict that eagerly ran all nine
    ``str.find`` calls (and shadowed the builtin ``id``); this version scans
    the same phrases lazily in the same order, so results are identical but
    the search stops at the first hit.
    """
    phrases = (
        "we present",
        "as shown in",
        "is shown in",
        "from",
        "the results are",
        f"{fig} illustrates",
        f"{fig} demonstrates",
        f"{fig} presents the",
        f"{fig} shows the",
    )
    for phrase in phrases:
        idx = text.find(phrase)
        if idx != -1:
            return idx
    return -1
# Work out of the PDF directory; papernames is currently pinned to a single
# test file (the directory-listing and batch variants are left commented out).
os.chdir('../PDFACL')
papernames = ["9.pdf"]#list(os.listdir())
#papernames = ["11","101","20","765","35","589","342","58","1000","5001"]
# Per-paper pipeline: open the PDF, cluster the vector drawings on one page
# into figure rectangles, render each cluster to a PNG, pair it with its
# "Fig ..." caption block, then gather describing paragraphs plus the
# abstract into `paperdetails`.
for papername in papernames:
    papername = papername.split('.')
    papername = papername[0]
    try:
        doc = fitz.open(f"B:\\Nature_Scraping\\{papername}.pdf")
    except:
        # NOTE(review): bare except silently skips any unreadable PDF.
        continue
    df = {'Image Location':[],'Fig Caption':[],'Fig Desc.':[]}
    figcapmapper = {}
    textlist = []
    if len(doc) > 30:
        continue
    for page in range(0,1):#doc:
        new_rects = [] # resulting rectangles
        pagelist = []
        # NOTE(review): the loop variable is immediately overwritten -- only
        # page index 5 is ever processed, and it is assumed to exist.
        page = doc[5]
        print("Hlelos")
        print(len(page.get_drawings()))
        if len(page.get_drawings())> 200:
            continue
        # Merge drawing rectangles that touch each other into clusters.
        for p in page.get_drawings():
            #print("Hello")
            #print(p)
            w = p["width"]
            r = p["rect"] + (-w, -w, w, w) # enlarge each rectangle by width value
            for i in range(len(new_rects)):
                if abs(r & new_rects[i]) > 0: # touching one of the new rects?
                    new_rects[i] |= r # enlarge it
                    break
            # now look if contained in one of the new rects
            remainder = [s for s in new_rects if r in s]
            if remainder == []: # no ==> add this rect to new rects
                new_rects.append(r)
            #print("Loop runing")
        print("Ji")
        print(new_rects)
        new_rects = list(set(new_rects)) # remove any duplicates
        new_rects.sort(key=lambda r: abs(r), reverse=True)
        remove = []
        # text = page.get_text("text")
        # print(text)
        # Text blocks sorted bottom-up; used below to find caption blocks.
        b = page.get_text("blocks")
        b.sort(key=lambda block: (block[1],block[0]),reverse=True)
        for blocks in b:
            pagelist.append(blocks)
        # text = page.get_text("text")
        # textlist.append(text)
        # Mark rectangles fully contained in another one (currently the
        # deletion of those marks is commented out, so `remove` is unused).
        for j in range(len(new_rects)):
            for i in range(len(new_rects)):
                if new_rects[j] in new_rects[i] and i != j:
                    remove.append(j)
        remove = list(set(remove))
        # for i in reversed(remove):
        #     del new_rects[i]
        new_rects.sort(key=lambda r: (r.tl.y, r.tl.x)) # sort by location
        mat = fitz.Matrix(3, 3) # high resolution matrix
        indexes=[]
        for i, r in enumerate(new_rects):
            if r.height <= 90 or r.width <= 90:
                continue # skip lines and empty rects
            #print(r)
            print(page.number)
            #print(text)
            print(r.x0)
            print(r.y0)
            print(r.x1)
            print(r.y1)
            idx = -1
            prev = -1
            it = 0
            # Look (at most 35 tries) for the nearest unused text block just
            # below the rectangle whose text starts with "Fig"; that block is
            # taken as the figure caption.
            while it < 35:
                it+=1
                print("While")
                print(page)
                print("Indexes: ",indexes)
                minVal = 100000
                print(prev)
                for k in range(len(b)):
                    if k not in indexes:
                        if b[k][1] > (r.y1-2):
                            diff = b[k][1]-r.y1
                            if(diff < minVal):
                                minVal = diff
                                #print(b[k][4])
                                idx = k
                print(idx)
                print(figcapmapper.keys())
                try:
                    caption = b[idx][4]
                except:
                    break
                #print("Caption:", caption)
                print("************************************")
                print(indexes)
                figcapt = caption.split(':')
                if "Fig" not in figcapt[0]:
                    break
                try:
                    if figcapt[0] in figcapmapper.keys():
                        print(figcapmapper.keys())
                        print(indexes)
                        continue
                    else:
                        #print(caption)
                        indexes.append(idx)
                        prev = idx
                        figcapmapper[figcapt[0]] = figcapt[1]
                        # Render the clipped rectangle at 3x resolution.
                        pix = page.getPixmap(matrix=mat, clip=r)
                        loc = f"B:/Nature_Scraping/ACLTest/{papername}.{figcapt[0]}.png"
                        #print(caption)
                        print(loc)
                        pix.writePNG(loc)
                        df["Image Location"].append(loc)
                        df["Fig Caption"].append(figcapt[1])
                        break
                except:
                    break
        textlist.append(pagelist)
    print(df)
    # Recover the figure label ("Figure N") from each saved PNG file name:
    # the path is "<dir>/<paper>.<label>.png", so the label is split index 1.
    img = df["Image Location"]
    figs=[]
    for i in img:
        s = i.split('/')
        s = s[len(s)-1]
        f = s.split('.')
        f = f[1]
        figs.append(f)
    print(figs)
    print(textlist)
    paras = []
    for ep in textlist:
        for i in range(len(ep)):
            l=[]
            text = ep[i][4]
            l.append(text)
            paras.append(l)
    print("************************Paras*************************")
    print(paras)
    # for i in range(len(textlist)):
    #     textlist[i] = set(textlist[i])
    # Concatenate every paragraph that mentions a figure label into that
    # figure's description. NOTE(review): findid's result is computed but
    # never used here -- only the plain substring test decides inclusion.
    imgdesc={}
    # print(textlist)
    for t in paras:
        text = t[0].lower()
        for fig in figs:
            fig = fig.lower()
            idx = findid(text,fig)
            if fig in text:
                if fig in imgdesc.keys():
                    imgdesc[fig]+= text
                else:
                    imgdesc[fig] = text
    # print(imgdesc)
    # print(imgdesc['figure 4'])
    # print(imgdesc['figure 5'])
    l = df["Image Location"]
    for loc in l:
        s = loc.split(".")
        k = s[1]
        k = k.lower()
        desc = imgdesc[k]
        df["Fig Desc."].append(desc)
    print("Dataframe**********************************************************************************")
    print(df)
    # Pull the abstract: the text block right after the one containing
    # the word "Abstract" on page 1. NOTE(review): `id` shadows the builtin.
    page1 = doc[0]
    abstract = page1.get_text("blocks")
    paper = {'id':papername,'abstract':'','diagrams':[]}
    #print(abstract)
    for i in range(len(abstract)):
        t = abstract[i][4]
        id = t.find("Abstract")
        if id!=-1:
            a = abstract[i+1][4]
            paper['abstract']+=a
            paper['diagrams'].append(df)
            break
    paperdetails.append(paper)
# Report where we ended up and what was collected; the JSON dump of the
# results is currently left disabled.
print(os.getcwd())
print(paperdetails)
# import json
# with open("Acldata.json",'w') as f:
#     json.dump(paperdetails,f)
| [
"[email protected]"
]
| |
e74bdca48ed80b6224ab0aa3aac1397fbc4f6c50 | ebebce7f00990f8cad48d845cad3d80cba3beb3d | /multithreaded_merge_sort.py | 02aa580555c87a832183c2e11000e7bb6f70356c | []
| no_license | prurph/python-concurrency-playground | a84235ae1c2e8bac1b4462b3fe9124f93775e964 | c71a5bb9025123c31aed5b314fdd246096cbacd5 | refs/heads/master | 2020-12-30T08:41:54.573002 | 2020-03-07T13:37:16 | 2020-03-07T13:37:16 | 238,933,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | import random
import time
from threading import Thread
def multithreaded_merge_sort(arr):
    """Sort arr in place (and return it) with a thread-per-subrange merge
    sort. One scratch list the size of arr is shared by all workers; each
    worker only touches its own disjoint index range, so no locking is used.
    """
    return multithreaded_merge_sort_h(arr, 0, len(arr) - 1, [None] * len(arr))
def multithreaded_merge_sort_h(arr, start, end, merge_scratch):
    """Recursively sort arr[start..end] (inclusive), spawning one thread per
    half at every split and joining both before merging.

    NOTE: thread count grows with the input size (two new threads per
    split), which is why main() caps the benchmark sizes.
    """
    if start >= end:
        return arr
    mid = start + (end - start) // 2
    workers = (
        Thread(
            name=f"MS[{start}:{mid + 1}]",
            target=multithreaded_merge_sort_h,
            args=(arr, start, mid, merge_scratch),
        ),
        Thread(
            name=f"MS[{mid + 1}:{end}]",
            target=multithreaded_merge_sort_h,
            args=(arr, mid + 1, end, merge_scratch),
        ),
    )
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # Copy array into backup array and merge from there
    for i in range(start, end + 1):
        merge_scratch[i] = arr[i]
    return merge(arr, merge_scratch, start, mid, end)
def single_threaded_merge_sort(arr):
    """Sort arr in place with top-down merge sort and return it.

    Fixed: lists of length 0 or 1 are returned immediately. Previously an
    empty list reached the helper with start=0, end=-1, whose `start == end`
    base case never triggered, recursing forever (RecursionError).
    """
    if len(arr) < 2:
        return arr
    return single_threaded_merge_sort_h(arr, arr[:], 0, len(arr) - 1)
def single_threaded_merge_sort_h(arr, aux_arr, start, end):
    """Recursively merge sort arr[start..end] (inclusive).

    Classic alternating-buffer scheme: each recursion level swaps the roles
    of arr and aux_arr, so the halves of aux_arr get sorted (with arr as
    their scratch space) and are then merged back into arr.

    Fixed: the base case now uses >= instead of ==, so an empty range
    (start > end, e.g. sorting an empty list) returns instead of recursing
    forever. This also matches multithreaded_merge_sort_h's base case.
    """
    if start >= end:
        return arr
    mid = start + (end - start) // 2
    single_threaded_merge_sort_h(aux_arr, arr, start, mid)
    single_threaded_merge_sort_h(aux_arr, arr, mid + 1, end)
    return merge(arr, aux_arr, start, mid, end)
def merge(merge_into, merge_from, start, mid, end):
    """Merge the two sorted runs merge_from[start..mid] and
    merge_from[mid+1..end] into merge_into[start..end] (in place).

    Returns merge_into for convenience.
    """
    read_left = start
    read_right = mid + 1
    write = start
    # Take the smaller head element of the two runs until one run empties.
    while read_left <= mid and read_right <= end:
        if merge_from[read_left] < merge_from[read_right]:
            merge_into[write] = merge_from[read_left]
            read_left += 1
        else:
            merge_into[write] = merge_from[read_right]
            read_right += 1
        write += 1
    # At most one run has leftovers; bulk-copy it into the remaining slots.
    if read_left <= mid:
        merge_into[write:end + 1] = merge_from[read_left:mid + 1]
    else:
        merge_into[write:end + 1] = merge_from[read_right:end + 1]
    return merge_into
def main():
    """Benchmark single-threaded vs multithreaded merge sort on shuffled
    lists of size 1, 10, 100 and 1000 and print the timings."""
    # Very predictably this blows up recursively creating thousands of threads if you go any higher
    for test_size in map(lambda x: x[1] ** x[0], enumerate([10] * 4)):
        arr_1 = list(range(test_size))
        random.shuffle(arr_1)
        arr_2 = arr_1[:]
        st_start = time.time()
        single_threaded_merge_sort(arr_1)
        st_end = time.time()
        mt_start = time.time()
        # Fixed: time the multithreaded sort on the untouched copy arr_2.
        # Previously it re-sorted arr_1, which the single-threaded run had
        # already sorted in place, so arr_2 was unused and the comparison
        # favoured the multithreaded sort.
        multithreaded_merge_sort(arr_2)
        mt_end = time.time()
        print(
            f"List size: {test_size} Single-threaded: {st_end - st_start:0.5f} Multi-threaded: {mt_end - mt_start:0.5f}"
        )
# Run the benchmark only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"[email protected]"
]
| |
a37ac3291231053560cbbc640451e0e4de4a3c28 | f5659d8fb411fbf54e1c6bb376411ae1fa774bd6 | /photoreport/mainapp/admin.py | 55641123b1ee03374c58f44b895e5bfb00b9f242 | []
| no_license | santoshghimire/photolog | fc995b433ba9cba509ffbb13f39670d48cad304d | 2dfe7b038ec9d6e76f3c345a080d3008d3044c4d | refs/heads/master | 2021-01-18T13:12:08.179544 | 2017-02-02T17:04:12 | 2017-02-02T17:04:12 | 80,746,257 | 1 | 0 | null | 2017-02-02T16:52:36 | 2017-02-02T16:52:36 | null | UTF-8 | Python | false | false | 168 | py | from django.contrib import admin
from .models import InputFile, Image, Project
# Expose the photo-report models in the Django admin with default options.
admin.site.register(InputFile)
admin.site.register(Image)
admin.site.register(Project)
| [
"[email protected]"
]
| |
e29850d4bc107cdd9a707c816fea75d159dd1ae1 | 4cae2a0808d0f200a5f91a9724419a081b8c3eb0 | /create_biometric_users/models/ecube_bio_machine.py | 57453c1b50ef85bdab1500893f326346dbd611f0 | []
| no_license | khyasir/Latitude_Custom_Modules | 7392ba47da8c172f46902d32454e13614b5d5e8b | 6758fc2a97073609dc305e71571f9ea42916f71b | refs/heads/master | 2021-05-02T12:04:37.953490 | 2018-02-08T08:52:24 | 2018-02-08T08:52:24 | 120,735,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from openerp import models, fields, api
class EcubeMachine(models.Model):
    """OpenERP/Odoo model describing a biometric attendance machine."""
    _name = 'ecube.machine'
    _description = 'EcubeMachine'
    # Human-readable machine name.
    name = fields.Char('Machine Name')
    # Network address of the device -- presumably an IPv4 string; confirm.
    machine_ip = fields.Char('Machine IP')
    # On/off flag for the machine; exact semantics set by the application.
    machine_status = fields.Boolean('Machine Status') | [
"[email protected]"
]
| |
ddcb7c9b4f04cabf00591c5bcb914a93a5489ff6 | 3cbb326024cedcede8a432582424ca1f37980738 | /Snowball/Snowball/middlewares.py | b1226877052b177a74e2dbe6aae1ead49953ba76 | []
| no_license | horysk/horysk_kaggle | bb02b9130c82948b187282e7ab49494ebea71bf5 | b26a40014be8d4548eeffd439f8255676091dfc4 | refs/heads/master | 2022-11-30T00:58:20.791818 | 2020-07-17T08:15:56 | 2020-07-17T08:15:56 | 280,077,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,601 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SnowballSpiderMiddleware(object):
    """Scrapy spider middleware -- unmodified project template.

    Hooks the spider side of the engine (responses going into the spider,
    results coming out). Every hook below still has the default
    pass-through behaviour.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None
    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i
    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass
    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class SnowballDownloaderMiddleware(object):
    """Scrapy downloader middleware -- unmodified project template.

    Hooks the downloader side of the engine (outgoing requests, incoming
    responses, download errors). Every hook below still has the default
    pass-through behaviour.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response
    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
]
| |
9866fcf46bab6408ee2d067adcfed3f1ed0287ad | fcdfb4231b64e38a5f6611057097def815a6a987 | /baidumap/tests/autopilot/baidumap/tests/__init__.py | 0bf71b2ca06c109fd48386845c2031198789f74a | []
| no_license | liu-xiao-guo/baidumap | f2967efc845347bb40769ea7202bb8d4b4c6e66d | e6ba8ba6bb3df4e2956af55414e5e8a1a34ac06a | refs/heads/master | 2021-01-10T08:45:01.423685 | 2016-03-23T04:47:49 | 2016-03-23T04:47:49 | 54,531,442 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
"""Ubuntu Touch App Autopilot tests."""
import os
import logging
import baidumap
from autopilot.testcase import AutopilotTestCase
from autopilot import logging as autopilot_logging
import ubuntuuitoolkit
from ubuntuuitoolkit import base
logger = logging.getLogger(__name__)
class BaseTestCase(AutopilotTestCase):
"""A common test case class
"""
local_location = os.path.dirname(os.path.dirname(os.getcwd()))
local_location_qml = os.path.join(local_location, 'Main.qml')
click_package = '{0}.{1}'.format('baidumap', 'liu-xiao-guo')
def setUp(self):
super(BaseTestCase, self).setUp()
self.launcher, self.test_type = self.get_launcher_and_type()
self.app = baidumap.TouchApp(self.launcher(), self.test_type)
def get_launcher_and_type(self):
if os.path.exists(self.local_location_qml):
launcher = self.launch_test_local
test_type = 'local'
else:
launcher = self.launch_test_click
test_type = 'click'
return launcher, test_type
@autopilot_logging.log_action(logger.info)
def launch_test_local(self):
return self.launch_test_application(
base.get_qmlscene_launch_command(),
self.local_location_qml,
app_type='qt',
emulator_base=ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase)
@autopilot_logging.log_action(logger.info)
def launch_test_click(self):
return self.launch_click_package(
self.click_package,
emulator_base=ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase)
| [
"[email protected]"
]
| |
6856af0940e6a623783187a44bd637734823dd8d | 3a6a9bd8cb6a977b77c9313282b769177bf1e0c6 | /src/analysis/internalAnalysis.py | 4b060a7d3fa51669d11a2314a4b3e327b4003bb0 | []
| no_license | PNNL-Comp-Mass-Spec/NMDC-Proteomics-Workflow | d34dd04ac1e32b7cfc77df3252ebec6a873f2ee2 | 1c8d04a6cdb497905a42e026663b39417ffdad02 | refs/heads/master | 2022-11-24T22:56:20.454860 | 2020-08-06T02:33:35 | 2020-08-06T02:33:35 | 266,201,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,000 | py | import pandas as pd
import numpy as np
import os
import re
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.optimize import minimize
__author__ = "Gao, Yuqian <[email protected]>"
__maintainer__ = "Anubhav <[email protected]>"
class downStreamAnalysis:
    """Post-processing of MS-GF+ peptide identification results: splits
    forward/reversed identifications per dataset, then optimizes FDR
    filtering cutoffs and emits summary tables/plots."""
    def __init__(self, parent_folder,):
        # Folder containing resultants_df.txt; it is concatenated directly
        # with the file name, so it must end with a path separator.
        self.parent_folder = parent_folder
def findproteinname(s):
'''Get Protein type
:param s:
:return:
'''
p1 = re.compile(r'^Contaminant_')
p2 = re.compile(r'^XXX_Contaminant_')
p3 = re.compile(r'^XXX_')
if p1.search(s) is not None:
return 'None'
elif p2.search(s) is not None:
return 'None'
elif p3.search(s) is not None:
return 'Reversed'
else: # the rest
return 'Forward'
def cleansequence(s):
'''
clean peptide sequence is the sequence without prefix and postfix but with oxidation
:param s:
:return:
'''
p = re.compile(r'\.(?P<cleanseq>[A-Z\*@#]+)\.')
m = p.search(s)
return m.group('cleanseq')
    def process_data(self):
        '''
        Read <parent_folder>resultants_df.txt, classify every identification
        as Forward/Reversed/contaminant, write the protein->peptide map and
        the dataset/job map, then export one forward and one reversed CSV
        per dataset under Results/Data/.

        NOTE(review): self.findproteinname / self.cleansequence are defined
        without self; passing the bound methods to apply() below only works
        if they are @staticmethods -- confirm the class declares them so.
        :return: None (all output goes to CSV files)
        '''
        # import table
        data = pd.read_table(self.parent_folder +'resultants_df.txt')[[
            'JobNum', 'Dataset_x', 'Dataset_y', 'Scan', 'Protein', 'Peptide', \
            'NTT', 'DelM', 'DelM_PPM', 'StatMomentsArea', 'PeakMaxIntensity', \
            'MSGFDB_SpecEValue', 'EValue', 'QValue', 'PepQValue']]
        # Add protein type, clean peptide sequence, specID
        data.rename(columns={'Dataset_x': 'Dataset', 'Dataset_y': 'Dataset_ID', 'JobNum': 'Job'}, inplace=True)
        data['Protein_Type'] = data['Protein'].apply(self.findproteinname)
        data['Clean Peptide Sequence'] = data['Peptide'].apply(self.cleansequence)
        # SpecID uniquely tags one spectrum: "<dataset id>_<scan number>".
        data["SpecID"] = data.apply(lambda row: str(row["Dataset_ID"]) + "_" + str(row["Scan"]), axis=1)
        # Save protein to peptide mapping of forward peptide identification
        df_mapping = data[data['Protein_Type'] == 'Forward'] [['Protein', 'Peptide', 'Clean Peptide Sequence']].copy()
        df_mapping.drop_duplicates(inplace=True)
        # Calculate the redundancy by clean peptide sequence and protein_type
        # (redundancy = number of distinct proteins sharing the sequence).
        df_new = df_mapping[['Protein', 'Clean Peptide Sequence']].copy()
        df_new.drop_duplicates(inplace=True)
        df_redundancy = df_new.groupby(['Clean Peptide Sequence']).count()
        df_redundancy.reset_index(inplace=True)
        df_redundancy.rename(columns={'Protein': 'Clean Peptide Sequence Redundancy'}, inplace=True)
        # merge redundancy to protein peptide map
        df_mapping = df_mapping.merge(df_redundancy, how='left', on=['Clean Peptide Sequence'])
        del df_redundancy
        # save to file
        df_mapping.to_csv("Results/protein_peptide_map.csv", index=False)
        del df_mapping
        # save the map of dataset id, dataset name and job id
        df_ids = data[['Dataset', 'Job', 'Dataset_ID']].copy()
        df_ids.drop_duplicates(inplace=True)
        df_ids.to_csv("Results/dataset_job_map.csv", index=False)
        del df_ids
        # Remove 'Protein', 'dataset_name', 'job_id'
        del data['Protein']
        del data['Dataset']
        del data['Job']
        data.drop_duplicates(inplace=True) # this is so important to remove any duplicated rows
        # Make a table for just forward peptides
        data_cleaned_forward = data[data['Protein_Type'] == 'Forward'].copy()
        del data_cleaned_forward['Protein_Type']
        data_cleaned_forward.drop_duplicates(inplace=True)
        # Make a table for just reversed peptides
        data_cleaned_reversed = data[data['Protein_Type'] == 'Reversed'].copy()
        del data_cleaned_reversed['Protein_Type']
        data_cleaned_reversed.drop_duplicates(inplace=True)
        # remove data file
        del data
        # Dataset list
        dataset_list = data_cleaned_forward['Dataset_ID'].unique().tolist()
        # Export forward and reverse for individual dataset
        for i in range(len(dataset_list)):
            df_ff = data_cleaned_forward[data_cleaned_forward['Dataset_ID'] == dataset_list[i]].copy()
            df_ff.to_csv("Results/Data/" + str(dataset_list[i]) + "_forward_peptide_identification.csv", \
                         index=False)
            df_rr = data_cleaned_reversed[data_cleaned_reversed['Dataset_ID'] == dataset_list[i]].copy()
            df_rr.to_csv("Results/Data/" + str(dataset_list[i]) + "_reversed_peptide_identification.csv", \
                         index=False)
            del df_ff
            del df_rr
        del data_cleaned_forward
        del data_cleaned_reversed
# optimize the filtering criteria and filter the data
    def parameter_optimization(dataset_ID):
        '''
        Optimize the filtering cutoffs (left/right DelM_PPM bounds around the
        fitted ppm peak, plus a log10 MSGFDB_SpecEValue cutoff) for one
        dataset, by maximizing forward peptide count while keeping the
        peptide-level FDR below ~5% (barrier constant 0.050001 below --
        NOTE(review): the in-code note mentions 0.01, but the constant is 5%).
        Saves a diagnostic plot and returns five summary DataFrames.

        NOTE(review): defined without self -- presumably intended as a
        @staticmethod like findproteinname/cleansequence; calling it on an
        instance would mis-bind dataset_ID. Also `DB` in the failure print
        below is undefined here (NameError if that branch runs), and
        sns.distplot is deprecated in modern seaborn -- confirm the pinned
        version.
        :param dataset_ID: dataset whose per-dataset CSVs were written by
            process_data into Results/Data/
        :return: (df_Metadata, df_SpecID_f, df_SpecID_r, df_SpectraCount,
            df_Intensity)
        '''
        data_f = pd.read_csv("Results/Data/" + str(dataset_ID) + \
                             "_forward_peptide_identification.csv")
        data_r = pd.read_csv("Results/Data/" + str(dataset_ID) + \
                             "_reversed_peptide_identification.csv")
        # Fit a 1-D data of DelM_PPM and get the peak ppm_shift
        ppm_shift_df = data_f[(data_f["DelM_PPM"] < 10) & (data_f["DelM_PPM"] > -10)].copy()
        ax = sns.distplot(ppm_shift_df["DelM_PPM"])
        # Peak of the fitted KDE curve = systematic mass-error center.
        ppm_shift = ax.get_lines()[0].get_xdata()[np.argmax(ax.get_lines()[0].get_ydata())]
        def PepFDR(Params):
            '''
            Objective to minimize: -f_pep scaled by a barrier that diverges
            as fdr_pep approaches the 5% threshold from below.
            :param Params: (delppm1, delppm2, log10_specprob)
            :return: objective value (lower is better)
            '''
            # function to minimize
            delppm1, delppm2, log10_specprob = Params # use log10 value so that it is managable for the computer
            ### The FDR function ###
            pep_total = data_r['Clean Peptide Sequence'].unique().size
            df_r = data_r[(data_r["DelM_PPM"] < ppm_shift + delppm1) & (data_r["DelM_PPM"] > ppm_shift - delppm2) & \
                          (data_r["MSGFDB_SpecEValue"] < 10 ** log10_specprob)].copy()
            df_f = data_f[(data_f["DelM_PPM"] < ppm_shift + delppm1) & (data_f["DelM_PPM"] > ppm_shift - delppm2) & \
                          (data_f["MSGFDB_SpecEValue"] < 10 ** log10_specprob)].copy()
            f_pep = df_f['Clean Peptide Sequence'].unique().size
            r_pep = df_r['Clean Peptide Sequence'].unique().size
            ### fdr_pep ###
            if (f_pep == 0) & (r_pep == 0):
                fdr_pep = 1
            else:
                fdr_pep = r_pep / (f_pep + r_pep)
            return 1 / (0.050001 - fdr_pep) * (-f_pep)
        # Note:
        # in this function, if fdr_pep>0.01, it returns +,
        # if fdr_pep < 0.01, it returns -, the closest to 0.01, the smaller the return
        # Add constraint
        def constraint1(Params):
            delppm1, delppm2, log10_specprob = Params # use log10 value so that it is managable for the computer
            return 20 - delppm1 - delppm2
        def constraint2(Params):
            delppm1, delppm2, log10_specprob = Params # use log10 value so that it is managable for the computer
            return delppm1 - 5
        def constraint3(Params):
            delppm1, delppm2, log10_specprob = Params # use log10 value so that it is managable for the computer
            return delppm2 - 5
        # COBYLA inequality constraints: window width <= 20 ppm, each side >= 5 ppm.
        con1 = {'type': 'ineq', 'fun': constraint1}
        con2 = {'type': 'ineq', 'fun': constraint2}
        con3 = {'type': 'ineq', 'fun': constraint3}
        # Miminize PepFDR
        initial_guess = [min(10, max(data_f["DelM_PPM"]) - ppm_shift), \
                         min(10, ppm_shift - min(data_f["DelM_PPM"])), \
                         -15]
        # print(dataset_ID, initial_guess)
        result = minimize(PepFDR, initial_guess, method='COBYLA', constraints=[con1, con2, con3])
        if result.success:
            fitted_params = result.x
        else:
            # Retry up to 40 times, widening the ppm window by 0.2 each side.
            n = 0
            while (result.success == False) & (n < 40):
                initial_guess = [initial_guess[0] + 0.2, initial_guess[1] + 0.2, initial_guess[2]]
                result = minimize(PepFDR, initial_guess, method='COBYLA', constraints=[con1, con2, con3])
                n = n + 1
            if result.success:
                fitted_params = result.x
            else:
                print("Failed optimization @ Dataset ID: %s, Database: %s" % (dataset_ID, DB))
                print(initial_guess)
                raise ValueError(result.message)
        ### plots ###
        plt.close('all')
        fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(7, 15))
        # plot the DelM_PPM
        sns.distplot(data_f["DelM_PPM"], label='forward', ax=axes[0], bins=200)
        axes[0].axvline(x=ppm_shift, color='r', linestyle='--')
        axes[0].axvline(x=ppm_shift - fitted_params[1], color='g', linestyle='-')
        axes[0].axvline(x=ppm_shift + fitted_params[0], color='g', linestyle='-')
        sns.distplot(data_r["DelM_PPM"], label='reversed', ax=axes[0], bins=200)
        axes[0].set_xlabel('DelM_PPM')
        axes[0].set_ylabel('Density')
        axes[0].set_title(r'left bound: %.2f; right bound: %.2f' % \
                          (ppm_shift - fitted_params[1], ppm_shift + fitted_params[0]))
        # plot MSGFDB_SpecEValue
        sns.distplot(data_f['MSGFDB_SpecEValue'], label='forward', kde=False, ax=axes[1], bins=5000)
        sns.distplot(data_r['MSGFDB_SpecEValue'], label='reversed', kde=False, ax=axes[1], bins=5000)
        axes[1].axvline(x=10 ** fitted_params[2], color='g', linestyle='-')
        axes[1].set_title('MSGFDB_SpecEValue: %.2e' % (10 ** fitted_params[2]))
        axes[1].set_xlabel("log(MSGFDB_SpecEValue)")
        axes[1].set_ylabel('Density')
        axes[1].set_xscale('log')
        # save
        plt.tight_layout()
        plt.savefig("Results/Plots/" + str(dataset_ID) + ".jpg")
        plt.close('all')
        # #Filter the data
        df_f = data_f[
            (data_f["DelM_PPM"] < ppm_shift + fitted_params[0]) & (data_f["DelM_PPM"] > ppm_shift - fitted_params[1]) & \
            (data_f["MSGFDB_SpecEValue"] < 10 ** fitted_params[2])].copy()
        del data_f
        df_r = data_r[
            (data_r["DelM_PPM"] < ppm_shift + fitted_params[0]) & (data_r["DelM_PPM"] > ppm_shift - fitted_params[1]) & \
            (data_r["MSGFDB_SpecEValue"] < 10 ** fitted_params[2])].copy()
        del data_r
        # Calculate FDR
        f_spec = df_f['SpecID'].unique().size # Modified on Apr23 to only count for unique dataset-scan
        r_spec = df_r['SpecID'].unique().size
        if (f_spec == 0) & (r_spec == 0):
            fdr_spec = 1
        else:
            fdr_spec = r_spec / (f_spec + r_spec)
        # Modified on Aug 02, 2019 to use Clean Peptide Sequence instead of peptide
        f_pep = df_f['Clean Peptide Sequence'].unique().size
        r_pep = df_r['Clean Peptide Sequence'].unique().size
        if (f_pep == 0) & (r_pep == 0):
            fdr_pep = 1
        else:
            fdr_pep = r_pep / (f_pep + r_pep)
        ######################################Dataset FDR_table################################################
        df_Metadata = pd.DataFrame({'PPM center': ppm_shift, 'PPM cutoff left': ppm_shift - fitted_params[1], \
                                    'PPM cutoff right': ppm_shift + fitted_params[0], \
                                    'SpecProb cutoff': 10 ** fitted_params[2], \
                                    'Spectra forward': f_spec, 'Spectra reverse': r_spec, \
                                    'Peptide forward': f_pep, 'Peptide reverse': r_pep, \
                                    'PeptideFDR': [fdr_pep], 'SepctraFDR': [fdr_spec], 'Dataset_ID': dataset_ID, \
                                    'Initial ppm cutoff left': ppm_shift - initial_guess[1], \
                                    'Initial ppm cutoff right': ppm_shift + initial_guess[0], \
                                    'Initial SpecProb cutoff': 10 ** initial_guess[2]})
        ######################################SpecID table################################################
        df_SpecID_f = df_f[['SpecID', 'Dataset_ID', 'Scan', 'Peptide', 'Clean Peptide Sequence', 'MSGFDB_SpecEValue', \
                            'StatMomentsArea', 'DelM_PPM']].copy()
        df_SpecID_f.drop_duplicates(inplace=True)
        df_SpecID_r = df_r[['SpecID', 'Dataset_ID', 'Scan', 'Peptide', 'Clean Peptide Sequence', 'MSGFDB_SpecEValue', \
                            'StatMomentsArea', 'DelM_PPM']].copy()
        df_SpecID_r.drop_duplicates(inplace=True)
        ######################################Spetra Count table##########################################
        df_spec = df_f[['Clean Peptide Sequence', 'SpecID']].copy()
        df_spec.drop_duplicates(inplace=True)
        df_SpectraCount = df_spec.groupby(['Clean Peptide Sequence']).size().rename('Peptide Spectra Count').reset_index()
        del df_spec
        df_SpectraCount['Dataset_ID'] = dataset_ID
        ######################################Spetra Count table##########################################
        df_Intensity = df_f[['Clean Peptide Sequence', 'StatMomentsArea']].groupby(['Clean Peptide Sequence']).max()
        df_Intensity.rename(columns={'StatMomentsArea': 'Peptide Peak Area'}, inplace=True)
        df_Intensity.reset_index(inplace=True)
        df_Intensity['Dataset_ID'] = dataset_ID
        return df_Metadata, df_SpecID_f, df_SpecID_r, df_SpectraCount, df_Intensity | [
"[email protected]"
]
| |
0a52fb0d6e2706f4e73db9d1d04ffd7f100566e8 | 9792f53e4c7d074a8a93cfcea8dd57c29260a3b9 | /Algorithms/Search/Insertion sort.py | 94676e628031f78dbc4fde2a304995d5cfa570e2 | []
| no_license | dpaniq/Python | 978a5e88d330461f958fd60dc05683401cc6c5b1 | bd900caa01433ab1537f709b16dec54d329b7964 | refs/heads/master | 2021-06-18T06:40:45.752002 | 2021-01-28T20:56:09 | 2021-01-28T20:56:09 | 161,350,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | # ---------------------------------------------------------------------------- #
# Insertion Sort
# ---------------------------------------------------------------------------- #
# Example:
# "18 17 20 16 26 8 30 2 3 13" --> "2 3 8 13 16 17 18 20 26 30"
# ---------------------------------------------------------------------------- #
# Read one line of whitespace-separated integers, insertion-sort them in
# ascending order, and print the sorted values space-separated.
A = list(map(int,input().split()))
for j in range(1, len(A)):
    # Insert A[j] into the already-sorted prefix A[0..j-1].
    key = A[j]
    i = j-1
    # Shift larger elements one slot right to open the gap for key.
    while (i >= 0 and A[i] > key):
        A[i + 1] = A[i]
        i = i - 1
    A[i+1] = key
print(*A)
# ---------------------------------------------------------------------------- #
# 13/02/2019
# ---------------------------------------------------------------------------- #
| [
"[email protected]"
]
| |
36a0c61b58eabb3924f714ea972c50421a530abb | 9f858317f2a5eb9a74de50c6be99bed260659764 | /model/push.py | 39f3f8dec7dafea7347886db85899b86932fc7b5 | []
| no_license | wonleing/device_supervisor | 9e883c32e139f557971e97db9bca3ec2cb5cd615 | fbb35e9f681cb622edee21b5ce24d43098b9ff1f | refs/heads/master | 2020-06-12T05:23:45.283957 | 2019-06-29T09:44:34 | 2019-06-29T09:44:34 | 194,206,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | # -*- coding: utf-8 -*-
from base import BaseModel
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import BigInteger, String, SmallInteger, Integer, Numeric, Text
from sqlalchemy.orm import relationship
class Push(BaseModel):
'移动端消息推送'
__tablename__ = 'push'
TYPE_NORMAL = 'normal'
STATUS_NORMAL = 'normal'
STATUS_COMPLETE = 'complete'
STATUS_FAIL = 'fail'
id = Column(BigInteger, primary_key=True, autoincrement=True)
type = Column(String(16), server_default='normal', nullable=False, index=True)
status = Column(String(16), server_default='normal', nullable=False, index=True)
create = Column(BigInteger, server_default='0', nullable=False, index=True)
last = Column(BigInteger, server_default='0', nullable=False)
device = Column(BigInteger, server_default='0', nullable=False)
title = Column(String(64), server_default='', nullable=False)
content = Column(Text, default='', nullable=False)
target = Column(Text, default='', nullable=False)
| [
"[email protected]"
]
| |
25ee4544a2ae3bcfe5ae0a616a0201ff6b536b46 | fd7be1f6e892013238331ddfc605b13f4f3bcd12 | /timeProj/timeApp/admin.py | 6377b2db06ee39442e8649e670a60c01f712e4c3 | []
| no_license | cs-fullstack-2019-spring/django-mini-project3-ChelsGreg | 6e1655113562d17d72e1382aa26cf19aae60e01d | c3af6667fa21b9f86ff3bdb078e42ae1451d600e | refs/heads/master | 2020-04-26T05:02:09.660809 | 2019-03-02T01:06:24 | 2019-03-02T01:06:24 | 173,320,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from django.contrib import admin
# FOR MODEL INFO TO REGISTER TO ADMIN SITE
from .models import TimeModel
admin.site.register(TimeModel) | [
"[email protected]"
]
| |
9d7d4f3297db002248f23cca87f96ae297756264 | 230c4bd55ce5dbadf4c7dccd7e46da34d88bc1d7 | /核心/二分搜索.py | b7685312d7725a178d7d9276880c156d84df91bc | []
| no_license | EmperorEuler/leetcode | c5b02a53bff129193aacabe339504423532888fa | 1d1fd5380a8281826c286c07b300869ae54552aa | refs/heads/main | 2023-08-20T02:45:01.772347 | 2021-10-12T06:22:24 | 2021-10-12T06:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
一个有序数组的搜索方案,
每次都使用对半分的方式, 如果搜索的目标值大于中位数, 则目标值应该在数组的右半边
如果小于中位数, 则目标值应该在数组的左半边
递归执行
我们假设该数值存在于数组中
"""
# 未完成
def binary_search(nums: [int], target: int) -> int:
mid = len(nums) // 2
if nums[mid] < target:
pass
return 0
| [
"[email protected]"
]
| |
8258490a8523ca5ddcc472087885ef1dc25aa68b | f2cc1dc87486833613fb83543c68784849fd7319 | /subtests/test_search_item.py | e063b884b30225a9c67e4b1ebc4d511584d3914c | []
| no_license | EduardoUrzuaBo/platziChallenge | cc953e2615653d575cf079bceea4fdcad75a4da0 | a8f06c98f14ee58db47848ec287dcd105b685dcb | refs/heads/master | 2023-07-29T10:44:16.469765 | 2021-09-05T20:14:42 | 2021-09-05T20:14:42 | 403,379,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from src.testproject.decorator import report_assertion_errors
"""
This pytest test was automatically generated by TestProject
Project: Test Projects
Package: TestProject.Generated.Tests.TestProjects
Test: Search Item
Generated by: Eduardo Andres Urzuas ([email protected])
Generated on 09/02/2021, 03:56:35
"""
@report_assertion_errors
def test_main(driver):
"""This test was auto generated from steps of the 'CreateAccount' test."""
# Test Parameters
SearchItem = "Books"
# 1. Click 'q'
q = driver.find_element(By.CSS_SELECTOR,
"#search")
q.click()
# 2. Type '{SearchItem}' in 'q'
q = driver.find_element(By.CSS_SELECTOR,
"#search")
q.send_keys(f'{SearchItem}')
# 3. Send 'ENTER' key(s)
ActionChains(driver).send_keys(Keys.ENTER).perform()
| [
"[email protected]"
]
| |
b69447446753f54f23b99027fe036a309efa174f | c3c7e5af8fad5ab88130deecc41b680e8b3846cb | /dashBoardCameras.py | 0ade97c8a0d7c2ba0af2688aa81007826a0d24b0 | []
| no_license | nikolaykm/FreeSSMPlugins | 25658191c2b6ba20b03c3d66513539a95977d53e | dd41d8e8ed1d5e1e480cba7a047abc4bd76ffb84 | refs/heads/master | 2021-07-24T05:16:47.999327 | 2017-11-02T14:17:13 | 2017-11-02T14:17:13 | 105,256,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,477 | py | import time
import os
import socket
import sys
import time
import subprocess as sub
import pygame,sys
import pygame.camera
import pygame.display
from pygame.locals import *
pygame.init()
pygame.display.init()
size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
white = (255,255,255)
def text_objects(text, font):
textSurface = font.render(text, True, white)
return textSurface, textSurface.get_rect()
def message_display(text,screen):
largeText = pygame.font.Font('freesansbold.ttf',40)
TextSurf, TextRect = text_objects(text, largeText)
TextRect.center = ((pygame.display.Info().current_w/2),(pygame.display.Info().current_h/2))
screen.blit(TextSurf, TextRect)
pygame.display.update()
TCP_IP = '127.0.0.1'
TCP_PORT = 12345
BUFFER_SIZE = 1024
isConnected=False
while not isConnected:
isConnected=False
try:
print >>sys.stderr, '\n Trying to connect to IP: %s, Port: %s' %(TCP_IP,TCP_PORT)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((TCP_IP, TCP_PORT))
isConnected=True
except:
print >>sys.stderr, '\n Unable to connect!'
isConnected=False
time.sleep(0.5)
if not isConnected:
continue
countEmptyData=0
while True:
if sock == None:
isConnected=False
break
print >>sys.stderr, '\nwaiting to receive message'
data = sock.recv(BUFFER_SIZE)
print >>sys.stderr, 'received %s bytes' % (len(data))
print >>sys.stderr, data
if len(data) == 0:
countEmptyData = countEmptyData + 1
if countEmptyData == 3:
isConnected=False
sock.close()
break;
if data and len(data) > 4:
countEmptyData = 0
dataArray = data.split(",@\x00")
print dataArray
for item in dataArray:
dataItem = item[4:]
ds = dataItem.split(",")
print ds
if len(ds) == 5:
if (ds[4] == "1" and ds[0] == "On"):
screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
message_display("Taking FRONT pic!", screen)
bashCommand = "fswebcam -r 640x480 image-" + str(time.time()) + ".jpg -S 20"
os.system(bashCommand)
sock.shutdown(socket.SHUT_RDWR)
sock.close()
sock = None
pygame.display.quit()
break
if (ds[4] == "2" and ds[0] == "On"):
screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
message_display("Taking FRONT video!", screen)
bashCommand = "timeout 60 avconv -f video4linux2 -r 30 -s 640x480 -i /dev/video0 test" + str(time.time()) + ".avi"
os.system(bashCommand)
sock.shutdown(socket.SHUT_RDWR)
sock.close()
sock = None
pygame.display.quit()
break
if (ds[4] == "0" and ds[0] == "On"):
screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
message_display("Taking BACK pic!", screen)
while True:
bashCommand="sudo rmmod uvcvideo; sudo modprobe uvcvideo; fswebcam -d /dev/video1 -r 1280x720 image-r-" + str(time.time()) + " -S 20; sudo rmmod uvcvideo; sudo modprobe uvcvideo;"
p = sub.Popen(bashCommand, stdout=sub.PIPE, stderr=sub.PIPE, shell=True);
output, errors = p.communicate()
print "Output='" + output + "'"
print "Errors='" + errors + "'"
if errors == None or errors == "" or not errors or "Writing" in errors:
break
time.sleep(2)
sock.shutdown(socket.SHUT_RDWR)
sock.close()
sock = None
pygame.display.quit()
break
if (ds[4] == "3" and ds[0] == "On"):
screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
message_display("Taking BACK video!", screen)
while True:
bashCommand = "sudo rmmod uvcvideo; sudo modprobe uvcvideo; sudo timeout 60 avconv -f video4linux2 -r 30 -s 640x480 -i /dev/video1 test-r" + str(time.time()) + ".avi; sudo rmmod uvcvideo; sudo modprobe uvcvideo;"
p = sub.Popen(bashCommand, stdout=sub.PIPE, stderr=sub.PIPE, shell=True);
output, errors = p.communicate()
print "Output='" + output + "'"
print "Errors='" + errors + "'"
if errors == None or errors == "" or not errors or "frame=" in errors:
break
sock.shutdown(socket.SHUT_RDWR)
sock.close()
sock = None
pygame.display.quit()
break
| [
"[email protected]"
]
| |
400de2d7c652c000bcc597a06856b2b46c0ce770 | 98f9f4160c2177ae227b4c6282f4a45a3a77831c | /tests/test_helpers.py | 3fdb6368eb850803fa0e7255459944155f4ac4ff | [
"Apache-2.0"
]
| permissive | allaway/challengeutils | a4ee699adf932ad5b4fcc0f25981f2752d3dd1f6 | 4031a22ee60b3afda8d324cf45861402a66c34ca | refs/heads/master | 2022-06-21T13:57:10.527995 | 2020-04-22T05:29:01 | 2020-04-22T05:29:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,804 | py | '''
Test challengeutils helper functions
'''
import mock
from mock import patch
import pytest
import synapseclient
from challengeutils import helpers, utils
SYN = mock.create_autospec(synapseclient.Synapse)
LAST_UPDATED_TIME = 1000000
START_TIME = 10000
DOCKER_SUB_ANNOTATION = {helpers.WORKFLOW_LAST_UPDATED_KEY: LAST_UPDATED_TIME,
helpers.WORKFLOW_START_KEY: START_TIME,
'objectId':"12345"}
EVALUATION_ID = 111
def test_noneintquota_kill_docker_submission_over_quota():
'''
ValueError is raised when none integer quota is passed in
'''
with pytest.raises(ValueError, match=r'quota must be an integer'):
helpers.kill_docker_submission_over_quota(SYN, EVALUATION_ID,
quota="foo")
def test_greaterthan0quota_kill_docker_submission_over_quota():
'''
ValueError is raised when quota of 0 or less is passed
'''
with pytest.raises(ValueError, match=r'quota must be larger than 0'):
helpers.kill_docker_submission_over_quota(SYN, EVALUATION_ID,
quota=0)
with pytest.raises(ValueError, match=r'quota must be larger than 0'):
helpers.kill_docker_submission_over_quota(SYN, EVALUATION_ID,
quota=-1)
def test_noquota_kill_docker_submission_over_quota():
'''
Time remaining annotation should not be added
if no quota is set, the default is sys.maxsize.
'''
with patch.object(utils, "evaluation_queue_query",
return_value=[DOCKER_SUB_ANNOTATION]) as patch_query,\
patch.object(SYN,
"getSubmissionStatus") as patch_getstatus,\
patch.object(utils,
"update_single_submission_status") as patch_update, \
patch.object(SYN, "store") as patch_synstore:
helpers.kill_docker_submission_over_quota(SYN, EVALUATION_ID)
query = ("select * from evaluation_{} where "
"status == 'EVALUATION_IN_PROGRESS'").format(EVALUATION_ID)
patch_query.assert_called_once_with(SYN, query)
patch_getstatus.assert_not_called()
patch_update.assert_not_called()
patch_synstore.assert_not_called()
def test_notdocker_kill_docker_submission_over_quota():
'''
Time remaining annotation should not be added
if a submission is not validated/scored by the workflowhook
the submission will not have the right annotations,
'''
with patch.object(utils, "evaluation_queue_query",
return_value=[{}]) as patch_query,\
patch.object(SYN,
"getSubmissionStatus") as patch_getstatus,\
patch.object(utils,
"update_single_submission_status") as patch_update, \
patch.object(SYN, "store") as patch_synstore:
helpers.kill_docker_submission_over_quota(SYN, EVALUATION_ID)
query = ("select * from evaluation_{} where "
"status == 'EVALUATION_IN_PROGRESS'").format(EVALUATION_ID)
patch_query.assert_called_once_with(SYN, query)
patch_getstatus.assert_not_called()
patch_update.assert_not_called()
patch_synstore.assert_not_called()
def test_underquota_kill_docker_submission_over_quota():
'''
Time remaining annotation should not be added
if the model is not over quota
'''
with patch.object(utils, "evaluation_queue_query",
return_value=[DOCKER_SUB_ANNOTATION]) as patch_query,\
patch.object(SYN,
"getSubmissionStatus") as patch_getstatus,\
patch.object(utils,
"update_single_submission_status") as patch_update, \
patch.object(SYN, "store") as patch_synstore:
# Set quota thats greater than the runtime
quota = LAST_UPDATED_TIME - START_TIME + 9000
helpers.kill_docker_submission_over_quota(SYN, EVALUATION_ID,
quota=quota)
query = ("select * from evaluation_{} where "
"status == 'EVALUATION_IN_PROGRESS'").format(EVALUATION_ID)
patch_query.assert_called_once_with(SYN, query)
patch_getstatus.assert_not_called()
patch_update.assert_not_called()
patch_synstore.assert_not_called()
def test_overquota_kill_docker_submission_over_quota():
'''
Time remaining annotation should not be added
if the model is over the quota
'''
sub_status = {"annotations": []}
quota_over_annotations = {helpers.TIME_REMAINING_KEY: 0}
with patch.object(utils, "evaluation_queue_query",
return_value=[DOCKER_SUB_ANNOTATION]) as patch_query,\
patch.object(SYN, "getSubmissionStatus",
return_value=sub_status) as patch_getstatus,\
patch.object(utils, "update_single_submission_status",
return_value=sub_status) as patch_update, \
patch.object(SYN, "store") as patch_synstore:
# Set quota thats lower than the runtime
quota = LAST_UPDATED_TIME - START_TIME - 9000
helpers.kill_docker_submission_over_quota(SYN, EVALUATION_ID,
quota=quota)
query = ("select * from evaluation_{} where "
"status == 'EVALUATION_IN_PROGRESS'").format(EVALUATION_ID)
patch_query.assert_called_once_with(SYN, query)
objectid = DOCKER_SUB_ANNOTATION['objectId']
patch_getstatus.assert_called_once_with(objectid)
patch_update.assert_called_once_with(sub_status,
quota_over_annotations)
patch_synstore.assert_called_once_with(sub_status)
| [
"[email protected]"
]
| |
81a1f399a5de423dac9c0b369aac9ac4b7e1e416 | a16bf10a2014294c01a132df45f11885243d33f7 | /basic_web/main/validator/business_unit_validator.py | edc3d6d04569bc21f7f62325763680ec08246a3a | [
"MIT"
]
| permissive | 1212091/python-learning | 57ace40d965c1eb5d9f08d316e4376a271c47a09 | 30fad66460daf73fd3961cf667ee25b91dee923d | refs/heads/master | 2020-07-03T07:07:22.795574 | 2019-08-19T06:41:46 | 2019-08-19T06:41:46 | 201,832,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from flask_inputs import Inputs
from wtforms.validators import InputRequired
class BusinessUnitInputs(Inputs):
json = {
'bu_name': [InputRequired()]
}
| [
"[email protected]"
]
| |
5336885dc7bbc8e014b743f74a1bb8997d0cd956 | 4a3ef804cc0fe5d5fa09869c89d9254698d49a8a | /src/BD/ImportCSV.py | fd37c02c1d913a5a8e0ac54462d1343c256b856b | []
| no_license | ripoul/Installations-Sportives | 1ee67fcd8519d868a2d292dbb0cd8b995df8d6bf | a01677e20af713b882b28308c81b61b6aeb9a31f | refs/heads/master | 2022-03-26T21:57:52.477323 | 2020-01-02T10:46:23 | 2020-01-02T10:46:23 | 80,708,625 | 0 | 1 | null | 2020-01-02T10:46:25 | 2017-02-02T09:00:19 | Python | UTF-8 | Python | false | false | 2,625 | py | #!/usr/bin/python
import csv
import sqlite3
con = sqlite3.connect('test.db')
cur = con.cursor()
# Create each of the 4 table
try:
cur.execute('''CREATE TABLE installation
(numero INTEGER PRIMARY KEY, nom TEXT, adresse TEXT, code_postal TEXT, ville TEXT, latitude REAL, longitude REAL)''')
cur.execute('''CREATE TABLE activite
(numero INTEGER PRIMARY KEY, nom TEXT)''')
cur.execute('''CREATE TABLE equipement
(numero INTEGER PRIMARY KEY, nom TEXT, numero_installation INTEGER)''')
cur.execute('''CREATE TABLE equipement_activite
(numero_equipement INTEGER , numero_activite INTEGER)''')
except sqlite3.Error:
print("Erreur lors de la création des tables\n")
#Insert value from csv to db
try:
#Open the csv we will get data from
installations_data = csv.reader(open("data/csv/installationsMod.csv", "r"))
activite_data = csv.reader(open("data/csv/activiteMod.csv", "r"))
equipement_data = csv.reader(open("data/csv/equipementsMod.csv", "r"))
activite_data2 = csv.reader(open("data/csv/activiteMod.csv", "r"))
#Prepare the statment to insert values into the database
sql = ('INSERT INTO installation (numero, nom, adresse, code_postal, ville, latitude, longitude) VALUES ' '(?, ?, ?, ?, ?, ?, ?)')
#Insert each row of data from the csv into the db
for row in installations_data:
cur.execute(sql, (row[1], row[0], row[6]+" "+row[7], row[4], row[2], 0.0, 0.0))
#Prepare the statment to insert values into the database
sql = ('INSERT OR IGNORE INTO activite (numero, nom) VALUES ' '(?, ?)')
#Statment used to check if the activity is already in the db
sql2 = ('SELECT * FROM activite WHERE numero IN (?)')
for row in activite_data:
#if the id and name of the activity is not set, ignore the row
if((str(row[4]) != "") & (str(row[5]) != "")):
cur.execute(sql2, (row[4],))
#if the statment executed above returned nothing
if cur.rowcount:
cur.execute(sql, (row[4], row[5]))
#Prepare the statment to insert values into the database
sql = ('INSERT INTO equipement (numero, nom, numero_installation) VALUES ' '(?, ?, ?)')
#Insert each row of data from the csv into the db
for row in equipement_data:
cur.execute(sql, (row[4], row[5], row[2]))
#Prepare the statment to insert values into the database
sql = ('INSERT INTO equipement_activite (numero_equipement, numero_activite) VALUES ' '(?, ?)')
#Insert each row of data from the csv into the db
for row in activite_data2:
cur.execute(sql, (row[2], row[4]))
except sqlite3.Error:
print("Erreur lors des insertions dans les tables\n")
con.commit()
cur.close()
con.close() | [
"[email protected]"
]
| |
915ad6371c13240ce4e7557bcfe258d20c9c8585 | 5fbdc90152b73e82040812de0d6376b13254bd4e | /blog/views.py | b22b0acef4ece25d632d4cba4103008c3257cd35 | []
| no_license | Brayanubigo/Mis-perris | 111287ce7426aa4e13e47355d7d7870bb812e092 | b095316aa36bdbbdede7133da949756da4a63acc | refs/heads/master | 2020-04-03T08:48:42.271583 | 2018-11-05T18:56:37 | 2018-11-05T18:56:37 | 155,145,329 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,255 | py | from django.shortcuts import render
from .models import Post
from .models import Perros_Rescatados
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from .forms import PostForm
from django.shortcuts import redirect
from .forms import Perro_RescatadoForm
from django.contrib.auth.decorators import login_required, permission_required
# Create your views here.
def post_list(request):
perro = Perros_Rescatados.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blog/post_list.html', {'perro': perro})
def post_list_user(request):
perro = Perros_Rescatados.objects.filter(estado = "Disponible")
return render(request, 'blog/post_list_user.html', {'perro': perro})
def inicio(request):
perro = Perros_Rescatados.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blog/inicio.html',)
if user.has_perm('blog.admin'):
return render(request, 'blog/post_list.html', {'perro': perro})
else:
return render(request, 'blog/post_list_user.html', {'perro': perro})
def registro(request):
return render(request, 'registration/registro.html',)
def post_detail(request, pk):
post = get_object_or_404(Perros_Rescatados, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
form = Perro_RescatadoForm()
return render(request, 'blog/post_edit.html', {'form': form})
def post_edit_addperro(request):
form = Perro_RescatadoForm()
return render(request, 'blog/post_edit_addperro.html', {'form': form})
def post_edit_addperro(request):
if request.method == "POST":
form = Perro_RescatadoForm(request.POST or None , request.FILES or None)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_list_user')
else:
form = Perro_RescatadoForm()
return render(request, 'blog/post_edit_addperro.html', {'form': form})
def post_new(request):
if request.method == "POST":
form = Perro_RescatadoForm(request.POST or None , request.FILES or None)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_list_user')
else:
form = Perro_RescatadoForm()
return render(request, 'blog/post_edit.html', {'form': form})
def login(request):
return render(request, 'registration/login.html', {})
# def post_edit(request, pk):
# perro = get_object_or_404(Perros_Rescatados, pk=pk)
# if request.method == "POST":
# form = PostForm(request.POST, instance=perro)
# if form.is_valid():
# perro = form.save(commit=False)
# perro.author = request.user
# perro.save()
# return redirect('post_detail', pk=perro.pk)
# else:
# form = PostForm(instance=perro)
# return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
post = get_object_or_404(Perros_Rescatados, pk=pk)
if request.method == "POST":
form = Perro_RescatadoForm(request.POST or None , request.FILES or None, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
# return redirect('detail_post_perro', pk=post.pk)
return redirect('post_list')
else:
form = Perro_RescatadoForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
def post_delete(request , pk):
perros=Perros_Rescatados.objects.get(pk=pk)
if request.method =="POST":
perros.delete()
return redirect ('post_list')
return render(request, 'blog/post_delete.html', {'perro': perros})
| [
"[email protected]"
]
| |
e810fdb9f3f5a5b3c10a3d2249126c55c78d8c89 | e4633dacc48d682e80674fec3c7830a2ba148d94 | /Finance data capstone project 2.py | 684325846c66541e7ac0516994fe6731aafaafd1 | []
| no_license | lakshaygola/Popular-ML-Algorithms | c1b32b9d7db7a2818aa6f7eb95a54c993c1fe2c5 | 847140bb720146880ea9061f141c656b71fb8cc5 | refs/heads/master | 2022-12-21T08:59:24.681474 | 2020-09-29T17:59:50 | 2020-09-29T17:59:50 | 299,692,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | #Finance Data Capstone Projrct 2
#In this project we import the data online of the banks from the time of economic crisis
from pandas_datareader import data , wb
import pandas as pd
import numpy as np
import datetime
import seaborn as sns
import matplotlib.pyplot as plt
import plotly
import cufflinks as cf
cf.go_offline()
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
from plotly.graph_objs import *
init_notebook_mode()
start = datetime.datetime(2006,1,1)
end = datetime.datetime(2016,1,1)
#Bank of america
BAC = data.DataReader('BAC', 'yahoo', start = start, end = end)
#CitiGroup
C = data.DataReader('C', 'yahoo', start = start, end = end)
#Goldman Sachs
GS = data.DataReader('GS', 'yahoo', start = start, end = end)
#JPMorgan Chase
JPM = data.DataReader('JPM', 'yahoo', start = start, end = end)
#Morgan Stanley
MS = data.DataReader('MS', 'yahoo', start = start, end = end)
#Wells Fargo
WFC = data.DataReader('WFC' , 'yahoo', start , end)
#list of ticker symbols
tickers = ['BAC' , 'C' , 'GS' , 'JPM' , 'MS' , 'WFC']
#concatenating all the dataframes
bank_stocks = pd.concat([BAC , C , GS , JPM , MS , WFC] , axis = 1 , keys = tickers)
#setting the columns names
bank_stocks.columns.names = ['Banks Tickers' , 'Stock Info']
bank_stocks.head()
#grouping by banks names
for tick in tickers:
print(tick , bank_stocks['BAC']['Close'].max())
#or
bank_stocks.xs(key = 'Close' , axis = 1 , level = 'Stock Info').max()
#Making new dataframe called return
returns = pd.DataFrame()
#calculating the percentage change on each rows in bank_stocks data
for tick in tickers:
returns[tick + ' Return'] = bank_stocks[tick]['Close'].pct_change()
#pair plot
sns.pairplot(data = returns[1:])
plt.tight_layout()
#best and worst dates for the particular banks in the return dataframe
returns.idxmin()
returns.idxmax()
#standard deviation of the return data frame
returns.std()
#standard deviation of the return data in 2015
returns.loc['2015-01-01':'2015-12-31'].std()
#Distplot of the 2015 returns for Morgan Stanley
sns.distplot(returns.loc['2015-01-01' : '2015-12-31']['MS Return'] ,bins = 30 , color='green')
sns.set_style('whitegrid')
#2008 citigroup distplot
sns.distplot(returns.loc['2008-01-01':'2008-12-31']['C Return'] , bins = 50 , color = 'Red')
#line plot for each bank
#Using for loop
for tick in tickers:
bank_stocks[tick]['Close'].plot(label = tick)
plt.legend()
#line plot for each bank
#Using .xs method
bank_stocks.xs(key ="Close" , level = 'Stock Info' , axis = 1).plot()
#Using plotly
bank_stocks.xs(key = 'Close' , level = 'Stock Info' , axis = 1).iplot()
#Ploting the rolling average of BAC for the year 2008
bank_stocks['BAC']['Close'].loc['2008-01-01':'2009-01-01'].rolling(window = 30).mean().plot()
bank_stocks['BAC']['Close'].loc['2008-01-01':'2009-01-01'].plot()
#Heat map of the close columns
close_corr = bank_stocks.xs(key = 'Close' , axis = 1 , level = 'Stock Info').corr()
sns.heatmap(close_corr,annot = True)
#Cluster map
sns.clustermap(close_corr , annot = True)
#Heat map using iplot
close_corr.iplot(kind = 'heatmap')
#Candle plot of bank of america from 2015 to 2016
bank_stocks['BAC'][['Open','High','Low','Close']].loc['2015-01-01':'2016-01-01'].iplot(kind = 'candle')
#Simple moving averages plot of the morgan stanley for the year 2015
bank_stocks['MS'].loc['2015-01-01':'2015-12-31'].ta_plot(study = 'sma')
#Bollinger band plot for the Bank of america for the year 2015
bank_stocks['BAC'].loc['2015-01-01':'2016-01-01'].ta_plot(study='boll')
| [
"[email protected]"
]
| |
6fcb4d9f1f315a703aa7ce94c4508fd6a4975439 | 468c618179bfbc9e331c66762a2083527455d7d2 | /Launch.py | 100271bb9f4e87adb9059ce674944ab4419227b2 | []
| no_license | sodino/xml2plist | 31798b4af066eb2aaf22af73c902b08e4989cb42 | 51e07c2645f95050e99253d85583510b93407397 | refs/heads/master | 2023-07-08T21:50:14.651686 | 2021-08-11T07:23:40 | 2021-08-11T07:23:40 | 393,859,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,974 | py | from xml.dom.minidom import parse
import xml.dom.minidom
import os
import shutil
import PlistConst
import PlistIO
## 寻找老素材的地址
# dir_old = "/Users/sodino/NativeProjects/textconfiguration/old/"
dir_old = "/Users/sodino/IdeaProjects/xml2plist/test/"
## 转换为plist后的存储路径,要以 / 结尾
dir_plist = "/Users/sodino/IdeaProjects/xml2plist/new_plist/"
XML_NAME = "TextBubbleInfo.xml"
## Collects every xml tag name encountered across all parsed files
tags = {}
## Collects every <format> tag value encountered across all parsed files
formats = {}
def xml_node_value(node):
    """Return the text of *node*'s first child, or '' when it has no children."""
    if node.childNodes.length > 0:
        return node.childNodes[0].data
    return ''
def xml_elements_value(elements):
    """Return the text content of the first element in *elements*."""
    first = elements[0]
    return first.childNodes[0].data
class XmlFrameRect:
    """Rectangle parsed from an xml <frameRect> value such as '{{305,25},{105,575}}'.

    Coordinates are kept as the original decimal strings (no numeric conversion).
    """

    def __init__(self, content):
        # Keep the raw xml text, then strip the braces and split out the
        # four comma-separated values: x, y, width, height.
        self.content = content
        cleaned = content.translate(str.maketrans('', '', '{}'))
        parts = cleaned.split(',')
        self.x = parts[0]
        self.y = parts[1]
        self.width = parts[2]
        self.height = parts[3]

    def __str__(self):
        return f"[x,y={self.x},{self.y}, w,h={self.width},{self.height}]"

    def plist_content(self):
        """Render this rectangle as a plist 'Rectangle' key/value entry."""
        rect_text = ",".join((self.x, self.y, self.width, self.height))
        return PlistConst.key_string("Rectangle", rect_text)
class XmlTextPiece:
    """One <textPiece> entry from the legacy xml, plus its plist rendering.

    Every attribute mirrors the xml tag of the same name verbatim; tags that
    never appeared in the xml stay as "".
    """

    def __init__(self):
        # All fields default to the empty string; they are filled from the xml.
        # frameRect is later replaced by an XmlFrameRect instance when parsed.
        for attr in ("type", "text", "format", "language", "caseString",
                     "editable", "color", "isBold", "isItalic",
                     "autoLineBreak", "isVerticalText", "align",
                     "verticalAlign", "font", "showShadow", "shadowColor",
                     "shadowOffset", "frameRect", "maxTextHeight"):
            setattr(self, attr, "")

    def __str__(self):
        return (f"type={self.type} text='{self.text}' format='{self.format}' "
                f"color='{self.color}' frameRect:{self.frameRect}")

    def text_color_orgba(self):
        """Convert '#RRGGBBAA' into the plist ORGBA string '100, R, G, B, A'.

        Returns "" when the color is empty or malformed.  (Empty colors only
        occur on a few disabled legacy assets: 10130375, 10110488, 10110434.)
        """
        if not self.color or len(self.color) != 9:
            return ""
        # Split '#RRGGBBAA' into its four hex channels and decode to decimal.
        channels = [int(self.color[i:i + 2], 16) for i in (1, 3, 5, 7)]
        # Leading 100 is the default opacity component in the plist format.
        return "100, %d, %d, %d, %d" % tuple(channels)

    def text_input_flag(self):
        """Stub kept for interface compatibility; always yields an empty flag."""
        return ""

    def text_horizontal_and_justify(self):
        """Build the plist 'Horizontal' and 'Justify' entries from the alignment fields.

        xml align/verticalAlign values: '0' = left/top, '1' = center, '2' = right/bottom.
        """
        ALIGN_LEFT, ALIGN_HCENTER, ALIGN_RIGHT = 0x01, 0x02, 0x04
        ALIGN_TOP, ALIGN_VCENTER, ALIGN_BOTTOM = 0x10, 0x20, 0x40

        if self.isVerticalText.lower() == 'true':
            # Vertical ("couplet"-style) text: verticalAlign picks top/center/bottom.
            horizontal = 0
            vertical_map = {'0': ALIGN_TOP, '2': ALIGN_BOTTOM}
            justify = ALIGN_HCENTER | vertical_map.get(self.verticalAlign,
                                                       ALIGN_VCENTER)
        else:
            # Horizontal text: align picks left/center/right.
            horizontal = 1
            horizontal_map = {'0': ALIGN_LEFT, '2': ALIGN_RIGHT}
            justify = ALIGN_VCENTER | horizontal_map.get(self.align,
                                                         ALIGN_HCENTER)

        return (PlistConst.key_integer("Horizontal", horizontal)
                + PlistConst.key_integer("Justify", justify))

    def generate_plist_line(self):
        """Render this text piece as one plist <dict> entry for the 'Lines' array."""
        parts = [
            PlistConst.key_integer("RenderMode", "0"),
            PlistConst.key_string("TextString", self.text),
            PlistConst.key_string("FontLibrary", self.font),
            PlistConst.key_string("Size", self.maxTextHeight),
            PlistConst.key_string("ORGBA", self.text_color_orgba()),
            self.frameRect.plist_content(),
            self.text_horizontal_and_justify(),
            PlistConst.key_string("Bold",
                                  "1" if self.isBold.lower() == 'true' else "0"),
            PlistConst.key_string("Italic",
                                  "1" if self.isItalic.lower() == "true" else "0"),
            # Wrap defaults to on; only an explicit "false" disables line breaking.
            PlistConst.key_integer(
                "Wrap", 0 if self.autoLineBreak.lower() == "false" else 1),
        ]

        editable = 1  # pieces with an input-format template are not user editable
        input_flag = PlistConst.plist_input_flag(self.format, self.caseString,
                                                 self.language)
        if len(input_flag) > 0:
            parts.append(PlistConst.key_string("InputFlag", input_flag))
            editable = 0

        # The fields below have no counterpart in the legacy xml; emit the
        # converter's fixed defaults.
        for key, value in (("Underline", 0), ("StrikeThrough", 0),
                           ("Spacing", 0), ("LineSpacing", 0),
                           ("LeftToRight", 1), ("Shrink", 1)):
            parts.append(PlistConst.key_integer(key, value))

        parts.append(
            PlistConst.const_plist_LayerStyleConfigs.format(editable=editable))
        result = PlistConst.key_dict(dict="".join(parts))
        # print("test --> line : %s" %result)
        return result
class TextXML:
def __init__(self, xml_path):
## 当前xml的绝对路径
self._xml_path = xml_path
self.textPieceArray = []
self.resId = ""
self.width = ""
self.height = ""
self.backgroundImagePath= ""
self.mirrorReverse = ""
def __str__(self):
return "resId(%s) [w, h]=[%s, %s] bg=%s" %(self.resId, self.width, self.height, self.backgroundImagePath)
def fonts(self):
values = ""
dict = {}
for piece in self.textPieceArray:
font = piece.font
if font in dict:
continue
suffix = ""
if len(dict) > 0:
suffix = ","
values = values + suffix + font
dict[piece.font] = ""
return values
## 收集所有关注的xml字段名或字段值
def collect_all_infos(self, root_element):
list_all_nodes = [root_element]
while len(list_all_nodes) > 0:
first_node = list_all_nodes[0]
list_all_nodes.remove(first_node)
if first_node.childNodes.length > 0 :
list_all_nodes.extend(first_node.childNodes)
if first_node.nodeType == xml.dom.Node.TEXT_NODE:
continue
tags[first_node.nodeName] = ""
if first_node.nodeName == "format":
if first_node.childNodes.length > 0:
## 获取xml字段名对应的字段值
format_value = xml_node_value(first_node)
formats[format_value] = ""
def read_textPiece(self, textPiece):
nodes = textPiece.childNodes
xPiece = XmlTextPiece()
for node in nodes:
if node.nodeType == xml.dom.Node.TEXT_NODE:
continue
if 'type' == node.tagName:
xPiece.type = xml_node_value(node)
elif 'text' == node.tagName:
xPiece.text = xml_node_value(node)
elif 'format' == node.tagName:
xPiece.format = xml_node_value(node)
elif 'language' == node.tagName:
xPiece.language = xml_node_value(node)
elif 'caseString' == node.tagName:
xPiece.caseString = xml_node_value(node)
elif 'editable' == node.tagName:
xPiece.editable = xml_node_value(node)
elif 'color' == node.tagName:
xPiece.color = xml_node_value(node)
elif 'isBold' == node.tagName:
xPiece.isBold = xml_node_value(node)
elif 'isItalic' == node.tagName:
xPiece.isItalic = xml_node_value(node)
elif 'autoLineBreak' == node.tagName:
xPiece.autoLineBreak = xml_node_value(node)
elif 'isVerticalText' == node.tagName:
xPiece.isVerticalText = xml_node_value(node)
elif 'align' == node.tagName:
xPiece.align = xml_node_value(node)
elif 'verticalAlign' == node.tagName:
xPiece.verticalAlign = xml_node_value(node)
elif 'font' == node.tagName:
xPiece.font = xml_node_value(node)
elif 'showShadow' == node.tagName:
xPiece.showShadow = xml_node_value(node)
elif 'shadowColor' == node.tagName:
xPiece.shadowColor = xml_node_value(node)
elif 'shadowOffset' == node.tagName:
xPiece.shadowOffset = xml_node_value(node)
elif 'maxTextHeight' == node.tagName:
xPiece.maxTextHeight = xml_node_value(node)
elif 'frameRect' == node.tagName:
tmpValue = xml_node_value(node)
xPiece.frameRect = XmlFrameRect(tmpValue)
self.textPieceArray.append(xPiece)
def read_textPieceArray(self, textPieceArray):
array = textPieceArray[0]
for textPiece in array.childNodes:
if textPiece.nodeType == xml.dom.Node.TEXT_NODE:
continue
self.read_textPiece(textPiece)
def read_tag_values(self, root_element):
e_resId = root_element.getElementsByTagName("resId")
self.resId = xml_elements_value(e_resId)
e_width = root_element.getElementsByTagName("width")
self.width = xml_elements_value(e_width)
e_height = root_element.getElementsByTagName("height")
self.height = xml_elements_value(e_height)
e_backgroundImagePath = root_element.getElementsByTagName("backgroundImagePath")
self.backgroundImagePath = xml_elements_value(e_backgroundImagePath)
e_mirrorReverse = root_element.getElementsByTagName("mirrorReverse")
self.mirrorReverse = xml_elements_value(e_mirrorReverse)
e_textPieceArray = root_element.getElementsByTagName("textPieceArray")
self.read_textPieceArray(e_textPieceArray)
    def read_xml(self):
        """Parse the xml file at self._xml_path and populate this object."""
        dom_tree = xml.dom.minidom.parse(self._xml_path)
        root_element = dom_tree.documentElement
        # First pass collects tag/format statistics, second extracts values.
        self.collect_all_infos(root_element)
        self.read_tag_values(root_element)
class TextPlist:
    """Renders a parsed text xml object into plist configuration content."""

    def __init__(self, xml):
        self.xml = xml  # parsed TextXML instance

    def generate_plist_lines(self):
        """Concatenate every text piece's plist line into a "Lines" array."""
        joined = "".join(piece.generate_plist_line()
                         for piece in self.xml.textPieceArray)
        return PlistConst.key_array("Lines", joined)

    def generate_plist_content(self):
        """Fill the common text template with size, background and line data."""
        return PlistConst.const_ar_common_text_v2.format(
            width=self.xml.width,
            height=self.xml.height,
            bg_file_name=self.xml.backgroundImagePath,
            xml_text_piece_array_2_plist_lines=self.generate_plist_lines(),
        )
# 一个文字素材包解压后的目录结构
# AUGUST
# |-- configuration.plist
# |-- ar
# |-- configuration.plist
# |-- res
# |-- bg.plist
# |-- bg.png ## 缩略图,对文字需求来说可以省略
# |-- arp
# |-- bg.png
class Converter:
    """Converts legacy text-xml resource packages into zipped plist packages.

    Relies on the module-level names `dir_old` (input root), `dir_plist`
    (output root), `XML_NAME` (xml file name) and the PlistConst / PlistIO /
    TextXML / TextPlist helpers defined elsewhere in this module.
    """

    ## Clear the output directory.
    def clear_target_directory(self):
        """Delete any previous output tree and recreate the plist output root."""
        if os.path.exists(dir_plist):
            shutil.rmtree(dir_plist)
            os.makedirs(dir_plist)
            print("clear and recreate target directory : %s" % dir_plist)
        else:
            os.makedirs(dir_plist)
            print("create a new target directory : %s" % dir_plist)

    def create_target_directory(self, xml_path):
        """Create and return the per-resource output directory.

        Mirrors the source layout: the input root and the xml file name are
        stripped from `xml_path` and the remainder appended to the output root.
        """
        suffix = xml_path.replace(dir_old, "").replace(XML_NAME, "")
        new_dir_path = dir_plist + suffix
        os.makedirs(new_dir_path)
        return new_dir_path

    ## Create the first configuration.plist in the package root directory.
    def create_root_plist(self, dir, text_xml):
        """Write the package-root configuration.plist (canvas size + fonts)."""
        template = PlistConst.const_root_plist
        new_content = template.format(width=text_xml.width,
                                      height=text_xml.height,
                                      fonts=text_xml.fonts())
        file_plist_path = os.path.join(dir, PlistConst.configuration_plist)
        PlistIO.write(file_plist_path, new_content)

    def create_bg_plist(self, dir, text_xml):
        """Write the bg.plist that points at the background image."""
        template = PlistConst.const_bg_plist
        new_content = template.format(bg_file_name=text_xml.backgroundImagePath)
        file_plist_path = os.path.join(dir, PlistConst.bg_plist)
        PlistIO.write(file_plist_path, new_content)

    def copy_bg_file(self, dir, text_xml):
        """Copy the background image into the package's ar/res/arp directory."""
        xml_dir = os.path.dirname(text_xml._xml_path)
        file_bg_path = os.path.join(xml_dir, text_xml.backgroundImagePath)
        if not os.path.exists(file_bg_path):
            # Report and fall through: the copy below still fails loudly if
            # the file really is missing (preserves original behavior).
            print("Error!! Can't find bg file. res<%s> bg=%s" %(text_xml.resId, text_xml.backgroundImagePath))
        new_bg_path = os.path.join(dir, os.path.join(PlistConst.ar_res_arp, text_xml.backgroundImagePath))
        PlistIO.copy_file(file_bg_path, new_bg_path)

    def create_text_plist(self, dir, text_xml):
        """Write ar/configuration.plist describing all text pieces."""
        file_plist_path = os.path.join(dir, os.path.join("ar", PlistConst.configuration_plist))
        text_plist = TextPlist(text_xml)
        new_content = text_plist.generate_plist_content()
        PlistIO.write(file_plist_path, new_content)

    def convert2plist(self, text_xml):
        """Produce the full plist package for one parsed xml; return its dir."""
        target_dir_path = self.create_target_directory(text_xml._xml_path)
        self.create_root_plist(target_dir_path, text_xml)
        self.create_bg_plist(target_dir_path, text_xml)
        self.copy_bg_file(target_dir_path, text_xml)
        self.create_text_plist(target_dir_path, text_xml)
        return target_dir_path

    def findAllFile(self, base_path):
        """Yield the path of every file under base_path ending in XML_NAME."""
        for root, ds, fs in os.walk(base_path):
            for f in fs:
                if f.endswith(XML_NAME):
                    yield os.path.join(root, f)

    def start(self):
        """Convert every xml found under dir_old and zip each package."""
        for xml_path in self.findAllFile(dir_old):
            print("start : " + xml_path)
            text_xml = TextXML(xml_path)
            text_xml.read_xml()
            # Bug fix: call through `self` instead of the module-level
            # `converter` global so Converter instances work standalone.
            plist_dir_path = self.convert2plist(text_xml)
            zip_dir = os.path.abspath(os.path.join(plist_dir_path, ".."))
            zip_path = os.path.join(zip_dir, text_xml.resId + ".zip")
            PlistIO.zip_dir(plist_dir_path, zip_path)
            print(text_xml.resId + " end")
if __name__ == '__main__':
    converter = Converter()
    ## Clear and recreate the output directory before converting.
    converter.clear_target_directory()
    converter.start()
    # `tags` and `formats` are module-level dicts filled while parsing the
    # xml files; printing them lists every tag name / format value seen.
    print("xml all tagNames are : " + str(tags.keys()))
    print("xml all formats are : " + str(formats.keys()))
| [
"[email protected]"
]
| |
0b7e8882309c1a65ca918630f819051eec8037d6 | 7a5fac541003c065a791d0563c73635252f46182 | /dogcare/settings.py | 76d3a5a70b3936c3676a275f91dd829e5a555faa | []
| no_license | Earnn/GroomimgCare | 5fd2329a61dd23eb1890b8542c522270d089ccf3 | b378e7758a083bafd0cdad116cda1779b7d377b0 | refs/heads/master | 2020-12-30T13:27:58.561883 | 2017-05-23T00:28:12 | 2017-05-23T00:28:12 | 91,216,869 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,515 | py | """
Django settings for dogcare project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ede!sl++8snhe_)(owa0$82=f!3#xq9m72wuht4$nbqqh*tt-+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
GRAPH_MODELS = {
'all_applications': True,
'group_models': True,
}
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'social_django', # <--
'authapp'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware', # <--
]
ROOT_URLCONF = 'dogcare.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends', # <--
'social_django.context_processors.login_redirect', # <--
],
},
},
]
WSGI_APPLICATION = 'dogcare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Authentication backends for social-auth-app-django.  All social backends
# live under `social_core.backends` in the current package; the Google entry
# previously used the legacy `social.backends` path from the deprecated
# python-social-auth package, which would fail to import alongside the
# `social_core` backends used everywhere else in this file.
AUTHENTICATION_BACKENDS = (
    'social_core.backends.github.GithubOAuth2',
    'social_core.backends.twitter.TwitterOAuth',
    'social_core.backends.facebook.FacebookOAuth2',
    'social_core.backends.google.GoogleOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Extra OAuth scopes requested from Facebook at login time.
FACEBOOK_EXTENDED_PERMISSIONS = ['email']
# Profile fields fetched from the Facebook Graph API after authentication.
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
    'fields': 'id, name, email, age_range',
}
SOCIAL_AUTH_FACEBOOK_AUTH_EXTRA_ARGUMENTS = {
    # 'auth_type': 'reauthenticate',
}
# SECURITY: OAuth app credentials are hardcoded below.  They should be moved
# to environment variables (or a secrets store) and rotated, since anything
# committed here must be treated as compromised.
SOCIAL_AUTH_FACEBOOK_KEY = '124913428055452' # App ID
SOCIAL_AUTH_FACEBOOK_SECRET = '3301c929608c64acf68c9ed550d9c0d8' # App Secret
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '925001056036-ni7jdib9819qm1jvt4o5jm6rs5depu2u.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '_8zT3J2VWZwFpoeyGS9hvjGs'
# Named URL patterns used by the auth flow.
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
| [
"EARN"
]
| EARN |
c0375b7a96458749568e89d575cc2ddde6fa8900 | ac7e3759d8e4a220b080e09c4e529198f90b29e7 | /pyCausalFS/LSL/MBs/CMB/CMB_subroutine.py | 5f9d55f0276fb76cd84dab5e45c849d2d96f778f | []
| no_license | zhipengwang30105760/pyCausalFS | 7e2f9c0979320ef557be9815b8313c85046534ee | d5d4c2f898ccb0ff424ffdbefe24d2c6204c4f4a | refs/heads/master | 2022-11-25T08:49:34.318558 | 2020-08-07T14:51:02 | 2020-08-07T14:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | # _*_code:utf_8_*_
#!/usr/bin/env python
# date:2019/9/5 20:41
import numpy as np
from LSL.MBs.CMB.HITONPC import HITON_PC
from LSL.MBs.CMB.HITONMB import HITON_MB
from LSL.MBs.CMB.CausalSearch import CausalSearch
def CMB_subroutine(Data, T, alaph, IDT, already_calculated_MB, all_MB, is_discrete):
    """One CMB pass classifying the causal identities of T's PC set.

    Runs HITON-PC to get T's parent/child candidate set (PCT), classifies
    each candidate via CausalSearch into the IDT matrix, re-tests the
    ambiguous (idT=4) pairs against other variables' Markov blankets, then
    promotes candidates that v-structure with two known parents.
    Returns (IDT, idT3, idT3_count, PCT).
    """
    # already_calculated_MB[T] = 0
    Z = []
    idT3 = []
    idT3_count = 0
    idT4 = []
    idT4_count = 0
    # Step 1: candidate parents/children of T, then an initial identity pass
    # with an empty conditioning set Z.
    PCT, _, _ = HITON_PC(Data, T, alaph, is_discrete)
    IDT, idT3, idT3_count, idT4, idT4_count = CausalSearch(
        Data, T, PCT, Z, IDT, alaph, idT3, idT3_count, idT4, idT4_count, is_discrete)
    # step 2:further test variables with idT=4
    for i in range(idT4_count):
        x = idT4[i][0]
        y = idT4[i][1]
        # Lazily compute x's Markov blanket once; flag cleared so it is
        # not recomputed on later visits.
        if already_calculated_MB[x] == 1:
            all_MB[x], _ = HITON_MB(Data, x, alaph, is_discrete)
            already_calculated_MB[x] = 0
        Z = []
        # Condition on MB(x) minus the pair under test.
        if x in all_MB.keys():
            Z = [i for i in all_MB[x] if i != T and i != y]
        IDT, idT3, idT3_count, idT4, idT4_count = CausalSearch(
            Data, T, PCT, Z, IDT, alaph, idT3, idT3_count, idT4, idT4_count, is_discrete)
        # Stop once no ambiguous (code 4) entries remain anywhere in IDT.
        if 4 not in IDT:
            break
    # Variables already identified as parents of T (code 1 in IDT's row T).
    parents = [idx for idx, i in enumerate(IDT[T]) if i == 1]
    for i in range(len(parents)):
        x = parents[i]
        for j in range(len(parents)):
            if j != i:
                y = parents[j]
                # If z is idT4-paired with both parents x and y, mark z as a
                # parent of T as well.
                for k in range(idT4_count):
                    if idT4[k][0] == x:
                        z = idT4[k][1]
                        for l in range(idT4_count):
                            if l != k:
                                if (idT4[l][0] == y and idT4[l][1] == z) or (idT4[l][0] == z and idT4[l][1] == y):
                                    IDT[T, z] = 1
                    elif idT4[k][1] == x:
                        z = idT4[k][0]
                        for l in range(idT4_count):
                            if l != k:
                                if (idT4[l][0] == y and idT4[l][1] == z) or (idT4[l][0] == z and idT4[l][1] == y):
                                    IDT[T, z] = 1
    print(IDT)
    # Remaining ambiguous entries are downgraded to code 3 before returning.
    for idx, i in enumerate(IDT[T]):
        if i == 4:
            IDT[T, idx] = 3
    return IDT, idT3, idT3_count, PCT
    # step 3:resolve variable set with idT=3
| [
"[email protected]"
]
| |
587fb249137b4f65c4e8e4e5ea82f45817a0bc12 | 64aca3c82ba752e3e48199440f4c252dd241dd93 | /ratu/scheduler.py | 0b211a6ce21210848f8fc4a6c470886dae1688c7 | []
| no_license | criticus/Data_converter | 5b8a0a1c4c9438f9b3420ba04f4eff617d3f4b85 | f38f9b7930af38e8eb807e51f26fa05d742dc62c | refs/heads/master | 2022-04-10T22:30:50.601822 | 2020-03-18T15:31:34 | 2020-03-18T15:31:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import schedule
import time
from datetime import datetime
def action():
    """Print the current wall-clock time as HH:MM:SS (scheduled job body)."""
    # Capture the timestamp once; the original bound `now` but then ignored
    # it and called datetime.now() a second time inside the print.
    now = datetime.now()
    print(now.strftime("%H:%M:%S"))
# High-frequency variant kept for reference:
# schedule.every(1).seconds.do(action)
# Run `action` once a day at each of the two fixed times below.
schedule.every().day.at("13:34").do(action)
schedule.every().day.at("13:35").do(action)
# Scheduler loop: check for due jobs once per second, forever.
while 1:
    schedule.run_pending()
    time.sleep(1)
"[email protected]"
]
| |
6218077c77b98d87fec6d77f57a385cd718a18b6 | 9d8a8fb13e17c2644e1bdba68351603e4745f717 | /labeling_tpchange.py | 20e8f51318bf0402f80e07cb75a7ff9b2f48a25b | []
| no_license | Nunpuking/NI_SFC | 2d3fe3179f0ee921e6f937a7674142f20c5e0304 | 34c23c27f5e48c6bda1805e20fe91becf6c6ee18 | refs/heads/master | 2021-05-20T22:52:21.937221 | 2020-04-02T12:07:27 | 2020-04-02T12:07:27 | 252,443,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,856 | py | from topology import Topology
import index_dict as idx
import topology_settings as tp
from shortest_path import dijsktra
import argparse
import numpy as np
import pandas as pd
import time
import os
def load_test_datasets():
    """Load the test set and VNF placement info from the ../data CSV files.

    Returns (testset, placement) where placement maps
    arrivaltime -> {placement_id: (node_id, vnf_type, n_inst)}.
    """
    testset_path = '../data/testset.csv'
    raw_placement_dataset_path = '../data/20190530-nodeinfo.csv'
    testset = np.array(pd.read_csv(testset_path, index_col=0))
    raw_placement_dataset = np.array(pd.read_csv(raw_placement_dataset_path, skiprows=0))
    placement = {}
    for nodeinfo in raw_placement_dataset:
        arrivaltime = int(nodeinfo[idx.PLACEMENT_RAW_ARRIVALTIME])
        # placement_id restarts from 0 for each new arrival time; rows of the
        # same arrival time are assumed to be contiguous in the CSV —
        # TODO confirm against the data file.
        if arrivaltime not in placement.keys():
            placement_id = 0
            placement[arrivaltime] = {}
        # Node ids in the CSV are 1-based; internally they are 0-based.
        node_id = int(nodeinfo[idx.PLACEMENT_RAW_NODEID])-1
        vnf_type = tp.vnf_types[nodeinfo[idx.PLACEMENT_RAW_VNFTYPE]]
        n_inst = int(nodeinfo[idx.PLACEMENT_RAW_NINST])
        placement[arrivaltime][placement_id] = (node_id, vnf_type, n_inst)
        placement_id += 1
    return testset, placement
def find_optimal_path(top, req_id):
    """Find the cheapest path for request `req_id` through its SFC.

    Enumerates every feasible VNF-instance sequence (one instance per SFC
    stage with enough spare capacity), routes each via Dijkstra hop by hop,
    and returns (best_path, best_cost, True) — or ([0], 0, False) when no
    instance sequence is feasible.
    """
    def make_possible_seq_list(vnfs, src, dst, sfc, traffic):
        """Build an array of all feasible node sequences src->vnf...->dst."""
        n_sfc = len(sfc)
        n_possible = 1
        n_vnf_inst = np.zeros([tp.n_vnfs], dtype=np.int)
        # Count, per SFC stage, the instances with capacity >= traffic; the
        # total number of sequences is the product of those counts.
        for vnf_type in sfc:
            n_tmp = 0
            vnf_item = vnfs[vnf_type]
            for vnf_inst_id in vnf_item:
                capacity = vnf_item[vnf_inst_id][idx.VNF_CAPACITY]
                if capacity >= traffic:
                    n_tmp += 1
            n_vnf_inst[vnf_type] = n_tmp
            n_possible = n_possible * n_tmp
        if n_possible == 0:
            return np.zeros([1]), False
        possible_seq_list = np.zeros([n_possible, n_sfc+2], dtype=np.int)
        possible_seq_list[:,0] = src
        possible_seq_list[:,-1] = dst
        # Fill each column like a mixed-radix counter: blocks of rows
        # (`jump_unit` long) share the same instance, repeated `repeat` times.
        prev_jump_unit = n_possible
        repeat = 1
        sfc_idx = 1
        for vnf_type in sfc:
            jump_unit = int(prev_jump_unit / n_vnf_inst[vnf_type])
            vnf_item = vnfs[vnf_type]
            n_jump = 0
            for r in range(repeat):
                for vnf_inst_id in vnf_item.keys():
                    node_id = vnf_item[vnf_inst_id][idx.VNF_NODEID]
                    capacity = vnf_item[vnf_inst_id][idx.VNF_CAPACITY]
                    if capacity >= traffic:
                        possible_seq_list[jump_unit*n_jump:jump_unit*(n_jump+1),sfc_idx]\
                            = node_id
                        n_jump += 1
            sfc_idx += 1
            repeat = repeat * n_vnf_inst[vnf_type]
            prev_jump_unit = jump_unit
        return possible_seq_list, True
    def compute_seq_cost(top, seq, req_id):
        """Route the node sequence hop by hop and return (path, cost)."""
        tmp_path = []
        for i in range(len(seq)-1):
            from_node = seq[i]
            to_node = seq[i+1]
            # Drop each segment's last node to avoid duplicating junctions.
            tmp_path += dijsktra(top, from_node, to_node)[:-1]
        # Re-append the final destination (uses the loop's last `to_node`;
        # assumes seq has at least two entries, which src/dst guarantee).
        tmp_path += [to_node]
        cost = top.compute_cost(tmp_path, req_id)
        return tmp_path, cost
    src = top.reqs[req_id][idx.REQ_SRC]
    dst = top.reqs[req_id][idx.REQ_DST]
    traffic = top.reqs[req_id][idx.REQ_TRAFFIC]
    sfcid = top.reqs[req_id][idx.REQ_SFCID]
    sfc = tp.sfc_type[sfcid]
    n_sfc = len(sfc)
    possible_seq_list, possible_flag = make_possible_seq_list(top.vnfs, src, dst, sfc, traffic)
    n_possible = len(possible_seq_list)
    if possible_flag == False:
        return [0], 0, False
    # Exhaustive search over all feasible sequences for the minimum cost.
    best_cost = 99999
    for seq in possible_seq_list:
        tmp_path, cost = compute_seq_cost(top, seq, req_id)
        if cost < best_cost:
            best_path = tmp_path
            best_cost = cost
    return best_path, best_cost, True
def generate_label_data(top, req_id, path, cost):
    """Serialize one routed request into a comma-separated label line.

    The line layout is: arrivaltime, duration, src, dst, traffic, maxlat,
    five zero padding columns, sfcid, cost, then the path nodes.
    """
    req = top.reqs[req_id]
    arrivaltime = req[idx.REQ_ARRIVALTIME]
    duration = req[idx.REQ_ENDTIME] - arrivaltime
    fields = [
        arrivaltime,
        duration,
        req[idx.REQ_SRC],
        req[idx.REQ_DST],
        req[idx.REQ_TRAFFIC],
        req[idx.REQ_MAXLAT],
        0, 0, 0, 0, 0,  # padding columns kept for the label format
        req[idx.REQ_SFCID],
        cost,
    ] + path
    return ','.join(str(tok) for tok in fields)
def generate_label_packet(packet, arrivaltime):
    """Join the arrival time and every sample line with '/' separators."""
    return '/'.join([str(arrivaltime)] + list(packet))
def labeling(save, rank_method, f):
    """Generate optimal-path labels for every packet of the test set.

    For each packet: reset the topology, load its requests, rank them with
    `rank_method`, and route each request optimally.  Packets where any
    request fails (no feasible path, or cost above its max latency) are
    discarded.  Stats go to log file `f` when `save` is True; the surviving
    label packets are always written to ../data/test_labeling.csv.
    """
    # Oversized preallocation; only the first n_packet entries are used.
    generated_labels = [0] * 9999999
    n_fail = 0
    n_overmax = 0
    n_packet = 0
    testset, placement = load_test_datasets()
    label_top = Topology()
    label_top.Initialize_topology()
    start = True
    start_time = time.time()
    stacked_avg_time = 0
    total_time = 0
    n_try = 0
    for index in range(len(testset)):
        packet_sample = []
        n_try += 1
        fail_packet_flag = False
        # Each packet is labeled against a freshly reset topology.
        label_top.Initialize_topology()
        # Packet format: "arrivaltime/req1/req2/...", requests comma-separated.
        package = testset[index,0]
        package = package.split('/')
        arrivaltime = package[0]
        package = package[1:]
        n_req = len(package)
        max_arrivaltime = 0
        for reqline in package:
            tmp_reqline = reqline.split(',')
            reqline = np.zeros([len(tmp_reqline)], dtype=np.int)
            for i, tok in enumerate(tmp_reqline):
                reqline[i] = int(tok)
            request_line = reqline[:idx.EOI]
            arrivaltime = request_line[idx.REQ_RAW_ARRIVALTIME]
            if arrivaltime > max_arrivaltime:
                max_arrivaltime = arrivaltime
            label_top.set_topology(request_line, max_arrivaltime, placement)\
            #del_node = label_top.topology_change()
        label_top.ranking(rank_method)
        n_req = len(label_top.reqs.keys())
        #if len(del_node) != 0:
        #    print("del node : ", del_node)
        #label_top.print_topology(edge=True,vnf=True)
        # Invert the rank mapping so requests can be visited in rank order.
        sorted_reqs = np.zeros([n_req], dtype=np.int)
        for req_id in label_top.rank.keys():
            rank_idx = label_top.rank[req_id]
            sorted_reqs[rank_idx] = req_id
        for rank_idx in range(n_req):
            req_id = sorted_reqs[rank_idx]
            path, cost, flag = find_optimal_path(label_top, req_id)
            #if len(del_node) != 0:
            #    print("req : ", label_top.reqs[req_id])
            #    print("path : ", path)
            if flag == False:
                # No feasible VNF sequence: drop the whole packet.
                n_fail += 1
                fail_packet_flag = True
                break
                #print("Cannot make path for {} at arrivaltime {}".format(\
                #    req_id, arrivaltime))
            else:
                if cost > label_top.reqs[req_id][idx.REQ_MAXLAT]:
                    # Best path exceeds the request's latency budget.
                    n_overmax += 1
                    fail_packet_flag = True
                    break
                else:
                    # Commit the route so later requests see updated load.
                    label_top.update_topology(path, req_id)
                    store_sample = generate_label_data(label_top, req_id, path, cost)
                    packet_sample.append(store_sample)
        if fail_packet_flag == False:
            generated_labels[n_packet] = generate_label_packet(packet_sample, arrivaltime)
            n_packet += 1
        if n_try % 10 == 0:
            print("{} rank_method {} packets {} overmax {} fail".format(rank_method,\
                n_packet, n_overmax, n_fail))
    print("------------Labeling Done---------------")
    print("Method : {}".format(rank_method))
    print("Packets : {}, OverMax : {}, Fail : {}".format(\
        n_packet, n_overmax, n_fail))
    if save == True:
        method_str = ''
        for method_item in rank_method:
            method_str += method_item
        f.write("{}\t{}\t{}\t{}\n".format(method_str,n_packet,n_overmax,n_fail))
        f.close()
    # NOTE(review): the CSV below is written regardless of `save`, and the
    # slice drops the last generated packet (n_packet-1) — confirm intended.
    target_dir = '../data/test_labeling.csv'
    if os.path.exists(target_dir):
        os.remove(target_dir)
        print("Remove exist target file")
    df_label = pd.DataFrame(generated_labels[:n_packet-1])
    df_label.to_csv(target_dir)
    print("Generating Labels Done!")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): type=str with a list default — the default stays a list
    # while a user-supplied value arrives as a single string; confirm intended.
    parser.add_argument("--rank_method", type=str, default=['sfcid','src_max','dst_max'])
    args = parser.parse_args()
    log_file_path = '../data/labeling_log.txt'
    #if os.exist.path(log_file_path):
    #    os.remove(log_file_path)
    # Append-mode log file; labeling() closes it when save is True.
    f = open(log_file_path, 'a')
    save = True
    rank_method = args.rank_method
    labeling(save, rank_method, f)
| [
"[email protected]"
]
| |
a071d653f678661efe5f76d8153b380eb2aa9da1 | 5456320f03ed956ff7b1ad6a9539d65a602c71d4 | /mozCingi/fuzzers/mutagen/mutagenExecutor.py | 03e99823055ea5ebf8b00cf285f353720184fc6d | []
| no_license | ShakoHo/mozCingi | 9020cbb4aa65308ca5fd5bf9c074230f1fddb751 | 39239411abc840cd58a05f1fa41a24ae7cf9695f | refs/heads/master | 2016-08-12T12:51:16.331671 | 2016-03-25T09:35:54 | 2016-03-25T09:35:54 | 49,626,247 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | __author__ = 'shako'
import os
from mozCingi.util.mozITPWrapper import MozITPWrapper
from mozCingi.steps.executor import AbsExecutor
class MutagenExecutor(AbsExecutor):
    """Executor that runs a packed mutagen fuzz bundle through MozITP."""

    # File name used for the per-fuzzer execution log.
    DEFAULT_EXEC_LOG_NAME = "exec.log"

    def launch_execute_file(self):
        """Resolve pack/log paths, run the fuzz pack via ITP, then stop ITP."""
        pack_path = os.path.join(
            self.working_dir,
            self.DEFAULT_ROOT_TMP_DIR,
            self.fuzzer_name + "_" + str(self.obj_index) + ".zip",
        )
        log_dir = os.path.join(self.working_dir, self.DEFAULT_ROOT_LOG_DIR, self.fuzzer_name)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        log_path = os.path.join(log_dir, self.DEFAULT_EXEC_LOG_NAME)
        itp = MozITPWrapper()
        itp.launch_itp_for_fuzz(pack_path, log_path)
        itp.stop_itp()
| [
"[email protected]"
]
| |
92aa7a25070d981b4443680ae1a1621f0f40d582 | ce4d1c3a1522f382d9b3f73b7f126e7a3616bfb5 | /projects/DensePose/densepose/data/datasets/coco.py | ddd03c25b6956e8afa7d78ac0a259d255fb51541 | [
"Apache-2.0"
]
| permissive | davidnvq/detectron2 | 6c01512326687e86ab50c0f89af4e926c0007ae6 | eaca19840e5db014c3dd37dee9920d780b3b6165 | refs/heads/master | 2022-04-26T03:29:08.080258 | 2020-04-24T09:05:07 | 2020-04-24T09:05:07 | 258,421,912 | 1 | 0 | Apache-2.0 | 2020-04-24T06:08:26 | 2020-04-24T06:08:25 | null | UTF-8 | Python | false | false | 4,143 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from dataclasses import dataclass
from typing import Any, Dict, Iterable, Optional
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_coco_json
# Extra per-annotation keys carried through from DensePose COCO JSON files.
DENSEPOSE_KEYS = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V", "dp_masks"]
# Base URL where the DensePose metadata files (.mat) are hosted.
DENSEPOSE_METADATA_URL_PREFIX = "https://dl.fbaipublicfiles.com/densepose/data/"
@dataclass
class CocoDatasetInfo:
    """Description of one COCO-style DensePose dataset registration."""
    name: str  # dataset name used as the DatasetCatalog key
    images_root: str  # image folder; relative paths get the datasets root prefix
    annotations_fpath: str  # COCO JSON annotations file path
# Built-in DensePose dataset registrations.  Paths are relative and resolved
# against the datasets root passed to register_datasets().
DATASETS = [
    CocoDatasetInfo(
        name="densepose_coco_2014_train",
        images_root="coco/train2014",
        annotations_fpath="coco/annotations/densepose_train2014.json",
    ),
    CocoDatasetInfo(
        name="densepose_coco_2014_minival",
        images_root="coco/val2014",
        annotations_fpath="coco/annotations/densepose_minival2014.json",
    ),
    CocoDatasetInfo(
        name="densepose_coco_2014_minival_100",
        images_root="coco/val2014",
        annotations_fpath="coco/annotations/densepose_minival2014_100.json",
    ),
    CocoDatasetInfo(
        name="densepose_coco_2014_valminusminival",
        images_root="coco/val2014",
        annotations_fpath="coco/annotations/densepose_valminusminival2014.json",
    ),
    CocoDatasetInfo(
        name="densepose_chimps",
        images_root="densepose_evolution/densepose_chimps",
        annotations_fpath="densepose_evolution/annotations/densepose_chimps_densepose.json",
    ),
]
def _is_relative_local_path(path: os.PathLike):
path_str = os.fsdecode(path)
return ("://" not in path_str) and not os.path.isabs(path)
def _maybe_prepend_base_path(base_path: Optional[os.PathLike], path: os.PathLike):
    """
    Prepends the provided path with a base path prefix if:
    1) base path is not None;
    2) path is a local path
    """
    if base_path is not None and _is_relative_local_path(path):
        return os.path.join(base_path, path)
    return path
def get_metadata(base_path: Optional[os.PathLike]) -> Dict[str, Any]:
    """
    Returns metadata associated with COCO DensePose datasets

    Args:
        base_path: Optional[os.PathLike]
            Base path used to load metadata from

    Returns:
        Dict[str, Any]
            Metadata in the form of a dictionary
    """
    # Map each metadata key to its file name, then resolve against base_path.
    metadata_files = {
        "densepose_transform_src": "UV_symmetry_transforms.mat",
        "densepose_smpl_subdiv": "SMPL_subdiv.mat",
        "densepose_smpl_subdiv_transform": "SMPL_SUBDIV_TRANSFORM.mat",
    }
    return {
        key: _maybe_prepend_base_path(base_path, fname)
        for key, fname in metadata_files.items()
    }
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[os.PathLike] = None):
    """
    Registers provided COCO DensePose dataset

    Args:
        dataset_data: CocoDatasetInfo
            Dataset data
        datasets_root: Optional[os.PathLike]
            Datasets root folder (default: None)
    """
    annotations_fpath = _maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
    images_root = _maybe_prepend_base_path(datasets_root, dataset_data.images_root)
    # Closure registered with DatasetCatalog so the JSON is loaded lazily,
    # only when the dataset is first requested.
    def load_annotations():
        return load_coco_json(
            json_file=annotations_fpath,
            image_root=images_root,
            dataset_name=dataset_data.name,
            extra_annotation_keys=DENSEPOSE_KEYS,
        )
    DatasetCatalog.register(dataset_data.name, load_annotations)
    # Attach paths plus the DensePose .mat metadata URLs to the catalog entry.
    MetadataCatalog.get(dataset_data.name).set(
        json_file=annotations_fpath,
        image_root=images_root,
        **get_metadata(DENSEPOSE_METADATA_URL_PREFIX)
    )
def register_datasets(
    datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[os.PathLike] = None
):
    """
    Registers provided COCO DensePose datasets

    Args:
        datasets_data: Iterable[CocoDatasetInfo]
            An iterable of dataset datas
        datasets_root: Optional[os.PathLike]
            Datasets root folder (default: None)
    """
    for dataset_info in datasets_data:
        register_dataset(dataset_info, datasets_root)
| [
"[email protected]"
]
| |
e6f51ac5c078a462cf32ca079da25b67af0a32d0 | 67559847d7dba32e033d8f4828df5751a3eef94c | /MDclt_secondary.py | 41a9836017384c9a3f81cab08dd7d3c2a8cd479c | [
"BSD-3-Clause"
]
| permissive | KarlTDebiec/MDclt | 5b72d5105b43bc0e1fca6188fbe8bbdbe9630173 | 9e86e996ed7958a348012c053fa957d94729be8a | refs/heads/master | 2016-09-11T09:26:41.955595 | 2015-09-21T13:47:25 | 2015-09-21T13:47:25 | 39,224,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# MDclt_secondary.py
#
# Copyright (C) 2012-2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Command Line Tool to manage secondary analysis of molecular dynamics simulations
.. todo:
- Automatically add analysis functions (i.e. do not hardcode)
"""
################################### MODULES ####################################
from __future__ import division, print_function
import argparse, operator, os, sys
import numpy as np
##################################### MAIN #####################################
if __name__ == "__main__":
    # Prepare argument parser
    parser = argparse.ArgumentParser(
      description = __doc__,
      formatter_class = argparse.RawTextHelpFormatter)
    tool_subparsers = parser.add_subparsers(
      dest = "tool",
      description = "")
    # Each analysis module contributes its own subcommand to the parser.
    from MDclt.secondary import pdist
    pdist.add_parser(tool_subparsers)
    from MDclt.secondary import assign
    assign.add_parser(tool_subparsers)
    from MDclt.secondary import stateprobs
    stateprobs.add_parser(tool_subparsers)
    # Parse arguments
    kwargs = vars(parser.parse_args())
    if kwargs["attrs"] is not None:
        # Pair consecutive tokens of the flat attrs list into a dict:
        # [k1, v1, k2, v2, ...] -> {k1: v1, k2: v2, ...}
        kwargs["attrs"] = {k: v for k, v in zip(*[iter(kwargs["attrs"])] * 2)}
    else:
        kwargs["attrs"] = {}
    # Run selected analysis
    kwargs.pop("analysis")(**kwargs)
| [
"[email protected]"
]
| |
996c4bb3c7fbff7130720f83e8a2aeb3f5985688 | 63556cdaafad44a285b2333fdb39f0b9471e4457 | /app.py | 92356afe7f21fb789689e2721abbc46d5b8bda13 | []
| no_license | Maiska123/python-flask-server-pi | 13cc6e9cc4f5ff02ea1061e17cacacf14fcee87e | f9520ca55f230ef96b29a2f785a69cf60276e822 | refs/heads/master | 2023-06-21T12:58:00.824889 | 2021-08-10T18:51:11 | 2021-08-10T18:51:11 | 394,752,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | from flask import Flask
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # Sets up the RPi lib to use the Broadcom pin mappings
# for the pin names. This corresponds to the pin names
# given in most documentation of the Pi header
GPIO.setwarnings(False) # Turn off warnings that may crop up if you have the
# GPIO pins exported for use via command line
GPIO.setup(17, GPIO.OUT) # Set GPIO2 as an output
app = Flask(__name__)
# @app.route('/')
# def index():
# return 'yellow hellou!'
# The magic happens here. When some http request comes in with a path of
# gpio/x/y, the Flask app will attempt to parse that as x=pin and y=level.
# Note that there is no error handling here! Failure to properly specify the
# route will result in a 404 error.
@app.route('/')
# NOTE(review): the comment block above describes gpio/x/y request paths,
# but this route only matches '/' and never receives x/y — the defaults
# (id=17, level=1) are always used.  TODO confirm the intended route.
def setPinLevel(id = 17 , level = 1):
    """Pulse GPIO `id`: drive it to `level`, then immediately drive it low."""
    GPIO.output(int(id), int(level))
    # Immediately resets the pin to 0, so the output is a brief pulse.
    GPIO.output(int(id), int(0))
    return "OH MY GOD! :DDD"
# If we're running this script directly, this portion executes. The Flask
# instance runs with the given parameters. Note that the "host=0.0.0.0" part
# is essential to telling the system that we want the app visible to the
# outside world.
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') | [
"[email protected]"
]
| |
39db216033d546f0f643835c4d88651246ace967 | 2544d9e107e748faa06db1750c5ef72f583bbb7c | /puny/slug.py | 196cbd159adca30391d8c4dbc94e660ff53758b3 | []
| no_license | cleverdevil/puny | 5e3244d4974495e43053dc21048614feba5782df | fe2f03b825ef4f9d6afa561af409e55ddb70a8ce | refs/heads/master | 2021-09-15T03:02:08.329051 | 2018-05-24T16:48:31 | 2018-05-24T16:48:31 | 126,065,312 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | from pecan import conf
from slugify import UniqueSlugify
from . import storage
import uuid
import maya
def unique_check(text, uids):
    """Return True when no stored entry already uses slug `text`.

    `uids` is required by the UniqueSlugify callback signature but unused.
    """
    permalink = conf.app.public_url + '/view/entry/' + text
    # hidden=True so drafts/hidden entries also reserve their slugs.
    return storage.get_by_permalink(permalink, hidden=True) is None
# Slugifier that retries candidates until unique_check accepts one.
slugify_unique = UniqueSlugify(unique_check=unique_check)
def generate_slug(mf2):
    """Derive a unique, lowercase, <=40-char slug from a microformats2 entry.

    Seed preference order: explicit name, then content (value/html of dict
    entries or plain strings; the last content item wins), then a
    like/bookmark/repost target URL, finally a random UUID.
    """
    seed = None
    props = mf2.get('properties', {})
    if 'name' in props:
        seed = props['name'][0]
    elif 'content' in props:
        if len(props['content']):
            for content in props['content']:
                if isinstance(content, dict):
                    if 'value' in content:
                        seed = content['value']
                    elif 'html' in content:
                        seed = content['html']
                elif isinstance(content, str):
                    seed = content
    if not seed:
        if 'like-of' in props:
            seed = 'like of ' + props['like-of'][0]
        elif 'bookmark-of' in props:
            seed = 'bookmark of ' + props['bookmark-of'][0]
        elif 'repost-of' in props:
            # NOTE(review): 'repost-of ' keeps the hyphen unlike the two
            # prefixes above — confirm whether that is intentional.
            seed = 'repost-of ' + props['repost-of'][0]
        else:
            seed = str(uuid.uuid4())
    return slugify_unique(seed, to_lower=True, max_length=40)
| [
"[email protected]"
]
| |
efa0990bf837cff4970ce887c5c3837d6ff340a6 | 4d75121d43ed6d1063f467b7b75309b28d8f48a2 | /2022.2.8start/greedy/860.柠檬水找零.py | e848d6b0e0b6969e789cc8fb61efc709142d4230 | []
| no_license | cosJin/LeetCode | b311d43a5224cc7156f8ad72f4233e1f85556f20 | 26fddfdbd09c30376cb0720e13baf0402c3a1e90 | refs/heads/master | 2022-11-08T10:29:43.461016 | 2022-10-27T15:04:16 | 2022-10-27T15:04:16 | 154,267,347 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | #
# @lc app=leetcode.cn id=860 lang=python
#
# [860] 柠檬水找零
#
# @lc code=start
class Solution(object):
    def lemonadeChange(self, bills):
        """
        :type bills: List[int]
        :rtype: bool

        Greedy change-making: track counts of $5 and $10 bills on hand
        ($20s are never given back).  For a $20, prefer 10+5 over 5+5+5.
        """
        fives = tens = 0
        for bill in bills:
            if bill == 5:
                fives += 1
            elif bill == 10:
                if not fives:
                    return False
                fives -= 1
                tens += 1
            # bill == 20 from here on
            elif tens and fives:
                tens -= 1
                fives -= 1
            elif fives >= 3:
                fives -= 3
            else:
                return False
        return True
# @lc code=end
| [
"[email protected]"
]
| |
81bfaf6375f77b7ed67e4349c83094fe48b45d0d | cb20a5e5f9360c50b5d2c8e3db68f08727c5bff3 | /Src/NewCustomer.py | b23a58b82b919b65229cf4246d1cb5cdb2c285ff | []
| no_license | AnhaaD/IMS-2.0 | cc40c9408bc2e1e4afd96ac52be1d7af0cd908a7 | 10bc88f7c0680733196debfdbe08aa4c2281df29 | refs/heads/master | 2020-06-18T10:58:26.981620 | 2019-07-10T21:48:54 | 2019-07-10T21:48:54 | 196,279,292 | 1 | 0 | null | 2019-07-10T21:48:52 | 2019-07-10T21:48:52 | null | UTF-8 | Python | false | false | 18,140 | py | from Tkinter import *
from ttk import *
import tkFont as F
from proWrd import Filter, InvoiceSplit
from tkMessageBox import showinfo
from tkMessageBox import askokcancel
from TableTree import MultiListbox
import UniqueIdGenerator as uig
from buttoncalender import CalendarButton
sty = N + W + S + E
class NewCustomer(Frame):
    """Tkinter frame for creating a new customer or editing an existing one.

    Args:
        master: parent Tk widget (typically a Toplevel window).
        modify: True to edit an existing customer, False to create a new one.
        tup: when modify is True, (customer_id, customer_name); otherwise
            only used as a pass-through to Save().
        db: database facade exposing ``sqldb`` (raw queries / id lookups)
            plus add/edit/delete helper methods.
    """

    def __init__(self, master, modify, tup, db):
        Frame.__init__(self, master)
        self.master = master
        self.db = db
        self.modify = modify
        self.tup = tup
        bg = 'White'  # NOTE(review): unused local
        fg = "#3496ff"
        self.app = Frame(master)
        self.app.grid(row=0, column=0, sticky=sty)
        self.app.rowconfigure(1, weight=1)
        self.app.columnconfigure(0, weight=1)
        if modify == True:
            value = tup[1]
            self.ctmid = tup[0]
        else:
            value = "Customer Name"
            self.ctmid = None
        # Big header label kept in sync with the name entry via update_name().
        self.name = Label(self.app, text=value, font=('Arial Black', 26), foreground=fg)
        self.name.grid(row=0, column=0, sticky=sty, pady=10)
        note = Notebook(self.app)
        note.grid(row=1, column=0, sticky=sty)
        note.rowconfigure(0, weight=1)
        note.columnconfigure(0, weight=1)
        self.notepage1(note, modify, tup)
        # The invoices tab only exists for an already-saved customer.
        if modify == True:
            self.notepage2(note)

    def update_name(self, event):
        """Key-release handler: mirror the name entry into the header label."""
        name = Filter(self.entry5.get()).title()
        if len(name) == 0:
            name = "Customer Name"
        self.name.configure(text=name)

    def editinvoice(self):
        """Open a popup window to edit the invoice selected in the list."""
        i = self.mlb21.Select_index
        if i == None:
            return showinfo("Message", "No Item Selected", parent=self.master)
        # Resolve the selection to its top-level row: child rows are the
        # per-product detail lines inserted under each invoice.
        piid = self.mlb21.trueparent(self.mlb21.Select_iid)
        i = self.mlb21.index(piid)
        r = self.mlb21.get(i)
        self.invid = r[0]
        root13 = Toplevel()
        root13.title("Invoice Edit")
        root13.grid()
        for i in range(5):
            root13.rowconfigure(i, weight=1)
        for i in range(2):
            root13.columnconfigure(i, weight=1)
        lf = root13
        self.invgui = root13
        color = 'gray98'
        root13['background'] = color
        Label(lf, text="Invoice ID : %s" % (self.invid), foreground="#3496ff", font=('Berlin Sans FB Demi', 18)).grid(
            row=0, column=0, columnspan=2, sticky=sty, pady=8, padx=7)
        r = self.db.sqldb.execute(""" SELECT invoice_date,invoice_no,customer_name,phone_no FROM invoices
        JOIN customers USING (customer_id) JOIN contacts USING (customer_id) WHERE invoice_id = "%s" """ % (
            self.invid)).fetchone()
        Label(lf, text="Invoice Date", width=20).grid(row=1, column=0, sticky=sty, pady=8, padx=7)
        Label(lf, text="Invoice No").grid(row=2, column=0, sticky=sty, pady=8, padx=7)
        Label(lf, text="Customer Name").grid(row=3, column=0, sticky=sty, pady=8, padx=7)
        Label(lf, text="Customer Phone").grid(row=4, column=0, sticky=sty, pady=8, padx=7)
        self.invdate = CalendarButton(lf)
        self.invdate.grid(row=1, column=1, sticky=sty, pady=8, padx=7)
        # Fall back to the current timestamp when the stored date cannot be
        # inserted (e.g. NULL in the database).
        try:
            self.invdate.insert(r[0])
        except:
            self.invdate.insert(self.invdate.getTimeStamp())
        self.invno = Spinbox(lf, from_=0, to=9999)
        self.invno.grid(row=2, column=1, sticky=sty, pady=8, padx=7)
        self.invno.delete(0, END)
        self.invno.insert(0, r[1])
        self.cusname = Entry(lf, width=40)
        self.cusname.grid(row=3, column=1, sticky=sty, pady=8, padx=7)
        self.cusname.delete(0, END)
        self.cusname.insert(0, r[2])
        self.cusphn = Entry(lf)
        self.cusphn.grid(row=4, column=1, sticky=sty, pady=8, padx=7)
        self.cusphn.delete(0, END)
        self.cusphn.insert(0, r[3])
        Button(lf, text="Save", command=lambda: self.invoicesave()).grid(row=5, column=1, sticky=sty, pady=8, padx=7)
        root13.wait_window()
        return 1

    def invoicesave(self):
        """Validate the invoice-edit popup inputs and persist the changes."""
        try:
            no = float(Filter(self.invno.get()))
            phn = Filter(self.cusphn.get())
            date = Filter(self.invdate.get())
        except:
            return showinfo(title="ERROR", message='Invoice Number must be numbers', parent=self.master)
        # The invoice is re-attached to whichever customer owns the entered
        # phone number.
        ctmid = self.db.sqldb.getcustomerID(phn)
        if ctmid == None:
            return showinfo(title="ERROR", message='Customer Not Found', parent=self.master)
        self.db.editinvoice(self.invid, ctmid, no, date)
        self.invgui.destroy()
        self.mlb21load()
        return showinfo(title="Successful", message='Changes Saved', parent=self.master)

    def deleteinvoice(self):
        """Delete the selected invoice after user confirmation."""
        i = self.mlb21.Select_index
        if i == None:
            return showinfo("Message", "No Item Selected", parent=self.master)
        r = self.mlb21.get(i)
        self.invid = r[0]
        ans = askokcancel("Message", "Sure You Want To Delete %s ?" % (self.invid), parent=self.master)
        if ans == True:
            b = self.db.deleteinvoice(self.invid)
            # deleteinvoice() returns False when selling records still
            # reference the invoice; the list is refreshed either way.
            if b == True:
                return showinfo("Message", "%s Has Been Successfully Deleted" % (self.invid),
                                parent=self.master), self.mlb21load()
            else:
                return showinfo("Message", "%s Is Attached To Selling Records" % (self.invid),
                                parent=self.master), self.mlb21load()
        return False

    def notepage2(self, note):
        """Build the 'Invoices' notebook tab (edit/delete buttons + totals)."""
        self.invid = None
        app1 = Frame(note)
        app1.grid(row=0, column=0, sticky=sty)
        note.add(app1, text=' Invoices ')
        for i in range(5):
            app1.rowconfigure(i, weight=1)
        for i in range(3):
            app1.columnconfigure(i, weight=1)
        Label(app1, text="Invoice Attached", font=('Berlin Sans FB Demi', 21), foreground="#3496ff").grid(row=0,
                                                                                                          column=0,
                                                                                                          sticky=sty,
                                                                                                          padx=10,
                                                                                                          pady=10)
        self.mlb21 = MultiListbox(app1,
                                  (("Invoice ID", 30), ("Invoice Number", 25), ("Invoice Date", 35), ("Paid", 20)))
        self.mlb21.grid(row=1, column=0, columnspan=3, sticky=sty)
        Button(app1, text="Edit Invoice", command=lambda: self.editinvoice()).grid(row=0, column=1, sticky=sty, pady=20)
        Button(app1, text="Delete Invoice", command=lambda: self.deleteinvoice()).grid(row=0, column=2, sticky=sty,
                                                                                      pady=20, padx=5)
        # Summary labels below the list; values are filled in by mlb21load().
        self.lbl1 = Label(app1, text="Total Amount Earned - 0 ")
        self.lbl1.grid(row=2, column=0, sticky=sty, padx=5, pady=5)
        self.lbl2 = Label(app1, text="Total No of Product - 0 ")
        self.lbl2.grid(row=3, column=0, sticky=sty, padx=5, pady=5)
        self.lbl3 = Label(app1, text="Total Amount Due - 0 ")
        self.lbl3.grid(row=4, column=0, sticky=sty, padx=5, pady=5)
        if self.modify == True:
            self.mlb21load()
        return 1

    def mlb21load(self):
        """Reload the invoice list (with per-product child rows) and totals."""
        self.mlb21.delete(0, END)
        ctmid = self.tup[0]
        invoices = self.db.sqldb.execute("""SELECT invoice_id,invoice_no,invoice_date,paid
        FROM invoices WHERE customer_id = "%s" ORDER BY invoice_no """ % (ctmid)).fetchall()
        tp = 0.0
        tpro = 0
        # NOTE(review): td is never updated, so "Total Amount Due" always
        # displays 0.
        td = 0.0
        for i in invoices:
            invid = i[0]
            paid = i[3]
            tp = tp + float(paid)
            iid = self.mlb21.insert(END, i)
            tup1 = self.db.sqldb.execute(""" SELECT product_name,cost,sold_price,QTY FROM (SELECT * FROM sells JOIN costs USING (cost_id) JOIN products USING (product_id) )
            JOIN invoices USING (invoice_id) WHERE invoice_id = "%s" ORDER BY product_name """ % (
                invid)).fetchall()
            # Header child row, followed by one detail row per sold product.
            self.mlb21.insert(END, ["Product Name", "Cost Price", "Selling Price", "Qty"], parent=iid, rowname="+",
                              bg='grey90', fg='Blue', tag="l5")
            tpro += len(tup1)
            for g in xrange(len(tup1)):
                self.mlb21.insert(END, tup1[g], parent=iid, rowname=g, bg='white')
        self.lbl1["text"] = "Total Amount Earned - %d " % (tp)
        self.lbl2["text"] = "Total No of Product - %d " % (tpro)
        self.lbl3["text"] = "Total Amount Due - %d " % (td)
        return 0

    def phnrefresh(self, ctmid):
        """Reload the phone-number list for the given customer id."""
        self.mlb2221.delete(0, END)
        d = self.db.execute(""" SELECT phone_no FROM contacts WHERE customer_id = "%s" """ % (ctmid))
        for i in d:
            self.mlb2221.insert(END, i)
        return None

    def phoneedit(self, edit):
        """Open the add (edit=False) or edit (edit=True) phone-number dialog."""
        tup = []
        if edit == True:
            index = self.mlb2221.Select_index
            if index == None or index > self.mlb2221.size():
                return showinfo('Select Error', 'Noting Is Selected', parent=self.master)
            piid = self.mlb2221.trueparent(self.mlb2221.Select_iid)
            index = self.mlb2221.index(piid)
            tup = self.mlb2221.get(index)
        self.t = Toplevel(master=self.master)
        self.t.title('Add Contact Number')
        if edit == True:
            self.t.title('Edit Contact Number')
        self.t['bg'] = 'white'
        self.t.focus()
        Label(self.t, text="Contact Number", background='white').grid(row=1, column=0, padx=5, pady=5)
        self.e = Entry(self.t)
        self.e.grid(row=1, column=1, sticky=E + S + W + N, padx=5, pady=5)
        btn = Button(self.t, text="Save Phone", command=lambda: self.Savephone(edit, tup))
        btn.grid(row=2, column=1, sticky=E + S + W + N, padx=5, pady=5)
        if edit == True:
            # Show the existing phone id and pre-fill the entry when editing.
            Label(self.t, text="Phone ID : ", background='white').grid(row=0, column=0, padx=5, pady=5)
            Label(self.t, text=self.db.sqldb.getphoneID(tup[0]), background='white').grid(row=0, column=1, padx=5,
                                                                                          pady=5)
            self.e.delete(0, END)
            self.e.insert(0, tup[0])
        self.t.wait_window()
        return None

    def Savephone(self, edit, tup):
        """Stage the entered phone number into the list widget.

        Numbers are only written to the database later, by Save();
        here we just reject duplicates and update the listbox.
        """
        phone = Filter(self.e.get())
        if self.db.sqldb.getphoneID(phone) is not None:
            return showinfo('Error', 'Phone Number Is Already Added.', parent=self.t)
        # phnid = self.db.sqldb.getphoneID(phone)
        # ctmid = self.ctmid
        # if phnid is not None and edit == False:
        #     return showinfo('Type Error', 'Phone Number Is Already Listed', parent=self.t)
        # if not phone.isdigit():
        #     return showinfo('Type Error', 'Not a Valid Phone Number', parent=self.t)
        # if edit:
        #     pphn = self.db.sqldb.getphoneID(tup[0])
        #     if pphn is not None:
        #         self.db.editphone(pphn, phone, ctmid)
        #     else:
        #         return showinfo('Type Error', 'Phone Number Already Listed', parent=self.t)
        # else:
        #     if ctmid is None:
        #         return showinfo('Error', 'Add Phone Number After Adding Customer.', parent=self.t)
        #     self.db.addphone(phone, ctmid)
        if edit:
            index = self.mlb2221.Select_index
            if index is None or index > self.mlb2221.size():
                return showinfo('Select Error', 'Noting Is Selected', parent=self.master)
            piid = self.mlb2221.trueparent(self.mlb2221.Select_iid)
            index = self.mlb2221.index(piid)
            self.mlb2221.setvalue(index,0,phone)
        else :
            self.mlb2221.insert(END, phone)
        self.t.destroy()
        return None

    def phonedelete(self):
        """Delete the selected phone number (from the list and, if stored,
        from the database)."""
        index = self.mlb2221.Select_index
        if index is None or index > self.mlb2221.size():
            return showinfo('Select Error', 'Noting Is Selected', parent=self.master)
        piid = self.mlb2221.trueparent(self.mlb2221.Select_iid)
        index = self.mlb2221.index(piid)
        tup = self.mlb2221.get(index)
        phnid = self.db.sqldb.getphoneID(tup[0])
        # Unsaved (staged-only) numbers have no phone id: just drop the row.
        if phnid == None:
            self.mlb2221.delete(index)
            return None
        d = self.db.deletephone(phnid)
        if d:
            self.mlb2221.delete(index)
            return showinfo('Info', 'Phone Number Deleted Successfully', parent=self.master)
        else:
            return showinfo('Info', 'Phone Number Cannot Be deleted attached with Customer', parent=self.master)
        # NOTE(review): unreachable — both branches above return.
        return None

    def notepage1(self, note, modify, tup):
        """Build the 'Customer' notebook tab (detail fields, phone list,
        save buttons); pre-fill from the database when editing."""
        app = Frame(note)
        app.grid(row=0, column=0, sticky=sty)
        note.add(app, text='Customer')
        for i in range(20):
            app.rowconfigure(i, weight=1)
        for i in range(3):
            app.columnconfigure(i, weight=1)
        Label(app, text="Customer Detail", font=('Berlin Sans FB Demi', 21), foreground="#3496ff").grid(row=0, column=0,
                                                                                                        sticky=sty,
                                                                                                        columnspan=2,
                                                                                                        padx=10,
                                                                                                        pady=10)
        lbl = Label(app, text='Customer Name ')
        lbl.grid(row=1, column=0, sticky=sty, padx=5, pady=5)
        self.entry5 = Entry(app, width=55)
        self.entry5.grid(row=1, column=1, sticky=sty, padx=5, pady=5)
        self.entry5.bind('<Any-KeyRelease>', self.update_name)
        lbl3 = Label(app, text="Email ")
        lbl3.grid(row=2, column=0, sticky=sty, padx=5, pady=5)
        self.entry3 = Entry(app, width=35)
        self.entry3.grid(row=2, column=1, sticky=sty, padx=5, pady=5)
        lbl1 = Label(app, text="Customer Address ", anchor=N)
        lbl1.grid(row=3, column=0, sticky=sty, padx=5, pady=5)
        self.text = Text(app, width=26, height=5, wrap=WORD, relief=FLAT)
        self.text.grid(row=3, column=1, sticky=sty, padx=5, pady=5)
        self.text.configure(highlightthickness=1, highlightbackground="Grey")
        # Right-hand column: phone number list with Add/Edit/Delete buttons.
        tmpapp = Frame(app)
        tmpapp.grid(row=1, column=2, rowspan=4, sticky=sty, padx=0, pady=0)
        tmpapp.columnconfigure(0, weight=1)
        tmpapp.rowconfigure(0, weight=5)
        tmpapp.rowconfigure(1, weight=1)
        self.mlb2221 = MultiListbox(tmpapp, [("Phone Number", 30)], height=5)
        self.mlb2221.grid(row=0, column=0, sticky=sty, padx=5, pady=5)
        tmpapp = Frame(tmpapp)
        tmpapp.grid(row=1, column=0, sticky=sty, padx=0, pady=0)
        tmpapp.rowconfigure(0, weight=1)
        Button(tmpapp, text='Add', command=lambda: self.phoneedit(False)).grid(row=0, column=0, sticky=sty, padx=5,
                                                                               pady=5)
        Button(tmpapp, text='Edit', command=lambda: self.phoneedit(True)).grid(row=0, column=1, sticky=sty, padx=5,
                                                                               pady=5)
        Button(tmpapp, text='Delete', command=lambda: self.phonedelete()).grid(row=0, column=2, sticky=sty, padx=5,
                                                                               pady=5)
        tmpapp = Frame(app)
        tmpapp.grid(row=4, column=1, sticky=sty, padx=0, pady=0)
        tmpapp.columnconfigure(0, weight=1)
        tmpapp.columnconfigure(1, weight=1)
        tmpapp.rowconfigure(0, weight=1)
        btn = Button(tmpapp, text='Save', width=12, command=lambda: self.Save(modify, tup))
        btn.grid(row=0, column=0, sticky=sty, padx=5, pady=5)
        # "Save As Copy" always calls Save(False, ...), i.e. creates a new
        # customer record; only enabled in edit mode.
        copy = Button(tmpapp, text='Save As Copy', width=12, command=lambda: self.Save(False, tup))
        copy.grid(row=0, column=1, sticky=sty, padx=5, pady=5)
        if modify == False:
            copy['state'] = DISABLED
        if modify == True:
            ctmid = self.tup[0]
            d = self.db.sqldb.execute(
                """ SELECT customer_name,customer_address,customer_email FROM customers WHERE customer_id = "%s" """ % (
                    ctmid)).fetchone()
            name = d[0]
            add = d[1]
            email = d[2]
            self.phnrefresh(ctmid)
            self.entry5.delete(0, END)
            self.entry5.insert(0, name)
            self.text.delete(0.0, END)
            self.text.insert(0.0, add)
            self.entry3.delete(0, END)
            self.entry3.insert(0, email)

    def Save(self, modify, tup):
        """Persist the customer (create or update) plus any staged phone
        numbers, then close the window.

        tup[0] = id no
        tup[1] = customer name
        tup[2] = phn no
        tup[3] = address
        tup[4] = email
        """
        name = Filter(self.entry5.get()).title()
        add = Filter(self.text.get(0.0, END)).title()
        email = Filter(self.entry3.get()).title()
        if len(name.split()) == 0:
            return showinfo(title="Error", message='Customer Name Must Be Specified', parent=self.master)
        ctmid = None
        if not modify:
            ctmid = self.db.addcustomer(name, address=add, email=email)
        else:
            ctmid = self.tup[0]
            # Renaming an existing customer needs explicit confirmation.
            ask = askokcancel("Key Error",
                              "Are You Sure You Want To Change The Customer Name From %s To %s ?" % (tup[1], name),
                              parent=self.master)
            if not ask:
                return 1
            self.db.editcustomer(ctmid, name, add, email)
        # Persist any phone numbers staged in the listbox that are not yet
        # in the database (getphoneID returns None for unknown numbers).
        if ctmid is not None :
            for i in xrange(self.mlb2221.size()):
                tup = self.mlb2221.get(i)
                phnid = self.db.sqldb.getphoneID(tup[0])
                if phnid is None:
                    self.db.addphone(tup[0],ctmid)
        self.master.destroy()
        return showinfo("ADDED", 'Saved Successfully')
| [
"[email protected]"
]
| |
8bcd9aa863af02fbda6ca89f80c595b263e35e8a | 49a167d942f19fc084da2da68fc3881d44cacdd7 | /kubernetes_asyncio/client/api/authorization_api.py | 420284fe2740cbf841d0d542f50e1b597a20ae81 | [
"Apache-2.0"
]
| permissive | olitheolix/kubernetes_asyncio | fdb61323dc7fc1bade5e26e907de0fe6e0e42396 | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | refs/heads/master | 2020-03-19T12:52:27.025399 | 2018-06-24T23:34:03 | 2018-06-24T23:34:03 | 136,546,270 | 1 | 0 | Apache-2.0 | 2018-06-24T23:52:47 | 2018-06-08T00:39:52 | Python | UTF-8 | Python | false | false | 4,144 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
class AuthorizationApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller supplies none.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501
        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_api_group(async=True)
        >>> result = thread.get()
        :param async bool
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): 'async' became a reserved keyword in Python 3.7, so
        # callers can only pass async=True on older interpreters.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_api_group_with_http_info(**kwargs)  # noqa: E501
            return data
    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501
        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_api_group_with_http_info(async=True)
        >>> result = thread.get()
        :param async bool
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []  # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument that is not in the allowed list above.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/authorization.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| [
"[email protected]"
]
| |
a9f41c2592af04dd331b3352a8130b05000be074 | 9014fbfa842dc97b5a957a1db82c33615936b88b | /egs/voice_privacy/v1/local/featex/create_melspec_data.py | 418f77565cadbe3879829f31be3fe589fe743bf2 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
]
| permissive | brijmohan/kaldi | a38b8fd4cf189960ebb500c59cbacb7e896d4f7c | c1f5ecfa28e3d2d599feef7e078b162158d525b0 | refs/heads/master | 2022-05-07T20:34:01.605784 | 2022-03-31T01:10:50 | 2022-03-31T01:10:50 | 182,386,883 | 2 | 0 | NOASSERTION | 2019-04-20T09:32:40 | 2019-04-20T09:32:40 | null | UTF-8 | Python | false | false | 422 | py |
import sys
from os.path import join, basename
from ioTools import readwrite
import kaldi_io
args = sys.argv
mspec_file = args[1]
out_dir = args[2]
mspec_out_dir = join(out_dir, "mel")
print "Writing MEL feats....."
# Write mspec features
for key, mat in kaldi_io.read_mat_scp(mspec_file):
#print key, mat.shape
readwrite.write_raw_mat(mat, join(mspec_out_dir, key+'.mel'))
print "Finished writing MEL feats."
| [
"[email protected]"
]
| |
a5dcefda5538fdd64b4d741c663b4258140cce2e | 19fb9c3dd2ae4b4002242ca220fc1ab8e0cc0c3a | /node_modules/websocket/build/config.gypi | f2fbc3357fdb46ce6778c2a50798b0674372bb7e | [
"Apache-2.0"
]
| permissive | chechuironman/watson-mood-kub | 73ff83925cd5698fc974ebc4054804da0cdbcb8d | b7fd1952e98a4eb01718a6cabc360afd580bf805 | refs/heads/master | 2021-04-09T16:21:39.192237 | 2018-03-21T16:48:10 | 2018-03-21T16:48:10 | 125,677,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,911 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 59,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/9.4.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "59.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "9.0",
"nodedir": "/Users/chechu/.node-gyp/9.4.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/chechu/.npm-init.js",
"userconfig": "/Users/chechu/.npmrc",
"cidr": "",
"node_version": "9.4.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/chechu/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/5.6.0 node/v9.4.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/qy/lh_mfrzs7_nc1jbtcpl5gh600000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
| [
"[email protected]"
]
| |
0bea0ecced4c778b22f949d0bfa1c3a5954fc139 | e0519908caa23bef1873ff69ebd17c5d81f741e1 | /calabiyau/views/sessions.py | 23c24167a6ff049d9af607d405a9047b9d2be499 | [
"BSD-3-Clause"
]
| permissive | TachyonicProject/calabiyau | 2fb7af37bd656a686a5f741cadd082b2500718ff | 415a8ada4a93ee84c4776e89c9442af328dcfdd6 | refs/heads/latest | 2020-05-02T04:14:43.953841 | 2019-12-06T04:12:39 | 2019-12-06T04:12:39 | 177,745,608 | 0 | 3 | NOASSERTION | 2019-12-06T04:12:40 | 2019-03-26T08:31:25 | Python | UTF-8 | Python | false | false | 4,448 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <[email protected]>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon import register
from luxon import router
from luxon.utils import sql
from luxon.helpers.api import sql_list
from luxon import MBClient
from calabiyau.lib.ctx import ctx
@register.resources()
class Sessions(object):
    """REST resources for live RADIUS sessions.

    Exposes a listing endpoint plus two administrative actions that are
    relayed to the subscriber worker over the message bus.
    """

    def __init__(self):
        # Services Users: register every endpoint this resource serves.
        for verb, route, handler, tag in (
                ('GET', '/v1/sessions', self.sessions, 'services:view'),
                ('PUT', '/v1/disconnect/{session_id}', self.disconnect,
                 'services:admin'),
                ('PUT', '/v1/clear/{nas_id}', self.clear, 'services:admin')):
            router.add(verb, route, handler, tag=tag)

    def sessions(self, req, resp):
        """Return a paginated listing of sessions that have not stopped."""

        def render_ctx(ctx_id):
            # Translate the numeric ctx code into its symbolic name; fall
            # back to the raw value when the code is unknown.
            try:
                return {'ctx': ctx[ctx_id]}
            except IndexError:
                return {'ctx': ctx_id}

        accttype = sql.Field('calabiyau_session.accttype')
        query = sql.Select('calabiyau_session')
        # Columns selected for each session row (order preserved from the
        # original implementation).
        query.fields = (
            sql.Field('calabiyau_session.id'),
            sql.Field('calabiyau_session.acctuniqueid'),
            sql.Field('calabiyau_session.acctstarttime'),
            sql.Field('calabiyau_session.acctupdated'),
            sql.Field('calabiyau_session.id'),
            sql.Field('calabiyau_session.username'),
            sql.Field('INET6_NTOA(calabiyau_session.framedipaddress)'),
            sql.Field('INET6_NTOA(calabiyau_session.nasipaddress)'),
            sql.Field('calabiyau_session.ctx'),
        )
        query.where = accttype != sql.Value('stop')
        return sql_list(
            req,
            query,
            search={
                'calabiyau_session.acctstarttime': 'datetime',
                'calabiyau_session.acctupdated': 'datetime',
                'calabiyau_session.user_id': str,
                'calabiyau_session.username': str,
                'calabiyau_session.acctuniqueid': str,
                'calabiyau_session.framedipaddress': 'ip',
                'calabiyau_session.nasipaddress': 'ip'},
            callbacks={'ctx': render_ctx})

    def disconnect(self, req, resp, session_id):
        """Ask the subscriber worker to drop one session."""
        with MBClient('subscriber') as bus:
            bus.send('disconnect_session', {'session_id': session_id})

    def clear(self, req, resp, nas_id):
        """Ask the subscriber worker to clear every session on a NAS."""
        with MBClient('subscriber') as bus:
            bus.send('clear_nas_sessions', {'nas_id': nas_id})
| [
"[email protected]"
]
| |
df05652165c48f1e162013aea44f587cd1a93715 | 98a813b1c76d5da0509b97ebbbd49a6c0d920f67 | /Day5/best_time_to_buy_and_sell_stock_ii.py | 779611855d8b8a32988cdeb65b2cce7b482bfa61 | []
| no_license | routdh2/30DayLeetCodingChallenge | 06d94f03b2b241722cebf39ef7ec74477f64d5c3 | 5b2f8cc34a6c7bdd999c4cb22acbc3e21cb53caa | refs/heads/master | 2021-05-21T06:37:17.714656 | 2020-04-21T08:53:54 | 2020-04-21T08:53:54 | 252,587,283 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | #Problem Statement: https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """
        :type prices: List[int]
        :rtype: int

        Every upward move between consecutive days can be captured by
        buying the day before and selling the day after, so the answer is
        the sum of all positive day-to-day differences.
        """
        return sum(max(nxt - cur, 0) for cur, nxt in zip(prices, prices[1:]))
| [
"[email protected]"
]
| |
dc765d76018ce6cd8317283edeebe0aa3727ded8 | 86b293ef3df8a276c97db49f25e5a9c36822776e | /0x08-python-more_classes/1-rectangle.py | a89c2a3ea1af8ed99a9cdb92d6e584860d74097b | []
| no_license | tayloradam1999/holbertonschool-higher_level_programming | 3c6ceab832ad85448df320a437ddf6c39130f0dd | 70068c87f3058324dca58fc5ef988af124a9a965 | refs/heads/main | 2023-08-19T16:13:04.240756 | 2021-09-28T00:37:03 | 2021-09-28T00:37:03 | 361,856,354 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | #!/usr/bin/python3
"""
This module defines a class 'Rectangle' that
defines a rectangle based on '0-rectangle.py'
"""
class Rectangle:
    """Rectangle with validated ``width`` and ``height`` dimensions."""

    def __init__(self, width=0, height=0):
        """Initialize the rectangle, validating both dimensions.

        Args:
            width (int): horizontal size, must be an int >= 0.
            height (int): vertical size, must be an int >= 0.
        """
        self.width = width
        self.height = height

    @staticmethod
    def _validated(name, value):
        """Return *value* if it is a non-negative int, else raise."""
        if not isinstance(value, int):
            raise TypeError(f"{name} must be an integer")
        if value < 0:
            raise ValueError(f"{name} must be >= 0")
        return value

    @property
    def width(self):
        """int: horizontal size of the rectangle."""
        return self.__width

    @width.setter
    def width(self, value):
        self.__width = self._validated("width", value)

    @property
    def height(self):
        """int: vertical size of the rectangle."""
        return self.__height

    @height.setter
    def height(self, value):
        self.__height = self._validated("height", value)
| [
"[email protected]"
]
| |
b913a75bf1ebfd7c0de7ff9b4a148a3f401dc9cb | 5527696f454fd15a1e9f43a5def47f0a64b072d3 | /app.py | 2d4c1e0d3061c03e248aae6e5356fcc491b29373 | []
| no_license | JaccyLi/PythonPlayground | 985dba2d1d665debf21fd35a95a612914c40686f | 682a3444845b892fcc009608aaa45c34c4823050 | refs/heads/master | 2020-07-21T10:24:15.326765 | 2019-12-03T04:27:16 | 2019-12-03T04:27:16 | 206,831,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | import turtle
from turtle import *
import random


def step_ahead():
    """Move the turtle forward a random distance (0-100 px)."""
    fd(random.randint(0, 100))


def spin_left():
    """Rotate the turtle 90 degrees counter-clockwise."""
    lt(90)


def spin_right():
    """Rotate the turtle 90 degrees clockwise."""
    rt(90)


# Bind the arrow keys to the handlers above and hand control to Tk.
screen = turtle.Screen()
screen.listen()
screen.onkey(step_ahead, "Up")
screen.onkey(spin_left, "Left")
screen.onkey(spin_right, "Right")
turtle.mainloop()
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.