# repo: YangLiyli131/Leetcode2020, path: in_Python/0018 4Sum.py
class Solution(object):
    def threesum(self, nums, target):
res = []
for i in range(len(nums)-2):
if i > 0 and nums[i] == nums[i-1]:
continue
l = i+1
r = len(nums)-1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s < target:
l += 1
elif s > target:
r -= 1
else:
res.append([nums[i],nums[l],nums[r]])
while l < r and nums[l] == nums[l+1]:
l += 1
while l < r and nums[r] == nums[r-1]:
r -= 1
l += 1
r -= 1
return res
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
res = []
nums.sort()
for i in range(len(nums)-3):
if i > 0 and nums[i] == nums[i-1]:
continue
cur = nums[i]
x = self.threesum(nums[i+1:], target - cur)
for item in x:
res.append([cur] + item)
        return res
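
# A minimal usage sketch for the solution above (the input is the classic
# LeetCode 4Sum example, not taken from this repo):
#   Solution().fourSum([1, 0, -1, 0, -2, 2], 0)
#   -> [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]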
# repo: benmali/Resty, path: main.py
from flask import Flask, session
from backend.register import registerBP
from backend.send_hours import sendHoursBP
from backend.main_page import mainBP
from backend.arrangement import arrangementBP
from backend.login import loginBP
from backend.log_working_hours import log_workBP
from backend.restore_shifts import restore_shiftsBP
from backend.week_templates import week_templatesBP
from backend.template_info import templates_infoBP
from backend.send_hours_info import sendHoursInfoBP
from backend.arrangement_info import arrangementInfoBP
app = Flask(__name__)
#bp of main - get
#bp register employee - post + get
#bp show arrangement - get
#bp create arrangement - get/post
#bp login - post/get
#bp register to the website
app.register_blueprint(registerBP, url_prefix="")
app.register_blueprint(sendHoursBP, url_prefix="")
app.register_blueprint(mainBP, url_prefix="")
app.register_blueprint(arrangementBP, url_prefix="")
app.register_blueprint(loginBP, url_prefix="")
app.register_blueprint(log_workBP, url_prefix="")
app.register_blueprint(restore_shiftsBP, url_prefix="")
app.register_blueprint(week_templatesBP, url_prefix="")
app.register_blueprint(templates_infoBP, url_prefix="")
app.register_blueprint(sendHoursInfoBP, url_prefix="")
app.register_blueprint(arrangementInfoBP, url_prefix="")
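# Each module registered above exposes a Blueprint object; a minimal sketch of
# one such module (illustrative only; the route and handler are assumptions,
# not this repo's code):
#
#   from flask import Blueprint
#   mainBP = Blueprint("main", __name__)
#
#   @mainBP.route("/")
#   def main_page():
#       return "main page"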
app.secret_key = "sxchahsdiusd324wdasd"
# run scheduled tasks
# import time
# import atexit
#
# from apscheduler.schedulers.background import BackgroundScheduler
#
#
# def print_date_time():
# print(time.strftime("%A, %d. %B %Y %I:%M:%S %p"))
#
#
# scheduler = BackgroundScheduler()
# scheduler.add_job(func=print_date_time, trigger="interval", seconds=3)
# scheduler.start()
#
# # Shut down the scheduler when exiting the app
# atexit.register(lambda: scheduler.shutdown())
if __name__ == "__main__":
    app.run(debug=True)
# repo: shivam2211/flask_1, path: migrations/versions/6dcc3fec26a4_posts_table.py
"""posts table
Revision ID: 6dcc3fec26a4
Revises: 14c1433eaa17
Create Date: 2019-08-07 18:39:59.461452
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6dcc3fec26a4'
down_revision = '14c1433eaa17'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(length=140), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_post_timestamp'), table_name='post')
op.drop_table('post')
# ### end Alembic commands ###
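
# For context, a model matching this migration would look roughly like the
# following Flask-SQLAlchemy sketch (an assumption, not code from this repo):
#
#   class Post(db.Model):
#       id = db.Column(db.Integer, primary_key=True)
#       body = db.Column(db.String(140))
#       timestamp = db.Column(db.DateTime, index=True)
#       user_id = db.Column(db.Integer, db.ForeignKey('user.id'))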
# repo: weilaidb/PythonExample2, path: pydevs_abc/src/test/hex.py
#!/usr/bin/python
# -*- coding: GBK -*-
'''
Created on 2017-12-23
@author: Administrator
'''
from test import num
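# Demonstrates printf-style (%) formatting: hex/octal with and without the '#'
# alternate-form flag (0x/0X prefix), float precision, %e/%E/%g exponent forms,
# signed and width-padded integers, and dict-based '%(name)s' substitution.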
print (("%x " % 108))
print ("%X" % 108)
print ("%#x" % 108)
print ("%#X " % 108)
print ('%f' % 1234.567890)
print ('%.2f' % 1234.67890)
print ('%E' % 1234.567890)
print ('%e' % 1234.567890)
print ('%g' % 1234.567890)
print ('%G' % 1234.567890)
print ("%e" % 11111111111111111111111111111111)
print ("%+d" % 4)
print ("%+d" % -4)
print ("we are at %d%%" % 100)
print ('Your host is: %s' % 'earth')
print ('Host:%s\tPort:%d' % ('mars', 80))
num = 123
print ('dec:%d/ oct:%#o/ hex:%#X' % (num, num, num))
print ("MM/DD/YY = %02d/%02d/%d" % (2,15, 67))
w,p = 'Web', 'page'
print ('http://xxx.yyy.zzz/%s/%s.html' % (w, p))
print ('There are %(howmany)d %(lang)s Quotation Symbols' % \
{'lang': 'Python', 'howmany': 3})
# repo: hungntt/WorldModelPlanning, path: utility/logging/base_logger.py
class BaseLogger:
def __init__(self, is_logging):
self.log_dir_root = 'utility/logging/tensorboard_runs'
self._is_logging = is_logging
def start_log(self, name):
pass
def commit_log(self):
pass
def end_log(self):
pass
def _add_text(self, tag, value, step, logger):
if not self._is_logging:
return
logger.add_text(tag=tag, text_string=value, global_step=step)
def _add_scalar(self, tag, value, step, logger):
if not self._is_logging:
return
logger.add_scalar(tag, value, step)
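
# An illustrative concrete subclass (a sketch assuming a TensorBoard backend via
# torch's SummaryWriter; the names below are not from this file):
#
#   from torch.utils.tensorboard import SummaryWriter
#
#   class TensorboardLogger(BaseLogger):
#       def start_log(self, name):
#           self._logger = SummaryWriter('{}/{}'.format(self.log_dir_root, name))
#
#       def log_scalar(self, tag, value, step):
#           self._add_scalar(tag, value, step, self._logger)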
# repo: spicycouscous/apscp-project-thing, path: djangoweb/wsgi.py
"""
WSGI config for djangoweb project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
For more information, visit
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'djangoweb.settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
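
# In production this module is typically served by a WSGI server, e.g.
# (illustrative command): gunicorn djangoweb.wsgi:application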
# repo: josecarlosjr/SDN-CONTEXT, path: ryu_multipath_1_5.py
#-*- coding: utf-8 -*-
from ryu.base import app_manager
from ryu.controller import mac_to_port, ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER, set_ev_cls
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_4_parser
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet, arp, ethernet, ipv4, ipv6, ether_types, icmp
from ryu.lib import mac, ip, hub
from ryu.topology.api import get_switch, get_link, get_all_link, get_all_switch
from ryu.app.wsgi import ControllerBase
from ryu.topology import event, switches
from termcolor import colored
from collections import defaultdict
from operator import itemgetter
from operator import attrgetter
import os
import random
import time, copy
from datetime import datetime
import pandas as pd
#MAX_PATHS = 2
IP_1 = '192.168.1.1'
IP_2 = '192.168.2.2'
IP_3 = '192.168.3.3'
IP_4 = '192.168.4.4'
IP = ['192.168.1.1','192.168.2.2','192.168.3.3','192.168.4.4']
MAX_BAND = 800 #Mbps
adjacency = defaultdict(lambda: defaultdict(lambda: None))
####################################
class ProjectController(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_4.OFP_VERSION]
    # ADDED 2018-09-26: global variables
################################
global dp, C, c, b, src, dst, first_port, last_port, out_ports, PL, PL1_3#, ipDFrame_src, arpDFrame_src, ipDFrame_dst, arpDFrame_dst
#######################################################################################################################
    # Global variables for bandwidth computation
#DP 1
global band_1_1, result_1_1, band_rx_1_1, result_rx_1_1, tx_ini_1_1, tx_fin_1_1, rx_ini_1_1, rx_fin_1_1 #dp 1 port 1
global band_1_2, result_1_2, band_rx_1_2, result_rx_1_2, tx_ini_1_2, tx_fin_1_2, rx_ini_1_2, rx_fin_1_2 #dp 1 port 2
global band_1_3, result_1_3, band_rx_1_3, result_rx_1_3, tx_ini_1_3, tx_fin_1_3, rx_ini_1_3, rx_fin_1_3 #dp 1 port 3
global tx_1_2_packet, tx_1_3_packet, rx_1_2_packet, rx_1_3_packet, L1_2, L1_3
#DP 2
global band_2_1, result_2_1, band_rx_2_1, result_rx_2_1, tx_ini_2_1, tx_fin_2_1, rx_ini_2_1, rx_fin_2_1 #dp 2 port 1
global band_2_2, result_2_2, band_rx_2_2, result_rx_2_2, tx_ini_2_2, tx_fin_2_2, rx_ini_2_2, rx_fin_2_2 #dp 2 port 2
global band_2_3, result_2_3, band_rx_2_3, result_rx_2_3, tx_ini_2_3, tx_fin_2_3, rx_ini_2_3, rx_fin_2_3 #dp 2 port 3
global tx_2_2_packet, tx_2_3_packet, rx_2_2_packet, rx_2_3_packet, L2_2, L2_3
#DP 3
global band_3_1, result_3_1, band_rx_3_1, result_rx_3_1, tx_ini_3_1, tx_fin_3_1, rx_ini_3_1, rx_fin_3_1 #dp 3 port 1
global band_3_2, result_3_2, band_rx_3_2, result_rx_3_2, tx_ini_3_2, tx_fin_3_2, rx_ini_3_2, rx_fin_3_2 #dp 3 port 2
global band_3_3, result_3_3, band_rx_3_3, result_rx_3_3, tx_ini_3_3, tx_fin_3_3, rx_ini_3_3, rx_fin_3_3 #dp 3 port 3
global tx_3_2_packet, tx_3_3_packet, rx_3_2_packet, rx_3_3_packet, L3_2, L3_3
#DP 4
global band_4_1, result_4_1, band_rx_4_1, result_rx_4_1, tx_ini_4_1, tx_fin_4_1, rx_ini_4_1, rx_fin_4_1 #dp 4 port 1
global band_4_2, result_4_2, band_rx_4_2, result_rx_4_2, tx_ini_4_2, tx_fin_4_2, rx_ini_4_2, rx_fin_4_2 #dp 4 port 2
global band_4_3, result_4_3, band_rx_4_3, result_rx_4_3, tx_ini_4_3, tx_fin_4_3, rx_ini_4_3, rx_fin_4_3 #dp 4 port 3
global tx_4_2_packet, tx_4_3_packet, rx_4_2_packet, rx_4_3_packet, L4_2, L4_3
########################################################################################################################
    # initializing global variables
C = c = b = out_ports = PL = PL1_3 = 0
#ipDFrame_src = pd.DataFrame([])
#DP 1
band_1_1 = result_1_1 = band_rx_1_1 = result_rx_1_1 = tx_ini_1_1 = tx_fin_1_1 = rx_ini_1_1 = rx_fin_1_1 = 0 #dp 1 port 1
band_1_2 = result_1_2 = band_rx_1_2 = result_rx_1_2 = tx_ini_1_2 = tx_fin_1_2 = rx_ini_1_2 = rx_fin_1_2 = 0 #dp 1 port 2
band_1_3 = result_1_3 = band_rx_1_3 = result_rx_1_3 = tx_ini_1_3 = tx_fin_1_3 = rx_ini_1_3 = rx_fin_1_3 = 0 #dp 1 port 3
tx_1_2_packet = tx_1_3_packet = rx_1_2_packet = rx_1_3_packet = L1_2 = L1_3 = 0
#DP 2
band_2_1 = result_2_1 = band_rx_2_1 = result_rx_2_1 = tx_ini_2_1 = tx_fin_2_1 = rx_ini_2_1 = rx_fin_2_1 = 0 #dp 2 port 1
band_2_2 = result_2_2 = band_rx_2_2 = result_rx_2_2 = tx_ini_2_2 = tx_fin_2_2 = rx_ini_2_2 = rx_fin_2_2 = 0 #dp 2 port 2
band_2_3 = result_2_3 = band_rx_2_3 = result_rx_2_3 = tx_ini_2_3 = tx_fin_2_3 = rx_ini_2_3 = rx_fin_2_3 = 0 #dp 2 port 2
tx_2_2_packet = tx_2_3_packet = rx_2_2_packet = rx_2_3_packet = L2_2 = L2_3 = 0
#DP3
band_3_1 = result_3_1 = band_rx_3_1 = result_rx_3_1 = tx_ini_3_1 = tx_fin_3_1 = rx_ini_3_1 = rx_fin_3_1 = 0 #dp 3 port 1
band_3_2 = result_3_2 = band_rx_3_2 = result_rx_3_2 = tx_ini_3_2 = tx_fin_3_2 = rx_ini_3_2 = rx_fin_3_2 = 0 #dp 3 port 2
band_3_3 = result_3_3 = band_rx_3_3 = result_rx_3_3 = tx_ini_3_3 = tx_fin_3_3 = rx_ini_3_3 = rx_fin_3_3 = 0 #dp 3 port 3
tx_3_2_packet = tx_3_3_packet = rx_3_2_packet = rx_3_3_packet = L3_2 = L3_3 = 0
#DP4
band_4_1 = result_4_1 = band_rx_4_1 = result_rx_4_1 = tx_ini_4_1 = tx_fin_4_1 = rx_ini_4_1 = rx_fin_4_1 = 0 #dp 4 port 1
band_4_2 = result_4_2 = band_rx_4_2 = result_rx_4_2 = tx_ini_4_2 = tx_fin_4_2 = rx_ini_4_2 = rx_fin_4_2 = 0 #dp 4 port 2
band_4_3 = result_4_3 = band_rx_4_3 = result_rx_4_3 = tx_ini_4_3 = tx_fin_4_3 = rx_ini_4_3 = rx_fin_4_3 = 0 #dp 4 port 3
tx_4_2_packet = tx_4_3_packet = rx_4_2_packet = rx_4_3_packet = L4_2 = L4_3 = 0
def __init__(self, *args, **kwargs):
super(ProjectController, self).__init__(*args, **kwargs)
#self.mac_to_port = {}
self.ipDFrame_src = self.arpDFrame_src = self.ipDFrame_dst = self.arpDFrame_dst = pd.DataFrame([])
self.topology_api_app = self
self.datapath_list = {}
self.arp_table = {}
self.switches = []
self.hosts = {}
self.multipath_group_ids = {}
self.group_ids = []
self.adjacency = defaultdict(dict)
#ADICIONADO 22/09/2018
##################################################
self.monitor_thread = hub.spawn(self._monitor)
self.eventos = []
##################################################
######################################################
    # Depth First Search algorithm
def get_paths(self, src, dst):
'''
Get all paths from src to dst using DFS (Depth First Search) algorithm
'''
if src == dst:
# host target is on the same switch
return [[src]]
paths = []
stack = [(src, [src])]
while stack:
(node, path) = stack.pop()
for next in set(self.adjacency[node].keys()) - set(path):
if next is dst:
paths.append(path + [next])
else:
stack.append((next, path + [next]))
return paths
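        # Illustrative example (assumed square topology, not hard-coded here):
        # with links 1-2, 1-3, 2-4 and 3-4, get_paths(1, 4) returns
        # [[1, 2, 4], [1, 3, 4]] (path order depends on set iteration).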
#####################################################
def add_ports_to_paths(self, paths, first_port, last_port):
'''
Add the ports that connects the switches for all paths
'''
#print ("add port to path is called")
paths_p = []
for path in paths:
p = {}
in_port = first_port
for s1, s2 in zip(path[:-1], path[1:]):
out_port = self.adjacency[s1][s2]
p[s1] = (in_port, out_port)
in_port = self.adjacency[s2][s1]
p[path[-1]] = (in_port, last_port)
paths_p.append(p)
#print "add_port_to_path", paths_p
return paths_p
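        # Illustrative result shape (assumed port numbers): for path [1, 2, 4]
        # with first_port=1 and last_port=1 this yields one dict per path, e.g.
        # {1: (1, out1), 2: (in2, out2), 4: (in4, 1)}, i.e. (in_port, out_port) per switch.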
##########################################################
def install_paths(self, src, first_port, dst, last_port, ip_src, eth_src, ip_dst, eth_dst):
computation_start = time.time()
#paths = self.get_optimal_paths(src, dst)
paths = self.get_paths(src, dst)
paths_with_ports = self.add_ports_to_paths(paths, first_port, last_port)
switches_in_paths = set().union(*paths)
for node in switches_in_paths:
dp = self.datapath_list[node]
ofp = dp.ofproto
ofp_parser = dp.ofproto_parser
ports = defaultdict(list)
actions = []
i = 0
for path in paths_with_ports:
if node in path:
in_port = path[node][0]
out_port = path[node][1]
if (out_port) not in ports[in_port]:
ports[in_port].append((out_port))
i += 1
for in_port in ports:
match_ip = ofp_parser.OFPMatch(
eth_type=0x0800,
ipv4_src=ip_src,
ipv4_dst=ip_dst
# eth_dst=eth_dst
)
match_arp = ofp_parser.OFPMatch(
eth_type=0x0806,
arp_spa=ip_src,
arp_tpa=ip_dst
# eth_dst=eth_dst
)
out_ports = ports[in_port]
#elif len(out_ports) == 1:
#print "datapath tive apenas 1 caminho:"
actions = [ofp_parser.OFPActionOutput(out_ports[0])]
self.add_flow(dp, 32766, match_ip, actions)
self.add_flow(dp, 1, match_arp, actions)
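                # IPv4 entries are installed at high priority (32766) and ARP
                # entries at priority 1; both output to the first collected port.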
return paths_with_ports[0][src][1]
############################################################
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
# print "Adding flow ", match, actions
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def _switch_features_handler(self, ev):
global dp
#print "switch_features_handler is called"
datapath = ev.msg.datapath
#dp = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
#@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
#def port_desc_stats_reply_handler(self, ev):
# switch = ev.msg.datapath
# for p in ev.msg.body:
# self.bandwidths[switch.id][p.port_no] = p.curr_speed
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(data=msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
arp_pkt = pkt.get_protocol(arp.arp)
pkt_icmp = pkt.get_protocol(icmp.icmp)
        # avoid broadcast from LLDP
if eth.ethertype == 35020:
return
if pkt.get_protocol(ipv6.ipv6): # Drop the IPV6 Packets.
match = parser.OFPMatch(eth_type=eth.ethertype)
actions = []
self.add_flow(datapath, 1, match, actions)
return None
dst = eth.dst
src = eth.src
dpid = datapath.id
if src not in self.hosts:
self.hosts[src] = (dpid, in_port)
#print src
#print dst
out_port = ofproto.OFPP_FLOOD
if arp_pkt:
src_ip = arp_pkt.src_ip
dst_ip = arp_pkt.dst_ip
if arp_pkt.opcode == arp.ARP_REPLY:
self.arp_table[src_ip] = src
print colored('ARP_REPLY','blue')
h1 = self.hosts[src]
h2 = self.hosts[dst]
                # call self.install_paths first
out_port = self.install_paths(h1[0], h1[1], h2[0], h2[1], src_ip, src, dst_ip, dst)
self.install_paths(h2[0], h2[1], h1[0], h1[1], dst_ip, dst, src_ip, src) # reverse
elif arp_pkt.opcode == arp.ARP_REQUEST:
print colored('ARP_REQUEST','blue')
if dst_ip in self.arp_table:
self.arp_table[src_ip] = src
dst_mac = self.arp_table[dst_ip]
h1 = self.hosts[src]
h2 = self.hosts[dst_mac]
out_port = self.install_paths(h1[0], h1[1], h2[0], h2[1], src_ip, src, dst_ip, dst)
self.install_paths(h2[0], h2[1], h1[0], h1[1], dst_ip, dst, src_ip, src) # reverse
actions = [parser.OFPActionOutput(out_port)]
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(
datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(event.EventSwitchEnter)
def switch_enter_handler(self, ev):
#print "switch enter handler"
switch = ev.switch.dp
ofp_parser = switch.ofproto_parser
if switch.id not in self.switches:
self.switches.append(switch.id)
self.datapath_list[switch.id] = switch
# Request port/link descriptions, useful for obtaining bandwidth
req = ofp_parser.OFPPortDescStatsRequest(switch)
#print req
switch.send_msg(req)
@set_ev_cls(event.EventSwitchLeave, MAIN_DISPATCHER)
def switch_leave_handler(self, ev):
#print ("Switch leave handler", ev)
switch = ev.switch.dp.id
if switch in self.switches:
self.switches.remove(switch)
del self.datapath_list[switch]
del self.adjacency[switch]
@set_ev_cls(event.EventLinkAdd, MAIN_DISPATCHER)
def link_add_handler(self, ev):
global src, dst, ipDFrame_src, arpDFrame_src, ipDFrame_dst, arpDFrame_dst
s1 = ev.link.src
s2 = ev.link.dst
        print '\033[1;34;47m Link Switch', s1.dpid, 'Port', s1.port_no, 'Up\033[1;m'
self.adjacency[s1.dpid][s2.dpid] = s1.port_no
self.adjacency[s2.dpid][s1.dpid] = s2.port_no
        # The global variables may still be unset; the try/except handles the resulting NameError or KeyError,
        # because the switch link-add event (event.EventLinkAdd) fires as soon as the experiment starts.
#
##########################################################
try:
#SRC
ofp_src = src.ofproto
ofp_parser_src = src.ofproto_parser
buffer_id_src = ofp_src.OFP_NO_BUFFER
#DST
ofp_dst = dst.ofproto
ofp_parser_dst = dst.ofproto_parser
buffer_id_dst = ofp_dst.OFP_NO_BUFFER
#print self.ipDFrame_src.at[i,'DST']
#print
#print self.ipDFrame_src.loc[1], '\n'
#print self.ipDFrame_src
#print
#DST = self.ipDFrame_src.loc["DST"]
#print self.ipDFrame_src.iloc[[0],[0]]
if s1.dpid == src.id:
i=0
for row in self.ipDFrame_src.iterrows():
match = ofp_parser_src.OFPMatch(eth_type=0x800, ipv4_dst=str(self.ipDFrame_src.at[i,'DST']),
ipv4_src=str(self.ipDFrame_src.at[i,'SRC']))
actions = [ofp_parser_src.OFPActionOutput(self.ipDFrame_src.at[i,'PORT'])]
self.add_flow(src, 32768, match, actions)
i += 1
i=0
for row in self.arpDFrame_src.iterrows():
match = ofp_parser_src.OFPMatch(eth_type=0x806, arp_tpa=str(self.arpDFrame_src.at[i,'TPA']),
arp_spa=str(self.arpDFrame_src.at[i,'SPA']))
actions = [ofp_parser_src.OFPActionOutput(self.arpDFrame_src.at[i,'PORT'])]
self.add_flow(src, 1, match, actions)
i += 1
self.ipDFrame_src = self.arpDFrame_src = pd.DataFrame([])
elif s1.dpid == dst.id:
i=0
for row in self.ipDFrame_dst.iterrows():
#print colored('Second FOR','red')
match = ofp_parser_dst.OFPMatch(eth_type=0x800, ipv4_dst=str(self.ipDFrame_dst.at[i,'DST']),
ipv4_src=str(self.ipDFrame_dst.at[i,'SRC']))
actions = [ofp_parser_dst.OFPActionOutput(self.ipDFrame_dst.at[i,'PORT'])]
self.add_flow(dst, 32768, match, actions)
i += 1
i=0
for row in self.arpDFrame_dst.iterrows():
match = ofp_parser_dst.OFPMatch(eth_type=0x806, arp_tpa=str(self.arpDFrame_dst.at[i,'TPA']),
arp_spa=str(self.arpDFrame_dst.at[i,'SPA']))
actions = [ofp_parser_dst.OFPActionOutput(self.arpDFrame_dst.at[i,'PORT'])]
self.add_flow(dst, 1, match, actions)
i += 1
self.ipDFrame_dst = self.arpDFrame_dst = pd.DataFrame([])
else: pass
except NameError, KeyError:
pass
@set_ev_cls(event.EventLinkDelete, MAIN_DISPATCHER)
def link_delete_handler(self, ev):
global c, adjacency, src, dst
s1 = ev.link.src
s2 = ev.link.dst
adjacency[s1.dpid][s2.dpid] = None
adjacency[s2.dpid][s1.dpid] = None
##########################################################
#Exception handling if switch already deleted
try:
del self.adjacency[s1.dpid][s2.dpid]
del self.adjacency[s2.dpid][s1.dpid]
except KeyError:
pass
    # ADDED 2018-10-14
#######################################################################
def install_controller(self, datapath):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = datapath.ofproto_parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0, instructions=inst)
datapath.send_msg(mod)
######################################################################
#===============================================================================================
    # ADDED 2018-09-22
    # Monitoring: prints statistics to the screen
###########################################################
def _monitor(self):
while True:
for dp in self.datapath_list.values():
#print
#print self.datapath_list.values()
#print
self._request_stats(dp)
            hub.sleep(1) # adjustable value: (1) = 1 second
###########################################################
    # ADDED 2018-09-22
###########################################################
@set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapath_list:
# self.logger.debug('register datapath: %016x', datapath.id)
#print 'register datapath:', datapath.id
self.datapath_list[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapath_list:
# self.logger.debug('unregister datapath: %016x', datapath.id)
#print 'unregister datapath:', datapath.id
del self.datapath_list[datapath.id]
############################################################
    # ADDED 2018-09-23
############################################################
def _request_stats(self, datapath):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
#req = parser.OFPFlowStatsRequest(datapath)
#datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
ofp = ofproto
_parser_ = parser
dp = datapath
        #print colored('dp _request_stats','blue') # displays the 4 switches on screen
#print (dp.id)
#############################################################
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
start_time = time.time()
global c, b, PL, PL1_3
####dp1
global band_1_1, result_1_1, band_rx_1_1, result_rx_1_1, tx_ini_1_1, tx_fin_1_1, rx_ini_1_1, rx_fin_1_1 #dp 1 port 1
global band_1_2, result_1_2, band_rx_1_2, result_rx_1_2, tx_ini_1_2, tx_fin_1_2, rx_ini_1_2, rx_fin_1_2 #dp 1 port 2
global band_1_3, result_1_3, band_rx_1_3, result_rx_1_3, tx_ini_1_3, tx_fin_1_3, rx_ini_1_3, rx_fin_1_3 #dp 1 port 3
global tx_1_2_packet, tx_1_3_packet, rx_1_2_packet, rx_1_3_packet, L1_2, L1_3
####dp2
global band_2_1, result_2_1, band_rx_2_1, result_rx_2_1, tx_ini_2_1, tx_fin_2_1, rx_ini_2_1, rx_fin_2_1 #dp 2 port 1
global band_2_2, result_2_2, band_rx_2_2, result_rx_2_2, tx_ini_2_2, tx_fin_2_2, rx_ini_2_2, rx_fin_2_2 #dp 2 port 2
global band_2_3, result_2_3, band_rx_2_3, result_rx_2_3, tx_ini_2_3, tx_fin_2_3, rx_ini_2_3, rx_fin_2_3 #dp 2 port 3
global tx_2_2_packet, tx_2_3_packet, rx_2_2_packet, rx_2_3_packet, L2_2, L2_3
####dp3
global band_3_1, result_3_1, band_rx_3_1, result_rx_3_1, tx_ini_3_1, tx_fin_3_1, rx_ini_3_1, rx_fin_3_1 #dp 3 port 1
global band_3_2, result_3_2, band_rx_3_2, result_rx_3_2, tx_ini_3_2, tx_fin_3_2, rx_ini_3_2, rx_fin_3_2 #dp 3 port 2
global band_3_3, result_3_3, band_rx_3_3, result_rx_3_3, tx_ini_3_3, tx_fin_3_3, rx_ini_3_3, rx_fin_3_3 #dp 3 port 3
global tx_3_2_packet, tx_3_3_packet, rx_3_2_packet, rx_3_3_packet, L3_2, L3_3
####dp4
global band_4_1, result_4_1, band_rx_4_1, result_rx_4_1, tx_ini_4_1, tx_fin_4_1, rx_ini_4_1, rx_fin_4_1 #dp 4 port 1
global band_4_2, result_4_2, band_rx_4_2, result_rx_4_2, tx_ini_4_2, tx_fin_4_2, rx_ini_4_2, rx_fin_4_2 #dp 4 port 2
global band_4_3, result_4_3, band_rx_4_3, result_rx_4_3, tx_ini_4_3, tx_fin_4_3, rx_ini_4_3, rx_fin_4_3 #dp 4 port 3
global tx_4_2_packet, tx_4_3_packet, rx_4_2_packet, rx_4_3_packet, L4_2, L4_3
#######
#######
body = ev.msg.body
dpid = ev.msg.datapath.id
datapath = ev.msg.datapath
        # seconds counter
#t = time.localtime().tm_sec
#print colored(t,'green')
################################################################################################
################################################################################################
        # select dp 1
        # SELECT PORT 1
if dpid == 1:
for stat in sorted(body, key=attrgetter('port_no')):
if stat.port_no == 1:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d ',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_1_1, stat.tx_bytes, result_1_1)
#stat.rx_packets, stat.tx_packets)
print
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_1_1 == 0: tx_ini_1_1 = stat.tx_bytes # initial byte count stored
tx_fin_1_1 = stat.tx_bytes
band_1_1 = (tx_fin_1_1-tx_ini_1_1)*8
result_1_1 = int(band_1_1/1048576)
tx_ini_1_1 = tx_fin_1_1
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_1_1 == 0: rx_ini_1_1 = stat.rx_bytes
rx_fin_1_1 = stat.rx_bytes
band_rx_1_1 = (rx_fin_1_1-rx_ini_1_1)*8
result_rx_1_1 = int(band_rx_1_1/1048576)
rx_ini_1_1 = rx_fin_1_1
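                    # Both directions use the same conversion: the byte delta over the
                    # ~1 s polling interval, times 8 bits, divided by 1048576, gives Mbit/s.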
###############################################################################
                # SELECT PORT 2
if stat.port_no == 2:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Packet_loss ')
#'Rx_packets Tx_packets Packet_loss')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d ',
#'%8d %8d %8d',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_1_2, stat.tx_bytes, result_1_2)
#stat.rx_packets, stat.tx_packets, L1_2)
#stat.rx_packets, stat.tx_packets)
#stat.rx_dropped, stat.rx_errors, stat.tx_dropped, stat.tx_errors,
#stat.properties[0].collisions, stat.properties[0].rx_crc_err, stat.properties[0].rx_frame_err,
#stat.properties[0].rx_over_err)
print
                    # packets transmitted (dp1) minus packets received (dp2), divided by
                    # the packets transmitted: the result is the % of lost packets
#if stat.tx_packets == 0: tx_1_2_packet = stat.tx_packets
#PL = rx_2_2_packet-tx_1_2_packet
#tx_1_2_packet = rx_2_2_packet
#L1_2 = (tx_1_2_packet - rx_2_2_packet)/stat.tx_packets
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_1_2 == 0: tx_ini_1_2 = stat.tx_bytes # initial byte count stored
tx_fin_1_2 = stat.tx_bytes
band_1_2 = (tx_fin_1_2-tx_ini_1_2)*8
result_1_2 = int(band_1_2/1048576)
tx_ini_1_2 = tx_fin_1_2
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_1_2 == 0: rx_ini_1_2 = stat.rx_bytes
rx_fin_1_2 = stat.rx_bytes
band_rx_1_2 = (rx_fin_1_2-rx_ini_1_2)*8
result_rx_1_2 = int(band_rx_1_2/1048576)
rx_ini_1_2 = rx_fin_1_2
###############################################################################
                # SELECT PORT 3
if stat.port_no == 3:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Packet_Loss ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d ',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_1_3, stat.tx_bytes, result_1_3)
#L1_3)
#stat.rx_dropped, stat.rx_errors, stat.tx_dropped, stat.tx_errors,
#stat.properties[0].collisions, stat.properties[0].rx_crc_err, stat.properties[0].rx_frame_err,
#stat.properties[0].rx_over_err)
print
#if stat.tx_packets == 0: tx_1_3_packet = stat.tx_packets
#PL1_3 = rx_4_2_packet-tx_1_3_packet
#tx_1_3_packet = rx_4_2_packet
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_1_3 == 0: tx_ini_1_3 = stat.tx_bytes # initial byte count stored
tx_fin_1_3 = stat.tx_bytes
band_1_3 = (tx_fin_1_3-tx_ini_1_3)*8
result_1_3 = int(band_1_3/1048576)
tx_ini_1_3 = tx_fin_1_3
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_1_3 == 0: rx_ini_1_3 = stat.rx_bytes
rx_fin_1_3 = stat.rx_bytes
band_rx_1_3 = (rx_fin_1_3-rx_ini_1_3)*8
result_rx_1_3 = int(band_rx_1_3/1048576)
rx_ini_1_3 = rx_fin_1_3
################################################################################################
        # select dp 2
if dpid == 2:
for stat in sorted(body, key=attrgetter('port_no')):
                # SELECT PORT 1
if stat.port_no == 1:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d ',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_2_1, stat.tx_bytes, result_2_1)
#stat.rx_packets, stat.tx_packets)
print
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_2_1 == 0: tx_ini_2_1 = stat.tx_bytes # initial byte count stored
tx_fin_2_1 = stat.tx_bytes
                    band_2_1 = (tx_fin_2_1-tx_ini_2_1)*8 # 8 bits
                    result_2_1 = int(band_2_1/1048576) # divide by 1 Mb (1048576)
tx_ini_2_1 = tx_fin_2_1
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_2_1 == 0: rx_ini_2_1 = stat.rx_bytes
rx_fin_2_1 = stat.rx_bytes
band_rx_2_1 = (rx_fin_2_1-rx_ini_2_1)*8
result_rx_2_1 = int(band_rx_2_1/1048576)
rx_ini_2_1 = rx_fin_2_1
###################################################################################
                # Select port 2
if stat.port_no == 2:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Packet_loss ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d ',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_2_2, stat.tx_bytes, result_2_2)
#L2_2)
print
                    # Bandwidth computation for transmitted bytes
                    # If the initial transmitted-byte count is 0
                    if tx_ini_2_2 == 0: tx_ini_2_2 = stat.tx_bytes # initial byte count stored
tx_fin_2_2 = stat.tx_bytes
band_2_2 = (tx_fin_2_2-tx_ini_2_2)*8
result_2_2 = int(band_2_2/1048576)
#print((int(band/1048576)), 'Mbit/s')
tx_ini_2_2 = tx_fin_2_2
                    # Bandwidth computation for received bytes
                    # If the received-byte count is 0
                    if rx_ini_2_2 == 0: rx_ini_2_2 = stat.rx_bytes # initial byte count stored
rx_fin_2_2 = stat.rx_bytes
band_rx_2_2 = (rx_fin_2_2-rx_ini_2_2)*8
result_rx_2_2 = int(band_rx_2_2/1048576)
rx_ini_2_2 = rx_fin_2_2
                # Select port 3
if stat.port_no == 3:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d ',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_2_3, stat.tx_bytes, result_2_3)
#stat.rx_packets, stat.tx_packets)
print
                    if stat.tx_packets: L2_3 = (tx_2_3_packet - rx_3_2_packet)/stat.tx_packets # guarded: avoids ZeroDivisionError when nothing was sent
                    # bandwidth computation for bytes transmitted on port 3
                    if tx_ini_2_3 == 0: tx_ini_2_3 = stat.tx_bytes # initial byte count stored
tx_fin_2_3 = stat.tx_bytes
band_2_3 = (tx_fin_2_3-tx_ini_2_3)*8
result_2_3 = int(band_2_3/1048576)
tx_ini_2_3 = tx_fin_2_3
                    # bandwidth computation for bytes received on port 3
if rx_ini_2_3 == 0: rx_ini_2_3 = stat.rx_bytes
rx_fin_2_3 = stat.rx_bytes
band_rx_2_3 = (rx_fin_2_3-rx_ini_2_3)*8
result_rx_2_3 = int(band_rx_2_3/1048576)
rx_ini_2_3 = rx_fin_2_3
################################################################################################
        # SELECT DP 3
if dpid == 3:
for stat in sorted(body, key=attrgetter('port_no')):
########################################################################################
                # PORT 1
if stat.port_no == 1:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d ',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_3_1, stat.tx_bytes, result_3_1)
#stat.rx_packets, stat.tx_packets)
print
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_3_1 == 0: tx_ini_3_1 = stat.tx_bytes # initial byte count stored
tx_fin_3_1 = stat.tx_bytes
band_3_1 = (tx_fin_3_1-tx_ini_3_1)*8
result_3_1 = int(band_3_1/1048576)
tx_ini_3_1 = tx_fin_3_1
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_3_1 == 0: rx_ini_3_1 = stat.rx_bytes
rx_fin_3_1 = stat.rx_bytes
band_rx_3_1 = (rx_fin_3_1-rx_ini_3_1)*8
result_rx_3_1 = int(band_rx_3_1/1048576)
rx_ini_3_1 = rx_fin_3_1
####################################################################################
                # SELECT PORT 3
if stat.port_no == 3:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d ',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_3_2, stat.tx_bytes, result_3_2)
#stat.rx_packets, stat.tx_packets)
print
#L3_2 = (tx_3_2_packet - rx_2_3_packet)/stat.tx_packets
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_3_2 == 0: tx_ini_3_2 = stat.tx_bytes # initial byte count stored
tx_fin_3_2 = stat.tx_bytes
                    band_3_2 = (tx_fin_3_2-tx_ini_3_2)*8 # multiply by 8 (bits)
                    result_3_2 = int(band_3_2/1048576) # divide by 1048576 (1 Mb)
tx_ini_3_2 = tx_fin_3_2
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_3_2 == 0: rx_ini_3_2 = stat.rx_bytes
rx_fin_3_2 = stat.rx_bytes
band_rx_3_2 = (rx_fin_3_2-rx_ini_3_2)*8
result_rx_3_2 = int(band_rx_3_2/1048576)
rx_ini_3_2 = rx_fin_3_2
###################################################################################
                    throughput3_2 = result_3_2 + result_rx_3_2
                    ###################################################################################
                    if c == 1: c += 1 # control variable was set on port 2; increment it here
                    if (throughput3_2 > MAX_BAND*0.8) and c == 2:
                        print '\033[1;31;47m Port 3 Congested\033[1;m' # congested-port message
                        c += 1
                    elif (throughput3_2 < MAX_BAND*0.8) and c == 3: # when the bandwidth normalizes
                        c = 0 # reset the control variable
                        self.send_flow_mod(datapath, stat.port_no, IP_3) # and switch the flow back to port 3
                        print '\033[1;34;47m Traffic normalized on port ', stat.port_no, '\033[1;m'
###################################################################################
                # SELECT PORT 2
if stat.port_no == 2:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
#'Rec_Dropped Rec_Errors '
#'Trans_Dropped Trans_Errors '
#'Propriedades(colisão,rx_crc_err, rx_frame_err, rx_over_err ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d '
#'%8d %8d %8d %8d '
#'%s %s %s %s',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_3_3, stat.tx_bytes, result_3_3)
#stat.rx_packets, stat.tx_packets)
#stat.rx_dropped, stat.rx_errors, stat.tx_dropped, stat.tx_errors,
#stat.properties[0].collisions, stat.properties[0].rx_crc_err, stat.properties[0].rx_frame_err,
#stat.properties[0].rx_over_err)
print
#L3_3 = (tx_3_3_packet - rx_4_3_packet)/stat.tx_packets
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_3_3 == 0: tx_ini_3_3 = stat.tx_bytes # initial byte count stored
tx_fin_3_3 = stat.tx_bytes
                    band_3_3 = (tx_fin_3_3-tx_ini_3_3)*8 # multiply by 8 (bits)
                    result_3_3 = int(band_3_3/1048576) # divide by 1 Mb (1048576)
tx_ini_3_3 = tx_fin_3_3
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_3_3 == 0: rx_ini_3_3 = stat.rx_bytes
rx_fin_3_3 = stat.rx_bytes
band_rx_3_3 = (rx_fin_3_3-rx_ini_3_3)*8
result_rx_3_3 = int(band_rx_3_3/1048576)
rx_ini_3_3 = rx_fin_3_3
throughput3_3 = result_3_3 + result_rx_3_3
###################################################################################
                    # Context rules: severe congestion.
                    # If throughput exceeds 80% of the bandwidth, the output port will be switched:
                    # the port status is modified and the flow direction changed.
                    if (throughput3_3 > MAX_BAND*0.8) and c == 0: # control variable c
                        start_time_1 = time.time()
                        time_2 = start_time_1 - start_time
                        # save the event-capture time
                        captura = open('cenario_2_captura.txt','a')
                        captura.writelines(str(time_2))
                        captura.writelines("\n")
                        captura.close()
                        print '\033[1;31;47m Port 2 Congested\033[1;m'
                        print '\033[1;34;47m Redirecting Traffic\033[1;m'
                        self.send_flow_mod(datapath, stat.port_no, IP_3)
                        c += 1 # add 1 to the control variable
                        #elif (throughput3_3 < MAX_BAND*0.8) and c > 1:
                        #    c = 0
                        #    print
                        #    print '\033[1;34;47m Restoring previous flow\033[1;m'
                        #    print
                        total_time = time.time() - start_time
                        # Save the inference time to a TXT file
                        inference = open('cenario_2_inference.txt', 'a')
                        inference.writelines(str(total_time))
                        inference.writelines("\n")
                        inference.close()
                        print "information saved"
                    elif (throughput3_3 > MAX_BAND*0.6) and b == 0:
                        print "Light congestion"
else:
pass
################################################################################################
        # SELECT DP 4
if dpid == 4:
for stat in sorted(body, key=attrgetter('port_no')):
                # SELECT PORT 1
if stat.port_no == 1:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
#'Rec_Dropped Rec_Errors '
#'Trans_Dropped Trans_Errors '
#'Propriedades(colisão,rx_crc_err, rx_frame_err, rx_over_err '
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d ',
#'%8d %8d %8d %8d '
#'%s %s %s %s',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_4_1, stat.tx_bytes, result_4_1)
#stat.rx_packets, stat.tx_packets)
#stat.rx_dropped, stat.rx_errors, stat.tx_dropped, stat.tx_errors,
#stat.properties[0].collisions, stat.properties[0].rx_crc_err, stat.properties[0].rx_frame_err,
#stat.properties[0].rx_over_err)
print
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_4_1 == 0: tx_ini_4_1 = stat.tx_bytes # initial byte count stored
tx_fin_4_1 = stat.tx_bytes
                    band_4_1 = (tx_fin_4_1-tx_ini_4_1)*8 # 8 bits
                    result_4_1 = int(band_4_1/1048576) # divide the bandwidth by 1 Mb
tx_ini_4_1 = tx_fin_4_1
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_4_1 == 0: rx_ini_4_1 = stat.rx_bytes
rx_fin_4_1 = stat.rx_bytes
band_rx_4_1 = (rx_fin_4_1-rx_ini_4_1)*8
result_rx_4_1 = int(band_rx_4_1/1048576)
rx_ini_4_1 = rx_fin_4_1
#######################################################################################
                # SELECT PORT 2
if stat.port_no == 2:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets ')
#'Rec_Dropped Rec_Errors '
#'Trans_Dropped Trans_Errors '
#'Propriedades(colisão,rx_crc_err, rx_frame_err, rx_over_err ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d ',
#'%8d %8d %8d %8d '
#'%s %s %s %s',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_4_2, stat.tx_bytes, result_4_2)
#stat.rx_packets, stat.tx_packets)
#stat.rx_dropped, stat.rx_errors, stat.tx_dropped, stat.tx_errors,
#stat.properties[0].collisions, stat.properties[0].rx_crc_err, stat.properties[0].rx_frame_err,
#stat.properties[0].rx_over_err)
print
#L4_2 = (tx_4_2_packet - rx_1_3_packet) /stat.tx_packets
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_4_2 == 0: tx_ini_4_2 = stat.tx_bytes # initial byte count stored
tx_fin_4_2 = stat.tx_bytes
band_4_2 = (tx_fin_4_2-tx_ini_4_2)*8
result_4_2 = int(band_4_2/1048576)
tx_ini_4_2 = tx_fin_4_2
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_4_2 == 0: rx_ini_4_2 = stat.rx_bytes
rx_fin_4_2 = stat.rx_bytes
band_rx_4_2 = (rx_fin_4_2-rx_ini_4_2)*8
result_rx_4_2 = int(band_rx_4_2/1048576)
rx_ini_4_2 = rx_fin_4_2
######################################################################################
                # SELECT PORT 3
if stat.port_no == 3:
self.logger.info('switch '
'Port_no '
'Rec_bytes Rec_Banda '
'Trans_bytes Trans_banda ')
#'Rx_packets Tx_packets '
#'Rec_Dropped Rec_Errors '
#'Trans_Dropped Trans_Errors '
#'Propriedades(colisão,rx_crc_err, rx_frame_err, rx_over_err ')
self.logger.info('%04x %8x '
'%8d %8d Mbps %8d %8d Mbps',
#'%8d %8d '
#'%8d %8d %8d %8d '
#'%s %s %s %s',
ev.msg.datapath.id, stat.port_no,
stat.rx_bytes, result_rx_4_3, stat.tx_bytes, result_4_3)
#stat.rx_packets, stat.tx_packets, L4_3)
#stat.rx_dropped, stat.rx_errors, stat.tx_dropped, stat.tx_errors,
#stat.properties[0].collisions, stat.properties[0].rx_crc_err, stat.properties[0].rx_frame_err,
#stat.properties[0].rx_over_err)
print
#L4_3 = (tx_4_3_packet - rx_3_3_packet) /stat.tx_packets
                    # Bandwidth computation for transmitted bytes (tx_bytes)
                    # If the initial transmitted-byte count is 0
                    if tx_ini_4_3 == 0: tx_ini_4_3 = stat.tx_bytes # initial byte count stored
tx_fin_4_3 = stat.tx_bytes
band_4_3 = (tx_fin_4_3-tx_ini_4_3)*8
result_4_3 = int(band_4_3/1048576)
tx_ini_4_3 = tx_fin_4_3
                    # Bandwidth computation for received bytes (rx_bytes)
if rx_ini_4_3 == 0: rx_ini_4_3 = stat.rx_bytes
rx_fin_4_3 = stat.rx_bytes
band_rx_4_3 = (rx_fin_4_3-rx_ini_4_3)*8
result_rx_4_3 = int(band_rx_4_3/1048576)
rx_ini_4_3 = rx_fin_4_3
###############################################################################################
###############################################################################################
    # ADDED 2018-10-24
    # FUNCTION TO MODIFY THE FLOW, SCENARIO 02
def send_flow_mod(self, datapath, out_ports, ip_n):
        # Initial time variable for removing the flow entries
#start = time.time()
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
table_id = 0
idle_timeout = hard_timeout = 0
priority = 32766
importance = 0
buffer_id = ofp.OFP_NO_BUFFER
##########################################################################################
        # Match field (as in flow table 0)
match_ip = ofp_parser.OFPMatch(eth_type=0x800, ipv4_src=ip_n, ipv4_dst='192.168.1.1')
match_arp = ofp_parser.OFPMatch(eth_type=0x806, arp_spa=ip_n, arp_tpa='192.168.1.1')
##########################################################################################
        # remove the flow matching ipv4
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
        # OFPFC_DELETE to delete
req = ofp_parser.OFPFlowMod(datapath, cookie, cookie_mask,
table_id, ofp.OFPFC_DELETE,
idle_timeout, hard_timeout,
priority, buffer_id,
ofp.OFPP_ANY, ofp.OFPG_ANY,
ofp.OFPFF_SEND_FLOW_REM,
importance,
match_ip, inst)
datapath.send_msg(req)
###########################################################################################
        # remove the flow matching arp
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
#OFPFC_DELETE
req2 = ofp_parser.OFPFlowMod(datapath, cookie, cookie_mask,
table_id, ofp.OFPFC_DELETE,
idle_timeout, hard_timeout,
priority, buffer_id,
ofp.OFPP_ANY, ofp.OFPG_ANY,
ofp.OFPFF_SEND_FLOW_REM,
importance,
match_arp, inst)
datapath.send_msg(req2)
############################################################################################
        # Add a new flow pointing to the other port
if out_ports == 3: out_ports = out_ports - 1
elif out_ports == 2: out_ports +=1
else: pass
actions = [ofp_parser.OFPActionOutput(out_ports)]
self.add_flow(datapath, 32767, match_ip, actions)
self.add_flow(datapath, 32767, match_arp, actions)
        # time variable to measure the flow-update time
        # final time - initial time
        #end_time = time.time() - start
        #print "Flow tables modified in ", end_time
        # Save the time to a TXT file
        #flow_mod_time = open('flow_mod_time.txt', 'a')
        #flow_mod_time.writelines(str(end_time))
        #flow_mod_time.writelines("\n")
        #flow_mod_time.close()
        #print "information saved"
#############################################################################################
    ## ADDING THE GROUP TABLE, SCENARIO 03?
def send_features_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPFeaturesRequest(datapath)
datapath.send_msg(req)
    # SCENARIO 03?
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
msg = ev.msg
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
cookie = cookie_mask = 0
table_id = 0
idle_timeout = hard_timeout = 0
priority = 32767
importance = 0
buffer_id = ofproto.OFP_NO_BUFFER
port_1 = 2
queue_1 = parser.OFPActionSetQueue(0)
actions_1 = [queue_1, parser.OFPActionOutput(port_1)]
port_2 = 2
queue_2 = parser.OFPActionSetQueue(0)
actions_2 = [queue_2, parser.OFPActionOutput(port_2)]
weight_1 = 10
weight_2 = 90
watch_port = ofproto_v1_4.OFPP_ANY
watch_group = ofproto_v1_4.OFPQ_ALL
buckets = [
parser.OFPBucket(weight_1, watch_port, watch_group, actions_1),
parser.OFPBucket(weight_2, watch_port, watch_group, actions_2)]
group_id = 50
req = parser.OFPGroupMod(datapath, datapath.ofproto.OFPFC_ADD,
datapath.ofproto.OFPGT_SELECT, group_id, buckets)
datapath.send_msg(req)
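        # With OFPGT_SELECT the switch sends each packet/flow through one bucket,
        # chosen roughly in proportion to the weights (10 vs 90 here); the exact
        # selection algorithm is switch-dependent.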
###############################################################################################
###############################################################################################
    # REQUEST FOR FLOW ENTRIES, SCENARIO 01
def send_flow_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
        # REQUEST FOR THE FLOW ENTRY MATCHING IP
match_ip = ofp_parser.OFPMatch(eth_type=0x800)
req = ofp_parser.OFPFlowStatsRequest(datapath, 0,
ofp.OFPTT_ALL,
ofp.OFPP_ANY, ofp.OFPG_ANY,
cookie, cookie_mask,
match_ip)
datapath.send_msg(req)
        # REQUEST FOR THE FLOW ENTRY MATCHING ARP
match_arp = ofp_parser.OFPMatch(eth_type=0x806)
req = ofp_parser.OFPFlowStatsRequest(datapath, 0,
ofp.OFPTT_ALL,
ofp.OFPP_ANY, ofp.OFPG_ANY,
cookie, cookie_mask,
match_arp)
datapath.send_msg(req)
    ####################################################################################################
    # REPLY AND DISPLAY FOR THE FLOW-ENTRY REQUEST, SCENARIO 01
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def flow_stats_reply_handler(self, ev):
global src, dst, first_port, last_port, ipDFrame_src, arpFrame_src, ipFrame_dst, arpFrame_dst, resultado
#SRC
ofp_src = src.ofproto
ofp_parser_src = src.ofproto_parser
buffer_id_src = ofp_src.OFP_NO_BUFFER
#DST
ofp_dst = dst.ofproto
ofp_parser_dst = dst.ofproto_parser
buffer_id_dst = ofp_dst.OFP_NO_BUFFER
######################################
cookie = cookie_mask = 0
table_id = 0
idle_timeout = hard_timeout = 0
priority = 32766
importance = 0
ips = [ip_src, ip_dst]
flows = []
#for stat in ev.msg.body:
if ev.msg.datapath.id == src.id:
for stat in sorted(ev.msg.body, key=attrgetter('match')):
flows.append('table_id=%s '
'duration_sec=%d duration_nsec=%d '
'priority=%d '
'idle_timeout=%d hard_timeout=%d flags=0x%04x '
'importance=%d cookie=%d packet_count=%d '
'byte_count=%d match=%s instructions=%s' %
(stat.table_id,
stat.duration_sec, stat.duration_nsec,
stat.priority,
stat.idle_timeout, stat.hard_timeout,
stat.flags, stat.importance,
stat.cookie, stat.packet_count, stat.byte_count,
stat.match, stat.instructions))
#self.logger.info('FlowStats: %s', flows)
                # DELETE/MODIFY SRC FLOW ENTRIES FOR IP
if stat.match['eth_type'] == 2048 and stat.instructions[0].actions[0].port == first_port:
                    # Create a DataFrame to store the flow entries that will be deleted
self.ipDFrame_src = self.ipDFrame_src.append(pd.DataFrame({
'SRC': [stat.match['ipv4_src']],
'DST': [stat.match['ipv4_dst']],
'PORT':[stat.instructions[0].actions[0].port]}), ignore_index=True)
#print colored('ipDFrame_src','blue')
#print(self.ipDFrame_src)
match_ip = ofp_parser_src.OFPMatch(eth_type=0x800, ipv4_dst=stat.match['ipv4_dst'],
ipv4_src=stat.match['ipv4_src'])
actions = [ofp_parser_src.OFPActionOutput(ofp_src.OFPP_NORMAL, 0)]
inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
actions)]
                    # DELETE THE STORED FLOW ENTRIES
req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
table_id, ofp_src.OFPFC_DELETE,
idle_timeout, hard_timeout,
priority, buffer_id_src,
ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
ofp_src.OFPFF_SEND_FLOW_REM,
importance, match_ip, inst)
src.send_msg(req)
                    # ADD A PATH BETWEEN ENDPOINTS FOR THE SRC DP
                    #print "SRC DP ENDPOINT"
#match_ip = ofp_parser_src.OFPMatch(eth_type=0x800, ipv4_src=ip_src, ipv4_dst=ip_dst)
                    # CHOOSE THE PORT THAT IS UP
if first_port == 2: out_put = first_port + 1
elif first_port == 3: out_put = first_port - 1
else: pass
#actions = [ofp_parser_src.OFPActionOutput(out_put)]
#inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
# actions)]
#req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
# table_id, ofp_src.OFPFC_ADD,
# idle_timeout, hard_timeout,
# priority, buffer_id_src,
# ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
# ofp_src.OFPFF_SEND_FLOW_REM,
# importance, match_ip, inst)
#src.send_msg(req)
#self.add_flow(src, 32767, match_ip, actions)
                    # TO IMPROVE
                    # CHECK WHETHER A PATH TO DST 192.168.1.1 EXISTS; IF NOT, ADD ONE
#if stat.match['ipv4_dst'] == '192.168.1.1' and stat.instructions[0].actions[0].port == out_put:
# print "BREAK"
# break
if src.id == 1:
print "POP SRC IP"
dest_ip = ofp_parser_src.OFPMatch(eth_type=0x800, ipv4_src='192.168.1.1', ipv4_dst=stat.match['ipv4_dst'])
actions = [ofp_parser_src.OFPActionOutput(out_put)]
inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
table_id, ofp_src.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_src,
ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
0,
1, dest_ip, inst)
src.send_msg(req)
pass
elif stat.match['ipv4_dst'] == '192.168.1.1' and stat.instructions[0].actions[0].port != out_put:
print "SRC IP 192.168.1.1 adicionado"
dest_ip = ofp_parser_src.OFPMatch(eth_type=0x800, ipv4_src=ip_src, ipv4_dst='192.168.1.1')
actions = [ofp_parser_src.OFPActionOutput(out_put)]
#self.add_flow(src, 32767, dest_ip, actions)
inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
table_id, ofp_src.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_src,
ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
0,
1, dest_ip, inst)
#ofp_src.OFPFF_SEND_FLOW_REM
src.send_msg(req)
                # MODIFY SRC FLOW ENTRIES FOR ARP
elif stat.match['eth_type'] == 2054 and stat.instructions[0].actions[0].port == first_port:
                    # Create a DF with the ARP IPs and ports that will be deleted
self.arpDFrame_src = self.arpDFrame_src.append(pd.DataFrame({
'SPA': [stat.match['arp_spa']],
'TPA': [stat.match['arp_tpa']],
'PORT':[stat.instructions[0].actions[0].port]}), ignore_index=True)
match_arp = ofp_parser_src.OFPMatch(eth_type=0x806, arp_tpa=stat.match['arp_tpa'],
arp_spa=stat.match['arp_spa'])
actions = [ofp_parser_src.OFPActionOutput(ofp_src.OFPP_NORMAL, 0)]
inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
actions)]
                    # DELETE THE STORED FLOW ENTRIES
req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
table_id, ofp_src.OFPFC_DELETE,
idle_timeout, hard_timeout,
1, buffer_id_src,
ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
ofp_src.OFPFF_SEND_FLOW_REM,
importance, match_arp, inst)
src.send_msg(req)
                    # ADD A PATH BETWEEN ENDPOINTS FOR THE SRC DP
                    #print "SRC DP ARP ENDPOINT"
#match_arp = ofp_parser_src.OFPMatch(eth_type=0x806, arp_spa=ip_src, arp_tpa=ip_dst)
#ESCOLHE A PORTA UP
if first_port == 2: out_put = first_port + 1
elif first_port == 3: out_put = first_port - 1
else: pass
#actions = [ofp_parser_src.OFPActionOutput(out_put)]
#self.add_flow(src, 32767, match_arp, actions)
#inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
# actions)]
#req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
# table_id, ofp_src.OFPFC_ADD,
# idle_timeout, hard_timeout,
# priority, buffer_id_src,
# ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
# ofp_src.OFPFF_SEND_FLOW_REM,
# importance, match_arp, inst)
#src.send_msg(req)
                    #TODO: IMPROVE
                    #CHECKS WHETHER THERE IS A PATH TO DST 192.168.1.1; IF NOT, ADDS ONE
#if stat.match['arp_tpa'] == '192.168.1.1' and stat.instructions[0].actions[0].port == out_put:
# print "BREAK"
# break
if src.id == 1:
print "POP SRC ARP"
dest_ip = ofp_parser_src.OFPMatch(eth_type=0x806, arp_spa='192.168.1.1', arp_tpa=stat.match['arp_tpa'])
actions = [ofp_parser_src.OFPActionOutput(out_put)]
inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
table_id, ofp_src.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_src,
ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
0,
1, dest_ip, inst)
src.send_msg(req)
continue
elif stat.match['arp_tpa'] == '192.168.1.1' and stat.instructions[0].actions[0].port != out_put:
print "SRC ARP 192.168.1.1"
dest_ip = ofp_parser_src.OFPMatch(eth_type=0x806, arp_spa=ip_src, arp_tpa='192.168.1.1')
actions = [ofp_parser_src.OFPActionOutput(out_put)]
#self.add_flow(src, 32767, dest_ip, actions)
inst = [ofp_parser_src.OFPInstructionActions(ofp_src.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_src.OFPFlowMod(src, cookie, cookie_mask,
table_id, ofp_src.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_src,
ofp_src.OFPP_ANY, ofp_src.OFPG_ANY,
0,
1, dest_ip, inst)
src.send_msg(req)
                #elif stat.match['eth_type'] != 2048:
                #    print "how many times does this line appear"
                else: continue
        #DST DATAPATH
elif ev.msg.datapath.id == dst.id:
for stat in sorted(ev.msg.body, key=attrgetter('match')):
flows.append('table_id=%s '
'duration_sec=%d duration_nsec=%d '
'priority=%d '
'idle_timeout=%d hard_timeout=%d flags=0x%04x '
'importance=%d cookie=%d packet_count=%d '
'byte_count=%d match=%s instructions=%s' %
(stat.table_id, stat.duration_sec,
stat.duration_nsec, stat.priority,
stat.idle_timeout, stat.hard_timeout,
stat.flags, stat.importance,
stat.cookie, stat.packet_count, stat.byte_count,
stat.match, stat.instructions))
                #DELETES/MODIFIES THE DST IP FLOW ENTRIES
                if stat.match['eth_type'] == 2048 and stat.instructions[0].actions[0].port == last_port:
                    #Builds a DataFrame with the port and IP info that will be deleted for the DST switch
self.ipDFrame_dst = self.ipDFrame_dst.append(pd.DataFrame({
'SRC': [stat.match['ipv4_src']],
'DST': [stat.match['ipv4_dst']],
'PORT': [stat.instructions[0].actions[0].port]}), ignore_index=True)
match_ip = ofp_parser_dst.OFPMatch(eth_type=0x800, ipv4_dst=stat.match['ipv4_dst'],
ipv4_src=stat.match['ipv4_src'])
actions = [ofp_parser_dst.OFPActionOutput(ofp_dst.OFPP_NORMAL, 0)]
inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
table_id, ofp_dst.OFPFC_DELETE,
idle_timeout, hard_timeout,
priority, buffer_id_dst,
ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
ofp_dst.OFPFF_SEND_FLOW_REM,
importance, match_ip, inst)
dst.send_msg(req)
                    #ADDS AN END-TO-END PATH FOR THE DST DP
                    #print "DST DP IP ENDPOINT"
                    #match_ip = ofp_parser_dst.OFPMatch(eth_type=0x800, ipv4_src=ip_dst, ipv4_dst=ip_src)
                    #CHOOSE THE UP PORT
if last_port == 2: out_put = last_port + 1
elif last_port == 3: out_put = last_port - 1
else: pass
#actions = [ofp_parser_dst.OFPActionOutput(out_put)]
#self.add_flow(dst, 32767, match_ip, actions)
#inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
# actions)]
#req = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
# table_id, ofp_dst.OFPFC_ADD,
# idle_timeout, hard_timeout,
# priority, buffer_id_dst,
# ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
# ofp_dst.OFPFF_SEND_FLOW_REM,
# importance, match_ip, inst)
#dst.send_msg(req)
                    #TODO: IMPROVE
                    #CHECKS WHETHER THERE IS A PATH TO DST 192.168.1.1; IF NOT, ADDS ONE
#if stat.match['ipv4_dst'] == '192.168.1.1' and stat.instructions[0].actions[0].port == out_put:
# print "BREAK DST IP"
# break
if dst.id == 1:
dest_ip = ofp_parser_dst.OFPMatch(eth_type=0x800, ipv4_src='192.168.1.1', ipv4_dst=stat.match['ipv4_src'])
actions = [ofp_parser_dst.OFPActionOutput(out_put)]
inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
actions)]
req2 = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
table_id, ofp_dst.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_dst,
ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
0,
1, dest_ip, inst)
dst.send_msg(req2)
elif stat.match['ipv4_dst'] == '192.168.1.1' and stat.instructions[0].actions[0].port != out_put:
print "DST IP 192.168.1.1"
dest_ip = ofp_parser_dst.OFPMatch(eth_type=0x800, ipv4_src=ip_dst, ipv4_dst='192.168.1.1')
actions = [ofp_parser_dst.OFPActionOutput(out_put)]
#self.add_flow(dst, 32767, dest_ip, actions)
inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
table_id, ofp_dst.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_dst,
ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
0,
1, dest_ip, inst)
dst.send_msg(req)
else: pass
#ofp_dst.OFPFF_SEND_FLOW_REM
                #DELETES/MODIFIES THE DST ARP FLOW ENTRIES
                elif stat.match['eth_type'] == 2054 and stat.instructions[0].actions[0].port == last_port:
                    #Builds a DataFrame with the ARP port and IP info that will be deleted for the DST switch
self.arpDFrame_dst = self.arpDFrame_dst.append(pd.DataFrame({
'SPA': [stat.match['arp_spa']],
'TPA': [stat.match['arp_tpa']],
'PORT': [stat.instructions[0].actions[0].port]}), ignore_index=True)
match_arp = ofp_parser_dst.OFPMatch(eth_type=0x806, arp_tpa=stat.match['arp_tpa'],
arp_spa=stat.match['arp_spa'])
actions = [ofp_parser_dst.OFPActionOutput(ofp_src.OFPP_NORMAL, 0)]
inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
table_id, ofp_dst.OFPFC_DELETE,
idle_timeout, hard_timeout,
1, buffer_id_dst,
ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
ofp_dst.OFPFF_SEND_FLOW_REM,
importance, match_arp, inst)
dst.send_msg(req)
                    #ADDS AN END-TO-END PATH FOR THE DST DP
                    #print "DST DP ARP ENDPOINTS"
#match_arp = ofp_parser_dst.OFPMatch(eth_type=0x806, ipv4_src=ip_dst, ipv4_dst=ip_src)
if last_port == 2: out_put = last_port + 1
elif last_port == 3: out_put = last_port - 1
else: pass
#actions = [ofp_parser_dst.OFPActionOutput(out_put)]
#self.add_flow(dst, 32767, match_arp, actions)
#inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
# actions)]
#req = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
# table_id, ofp_dst.OFPFC_ADD,
# idle_timeout, hard_timeout,
# priority, buffer_id_dst,
# ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
# ofp_dst.OFPFF_SEND_FLOW_REM,
# importance, match_arp, inst)
#dst.send_msg(req)
#################
#if stat.match['arp_tpa'] == '192.168.1.1' and stat.instructions[0].actions[0].port == out_put:
# print "BREAK DST ARP"
# break
if dst.id == 1:
dest_ip = ofp_parser_dst.OFPMatch(eth_type=0x806, arp_spa='192.168.1.1', arp_tpa=stat.match['arp_spa'])
actions = [ofp_parser_dst.OFPActionOutput(out_put)]
inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
actions)]
req2 = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
table_id, ofp_dst.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_dst,
ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
0,
1, dest_ip, inst)
dst.send_msg(req2)
elif stat.match['arp_tpa'] == '192.168.1.1' and stat.instructions[0].actions[0].port != out_put:
print "DST ARP 192.168.1.1"
dest_ip = ofp_parser_dst.OFPMatch(eth_type=0x806, arp_spa=ip_dst, arp_tpa='192.168.1.1')
actions = [ofp_parser_dst.OFPActionOutput(out_put)]
#self.add_flow(dst, 32767, dest_ip, actions)
inst = [ofp_parser_dst.OFPInstructionActions(ofp_dst.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser_dst.OFPFlowMod(dst, cookie, cookie_mask,
table_id, ofp_dst.OFPFC_ADD,
idle_timeout, hard_timeout,
32767, buffer_id_dst,
ofp_dst.OFPP_ANY, ofp_dst.OFPG_ANY,
0,
1, dest_ip, inst)
#ofp_dst.OFPFF_SEND_FLOW_REM
dst.send_msg(req)
else: pass
    #ADDED 2018-09-23
    #Displays the switch port status
    #class used: ryu.controller.controller.Datapath
    #ryu.ofproto.ofproto_v1_4_parser.OFPPort
    #ryu.ofproto.ofproto_v1_4
    #flags OFPPS_LINK_DOWN
    #############################################################################
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def port_status_handler(self, ev):
#start_time = time.time()
start_time = datetime.now()
print(start_time.microsecond)
        #variables used in this function
global C, src_id, dst_id, src, dst, first_port, last_port, ip_src, ip_dst
#global mac_addr_1_1, mac_addr_1_2, mac_addr_1_3, mac_addr_2_1, mac_addr_2_2, mac_addr_2_3
#global mac_addr_3_1, mac_addr_3_2, mac_addr_3_3, mac_addr_4_1, mac_addr_4_2, mac_addr_4_3
#eth_src = eth_dst = None
        msg = ev.msg #store the event message
        dp = msg.datapath #dp.id
ofp = dp.ofproto
parser = dp.ofproto_parser
        if msg.reason == ofp.OFPPR_ADD:  #use the port-status reason; msg.desc.state holds OFPPS_* flags, not OFPPR_*
            print 'link added'
if msg.desc.state == ofp.OFPPS_LINK_DOWN:
#print "STATE", msg.desc.state
#start_time_1 = time.time()
start_time_1 = datetime.now()
print(start_time_1.microsecond)
            time_1_2 = start_time_1 - start_time
            print "event capture time =", time_1_2
            #Save the time to a TXT file
            captura = open('cenario_1_captura.txt', 'a')
            captura.writelines(str(time_1_2))
            captura.writelines("\n")
            captura.close()
            print "capture time saved"
#print dp.id
print
            print '\033[1;31;47m Interface name:', msg.desc.name, '\033[1;m'
            print '\033[1;31;47m Port: ', msg.desc.port_no, 'Port status DOWN\033[1;m'
            if (C == 0): #stores the source dp and in_port on the first iteration (C == 0)
                src_id = dp.id
                first_port = msg.desc.port_no
                dst_id = 0
            elif (C != 0): #stores the destination dp and out_port after the first iteration
                dst_id = dp.id
                last_port = msg.desc.port_no
                #if (C > 0 and src and dst and first_port and last_port): ip_src = ip_dst = None #initialize the variables
            else: pass
            C += 1 #increment the control variable
            #store the MAC address of interfaces 2 and 3 of the datapaths
            #eth_src
#if C == 1:
if src_id == 1: ip_src = IP_1
elif src_id== 2: ip_src = IP_2
elif src_id == 3: ip_src = IP_3
elif src_id == 4: ip_src = IP_4
else: pass
            #store the MAC address of interfaces 2 and 3 of the datapaths
            #eth_dst
#if C == 2:
if dst_id == 1: ip_dst = IP_1
elif dst_id == 2: ip_dst = IP_2
elif dst_id == 3: ip_dst = IP_3
elif dst_id == 4: ip_dst = IP_4
else: pass
            if (C == 2):
                C = 0 #reset the control variable once it reaches 2
                print '\033[1;31;47m Deleting the flow tables\033[1;m'
if src_id and dst_id:
for datapath in self.datapath_list.values():
if datapath.id == src_id: src = datapath
if datapath.id == dst_id: dst = datapath
                #print '\033[1;42m Redirecting the traffic\033[1;m'
                #REMOVE FLOW ENTRIES
self.send_flow_stats_request(src)
self.send_flow_stats_request(dst)
#self.send_flow_mod(src, first_port, ip_src)
#self.send_flow_mod(dst, last_port, ip_dst)
#DELETE CONTROLLER SRC
#match = src.ofproto_parser.OFPMatch(eth_type=0x88cc)
#actions = [src.ofproto_parser.OFPActionOutput(src.ofproto.OFPP_CONTROLLER, src.ofproto.OFPCML_NO_BUFFER)]
                #REMOVE TABLE 0
                #self.remove_flows(src, 0)#calls the function to remove flows from the adjacent dp
                #self.remove_flows(dst, 0)#calls the function to remove flows from the adjacent dp
#self.install_controller(src)
#self.install_controller(dst)
                #time measured for the tables being deleted and rewritten
                end_time = datetime.now() - start_time_1 #start_time_1 is a datetime; time.time() - datetime would raise TypeError
                print "Inference time ", end_time
                #Save the time to a TXT file
                inference = open('cenario_1_inference.txt', 'a')
                inference.writelines(str(end_time))
                inference.writelines("\n")
                inference.close()
                #print "inference time saved"
                soma = time_1_2 + end_time #sum of the capture and inference durations (adding the two datetime objects would raise TypeError)
                #Save the capture + inference sum
summe = open('cenario_1_soma.txt', 'a')
summe.writelines(str(soma))
summe.writelines("\n")
summe.close()
else:
reason = 'UNKNOWN'
pass
| [
"[email protected]"
] | |
729039bc1346bdb0938344b828fd0e78aca84c84 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /third_party/blink/tools/blinkpy/tool/commands/queries_unittest.py | 9d7214548f8855b08c2b70d812721dd9d733433e | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 7,907 | py | # Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import unittest
from blinkpy.common.system.output_capture import OutputCapture
from blinkpy.tool.commands.queries import PrintBaselines, PrintExpectations
from blinkpy.tool.mock_tool import MockBlinkTool
class PrintExpectationsTest(unittest.TestCase):
def run_test(self, tests, expected_stdout, platform='test-win-win7', **kwargs):
options_defaults = {
'all': False, 'csv': False, 'full': False, 'platform': platform,
'include_keyword': [], 'exclude_keyword': [], 'paths': False,
}
options_defaults.update(kwargs)
options = optparse.Values(dict(**options_defaults))
tool = MockBlinkTool()
tool.port_factory.all_port_names = lambda: [
'test-linux-trusty', 'test-linux-precise',
'test-mac-mac10.11', 'test-mac-mac10.10',
'test-win-win10', 'test-win-win7'
]
command = PrintExpectations()
oc = OutputCapture()
try:
oc.capture_output()
command.execute(options, tests, tool)
finally:
stdout, _, _ = oc.restore_output()
self.assertMultiLineEqual(stdout, expected_stdout)
def test_basic(self):
self.run_test(['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'))
def test_multiple(self):
self.run_test(['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win10\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'
'\n'
'// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'),
platform='test-win-*')
def test_full(self):
self.run_test(['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win7\n'
'Bug(test) failures/expected/text.html [ Failure ]\n'
'Bug(test) failures/expected/timeout.html [ Timeout ]\n'),
full=True)
def test_exclude(self):
self.run_test(['failures/expected/text.html', 'failures/expected/crash.html'],
('// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'),
exclude_keyword=['crash'])
def test_include(self):
self.run_test(['failures/expected/text.html', 'failures/expected/crash.html'],
('// For test-win-win7\n'
'failures/expected/crash.html\n'),
include_keyword=['crash'])
def test_csv(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
('test-win-win7,failures/expected/image.html,Bug(test),,FAIL\n'
'test-win-win7,failures/expected/text.html,Bug(test),,FAIL\n'),
csv=True)
def test_paths(self):
self.run_test([],
('LayoutTests/TestExpectations\n'
'LayoutTests/NeverFixTests\n'
'LayoutTests/StaleTestExpectations\n'
'LayoutTests/SlowTests\n'),
paths=True)
class PrintBaselinesTest(unittest.TestCase):
def setUp(self):
self.oc = None
self.tool = MockBlinkTool()
self.test_port = self.tool.port_factory.get('test-win-win7')
self.tool.port_factory.get = lambda port_name=None: self.test_port
self.tool.port_factory.all_port_names = lambda: [
'test-linux-trusty', 'test-linux-precise',
'test-mac-mac10.11', 'test-mac-mac10.10',
'test-win-win10', 'test-win-win7'
]
def tearDown(self):
if self.oc:
self.restore_output()
def capture_output(self):
self.oc = OutputCapture()
self.oc.capture_output()
def restore_output(self):
stdout, stderr, logs = self.oc.restore_output()
self.oc = None
return (stdout, stderr, logs)
def test_basic(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({'all': False, 'include_virtual_tests': False, 'csv': False, 'platform': None})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout,
('// For test-win-win7\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'))
def test_multiple(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({'all': False, 'include_virtual_tests': False, 'csv': False, 'platform': 'test-win-*'})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout,
('// For test-win-win10\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'
'\n'
'// For test-win-win7\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'))
def test_csv(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({'all': False, 'platform': '*win7', 'csv': True, 'include_virtual_tests': False})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout,
('test-win-win7,passes/text.html,None,png,passes/text-expected.png,None\n'
'test-win-win7,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
| [
"[email protected]"
] | |
0a31f183176667b3ccb7ef41a3aae879976e561e | b595a24b07662a89826a1b6d334dfcaa3ec1c4b0 | /venv/lib/python3.6/_bootlocale.py | eacdd540c7def6405335d26557019be25b1bb7e2 | [
"CC0-1.0"
] | permissive | kentarofujiy/base1 | 4629b638f96b3ed091ea695c81b3b7837af1ec79 | f820b9b379cda86ca5b446c63800fbe4bb8f3bce | refs/heads/master | 2021-07-13T02:06:01.371773 | 2017-03-11T12:43:19 | 2017-03-11T12:43:19 | 84,649,225 | 0 | 1 | CC0-1.0 | 2020-07-26T01:08:25 | 2017-03-11T12:43:32 | Python | UTF-8 | Python | false | false | 101 | py | /usr/local/Cellar/python3/3.6.0/Frameworks/Python.framework/Versions/3.6/lib/python3.6/_bootlocale.py | [
"[email protected]"
] | |
f20ae300239e95b18221dae69c1ce376ca36d924 | 8729478cd46625a8403894677bf616a34846a248 | /Django/hexocomments/migrations/0001_initial.py | 23e9389dd42b18441bae64f5c4e298c51ac2d7c1 | [] | no_license | dongdatangjie/Django | 5528e81c438b39eea94ee022283ac9b8b4cb4594 | 57dcef7182e1cc8b3f27c8f8c44643ec0755cf62 | refs/heads/master | 2020-12-03T06:44:56.795787 | 2017-06-29T02:18:02 | 2017-06-29T02:18:02 | 95,729,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 07:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Hexo', '0002_auto_20170628_1535'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=255)),
('url', models.URLField(blank=True)),
('text', models.TextField()),
('created_time', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Hexo.Post')),
],
),
]
| [
"[email protected]"
] | |
36f8862cb75c3b1df9d93265b47e29c0730ba897 | eb73cc75bcda7a26784674c09a1cd14227889547 | /use_model.py | 22f59a9c2329ea44733b241afc6f92743d2d392c | [] | no_license | junTaniguchi/python | 232fc43b8650b4168264120fba1b0f686ada042f | 09ca809bee9a96ff0a79e84f827afd9256a1f15a | refs/heads/master | 2021-01-22T05:28:23.793408 | 2017-03-25T12:44:54 | 2017-03-25T12:44:54 | 81,666,610 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 9 17:15:52 2017
@author: j13-taniguchi
"""
import os
import cv2
import keras
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.models import model_from_json
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
import tensorflow as tf
from ssd import SSD300
from ssd_utils import BBoxUtility
plt.rcParams['figure.figsize'] = (8, 8)
plt.rcParams['image.interpolation'] = 'nearest'
np.set_printoptions(suppress=True)
path = "/Users/j13-taniguchi/study_tensorflow/keras_project/ssd_keras"
os.chdir(path)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
set_session(tf.Session(config=config))
# Build the list of place names
with open("./param/place_tokyo.txt", "r") as place_file:
place_list = place_file.readlines()
place_list = [place_str.strip() for place_str in place_list]
NUM_CLASSES = len(place_list)
# Define the dimensions of the input
input_shape=(300, 300, 1)
# Load the model (model_from_json expects the JSON string itself, not a file path)
with open("./param/learning_place_name.json") as json_file:
    model = model_from_json(json_file.read())
#model = SSD300(input_shape, num_classes=NUM_CLASSES)
#model.load_weights('weights_SSD300.hdf5', by_name=True)
model.load_weights('./param/learning_place_name.hdf5', by_name=True)
bbox_util = BBoxUtility(NUM_CLASSES)
inputs = []
images = []
img_path = 'XXXXXXXXX (path to the input file)'
img = image.load_img(img_path, grayscale=True, target_size=(300, 300))
img = image.img_to_array(img)
images.append(imread(img_path))
inputs.append(img.copy())
# Stack the inputs into a batch and apply the imported preprocessing before predicting
inputs = preprocess_input(np.array(inputs))
preds = model.predict(inputs, batch_size=1, verbose=1)
results = bbox_util.detection_out(preds)
for i, img in enumerate(images):
# Parse the outputs.
det_label = results[i][:, 0]
det_conf = results[i][:, 1]
det_xmin = results[i][:, 2]
det_ymin = results[i][:, 3]
det_xmax = results[i][:, 4]
det_ymax = results[i][:, 5]
# Get detections with confidence higher than 0.6.
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(img / 255.)
currentAxis = plt.gca()
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * img.shape[1]))
ymin = int(round(top_ymin[i] * img.shape[0]))
xmax = int(round(top_xmax[i] * img.shape[1]))
ymax = int(round(top_ymax[i] * img.shape[0]))
score = top_conf[i]
label = int(top_label_indices[i])
        label_name = place_list[label - 1]  # NUM_CLASSES is an int; the class names live in place_list
display_txt = '{:0.2f}, {}'.format(score, label_name)
coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
color = colors[label]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
plt.show() | [
"[email protected]"
] | |
56f0e42e930e5b0e72947b0bfbd26a4fa58d4071 | 12e6e0c818de61deabeeac039af16f440572faa6 | /exerc_01_b.py | bd95e79d52c5a9e5997c9cb637ec722340f8be15 | [
"MIT"
] | permissive | rmmariano/final_project_scientific-program | 379682365e899820da9407985b62057441d8d2bc | 95d06ecca39056436b5361a8486ab4449b712742 | refs/heads/master | 2020-05-20T06:03:51.171072 | 2016-09-15T22:52:59 | 2016-09-15T22:52:59 | 68,248,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py |
# Exerc. 01) b)
import matplotlib.pyplot as plt
import numpy as np
# create 200 values between 0 and 1
x = np.random.rand(200)
# shift the values to the range -0.5 to 0.5
x = x - 0.5
print("values of x: ")
print(x)
plt.hist(x)
plt.show()
| [
"[email protected]"
] | |
ba158f2eb3e6552542d82191472d68f317100f75 | 9b4d7689a8b8dfea5971eb85637b76f52ab54bc6 | /ddMS/wsgi.py | 9b4cdaaeaa8fd5bbc808680487670c17697f2815 | [] | no_license | 007janus/MS | af610ce2633a88c5a5e1d851542d6a50b83d3b29 | f92c73ab6ecb031d11ec659f6b7f1d24a82c1402 | refs/heads/master | 2021-01-13T01:04:15.414333 | 2015-12-24T05:06:21 | 2015-12-24T05:06:21 | 48,355,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for ddMS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ddMS.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"[email protected]"
] | |
c57e95274668910124ec3d3e16b4c0c86d1dabc4 | 1584ef7c2cbde323fe43675225a583154a9fe6de | /11_evaluate_det_lable.py | 0c5e29d6e97dbb8589f26077c0f5fc16538f7eef | [] | no_license | haima1998/dataset | 1620fa91c9b16a50c3037825c6836eb8ebd4cc5e | acaf2194f3e5fe59af99d3f1dff79defc2e34bfb | refs/heads/master | 2020-07-14T09:56:14.924563 | 2020-06-15T02:28:55 | 2020-06-15T02:28:55 | 205,295,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,188 | py | import numpy as np
import sys,os
import cv2
from lxml.etree import Element, SubElement, tostring
from xml.dom.minidom import parseString
import json
import xml.etree.ElementTree as ET
ROOT_DIR = '/home/charlie/disk2/dataset/number/data_dataset_voc'
#ROOT_DIR = '/home/charlie/disk2/dataset/number/test_data_dataset_voc'
detect_result_dir = os.path.join(ROOT_DIR, "det_result/all_xml/")
#detect_result_dir = os.path.join(ROOT_DIR, "result/02ssd_test_all/")
#test_files = os.path.join(ROOT_DIR, "ImageSets/Main/trainval.txt")
test_files = os.path.join(ROOT_DIR, "ImageSets/Main/test.txt")
#test_files = os.path.join(ROOT_DIR, "ImageSets/Main/all.txt")
lable_dir = os.path.join(ROOT_DIR, "Annotations/")
image_dir = os.path.join(ROOT_DIR, "JPEGImages/")
bad_case_image_dir = os.path.join(ROOT_DIR, "det_result/all_xml/all_badcase/")
global detect_obj_count
global lable_obj_count
detect_obj_count = 0
lable_obj_count = 0
global TP
global FP
global FN
TP = 0
FP = 0
FN = 0
global MIN_IOU
MIN_IOU = 0.5
global NEED_CHECK_TYPE
NEED_CHECK_TYPE = True
SHOW_ERROR_DETECTION = True
class DetObject:
def __init__(self, type, conf,x1,y1,x2,y2):
self.type = type
self.conf = conf
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
def get_detect_result(file_name):
detect_result_file = detect_result_dir + file_name + '.xml'
print(detect_result_file)
object_array = []
tree = ET.parse(detect_result_file)
root = tree.getroot()
for child in root:
#print('child-tag:', child.tag, ',child.attrib', child.attrib, ',child.text:', child.text)
x1 = 0
y1 = 0
x2 = 1
y2 = 1
lable_str = ''
conf = 0.0
for sub in child:
#print('sub-tag:', sub.tag, ',sub.attrib:', sub.attrib, ',sub.text:', sub.text)
if sub.tag == 'name':
lable_str = sub.text
if sub.tag == 'conf':
conf = float(sub.text)
for subsub in sub:
#print('subsub-tag:', subsub.tag, ',subsub.attrib:', subsub.attrib, ',subsub.text:', subsub.text)
if subsub.tag == 'xmin':
x1 = int(subsub.text)
if subsub.tag == 'ymin':
y1 = int(subsub.text)
if subsub.tag == 'xmax':
x2 = int(subsub.text)
if subsub.tag == 'ymax':
y2 = int(subsub.text)
if conf > 0.01:
#print(lable_str)
#print(conf)
#print(x1)
#print(y1)
#print(x2)
#print(y2)
obj = DetObject(lable_str,conf,x1,y1,x2,y2)
object_array.append(obj)
#else:
#print('no detect result')
return object_array
def get_lable_objects(file_name):
detect_result_file = lable_dir + file_name + '.xml'
print(detect_result_file)
object_array = []
tree = ET.parse(detect_result_file)
root = tree.getroot()
for child in root:
#print('child-tag:', child.tag, ',child.attrib', child.attrib, ',child.text:', child.text)
x1 = 0
y1 = 0
x2 = 1
y2 = 1
lable_str = ''
for sub in child:
#print('sub-tag:', sub.tag, ',sub.attrib:', sub.attrib, ',sub.text:', sub.text)
have_obj = False
if sub.tag == 'name':
lable_str = sub.text
for subsub in sub:
#print('subsub-tag:', subsub.tag, ',subsub.attrib:', subsub.attrib, ',subsub.text:', subsub.text)
if subsub.tag == 'xmin':
x1 = int(subsub.text)
if subsub.tag == 'ymin':
y1 = int(subsub.text)
if subsub.tag == 'xmax':
x2 = int(subsub.text)
if subsub.tag == 'ymax':
y2 = int(subsub.text)
have_obj = True
if have_obj == True:
#print(lable_str)
#print(x1)
#print(y1)
#print(x2)
#print(y2)
obj = DetObject(lable_str,1,x1,y1,x2,y2)
object_array.append(obj)
return object_array
def get_short_name(obj_type):
short_name = "UNK"
if obj_type == "zero":
short_name = "0"
if obj_type == "one":
short_name = "1"
if obj_type == "two":
short_name = "2"
if obj_type == "three":
short_name = "3"
if obj_type == "four":
short_name = "4"
if obj_type == "five":
short_name = "5"
if obj_type == "six":
short_name = "6"
if obj_type == "seven":
short_name = "7"
if obj_type == "eight":
short_name = "8"
if obj_type == "nine":
short_name = "9"
return short_name
def show_result(file_name,det_obj_list,lable_obj_list):
image_file = image_dir + file_name + '.jpg'
print(image_file)
img = cv2.imread(image_file)
#height, width, depth = img.shape
img_lable = cv2.imread(image_file)
for obj in det_obj_list:
cv2.rectangle(img, (obj.x1, obj.y1), (obj.x2, obj.y2), (255, 255, 0), 1)
cv2.putText(img, get_short_name(obj.type), (obj.x1 + 3, obj.y1 - 3), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
#cv2.putText(img, str(obj.conf), (obj.x1 + 3, obj.y1 - 23), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
lable_width = 0
lable_height = 0
for obj in lable_obj_list:
cv2.rectangle(img_lable, (obj.x1, obj.y1), (obj.x2, obj.y2), (0, 255, 0), 1)
cv2.putText(img_lable, get_short_name(obj.type), (obj.x1 + 3, obj.y1 - 3), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 125, 255), 1, cv2.LINE_AA)
lable_width = obj.x2 - obj.x1
lable_height = obj.y2 - obj.y1
gt_win_str = "GT(" + str(lable_width) + "," + str(lable_height) + ")"
cv2.imshow("Detect(" + file_name + ")", img)
cv2.imshow(gt_win_str, img_lable)
cv2.moveWindow(gt_win_str, 580, 0)
k = cv2.waitKey(0)
    if k == 27:
        # sys.exit()
        cv2.destroyAllWindows()
        os._exit(0)
    else:
        print(k)
def write_badcase(file_name,det_obj_list,lable_obj_list):
image_file = image_dir + file_name + '.jpg'
print(image_file)
img = cv2.imread(image_file)
#height, width, depth = img.shape
img_lable = cv2.imread(image_file)
for obj in det_obj_list:
cv2.rectangle(img, (obj.x1, obj.y1), (obj.x2, obj.y2), (255, 255, 0), 1)
cv2.putText(img, get_short_name(obj.type), (obj.x1 + 3, obj.y1 - 3), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
lable_width = 0
lable_height = 0
for obj in lable_obj_list:
cv2.rectangle(img_lable, (obj.x1, obj.y1), (obj.x2, obj.y2), (0, 255, 0), 1)
cv2.putText(img_lable, get_short_name(obj.type), (obj.x1 + 3, obj.y1 - 3), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 125, 255), 1, cv2.LINE_AA)
lable_width = obj.x2 - obj.x1
lable_height = obj.y2 - obj.y1
gt_image_file = bad_case_image_dir + file_name + "_gt_" + str(lable_width) + "_" +str(lable_height) + ".jpg"
det_image_file = bad_case_image_dir + file_name + "_det_" + str(len(det_obj_list)) + ".jpg"
print(gt_image_file)
print(det_image_file)
cv2.imwrite(gt_image_file,img_lable)
cv2.imwrite(det_image_file,img)
def compute_iou(rec1, rec2):
"""
computing IoU
:param rec1: (y0, x0, y1, x1), which reflects
(top, left, bottom, right)
:param rec2: (y0, x0, y1, x1)
:return: scala value of IoU
"""
# computing area of each rectangles
S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
#print('no intersect ret (%d,%d,%d,%d)' % (left_line,right_line,top_line,bottom_line))
return 0
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
print('have intersect ret (%d,%d,%d,%d,%d)' % (left_line,right_line,top_line,bottom_line,intersect))
print('iou ret (%d,%d,%d)' % (sum_area,intersect,sum_area - intersect))
return float(intersect) / float(sum_area - intersect)
def check_detect_result(det_obj_list,lable_obj_list):
#print('enter check_detect_result')
global TP
global FP
global FN
global MIN_IOU
global NEED_CHECK_TYPE
ret = True
for lable_obj in lable_obj_list:
have_match = False
for det_obj in det_obj_list:
det_obj_ret = (det_obj.x1,det_obj.y1,det_obj.x2,det_obj.y2)
lable_obj_ret = (lable_obj.x1,lable_obj.y1,lable_obj.x2,lable_obj.y2)
#print('det rect(%d,%d,%d,%d)' % (det_obj.x1,det_obj.y1,det_obj.x2,det_obj.y2))
#print('lable rect(%d,%d,%d,%d)' % (lable_obj.x1,lable_obj.y1,lable_obj.x2,lable_obj.y2))
iou = compute_iou(det_obj_ret,lable_obj_ret)
print(' det type:%s ' % det_obj.type)
print(' lable type:%s' % lable_obj.type)
print('iou = %f' % iou)
if iou > MIN_IOU and (det_obj.type == lable_obj.type or NEED_CHECK_TYPE == False):
have_match = True
if have_match == False:
ret = False
FN = FN + 1
#print('lable w:%d h:%d ' % (lable_obj.x2 - lable_obj.x1,lable_obj.y2 - lable_obj.y1))
for det_obj in det_obj_list:
have_match = False
for lable_obj in lable_obj_list:
det_obj_ret = (det_obj.x1,det_obj.y1,det_obj.x2,det_obj.y2)
lable_obj_ret = (lable_obj.x1,lable_obj.y1,lable_obj.x2,lable_obj.y2)
#print('det rect(%d,%d,%d,%d)' % (det_obj.x1,det_obj.y1,det_obj.x2,det_obj.y2))
#print('lable rect(%d,%d,%d,%d)' % (lable_obj.x1,lable_obj.y1,lable_obj.x2,lable_obj.y2))
iou = compute_iou(det_obj_ret,lable_obj_ret)
#print('iou = %f' % iou)
if iou > MIN_IOU and (det_obj.type == lable_obj.type or NEED_CHECK_TYPE == False):
#if iou > MIN_IOU and (det_obj.type == lable_obj.type or (det_obj.type == 'red_only' and lable_obj.type == 'yellow_only') or NEED_CHECK_TYPE == False):
have_match = True
if have_match == False:
ret = False
FP = FP + 1
else:
TP = TP + 1
return ret
def eval_one_frame(file_name):
global detect_obj_count
global lable_obj_count
#print('eval_one_frame')
det_obj_list = get_detect_result(file_name)
detect_obj_count = detect_obj_count + len(det_obj_list)
#print('detect one frame result')
print(len(det_obj_list))
lable_obj_list = get_lable_objects(file_name)
lable_obj_count = lable_obj_count + len(lable_obj_list)
#print('one frame lable count')
#print(len(lable_obj_list))
ret = check_detect_result(det_obj_list,lable_obj_list)
if((len(det_obj_list) != len(lable_obj_list)) or ret != True):
print(' det_num:%d lable_num:%d ret:%d' % (len(det_obj_list),len(lable_obj_list),ret))
if SHOW_ERROR_DETECTION == True:
show_result(file_name,det_obj_list,lable_obj_list)
else:
write_badcase(file_name,det_obj_list,lable_obj_list)
def main():
global detect_obj_count
global lable_obj_count
global TP
global FP
global FN
print("eval main enter")
with open(test_files, "r") as lf:
for line in lf.readlines():
line = line.strip('\n')
#print(line)
eval_one_frame(line)
print(' TP: %d, FP: %d, FN: %d , lable_cout %d, detect count:%d' % (TP,FP,FN,lable_obj_count,detect_obj_count))
recall = float(TP)/float(lable_obj_count)
precision = float(TP) / float(TP + FP)
print(' recall:%f precision:%f' % (recall,precision))
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
97978976bbcc9a6bb1444b2a6c8d5159f0d88aa7 | 198289bcb6bccc9fd53f927db6f0bcdbd7d1e72e | /LoanApprovalSVM.py | db4eb814c49e6bca82959cf97b2f214ca7b396f0 | [] | no_license | arunbaruah/MLexamples | c1da1e19f4b10c0192c31bc5fa913bc5c63b1317 | b74c971ef44d1bda22d0766342fce5cafff1213b | refs/heads/master | 2020-08-24T01:51:02.384516 | 2020-05-07T08:42:25 | 2020-05-07T08:42:25 | 216,743,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | #Build the Support Vector Classifier Model
# Predict the loan approval status based on
# Gender, Marital Status, Credit History, Income and Loan Amount
#import the data
import pandas as pd
#read the data
dataset = pd.read_csv("LoanDataset-1.csv")
#check for null value
dataset.isnull().sum(axis=0)
# Replace Missing Values. Drop the rows.
dataset = dataset.dropna()
# Drop the column gender as we do not need it in this example
dataset = dataset.drop(['gender'], axis=1)
# Create Dummy variables
dataset.dtypes
dataset = pd.get_dummies(dataset, drop_first=True)
# Normalize Income and Loan Amount using StandardScaler
from sklearn.preprocessing import StandardScaler
scalar_ = StandardScaler()
dataset['income'] = scalar_.fit_transform(dataset[['income']])
dataset['loanamt'] = scalar_.fit_transform(dataset[['loanamt']])
# Create the X and Y
Y = dataset[['status_Y']]
X = dataset.drop(['status_Y'], axis=1)
# Split the X and Y dataset into trai test set in 70:30 ratio
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = \
train_test_split(X, Y, test_size = 0.3, random_state = 1234, stratify=Y)
# Build the model
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train, Y_train)
# Predict the outcome using Test data
Y_predict = svc.predict(X_test)
# Build the confusion matrix and get the accuracy/score
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, Y_predict)
score = svc.score(X_test, Y_test)
| [
"[email protected]"
] | |
3baad59fe65bdc3700e08030e2fac3f88bc8d20a | f4dedea53630c9cbdc6297ae4a7e2a8195fd7691 | /7 Mathematics/31 Another Game.py | 9fee062c97127af344f06717d0ffe41c0d4a9c6c | [] | no_license | nikkisora/cses_problemset | d089db048444e07e002f131b4323adc9df95b05b | 03160f33e36cdc6d538403357b36bcb015b4dba7 | refs/heads/master | 2023-07-03T10:34:23.487709 | 2021-08-05T21:13:49 | 2021-08-05T21:13:49 | 379,251,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | '''
CSES - Another Game
Time limit: 1.00 s
Memory limit: 512 MB
There are n heaps of coins and two players who move alternately. On each move, a player selects some of the nonempty heaps and removes one coin from each heap. The player who removes the last coin wins the game.
Your task is to find out who wins if both players play optimally.
Input
The first input line contains an integer t: the number of tests. After this, t test cases are described:
The first line contains an integer n: the number of heaps.
The next line has n integers x_1,x_2,...,x_n: the number of coins in each heap.
Output
For each test case, print "first" if the first player wins the game and "second" if the second player wins the game.
Constraints
1 <= t <= 2 * 10^5
1 <= n <= 2 * 10^5
1 <= x_i <= 10^9
the sum of all n is at most 2 * 10^5
Example
Input:
3
3
1 2 3
2
2 2
4
5 5 4 5
Output:
first
second
first
| [
"[email protected]"
] | |
c3da4bca0ba8fb33dc93aead0dff65d72e62a31a | 30a621afb2a42e4750ee09decb1a82329dd30e1a | /datasets/__init__.py | 3e6321d5f899656136679120a60023baede9eb4b | [] | no_license | hongfel3/car_segmentation | c828e955d965f766f97ebed6f00cc01158b1fe63 | 2e815a27468d4d37fa9ed4477b2246e4072d8fab | refs/heads/master | 2020-03-08T13:12:28.949875 | 2017-09-15T21:49:05 | 2017-09-15T21:49:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | from .carvana import CARVANA
__all__ = ["CARVANA"]
| [
"[email protected]"
] | |
56d130728bdf1a465e5ffc8e16b294e68834aa4c | e397de5c8ec8b6a0973ee4a22c2dc3a11b06c0bc | /bot.py | a46658d76f4de29834061e7bed4fed3b7d107fc4 | [] | no_license | VnyC/tweetbook | e94c4f9e735d00acee86c506ebc8daf73ced4b20 | 8127319c97a27cc556527958749695856f2fa7b6 | refs/heads/main | 2023-05-14T16:08:01.185580 | 2021-06-10T16:29:57 | 2021-06-10T16:29:57 | 375,762,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | import tweepy
import time
print("Welcome back Vinay")
consumer_key = 'wGNE1qCPjhjfcOYMWsOik6g0O'
consumer_secret = 'kF1Y0jVAZviLfvODQVmYJJ3NIQvBkVAoStsqajzvclihcejxDk'
key = '1238345000678653952-qE5mHVLjjYE45Qjfqg2bZBvMsjYelq'
secret = 'E61WoMrFfVgb21ovryHrMr7KNx5mtwMgMxRyhlWJoCJca'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
api = tweepy.API(auth)
# api.update_status('All Good things are wild and free!')
filename = 'last_seen.txt'
def read_last_seen(filename):
file_read = open(filename, 'r')
last_seen_id = int(file_read.read().strip())
file_read.close()
return last_seen_id
def write_last_seen(filename, last_seen_id):
file_write = open(filename, 'w')
file_write.write(str(last_seen_id))
    file_write.close()
return
def reply():
    tweets = api.home_timeline(since_id=read_last_seen(filename), tweet_mode='extended')
for tweet in reversed(tweets):
try:
if 'life' in tweet.full_text.lower():
print(str(tweet.id)+' - '+tweet.full_text)
api.update_status('@'+tweet.user.screen_name+' Auto reply and like works ;)', tweet.id)
api.create_favorite(tweet.id)
write_last_seen(filename, tweet.id)
except tweepy.TweepError as e:
print(e.reason)
while True:
reply()
time.sleep(20)
| [
"[email protected]"
] | |
4f9371a99c122b5c7f63b63291b8d9fcad56880e | d0e0268584916b8e31dd99a26c693dfe33b24663 | /chatire/urls.py | 133c013062e2b05a3950620aa5c191f54542ded3 | [] | no_license | wesleymutwiri/emailgram | 1e5e31282f4efaf7bbb5708ad0ee7c4439554fb7 | 56cde06197ebcb97d0748085d7adf9bd7f3df81c | refs/heads/master | 2020-03-19T05:00:52.614342 | 2018-06-03T19:45:06 | 2018-06-03T19:45:06 | 135,891,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | """chatire URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
url(r'^admin/', admin.site.urls),
path('auth/', include('djoser.urls')),
path('auth/', include('djoser.urls.authtoken')),
path('api/', include('chats.urls')),
]
| [
"[email protected]"
] | |
72ba460bc33d2377356cfe0aa0cd91894117102f | 20b7df94a756200fc38853b9897b37dfbfd27d77 | /src/hdiutils/HDIimport/imzml_reader.py | 8659b95c3db5eab2b798007e7d0652a625b36944 | [
"MIT"
] | permissive | JoshuaHess12/hdi-utils | 6cbb8f84a3d7bb6455f55499f85a8f6ea07dea00 | 2b2923e74391eccf5963b161d5fc302ac6b214f3 | refs/heads/main | 2023-06-24T15:38:36.183314 | 2021-07-14T20:47:31 | 2021-07-14T20:47:31 | 361,567,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,929 | py | # Module for imzML parsing of imaging mass spectrometry (IMS) data
# Developer: Joshua M. Hess, BSc
# Developed at the Vaccine & Immunotherapy Center, Mass. General Hospital
# Import external modules
from pathlib import Path
import os
from pyimzml.ImzMLParser import getionimage
from pyimzml.ImzMLParser import ImzMLParser
import numpy as np
import pandas as pd
from operator import itemgetter
import scipy
import skimage
import skimage.io
# Import custom modules
from .utils import SubsetCoordinates
# Create a class object to store attributes and functions in
class imzMLreader:
"""Class for parsing and storing IMS data that is in the imzML format. Depends
on and contains the pyimzML python package distributed from the Alexandrov team:
https://github.com/alexandrovteam/pyimzML in a data object.
path_to_imzML: string indicating path to imzML file (Ex: 'path/IMSdata.imzML')
"""
def __init__(
self,
path_to_imzML,
flatten,
subsample,
mask=None,
path_to_markers=None,
**kwargs
):
"""Initialize class to store data in. Ensure appropriate file format
and return a data object with pixel table.
"""
# Create a pathlib object for the path_to_imzML
path_to_imzML = Path(path_to_imzML)
# check to see if the input is a folder
if path_to_imzML.is_dir():
# parse the inputs as ibd and imzml
path_to_ibd = [x for x in path_to_imzML.rglob('*.ibd')][0]
path_to_imzML = [x for x in path_to_imzML.rglob('*.imzML')][0]
# Set the file extensions that we can use with this class
ext = [".imzML"]
# Check to make sure the string is a valid path
if not os.path.exists(str(path_to_imzML)):
print("Not a valid path. Try again")
else:
print("Valid path...")
# Check to see if there is a valid file extension for this class
if str(path_to_imzML).endswith(tuple(ext)):
print(
"Valid file extension...",
"\nfile name:",
str(path_to_imzML),
"\nparsing imzML...",
)
# Read imzML and return the parsed data
self.data = ImzMLParser(str(path_to_imzML))
print("Finished parsing imzML")
else:
print("Not a valid file extension")
# Add the image size to the data object
self.data.array_size = (
self.data.imzmldict["max count of pixels y"],
self.data.imzmldict["max count of pixels x"],
)
# Check to see if the mask exists
if mask is not None:
# Check to see if the mask is a path (string)
if isinstance(mask, Path):
##############Change in future to take arbitrary masks not just tiff################
mask = skimage.io.imread(str(mask), plugin="tifffile")
# Ensure the mask is a sparse boolean array
mask = scipy.sparse.coo_matrix(mask, dtype=np.bool)
# Add the mask to the class object -- even if it is none. Will not be applied to image yet
self.data.mask = mask
# Create an object for a filtered/processed working
self.data.processed_image = None
# Check to see if creating a pixel table (used for dimension reduction)
if flatten:
# Check to see if we are using a mask
if mask is not None:
# Ensure that the mask is boolean
mask = np.array(mask.toarray(), dtype=np.bool)
# Get the coordinates where the mask is
where = np.where(mask)
# Create list of tuples where mask coordinates are (1-indexed) -- form (x,y,z) with z=1 (same as imzML)
coords = list(
zip(
where[1] + 1, where[0] + 1, np.ones(len(where[0]), dtype=np.int)
)
)
# intersect the mask coordinates with the IMS coordinates from imzML parser
mask_coords = list(set(coords) & set(self.data.coordinates))
# Reorder the mask coordinates for F style column major format (imzml format)
mask_coords = sorted(mask_coords, key=itemgetter(0, 1))
# Clear the old coordinates for memory
coords, where, mask = None, None, None
# Zip the coordinates into dictionary with list index (Faster with itemgetter)
full_coords_dict = dict(
zip(self.data.coordinates, range(0, len(self.data.coordinates)))
)
# Find the indices of the mask coordinates -- need for creating dataframe
coords_idx = list(itemgetter(*mask_coords)(full_coords_dict))
# Remove the dictionary to save memory
full_coords_dict = None
# Reset the coordinates object to be only the mask coordinates
self.data.coordinates = mask_coords
# Otherwise create a coords_idx from the full list of coordinates
else:
# Create list
coords_idx = [x for x in range(len(self.data.coordinates))]
# Check to see if subsampling
if subsample is not None:
# Use the coordinates for subsampling
sub_mask, coords = SubsetCoordinates(
coords=self.data.coordinates,
array_size=self.data.array_size,
**kwargs
)
# Alter the order to be in column major format Fortran style
coords = sorted(coords, key=itemgetter(0, 1))
# Clear space with the mask
sub_mask = None
# Get the indices now of these coordinates from the coords_idx
# coords_idx = [self.data.coordinates.index(x) for x in coords]
# Zip the coordinates into dictionary with list index (Faster with itemgetter)
tmp_coords_dict = dict(
zip(self.data.coordinates, range(0, len(self.data.coordinates)))
)
# Find the indices of the mask coordinates -- need for creating dataframe
coords_idx = list(itemgetter(*coords)(tmp_coords_dict))
# Clear the coordinates dictionary to save memory
tmp_coords_dict = None
# Add the subset coordinates to our object
self.data.sub_coordinates = coords
# Otherwise there is no subsampling so leave the coordinates as they are
else:
# Keep the full list of coordinates
coords = self.data.coordinates
# Add the subset coordinates as None
self.data.sub_coordinates = None
# Create numpy array with cols = m/zs an rows = pixels (create pixel table)
tmp = np.empty([len(coords), len(self.data.getspectrum(0)[0])])
# iterate through pixels and add to the array
print("Fetching Spectrum Table...")
for i, (x, y, z) in enumerate(coords):
# Get the coordinate index
idx = coords_idx[i]
# Now use the index to extract the spectrum
mzs, intensities = self.data.getspectrum(idx)
# Use the original i index to add to the array the data
tmp[i, :] = intensities
# Clear memory by removing mzs and intensities
mzs, intensities = None, None
# Create a pandas dataframe from numpy array
tmp_frame = pd.DataFrame(
tmp, index=coords, columns=self.data.getspectrum(0)[0]
)
# Delete the temporary object to save memory
tmp = None
# Assign the data to an array in the data object
self.data.pixel_table = tmp_frame
# Get the image shape of the data
self.data.image_shape = (
self.data.imzmldict["max count of pixels y"],
self.data.imzmldict["max count of pixels x"],
self.data.pixel_table.shape[1],
)
else:
# Create a pixel table as None
self.data.pixel_table = None
# Set the image shape as None
self.data.image_shape = None
# Add the filename to the data object
self.data.filename = path_to_imzML
# Add None to the data image (not currently parsing full array)
self.data.image = None
# get number of channels
# here, we assume that each of the pixels has the same number of
# m/z peaks, so we can take only the first element of the list
self.data.num_channels = self.data.mzLengths[0]
# update the data type
self.data.hdi_type = "raster"
# Print an update that the import is finished
print("Finished")
def SubsetData(self, range=None):
"""Subset an IMS peak list to fall between a range of values.
range: tuple indicating range (Ex (400,1000)). Note for memory reasons
the PixelTable is overwritten, and a new subset of the peak list isnt created.
"""
# Get the lowest value
low = next(
x for x, val in enumerate(self.data.pixel_table.columns) if val >= range[0]
)
# Get the highest value
hi = [n for n, i in enumerate(self.data.pixel_table.columns) if i <= range[1]][
-1
]
# Assign the new peak list to the pixel_table (add +1 because python isnt inclusive)
self.data.pixel_table = self.data.pixel_table.iloc[:, low : hi + 1]
def ExportChannels(self):
"""Export a txt file with channel names for downstream analysis."""
# Print a sheet for m/z and channel numbers
sheet = pd.DataFrame(self.data.pixel_table.columns, columns=["channels"])
# Write out the sheet to csv
        sheet.to_csv(self.data.filename.stem + "_channels.csv", sep="\t")  # path_to_imzML is not in scope here; use the stored filename
def CreateSingleChannelArray(self, idx):
"""
Function for extracting a single channel image from the array given an index
"""
# create temporary image of all 0s to fill
im = np.zeros((self.data.array_size[0], self.data.array_size[1]), dtype=np.float32)
# Run through the data coordinates and fill array
for i, (x, y, z) in enumerate(self.data.coordinates):
# Add data to this slice -- only extact this index for each pixel
# getspectrum returns mzs, intensities for pixels --> take only the intensity
im[y - 1, x - 1] = self.data.getspectrum(i)[1][idx]
# return the filled array
return im
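
# A minimal usage sketch (the file path below is hypothetical; shown only to
# illustrate how this reader is typically driven):
#
#   reader = imzMLreader(path_to_imzML="data/sample.imzML",
#                        flatten=True, subsample=None)
#   print(reader.data.pixel_table.shape)          # pixels x m/z channels
#   im = reader.CreateSingleChannelArray(idx=0)   # single-channel image array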
| [
"[email protected]"
] | |
46d412c1bb1b2739c159e119a2dbbb4235dcc3f3 | 2d064be157ce2ec9a37aef53c3d50c0f57027a7c | /code_monkey/node/assignment.py | 631023e32ff654bedb7bce1ab68a02459624b366 | [] | no_license | davidwallacejackson/code_monkey | 4cd447555ffaf4ed31f64b2c45db4a891f095804 | 8aae3dfb2cc78e52a1aceda583ec70763eb3ae53 | refs/heads/master | 2021-05-29T10:27:55.915608 | 2015-09-20T05:34:46 | 2015-09-20T05:34:46 | 42,385,062 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,506 | py | from ast import literal_eval
from code_monkey.change import VariableChangeGenerator
from code_monkey.node.source import SourceNode
from code_monkey.utils import (
absolute_index_to_line_column,
find_termination)
class AssignmentNode(SourceNode):
'''Node representing a variable assignment inside Python source code.
The body of the variable is considered to be everything on the right of the
= sign, beginning with the first non-whitespace character. Unlike classes
and functions, a variable's source does NOT include a newline at the end.'''
def __init__(self, parent, astroid_object, siblings):
super(AssignmentNode, self).__init__(
parent=parent,
astroid_object=astroid_object,
siblings=siblings)
#the _astroid_object (an Assign object) has TWO children that we need to
#consider: the variable name, and another astroid node (the 'right
#hand' value)
self._astroid_name = astroid_object.targets[0]
self._astroid_value = astroid_object.value
try:
self.name = self._astroid_name.name
except AttributeError:
#'subscript' assignments (a[b] = ...) don't have a name in astroid.
#instead, we give them one by reading their source
#TODO: this can result in names containing dots, which is invalid.
#need a better solution
self.name = self._astroid_name.as_string()
def eval_body(self):
'''Attempt to evaluate the body (i.e., the value) of this AssignmentNode
using ast.literal_eval (which will evaluate ONLY Python literals).
Return the value if successful, otherwise, return None.'''
try:
return literal_eval(self.get_body_source())
except (SyntaxError, ValueError):
return None
@property
def fs_path(self):
return self.parent.fs_path
@property
def change(self):
return VariableChangeGenerator(self)
#the 'whole source' of a AssignmentNode includes the name and the value
@property
def start_line(self):
return self._astroid_name.fromlineno - 1
@property
def start_column(self):
return self._astroid_name.col_offset
#in a AssignmentNode, the _astroid_value represents the body
@property
def body_start_line(self):
return self._astroid_value.fromlineno - 1
@property
def body_start_column(self):
return self._astroid_value.col_offset
@property
def end_index(self):
#there's a bug in astroid where it doesn't correctly detect the last
#line of multiline enclosed blocks (parens, brackets, etc.) -- it gives
#the last line with content, rather than the line containing the
#terminating character
#we have to work around this by scanning through the source ourselves to
#find the terminating point
#astroid bug report submitted:
#https://bitbucket.org/logilab/astroid/issue/31/astroid-sometimes-reports-the-wrong
file_source_lines = self.get_file_source_code().splitlines(True)
#we start by finding the line/column at which the next 'sibling' of
#this node begins. if the node is at the end of the file, we get the
#end of the file instead
next_sibling = self._astroid_object.next_sibling()
if next_sibling:
scan_from_line = next_sibling.fromlineno
scan_from_column = next_sibling.col_offset - 1
else:
scan_from_line = len(file_source_lines) - 1
scan_from_column = len(file_source_lines[scan_from_line]) - 1
#this string doesn't have the right formatting, but it should be
#otherwise correct -- so we can use it to see what character our
#variable ends on
terminating_char = self._astroid_value.as_string()[-1]
return find_termination(
file_source_lines,
scan_from_line,
scan_from_column,
terminating_char)
#for variable nodes, it's easiest to find an absolute end index first, then
#work backwards to get line and column numbers
@property
def end_line(self):
return absolute_index_to_line_column(
self.get_file_source_code(),
self.end_index)[0]
@property
def end_column(self):
return absolute_index_to_line_column(
self.get_file_source_code(),
self.end_index)[1]
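# Illustrative sketch (not part of code_monkey): eval_body() above delegates to
# ast.literal_eval, so only Python literals evaluate; everything else yields None.
if __name__ == '__main__':
    from ast import literal_eval
    def _try_eval(body_source):
        # mirrors eval_body(): return the literal's value, or None on failure
        try:
            return literal_eval(body_source)
        except (SyntaxError, ValueError):
            return None
    print(_try_eval("{'a': 1}"))     # {'a': 1} -- a dict literal evaluates
    print(_try_eval("os.getcwd()"))  # None -- a function call is not a literal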
| [
"[email protected]"
] | |
7b5a8df56a92afdaa085b6dd78e03a066d5dcbf0 | 997c3b34d65105cb3e02cb427344637557f35d3e | /mecanografia/version orientada a objetos/data/clock.py | 0ffbd8210a98953cf3fd0ae95ee7f135b858be14 | [] | no_license | TomiProgramm/telegramconaku | 7db34d510937f4e31937b2aa1fd12e689231209f | b28b4c5e07c1b0c532ae875b08c16f3fcae00296 | refs/heads/main | 2023-03-07T04:39:31.948411 | 2021-02-16T17:23:32 | 2021-02-16T17:23:32 | 338,463,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from datetime import datetime
from math import floor
from .config import *
class Clock():
def __init__(self):
self.time = TIME
self.start = int()
self.end = int()
    def start_count(self):
        # record the moment the countdown starts
        self.start = datetime.now()
    def update(self):
        # remaining time = configured TIME minus elapsed whole seconds
        difference = datetime.now() - self.start
        self.time = TIME - floor(difference.seconds)
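# Minimal usage sketch (illustrative; assumes TIME is defined in .config,
# e.g. TIME = 60 seconds for the typing test):
#   clock = Clock()
#   clock.start_count()
#   clock.update()          # call once per frame
#   remaining = clock.time  # seconds left in the test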
| [
"[email protected]"
] | |
85cbb1571c577c9e96a89e14e876ecbdc9d02336 | 649f0e0593219ea8c4fe932c6be8457e2898011b | /data_split_impute.py | 55d9643b0c9b032e7872c7855aa1ea37bfd2a415 | [] | no_license | Xiaoting05/ML-final-project | 83a5c2c8bdbabea4d17ef7f51c8d93d65ab1060e | 7235afa84a66f61d1143bb6e4aaa1670dccdbfd6 | refs/heads/main | 2023-05-13T12:24:13.168085 | 2021-06-04T05:00:32 | 2021-06-04T05:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,952 | py | import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve
import warnings
warnings.filterwarnings('ignore')
FEATURES = ['province','age','education', 'if_urban',
'wealth_index', 'if_own_house',
'if_employment', 'if_employment_current','employment_pay_method', 'if_earn_more',
'partner_edu',
'num_child','sex_head_household', 'sexual_activity', 'ideal_num_child', 'partner_ideal_child', 'money_decide_person']
NUMERIC_FEATURES = ['age','education','if_own_house','if_employment_current','partner_edu','num_child','ideal_num_child']
CATGORICAL_FEATURES = ['if_urban',
'wealth_index',
'employment_pay_method','if_earn_more',
'sex_head_household', 'sexual_activity', 'partner_ideal_child', 'money_decide_person']
TARGET_LST = ['if_emo_vio', 'if_phy_vio', 'if_sex_vio', 'if_vio', 'num_vio']
#need hot-code
need_one_hot=['if_urban','wealth_index','if_earn_more','sex_head_household', \
'partner_ideal_child','money_decide_person','country']
#already dummy
dummy=['if_own_house','if_employment_current']
#need normalize
need_normalize=['age','education','num_household','num_child','partner_edu']
features_col = need_normalize + dummy + need_one_hot
# Data Split
def split_data(features, target, test_size=0.20, random_state=505):
X_train, X_test, y_train, y_test = train_test_split(features,
target,
test_size,
random_state)
return X_train, X_test, y_train, y_test
def detect_null(df):
return df.isnull.any()
def detect_outliers(df, feature):
'''
Detect possible outliers in a dataframe.
Inputs:
df: dataframe
feature: str, the feature to be detected on finding outliers.
Return: a list of possible outlier values.
'''
mean = df[feature].mean()
std = df[feature].std()
outliers = []
for index, data in df.iterrows():
if abs((data.loc[feature] - mean)/std)> 2:
outliers.append(index)
return outliers
def impute_missing_median(df, col_lst):
'''
Impute missing values of continuous variables using the median value
'''
for col in col_lst:
df.loc[(df[col] == "don't know") | (df[col] == "non-numeric response") , col] = None
median = df[col].median()
df[col].fillna(median,inplace=True)
return df
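# Illustrative usage (not part of the original analysis): a tiny DataFrame
# shows impute_missing_median filling a missing age with the column median.
if __name__ == '__main__':
    demo = pd.DataFrame({'age': [20.0, 30.0, None, 40.0]})
    print(impute_missing_median(demo, ['age']))  # the missing row becomes 30.0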
| [
"[email protected]"
] | |
c34c133b43595f122a6f0a44e7439e75a1ea200c | b5a5daf6f3312b25ad8899dd46c1f3a1c3d83295 | /src/chime_dash/app/components/container.py | ec606b7086bf9566a22c8cd567509091c2510f09 | [
"MIT"
] | permissive | quinn-dougherty/chime | ed4a8e8f300c95793622645e7ff59ebb6436ab7a | 76a4a5751a084e6d0167f10ff1bd0ad06092bafc | refs/heads/master | 2021-03-26T21:11:44.602107 | 2020-03-28T15:25:52 | 2020-03-28T15:25:52 | 247,750,339 | 0 | 0 | MIT | 2020-03-16T15:41:29 | 2020-03-16T15:41:29 | null | UTF-8 | Python | false | false | 1,728 | py | """Initializes the dash html
"""
from collections import OrderedDict
import dash_bootstrap_components as dbc
from chime_dash.app.components.base import Component, HTMLComponentError
from chime_dash.app.components.content import Content
from chime_dash.app.components.sidebar import Sidebar
from penn_chime.models import SimSirModel
class Container(Component):
"""
"""
def __init__(self, language, defaults):
"""
"""
super().__init__(language, defaults)
self.components = OrderedDict(
sidebar=Sidebar(language, defaults),
content=Content(language, defaults),
)
self.callback_outputs = []
self.callback_inputs = OrderedDict()
for component in self.components.values():
self.callback_outputs += component.callback_outputs
self.callback_inputs.update(component.callback_inputs)
def get_html(self):
"""Initializes the content container dash html
"""
container = dbc.Container(
children=dbc.Row(self.components["sidebar"].html + self.components["content"].html),
fluid=True,
className="mt-5",
)
return [container]
def callback(self, *args, **kwargs):
"""
"""
pars = self.components["sidebar"].parse_form_parameters(**kwargs)
kwargs["model"] = SimSirModel(pars)
kwargs["pars"] = pars
callback_returns = []
for component in self.components.values():
try:
callback_returns += component.callback(**kwargs)
except Exception as error:
raise HTMLComponentError(component, error)
return callback_returns
| [
"[email protected]"
] | |
1ba26800a9ca397933e9387e3d361639a6d795ab | 3376d59bb048943970d9ab13c255e558317329bc | /iocage/cli/list.py | 9086404b9a516b2baa0f29a0f48cf249f32bb862 | [
"BSD-2-Clause"
] | permissive | jungle-boogie/iocage | 69aeca238d3e80ed18b0517ba6ea6b0004c70e9b | 6175e93409cfb0db1afef8daf15993d43c9293b7 | refs/heads/master | 2021-01-11T22:35:17.480965 | 2017-01-13T23:13:39 | 2017-01-13T23:13:39 | 78,992,068 | 0 | 0 | null | 2017-01-15T03:20:46 | 2017-01-15T03:20:46 | null | UTF-8 | Python | false | false | 897 | py | """
List module for the cli.
"""
import click
from iocage.lib.ioc_list import IOCList
__cmdname__ = "list_cmd"
@click.command(name="list", help="List a specified dataset type")
@click.option("--release", "--base", "-r", "-b", "dataset_type",
flag_value="base", help="List all bases.")
@click.option("--template", "-t", "dataset_type", flag_value="template",
help="List all templates.")
@click.option("--header", "-h", "-H", is_flag=True, default=True,
help="For scripting, use tabs for separators.")
@click.option("--long", "-l", "_long", is_flag=True, default=False,
help="Show the full uuid and ip4 address.")
def list_cmd(dataset_type, header, _long):
"""This passes the arg and calls the jail_datasets function."""
if dataset_type is None:
dataset_type = "all"
IOCList(dataset_type, header, _long).get_datasets()
| [
"[email protected]"
] | |
b950ba242ff0b1513e26655c11ccb47456c94b10 | 7fc9a1d1c66922ea8389a55a807633c1d5c6eeb8 | /bin/django-admin | 6d809e3c62cafc21b3413c656774336d3682fdb0 | [] | no_license | Ibtisam-a/Web-services-and-web-data | 84898758f7a0791b0a8d2092e7174d7a6861398e | 2f3de6fba9f2e92822b8a6cf831193e611218a33 | refs/heads/master | 2021-04-08T00:32:55.073909 | 2020-08-14T08:59:20 | 2020-08-14T08:59:20 | 248,720,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | #!/home/cserv1_a/soc_msc/ml18ikfa/courseworkK/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
e957ea449283848311d7d2e96ec5d74a7c796e20 | 6f23fadd1a5071d4c211802de620784740045443 | /presentation/part2/code/MultLayer.py | a5b4032487a45de97275b89ef12d3e55bc0bdcf7 | [] | no_license | abhijat01/deepnn4se | 35fa369e0226d6f3712760d13e8510156864cccb | f863ee573f52c0526a15a21220fdc41a3f2b4a40 | refs/heads/master | 2023-08-15T14:01:24.093712 | 2021-10-05T13:04:43 | 2021-10-05T13:04:43 | 287,421,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | class MultiplicationLayer1D:
def __init__(self):
self.cache = {}
def forward(self, x, w):
self.cache['x'] = x
self.cache['w'] = w
return w * x
def backprop(self, incoming_grad):
x_grad = self.cache['w']
w_grad = self.cache['x']
x_grad *= incoming_grad
w_grad *= incoming_grad
return x_grad, w_grad
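# Quick sanity check (illustrative): for y = w * x, dy/dx = w and dy/dw = x,
# each scaled by the incoming gradient -- exactly what backprop() returns.
if __name__ == '__main__':
    layer = MultiplicationLayer1D()
    y = layer.forward(x=3.0, w=2.0)                    # y = 6.0
    x_grad, w_grad = layer.backprop(incoming_grad=1.0)
    print(y, x_grad, w_grad)                           # 6.0 2.0 3.0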
| [
"[email protected]"
] | |
102678c00a5a71158d91b82481969944b9ac7f02 | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/python_re2_test_file/regexlib_8007.py | 8f321fc6b1879f147d7566e9824b54c9f43cfb25 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # 8007
# ^[\.\wæøå-]+@([a-zæøå0-9]+([\.-]{0,1}[a-zæøå0-9]+|[a-zæøå0-9]?))+\.[a-z]{2,6}$
# EXPONENT
# nums:5
# EXPONENT AttackString:".@"+"a"*16+"!1 __NQ"
import re2 as re
from time import perf_counter
regex = """^[\.\wæøå-]+@([a-zæøå0-9]+([\.-]{0,1}[a-zæøå0-9]+|[a-zæøå0-9]?))+\.[a-z]{2,6}$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = ".@" + "a" * i * 1 + "!1 __NQ"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | [
"[email protected]"
] | |
5cefd90a4c04739c5b4ed617546cd65496b62ca5 | 8106a563f5e52c84483736d7f8331818030c2eda | /dz3/5.py | 6d5b299701d740afaffe916286cb776faf05cdf1 | [] | no_license | romanfffg/vsu_programming | 7340f75c473df8bb33312ca55dc0974abd98f644 | 6967df6b062f9f42798738b3333c46a5c6a86f9c | refs/heads/master | 2022-12-12T11:40:04.380911 | 2020-09-17T20:06:45 | 2020-09-17T20:06:45 | 296,305,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | a = int(input())
lst = []
while a:
lst.append(int(a))
a = input()
print(sum(lst) / len(lst))
| [
"[email protected]"
] | |
186ba64144442a69bc12003089f9b8d7fd1d387e | 2e57d94047c47a2c7d8b0cd9bdedce78cfc9e9a3 | /pretraining_and_competitors/segmentation/Unet_noskips.py | 9efe61f295bfdc090590e69dfcdbd2921f1e1186 | [] | no_license | PollastriFederico/skin_lesion_segmentation_ensemble | fd1e0ec9b90a8821321416947002dd9c30535957 | ebde6f8191af556fd31737a38cb0d42cb3372492 | refs/heads/master | 2020-06-07T23:15:53.230445 | 2019-06-21T14:50:19 | 2019-06-21T14:50:19 | 193,113,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | import torch
import torch.nn.functional as F
from torch import nn
from utils import initialize_weights
class _EncoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False):
super(_EncoderBlock, self).__init__()
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=3),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
if dropout:
layers.append(nn.Dropout())
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
self.encode = nn.Sequential(*layers)
def forward(self, x):
return self.encode(x)
class _DecoderBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels):
super(_DecoderBlock, self).__init__()
self.decode = nn.Sequential(
nn.Conv2d(in_channels, middle_channels, kernel_size=3),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, middle_channels, kernel_size=3),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=2, stride=2),
)
def forward(self, x):
return self.decode(x)
class UNet(nn.Module):
def __init__(self, num_classes):
super(UNet, self).__init__()
self.name = 'U-Net'
self.enc1 = _EncoderBlock(3, 64)
self.enc2 = _EncoderBlock(64, 128)
self.enc3 = _EncoderBlock(128, 256)
self.enc4 = _EncoderBlock(256, 512, dropout=True)
self.center = _DecoderBlock(512, 1024, 512)
self.dec4 = _DecoderBlock(512, 512, 256)
self.dec3 = _DecoderBlock(256, 256, 128)
self.dec2 = _DecoderBlock(128, 128, 64)
self.dec1 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.final = nn.Conv2d(64, num_classes, kernel_size=1)
initialize_weights(self)
def forward(self, x):
enc1 = self.enc1(x)
enc2 = self.enc2(enc1)
enc3 = self.enc3(enc2)
enc4 = self.enc4(enc3)
center = self.center(enc4)
dec4 = self.dec4(center)
dec3 = self.dec3(dec4)
dec2 = self.dec2(dec3)
dec1 = self.dec1(dec2)
final = self.final(dec1)
        # F.upsample is deprecated in recent PyTorch; F.interpolate is the drop-in replacement
        return F.upsample(final, x.size()[2:], mode='bilinear')
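# Shape sanity check (illustrative; requires utils.initialize_weights on the path):
#   net = UNet(num_classes=2)
#   out = net(torch.randn(1, 3, 256, 256))
#   # out.shape == torch.Size([1, 2, 256, 256]); the final upsample restores H and W,
#   # since the unpadded 3x3 convolutions shrink the feature maps along the way.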
| [
"[email protected]"
] | |
4b4a50988580ded53a3099b5bf1d46c08ccf1474 | a88ceaf127ad66c0c419684a65a3eaa68bdac807 | /c2b.py | cc116b67953e038a5e21b1377d9e29ebdcdf94a7 | [
"MIT"
] | permissive | EvansKaranja/darajaApi- | 54631586c3a472af1a34ff8163c0460840575adc | 402d0fa5949b81f4273908762af8141a00d2277d | refs/heads/master | 2020-07-11T21:50:41.977044 | 2019-08-27T12:46:02 | 2019-08-27T12:46:02 | 204,650,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | import requests
import keys
from keys import get_access_token
def register_url():
access_token = get_access_token()
api_url = "https://sandbox.safaricom.co.ke/mpesa/c2b/v1/registerurl"
headers = {"Authorization": "Bearer %s" % access_token}
request = {
"ShortCode": keys.shortcode,
"ResponseType": "Completed",
"ConfirmationURL": "https://fullstackdjango.com/confirmation_url",
"ValidationURL": "https://fullstackdjango.com/validation_url",
}
response = requests.post(api_url, json=request, headers=headers)
print(response.text)
# register_url()
def simulate_c2b_transaction():
access_token = get_access_token()
api_url = "https://sandbox.safaricom.co.ke/mpesa/c2b/v1/simulate"
headers = {"Authorization": "Bearer %s" % access_token}
request = {
"ShortCode": keys.shortcode,
"CommandID": "CustomerPayBillOnline",
"Amount": "2",
"Msisdn": keys.test_msisdn,
"BillRefNumber": "12345678",
}
response = requests.post(api_url, json=request, headers=headers)
print(response.text)
simulate_c2b_transaction()
| [
"[email protected]"
] | |
c9ecef2696dc4e74fc257405b04245662007e250 | 7c6b0694b88d2ab29744f8f8b606879409811dc3 | /backend/vehicle/migrations/0001_initial.py | b434c4cf712b1260cbe8a81b3b715ebbb2110958 | [] | no_license | crowdbotics-apps/mobile-3777-18387 | 85192498f9970a7fc9d58c6d105271bd6b481515 | 1870ba38a9a48d61ae9d140f048149ad7f5e65db | refs/heads/master | 2022-11-10T06:07:31.811885 | 2020-06-24T09:05:40 | 2020-06-24T09:05:40 | 274,621,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | # Generated by Django 2.2.13 on 2020-06-24 08:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("taxi_profile", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="VehicleType",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("icon", models.URLField()),
("base_rate", models.FloatField()),
],
),
migrations.CreateModel(
name="Vehicle",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("type_description", models.CharField(max_length=255)),
("plate_number", models.CharField(max_length=10)),
("timestamp_registered", models.DateTimeField(auto_now_add=True)),
("is_on_duty", models.BooleanField(blank=True, null=True)),
(
"driver",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="vehicle_driver",
to="taxi_profile.DriverProfile",
),
),
(
"vehicle_type",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="vehicle_vehicle_type",
to="vehicle.VehicleType",
),
),
],
),
]
| [
"[email protected]"
] | |
8802c5f5b5fb018edabed52a443a6732d2cdba69 | a2531d7b363b7bf35f3f4254fa0e34722b858321 | /experiment/experiment_code/mnist_nag.py | 350a3f7cbb961e5e4876ddbb1ca6aeaa6f6c6c46 | [] | no_license | sgflower66/SPI-Optimizer | 08c7fc110ef3ff6299de5c1cc3a8f218840b5b05 | 1cece09f350bbbeb1105e209c20292dad77e1c2a | refs/heads/master | 2020-04-29T08:04:26.039221 | 2019-03-16T14:02:48 | 2019-03-16T14:02:48 | 175,973,850 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,745 | py | import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from sgd import SGD
import os
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
import torch.nn.functional as F
import random
import numpy as np
seed = 5000
torch.manual_seed(seed)
np.random.seed(seed)
# seed for weight initialization
random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
# Hyper Parameters
input_size = 784
hidden_size = 1000
num_classes = 10
num_epochs = 20
batch_size = 100
learning_rate = 0.12
#logger = Logger('m_0.03.txt', title='mnist')
logger = Logger(os.path.join('NAG_netj3_'+str(learning_rate)+'_'+str(seed)+'.txt'), title='mnist')
logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
# MNIST Dataset
train_dataset = dsets.MNIST(root='./data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='./data',
train=False,
transform=transforms.ToTensor())
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Neural Network Model (1 hidden layer)
#class Net(nn.Module):
# def __init__(self, input_size, hidden_size, num_classes):
# super(Net, self).__init__()
# self.fc1 = nn.Linear(input_size, hidden_size)
# self.fc2 = nn.Linear(hidden_size, num_classes)
#
# def forward(self, x):
# out = self.fc1(x)
# out = F.relu(out)
# out = self.fc2(out)
# return out
#net = Net(input_size, hidden_size, num_classes)
from lenet3 import LeNet
net = LeNet()
net.cuda()
net.train()
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
#optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
optimizer = SGD(net.parameters(), lr=learning_rate, weight_decay=0.0001, momentum=0.9,nesterov=True)
# Train the Model
for epoch in range(num_epochs):
    net.train()  # re-enable training mode; net.eval() at the end of each epoch switches it off
    train_loss_log = AverageMeter()
train_acc_log = AverageMeter()
val_loss_log = AverageMeter()
val_acc_log = AverageMeter()
for i, (images, labels) in enumerate(train_loader):
# Convert torch tensor to Variable
# if i>0:
# break
# images = Variable(images.view(-1, 28*28).cuda())
images = Variable(images.cuda())
labels = Variable(labels.cuda())
# print(labels)
# Forward + Backward + Optimize
optimizer.zero_grad() # zero the gradient buffer
outputs = net(images)
train_loss = criterion(outputs, labels)
if i == 0:
# print(labels) # check dataLoader randomness
if epoch == 0:
# loss of the 1st mini-batch in the 1st epoch before backgrop, verify randomness of weight initialization
train_init_loss = train_loss
logger.append([0, train_init_loss, 0, 0, 0])
train_loss.backward()
optimizer.step()
prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
train_loss_log.update(train_loss.data[0], images.size(0))
train_acc_log.update(prec1[0], images.size(0))
if (i+1) % 100 == 0:
print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Acc: %.8f'
%(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, train_loss_log.avg, train_acc_log.avg))
# Test the Model
net.eval()
correct = 0
loss = 0
total = 0
for images, labels in test_loader:
# images = Variable(images.view(-1, 28*28)).cuda()
images = Variable(images.cuda())
labels = Variable(labels).cuda()
outputs = net(images)
test_loss = criterion(outputs, labels)
val_loss_log.update(test_loss.data[0], images.size(0))
prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
val_acc_log.update(prec1[0], images.size(0))
logger.append([learning_rate, train_loss_log.avg, val_loss_log.avg, train_acc_log.avg, val_acc_log.avg])
print('Accuracy of the network on the 10000 test images: %.8f %%' % (val_acc_log.avg))
print('Loss of the network on the 10000 test images: %.8f' % (val_loss_log.avg))
logger.close()
logger.plot()
| [
"[email protected]"
] | |
60c27b9b2d5592e9cfd29c966e30307bdf9dd261 | 6a591980ff8b85801163880383e4ff6b83b2375f | /testchild.py | 31138eb006398a68710ee2091ba4dd482c99a1e5 | [] | no_license | crosseyedlion/testrepo | 2f37bf4baaca9a3708faa112feabec3da6d99c63 | 0ee506da10ea96d8a8f895282b7345a3fd96e3b0 | refs/heads/main | 2023-06-21T17:55:46.413060 | 2021-07-21T17:08:13 | 2021-07-21T17:08:13 | 388,185,814 | 0 | 0 | null | 2021-07-21T17:08:14 | 2021-07-21T16:50:45 | Python | UTF-8 | Python | false | false | 66 | py | ## Adding a new file in childbranch
print ("inside child branch")
| [
"[email protected]"
] | |
3de0defe6015bb5872be5677642e3b6e1f8bfd76 | 22013212df1e21f29d0180f2109841177a2a8791 | /basic_addons/planilla/models/contabilidad/planilla_detalle_linea_nomina.py | 29af5e120796aae5ad9f32c01c818921cea7571c | [] | no_license | butagreeza/DTDATA_A | f965236c0d7faf0ec4082d27e2a0ff8e7dafe1c6 | 90b09f89714349a3f26de671a440a979aeebd54c | refs/heads/master | 2023-06-18T00:41:02.521432 | 2021-06-14T21:17:06 | 2021-06-14T21:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import ValidationError
from odoo.addons.base.res.res_request import referenceable_models
from datetime import datetime
class PlanillaDetalleLineaNomina(models.Model):
_name = 'planilla.detalle.linea.nomina'
_auto = False
fecha_ini = fields.Date()
fecha_fin = fields.Date()
name = fields.Char()
dni = fields.Char()
nombres = fields.Char()
sequence = fields.Char()
concepto = fields.Char()
cuenta_debe = fields.Char()
cuenta_haber = fields.Char()
monto = fields.Float('Monto', digits=(12, 2))
slip_id = fields.Integer()
salary_rule_id = fields.Integer()
employee_id = fields.Integer()
contract_id = fields.Integer()
class PlanillaDetalleLineaNominaWizard(models.TransientModel):
_name = "planilla.detalle.linea.nomina.wizard"
hr_payslip_run_id = fields.Many2one(
'hr.payslip.run',
string=u'Periodo de nomina', required=True,
ondelete='cascade'
)
date_start_rel = fields.Date("Fecha inicio",related='hr_payslip_run_id.date_start', readonly="1")
date_end_rel = fields.Date("Fecha Fin",related='hr_payslip_run_id.date_end', readonly="1")
@api.multi
def do_rebuild(self):
query_vista = """
DROP VIEW IF EXISTS planilla_detalle_linea_nomina;
create or replace view planilla_detalle_linea_nomina as (
select row_number() OVER () AS id,* from
(
select
a6.date_start as fecha_ini,
a6.date_end as fecha_fin,
a6.name,
a4.identification_id as dni,
a4.name_related as nombres,
a5.sequence,
a5.name as concepto,
a7.code as cuenta_debe,
a8.code as cuenta_haber,
a1.amount as monto,
a1.slip_id,a1.salary_rule_id,a1.employee_id,a1.contract_id from hr_payslip_line a1
left join hr_payslip a2 on a2.id=a1.slip_id
left join hr_contract a3 on a3.id=a1.contract_id
left join hr_employee a4 on a4.id=a1.employee_id
left join hr_salary_rule a5 on a5.id=a1.salary_rule_id
left join hr_payslip_run a6 on a6.id=a2.payslip_run_id
left join account_account a7 on a7.id=a5.account_debit
left join account_account a8 on a8.id=a5.account_credit
where char_length(trim(concat(a7.code,a8.code)))> 0 and a6.date_start='%s' and a6.date_end='%s'
order by a5.sequence
) T
)""" % (self.date_start_rel,self.date_end_rel)
self.env.cr.execute(query_vista)
return {
'type': 'ir.actions.act_window',
'res_model': 'planilla.detalle.linea.nomina',
'view_type': 'form',
'view_mode': 'tree',
'target': 'current'
}
| [
"[email protected]"
] | |
0aa3c239919591a7da1a732bb8b313608a8b22dc | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/documentdb/v20210701preview/sql_resource_sql_role_definition.py | 3952f82004d3f80c5a9692dd2d9b3041ef281f9e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,723 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SqlResourceSqlRoleDefinitionArgs', 'SqlResourceSqlRoleDefinition']
@pulumi.input_type
class SqlResourceSqlRoleDefinitionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
assignable_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]]] = None,
role_definition_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input['RoleDefinitionType']] = None):
"""
The set of arguments for constructing a SqlResourceSqlRoleDefinition resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[str]]] assignable_scopes: A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
:param pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]] permissions: The set of operations allowed through this Role Definition.
:param pulumi.Input[str] role_definition_id: The GUID for the Role Definition.
:param pulumi.Input[str] role_name: A user-friendly name for the Role Definition. Must be unique for the database account.
:param pulumi.Input['RoleDefinitionType'] type: Indicates whether the Role Definition was built-in or user created.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if assignable_scopes is not None:
pulumi.set(__self__, "assignable_scopes", assignable_scopes)
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
if role_definition_id is not None:
pulumi.set(__self__, "role_definition_id", role_definition_id)
if role_name is not None:
pulumi.set(__self__, "role_name", role_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="assignableScopes")
def assignable_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
"""
return pulumi.get(self, "assignable_scopes")
@assignable_scopes.setter
def assignable_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "assignable_scopes", value)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]]]:
"""
The set of operations allowed through this Role Definition.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]]]):
pulumi.set(self, "permissions", value)
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> Optional[pulumi.Input[str]]:
"""
The GUID for the Role Definition.
"""
return pulumi.get(self, "role_definition_id")
@role_definition_id.setter
def role_definition_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_definition_id", value)
@property
@pulumi.getter(name="roleName")
def role_name(self) -> Optional[pulumi.Input[str]]:
"""
A user-friendly name for the Role Definition. Must be unique for the database account.
"""
return pulumi.get(self, "role_name")
@role_name.setter
def role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['RoleDefinitionType']]:
"""
Indicates whether the Role Definition was built-in or user created.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['RoleDefinitionType']]):
pulumi.set(self, "type", value)
class SqlResourceSqlRoleDefinition(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
assignable_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role_definition_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input['RoleDefinitionType']] = None,
__props__=None):
"""
An Azure Cosmos DB SQL Role Definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] assignable_scopes: A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionArgs']]]] permissions: The set of operations allowed through this Role Definition.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] role_definition_id: The GUID for the Role Definition.
:param pulumi.Input[str] role_name: A user-friendly name for the Role Definition. Must be unique for the database account.
:param pulumi.Input['RoleDefinitionType'] type: Indicates whether the Role Definition was built-in or user created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SqlResourceSqlRoleDefinitionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An Azure Cosmos DB SQL Role Definition.
:param str resource_name: The name of the resource.
:param SqlResourceSqlRoleDefinitionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SqlResourceSqlRoleDefinitionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
assignable_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role_definition_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input['RoleDefinitionType']] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SqlResourceSqlRoleDefinitionArgs.__new__(SqlResourceSqlRoleDefinitionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["assignable_scopes"] = assignable_scopes
__props__.__dict__["permissions"] = permissions
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["role_definition_id"] = role_definition_id
__props__.__dict__["role_name"] = role_name
__props__.__dict__["type"] = type
__props__.__dict__["name"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210415:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210515:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210615:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:SqlResourceSqlRoleDefinition")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SqlResourceSqlRoleDefinition, __self__).__init__(
'azure-native:documentdb/v20210701preview:SqlResourceSqlRoleDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlRoleDefinition':
"""
Get an existing SqlResourceSqlRoleDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SqlResourceSqlRoleDefinitionArgs.__new__(SqlResourceSqlRoleDefinitionArgs)
__props__.__dict__["assignable_scopes"] = None
__props__.__dict__["name"] = None
__props__.__dict__["permissions"] = None
__props__.__dict__["role_name"] = None
__props__.__dict__["type"] = None
return SqlResourceSqlRoleDefinition(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="assignableScopes")
def assignable_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
"""
return pulumi.get(self, "assignable_scopes")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def permissions(self) -> pulumi.Output[Optional[Sequence['outputs.PermissionResponse']]]:
"""
The set of operations allowed through this Role Definition.
"""
return pulumi.get(self, "permissions")
@property
@pulumi.getter(name="roleName")
def role_name(self) -> pulumi.Output[Optional[str]]:
"""
A user-friendly name for the Role Definition. Must be unique for the database account.
"""
return pulumi.get(self, "role_name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
05eefbd6347b2717c3294d3bacc4001917e74fa8 | a3626b65e8881da7e5b74fa1bb433478b22e4587 | /hacktj8/manage.py | 46c7f9291d376b65943c51442aef5e0f0dba1d32 | [] | no_license | saisree27/hacktj-8.0-project | ae779dfb6a45b394cdd18973f3205fb694294496 | ea8f99c3b45574d7741a2347b56bf0befbb909dc | refs/heads/main | 2023-04-09T20:13:52.938541 | 2021-04-11T19:42:22 | 2021-04-11T19:42:22 | 356,609,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hacktj8.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e8b54a4cad8e629c9f2ec0a179d49743ddc5deb5 | c81a68fd49fd3f41648a54d467b655a3c3ba1a00 | /activity/page/activity.py | c4c18e48701ce1229bb66626e228c50e456c44d0 | [] | no_license | yellowsea19/join | e505a8d1f9445432bce56c9372a17c576adbae44 | 878ac9585778f9a6261ecc7ae1d7e612d191ac74 | refs/heads/master | 2020-03-26T10:48:14.289820 | 2018-08-15T06:48:00 | 2018-08-15T06:48:00 | 144,816,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,282 | py | from page.login import loginPage,url
# from selenium import webdriver
# import requests
from selenium.webdriver.common.keys import Keys
import time,datetime
# name_td=datetime.datetime.strftime(datetime.datetime.now(),'%Y-%m-%d %H:%M')
# td1=datetime.timedelta(hours=1)
# td2=datetime.timedelta(hours=2)
# td3=datetime.timedelta(hours=-1)# offset for the custom sign-in start time
# newtd1=td1+datetime.datetime.now()
# newtd2=td2+datetime.datetime.now()
# newtd3=td3+datetime.datetime.now()
# dt1=datetime.datetime.strftime(newtd1,'%Y-%m-%d %H:%M')
# dt2=datetime.datetime.strftime(newtd2,'%Y-%m-%d %H:%M')
# dt3=datetime.datetime.strftime(newtd3,'%Y-%m-%d %H:%M')
idlist=[]
# print(dt3)
class activitypage(loginPage):
    all=('xpath','//span[contains(text(),"活动管理") and @ng-bind="menu.name"]')  # "Activity Management" menu
    all1=('xpath','//span[contains(text(),"全部活动") ]')  # "All Activities"
    new=('xpath','//span[contains(text(),"新增") ]')  # click "New"
    abi_name=('xpath','//input[@placeholder="请输入活动名称"]')  # enter the activity name
    unit=('xpath','//div[@placeholder="请选择发布单位"]')  # publishing unit
    choose_unit=('xpath','//a/div[contains(text(),"345")]')  # choose the publishing unit
    scope=('xpath','//span[contains(text(),"请选择报名范围")]')  # click registration scope
    scope1=('xpath','//div[contains(text(),"科大")]')  # choose the registration scope
    sponsor=('xpath','//ul/li/input[@placeholder="请选择主办方"]')  # click sponsor
    sponsor1=('xpath','//div[contains(text(),"校学生会")]')  # choose the sponsor
    organizer=('xpath','//input[@placeholder="请选择承办方"]')  # organizer
    choose_organizer=('xpath','//div[contains(text(),"校学生会")]')
    # abi_type=('xpath','//*[@id="editformDiv"]/bootstrap-decorator[24]/div/div/div[1]/div/div/span')  # click activity category
    abi_type1=('xpath','//span[contains(text(),"请选择活动类别")]')  # click activity category
    abi_type2=('xpath','//div[contains(text(),"学术科技类")]')  # choose the activity category
    # abi_type3=('xpath','//*[@id="ui-select-choices-row-4-1"]/a/div')
    # target0=('xpath','//*[@id="editformDiv"]/bootstrap-decorator[30]/div/div')
    img=('xpath','//button[contains(text(),"从图片库选择") |@class="btn btn-success picture-lib-btn"]')  # click "choose from image library"
    img1=('xpath','//div/div[1]/div/img')  # select an image
    img_sure=('xpath','//div[@class="picture-poppup-footer ng-scope"]/button[@ng-click="save()"]')  # click confirm
    intro=('xpath','//div[@class="fr-element fr-view"]')  # activity introduction
    meaning=('xpath','//div/textarea[@placeholder="请输入活动意义"]')  # activity significance
    target1=('xpath','//*[@id="editformDiv"]/bootstrap-decorator[45]/div/label')
    abi_starttime=('xpath','//*[@id="abiStartTime"]')  # click start time
    abi_endtime=('xpath','//*[@id="abiEndTime"]')  # click activity end time
    # sure=('xpath','/html/body/div[13]/div[3]/div/button[1]')
    shichang=('xpath','//input[@placeholder="请输入活动时长"]')  # enter the activity duration
    allow=('xpath','//input[@placeholder="请输入允许报名人数"]')  # enter the allowed number of participants
    sign=('xpath','//select/option[contains(text(),"不需要签到")]')  # click sign-in mode ("no sign-in required")
    signnomal=('xpath','//select/option[contains(text(),"普通签到")]')  # choose normal sign-in
    signscan=('xpath',' //option[@label="扫描签到"]')  # scan sign-in
    signadmin=('xpath',' //option[@label="管理员扫描签到"]')  # admin scan sign-in
    signticket=('xpath',' //option[@label="验票签到"]')  # ticket-check sign-in
    signout=('xpath',' //option[@label="不需要签退"]')  # click sign-out mode ("no sign-out required")
    signoutnomal=('xpath',' //option[@label="普通签退"]')  # normal sign-out
    signoutscan=('xpath',' //option[@label="扫描签退"]')  # scan sign-out
    signoutadmin=('xpath',' //option[@label="管理员扫描签退"]')  # admin scan sign-out
    signoutticket=('xpath',' //option[@label="验票签退"]')  # ticket-check sign-out
    signtime=('xpath','//option[@label="默认(活动开始前15分钟)"]')  # sign-in start time: default (15 min before the activity starts)
    signtime1=('xpath','//option[@label="自定义(在报名开始之后与活动结束之前)"]')  # sign-in start time: custom
    signstarttime=('xpath','//input[@placeholder="请选择签到开始时间"]')  # custom sign-in start time
    # customtime=('xpath','//*[@id="signinStartTime"]')  # click custom time
    # signbutton=('xpath','/html/body/div[14]/div[3]/div/button[1]')  # confirm
    # map=('xpath','//span[@class="icon-activities_icon_add1"]')  # click map
    # maps=('xpath','//span[@ng-if="form.icon"]')
    map=('xpath','//span[@class="icon-activities_icon_add1"]')  # click map
    mapbutton=('xpath','//div/button[@class="btn btn-success ng-binding ng-scope"]')  # click confirm
    target2=('xpath','//*[@id="editformDiv"]/bootstrap-decorator[119]/div/label')  # scroll anchor
    save=('xpath','//input[@value="保存"]')  # click save
    save1=('xpath','//button[@class="btn btn-primary ng-binding"]')  # confirm save
    a = ('xpath', '//tr[1]/td[3]/a/span')
    # audit_button=('xpath','//div/button[@class="btn btn-success btn-joinpost"]')
    # audit_agree=('xpath','//span[contains(text(),"通过")]')
    # save_button=('xpath','//button[contains(text(),"审核")]')
    # sure_button=('xpath','//button[@class="btn btn-primary ng-binding"]')
    def activity(self,abi_property='普通活动',abi_name='autotest',choose_unit='345',choose_scope='科大',abi_label='学术科技类',abi_label_child='',intro='autotest',meaning='autotest',dt1='dt1',dt2='dt2',shichang='30',allowmember='100',sign=0,signout=0):
        # time.sleep(3)
        # self.click(self.all)  # click "Activity Management"
        # self.click(self.all1)  # click "All Activities"
        self.click(('xpath','//span[text()="个人中心"]'))  # click "Personal Center"
        time.sleep(0.5)
        self.click(('xpath','//span[text()="我的活动"]'))  # click "My Activities"
        self.click(self.new)  # click "New"
        time.sleep(3)
        if self.is_exists(('xpath','//select/option[text()="普通活动"]')):  # check whether the form distinguishes normal vs. paper-collection activities
            self.click(('xpath','//select/option[text()="普通活动"]'))
            self.click(('xpath','//select/option[text()="%s"]'%abi_property))
        self.send_keys(self.abi_name,abi_name)  # enter the activity name
        self.click(self.unit)  # click publishing unit
        time.sleep(1)
        self.click(('xpath','//a/div[contains(text(),"%s")]'%choose_unit))  # choose the publishing unit
        self.click(self.scope)  # click registration scope
        self.click(('xpath','//div[contains(text(),"%s")]'%choose_scope))  # choose the registration scope
        self.click(self.sponsor)  # click sponsor
        self.click(self.sponsor1)  # choose the sponsor
        self.click(self.organizer)  # click organizer
        self.click(self.choose_organizer)  # choose the organizer (student union)
        self.click(self.abi_type1)  # click activity category
        self.click(('xpath','//div[contains(text(),"%s")]'%abi_label))  # choose the activity category
        if self.is_exists(('xpath','//span[text()="请选择活动子类别"]')):  # check whether an activity sub-category exists
            self.click(('xpath','//span[text()="请选择活动子类别"]'))  # click activity sub-category
            time.sleep(0.5)
            self.click(('xpath','//div[text()="%s"]'%abi_label_child))
        # self.js_focus_element(self.target0)  # scroll anchor
        self.click(self.img)  # click "choose from image library"
        self.click(self.img1)  # select an image
        self.click(self.img_sure)  # confirm
        self.send_keys(self.intro,intro)  # enter the introduction
        self.send_keys(self.meaning,meaning)  # enter the activity significance
        # self.js_focus_element(self.target1)  # focus the element
        self.click(self.abi_starttime)  # click activity start time
        time.sleep(1)
        self.send_keys(self.abi_starttime,'2')  # start time
        # js='document.getElementById("abiStartTime").value="%s"'%dt1
        # # print(js)
        # self.js_execute(js)
        # time.sleep(5)
        # self.send_keys(self.abi_starttime,Keys.BACK_SPACE)
        # time.sleep(5)
        # self.send_keys(self.abi_starttime,Keys.TAB)
        self.send_keys(self.abi_starttime,'2')  # start time
        self.send_keys(self.abi_starttime,dt1)
        self.click(self.abi_endtime)
        time.sleep(1)
        # self.send_keys(self.abi_endtime,'2')
        # js='document.getElementById("abiEndTime").value="%s"'%dt2
        # self.js_execute(js)
        # time.sleep(5)
        # self.send_keys(self.abi_endtime,Keys.BACK_SPACE)
        # time.sleep(5)
        # self.send_keys(self.abi_endtime,Keys.TAB)
        self.click(self.abi_endtime)  # end time
        self.send_keys(self.abi_endtime,'2')
        self.send_keys(self.abi_endtime,dt2)
        # self.send_keys(self.abi_endtime,dt2,is_clear=True)
        time.sleep(1)
        self.click(self.shichang)
        self.send_keys(self.shichang,shichang)  # duration
        self.send_keys(self.allow,allowmember)  # enter the number of participants
        if sign==0:
            pass  # no sign-in required
        if sign==1:
            self.click(self.sign)
            self.click(self.signnomal)  # normal sign-in
        if sign==2:
            self.click(self.sign)
            self.click(self.signscan)  # scan sign-in
        if sign==3:
            self.click(self.sign)
            self.click(self.signadmin)  # admin scan sign-in
        if sign==4:
            self.click(self.sign)
            self.click(self.signticket)  # ticket-check sign-in
        if signout==0:
            pass  # no sign-out required
        if signout==1:
            self.click(self.signout)
            self.click(self.signoutnomal)  # normal sign-out
        if signout==2:
            self.click(self.signout)
            self.click(self.signoutscan)  # scan sign-out
        if signout==3:
            self.click(self.signout)
            self.click(self.signoutadmin)  # admin scan sign-out
        if signout==4:
            self.click(self.signout)
            self.click(self.signoutticket)  # ticket-check sign-out
        # self.click(self.map)  # click map
        self.click(self.map)
        time.sleep(2)
        self.click(self.mapbutton)  # click confirm
        if abi_property=='论文征集活动':  # paper-collection activity branch
            time.sleep(1)
            self.click(('xpath','//div[@placeholder="请选择荣誉类别"]/span'))
            time.sleep(0.5)
            self.click(('xpath','//div[text()="专利证书"]'))
            time.sleep(2)
            f=('xpath','//input[@class="form-control col-width-100 ng-pristine ng-untouched ng-valid"]')
            num_list=self.find_elements(f)
            time.sleep(3)
            # print(num_list)
            # self.send_keys(num_list[1],'5')
            for num in num_list:
                num.send_keys('1')
            time.sleep(1)
            self.click(('xpath','//input[@placeholder="请选择二评人员"]'))
            time.sleep(1)
            self.click(('xpath','//a[text()="脚印大学2800"]'))
            time.sleep(1)
            self.click(('xpath','//a[text()="院系"]'))
            time.sleep(1)
            self.click(('xpath','//div[text()="院系人员"]'))
            time.sleep(1)
            self.click(('xpath','//div[@class="modal-footer ng-scope"]/button[text()="确定"]'))
            time.sleep(1)
            self.send_keys(('xpath','//input[@placeholder="请输入公示周期"]'),'1')
        # self.js_focus_element(self.target2)
        time.sleep(2)
        self.click(self.save)  # click save
        time.sleep(1)
        self.click(self.save1)  # confirm save
        time.sleep(3)
        self.click(self.a)  # click the newly created activity to open its detail page
        time.sleep(3)
        addurl=self.get_url()
        id=addurl.split('/')[-1]
        with open('idlist.txt','a+') as f:
            f.write(id+'\n')  # the with-block closes the file automatically
        # idlist.append(id)
        # print("id=",id)
# def audit1(self,id):
# print(id)
# auditurl=url+'#/activitybaseinfo/edit/show/5/'+id
# self.open(auditurl)
# self.click(self.audit_button)
# self.click(self.audit_agree)
# self.click(self.save_button)
# self.click(self.sure_button)
#
# def audit2(self,id):
# auditurl=url+'#/activitybaseinfo/edit/show/7/'+id
# self.open(auditurl)
# time.sleep(3)
# self.click(self.audit_button)
# self.click(self.audit_agree)
# self.click(self.save_button)
# self.click(self.sure_button)
| [
"[email protected]"
] | |
d3a02d8a0d5dc8a84e62e77c3a8635fac1e9942f | ce105dfbcb2acb78ba19f3410d87249a75eecbd9 | /FastGCN/models.py | 5bde38cf93ef28040331c704a0f9d3b2ab2abf28 | [
"MIT"
] | permissive | CEfanmin/DataMiningProjects | 8209670f65332681d0de39ba6e702077a7ab4602 | b6375f542c68c0001ae2971dd7e8046a0b4afc7a | refs/heads/master | 2021-04-06T02:05:52.868605 | 2020-02-29T03:41:33 | 2020-02-29T03:41:33 | 124,988,979 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,271 | py | from layers import *
from metrics import *
flags = tf.app.flags
FLAGS = flags.FLAGS
class Model(object):
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
logging = kwargs.get('logging', False)
self.logging = logging
self.vars = {}
self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
def _build(self):
raise NotImplementedError
def build(self):
""" Wrapper for _build() """
with tf.variable_scope(self.name):
self._build()
# Build sequential layer model
self.activations.append(self.inputs)
for layer in self.layers:
hidden = layer(self.activations[-1])
self.activations.append(hidden)
self.outputs = self.activations[-1]
# Store model variables for easy access
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
self.vars = {var.name: var for var in variables}
# Build metrics
self._loss()
self._accuracy()
self.opt_op = self.optimizer.minimize(self.loss)
def predict(self):
pass
def _loss(self):
raise NotImplementedError
def _accuracy(self):
raise NotImplementedError
def save(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
saver = tf.train.Saver(self.vars)
save_path = saver.save(sess, "tmp/%s.ckpt" % self.name)
print("Model saved in file: %s" % save_path)
def load(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
saver = tf.train.Saver(self.vars)
save_path = "tmp/%s.ckpt" % self.name
saver.restore(sess, save_path)
print("Model restored from file: %s" % save_path)
class MLP(Model):
def __init__(self, placeholders, input_dim, **kwargs):
super(MLP, self).__init__(**kwargs)
self.inputs = placeholders['features']
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _accuracy(self):
self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _build(self):
self.layers.append(Dense(input_dim=self.input_dim,
output_dim=FLAGS.hidden1,
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=True,
logging=self.logging))
self.layers.append(Dense(input_dim=FLAGS.hidden1,
output_dim=self.output_dim,
placeholders=self.placeholders,
act=lambda x: x,
dropout=True,
logging=self.logging))
def predict(self):
return tf.nn.softmax(self.outputs)
class GCN(Model):
def __init__(self, placeholders, input_dim, **kwargs):
super(GCN, self).__init__(**kwargs)
self.inputs = placeholders['features']
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _accuracy(self):
self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _build(self):
self.layers.append(GraphConvolution(input_dim=self.input_dim,
output_dim=FLAGS.hidden1,
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=True,
logging=self.logging))
self.layers.append(GraphConvolution(input_dim=FLAGS.hidden1,
output_dim=self.output_dim,
placeholders=self.placeholders,
act=lambda x: x,
dropout=True,
logging=self.logging))
def predict(self):
return tf.nn.softmax(self.outputs)
class GCN_APPRO(Model):
def __init__(self, placeholders, input_dim, **kwargs):
super(GCN_APPRO, self).__init__(**kwargs)
self.inputs = placeholders['features']
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.supports = placeholders['support']
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += softmax_cross_entropy(self.outputs, self.placeholders['labels'])
def _accuracy(self):
self.accuracy = accuracy(self.outputs, self.placeholders['labels'])
def _build(self):
# appr_support = self.placeholders['support'][0]
self.layers.append(GraphConvolution(input_dim=self.input_dim,
output_dim=FLAGS.hidden1,
placeholders=self.placeholders,
support=self.supports[0],
act=tf.nn.relu,
dropout=True,
sparse_inputs=False,
logging=self.logging))
self.layers.append(GraphConvolution(input_dim=FLAGS.hidden1,
output_dim=self.output_dim,
placeholders=self.placeholders,
support=self.supports[1],
act=lambda x: x,
dropout=True,
logging=self.logging))
def predict(self):
return tf.nn.softmax(self.outputs)
class GCN_APPRO_Mix(Model): # mixture of dense and gcn
def __init__(self, placeholders, input_dim, **kwargs):
super(GCN_APPRO_Mix, self).__init__(**kwargs)
self.inputs = placeholders['AXfeatures']# A*X for the bottom layer, not original feature X
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.support = placeholders['support']
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += softmax_cross_entropy(self.outputs, self.placeholders['labels'])
def _accuracy(self):
self.accuracy = accuracy(self.outputs, self.placeholders['labels'])
def _build(self):
self.layers.append(Dense(input_dim=self.input_dim,
output_dim=FLAGS.hidden1,
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=False,
logging=self.logging))
self.layers.append(GraphConvolution(input_dim=FLAGS.hidden1,
output_dim=self.output_dim,
placeholders=self.placeholders,
support=self.support,
act=lambda x: x,
dropout=True,
logging=self.logging))
def predict(self):
return tf.nn.softmax(self.outputs)
class GCN_APPRO_Onelayer(Model):
def __init__(self, placeholders, input_dim, **kwargs):
super(GCN_APPRO_Onelayer, self).__init__(**kwargs)
self.inputs = placeholders['features']
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.supports = placeholders['support']
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _accuracy(self):
self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _build(self):
        # appr_support = self.placeholders['support'][0]  # unused; self.supports[0] is passed below
self.layers.append(GraphConvolution(input_dim=self.input_dim,
output_dim=self.output_dim,
placeholders=self.placeholders,
support=self.supports[0],
act=tf.nn.relu,
dropout=True,
sparse_inputs=True,
logging=self.logging))
def predict(self):
return tf.nn.softmax(self.outputs)
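
# Minimal usage sketch (an assumption, not taken from this repo's training
# script): build the usual placeholder dict and instantiate the two-layer GCN.
#   placeholders = {
#       'support': [tf.sparse_placeholder(tf.float32)],
#       'features': tf.sparse_placeholder(tf.float32),
#       'labels': tf.placeholder(tf.float32, shape=(None, num_classes)),
#       'labels_mask': tf.placeholder(tf.int32),
#       'dropout': tf.placeholder_with_default(0., shape=()),
#       'num_features_nonzero': tf.placeholder(tf.int32),
#   }
#   model = GCN(placeholders, input_dim=num_features)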
| [
"[email protected]"
] | |
7959884d04bc3ce0e004bf3583549026b6fc1b1a | 3a9c1bc10588073920d55f72be5fbbbd3781cf9e | /word_count_python/word_count_reducer.py | 5378d6844129a68307411949c3b6a5875c627786 | [] | no_license | adcohen-tech/tech_challenge | 3224ff9e2278ebaca33361527cb313cc9d8f8b0f | 2d6bfe541998149cf056022ad2b3e626cf7bc8b9 | refs/heads/master | 2016-09-05T12:53:20.981228 | 2014-11-25T16:51:12 | 2014-11-25T16:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | #!/usr/bin/env python
import sys
if __name__ == '__main__':
current_word = None
key_count = 0
for line in sys.stdin:
key,value = line.strip().split("\t")
try:
record_count = int(value)
except ValueError:
            continue  # skip lines whose count field is not an integer
if key != current_word:
            if current_word is not None:
                # emit the count for the word just finished, not the new key
                print "{0}\t{1}".format(current_word, key_count)
key_count = record_count
current_word = key
else:
key_count += record_count
    # flush the final word's count (guard against empty input)
    if current_word is not None:
        print "{0}\t{1}".format(current_word, key_count)
| [
"[email protected]"
] | |
5488b240e57097bb3539fc1125cba122aa285455 | b0cd6f73c3a2c4bf9d30cf2db06de937415f6ae5 | /loadData.py | 61101777354833673295041ac9fedaff8e5fe713 | [] | no_license | BinbinBian/TextualEntailment | 9d76b8c4ddb8c6f37172f12844646f736fb68434 | a81980e038762d325330d6852d17e9a32800e1b9 | refs/heads/master | 2021-01-18T03:55:47.090212 | 2015-12-08T14:07:48 | 2015-12-08T14:07:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,457 | py | import cPickle
import gzip
import os
import sys
sys.setrecursionlimit(6000)
import time
import numpy
import theano
import theano.tensor as T
import theano.sandbox.neighbours as TSN
from logistic_sgd import LogisticRegression
from WPDefined import ConvFoldPoolLayer, dropout_from_layer, shared_dataset, repeat_whole_matrix
from cis.deep.utils.theano import debug_print
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from operator import itemgetter
def load_ibm_corpus(vocabFile, trainFile, devFile, maxlength):
#first load word vocab
read_vocab=open(vocabFile, 'r')
vocab={}
word_ind=1
for line in read_vocab:
tokens=line.strip().split()
vocab[tokens[1]]=word_ind #word2id
word_ind+=1
read_vocab.close()
sentlength_limit=1040
#load train file
def load_train_file(file, word2id):
read_file=open(file, 'r')
data=[]
Lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t') # label, question, answer
#question
for i in range(1,3):
sent=[]
words=tokens[i].strip().split()
length=len(words)
if length>sentlength_limit:
words=words[:sentlength_limit]
length=sentlength_limit
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
if left<0 or right<0:
print 'Too long sentence:\n'+tokens[i]
exit(0)
sent+=[0]*left
for word in words:
                sent.append(word2id.get(word))  # assumes the token is in the vocab; an OOV word would append None
sent+=[0]*right
data.append(sent)
del sent
del words
line_control+=1
if line_control%100==0:
print line_control
read_file.close()
return numpy.array(data),numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
def load_dev_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t')
Y.append(int(tokens[0])) # make the label starts from 0 to 4
for i in range(1,3):
sent=[]
words=tokens[i].strip().split()
length=len(words)
if length>sentlength_limit:
words=words[:sentlength_limit]
length=sentlength_limit
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
if left<0 or right<0:
print 'Too long sentence:\n'+line
exit(0)
sent+=[0]*left
for word in words:
sent.append(word2id.get(word))
sent+=[0]*right
data.append(sent)
line_control+=1
#if line_control==1000:
# break
read_file.close()
return numpy.array(data),Y, numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
indices_train, trainLengths, trainLeftPad, trainRightPad=load_train_file(trainFile, vocab)
print 'train file loaded over, total pairs: ', len(trainLengths)/2
indices_dev, devY, devLengths, devLeftPad, devRightPad=load_dev_file(devFile, vocab)
print 'dev file loaded over, total pairs: ', len(devLengths)/2
def shared_dataset(data_y, borrow=True):
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX), # @UndefinedVariable
borrow=borrow)
return T.cast(shared_y, 'int32')
#return shared_y
train_set_Lengths=shared_dataset(trainLengths)
valid_set_Lengths = shared_dataset(devLengths)
train_left_pad=shared_dataset(trainLeftPad)
train_right_pad=shared_dataset(trainRightPad)
dev_left_pad=shared_dataset(devLeftPad)
dev_right_pad=shared_dataset(devRightPad)
#valid_set_y = shared_dataset(devY)
rval = [(indices_train,train_set_Lengths, train_left_pad, train_right_pad), (indices_dev, devY, valid_set_Lengths, dev_left_pad, dev_right_pad)]
return rval, word_ind-1
def load_word2vec_to_init(rand_values, file):
readFile=open(file, 'r')
line_count=1
for line in readFile:
tokens=line.strip().split()
rand_values[line_count]=numpy.array(map(float, tokens[1:]))
line_count+=1
readFile.close()
print 'initialization over...'
return rand_values
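# Usage sketch (an assumption, not from this file): row 0 is left as the
# all-zero padding vector, since word ids start at 1 in the vocab loaders.
#   rand_values = numpy.random.normal(0.0, 0.01, (vocab_size + 1, emb_size))
#   rand_values[0] = numpy.zeros(emb_size)
#   rand_values = load_word2vec_to_init(rand_values, 'word2vec_vectors.txt')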
def load_msr_corpus(vocabFile, trainFile, testFile, maxlength): #maxSentLength=60
#first load word vocab
read_vocab=open(vocabFile, 'r')
vocab={}
word_ind=1
for line in read_vocab:
tokens=line.strip().split()
vocab[tokens[1]]=word_ind #word2id
word_ind+=1
read_vocab.close()
#load train file
def load_train_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t') # label, sent1, sent2
Y.append(int(tokens[0])) #repeat
Y.append(int(tokens[0]))
#question
for i in [1,2,2,1]: #shuffle the example
sent=[]
words=tokens[i].strip().lower().split()
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
if left<0 or right<0:
print 'Too long sentence:\n'+tokens[i]
exit(0)
sent=[0]*left+sent+[0]*right
data.append(sent)
#line_control+=1
read_file.close()
'''
#normalized length
arr=numpy.array(Lengths)
max=numpy.max(arr)
min=numpy.min(arr)
normalized_lengths=(arr-min)*1.0/(max-min)
'''
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
def load_test_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t')
Y.append(int(tokens[0])) # make the label starts from 0 to 4
#Y.append(int(tokens[0]))
for i in [1,2]:
sent=[]
words=tokens[i].strip().lower().split()
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
if left<0 or right<0:
print 'Too long sentence:\n'+tokens[i]
exit(0)
sent=[0]*left+sent+[0]*right
data.append(sent)
#line_control+=1
#if line_control==1000:
# break
read_file.close()
'''
#normalized lengths
arr=numpy.array(Lengths)
max=numpy.max(arr)
min=numpy.min(arr)
normalized_lengths=(arr-min)*1.0/(max-min)
'''
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
indices_train, trainY, trainLengths, trainLeftPad, trainRightPad=load_train_file(trainFile, vocab)
print 'train file loaded over, total pairs: ', len(trainLengths)/2
indices_test, testY, testLengths, testLeftPad, testRightPad=load_test_file(testFile, vocab)
print 'test file loaded over, total pairs: ', len(testLengths)/2
    #now, we need to normalize sentence lengths over the whole dataset (training and test)
concate_matrix=numpy.concatenate((trainLengths, testLengths), axis=0)
max=numpy.max(concate_matrix)
min=numpy.min(concate_matrix)
normalized_trainLengths=(trainLengths-min)*1.0/(max-min)
normalized_testLengths=(testLengths-min)*1.0/(max-min)
def shared_dataset(data_y, borrow=True):
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX), # @UndefinedVariable
borrow=borrow)
return T.cast(shared_y, 'int64')
#return shared_y
#indices_train=shared_dataset(indices_train)
#indices_test=shared_dataset(indices_test)
train_set_Lengths=shared_dataset(trainLengths)
test_set_Lengths=shared_dataset(testLengths)
normalized_train_length=theano.shared(numpy.asarray(normalized_trainLengths, dtype=theano.config.floatX), borrow=True)
normalized_test_length = theano.shared(numpy.asarray(normalized_testLengths, dtype=theano.config.floatX), borrow=True)
train_left_pad=shared_dataset(trainLeftPad)
train_right_pad=shared_dataset(trainRightPad)
test_left_pad=shared_dataset(testLeftPad)
test_right_pad=shared_dataset(testRightPad)
train_set_y=shared_dataset(trainY)
test_set_y = shared_dataset(testY)
rval = [(indices_train,train_set_y, train_set_Lengths, normalized_train_length, train_left_pad, train_right_pad), (indices_test, test_set_y, test_set_Lengths, normalized_test_length, test_left_pad, test_right_pad)]
return rval, word_ind-1
def load_mts(train_file, test_file):
read_train=open(train_file, 'r')
train_values=[]
for line in read_train:
tokens=map(float, line.strip().split())
train_values.append(tokens)
train_values.append(tokens)#repeat once
read_train.close()
read_test=open(test_file, 'r')
test_values=[]
for line in read_test:
tokens=map(float, line.strip().split())
test_values.append(tokens)
read_test.close()
train_values=theano.shared(numpy.asarray(train_values, dtype=theano.config.floatX), borrow=True)
test_values=theano.shared(numpy.asarray(test_values, dtype=theano.config.floatX), borrow=True)
return train_values, test_values
def load_mts_wikiQA(train_file, test_file):
read_train=open(train_file, 'r')
train_values=[]
for line in read_train:
tokens=map(float, line.strip().split())
train_values.append(tokens)
read_train.close()
read_test=open(test_file, 'r')
test_values=[]
for line in read_test:
tokens=map(float, line.strip().split())
test_values.append(tokens)
read_test.close()
train_values=theano.shared(numpy.asarray(train_values, dtype=theano.config.floatX), borrow=True)
test_values=theano.shared(numpy.asarray(test_values, dtype=theano.config.floatX), borrow=True)
return train_values, test_values
def load_extra_features(train_file, test_file):
read_train=open(train_file, 'r')
train_values=[]
for line in read_train:
tokens=map(float, line.strip().split())
train_values.append(tokens)
read_train.close()
read_test=open(test_file, 'r')
test_values=[]
for line in read_test:
tokens=map(float, line.strip().split())
test_values.append(tokens)
read_test.close()
train_values=theano.shared(numpy.asarray(train_values, dtype=theano.config.floatX), borrow=True)
test_values=theano.shared(numpy.asarray(test_values, dtype=theano.config.floatX), borrow=True)
return train_values, test_values
def load_wmf_wikiQA(train_file, test_file):
read_train=open(train_file, 'r')
train_values=[]
for line in read_train:
tokens=map(float, line.strip().split())
train_values.append(tokens)
read_train.close()
read_test=open(test_file, 'r')
test_values=[]
for line in read_test:
tokens=map(float, line.strip().split())
test_values.append(tokens)
read_test.close()
train_values=theano.shared(numpy.asarray(train_values, dtype=theano.config.floatX), borrow=True)
test_values=theano.shared(numpy.asarray(test_values, dtype=theano.config.floatX), borrow=True)
return train_values, test_values
def load_wikiQA_corpus(vocabFile, trainFile, testFile, max_truncate,maxlength): #maxSentLength=45
#first load word vocab
read_vocab=open(vocabFile, 'r')
vocab={}
word_ind=1
for line in read_vocab:
tokens=line.strip().split()
vocab[tokens[1]]=word_ind #word2id
word_ind+=1
read_vocab.close()
#load train file
def load_train_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t') # question, answer, label
Y.append(int(tokens[2]))
#question
for i in [0,1]:
sent=[]
words=tokens[i].strip().split()
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
if length==max_truncate: #we consider max 43 words
break
if length==0:
#print 'shit sentence: ', tokens[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
line_control+=1
#if line_control==50:
# break
read_file.close()
'''
#normalized length
arr=numpy.array(Lengths)
max=numpy.max(arr)
min=numpy.min(arr)
normalized_lengths=(arr-min)*1.0/(max-min)
'''
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
def load_test_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t')
Y.append(int(tokens[2])) # make the label starts from 0 to 4
#Y.append(int(tokens[0]))
for i in [0,1]:
sent=[]
words=tokens[i].strip().split()
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
if length==max_truncate: #we consider max 43 words
break
if length==0:
#print 'shit sentence: ', tokens[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
#line_control+=1
#if line_control==1000:
# break
read_file.close()
'''
#normalized lengths
arr=numpy.array(Lengths)
max=numpy.max(arr)
min=numpy.min(arr)
normalized_lengths=(arr-min)*1.0/(max-min)
'''
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
indices_train, trainY, trainLengths, trainLeftPad, trainRightPad=load_train_file(trainFile, vocab)
print 'train file loaded over, total pairs: ', len(trainLengths)/2
indices_test, testY, testLengths, testLeftPad, testRightPad=load_test_file(testFile, vocab)
print 'test file loaded over, total pairs: ', len(testLengths)/2
    #now, we need to normalize sentence lengths over the whole dataset (training and test)
concate_matrix=numpy.concatenate((trainLengths, testLengths), axis=0)
max=numpy.max(concate_matrix)
min=numpy.min(concate_matrix)
normalized_trainLengths=(trainLengths-min)*1.0/(max-min)
normalized_testLengths=(testLengths-min)*1.0/(max-min)
def shared_dataset(data_y, borrow=True):
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX), # @UndefinedVariable
borrow=borrow)
return T.cast(shared_y, 'int32') # for ARC-II on gpu
#return shared_y
#indices_train=shared_dataset(indices_train)
#indices_test=shared_dataset(indices_test)
train_set_Lengths=shared_dataset(trainLengths)
test_set_Lengths=shared_dataset(testLengths)
normalized_train_length=theano.shared(numpy.asarray(normalized_trainLengths, dtype=theano.config.floatX), borrow=True)
normalized_test_length = theano.shared(numpy.asarray(normalized_testLengths, dtype=theano.config.floatX), borrow=True)
train_left_pad=shared_dataset(trainLeftPad)
train_right_pad=shared_dataset(trainRightPad)
test_left_pad=shared_dataset(testLeftPad)
test_right_pad=shared_dataset(testRightPad)
train_set_y=shared_dataset(trainY)
test_set_y = shared_dataset(testY)
rval = [(indices_train,train_set_y, train_set_Lengths, normalized_train_length, train_left_pad, train_right_pad), (indices_test, test_set_y, test_set_Lengths, normalized_test_length, test_left_pad, test_right_pad)]
return rval, word_ind-1
def load_entailment_corpus(vocabFile, trainFile, testFile, max_truncate,maxlength): #maxSentLength=45
#first load word vocab
read_vocab=open(vocabFile, 'r')
vocab={}
word_ind=1
for line in read_vocab:
tokens=line.strip().split()
vocab[tokens[1]]=word_ind #word2id
word_ind+=1
read_vocab.close()
#load train file
def load_train_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t') # question, answer, label
question=tokens[1].strip().lower().split()
answer=tokens[2].strip().lower().split()
if len(question)>max_truncate or len(answer)>max_truncate or len(question)< 2 or len(answer)<2:
continue #skip this pair
else:
Y.append(int(tokens[0]))
sents=[question, answer]
#question
for i in [0,1]:
sent=[]
words=sents[i]
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
#if length==max_truncate: #we consider max 43 words
# break
if length==0:
                    print 'skipping sentence with no in-vocabulary words: ', sents[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
line_control+=1
if line_control==10000:
break
read_file.close()
if len(Lengths)/2 !=len(Y):
print 'len(Lengths)/2 !=len(Y)'
exit(0)
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
def load_test_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t') # question, answer, label
question=tokens[1].strip().lower().split()
answer=tokens[2].strip().lower().split()
if len(question)>max_truncate or len(answer)>max_truncate or len(question)< 2 or len(answer)<2:
continue #skip this pair
else:
Y.append(int(tokens[0]))
sents=[question, answer]
for i in [0,1]:
sent=[]
words=sents[i]
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
if length==0:
                    print 'skipping sentence with no in-vocabulary words: ', sents[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
line_control+=1
#if line_control==500:
# break
read_file.close()
'''
#normalized lengths
arr=numpy.array(Lengths)
max=numpy.max(arr)
min=numpy.min(arr)
normalized_lengths=(arr-min)*1.0/(max-min)
'''
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
indices_train, trainY, trainLengths, trainLeftPad, trainRightPad=load_train_file(trainFile, vocab)
print 'train file loaded over, total pairs: ', len(trainLengths)/2
indices_test, testY, testLengths, testLeftPad, testRightPad=load_test_file(testFile, vocab)
print 'test file loaded over, total pairs: ', len(testLengths)/2
    #now, we need to normalize sentence lengths over the whole dataset (training and test)
concate_matrix=numpy.concatenate((trainLengths, testLengths), axis=0)
max=numpy.max(concate_matrix)
min=numpy.min(concate_matrix)
normalized_trainLengths=(trainLengths-min)*1.0/(max-min)
normalized_testLengths=(testLengths-min)*1.0/(max-min)
def shared_dataset(data_y, borrow=True):
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX), # @UndefinedVariable
borrow=borrow)
return T.cast(shared_y, 'int64')
#return T.cast(shared_y, 'int32') # for gpu
#return shared_y
#indices_train=shared_dataset(indices_train)
#indices_test=shared_dataset(indices_test)
train_set_Lengths=shared_dataset(trainLengths)
test_set_Lengths=shared_dataset(testLengths)
normalized_train_length=theano.shared(numpy.asarray(normalized_trainLengths, dtype=theano.config.floatX), borrow=True)
normalized_test_length = theano.shared(numpy.asarray(normalized_testLengths, dtype=theano.config.floatX), borrow=True)
train_left_pad=shared_dataset(trainLeftPad)
train_right_pad=shared_dataset(trainRightPad)
test_left_pad=shared_dataset(testLeftPad)
test_right_pad=shared_dataset(testRightPad)
train_set_y=shared_dataset(trainY)
test_set_y = shared_dataset(testY)
rval = [(indices_train,train_set_y, train_set_Lengths, normalized_train_length, train_left_pad, train_right_pad), (indices_test, test_set_y, test_set_Lengths, normalized_test_length, test_left_pad, test_right_pad)]
return rval, word_ind-1
def load_SICK_corpus(vocabFile, trainFile, testFile, max_truncate,maxlength, entailment): #maxSentLength=45
#first load word vocab
read_vocab=open(vocabFile, 'r')
vocab={}
word_ind=1
for line in read_vocab:
tokens=line.strip().split()
vocab[tokens[1]]=word_ind #word2id
word_ind+=1
read_vocab.close()
#load train file
def load_train_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t') # question, answer, label
if entailment:
Y.append(int(tokens[2]))
else:
Y.append(float(tokens[3]))
#question
for i in [0,1]:
sent=[]
words=tokens[i].strip().split()
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
if length==max_truncate: #we consider max 43 words
break
if length==0:
                    print 'skipping sentence with no in-vocabulary words: ', tokens[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
line_control+=1
#if line_control==500:
# break
read_file.close()
if len(Lengths)/2 !=len(Y):
print 'len(Lengths)/2 !=len(Y)'
exit(0)
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
def load_test_file(file, word2id):
read_file=open(file, 'r')
data=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t')
if entailment:
Y.append(int(tokens[2]))
else:
Y.append(float(tokens[3]))
#Y.append(int(tokens[0]))
for i in [0,1]:
sent=[]
words=tokens[i].strip().split()
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
if length==max_truncate: #we consider max 43 words
break
if length==0:
                    print 'skipping sentence with no in-vocabulary words: ', tokens[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
line_control+=1
#if line_control==200:
# break
read_file.close()
'''
#normalized lengths
arr=numpy.array(Lengths)
max=numpy.max(arr)
min=numpy.min(arr)
normalized_lengths=(arr-min)*1.0/(max-min)
'''
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
indices_train, trainY, trainLengths, trainLeftPad, trainRightPad=load_train_file(trainFile, vocab)
print 'train file loaded over, total pairs: ', len(trainLengths)/2
indices_test, testY, testLengths, testLeftPad, testRightPad=load_test_file(testFile, vocab)
print 'test file loaded over, total pairs: ', len(testLengths)/2
    #now, we need to normalize sentence lengths over the whole dataset (training and test)
concate_matrix=numpy.concatenate((trainLengths, testLengths), axis=0)
max=numpy.max(concate_matrix)
min=numpy.min(concate_matrix)
normalized_trainLengths=(trainLengths-min)*1.0/(max-min)
normalized_testLengths=(testLengths-min)*1.0/(max-min)
def shared_dataset(data_y, borrow=True):
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX), # @UndefinedVariable
borrow=borrow)
        return T.cast(shared_y, 'int64')
#return shared_y
def shared_dataset_float(data_y, borrow=True):
return theano.shared(numpy.asarray(data_y,dtype=theano.config.floatX), borrow=borrow)
#indices_train=shared_dataset(indices_train)
#indices_test=shared_dataset(indices_test)
train_set_Lengths=shared_dataset(trainLengths)
test_set_Lengths=shared_dataset(testLengths)
normalized_train_length=theano.shared(numpy.asarray(normalized_trainLengths, dtype=theano.config.floatX), borrow=True)
normalized_test_length = theano.shared(numpy.asarray(normalized_testLengths, dtype=theano.config.floatX), borrow=True)
train_left_pad=shared_dataset(trainLeftPad)
train_right_pad=shared_dataset(trainRightPad)
test_left_pad=shared_dataset(testLeftPad)
test_right_pad=shared_dataset(testRightPad)
if entailment:
train_set_y=shared_dataset(trainY)
test_set_y = shared_dataset(testY)
else:
train_set_y=shared_dataset_float(trainY)
test_set_y = shared_dataset_float(testY)
rval = [(indices_train,train_set_y, train_set_Lengths, normalized_train_length, train_left_pad, train_right_pad), (indices_test, test_set_y, test_set_Lengths, normalized_test_length, test_left_pad, test_right_pad)]
return rval, word_ind-1
def load_SICK_corpus_binary_feature(vocabFile, trainFile, testFile, max_truncate,maxlength, entailment): #maxSentLength=45
#first load word vocab
read_vocab=open(vocabFile, 'r')
vocab={}
word_ind=1
for line in read_vocab:
tokens=line.strip().split()
vocab[tokens[1]]=word_ind #word2id
word_ind+=1
read_vocab.close()
#load train file
def load_train_file(file, word2id):
read_file=open(file, 'r')
data=[]
binarys=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t') # question, answer, label
if entailment:
Y.append(int(tokens[2]))
else:
Y.append(float(tokens[3]))
#question
for i in [0,1]:
sent=[]
words=tokens[i].strip().split()
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
if length==max_truncate: #we consider max 43 words
break
if length==0:
                    print 'skipping sentence with no in-vocabulary words: ', tokens[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
#binary feature
words1=tokens[0].strip().split()
words2=tokens[1].strip().split()
set1=set(words1)
set2=set(words2)
len1=len(words1)
len2=len(words2)
binary1=[]
binary2=[]
for word in words1:
if word in set2:
binary1.append(1.0)
else:
binary1.append(1e-10)
binary1=[0.0]*((maxlength-len1)/2)+binary1+[0.0]*(maxlength-(maxlength-len1)/2-len1)
for word in words2:
if word in set1:
binary2.append(1.0)
else:
binary2.append(1e-10)
binary2=[0.0]*((maxlength-len2)/2)+binary2+[0.0]*(maxlength-(maxlength-len2)/2-len2)
binarys.append(binary1)
binarys.append(binary2)
line_control+=1
#if line_control==500:
# break
read_file.close()
if len(Lengths)/2 !=len(Y):
print 'len(Lengths)/2 !=len(Y)'
exit(0)
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(binarys), numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
def load_test_file(file, word2id):
read_file=open(file, 'r')
data=[]
binarys=[]
Y=[]
Lengths=[]
#true_lengths=[]
leftPad=[]
rightPad=[]
line_control=0
for line in read_file:
tokens=line.strip().split('\t')
if entailment:
Y.append(int(tokens[2]))
else:
Y.append(float(tokens[3]))
#Y.append(int(tokens[0]))
for i in [0,1]:
sent=[]
words=tokens[i].strip().split()
#true_lengths.append(len(words))
length=0
for word in words:
id=word2id.get(word)
if id is not None:
sent.append(id)
length+=1
if length==max_truncate: #we consider max 43 words
break
if length==0:
                    print 'skipping sentence with no in-vocabulary words: ', tokens[i]
#exit(0)
break
Lengths.append(length)
left=(maxlength-length)/2
right=maxlength-left-length
leftPad.append(left)
rightPad.append(right)
sent=[0]*left+sent+[0]*right
data.append(sent)
#binary feature
words1=tokens[0].strip().split()
words2=tokens[1].strip().split()
set1=set(words1)
set2=set(words2)
len1=len(words1)
len2=len(words2)
binary1=[]
binary2=[]
for word in words1:
if word in set2:
binary1.append(1.0)
else:
binary1.append(1e-10)
binary1=[0.0]*((maxlength-len1)/2)+binary1+[0.0]*(maxlength-(maxlength-len1)/2-len1)
for word in words2:
if word in set1:
binary2.append(1.0)
else:
binary2.append(1e-10)
binary2=[0.0]*((maxlength-len2)/2)+binary2+[0.0]*(maxlength-(maxlength-len2)/2-len2)
binarys.append(binary1)
binarys.append(binary2)
line_control+=1
#if line_control==200:
# break
read_file.close()
'''
#normalized lengths
arr=numpy.array(Lengths)
max=numpy.max(arr)
min=numpy.min(arr)
normalized_lengths=(arr-min)*1.0/(max-min)
'''
#return numpy.array(data),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
return numpy.array(data),numpy.array(binarys),numpy.array(Y), numpy.array(Lengths), numpy.array(leftPad),numpy.array(rightPad)
indices_train, binary_train, trainY, trainLengths, trainLeftPad, trainRightPad=load_train_file(trainFile, vocab)
print 'train file loaded over, total pairs: ', len(trainLengths)/2
indices_test, binary_test, testY, testLengths, testLeftPad, testRightPad=load_test_file(testFile, vocab)
print 'test file loaded over, total pairs: ', len(testLengths)/2
    #now, we need to normalize sentence lengths over the whole dataset (training and test)
concate_matrix=numpy.concatenate((trainLengths, testLengths), axis=0)
max=numpy.max(concate_matrix)
min=numpy.min(concate_matrix)
normalized_trainLengths=(trainLengths-min)*1.0/(max-min)
normalized_testLengths=(testLengths-min)*1.0/(max-min)
def shared_dataset(data_y, borrow=True):
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX), # @UndefinedVariable
borrow=borrow)
        return T.cast(shared_y, 'int64')
#return shared_y
def shared_dataset_float(data_y, borrow=True):
return theano.shared(numpy.asarray(data_y,dtype=theano.config.floatX), borrow=borrow)
#indices_train=shared_dataset(indices_train)
#indices_test=shared_dataset(indices_test)
train_set_Lengths=shared_dataset(trainLengths)
test_set_Lengths=shared_dataset(testLengths)
normalized_train_length=theano.shared(numpy.asarray(normalized_trainLengths, dtype=theano.config.floatX), borrow=True)
normalized_test_length = theano.shared(numpy.asarray(normalized_testLengths, dtype=theano.config.floatX), borrow=True)
train_left_pad=shared_dataset(trainLeftPad)
train_right_pad=shared_dataset(trainRightPad)
test_left_pad=shared_dataset(testLeftPad)
test_right_pad=shared_dataset(testRightPad)
if entailment:
train_set_y=shared_dataset(trainY)
test_set_y = shared_dataset(testY)
else:
train_set_y=shared_dataset_float(trainY)
test_set_y = shared_dataset_float(testY)
train_binary=shared_dataset_float(binary_train)
test_binary=shared_dataset_float(binary_test)
rval = [(indices_train,train_binary, train_set_y, train_set_Lengths, normalized_train_length, train_left_pad, train_right_pad), (indices_test, test_binary, test_set_y, test_set_Lengths, normalized_test_length, test_left_pad, test_right_pad)]
    return rval, word_ind-1
| [
"[email protected]"
] | |
61eec9209783882bdf2ed14473c988499d502c47 | 489b07587ac1ce7956e62295b0c2fa5fed0a79de | /testing.py | dd0ed56da4ca7134c6ad62baf659446bea1c809a | [] | no_license | rlhjansen/Paper_netlists | daa8c8a8ac849e8e8f04018f8670ead05e1f5535 | 34e05a6ceb15bb64d764f6b2f0c20f5af595a473 | refs/heads/master | 2021-06-06T11:49:32.605626 | 2019-11-05T10:34:58 | 2019-11-05T10:34:58 | 124,765,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py |
import os
import code.algorithms.simplyX as simple
def count_slash(str):
return sum([c=='/' for c in str])
for ind, elem in enumerate(os.walk('./results')):
if count_slash(elem[0]) == 10: # these give datafiles
try:
datafile = os.path.join(elem[0], elem[2][0])
print(datafile)
with open(datafile, 'r') as data:
print(data.readline().split(';')[-1][:-1])
print(datafile.split('/'))
break
except:
pass
def get_datafiles(size, ncount, nlist):
abspath = os.path.abspath(__file__)
abspath = os.path.dirname(abspath)
abspath = os.path.join(abspath, "data")
if self.generated:
abspath = os.path.join(abspath, "official_reference")
abspath = os.path.join(abspath, "generated")
abspath = os.path.join(abspath, "x"+str(size)+"y"+str(self.y))
abspath = os.path.join(abspath, "g0")
else:
abspath = os.path.join(abspath, "baseline")
abspath = os.path.join(abspath, 'C'+str(self.c))
abspath = os.path.join(abspath, 'C'+str(self.c)+"_"+str(self.cX))
self.circuit_path = abspath+".csv"
abspath = os.path.join(abspath, "N"+str(self.n))
abspath = os.path.join(abspath, "N"+str(self.n)+"_"+str(self.nX)+".csv")
self.netlist_path = abspath
def check_7(order, size, netcount, netlist, iters):
"""toplayers is a dictionary saving the toplayers of each routing"""
toplayers = {}
tag='test-okt'
start_add=10
lens = [i+start_add for i in range(81)]
for n in lens:
pool = mp.Pool(mp.cpu_count()-1)
Simples = simple_generator(100, 0, n, 20, size, size, tag=tag, iters=iters)
grid = file_to_grid(self.circuit_path, None, max_g=max_g)
self.circuit.read_nets(self.netlist_path)
simple.SIMPLY(100, 0, n, net_num, x, y, tag, iters=iters)
pool.map(meh, Simples)
pool.close()
simple_obj.circuit.connect()
circuit = simple_obj.circuit
if not ord:
ord = circuit.get_random_net_order()
g_coords, paths, _ = get_circuit_basics(circuit, ord)
| [
"[email protected]"
] | |
00b3db52cbf83200b0f4fb99b314fb59143ff2ae | fcee5f9afb4444c1ba2b81b672e78eed3fb721c4 | /make_voc_list.py | 97f929dc083e8c852934981f624f23da4c75a5c5 | [] | no_license | Abandon-ht/k210_yolov2 | 2c90ac8a599e8f096801134af46daba6305da921 | 0ff4d4dc84a8d3f53e03ded10159381774fe7d55 | refs/heads/main | 2023-07-18T20:18:07.865204 | 2021-09-22T10:49:57 | 2021-09-22T10:49:57 | 408,284,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | import os
import re
import numpy as np
import sys
import argparse
# image_txt_path = '/home/ncy/PycharmProjects/VOCdevkit/VOC2012/ImageSets/Main/train.txt'
def main(train_file: str):
image_path_list = np.loadtxt(train_file, dtype=str)
if not os.path.exists('data'):
os.makedirs('data')
np.savetxt('data/voc_img.list', image_path_list, fmt='%s')
ann_list = list(image_path_list)
ann_list = [re.sub(r'JPEGImages', 'labels', s) for s in ann_list]
    ann_list = [re.sub(r'\.jpg$', '.txt', s) for s in ann_list]  # escape the dot; a bare '.' would match any character
np.savetxt('data/voc_ann.list', ann_list, fmt='%s')
def parse_arguments(argv):
parser = argparse.ArgumentParser()
    parser.add_argument('train_file', type=str, help='train.txt file path')
return parser.parse_args(argv)
if __name__ == "__main__":
args = parse_arguments(sys.argv[1:])
main(args.train_file)
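
# Example (paths are illustrative):
#   python make_voc_list.py VOCdevkit/VOC2012/ImageSets/Main/train.txt
# writes data/voc_img.list and data/voc_ann.list, mapping each image path
# .../JPEGImages/foo.jpg to its label file .../labels/foo.txt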
| [
"[email protected]"
] | |
45e3cc0a6ee8e036392a8bceaabbd1267b9d866e | 064b0a858f5d03dcba2bcdfe763fa46c0e507770 | /mas_common_robotics/mcr_navigation/mcr_navigation_tools/ros/scripts/navigation_goals_as_marker | 738c49dad3386c280a1594b87450d609af1a4e3b | [] | no_license | deebuls/robocup-at-work | ff7ebff2453d768fa4a118b179e4366ea1bc99d5 | f67f841dcbadfe4ba3c6b13eef6a939fe7d52c0b | refs/heads/brazil-2014 | 2020-04-06T06:53:23.497667 | 2014-08-12T11:12:05 | 2014-08-12T11:12:05 | 43,391,473 | 0 | 0 | null | 2015-09-29T20:08:39 | 2015-09-29T20:08:39 | null | UTF-8 | Python | false | false | 2,442 | #!/usr/bin/python
import rospy
import tf
from visualization_msgs.msg import *
if (__name__ == "__main__"):
rospy.init_node('mcr_navigation_goals_as_marker', anonymous=True)
location_publisher = rospy.Publisher("/visualization_marker_array", MarkerArray)
while not rospy.is_shutdown():
rospy.sleep(5)
marker_array = MarkerArray()
if not rospy.has_param('/script_server/base'):
continue
param_list = rospy.get_param('/script_server/base')
counter=0
for item in param_list:
pose = rospy.get_param('/script_server/base/' + item)
single_marker = Marker()
single_marker_text = Marker()
single_marker.header.frame_id = single_marker_text.header.frame_id = "/map"
single_marker.header.stamp = single_marker_text.header.stamp = rospy.Time.now()
single_marker.ns = single_marker_text.ns = "base navigation goals"
single_marker.action = single_marker_text.action = 0
single_marker.id = counter
single_marker_text.id = counter+1
counter+=2
single_marker_text.type = 9
single_marker.type = 0
single_marker.pose.position.x = single_marker_text.pose.position.x = pose[0]
single_marker.pose.position.y = single_marker_text.pose.position.y = pose[1]
single_marker.pose.position.z = single_marker_text.pose.position.z = 0.0
(qx,qy,qz,qw) = tf.transformations.quaternion_from_euler(0.0, 0.0, pose[2])
single_marker.pose.orientation.x = single_marker_text.pose.orientation.x = qx
single_marker.pose.orientation.y = single_marker_text.pose.orientation.y = qy
single_marker.pose.orientation.z = single_marker_text.pose.orientation.z = qz
single_marker.pose.orientation.w = single_marker_text.pose.orientation.w = qw
single_marker_text.text = item
single_marker.scale.x = 0.6
single_marker.scale.y = 0.05
single_marker.scale.z = 0.05
single_marker.color.r = 1.0
single_marker.color.g = 0.0
single_marker.color.b = 0.0
single_marker.color.a = 0.5
single_marker_text.scale.x = 0.17
single_marker_text.scale.y = 0.17
single_marker_text.scale.z = 0.17
single_marker_text.color.r = 1.0
single_marker_text.color.g = 1.0
single_marker_text.color.b = 1.0
single_marker_text.color.a = 1.0
single_marker.lifetime = single_marker_text.lifetime = rospy.Duration(5)
marker_array.markers.append(single_marker)
marker_array.markers.append(single_marker_text)
location_publisher.publish(marker_array)
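
# Example of the parameter layout this node expects (assumed from the
# lookups above; the goal name is illustrative):
#   /script_server/base/kitchen: [x, y, theta]   # pose in the /map frame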
| [
"[email protected]"
] | ||
6350b28ce677c4102b1d0be5c87748350f2aa9e6 | c58bfb0d7a293cc471f2921db57b00d5c3e56bac | /Backups/Old-DIKB-Micropublication/scripts/query-DIKB-DDIs.py | 4aa6f9c61687c0fefd78be1965e8e28e369068b3 | [
"Apache-2.0"
] | permissive | dbmi-pitt/DIKB-Micropublication | 0a2f29b704c269d2ccfe091f8faff1b2374d626d | 0fa264903414ac0b552d363d139746ead198f06a | refs/heads/master | 2020-12-24T16:32:04.206847 | 2020-01-22T18:33:58 | 2020-01-22T18:33:58 | 22,527,994 | 6 | 2 | null | 2015-07-15T14:39:09 | 2014-08-01T20:51:07 | Python | UTF-8 | Python | false | false | 6,141 | py | ## query-DIKB-DDIs.py
##
## Simple Python script to query http://dbmi-icode-01.dbmi.pitt.edu:2020/sparql for DIKB observed DDIs
## No extra libraries required.
# Authors: Richard D Boyce, Yifan Ning
#
# August 2014
#
## This code is licensed under Apache License Version 2.0, January
## 2004. Please see the license in the root folder of this project
import json
import urllib2
import urllib
import traceback
import pickle
import sys
sys.path = sys.path + ['.']
from PDDI_Model import getPDDIDict
def query(q,epr,f='application/sparql-results+json'):
"""Function that uses urllib/urllib2 to issue a SPARQL query.
By default it requests json as data format for the SPARQL resultset"""
try:
params = {'query': q}
params = urllib.urlencode(params)
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(epr+'?'+params)
request.add_header('Accept', f)
request.get_method = lambda: 'GET'
url = opener.open(request)
return url.read()
except Exception, e:
traceback.print_exc(file=sys.stdout)
raise e
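
# Example use (an assumption; any small query against the endpoint works):
#   json_string = query("SELECT * WHERE { ?s ?p ?o } LIMIT 1",
#                       "http://dbmi-icode-01.dbmi.pitt.edu/dikb/sparql")
#   resultset = json.loads(json_string)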
if __name__ == "__main__":
# load all observed DDIs
pddiDictL = []
sparql_service = "http://dbmi-icode-01.dbmi.pitt.edu/dikb/sparql"
query_string = """
PREFIX swanpav: <http://purl.org/swan/1.2/pav/>
PREFIX meta: <http://www4.wiwiss.fu-berlin.de/bizer/d2r-server/metadata#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX prvTypes: <http://purl.org/net/provenance/types#>
PREFIX swandr: <http://purl.org/swan/1.2/discourse-relationships/>
PREFIX d2r: <http://sites.wiwiss.fu-berlin.de/suhl/bizer/d2r-server/config.rdf#>
PREFIX map: <file:////home/rdb20/Downloads/d2r-server-0.7-DIKB/mapping.n3#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX swande: <http://purl.org/swan/1.2/discourse-elements#>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX prv: <http://purl.org/net/provenance/ns#>
PREFIX db: <http://dbmi-icode-01.dbmi.pitt.edu:2020/resource/>
PREFIX siocns: <http://rdfs.org/sioc/ns#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX prvFiles: <http://purl.org/net/provenance/files#>
PREFIX ndfrt: <http://purl.bioontology.org/ontology/NDFRT/>
PREFIX obo: <http://purl.obolibrary.org/obo/>
PREFIX ncbit: <http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#>
PREFIX dikbEvidence: <http://dbmi-icode-01.dbmi.pitt.edu/dikb-evidence/DIKB_evidence_ontology_v1.3.owl#>
PREFIX dikbD2R: <http://dbmi-icode-01.dbmi.pitt.edu:2020/vocab/resource/>
PREFIX swanco: <http://purl.org/swan/1.2/swan-commons#>
PREFIX prvIV: <http://purl.org/net/provenance/integrity#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX swanci: <http://purl.org/swan/1.2/citations/>
SELECT DISTINCT * WHERE {
?s a dikbD2R:DDIObservation;
dikbD2R:PharmacokineticDDIAssertion ?asrt;
dikbD2R:ObjectDrugOfInteraction ?object;
dikbD2R:PrecipitantDrugOfInteraction ?precip;
rdfs:label ?label.
?object a ncbit:Pharmacologic_Substance;
owl:sameAs ?objectURI.
?precip a ncbit:Pharmacologic_Substance;
owl:sameAs ?precipURI.
?asrt a swande:ResearchStatement;
foaf:homepage ?homepage;
dikbD2R:Assertions_numeric_val ?numericVal;
dikbD2R:Assertions_cont_val ?contVal;
dikbD2R:slot ?ddiPkEffect;
swanco:citesAsSupportingEvidence ?evidence;
rdfs:label ?researchStatementLabel.
?evidence a ncbit:Evidence;
dikbEvidence:Evidence_type ?evType;
rdfs:seeAlso ?evSource;
siocns:content ?content;
dc:date ?dateAnnotated;
dc:creator ?whoAnnotated.
}
"""
print "OBSERVED DDIs query_string: %s" % query_string
json_string = query(query_string, sparql_service)
resultset=json.loads(json_string)
print resultset.values()
if len(resultset["results"]["bindings"]) == 0:
print "INFO: No result!"
else:
#print json.dumps(resultset,indent=1)
for i in range(0, len(resultset["results"]["bindings"])):
newPDDI = getPDDIDict()
newPDDI["evidence"] = resultset["results"]["bindings"][i]["evidence"]["value"]
newPDDI["researchStatement"] = resultset["results"]["bindings"][i]["asrt"]["value"]
newPDDI["uri"] = resultset["results"]["bindings"][i]["s"]["value"]
obj = resultset["results"]["bindings"][i]["object"]["value"]
newPDDI["object"] = obj.replace(u"http://dbmi-icode-01.dbmi.pitt.edu/dikb/resource/Drugs/",u"").upper()
precip = resultset["results"]["bindings"][i]["precip"]["value"]
newPDDI["precip"] = precip.replace(u"http://dbmi-icode-01.dbmi.pitt.edu/dikb/resource/Drugs/",u"").upper()
newPDDI["objectURI"] = resultset["results"]["bindings"][i]["objectURI"]["value"]
newPDDI["precipURI"] = resultset["results"]["bindings"][i]["precipURI"]["value"]
newPDDI["label"] = resultset["results"]["bindings"][i]["label"]["value"]
newPDDI["homepage"] = resultset["results"]["bindings"][i]["homepage"]["value"]
newPDDI["numericVal"] = resultset["results"]["bindings"][i]["numericVal"]["value"]
newPDDI["contVal"] = resultset["results"]["bindings"][i]["contVal"]["value"]
newPDDI["ddiPkEffect"] = resultset["results"]["bindings"][i]["ddiPkEffect"]["value"]
newPDDI["evidenceSource"] = resultset["results"]["bindings"][i]["evSource"]["value"]
newPDDI["evidenceType"] = resultset["results"]["bindings"][i]["evType"]["value"]
newPDDI["evidenceStatement"] = resultset["results"]["bindings"][i]["content"]["value"]
newPDDI["dateAnnotated"] = resultset["results"]["bindings"][i]["dateAnnotated"]["value"]
newPDDI["whoAnnotated"] = resultset["results"]["bindings"][i]["whoAnnotated"]["value"]
newPDDI["researchStatementLabel"] = resultset["results"]["bindings"][i]["researchStatementLabel"]["value"]
pddiDictL.append(newPDDI)
f = open("dikb-observed-ddis.pickle","w")
pickle.dump(pddiDictL, f)
f.close()
| [
"[email protected]"
] | |
d52327859a2773b746a3751f55e4a4b9a7224608 | 5f2b22d4ffec7fc1a4e40932acac30256f63d812 | /tensorflow-study/AI_Drive_3D_Car/Driving_3D_Car/env.py | e5bf1bf7f0a584e5d2feb28a0bfb8734e741b3ea | [] | no_license | Thpffcj/Python-Learning | 45734dd31e4d8d047eec5c5d26309bc7449bfd0d | 5dacac6d33fcb7c034ecf5be58d02f506fd1d6ad | refs/heads/master | 2023-08-04T21:02:36.984616 | 2021-09-21T01:30:04 | 2021-09-21T01:30:04 | 111,358,872 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,074 | py | # -*- coding: UTF-8 -*-
# Created by thpffcj on 2019-02-13.
"""
Configure the Neon Race game environment to make it easier to train on
"""
import cv2
import time
import numpy as np
import logging
import gym
from gym import spaces
from gym.spaces.box import Box
import universe
from universe import vectorized
from universe import spaces as vnc_spaces
from universe.spaces.vnc_event import keycode
from universe.wrappers import BlockingReset, GymCoreAction, EpisodeID, Unvectorize, Vectorize, Vision, Logger
# Configure the logging system
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
universe.configure_logging()
# Game: Neon Race
GAME = "flashgames.NeonRace-v0"
# Create and configure the game environment
def create_env(client_id, remotes):
env = gym.make(GAME)
env = Vision(env)
env = Logger(env)
env = BlockingReset(env)
reg = universe.runtime_spec('flashgames').server_registry
height = reg[GAME]["height"]
width = reg[GAME]["width"]
env = CropScreen(env, height, width, 84, 18)
env = Rescale(env)
    # Available keys: left, right, up, up-left, up-right, down, and 'x' for Turbo acceleration
keys = ['left', 'right', 'up', 'left up', 'right up', 'down', 'up x']
env = DiscreteToFixedKeysVNCActions(env, keys)
env = EpisodeID(env)
env = DiagnosticsInfo(env)
env = Unvectorize(env)
env.configure(fps=5.0, remotes=remotes, start_timeout=15 * 60, client_id=client_id, vnc_driver='go', vnc_kwargs={
'encoding': 'tight', 'compress_level': 0,
'fine_quality_level': 50, 'subsample_level': 3})
return env
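
# Usage sketch (an assumption; requires a universe VNC remote to be running):
#   env = create_env(client_id='0', remotes=1)
#   observation = env.reset()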
# Add diagnostics logging to the environment
def DiagnosticsInfo(env, *args, **kwargs):
return vectorized.VectorizeFilter(env, DiagnosticsInfoI, *args, **kwargs)
# Filter class that records diagnostic info
class DiagnosticsInfoI(vectorized.Filter):
def __init__(self, log_interval=503):
super(DiagnosticsInfoI, self).__init__()
self._episode_time = time.time()
self._last_time = time.time()
self._local_t = 0
self._log_interval = log_interval
self._episode_reward = 0
self._episode_length = 0
self._all_rewards = []
self._num_vnc_updates = 0
self._last_episode_id = -1
def _after_reset(self, observation):
        logger.info('Resetting environment')
self._episode_reward = 0
self._episode_length = 0
self._all_rewards = []
return observation
def _after_step(self, observation, reward, done, info):
to_log = {}
if self._episode_length == 0:
self._episode_time = time.time()
self._local_t += 1
if info.get("stats.vnc.updates.n") is not None:
self._num_vnc_updates += info.get("stats.vnc.updates.n")
if self._local_t % self._log_interval == 0:
cur_time = time.time()
elapsed = cur_time - self._last_time
fps = self._log_interval / elapsed
self._last_time = cur_time
cur_episode_id = info.get('vectorized.episode_id', 0)
to_log["diagnostics/fps"] = fps
if self._last_episode_id == cur_episode_id:
to_log["diagnostics/fps_within_episode"] = fps
self._last_episode_id = cur_episode_id
if info.get("stats.gauges.diagnostics.lag.action") is not None:
to_log["diagnostics/action_lag_lb"] = info["stats.gauges.diagnostics.lag.action"][0]
to_log["diagnostics/action_lag_ub"] = info["stats.gauges.diagnostics.lag.action"][1]
if info.get("reward.count") is not None:
to_log["diagnostics/reward_count"] = info["reward.count"]
if info.get("stats.gauges.diagnostics.clock_skew") is not None:
to_log["diagnostics/clock_skew_lb"] = info["stats.gauges.diagnostics.clock_skew"][0]
to_log["diagnostics/clock_skew_ub"] = info["stats.gauges.diagnostics.clock_skew"][1]
if info.get("stats.gauges.diagnostics.lag.observation") is not None:
to_log["diagnostics/observation_lag_lb"] = info["stats.gauges.diagnostics.lag.observation"][0]
to_log["diagnostics/observation_lag_ub"] = info["stats.gauges.diagnostics.lag.observation"][1]
if info.get("stats.vnc.updates.n") is not None:
to_log["diagnostics/vnc_updates_n"] = info["stats.vnc.updates.n"]
to_log["diagnostics/vnc_updates_n_ps"] = self._num_vnc_updates / elapsed
self._num_vnc_updates = 0
if info.get("stats.vnc.updates.bytes") is not None:
to_log["diagnostics/vnc_updates_bytes"] = info["stats.vnc.updates.bytes"]
if info.get("stats.vnc.updates.pixels") is not None:
to_log["diagnostics/vnc_updates_pixels"] = info["stats.vnc.updates.pixels"]
if info.get("stats.vnc.updates.rectangles") is not None:
to_log["diagnostics/vnc_updates_rectangles"] = info["stats.vnc.updates.rectangles"]
if info.get("env_status.state_id") is not None:
to_log["diagnostics/env_state_id"] = info["env_status.state_id"]
if reward is not None:
self._episode_reward += reward
if observation is not None:
self._episode_length += 1
self._all_rewards.append(reward)
if done:
            logger.info('Episode finished: episode reward=%s episode length=%s', self._episode_reward, self._episode_length)
total_time = time.time() - self._episode_time
to_log["global/episode_reward"] = self._episode_reward
to_log["global/episode_length"] = self._episode_length
to_log["global/episode_time"] = total_time
to_log["global/reward_per_time"] = self._episode_reward / total_time
self._episode_reward = 0
self._episode_length = 0
self._all_rewards = []
return observation, reward, done, to_log
# State of a fixed set of keys
class FixedKeyState(object):
def __init__(self, keys):
self._keys = [keycode(key) for key in keys]
self._down_keysyms = set()
def apply_vnc_actions(self, vnc_actions):
for event in vnc_actions:
if isinstance(event, vnc_spaces.KeyEvent):
if event.down:
self._down_keysyms.add(event.key)
else:
self._down_keysyms.discard(event.key)
def to_index(self):
action_n = 0
for key in self._down_keysyms:
if key in self._keys:
                # if multiple keys are down, use only the first one
action_n = self._keys.index(key) + 1
break
return action_n
# Define a fixed, discrete action space
class DiscreteToFixedKeysVNCActions(vectorized.ActionWrapper):
def __init__(self, env, keys):
super(DiscreteToFixedKeysVNCActions, self).__init__(env)
self._keys = keys
self._generate_actions()
self.action_space = spaces.Discrete(len(self._actions))
    # Generate the actions
def _generate_actions(self):
self._actions = []
uniq_keys = set()
for key in self._keys:
for cur_key in key.split(' '):
uniq_keys.add(cur_key)
for key in [''] + self._keys:
split_keys = key.split(' ')
cur_action = []
for cur_key in uniq_keys:
cur_action.append(vnc_spaces.KeyEvent.by_name(cur_key, down=(cur_key in split_keys)))
self._actions.append(cur_action)
self.key_state = FixedKeyState(uniq_keys)
def _action(self, action_n):
        # each action may be a length-1 np.array;
        # cast to int to avoid a warning
return [self._actions[int(action)] for action in action_n]
# Crop a region of the screen
class CropScreen(vectorized.ObservationWrapper):
"""
    Crops a height x width region of the screen, offset (top, left) from the upper-left corner
"""
def __init__(self, env, height, width, top=0, left=0):
super(CropScreen, self).__init__(env)
self.height = height
self.width = width
self.top = top
self.left = left
self.observation_space = Box(0, 255, shape=(height, width, 3))
def _observation(self, observation_n):
return [ob[self.top:self.top+self.height, self.left:self.left+self.width, :] if ob is not None else None
for ob in observation_n]
# Process a frame
def _process_frame(frame):
frame = cv2.resize(frame, (200, 128))
frame = frame.mean(2).astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.reshape(frame, [128, 200, 1])
return frame
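
# e.g. a height x width x 3 uint8 screen becomes a 128 x 200 x 1 float32
# array scaled to [0, 1]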
# Rescale the observation space
class Rescale(vectorized.ObservationWrapper):
def __init__(self, env=None):
super(Rescale, self).__init__(env)
self.observation_space = Box(0.0, 1.0, [128, 200, 1])
def _observation(self, observation_n):
return [_process_frame(observation) for observation in observation_n]
| [
"[email protected]"
] | |
d731866885db9fe81a9f2fb738a972218db51623 | 00e21a29e078f5216e66a71f591c4b1a7b6465b9 | /Level.1/Arrange String in Descending.py | 22a2e702010fc14f902458aba2e033f04f755da0 | [] | no_license | minji-OH/Python_Programmers_Solution | 6bdd0d251f883ab03e8deb990656d17757178de2 | 395b5459e026bfb0449383840d3bf3b17eb38754 | refs/heads/master | 2021-05-27T04:11:50.439360 | 2020-10-13T06:27:10 | 2020-10-13T06:27:10 | 254,211,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | def solution(s):
answer = ''
    # separate lowercase and uppercase characters
lower = []
upper = []
for i in range(len(s)):
if s[i] >='a' and s[i] <='z':
lower.append(s[i])
else:
upper.append(s[i])
    # sort each group in descending order
lower.sort(reverse=True)
upper.sort(reverse=True)
    # join: lowercase first, since lowercase sorts above uppercase in ASCII
answer = ''.join(lower) + ''.join(upper)
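    # Equivalent one-liner (lowercase letters compare greater than uppercase
    # in ASCII, so a plain descending sort gives the same result):
    #   return ''.join(sorted(s, reverse=True))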
    return answer
| [
"[email protected]"
] | |
6aefd1ec8f70dea375cd31b19055bc75bd9b5da9 | 51cfc2ff8b2bf98f6abacf6f3bb4bd19bf88fd81 | /user/migrations/0003_auto_20191230_2314.py | 69dfae1e43f64df86c6fd96a42849e1f26f7dd6d | [] | no_license | AndreiiZh/cnip | f1cac9e0f84d409acf8bb682e270608364921137 | 4dd31b507d94f42f1bb3a2258166add938ca8af8 | refs/heads/master | 2022-12-12T13:18:03.728428 | 2020-03-13T11:49:42 | 2020-03-13T11:49:42 | 227,101,711 | 0 | 0 | null | 2022-04-22T23:07:27 | 2019-12-10T11:22:34 | HTML | UTF-8 | Python | false | false | 451 | py | # Generated by Django 2.2.8 on 2019-12-30 21:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0002_auto_20191230_2249'),
]
operations = [
migrations.AlterField(
model_name='listservice',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=6, null=True, verbose_name='Вартість'),
),
]
| [
"[email protected]"
] | |
54361217ef28c9e0c8e6a9fccca0f48c5ea11222 | aedbc5b8cb95ba346137d21a636a37f3b24e76d7 | /restaurants/views.py | 9d6e98760ea565ca3361d1780cb3e35504f1d603 | [] | no_license | jayabhavana342/Learning_Django | 39df268612826c656e56eb2333516a92f38adfae | fc2a18f3920c23ed49d58cad40ffa84fb67154f9 | refs/heads/master | 2021-09-04T07:01:18.064031 | 2018-01-16T23:24:30 | 2018-01-16T23:24:30 | 116,726,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from .forms import RestaurantLocationCreateForm
from .models import RestaurantLocation
class RestaurantListView(LoginRequiredMixin, ListView):
def get_queryset(self):
return RestaurantLocation.objects.filter(owner=self.request.user)
class RestaurantDetailView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return RestaurantLocation.objects.filter(owner=self.request.user)
class RestaurantCreateView(LoginRequiredMixin, CreateView):
form_class = RestaurantLocationCreateForm
login_url = '/login/'
template_name = 'form.html'
def form_valid(self, form):
instance = form.save(commit=False)
instance.owner = self.request.user
return super(RestaurantCreateView, self).form_valid(form)
def get_context_data(self, *args, **kwargs):
context = super(RestaurantCreateView, self).get_context_data(**kwargs)
context['title'] = 'Add Restaurant'
return context
class RestaurantUpdateView(LoginRequiredMixin, UpdateView):
form_class = RestaurantLocationCreateForm
login_url = '/login/'
template_name = 'restaurants/detail-update.html'
def get_context_data(self, *args, **kwargs):
context = super(RestaurantUpdateView, self).get_context_data(**kwargs)
name = self.get_object().name
context['title'] = 'Update Restaurant: ' + name
return context
def get_queryset(self):
return RestaurantLocation.objects.filter(owner=self.request.user)
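
# URLconf sketch (hypothetical names; not part of this file):
#   urlpatterns = [
#       path('restaurants/', RestaurantListView.as_view()),
#       path('restaurants/create/', RestaurantCreateView.as_view()),
#       path('restaurants/<int:pk>/', RestaurantDetailView.as_view()),
#       path('restaurants/<int:pk>/edit/', RestaurantUpdateView.as_view()),
#   ]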
| [
"[email protected]"
] | |
95485ccb37d5016b6bc455ec89bb27e6274ca8a0 | 049a40bd4e44636fe13656a815daaca860a98db1 | /forms.py | 87f85f44c8a2d2a6bb51e30d5cc6a0751b9f8ea3 | [] | no_license | heggy231/wdi-capstone-project-4-zom | ccbce32015838076fed969844ff2ec906d2f6b38 | ffe957ae44311d91c35888a3644d4fe5c4aa1db0 | refs/heads/master | 2023-02-06T11:01:55.088763 | 2020-04-15T01:22:03 | 2020-04-15T01:22:03 | 178,492,782 | 3 | 0 | null | 2023-02-02T05:44:18 | 2019-03-30T00:38:00 | HTML | UTF-8 | Python | false | false | 1,955 | py | # forms.py defines class to represent our form. Add the field we need which will eventually be used with a form builder on the front end https://git.generalassemb.ly/sf-wdi-51/Flask-Models
# import the tools, fields we need
from flask_wtf import FlaskForm as Form
# from models import User
from wtforms import StringField, PasswordField, TextAreaField, TextField, SubmitField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email, Length, EqualTo)
from models import User
from models import Post
def email_exists(form, field):
if User.select().where(User.email == field.data).exists():
raise ValidationError('Oops!! User with that email already exists.')
return
# create the class and variable to house Field definitions
class RegisterForm(Form):  # subclass Form so we inherit WTForms fields like StringField() and PasswordField()
email = StringField( # function I am bringing in from WTF
'Email',
validators=[
DataRequired(),
Email(),
email_exists
])
password = PasswordField(
'Password',
validators=[
DataRequired(),
Length(min=2),
EqualTo(
'password2',
message='Passwords must match')
])
password2 = PasswordField(
'Confirm Password',
validators=[DataRequired()]
)
# We subclass the WTForms Form superclass so SigninForm inherits its field machinery (inheritance)
class SigninForm(Form):
email = StringField(
'Email',
validators=[
DataRequired(),
Email()
])
password = PasswordField(
'Password',
validators=[
DataRequired()
])
class PostForm(Form):  # subclass Form to inherit StringField() and TextAreaField()
title = StringField(
'Title',
validators=[
DataRequired()
])
content = TextAreaField(
'Tell your story...',
validators=[
DataRequired()
]) | [
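# Hedged usage sketch (added; the route and template names are illustrative,
# not taken from this repo):
# @app.route('/register', methods=['GET', 'POST'])
# def register():
#     form = RegisterForm()
#     if form.validate_on_submit():
#         ...  # create the User from form.email.data / form.password.data
#     return render_template('register.html', form=form)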
"[email protected]"
] | |
523a8bd5d2d8793658e493383d612fb1445cc0eb | 0ce955a125f729b6dff17c4a507162c8b86e4812 | /flip_zero_max_sum.py | 07ec8fcee1e2cc5029e710f6358e8228716bd30b | [] | no_license | changediyasunny/Challenges | f1b1f0fda490c53bfe8de4648c29b501ec7180a1 | df2ce669049ca040631dc6cc05cf5b5e8d2cc376 | refs/heads/master | 2021-01-10T13:29:53.137452 | 2019-10-10T05:19:39 | 2019-10-10T05:19:39 | 52,062,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py |
"""
Sliding window Problem...
"""
def flip(list1, n):
wL =0
wR = 0
Lindex = 0
window = 0
zero_cnt = 0
while wR < len(list1):
# Widen window if zero-count is < given flips...
if zero_cnt <= n:
if list1[wR] == 0:
zero_cnt = zero_cnt + 1
wR = wR + 1
# zero-cnt is more...
if zero_cnt > n:
if list1[wL] == 0:
zero_cnt = zero_cnt - 1
wL = wL + 1
# Keep track of maximum window found yet...
# ALways max is preserved under (wR - wL) condition...
if (wR-wL) > window:
window = wR - wL
Lindex = wL
#......................
for i in range(window):
if list1[Lindex+i] == 0:
print(Lindex+i)
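# Hedged worked example (added): for list1 = [1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1]
# and n = 1, the widest window with at most one zero spans indices 6..12, so
# flip() prints 9 -- flipping list1[9] yields seven consecutive 1s.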
def main():
list1 = [1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1]
n = 1
flip(list1, n)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
beeaa747f14ea060a718efff86b3fd69fa3fa66b | 2275e9c3147a5284f36c1fc2da7a8dcf53cc2383 | /params/blend3.py | 627fbb0b7e708598d26ec087e6101ea71394c352 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | puttak/nrel-efr | cb39197c362569922e69d65b3e7abf96e42ec992 | 2b6ebf1ce6136d13dfff741e8ff7c2b50ed65e85 | refs/heads/master | 2022-11-02T14:35:14.617027 | 2020-06-16T21:11:00 | 2020-06-16T21:11:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | """
Blend3 feedstock parameters.
ultimate_analysis : list
Elements listed as [C, H, O, N, S, ash, moisture]
chemical_analysis : dict
Biomass composition determined from chemical analysis data. These values
are used for the pyrolysis kinetics. Values are given as mass fraction (-)
on dry ash-free basis (% daf).
"""
feedstock = {
'name': 'Blend3',
'ultimate_analysis': [49.52, 5.28, 38.35, 0.15, 0.02, 0.64, 6.04],
'chemical_analysis': {
'cellulose': 39.19,
'hemicellulose': 23.26,
'lignin_c': 9.89,
'lignin_h': 9.89,
'lignin_o': 9.89,
'tannins': 7.88,
'triglycerides': 0.00
},
'biomass_characterization': {
'yc': 0.51,
'yh': 0.06,
'alpha': 0.56,
'beta': 0.6,
'gamma': 0.6,
'delta': 0.78,
'epsilon': 0.88
}
}
"""
Entrained flow reactor (EFR) parameters.
energy : str
Used by the Cantera reactor model. If set to `off` then disable the energy
equation. If `on` then enable the energy and use the provided thermo data
for the reactions.
"""
reactor = {
'name': 'Entrained flow reactor (EFR)',
'pressure': 101_325.0,
'temperature': 773.15,
'time_duration': 10.0,
'energy': 'on'
}
"""
Sensitivity analysis parameters for the Debiagi 2018 kinetics.
"""
sensitivity_analysis = {
'n_samples': 10,
'num_vars': 7,
'names': ['CELL', 'GMSW', 'LIGC', 'LIGH', 'LIGO', 'TANN', 'TGL'],
'bounds': [[0.01, 0.99],
[0.01, 0.99],
[0.01, 0.99],
[0.01, 0.99],
[0.01, 0.99],
[0.01, 0.99],
[0.01, 0.99]]
}
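
# Hedged sanity check (added, not part of the original parameter file): the
# chemical analysis values are mass percentages on a dry ash-free basis, so
# they should sum to ~100.
if __name__ == '__main__':
    total = sum(feedstock['chemical_analysis'].values())
    print('chemical_analysis sums to', total)  # 100.00 for Blend3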
| [
"[email protected]"
] | |
d67459851bcc2e99ac347e2321788fbb44de2d73 | 61e23efe2d623da80d59fd98f62d98620512d0ae | /analyze_seqresults.py | 28dd2e3b284b04f80b64684b1e99753b0f338d1b | [] | no_license | sara-kassani/Embryo-Stage-Onset-Detection | 8e61634e47e72b538a35d73d81e5833eed91e633 | 98f7d327e4ca4d032258495762b7a52410f7f1f8 | refs/heads/main | 2023-08-06T00:16:55.829341 | 2021-09-29T14:27:14 | 2021-09-29T14:27:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | import numpy as np
import pandas as pd
import argparse
import math
parser = argparse.ArgumentParser()
parser.add_argument("--train_name", type=str, help="Training/Experiment name")
parser.add_argument("--suffix", type=str, help="Folder Suffix")
parser.add_argument("--cross_val", type=bool, help="Whether 5-fold cross-validation was performed", default=False)
args = parser.parse_args()
training_name = args.train_name
suffix = args.suffix
OUT_PATH = training_name + '/Predictions{}/'.format(suffix)
CROSS_VAL = args.cross_val
def read_preds(fold):
data_df = pd.read_csv(
OUT_PATH + 'Preds-fold{}.csv'.format(fold),
usecols=['Filenames', 'Labels', 'Preds'])
data_df['Folder'] = [filename.split('/')[0] for filename in data_df['Filenames']]
return data_df
def _analyze():
folds = 5 if CROSS_VAL else 1
for fold in range(folds):
data_df = read_preds(fold)
test_folders = data_df.Folder.unique()
vals = np.zeros((len(test_folders), 6))
for i,folder in enumerate(test_folders):
print(folder)
folder_df = data_df[data_df['Folder'] == folder].reset_index()
true_morula = folder_df.index[folder_df['Labels'] == 1].min()
pred_morula = folder_df.index[folder_df['Preds'] == 1].min()
true_blast = folder_df.index[folder_df['Labels'] == 2].min()
pred_blast = folder_df.index[folder_df['Preds'] == 2].min()
if math.isnan(pred_blast):
print('nan', folder)
pred_blast = folder_df.index.values.max()
dif_morula = np.abs(pred_morula - true_morula)
dif_blast = np.abs(pred_blast - true_blast)
vals[i,:] = np.array([
true_morula, pred_morula, true_blast, pred_blast,
dif_morula, dif_blast])
val_df = pd.DataFrame({
'Folder': test_folders,
            'TrueMorula': vals[:,0],
            'PredMorula': vals[:,1],  # column order matches the vals row layout above
            'TrueBlast': vals[:,2],
            'PredBlast': vals[:,3],
'DifMorula': vals[:,4],
'DifBlast': vals[:,5]})
val_df.to_csv(OUT_PATH + 'SeqAnalysis-fold{}.csv'.format(fold), index=False)
if __name__ == '__main__':
_analyze()
| [
"[email protected]"
] | |
a31c303649e88c74d60cf8c7bb1455f75042f804 | 5cf982f0d16c4084b8c0b519a4856e4f5160e70d | /accountapp/forms.py | 002e95b59f712e8b73ea36fbc6b63657f74024be | [] | no_license | minzyk/Django_1 | 4b5ab942a45a61c368466b91f54af2acaac5cb63 | 1200db8a638ce5332e82c7785b483155f0d855a4 | refs/heads/master | 2023-06-07T18:31:57.527188 | 2021-07-01T05:44:38 | 2021-07-01T05:44:38 | 373,013,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from django.contrib.auth.forms import UserCreationForm
class AccountUpdateForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.fields['username'].disabled = True  # disable the username field (so the ID cannot be changed when editing account info) - with disabled set to True, client-side edits are not applied on the server
| [
"[email protected]"
] | |
077b5149d9a43b8f848cf4db9db2caa0bca1888a | d15ed15aa3df11ce3bc5a007d65dc90ad7b7471d | /manage.py | a271f08565de2725fd8a4d22c732429e233aa742 | [] | no_license | dansgithubuser/dansMap | 95947005c74f975355858f4b059b8913410814e9 | 48e035b1d6c308e83d5ddb5884475bfb88fb3eae | refs/heads/master | 2020-03-17T02:18:56.329812 | 2018-06-24T15:34:22 | 2018-06-24T15:34:22 | 133,185,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dansmap.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
c355599110224238645664085a3d8c5b3a4781f3 | ccc55c7a05cd115e029f04cd577b11b70d0d3fdc | /Chapter 4/D4 TI C/Nurul Kamila (1184038)/Teori/7.py | f5bf61d9618b21cc7d772c42d310445c76f0ab7a | [] | no_license | ariqrafikusumah/Tugas | 6019be8c3219c930730d29076efd4494a3c70c79 | 6903117a91ad3002033c6d0297b4d1d06903088d | refs/heads/master | 2020-09-15T19:51:29.066553 | 2020-02-29T12:08:36 | 2020-02-29T12:08:36 | 216,065,279 | 0 | 0 | null | 2019-10-18T16:28:46 | 2019-10-18T16:28:45 | null | UTF-8 | Python | false | false | 141 | py | #Menulis File CSV dengan Fungsi to csv dengan Library Pandas
import pandas
df = pandas.read_csv('praktikum.csv')
df.to_csv('praktikum4.csv') | [
"[email protected]"
] | |
001a7c30b6353f7f46ae2652312694e726f9a87e | 637b9d443b84039cb3943ec186eb7e4872258c1e | /setup.py | d47f28936baf28bc9b27dc28fcb15c12dee19616 | [
"MIT"
] | permissive | ncod3/vprimer | d71a0c97c0a8583d57fd07e19dac01d82ecdab01 | 403151788e2df5138509cb444dd3eeebe4a78f1a | refs/heads/main | 2023-07-08T18:54:17.950399 | 2023-07-01T01:06:27 | 2023-07-01T01:06:27 | 278,553,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="vprimer",
version="1.0.7",
author="satoshi-natsume",
author_email="[email protected]",
description="V-primer",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ncod3/vprimer",
packages=setuptools.find_packages(),
license='MIT',
entry_points = {
'console_scripts': ['vprimer = vprimer.main:main']
},
python_requires='>=3.7',
)
| [
"[email protected]"
] | |
4decefeabc83feb66925607a6c3965c9dbc63df9 | 92cfe7677656056abaec6c6b349546260fbeb895 | /chplot.py | de7ef0630b47f49b1215c3adb579f3c8d9a35384 | [
"MIT"
] | permissive | cheolheil/control_chart | ea11eae0a13587ffd9bebd2d232d141699129a77 | adce7c5d6af27aab8053dba15518e531846434e5 | refs/heads/main | 2023-08-30T08:21:17.593741 | 2021-11-15T16:32:41 | 2021-11-15T16:32:41 | 428,305,384 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | import matplotlib.pyplot as plt
class ccplot:
    # this class accepts an instance of a fitted Shewhart-style control chart
def __init__(self, chart, X_test, figsize=(18, 6)):
self.chart_name = chart.stat.__repr__()
self.figsize = figsize
self.fig, self.ax = plt.subplots(figsize=self.figsize)
self.ax.set_xlabel('Time')
self.ax.set_ylabel(self.chart_name)
self.ax.set_title(self.chart_name + ' Control Chart')
self.ax.axhline(y=chart.upper_limit, color='grey', linestyle='--', lw=0.5)
self.ax.axhline(y=chart.lower_limit, color='grey', linestyle='--', lw=0.5)
self.ax.axhline(y=chart.center_line, color='black', lw=0.75)
stat_test, ooc_indices = chart.run(X_test, verbose=False)
self.ax.plot(stat_test, color='lightseagreen', lw=1.25)
self.ax.scatter(ooc_indices, stat_test[ooc_indices], facecolor='crimson', marker='s', s=10)
plt.show()
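# Hedged usage note (added): `chart` can be any fitted chart object exposing
# the attributes used above -- stat, upper_limit, lower_limit, center_line and
# run(X, verbose) -- e.g. ccplot(my_shewhart_chart, X_test, figsize=(12, 4)).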
| [
"[email protected]"
] | |
60048c313e05bbc5f0b93f2fa2ea0123d4309b37 | e68bb9b3f1befb0116967981783d1bc1f8ce1eef | /project-addons/lot_states/wizard/mrp_consume_quarantine.py | 37d40eb405b3b559b0f11ceb44d8db656e9e1276 | [] | no_license | maurolguin1/PXGO_00064_2014_PHA | a0732e3c59a5a606b37f2a860642ba6550a3d099 | 50d341a0fb69529160e59c0ceb03fe6264ef7ae1 | refs/heads/master | 2020-03-22T16:37:39.396576 | 2015-05-14T08:47:56 | 2015-05-14T08:47:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,540 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Pexego All Rights Reserved
# $Jesús Ventosinos Mayor <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class MrpConsumeQuarantine(models.TransientModel):
_name = 'mrp.production.consume.quarantine'
product_id = fields.Many2one('product.product', 'Product')
lot_id = fields.Many2one('stock.production.lot', 'Lot', required=True)
line_ids = fields.One2many('mrp.production.consume.quarantine.line', 'wizard_id', 'Lots')
@api.model
def default_get(self, fields):
res = super(MrpConsumeQuarantine, self).default_get(fields)
move_id = self.env.context.get('active_id', [])
move = self.env['stock.move'].browse(move_id)
res['product_id'] = move.product_id.id
lots = self.env['stock.production.lot'].search([('product_id', '=', move.product_id.id), ('state', '=', 'in_rev')])
lines = []
my_context = dict(self.env.context)
my_context['location_id'] = move.warehouse_id.wh_qc_stock_loc_id.id
for lot in lots:
my_context['lot_id'] = lot.id
qty = lot.product_id.with_context(my_context)._product_available()
qty = qty[lot.product_id.id]['qty_available']
lines.append((0,0, {'lot_id': lot.id, 'qty': qty, 'entry_date': lot.entry_quarantine}))
res['line_ids'] = lines
return res
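    # Hedged note (added): the (0, 0, vals) triples built in default_get are
    # the standard Odoo one2many "create" command; each one tells the ORM to
    # create a new wizard line with the given values.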
@api.multi
def consume(self):
group = self.env.ref('lot_states.mrp_use_quarantine')
if group not in self.env.user.groups_id:
raise exceptions.Warning(_('Permission error'), _('No permission to consume quarantine'))
move_id = self.env.context.get('active_id', [])
move = self.env['stock.move'].browse(move_id)
quality_location = move.warehouse_id.wh_qc_stock_loc_id
move.restrict_lot_id = self.lot_id.id
previous_move = self.env['stock.move'].search([('move_dest_id', '=',
move.id)])
previous_move.restrict_lot_id = self.lot_id.id
previous_move.location_id = move.warehouse_id.wh_qc_stock_loc_id.id
previous_move.write({'restrict_lot_id': self.lot_id.id,
'location_id': quality_location.id})
read_domain = [('location_id', '=', quality_location.id),
('product_id', '=', move.product_id.id),
('lot_id', '=', self.lot_id.id)]
q_quants = self.env['stock.quant'].read_group(
read_domain, ['reservation_id', 'qty'], ['reservation_id'])
q_move = False
for quant in q_quants:
if quant['qty'] > move.product_uom_qty:
move_id = quant['reservation_id'][0]
q_move = self.env['stock.move'].browse(move_id)
break
if q_move:
q_move.do_unreserve()
q_move.product_uom_qty -= previous_move.product_uom_qty
q_move.action_assign()
previous_move.original_move = move.original_move = q_move
else:
raise exceptions.Warning(_('quarantine error'),
_('Not found the move from quarantine'))
move.raw_material_production_id.final_lot_id.write(
{'state_depends': [(4, self.lot_id.id)]})
return True
class MrpConsumeQuarantineLine(models.TransientModel):
_name = 'mrp.production.consume.quarantine.line'
lot_id = fields.Many2one('stock.production.lot', 'Lot')
wizard_id = fields.Many2one('mrp.production.consume.quarantine', 'wizard')
qty = fields.Float('Quantity')
entry_date = fields.Date('Entry quarantine')
| [
"[email protected]"
] | |
4dc218da7a5113da958b27d32acefa665a10695e | ef79ff6fdc3e2c3e162792c7f56c6c31c12efb9d | /account_makeover/report/aged_partner_balance.py | e5cbd63841fa2a111ad69903e31526e0b5d32ec7 | [] | no_license | 3dfxmadscientist/odoo_isa | dee0197d03b7c9775b71a5e909f591035698c9ed | 898895da18ce78c702e0191cd64d2056559faeab | refs/heads/master | 2020-04-10T09:25:00.743961 | 2014-07-04T07:25:15 | 2014-07-04T07:25:15 | 21,499,414 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
from openerp.report import report_sxw
# from openerp.addons.account.report.common_report_header import common_report_header
from openerp.addons.account.report.account_aged_partner_balance import aged_trial_report
class parser_aged_trial_report(aged_trial_report):
def __init__(self, cr, uid, name, context):
aged_trial_report.__init__(self, cr, uid, name, context=context)
report_sxw.report_sxw('report.makeover.aged.trial.balance', 'res.partner',
os.path.dirname(os.path.realpath(__file__)) + '/aged_partner_balance.rml', parser=parser_aged_trial_report, header="internal landscape")
| [
"[email protected]"
] | |
5886c639f14ce10db24b8b4ca5e7007774e40d9c | 486318571da84691363fa5f090237318c4217dd8 | /tests/unittests/test_models.py | e8b7445f1218055f13d4c0f290532a0ce8219eeb | [] | no_license | klfoulk16/personal_website | f731912a2e67771ca75ba4a83aa1b620efac61fb | 0820b2beaba7bd2fc790dc11de05e1f44a36eca0 | refs/heads/main | 2023-04-18T04:02:52.752460 | 2021-05-05T16:10:09 | 2021-05-05T16:10:09 | 315,940,441 | 10 | 1 | null | 2021-03-11T15:48:21 | 2020-11-25T12:59:32 | Python | UTF-8 | Python | false | false | 2,065 | py | import pytest
from application.database import Posts, BodyImages, Subscribers, Admin
import datetime
def test_posts_model():
"""
GIVEN a Posts model
WHEN a new Post is created
THEN check the h1, header_path, youtube_vid, sample, body, category, and date fields are defined correctly
"""
h1 = "Hi"
header_path = "/stuff/stuff/stuff/stuff"
youtube_vid = "80938203"
sample = "Hi this is a sample"
body = "<p>hi I edited this</p>"
category = "code"
post = Posts(h1, sample, header_path, youtube_vid, body, category)
assert post.h1 == h1
assert post.header_path == header_path
assert post.youtube_vid == youtube_vid
assert post.sample == sample
assert post.body == body
assert post.category == category
def test_body_images():
"""
GIVEN a Body Images model
WHEN a new Body Image is created
THEN check the post_id and img_path fields are defined correctly
"""
post_id = 1
img_path = "stuff/stuff/stuff/stuff"
body_img = BodyImages(post_id, img_path)
assert body_img.post_id == post_id
assert body_img.img_path == img_path
def test_subscribers():
"""
GIVEN a Subscribers model
WHEN a new Subscriber is created
THEN check the first, last, email, and date_subscribed fields are defined correctly
"""
first = "Nelly"
last = "Kelly"
email = "[email protected]"
sub = Subscribers(first, last, email)
assert sub.first == first
assert sub.last == last
assert sub.email == email
assert sub.date_subscribed == datetime.date.today()
def test_admin():
"""
    GIVEN an Admin model
    WHEN a new Admin is created
    THEN check the username and password_hash fields are defined correctly
THEN also check that methods are properly defined
"""
username = "[email protected]"
password = "weeeeeeeeeeee" #this is not encrypted but should be when sending to database
admin = Admin(username, password)
assert admin.username == username
assert admin.password_hash == password
| [
"[email protected]"
] | |
19a69838af72624333f25bcd7754605562b07419 | 1f0c6179a0d755dc7ac2521ea6ab500475239d72 | /transmitter.py | cef4a8bd6ac45862044d2b58792b79be19de57ba | [
"Apache-2.0"
] | permissive | aritrog/sarathi | 7034b9ccad3fc152667e5607778b94f8cc55313c | aeae111405656dc334e366ea42a5a8e2c7b26c5d | refs/heads/master | 2021-08-22T16:45:15.371830 | 2020-07-04T23:08:21 | 2020-07-04T23:08:21 | 202,331,724 | 3 | 1 | Apache-2.0 | 2019-11-16T12:37:52 | 2019-08-14T10:51:56 | Python | UTF-8 | Python | false | false | 1,547 | py | import os ,random,struct,binascii,base64
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
from Crypto import Random
from Crypto.Cipher import AES
import hashlib
class transmit():
def __init__(self,key):
self.bs=AES.block_size
self.key=hashlib.sha256(key.encode()).digest()
def encrypt(self, raw):
raw=self. _pad(raw)
iv= Random.new().read(AES.block_size)
cipher=AES.new(self.key, AES.MODE_CBC,iv)
return base64.b64encode(iv + cipher.encrypt(raw))
def decrypt(self, enc):
enc=base64.b64decode(enc)
iv= enc[:AES.block_size]
cipher=AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
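    # Hedged note (added): _pad/_unpad implement PKCS#7-style padding -- each
    # pad byte stores the pad length, e.g. a 13-byte block gains three chr(3)
    # bytes, which _unpad reads back from the final byte to strip them.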
# This part of the code had not been compiled yet (the rest had) when this
# was uploaded to GitHub on 14/11/19 at 1 in the morning.
def get_data():
    # This piece of code corresponds to code required to form a connection to
    # the Arduino. That code will be part of the boot procedure and will hence
    # establish a connection before any function of transmit is called.
    # Here we create an arduino_connect object and use the already completed
    # connection to get data using the Arduino.
    pass  # body not implemented yet; pass keeps the module importable
ob=transmit("passwordkey")
txt="hi this is a text sample for encrption"
txt="hi this is a text sample for encryption"
print(txt)
while(len(txt)%16!=0):
txt=txt+"n"
print(ob.encrypt(txt))
print(ob.decrypt(ob.encrypt(txt)))
| [
"[email protected]"
] | |
2d1a6eafcbbdf684f1e6ac28f727e3e6a3c14ddd | 783324290a8c23ba03050032ecbf2be13558a536 | /pymethods/algorithms/elliptic_mesh_generation/meshStretch2d.py | 64c0b49286339634048edcb344307f3ca847fa15 | [] | no_license | IFF-0303/pymethods | 4a3a39af00554c2ed5e21528188214049766791f | c8690379dd9ca383cf3257a281094e4851677faa | refs/heads/master | 2023-07-03T21:30:59.695292 | 2020-12-14T03:18:03 | 2020-12-14T03:18:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,311 | py | import numpy as np
inner = np.s_[1:-1]
ip1 = np.s_[2:]
im1 = np.s_[0:-2]
def f1Prime(alpha, zeta):
return alpha * np.exp(alpha * zeta) / (np.exp(alpha) - 1)
def f1PrimePrime(alpha, zeta):
return (alpha**2) * np.exp(alpha * zeta) / (np.exp(alpha)-1)
def f2Prime(beta, eta):
return beta * np.exp(beta * eta) / (np.exp(beta)-1)
def f2PrimePrime(beta, eta):
return (beta**2) * np.exp(beta * eta) / (np.exp(beta)-1)
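# Hedged note (added): the derivatives above are consistent with the
# stretching function f(s) = (exp(a*s) - 1) / (exp(a) - 1), which maps [0, 1]
# onto [0, 1] while clustering grid points toward s = 0 for a > 0.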
def G11(mesh, zeta, eta):
x, y = mesh
numerator_1 = x[inner, ip1] - x[inner, im1]
denominator_1 = 2 * zeta
numerator_2 = y[inner, ip1] - y[inner, im1]
denominator_2 = 2 * zeta
a = (numerator_1/denominator_1)**2
b = (numerator_2/denominator_2)**2
return a+b
def G22(mesh, zeta, eta):
x, y = mesh
numerator_1 = x[ip1, inner] - x[im1, inner]
denominator_1 = 2 * eta
numerator_2 = y[ip1, inner] - y[im1, inner]
denominator_2 = 2 * eta
a = (numerator_1/denominator_1)**2
b = (numerator_2/denominator_2)**2
return a+b
def constructCoefficients(
mesh, zeta_delta, eta_delta, zeta_params, eta_params, alpha, beta
):
x, y = mesh
height, length = x.shape
g11 = G11(mesh, zeta_delta, eta_delta)
g22 = G22(mesh, zeta_delta, eta_delta)
f2_double_over_single_prime = (
f2PrimePrime(beta, eta_params[inner, inner])/f2Prime(beta, eta_params[inner, inner])
)
f1_double_over_single_prime = (
f1PrimePrime(alpha, zeta_params[inner, inner])/f1Prime(alpha, zeta_params[inner, inner])
)
b = 2 * (g11/(eta_delta**2) + g22/(zeta_delta**2))
a = f2_double_over_single_prime * g11 / (2*eta_delta) + g11/(eta_delta ** 2)
c = - f2_double_over_single_prime * g11 / (2*eta_delta) + g11/(eta_delta**2)
dTerm = (
g22 / zeta_delta * (
x[inner, im1]/zeta_delta +
f1_double_over_single_prime * x[inner, im1]/2 +
x[inner, ip1]/zeta_delta -
f1_double_over_single_prime * x[inner, ip1]/2
)
)
eTerm = (
g22 / zeta_delta * (
y[inner, im1]/zeta_delta +
f1_double_over_single_prime * y[inner, im1] / 2 +
y[inner, ip1]/zeta_delta -
f1_double_over_single_prime * y[inner, ip1]/2
)
)
return a, b, c, dTerm, eTerm
def solveTDMA(phi, a, b, c, dTerm):
    # Thomas algorithm (TDMA): a forward sweep builds P/Q, then a backward
    # sweep substitutes. Rows are indexed by j (phi.shape[0]), columns by i.
    P = np.zeros(phi.shape[0])
    Q = np.zeros(phi.shape[0])
    bArr = np.zeros(phi.shape[0])
    aArr = np.zeros(phi.shape[0])
    for i in np.arange(1, phi.shape[1]-1):
        Q[0] = phi[0][i]
        for j in np.arange(1, phi.shape[0]-1):
            P[j] = c[j-1][i-1]
            Q[j] = dTerm[j-1][i-1]
            bArr[j] = b[j-1][i-1]
            aArr[j] = a[j-1][i-1]
            term = 1.0 / (bArr[j] - aArr[j] * P[j - 1])
            Q[j] = (Q[j] + aArr[j] * Q[j - 1]) * term
            P[j] = P[j] * term
        # back substitution over rows (was phi.shape[-1], which only worked
        # for square grids)
        for j in np.arange(phi.shape[0]-2, -1, -1):
            phi[j][i] = P[j] * phi[j + 1][i] + Q[j]
def optimize(new_mesh, zeta_delta, eta_delta, zeta_params, eta_params,
alpha=0.01, beta=0.01, epsilon=1e-5, max_repeats=1000, residualInit=1000):
n_iters = 0
residual = residualInit
while residual > epsilon:
old_mesh = new_mesh.copy()
n_iters += 1
a, b, c, dTerm, eTerm = constructCoefficients(
new_mesh, zeta_delta, eta_delta, zeta_params, eta_params, alpha, beta
)
solveTDMA(new_mesh[0], a, b, c, dTerm)
solveTDMA(new_mesh[1], a, b, c, eTerm)
residual = (np.abs(old_mesh-new_mesh)).mean()
if n_iters > max_repeats:
break
return new_mesh
class meshStretch2d:
def __init__(self, mesh2d, zeta_delta, eta_delta, zeta_orig, eta_orig):
mesh2d = mesh2d[0:2]
assert mesh2d.shape[0] == 2
self.mesh = mesh2d
self.zeta_delta = zeta_delta
self.eta_delta = eta_delta
self.zeta_orig = zeta_orig
self.eta_orig = eta_orig
def __call__(self, **kwargs):
new_mesh = self.mesh.copy()
zeta_delta, eta_delta, zeta_orig, eta_orig = [
self.zeta_delta, self.eta_delta, self.zeta_orig, self.eta_orig]
new_mesh = optimize(
new_mesh, zeta_delta, eta_delta,
zeta_orig, eta_orig, **kwargs)
return new_mesh
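
# Hedged usage sketch (added): stretch a uniform unit-square grid. All values
# below are illustrative; only the constructor/__call__ signatures above are
# assumed.
if __name__ == '__main__':
    ny, nx = 21, 31
    zeta, eta = np.meshgrid(np.linspace(0, 1, nx), np.linspace(0, 1, ny))
    mesh = np.stack([zeta, eta])  # shape (2, ny, nx): x then y coordinates
    stretcher = meshStretch2d(mesh, 1.0 / (nx - 1), 1.0 / (ny - 1), zeta, eta)
    stretched = stretcher(alpha=0.5, beta=0.5, max_repeats=200)
    print(stretched.shape)  # (2, 21, 31)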
| [
"[email protected]"
] | |
9fe9ea9d09007ef7568122aac18607ee2e1cdc33 | 75212bbbbe9b09e6b920af3fe28d68987e8c4f2f | /Algorithm-Python/Week-2/fibonacci.py | 71cab665dbb9c9d28ed306cc76e2e38d172621b4 | [] | no_license | definito/Algorithm | 001a93c9cf63af1e5867e0bc49769c6d8f8dc8b7 | 31a71a96245ff94c80d1f9435c4512b0bf07f85c | refs/heads/master | 2021-06-10T08:41:58.389782 | 2020-05-27T22:20:24 | 2020-05-27T22:20:24 | 95,305,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | import sys
def fibonacci(n):
a = 0
b = 1
if n < 0:
print("Incorrect input")
elif n == 0:
return a
elif n == 1:
return b
else:
for i in range(2,n+1):
c = a + b
a = b
b = c
return b
if __name__ == '__main__':
input_n = int(input())
print(fibonacci(input_n))
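# Hedged example (added): entering 10 prints 55, since this implementation
# uses fibonacci(0) = 0 and fibonacci(1) = 1.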
| [
"[email protected]"
] | |
33e5cd4b2657c61467c435a3db9d60da3799993f | 98b8f1b1705f1ad8f36c68166770516f5717d9b1 | /contrib/kmb_search/topk_impl.py | 4a7f66ea0bb53e3bb8bb0055b16148411753f3b4 | [
"MIT"
] | permissive | gauenk/faiss_fork | b4fad9bfe7e027d52cd9f96a2ba00abd9e133245 | f51ffb536f829358bd1907acda89dfc8c1bc4146 | refs/heads/main | 2023-08-27T09:05:10.553656 | 2021-11-12T01:15:10 | 2021-11-12T01:15:10 | 408,296,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,537 | py |
# -- python --
import math
import numpy as np
from einops import rearrange,repeat
from numba import jit,njit,prange
# -- project --
from pyutils import save_image
# -- pytorch --
import torch
import torch.nn.functional as nnF
def pad_first_dim(tensor,pad):
tensor = torch.transpose(tensor,0,-1)
tensor = nnF.pad(tensor,(0,pad),value=float("nan"))
tensor = torch.transpose(tensor,0,-1)
return tensor
# -------------------------------------------
#
# Update State with Randomness
#
# -------------------------------------------
def update_state(propDists,prevDists,propModes,prevModes,
propInds,prevInds,sframes,s_iter):
"""
kmb_topk_rand:
Jump out of local optima by picking
an element of the search space that is not optimal
propDist/prevDist ratio:
Jump out of local optima by picking
a new distance that was not smaller than the previous distance
"""
# -- take top 1 of proposed --
# print("prevInds.shape: ",prevInds.shape)
propDists,propModes,propInds = kmb_topk_rand(propDists,propModes,propInds,s_iter)
# -- create modified dists for selection --
mPropDists = torch.nanmean(torch.abs(propDists-propModes),dim=0)
mPrevDists = torch.nanmean(torch.abs(prevDists-prevModes),dim=0)
mPrevDists[torch.where(torch.isnan(mPrevDists))] = 1000.
mPropDists[torch.where(torch.isnan(mPropDists))] = 1000.
assert mPrevDists.shape[0] == 1,"k == 1"
# -- compute ratio --
prior = 0.90 if s_iter > 1 else 1.
ratioDists = mPropDists/mPrevDists * prior
pexp = 2*math.log10(s_iter+1)+1#math.exp(2*math.log10(s+1))
pexp = pexp if s_iter > 1 else 1000.
ratioDists = torch.pow(ratioDists,pexp)
coin_flip = torch.rand_like(ratioDists)
toChange = torch.where(coin_flip > ratioDists)
# -- init next state --
nextDists = prevDists.clone()
nextInds = prevInds.clone()
nextModes = prevModes.clone()
# print("nextInds.shape: ",nextInds.shape)
# print("propInds.shape: ",propInds.shape)
# print(propInds[:,:,0,9,6])
# print("nextInds.shape: ",nextInds.shape)
# print(coin_flip.shape)
# print(propInds[:,:,:,9,6])
# -- fill in state --
# print("pre: ",nextInds[:,:,0,9,6])
nframes = propInds.shape[1]
nfsearch = len(sframes)
for ti,tj in enumerate(sframes):
nextDists[tj][toChange] = propDists[ti][toChange]
nextModes[tj][toChange] = propModes[ti][toChange]
for t in range(nframes):
nextInds[0,t][toChange] = propInds[0,t][toChange]
nextInds[1,t][toChange] = propInds[1,t][toChange]
# print("nextInds.shape: ",nextInds.shape)
# print("post: ",nextInds[:,:,0,9,6])
return nextDists,nextModes,nextInds
def kmb_topk_update(propDists,prevDists,propModes,prevModes,
propInds,prevInds,propSFrames,prevSFrames):
# -- pad across first dimension --
# print("prevDists.shape: ",prevDists.shape)
# print("propDists.shape: ",propDists.shape)
# print("propModes.shape: ",propModes.shape)
# pad = prevDists.shape[0] - propDists.shape[0]
# propDists = pad_first_dim(propDists,pad)
# propModes = pad_first_dim(propModes,pad)
# print("propDists.shape: ",propDists.shape)
# print("propModes.shape: ",propModes.shape)
# -- insert proposed into prev --
# b = propDists.shape[1]
# propDists_raw = propDists.clone()
# propModes_raw = propModes.clone()
# propDists = prevDists.clone().repeat(1,b,1,1)
# propModes = prevModes.clone().repeat(1,b,1,1)
# propDists[propSFrames] = propDists_raw
# propModes[propSFrames] = propModes_raw
# -- create stacks --
aug_vals = torch.cat([prevDists,propDists],dim=1)
aug_modes = torch.cat([prevModes,propModes],dim=1)
aug_inds = torch.cat([prevInds,propInds],dim=2)
# -- exec and return --
K = prevDists.shape[1]
return kmb_topk(aug_vals,aug_modes,aug_inds,K)
def kmb_topk_rand(vals,modes,inds,s_iter):
# -- init --
device = vals.device
tK,s,h,w = vals.shape
two,t,s,h,w = inds.shape
# -- run pytorch topk --
mvals = torch.nanmean(torch.abs(vals - modes),dim=0)
# -- misc --
# print(inds[:,:,0,9,6])
# print(inds[:,:,1,9,6])
# print(inds[:,:,5,9,6])
# print(mvals[:,9,6])
vals_topk,modes_topk,inds_topk = topk_torch_rand(mvals,vals,modes,inds,s_iter)
# print("inds_topk.shape: ",inds_topk.shape)
# print(inds_topk[:,:,0,9,6])
return vals_topk,modes_topk,inds_topk
def kmb_topk(vals,modes,inds,K):
# -- init --
device = vals.device
tK,s,h,w = vals.shape
two,t,s,h,w = inds.shape
    # -- create output vars --
# vals = vals.cpu().numpy()
# inds = inds.cpu().numpy()
# vals_topk = np.zeros((K,h,w))
# inds_topk = np.zeros((two,t,K,h,w))
# -- run pytorch topk --
mvals = torch.nanmean(torch.abs(vals - modes),dim=0)
# print("-- pre top k --")
# print("inds.shape: ",inds.shape)
# print("mvals.shape: ",mvals.shape)
# print(inds[:,:,0,4,5])
# print(mvals[:,4,5])
# print(vals[:,:,4,5])
# print(vals[:,:,4,5].shape)
vals_topk,modes_topk,inds_topk = topk_torch(mvals,vals,modes,inds,K)
# print("inds_topk.shape: ",inds_topk.shape)
# print(inds_topk[:,:,0,9,6])
# print("-- post top k --")
# print("vals_topk.shape: ",vals_topk.shape)
# print(inds_topk[:,:,0,4,5])
# print(vals_topk[:,:,4,5])
# -- launch numba --
# kmb_topk_numba(vals,inds,vals_topk,inds_topk)
# -- pytorch to numpy --
# vals_topk = torch.FloatTensor(vals_topk).to(device)
# inds_topk = torch.IntTensor(inds_topk).to(device)
return vals_topk,modes_topk,inds_topk
def topk_torch_rand(mvals,vals,modes,inds,s_iter,K=1):
"""
Jump out of local optima by picking
an element of the search space that is not optimal
"""
# -- take min --
assert K == 1,"only K = 1 right now."
topk = torch.topk(mvals,K,dim=0,largest=False,sorted=True)
topk_mvals = topk.values
# -- take ratio w.r.t. [ideal?] min --
eps = 1e-8
ratio_mvals = (topk_mvals+eps) / (mvals+eps) # [0,1] by construction
# -- sample using ratios as weights --
s,h,w = ratio_mvals.shape
weights = ratio_mvals
pexp = 5*math.log10(s_iter+1)+1#math.exp(2*math.log10(s+1))
pexp = pexp if s_iter > 1 else 1000.
weights = torch.pow(weights,pexp)
# -- save weights not equal to 1 on exh. search --
# wsum = torch.sum(weights,dim=0)
# wimg = torch.abs(wsum-1.)<1e-5
# wimg = wimg.type(torch.float)
# save_image("tkmb_wimg.png",wimg)
# -- sample across search space using weights --
weights = rearrange(weights,'s h w -> (h w) s')
samples = torch.multinomial(weights,1)
samples = rearrange(samples,'(h w) 1 -> 1 h w',h=h)
# print("samples.shape: ",samples.shape)
# print("topk.indices.shape: ",topk.indices.shape)
# print(ratio_mvals)
# print(samples)
# print(topk.indices)
# -- use indices to
return index_topk(vals,modes,inds,K,samples)
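# Hedged illustration (added) of the tempering above: at s_iter = 9,
# pexp = 5*log10(10) + 1 = 6, so a candidate with ratio 0.8 keeps weight
# 0.8**6 ~= 0.26 while the current best (ratio 1.0) keeps weight 1.0 --
# the sampling sharpens toward the optimum as iterations increase.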
def topk_torch(mvals,vals,modes,inds,K):
# print("pre")
topk = torch.topk(mvals,K,dim=0,largest=False,sorted=True)
# torch.cuda.synchronize()
# print("post")
return index_topk(vals,modes,inds,K,topk.indices)
def index_topk(vals,modes,inds,K,indices):
two = inds.shape[0]
assert two == 2,"check [modes,inds] order."
tK = vals.shape[0]
vals_topk = torch.zeros_like(vals)[:,:K]
modes_topk = torch.zeros_like(modes)[:,:K]
# print(vals.shape,indices.shape,modes.shape,vals_topk.shape)
# exit()
# for tk in range(tK):
# print(vals_topk.shape,vals.shape)
# vals_topk[tk] = torch.gather(vals[tk],dim=0,index=indices)
# modes_topk[tk] = torch.gather(modes[tk],dim=0,index=indices)
# exit()
inds_topk = torch.zeros_like(inds)[:,:,:K]
# print("inds.shape: ",inds.shape)
# print("indices.shape: ",indices.shape)
# print(inds[:,:,6,4,5])
# print(indices[0,4,5])
for i in range(inds.shape[0]):
for t in range(inds.shape[1]):
inds_topk[i,t] = torch.gather(inds[i,t],dim=0,index=indices)
# print(inds_topk[:,:,0,4,5])
return vals_topk,modes_topk,inds_topk
def kmb_topk_numba(vals,inds,vals_topk,inds_topk):
pass
# s,h,w = vals.shape
# K,h,w = vals_topk.shape
# for hi in prange(h):
# for wi in prange(w):
# for si in range(s):
# # -- update --
| [
"[email protected]"
] | |
2825869abf0d80c2fbadd1499c8068f1d7a3f4cf | a7ee6ca2121c0d6478a631df0b670e3280abb664 | /SConstruct | 7b4598fdea4b4de74adb25997180e2ec55e73df4 | [
"MIT"
] | permissive | Boyquotes/crash-bandicoot-godot | fdf0fcb8ea8289dc0b154e0f49b594ae64c1cfff | 863294fa44936f45748efe364601e8ec65ecc500 | refs/heads/master | 2023-06-14T11:30:33.249550 | 2020-09-03T11:29:46 | 2020-09-03T11:29:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,039 | #!python
import os, subprocess, sys
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
print("this is cool eh :D")
if (os.name=="nt"):
import subprocess
def mySubProcess(cmdline,env):
#print "SPAWNED : " + cmdline
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell = False, env = env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print("=====")
print(err.decode("utf-8"))
print("=====")
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
rv=0
if len(cmdline) > 32000 and cmd.endswith("ar") :
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3,len(args)) :
rv = mySubProcess( cmdline + args[i], env )
if rv :
break
else:
rv = mySubProcess( cmdline, env )
return rv
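# Hedged note (added): the chunked `ar` invocation above works around the
# ~32k-character command-line limit of CreateProcess on Windows by archiving
# object files one at a time instead of in a single call.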
opts = Variables([], ARGUMENTS)
# Gets the standard flags CC, CCX, etc.
env = DefaultEnvironment()
# Try to detect the host platform automatically.
# This is used if no `platform` argument is passed
if sys.platform.startswith('linux'):
host_platform = 'linux'
elif sys.platform == 'darwin':
host_platform = 'osx'
elif sys.platform == 'win32' or sys.platform == 'msys':
host_platform = 'windows'
else:
raise ValueError(
'Could not detect platform automatically, please specify with '
'platform=<platform>'
)
# Define our options
opts.Add(EnumVariable('target', "Compilation target", 'debug', ['d', 'debug', 'r', 'release']))
opts.Add(EnumVariable('platform', "Compilation platform", host_platform, ['', 'windows', 'x11', 'linux', 'osx']))
opts.Add(EnumVariable('p', "Compilation target, alias for 'platform'", '', ['', 'windows', 'x11', 'linux', 'osx']))
opts.Add(BoolVariable('use_llvm', "Use the LLVM / Clang compiler", 'no'))
opts.Add(PathVariable('target_path', 'The path where the lib is installed.', 'bin/'))
opts.Add(PathVariable('target_name', 'The library name.', 'libcdt-gd', PathVariable.PathAccept))
opts.Add(BoolVariable(
'use_mingw',
'Use the MinGW compiler instead of MSVC - only effective on Windows',
False
))
# Local dependency paths, adapt them to your setup
godot_headers_path = "thirdparty/godot-cpp/godot_headers/"
cpp_bindings_path = "thirdparty/godot-cpp/"
cpp_library = "libgodot-cpp"
# only support 64 at this time..
bits = 64
# Updates the environment with the option variables.
opts.Update(env)
# Process some arguments
if env['use_llvm']:
env['CC'] = 'clang'
env['CXX'] = 'clang++'
if env['p'] != '':
env['platform'] = env['p']
if env['platform'] == '':
print("No valid target platform selected.")
quit();
# Check our platform specifics
if env['platform'] == "osx":
env['target_path'] += 'osx/'
cpp_library += '.osx'
if env['target'] in ('debug', 'd'):
env.Append(CCFLAGS = ['-g','-O2', '-arch', 'x86_64', '-std=c++17'])
env.Append(LINKFLAGS = ['-arch', 'x86_64'])
else:
env.Append(CCFLAGS = ['-g','-O3', '-arch', 'x86_64', '-std=c++17'])
env.Append(LINKFLAGS = ['-arch', 'x86_64'])
elif env['platform'] in ('x11', 'linux'):
env['target_path'] += 'x11/'
cpp_library += '.linux'
if env['target'] in ('debug', 'd'):
env.Append(CCFLAGS = ['-fPIC', '-g3','-Og', '-std=c++17'])
else:
env.Append(CCFLAGS = ['-fPIC', '-g','-O3', '-std=c++17'])
elif env['platform'] == "windows":
env['target_path'] += 'windows/x64/'
cpp_library += '.windows'
if host_platform == 'windows' and not env['use_mingw']:
# This makes sure to keep the session environment variables on windows,
# that way you can run scons in a vs 2017 prompt and it will find all the required tools
env.Append(ENV = os.environ)
env.Append(CCFLAGS = ['-DWIN32', '-D_WIN32', '-D_WINDOWS', '-W3', '-GR', '-D_CRT_SECURE_NO_WARNINGS'])
if env['target'] in ('debug', 'd'):
env.Append(CCFLAGS = ['-EHsc', '-D_DEBUG', '-MDd'])
else:
env.Append(CCFLAGS = ['-O2', '-EHsc', '-DNDEBUG', '-MD'])
elif host_platform == 'linux' or host_platform == 'osx':
env['CXX'] = 'x86_64-w64-mingw32-g++'
env['AR'] = "x86_64-w64-mingw32-ar"
env['RANLIB'] = "x86_64-w64-mingw32-ranlib"
env['LINK'] = "x86_64-w64-mingw32-g++"
elif host_platform == 'windows' and env['use_mingw']:
env = env.Clone(tools=['mingw'])
env["SPAWN"] = mySpawn
if host_platform == 'linux' or host_platform == 'osx' or env['use_mingw']:
if env['target'] in ('debug', 'd'):
env.Append(CCFLAGS = ['-fPIC', '-g3','-Og', '-std=c++17'])
else:
env.Append(CCFLAGS = ['-fPIC', '-g0','-s','-O3', '-std=c++17'])
env.Append(LINKFLAGS=[
'--static',
'-Wl,--no-undefined',
'-static-libgcc',
'-static-libstdc++',
])
if env['target'] in ('debug', 'd'):
cpp_library += '.debug'
else:
cpp_library += '.release'
cpp_library += '.' + str(bits)
# make sure our binding library is properly includes
env.Append(CPPPATH=['CDT/include/', '.', godot_headers_path, cpp_bindings_path + 'include/', cpp_bindings_path + 'include/core/', cpp_bindings_path + 'include/gen/'])
env.Append(LIBPATH=[cpp_bindings_path + 'bin/'])
env.Append(LIBS=[cpp_library])
# tweak this if you want to use different folders, or more folders, to store your source code in.
env.Append(CPPPATH=['src/'])
sources = Glob('src/*.cpp')
library = env.SharedLibrary(target=env['target_path'] + env['target_name'] , source=sources)
Default(library)
# Generates help for the -h scons option.
Help(opts.GenerateHelpText(env))
| [
"[email protected]"
] | ||
05847e11fb76457486afad1b7417b96292f4bfe7 | 1e9f6e914bdb9ad79f74636eca5e2384a36595c5 | /pyronn/ct_reconstruction/__init__.py | 9843791cf969842ee18375bd3a8a15eb3386b687 | [
"Apache-2.0"
] | permissive | theHamsta/PYRO-NN | 18c1dea96659b44b4def2979b256d21133090840 | c454527c5edebc2cf4f351c6453ba013abf6a701 | refs/heads/master | 2020-07-29T10:30:44.480120 | 2019-09-17T12:38:05 | 2019-09-17T12:38:05 | 209,763,348 | 0 | 0 | Apache-2.0 | 2019-09-20T10:16:07 | 2019-09-20T10:16:06 | null | UTF-8 | Python | false | false | 599 | py | # Copyright [2019] [Christopher Syben, Markus Michen]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | [
"[email protected]"
] | |
8c40010ee1ea291323b93a24c8d96a24315a25d0 | fa33e457dbf78453f71795fe8e6ed07dbacec1db | /maingui.py | 58a1f2cfca5106e3a12e49d0553fc12cf0bb1cca | [] | no_license | Mountagha/projet-Automate | 91e879862420b21955a279ecccca608a46084c00 | db2336d9639a189b8e3170c4f0ffc2dc497104c1 | refs/heads/master | 2021-01-22T05:24:22.422731 | 2017-03-01T00:58:39 | 2017-03-01T00:58:39 | 81,659,770 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | #!/usr/bin/env python
#! -*- coding:utf-8 -*-
from Automate import Automate
from Tkinter import *
from gui import InterfaceGraphique
# create a minimal GUI to collect user input and display the automata
# create our main window
window = Tk()
interface = InterfaceGraphique(window)
interface.mainloop()
| [
"[email protected]"
] | |
bb205e0d3be5a05f7828eef5c267e833e58a348b | 9c7b2965396867b7d1459fafacd87b0ed14959c3 | /LowerSaxony/06_subset_shps_per_tile.py | 086fa6a524efdbd88d2049a7c1be41e738e581eb | [] | no_license | clejae/forland_repo | 461cd7fcd85615c2d33b0a5985d5d8ee37164032 | 3387eed69fc3a60e1d3a948b12fe23538f0b79da | refs/heads/master | 2023-08-17T06:30:37.571058 | 2023-08-09T08:46:49 | 2023-08-09T08:46:49 | 241,071,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,678 | py | #
# github Repo: https://github.com/clejae
# ------------------------------------------ LOAD PACKAGES ---------------------------------------------------#
import os
import time
import glob
from osgeo import ogr, osr
import joblib
## CJ REPO
import vector
import forland_wrapper
import general
# ------------------------------------------ DEFINE FUNCTIONS ------------------------------------------------#
# ------------------------------------------ START TIME ------------------------------------------------------#
stime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)
# ------------------------------------------ USER VARIABLES ------------------------------------------------#
wd = r'\\141.20.140.91\SAN_Projects\FORLand\Clemens\\'
# ------------------------------------------ LOAD DATA & PROCESSING ------------------------------------------#
os.chdir(wd)
####################################################################
## ADD ID column
# for pth in lst:
def workFunc(year):
pth = r'data\vector\IACS\LS\IACS_LS_{0}.shp'.format(year)
print("\n", year)
inv_shp = ogr.Open(pth, 1)
inv_lyr = inv_shp.GetLayer()
field_def = ogr.FieldDefn('ID', ogr.OFTInteger64)
inv_lyr.CreateField(field_def)
for f, feat in enumerate(inv_lyr):
feat.SetField("ID", f)
inv_lyr.SetFeature(feat)
inv_lyr.ResetReading()
print(year, "done")
if __name__ == '__main__':
joblib.Parallel(n_jobs=12)(joblib.delayed(workFunc)(year) for year in range(2019,2020))
####################################################################
## write tile names to txt file
# for i in range(1,4):
# pth = r"data\vector\grid\LS\Invekos_grid_LS_15km_sub{}.shp".format(i)
# shp = ogr.Open(pth)
# lyr = shp.GetLayer()
#
# file = open(r"data\vector\tile_list_LS_sub{}.txt".format(i), 'w+')
#
# for feat in lyr:
# fid = feat.GetField("POLYID")
# file.write(fid + "\n")
# lyr.ResetReading()
# file.close()
# ####################################################################
## Explore data structure
for year in range(2019, 2020):
pth = r'data\vector\IACS\LS\IACS_LS_{0}.shp'.format(year)
print("\n", year)
inv_shp = ogr.Open(pth)
inv_lyr = inv_shp.GetLayer()
vector.printFieldNames(inv_lyr)
fnames = vector.getFieldNames(inv_shp)
print(fnames)
for i in range(10):
feat = inv_lyr.GetFeature(i)
attrs = [feat.GetField(fname) for fname in fnames]
print(attrs)
####################################################################
# remove none geoms
for year in range(2019, 2020):
print('######################\n{0}\n######################'.format(year))
in_pth = r'data\vector\IACS\LS\IACS_LS_{0}.shp'.format(year)
out_pth = r'data\vector\IACS\LS\IACS_LS_{0}_no_nones.shp'.format(year)
forland_wrapper.removingNoneGeoms(in_pth, out_pth)
print(year, "done!")
## ALL Shapes don't have none geometries
## NAMING stays the same
####################################################################
## make geoms valid
# lst = list(range(2006,2011)) + list(range(2012,2020))
# lst = [2005, 2018, 2019]
lst = list(range(2011,2020))
import forland_wrapper
for year in lst:
print('######################\n{0}\n######################'.format(year))
in_pth = r'data\vector\IACS\LS\IACS_LS_{0}.shp'.format(year)
forland_wrapper.validityChecking(in_shp_pth = in_pth, id_field_name="ID")
print(year, "done!\n")
####################################################################
## subset invekos on TILE basis in parallel for one year
for i in range(1,4):
with open(r"data\vector\tile_list_LS_sub{}.txt".format(i)) as file:
tiles_lst = file.readlines()
tiles_lst = [item.strip() for item in tiles_lst]
for tile in tiles_lst:
def workFunc(year):
# year = 2017
pth = r'data\vector\IACS\LS\IACS_LS_{0}.shp'.format(year)
grid_pth = r'data\vector\grid\Invekos_grid_LS_15km.shp'
grid_shp = ogr.Open(grid_pth)
grid_lyr = grid_shp.GetLayer()
grid_lyr.SetAttributeFilter("POLYID = '" + tile + "'")
feat_grid = grid_lyr.GetNextFeature()
# year = general.findBetween(pth, 'Nutzung', '.shp')
print(year, tile)
# print("\n", pth)
inv_shp = ogr.Open(pth)
inv_lyr = inv_shp.GetLayer()
# print(year, "Number of features:", inv_lyr.GetFeatureCount())
# print("Extent:", inv_lyr.GetExtent())
inv_sr = inv_lyr.GetSpatialRef()
# transform = osr.CoordinateTransformation(grid_sr, inv_sr)
geom = feat_grid.geometry().Clone()
# print(year, "Geometry before transformation:\n", geom)
# geom.Transform(transform)
# print("Geometry after transformation:\n", geom)
inv_lyr.SetSpatialFilter(geom)
out_pth = r"data\vector\IACS\LS\tiles\{}".format(tile)
general.createFolder(out_pth)
out_shp_pth = r"data\vector\IACS\LS\tiles\{1}\IACS_{0}_{1}.shp".format(year, tile)
drv_shp = ogr.GetDriverByName('ESRI Shapefile')
inv_lyr_defn = inv_lyr.GetLayerDefn()
if os.path.exists(out_shp_pth):
drv_shp.DeleteDataSource(out_shp_pth)
out_shp = drv_shp.CreateDataSource(out_shp_pth)
lyr_name = os.path.splitext(os.path.split(out_shp_pth)[1])[0]
geom_type = ogr.wkbPolygon
out_lyr = out_shp.CreateLayer(lyr_name, inv_sr, geom_type=geom_type)
for i in range(0, inv_lyr_defn.GetFieldCount()):
field_def = inv_lyr_defn.GetFieldDefn(i)
out_lyr.CreateField(field_def)
for feat in inv_lyr:
out_feat = feat
out_lyr.CreateFeature(out_feat)
ouf_feat = None
inv_lyr.ResetReading()
del inv_shp, inv_lyr
del out_shp, out_lyr
print(year, tile, "done")
# if __name__ == '__main__':
# joblib.Parallel(n_jobs=22)(joblib.delayed(workFunc)(tile) for tile in tiles_lst)
if __name__ == '__main__':
joblib.Parallel(n_jobs=1)(joblib.delayed(workFunc)(year) for year in range(2011,2020))
# ------------------------------------------ END TIME --------------------------------------------------------#
etime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)
print("end: " + etime)
# ------------------------------------------ UNUSED BUT USEFUL CODE SNIPPETS ---------------------------------#
| [
"[email protected]"
] | |
4fab9b6d74e6e9291d5d6b793838630fdaa4ac61 | c901830bafc8035aa28eed8c84ab7255f6afe923 | /unfolding/runCorrectionEnergyFakeTrgCutoff.py | d2acbb8199754ee9b5dce19b21d684302b55d95e | [] | no_license | mfasDa/SubstructureAnalysis | b003882418e0f8eae8188eafaa9b54cec8b3030e | fc365c285a148911fcab96677c653d5140d18d86 | refs/heads/master | 2022-06-13T04:14:15.877728 | 2022-05-31T16:33:23 | 2022-05-31T16:33:23 | 118,133,369 | 5 | 2 | null | 2022-12-09T15:53:41 | 2018-01-19T14:14:57 | C++ | UTF-8 | Python | false | false | 2,481 | py | #! /usr/bin/env python
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from threading import Thread, Lock
class workpool:
def __init__(self):
self.__tasks = []
self.__mutex = Lock()
def insert(self, task):
self.__mutex.acquire()
self.__tasks.append(task)
self.__mutex.release()
def pop(self):
payload = None
self.__mutex.acquire()
if len(self.__tasks):
payload = self.__tasks.pop(0)
self.__mutex.release()
return payload
class Processor(Thread):
def __init__(self, workqueue):
Thread.__init__(self)
self.__workqueue = workqueue
def run(self):
task = self.__workqueue.pop()
while task != None:
subprocess.call(task, shell = True)
task = self.__workqueue.pop()
def getrepo():
sciptname = os.path.abspath(sys.argv[0])
return os.path.dirname(sciptname)
if __name__ == "__main__":
REPO = getrepo()
    parser = argparse.ArgumentParser(prog="runCorrectionEnergy.py", description="Run correction chain 1D")
parser.add_argument("datadir", metavar="DATADIR", help="Location where to find the data")
parser.add_argument("-z", "--zleading", type=float, default=1.1, help="Cut on the leading neutral constituent")
args = parser.parse_args()
SCRIPTS = ["runCorrectionChain1DBayes_SysFakeTrgSwap.cpp"]
#SCRIPTS = ["runCorrectionChain1DBayes_SysRegTrgSwap.cpp"]
DATADIR = args.datadir
ZCUT= args.zleading
#CUTOFFS = [50., 60., 70., 80., 90., 100., 120.]
CUTOFFS = [120.]
BASEDIR = os.getcwd()
for CUT in CUTOFFS:
cutoffdir = os.path.join(BASEDIR, "cutoff%d" %(int(CUT)))
if not os.path.exists(cutoffdir):
            os.makedirs(cutoffdir, 0o755)  # octal mode; 0o755 parses under both py2.6+ and py3
os.chdir(cutoffdir)
WORKQUEUE = workpool()
for RADIUS in range(2, 6):
print("Unfolding R=%.1f" %(float(RADIUS)/10.))
for SCRIPT in SCRIPTS:
cmd="root -l -b -q \'%s(%f, %f, %f, \"%s\")'" %(os.path.join(REPO, SCRIPT), float(RADIUS)/10., ZCUT, CUT, DATADIR)
print("Command: %s" %cmd)
WORKQUEUE.insert(cmd)
WORKERS = []
for IWORK in range(0, 4):
WORKER = Processor(WORKQUEUE)
WORKER.start()
WORKERS.append(WORKER)
for WORKER in WORKERS:
WORKER.join()
os.chdir(BASEDIR) | [
"[email protected]"
] | |
69a1bb73fc99c15a7457dfafc605f31130d4167e | 18137ede97006561de0bf109055be06b7bab3e71 | /第四周/最小基因变化.py | e25ead2f8aad811c3d859d5f7b6f782a0fe390bd | [] | no_license | aisen-x/git_learn | 0defe07eb5ee27d138cd69cbff37ee58ef980acb | 44f385fd269329503f4176a0dafaf7b9fd8b3070 | refs/heads/master | 2023-05-10T03:45:12.765662 | 2021-06-14T15:37:09 | 2021-06-14T15:37:09 | 354,201,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from typing import List
class Solution:
def minMutation(self, start: str, end: str, bank: List[str]) -> int:
bank = set(bank)
if end not in bank: return -1
dict_ = {
"A": "CGT",
"C": "AGT",
"G": "CAT",
"T": "CGA"
}
queue = [(start, 0)]
while queue:
node, step = queue.pop(0)
if node == end:
return step
for i, s in enumerate(node):
for c in dict_[s]:
new = node[:i] + c + node[i+1:]
if new in bank:
queue.append((new, step+1))
bank.remove(new)
return -1 | [
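
# Hedged usage example (added): the classic LeetCode 433 sample -- expects 2,
# via AACCGGTT -> AACCGGTA -> AACCGCTA.
if __name__ == '__main__':
    s = Solution()
    print(s.minMutation("AACCGGTT", "AACCGCTA",
                        ["AACCGGTA", "AACCGCTA", "AAACGGTA"]))  # 2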
"[email protected]"
] | |
d4d98a04bb227001091d27832dc6dda7e8bdb711 | ecaf7a4af0e68296363ad62080d38889b6ffdd1d | /student/migrations/0002_auto_20160803_1417.py | e9943a29c7142169e163fdeacf5ace64ae6f5924 | [] | no_license | Patch67/openmis2 | 3c45cd394d3a09f3a8fda903f37f111073c086eb | 92d35da308ab586a6b34c0332b46a028c3c005d8 | refs/heads/master | 2021-01-19T04:19:44.286109 | 2016-08-12T06:49:25 | 2016-08-12T06:49:25 | 65,443,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-03 13:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='student',
name='first_name',
field=models.CharField(default='forename', max_length=50),
),
migrations.AddField(
model_name='student',
name='last_name',
field=models.CharField(default='surname', max_length=30),
),
]
| [
"[email protected]"
] | |
94febc2b579cb0ba9226d746eb929b24ac66b9b7 | d37ae956875dcff4ba4d8ca4ce4057c9acc2a314 | /Py_Practice/Tkinter_屏保/screensaver_V01.py | 399239b2a0994f46867b7d3fa72a5d0cd39402fc | [] | no_license | w976994214/Project | d74904fe2215ae38b027dbb714d90f5e67ad7065 | 8a7a0a35c62e73e8b72e17f3842ae023d90bafda | refs/heads/master | 2020-05-16T02:47:27.228346 | 2019-08-29T08:57:48 | 2019-08-29T08:57:48 | 182,638,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,965 | py | """
tkinter项目实战-屏保
分析:
屏幕保护可以自己启动,也可手动启动
一旦敲击键盘或移动鼠标,或者其他事件,则停止
如果屏保事一幅画的话,则没有画框
图像的运作时随机的,具有随机性,可能包括颜色、大小、数量、运动方向、变形
构成:
ScreenSaver
需要一个canvas,大小等于屏幕大小,没有边框
ball
颜色、大小、数量、运动方向、变形
可移动,可以被调用
"""
import random
import tkinter
class RandomBall(object):
"""
定义运动的球的类
"""
def __init__(self, canvas, scrnwidth, scrnheight):
"""
canvas:画布,所有的内容都应该在画布上呈现出来,此处通过次变量传入
scrnwidth/scrnheigh:屏幕的宽高
"""
self.canvas = canvas
self.item = 0
# 球的大小随机
# 球的大小用半径表示
self.radius = random.randint(20, 120)
# 球出现的初始位置要随机,表示球的圆心的位置
# xpos表示位置的X坐标
self.xpos = random.randint(self.radius, int(scrnwidth)-self.radius)
# ypos表示位置的Y坐标
self.ypos = random.randint(self.radius, int(scrnheight)-self.radius)
# 定义球的运动的速度
# 模拟运动,随机运动速度,不断地擦掉原来的画,然后在新的地方从新绘制(每次移动一点)
# 模拟X轴运动
self.xvelocity = random.randint(4, 20)
# 模拟Y轴运动
self.yvelocity = random.randint(4, 20)
# 定义屏幕的宽度
self.scrnwidth = scrnwidth
# 定义屏幕的高度
self.scrnheight = scrnheight
# 定义颜色
# RGB表示法:三个数字每个数字值0-255之间表示红绿蓝三个颜色的大小
# 在某些系统中,用英文单词也可以表示,比如red,green
# 此处用lambda表达式
def c():
return random.randint(0, 255)
self.color = '#%02x%02x%02x' % (c(), c(), c())
def create_ball(self):
"""
用构造函数定义的变量值,在canvas上画一个球
"""
# tkinter没有画圆形函数
# 只有一个画椭圆函数,画椭圆需要定义两个坐标
# 在一个长方形内画椭圆,我们只需要定义长方形左上角和右下角就行
x1 = self.xpos - self.radius
x2 = self.xpos + self.radius
y1 = self.ypos - self.radius
y2 = self.ypos + self.radius
# fill表示填充颜色
# outline表示边框颜色
self.item = self.canvas.create_oval(x1, y1, x2, y2, fill=self.color, outline=self.color)
def move_ball(self):
# 移动球的时候,需要控制球的方向
# 每次移动后,球都有一个新的坐标
self.xpos += self.xvelocity
self.ypos += self.yvelocity
# 判断撞墙
if self.xpos + self.radius >= self.scrnwidth:
self.xvelocity *= -1
# 也可写成self.xvelocity = -self.xvelocity
if self.xpos - self.radius <= 0:
self.xvelocity *= -1
if self.ypos + self.radius >= self.scrnheight:
self.yvelocity *= -1
if self.ypos - self.radius <= 0:
self.yvelocity *= -1
# 在画布上挪动图画
self.canvas.move(self.item, self.xvelocity, self.yvelocity)
class ScreenSaver(object):
"""
定义屏保的类
可以被启动
"""
# 如何装随机产生的球
balls = list()
def __init__(self):
# 每次启动球的数量随机
self.num_balls = random.randint(6, 20)
self.root = tkinter.Tk()
        # Remove the window border.
        self.root.overrideredirect(1)
        # Any mouse movement closes the screensaver.
        self.root.bind('<Motion>', lambda e: self.root.destroy())
        # Likewise, any key press exits the screensaver.
        self.root.bind('<Key>', lambda e: self.root.destroy())
        # Get the screen dimensions.
w, h = self.root.winfo_screenwidth(), self.root.winfo_screenheight()
        # Create the canvas: its parent window and its size.
self.canvas = tkinter.Canvas(self.root, width=w, height=h)
self.canvas.pack()
        # Draw the balls on the canvas.
for i in range(self.num_balls):
ball = RandomBall(self.canvas, scrnwidth=w, scrnheight=h)
ball.create_ball()
self.balls.append(ball)
self.run_screen_saver()
self.root.mainloop()
def run_screen_saver(self):
for ball in self.balls:
ball.move_ball()
        # after() schedules a function to run after the given number of milliseconds;
        # the function to run is the second argument. 200 ms is used here,
        # i.e. the balls move once every 200 ms.
self.canvas.after(200, self.run_screen_saver)
if __name__ == "__main__":
ScreenSaver()
| [
"[email protected]"
] | |
5723936537563bf8d80ec19210d44c360aaeea77 | 42a3aaf2347c8eaa83a61ecb65bca40b0320fc6f | /socket05s.py | 8d947dee9285add7e6db559aa7d5a56a0735f13e | [] | no_license | qinfena/python_net | 5893281525144c68c6f98c297bcf4f4e517de4ca | 72f8df6d77dcdcbb737fc66318fd9c2b21e875c0 | refs/heads/main | 2023-03-05T08:45:53.721116 | 2021-02-20T10:01:34 | 2021-02-20T10:01:34 | 340,620,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | import socket
import psutil
def do_cpu():
data = str(psutil.cpu_percent(0)) + '%\n'
count = 0
for process in psutil.process_iter():
data = data + process.name()
data = data + ',' + str(process.pid)
cpu_usage_rate_process = str(process.cpu_percent(0)) + '%'
data = data + ',' + cpu_usage_rate_process + '\n'
count += 1
if count == 10:
break
return data
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('192.168.12.76', 8090))
print('Bind UDP on 8090')
while True:
(info,addr) = s.recvfrom(1024)
data = do_cpu()
    s.sendto(data.encode('utf-8'), addr)
print('The client is ', addr)
    print('Sent CPU data is:', data)
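# A minimal client sketch for exercising the UDP server above, meant to be
# run as a separate process; the address, port and buffer size mirror the
# server's values and are assumptions, not part of the original file.
def demo_client():
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client.sendto(b'report', ('192.168.12.76', 8090))  # any datagram triggers a reply
    reply, server_addr = client.recvfrom(4096)  # the CPU report built by do_cpu()
    print(reply.decode('utf-8'))
    client.close()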
| [
"[email protected]"
] | |
02c5e834dc1df9cbf54266b2bca5a7a80ac5c957 | f820ac21e543206c9b345144827cc0702653c6a0 | /env2/bin/freeze_graph | 7ae934acd3d69abf5adb5c7e548ae5dd28566ed0 | [
"Apache-2.0"
] | permissive | HweheeChung/acai-iclr-2019 | 1885b0d23e04febcb7387947e482782488665a99 | 60eaef7c6a238396b784bcc57dc689b62461d29f | refs/heads/master | 2020-05-15T09:37:36.179629 | 2019-04-29T12:26:39 | 2019-04-29T12:26:39 | 182,180,758 | 0 | 0 | null | 2019-04-19T01:25:43 | 2019-04-19T01:25:43 | null | UTF-8 | Python | false | false | 273 | #!/Data/github/acai-iclr-2019/env2/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tensorflow.python.tools.freeze_graph import run_main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_main())
| [
"[email protected]"
] | ||
aed6a5cee7db41042619ae1167ba4ccf10c709b3 | daba182ef8dca49585c9a97ed379072eb4565931 | /21.py | 8d09105eb5c63fffef86e2135963b3a091aeaa93 | [] | no_license | krlos097/21 | f4e52ae703c7cdc1782f343ca152fc8f6268fca3 | 40e81a799ad96fe5cda7f34cc14321a3b7542fdf | refs/heads/master | 2021-06-26T14:58:06.396200 | 2017-09-11T22:14:23 | 2017-09-11T22:14:23 | 102,394,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,995 | py | import random
#Create the deck values (1 to 10 plus J, Q and K, which count as 10)
def crearMazo():
return range(1,11) + [10, 10, 10]
def crearPalos(palos):
if (palos == 0):
return []
else:
return crearMazo() + crearPalos(palos - 1)
def Barajar(mazo):
random.shuffle(mazo)
return mazo
def crearBaraja():
return Barajar(crearPalos(4))
def analizarCartas(cartas):
print [str(carta) if carta != 1 else "1/11" for carta in cartas]
def Ases(cartas, ases):
if (ases == 0):
return sum(cartas)
elif (sum(cartas) + 10 * ases <= 21):
        return sum(cartas) + 10 * ases
else:
return Ases(cartas, ases - 1)
#Get the score of the cards in the hand
def Puntaje(cartas):
return Ases(cartas, cartas.count(1))
def T_Jugador(cartas):
    print "Your cards are: "
    analizarCartas(cartas[0])
    if (Puntaje(cartas[0]) < 21 and bool(input("Hit (1) / Stand (0): "))):
        return T_Jugador([cartas[0] + [cartas[2][0]]] + [cartas[1]] + [cartas[2][1:]])
    else:
        return cartas
def T_Repartidor(cartas):
    if (Puntaje(cartas[1]) < 22):
        return T_Repartidor([cartas[0]] + [cartas[1] + [cartas[2][0]]] + [cartas[2][1:]])
    else:
        print "The Dealer's cards are:"
        analizarCartas(cartas[1])
        return cartas
def ResultadoFinal(Pjugador, Prepartidor):
    print "Player score: " + str(Pjugador) + "\nDealer score: " + str(Prepartidor) + "\n-----------------------------"
    if (Pjugador > 21):
        print "The Dealer WINS the game."
    elif Pjugador == 21 and (Prepartidor > 21 or Prepartidor < 21):
        print "The Player WINS the game."
    elif (Pjugador == 21 and Prepartidor == 21):
        print "THE DEALER WINS."
    elif (Pjugador < 21 and (Prepartidor > 21 or Prepartidor < Pjugador)):
        print "The Player WINS the game."
    elif (Pjugador < 21 and Prepartidor > Pjugador and Prepartidor < 22):
        print "The Dealer WINS the game."
    elif (Pjugador == Prepartidor):
        print "The Dealer wins"
#Main game function where everything comes together (create the deck / deal the cards / run the turns / print the game result).
def ElJuego(cartas,Turno):
    if cartas[2] == []:
        print "\nCreating the deck..."
        ElJuego(cartas[0:2] + [crearBaraja()], Turno)
    elif cartas[0] == []:
        print "Dealing the first cards..."
        ElJuego([cartas[2][0:2]] + [cartas[1]] + [cartas[2][2:]], Turno)
    elif cartas[1] == []:
        print "Dealing the second cards..."
        ElJuego([cartas[0]] + [[cartas[2][0]]] + [cartas[2][1:]], Turno)
    elif Turno == 'Jugador':
        print "-------------------------------"
        print "Player's turn\n"
        ElJuego(T_Jugador((cartas)),'Repartidor')
    elif Turno == 'Repartidor':
        print "\n-------------------------------"
        print "Dealer's turn\n"
        if (Puntaje(cartas[0])<22 and Puntaje(cartas[1])<Puntaje(cartas[0])):
            ElJuego(T_Repartidor((cartas)), 'FIN')
        else:
            print "The Dealer's final cards are: "
            analizarCartas(cartas[1])
            ResultadoFinal(Puntaje(cartas[0]),Puntaje(cartas[1]))
    else:
        print "\n\n--- GAME RESULT ---\n"
        print "The Player's final cards are: "
        analizarCartas(cartas[0])
        print "The Dealer's final cards are: "
        analizarCartas(cartas[1])
        print "\n--------------------------------------------"
        print "The final scores are:"
        ResultadoFinal(Puntaje(cartas[0]),Puntaje(cartas[1]))
#Main entry point of the game
def Juego():
    print "GAME OF 21"
    ElJuego([[], [], []], 'Jugador')
    print "\nEnd Of Game"
#Start the game
Juego()
| [
"[email protected]"
] | |
cff95b961d789ca9c8dc2aa750326f673898db1e | e2af7750bfc3e1834c9b46be5d8ac0fd20d61e0b | /ticket/migrations/0001_initial.py | 154e341c449854f4cda538b977057a44ae2f7207 | [] | no_license | MahanaElbana/CinemaTicketsReservation_Api | 5cca784c01e83663dca002035def8010aebfccc5 | 3be30bc504b8283db5ba179a2cac81d9eeb99cc8 | refs/heads/main | 2023-08-08T09:57:01.366960 | 2021-09-06T09:15:37 | 2021-09-06T09:15:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | # Generated by Django 3.2.6 on 2021-09-01 08:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Geust',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
('mobile', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hall', models.CharField(max_length=10)),
('movie', models.CharField(max_length=10)),
('data', models.DateField()),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('geust', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservation', to='ticket.geust')),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservation', to='ticket.movie')),
],
),
]
| [
"[email protected]"
] | |
2263dba5a2e7dcfcb1a99554ec165e4b8d52d3af | 89b6997b24e404c176358073626a8bfad7bcdb8e | /.history/courses/api/urls_20210426132401.py | a7fabb6eb22b1afeeebd95f123329c5eaeb754d1 | [] | no_license | mohamedhawas123/Education-platform-django | 513e64ac112880385402ce609077796578b4e9ee | 7b83e66bba66b8b2b1a007f5818a534653e6abfb | refs/heads/main | 2023-07-18T16:19:52.177886 | 2021-09-24T12:04:09 | 2021-09-24T12:04:09 | 352,306,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | from django.urls import path, include
from .views import SubjectListView, SubjectDetailView, CourseViewList
from rest_framework import routers
from . import views
app_name = 'courses'
router = routers.DefaultRouter()
router.register('courses', views.CourseViewList)
urlpatterns = [
    path('subjects/', SubjectListView.as_view(), name="subject_list"),
path('subjects/<pk>/', SubjectDetailView.as_view(), name="subject_detail"),
# path('courses/', ModuleListView.as_view(), name="course_list" ),
path('', include(router.urls)),
path('courses/<pk>/enroll/', CourseViewList.as_view(), name='course_enroll'),
]
| [
"[email protected]"
] | |
1b891071e90ee29ffabeab565da595a88b22ffa0 | 9c6a05d671049913b6f811b30967ecc79794ca68 | /dhtserver.py | 0ba4753e9e29a7dbf0742044a8ae176dc868df66 | [] | no_license | yucy/DHT | 955d2e0ca393c417367988256a33e46335c26aba | d5d2aeff5fb49c7b11afb4e41ddd239a7d316e01 | refs/heads/master | 2021-01-19T02:30:12.425330 | 2017-06-11T07:22:38 | 2017-06-11T07:22:38 | 87,285,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | # -*- coding:utf-8 -*-
import hashlib,time
import socket
# Initial public bootstrap nodes
BOOTSTRAP_NODES = (
("67.215.246.10", 6881),
("82.221.103.244", 6881),
("23.21.224.150", 6881),
("localhost", 6881),
)
# Use the SHA-1 algorithm and return the hex digest of key
def str_encrypt(key):
sha = hashlib.sha1(key)
encrypts = sha.hexdigest()
print encrypts
return encrypts
# Introduction to the socket API: http://blog.csdn.net/rebelqsp/article/details/22109925
# Sends a request; this side acts as the client
def ping():
m_client = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
m_client.sendto(b'this is a test',BOOTSTRAP_NODES[3])
m_client.close()
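# The ping() above sends a plain test string; a real DHT node expects a
# bencoded KRPC "ping" query instead. The sketch below builds one by hand,
# for illustration only -- the transaction id 'aa' and the random node id
# are assumptions, not values from the original file.
def krpc_ping_message():
    import os
    node_id = hashlib.sha1(os.urandom(20)).digest()  # 20-byte node id
    # bencoded form of {"t": "aa", "y": "q", "q": "ping", "a": {"id": node_id}}
    return b'd1:ad2:id20:' + node_id + b'e1:q4:ping1:t2:aa1:y1:qe'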
# Answers client requests; this side acts as the server
def dong():
    m_server = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    m_server.bind(('', 6881))  # bind() expects an (address, port) tuple
    print 'Waiting for incoming data'
while True:
time.sleep(5)
        # Receive data
data,addr = m_server.recvfrom(1024)
print addr
print data
if __name__ == '__main__':
# str_encrypt('lamda')
ping()
'''
    The DHT protocol has 4 message types:
    ping
    find_node
    get_peers (called find_value in the eDonkey Kad network)
    announce_peer
''' | [
"[email protected]"
] | |
64a6001c461b63871f2473e2890588b8d3d3dff6 | ae7d1e7cfb16aa5c330ff3903699bdae7a57d2fe | /zoo_catagories_rnn.py | 683f3d1cc13b4f980d836dd31b65d977604f6b40 | [] | no_license | Christopher-Braun/zoo_animal_classification | e933f2ada35a85d22731194df83524574ad51c0c | bdfa7dc08e5083d05052d0203661361a9116e821 | refs/heads/master | 2021-08-31T07:09:57.404068 | 2017-12-20T16:11:43 | 2017-12-20T16:11:43 | 114,904,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,607 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('zoo.csv')
X = dataset.iloc[:, 1:17].values
y = dataset.iloc[:, -1].values
y[:][y[:]==7]=int(0)
X[:,12][X[:,12]==2]=int(1)
X[:,12][X[:,12]==4]=int(2)
X[:,12][X[:,12]==6]=int(3)
X[:,12][X[:,12]==8]=int(4)
y_som = y
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features = [0])
X12 = onehotencoder.fit_transform(X[:, 12].reshape(-1, 1)).toarray()
y = onehotencoder.fit_transform(y.reshape(-1,1)).toarray()
y = np.asarray(y, dtype = int)
X12 = np.asarray(X12, dtype = int)
Xnew = np.append(X, X12, axis=1)
X = np.delete(Xnew, 12, axis=1)
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
X_SOM = sc.fit_transform(X)
# Training the SOM
from minisom import MiniSom
som = MiniSom(x = 10, y = 10, input_len = 21, sigma = 1.0, learning_rate = 0.5)
som.random_weights_init(X_SOM)
som.train_random(data = X_SOM, num_iteration = 100)
# Visualizing the results
from pylab import bone, pcolor, colorbar, plot, show
bone()
pcolor(som.distance_map().T)
colorbar()
markers = ['o', 's', 'x', 'o', 's', 'x', 'v']
colors = ['r', 'g', 'b', 'w', 'y', 'c', 'm']
for i, x in enumerate(X_SOM):
w = som.winner(x)
plot(w[0] + 0.5,
w[1] + 0.5,
markers[y_som[i]],
markeredgecolor = colors[y_som[i]],
markerfacecolor = 'None',
markersize = 10,
markeredgewidth = 2)
show()
mappings = som.win_map(X_SOM)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Part 2 - Make the ANN
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 7, kernel_initializer = 'uniform', activation = 'relu', input_dim = 21))
# Adding the second hidden layer
classifier.add(Dense(units = 21, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 7, kernel_initializer = 'uniform', activation = 'softmax'))
# Compiling the ANN
classifier.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 200)
# Part 3 - Making predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_train)
y_pred_test = classifier.predict(X_test)
y_pred_cat = np.argmax(y_pred, axis=1)
y_pred_test_cat = np.argmax(y_pred_test, axis=1)
y_train_cat = np.argmax(y_train, axis=1)
y_test_cat = np.argmax(y_test, axis=1)
#Making the Confusion Matrix (compares actual values with predictions)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_train_cat, y_pred_cat)
cm_test = confusion_matrix(y_test_cat, y_pred_test_cat)
cm_count=0
cm_wrong=0
for i in range(len(cm)):
cm_count += cm[i,i]
for v in range(len(cm)):
cm_wrong += cm[i,v]
cm_wrong -= cm_count
cm_test_count=0
cm_test_wrong=0
for i in range(len(cm_test)):
cm_test_count += cm_test[i,i]
for v in range(len(cm_test)):
cm_test_wrong += cm_test[i,v]
cm_test_wrong -= cm_test_count
accuracy = cm_count/(cm_count + cm_wrong)
accuracy_test = cm_test_count/(cm_test_count + cm_test_wrong)
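# Equivalent cross-check with scikit-learn's built-in metric; a small sketch
# that should match the hand-computed ratios above (up to Python 2 integer
# division in the originals).
from sklearn.metrics import accuracy_score
print(accuracy_score(y_train_cat, y_pred_cat))
print(accuracy_score(y_test_cat, y_pred_test_cat))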
| [
"[email protected]"
] | |
b8968b3b65bf1de5df78c3c1473ecaf43be8aed9 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/2e033ce6e3a2cdde5174895cadb3b406b2a013729dd641fee2cebd9f7ed97879/cv2/cv2/xfeatures2d_BoostDesc.py | ee8d24abbfaff2a33bb993d84a1fd7baffc4e439 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | # encoding: utf-8
# module cv2.cv2
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """
# imports
import cv2.cv2  # C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
import cv2.Error as Error # <module 'cv2.Error'>
import cv2.aruco as aruco # <module 'cv2.aruco'>
import cv2.bgsegm as bgsegm # <module 'cv2.bgsegm'>
import cv2.bioinspired as bioinspired # <module 'cv2.bioinspired'>
import cv2.cuda as cuda # <module 'cv2.cuda'>
import cv2.datasets as datasets # <module 'cv2.datasets'>
import cv2.detail as detail # <module 'cv2.detail'>
import cv2.dnn as dnn # <module 'cv2.dnn'>
import cv2.face as face # <module 'cv2.face'>
import cv2.fisheye as fisheye # <module 'cv2.fisheye'>
import cv2.flann as flann # <module 'cv2.flann'>
import cv2.ft as ft # <module 'cv2.ft'>
import cv2.hfs as hfs # <module 'cv2.hfs'>
import cv2.img_hash as img_hash # <module 'cv2.img_hash'>
import cv2.instr as instr # <module 'cv2.instr'>
import cv2.ipp as ipp # <module 'cv2.ipp'>
import cv2.kinfu as kinfu # <module 'cv2.kinfu'>
import cv2.line_descriptor as line_descriptor # <module 'cv2.line_descriptor'>
import cv2.linemod as linemod # <module 'cv2.linemod'>
import cv2.ml as ml # <module 'cv2.ml'>
import cv2.motempl as motempl # <module 'cv2.motempl'>
import cv2.multicalib as multicalib # <module 'cv2.multicalib'>
import cv2.ocl as ocl # <module 'cv2.ocl'>
import cv2.ogl as ogl # <module 'cv2.ogl'>
import cv2.omnidir as omnidir # <module 'cv2.omnidir'>
import cv2.optflow as optflow # <module 'cv2.optflow'>
import cv2.plot as plot # <module 'cv2.plot'>
import cv2.ppf_match_3d as ppf_match_3d # <module 'cv2.ppf_match_3d'>
import cv2.quality as quality # <module 'cv2.quality'>
import cv2.reg as reg # <module 'cv2.reg'>
import cv2.rgbd as rgbd # <module 'cv2.rgbd'>
import cv2.saliency as saliency # <module 'cv2.saliency'>
import cv2.samples as samples # <module 'cv2.samples'>
import cv2.structured_light as structured_light # <module 'cv2.structured_light'>
import cv2.text as text # <module 'cv2.text'>
import cv2.utils as utils # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry # <module 'cv2.videoio_registry'>
import cv2.videostab as videostab # <module 'cv2.videostab'>
import cv2.xfeatures2d as xfeatures2d # <module 'cv2.xfeatures2d'>
import cv2.ximgproc as ximgproc # <module 'cv2.ximgproc'>
import cv2.xphoto as xphoto # <module 'cv2.xphoto'>
import cv2 as __cv2
class xfeatures2d_BoostDesc(__cv2.Feature2D):
# no doc
def create(self, desc=None, use_scale_orientation=None, scale_factor=None): # real signature unknown; restored from __doc__
"""
create([, desc[, use_scale_orientation[, scale_factor]]]) -> retval
.
"""
pass
def getScaleFactor(self): # real signature unknown; restored from __doc__
"""
getScaleFactor() -> retval
.
"""
pass
def getUseScaleOrientation(self): # real signature unknown; restored from __doc__
"""
getUseScaleOrientation() -> retval
.
"""
pass
def setScaleFactor(self, scale_factor): # real signature unknown; restored from __doc__
"""
setScaleFactor(scale_factor) -> None
.
"""
pass
def setUseScaleOrientation(self, use_scale_orientation): # real signature unknown; restored from __doc__
"""
setUseScaleOrientation(use_scale_orientation) -> None
.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
| [
"[email protected]"
] | |
14808a94e6859ea984b7599bc4c6afbd5f24d8d4 | 6b6edacfc343e60ee82b74bf82248ba8685d5007 | /backend/base/migrations/0001_initial.py | e560d4770812c27ad2df904d59b7860caa7c4d74 | [] | no_license | wchandler2020/django-react-ecommerce | 0e6c591227c118a8492bb6f89727e907f303b1db | cc62f9eadf89fa417dc1cd2f183dca65a736da90 | refs/heads/main | 2023-03-04T20:34:40.950457 | 2021-02-21T20:49:22 | 2021-02-21T20:49:22 | 338,858,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | # Generated by Django 3.1.6 on 2021-02-15 19:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('name', models.CharField(blank=True, max_length=200, null=True)),
('brand', models.CharField(blank=True, max_length=200, null=True)),
('category', models.CharField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
('rating', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
('numReviews', models.IntegerField(blank=True, default=0, null=True)),
('price', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
('countInStock', models.IntegerField(blank=True, default=0, null=True)),
('createdAt', models.DateTimeField(auto_now_add=True)),
('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
f72d4da9cfe29c981f9e132cef7c27f0de2a1a42 | 02a2ecc29102387120db40bbb64ea5a564b00a3d | /protocol/src/com/teltonika/Codec/IMEI_validation_test.py | d0d23c2002a2530c6d1b12cf09671a16f1717953 | [] | no_license | pkumarray91/production | f86a96b9fa603f8e6c2282466384edef4fde644c | 41adcc02738ffef07d72d019386bbb2253ffe9af | refs/heads/master | 2023-03-21T19:21:13.761448 | 2021-03-12T07:35:41 | 2021-03-12T07:35:41 | 346,977,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | from protocol.src.com.teltonika.Codec.IMEI_validation import *
"""
Driver code which calls the IMEI_validation class and gets the result
1. Get the hex_num
1.a. which contain the imei_number where the first 2 bytes gives the length of IMEI (000F)
1.b. and the digit '3' is present before every imei_number
2. To get the proper imei_number we have to split it
3. If the return is True then it is correct valid IMEI
4. If the return is False then it is invalid IMEI
generate imei at:
https://generate.plus/en/number/imei
"""
#myimei = IMEI_validation(356307042441013)
#myimei = IMEI_validation(359632107452945)
#myimei = IMEI_validation(502695398174524)
#hex_num ='000F333536333037303432343431303133' #invalid imei
#hex_num ='000F333539363332313037343532393435' #valid imei
#hex_num ='000F353032363935333938313734353234' #valid imei
#hex_num = '000F353333383031353132353831313036' #valid imei
#hex_num = '000F333037383733303036373436323539' #valid imei
#hex_num = '000F353234303231333732303138313436' #valid imei
#hex_num = '000F343939383133373531373833393634' #valid imei
#hex_num = '000F313033343536353635373937323937' #valid imei
hex_num = '000F313033343536353635373937323637' #invalid imei
# split the hex_num to get first 4 bytes
#hex_split1 = hex_num[:4]
# split the hex_num to get the remaining value
#hex_split2 = hex_num[4:]
#if int(hex_split1,16) != 15 :
#print ("wrong size", hex_split1)
#else:
#print("15 digit imei")
#test1 = len(str(hex_split2))
#to get the imei_number and remove the '3' digit from the hex_num
#my_imei = hex_split2[1:test1:2]
#print(my_imei)
myimei = IMEI_validation(hex_num)
valid_yes_no, imei_num = myimei.check_my_imei()
if valid_yes_no:
print('IMEI valid ', imei_num)
else:
print('IMEI invalid ', imei_num)
# raw_data, valid = myimei.checkIMEI()
#
# if valid:
# val_imei = myimei.ImeiIsValid(self, raw_data)
# if val_imei == True:
# print("valid IMEI")
# else:
# print("Invalid IMEI")
#
# else:
# print("Invalid IMEI")
| [
"[email protected]"
] | |
3bb2626e106e0aed2af16a86f65070cf26d1077d | 548666cccda576bf1e13070a58e9cbbbc11c7f6b | /Assignment3/get_print_string.py | 05bc6d955a163a9fa043ea577d00f580ab206ebc | [] | no_license | Kiran0343/Python | cfd0cf3171323b7a4f532c05628213b6a71cf3c8 | 46d640fd251cdf55733891c8a2a61fdc520bad50 | refs/heads/master | 2020-06-27T22:38:09.894204 | 2018-04-16T01:39:30 | 2018-04-16T01:39:30 | 97,076,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | """
Python script with a class that has the two methods below, and that accesses
those methods from another (child) class:
"""
class parent():
def __init__(self):
print "hello from parent"
def getString(self):
self.word = str(raw_input("Enter string : "))
def printString(self):
print "string is : ", self.word
class child(parent):
def __init__(self):
print "hello from child"
c = child()
c.getString()
c.printString() | [
"[email protected]"
] | |
6236e03243994ff3486a24f3d867d6b68ee24e25 | 5cc5a3063c98719662899d52e176d4df1dc4147e | /application/model/report.py | df2343d773c689157f418c1e7c2be0e2458a5db4 | [] | no_license | yuxd/pile | fef8b6005556eb1ab994830956363ff44bdc2025 | d1b0d4e53b4379e4878ef6dba36cb66414af39e4 | refs/heads/master | 2021-01-11T22:06:30.397223 | 2016-09-08T01:39:30 | 2016-09-08T01:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | from datetime import datetime
from .helper import to_ts
from .. import db
class Report(db.Model):
"""
This class defines report structure.
id : record id
user_id : user id
comment : comment
evidence : evidence
dt : timestamp
handled : whether the report is handled
"""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.ForeignKey('user.id'))
comment = db.Column(db.Text())
evidence = db.Column(db.String(80))
dt = db.Column(db.DateTime)
handled = db.Column(db.Boolean)
def __init__(self, user_id, evidence, comment, dt=None, handled=False):
self.user_id = user_id
self.comment = comment
self.evidence = evidence
self.dt = dt if dt else datetime.utcnow()
self.handled = handled
def to_json(self):
attrs = ('id', 'user_id', 'comment', 'evidence', 'handled')
ans = {attr: getattr(self, attr, None) for attr in attrs}
ans['dt'] = to_ts(self.dt)
return ans
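# A minimal usage sketch, assuming an application context with a configured
# SQLAlchemy session; the field values are made up for illustration.
# report = Report(user_id=1, evidence='uploads/abc.png', comment='spam post')
# db.session.add(report)
# db.session.commit()
# payload = report.to_json()  # dict whose 'dt' field went through to_ts()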
| [
"[email protected]"
] | |
3f36a95e1faafb80203d5e7e414bfc856688ba93 | 2009832522a7f4f9c79da1dc16efea74975ef218 | /events/migrations/0012_auto_20190412_1822.py | 914d70996c725b954896831319d10fdc5568c9d1 | [] | no_license | siobahnv/communityeventsapp | 47f472f08f2921e8991d64dbc1cb3f5aee7e4f3f | ae30ab721d1c93f1ba8e9ae289676bfe6811afcc | refs/heads/master | 2022-12-09T07:55:15.292261 | 2019-04-17T17:29:46 | 2019-04-17T17:29:46 | 180,808,021 | 1 | 0 | null | 2022-12-08T04:58:54 | 2019-04-11T14:19:45 | Python | UTF-8 | Python | false | false | 442 | py | # Generated by Django 2.2 on 2019-04-12 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0011_auto_20190412_1755'),
]
operations = [
migrations.AlterField(
model_name='event',
name='tags',
field=models.ManyToManyField(blank=True, help_text='Select a tag for this event.', to='events.Tag'),
),
]
| [
"[email protected]"
] | |
ee6d6b096a33d9847e60bfe9a67807cf517e81db | a947c8c5ded83187d11944e21681dae71d027afa | /core_spider/core_spider/items.py | ad5381856bad8986353e32cc1ee4e2a814e3aef0 | [] | no_license | minfun/scrapeqq | f84cea349a476e1a25b0dec9d87776abab6d8d3b | 04bac71d22700d6eddb85a7194bbc44ab6def8f3 | refs/heads/master | 2020-12-30T15:41:55.101008 | 2017-05-14T08:51:49 | 2017-05-14T08:51:49 | 91,165,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CoreSpiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| [
"[email protected]"
] | |
e82fb68fa15cb53c49889f6939ecfd1fccf3035c | 8e0df595ef441a2c57a209728f2b2f6bf5d13265 | /blogapi/api/serializers.py | 1cd635351a91a26e83fb0a9ffcc61046af926384 | [] | no_license | saddamphp/blog_api_django | eb5bd52e424150da9e3745adb94d0ade01f9be52 | c8fb1b5ba82c12f94a43fb0d6e59ea026e3c6635 | refs/heads/master | 2023-01-09T11:57:06.343348 | 2020-11-14T19:03:28 | 2020-11-14T19:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | from django.contrib.auth import get_user_model, password_validation
from django.contrib.auth.models import BaseUserManager
from rest_framework import serializers
from .models import Post,Profile
class blogserializer(serializers.ModelSerializer):
class Meta:
model = Post
fields ='__all__'
class profileserializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields ='__all__'
User = get_user_model()
class UserRegisterSerializer(serializers.ModelSerializer):
"""
A user serializer for registering the user
"""
class Meta:
model = User
fields = ('id','username' ,'email', 'password', 'first_name', 'last_name')
def validate_password(self, value):
password_validation.validate_password(value)
return value | [
"[email protected]"
] | |
33a1bd5faf0881009d06ebde82f8871fe939b812 | 8beeebe1ea8e9d13e1a2c1ef851d91c6fd9fa156 | /Date.py | 01d61a789821302f93e9ee532b80618a94cc345a | [] | no_license | jamanges/raspberrypi | 101a6b65036df8ef25395aaebe44706b5878ab3b | 83527f9e478565871785f4c45db215282ab42d12 | refs/heads/master | 2016-09-06T03:56:18.291574 | 2015-01-03T04:15:50 | 2015-01-03T04:15:50 | 28,611,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import RPi.GPIO as GPIO
import time
ledPin=15
GPIO.setmode(GPIO.BOARD)
GPIO.setup(ledPin,GPIO.OUT)
month = int(float(time.strftime("%m")))
day = int(float(time.strftime("%d")))
while month > 0:
global month
GPIO.output(ledPin,GPIO.HIGH)
time.sleep(.25)
GPIO.output(ledPin,GPIO.LOW)
time.sleep(.25)
print month
month = month - 1
time.sleep(2)
while day > 0:
global day
GPIO.output(ledPin,GPIO.HIGH)
time.sleep(.25)
GPIO.output(ledPin,GPIO.LOW)
time.sleep(.25)
print day
day = day - 1
time.sleep(2)
| [
"[email protected]"
] | |
1def6138482e78b1cb11607f3c331914c0a37927 | cdfb5ba3e6210672f6e69a9370503544867d8ef6 | /Module 3 Labs/Module 3.2.py | be614a7d3aabcb2720977bf719c2787e35146c5f | [] | no_license | Dannyh0198/Module-3-Labs | a436a82c42eb580fe9df5945c78a67c52062239e | 68d430ba9a836ef9f955aaa71358232e35ae166c | refs/heads/master | 2023-07-19T06:17:34.930581 | 2021-09-23T18:35:05 | 2021-09-23T18:35:05 | 408,910,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,798 | py | # 3.2.1.3 LAB: Essentials of the while loop - Guess the secret number
# A junior magician has picked a secret number. He has hidden it in a variable named secret_number. He wants everyone
# who runs his program to play the Guess the secret number game, and guess what number he has picked for them.
# Those who don't guess the number will be stuck in an endless loop forever! Unfortunately, he does not know how to complete the code.
# Your task is to help the magician complete the code in the editor in such a way so that the code:
# will ask the user to enter an integer number;
# will use a while loop;
# will check whether the number entered by the user is the same as the number picked by the magician.
# If the number chosen by the user is different than the magician's secret number, the user should see the message "Ha ha! You're stuck in my loop!"
# and be prompted to enter a number again. If the number entered by the user matches the number picked by the magician, the number should be printed to the screen,
# and the magician should say the following words: "Well done, muggle! You are free now."
secret_number = 777
print(
"""
+================================+
| Welcome to my game, muggle! |
| Enter an integer number |
| and guess what number I've |
| picked for you. |
| So, what is the secret number? |
+================================+
""")
number = int(input("Guess the secret number:"))
while number != secret_number:
print("Ha ha! You're stuck in my loop!")
number = int(input("Guess the secret number:"))
else:
print("Well done, muggle! You are free now.")
#3.2.1.6 LAB: Essentials of the for loop - counting mississippily
# Your task is very simple here: write a program that uses a for loop to "count mississippily" to five.
# Having counted to five, the program should print to the screen the final message "Ready or not, here I come!"
# For the time being, we'd just like you to know that we've imported the time module and used the sleep()
# method to suspend the execution of each subsequent print() function inside the for loop for one second, so that the message outputted to the console resembles an actual counting. Don't worry - you'll soon learn more about modules and methods.
import time
for n in range (1,6): # Write a for loop that counts to five.
print (n,"Mississippi") # Body of the loop - print the loop iteration number and the word "Mississippi".
time.sleep(1) # Body of the loop - use: time.sleep(1)
print ("Ready or not, here I come!") # Write a print function with the final message.
# 3.2.1.9 LAB: The break statement - Stuck in a loop
# Design a program that uses a while loop and continuously asks the user to enter a word unless the user enters "chupacabra" as the secret exit word
# in which case the message "You've successfully left the loop." should be printed to the screen, and the loop should terminate.
while True:
word = str(input("What is the word:")) # Asks to input a string
if word == ("chupacabra"): # If the string is equal to chupacabra
print ("You've successfully left the loop.") # Print the following output
break # Stop the loop.
# 3.2.1.10 LAB: The continue statement - the Ugly Vowel Eater
# Your task here is very special: you must design a vowel eater! Write a program that uses:
user_word = str(input("Enter a word: ")) # ask the user to enter a word;
user_word = user_word.upper() # use user_word = user_word.upper() to convert the word entered by the user to upper case;
for letter in user_word: #use conditional execution
if letter == ("A"): # "eat" the following vowels A, E, I, O, U from the inputted word;
continue
elif letter == ("E"):
continue
elif letter == ("I"):
continue
elif letter == ("O"):
continue
elif letter == ("U"):
continue
else:
print (letter)
# 3.2.1.11 LAB: The continue statement - the Pretty Vowel Eater
# Your task here is even more special than before: you must redesign the (ugly) vowel eater from the previous lab (3.2.1.10)
# create a better, upgraded (pretty) vowel eater! Write a program that uses:
word_without_vowels = "" # Look at the code in the editor. We've created word_without_vowels and assigned an empty string to it
user_word = str(input("Enter a word: ")) # ask the user to enter a word;
user_word = user_word.upper() # use user_word = user_word.upper() to convert the word entered by the user to upper case;
for letter in user_word: #use conditional execution
if letter == ("A"): # "eat" the following vowels A, E, I, O, U from the inputted word;
continue
elif letter == "E":
continue
elif letter == "I":
continue
elif letter == "O":
continue
elif letter == "U":
continue
else:
word_without_vowels += letter # assign the uneaten letters to the word_without_vowels variable and print the variable to the screen.
print(word_without_vowels)
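# The same eater can be written with a single membership test instead of the
# if/elif chain above; a compact sketch for comparison (commented out so the
# script does not prompt twice):
# word_without_vowels = ""
# for letter in user_word:
#     if letter in "AEIOU":
#         continue
#     word_without_vowels += letter
# print(word_without_vowels)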
# 3.2.1.14 LAB: Essentials of the while loop
# The pyramid is stacked according to one simple principle: each lower layer contains one block more than the layer above.
blocks = int(input("Enter the number of blocks: ")) # Ask user to input a number
height = 0 # Set the height variable at 0
Blocks_needed_for_next_level = 0 # Set variable for the ammount of blocks you need to build the next level.
while True:
Blocks_needed_for_next_level += 1 # Every itteration of the while loop will increment the ammount of blocks needed by +1.
blocks = blocks - Blocks_needed_for_next_level # Every increment in height will deduct the number of blocks needed to build the next level.
if blocks <= 0: # Checks the block variable counter. If it is equal to or less than zero. End the loop. If not add 1 to the height.
break # Following on from the IF statement. Break the while loop.
height = height + 1 # Each itteration of the while loop will increment the height by 1.
# When the appropriate ammount of blocks can not be deducted from the total, the while loop will stop.
print("The height of the pyramid:", height) # When the while rule has completed its execution. It will print the current "height" variable.
#3.2.1.15 LAB: Collatz's hypothesis
# take any non-negative and non-zero integer number and name it c0;
# if it's even, evaluate a new c0 as c0 ÷ 2;
# otherwise, if it's odd, evaluate a new c0 as 3 × c0 + 1;
# if c0 ≠ 1, skip to point 2.
# Write a program which reads one natural number and executes the above steps as long as c0 remains different from 1.
# We also want you to count the steps needed to achieve the goal.
c0 = int(input("Input Number:")) # take any non-negative and non-zero integer number and name it c0;
counter = 0
while True:
if c0 <= 0: # Must be a non-negative and non zero integer. This would flag up an error
print ("Invalid Input")
break # If a non negative or non zero value is entered it would stop the while loop
if c0 % 2 == 0: # Is c0 a multiple of 2? I.e is it even?
        c0 = c0//2 # If it is, divide it by 2
print (c0) # print the resultant c0 after the division
counter += 1 # add a 1 to the counter to mark the number of steps the process has been through.
    if c0 == 1: # Check to see if c0 == 1. If it is, the while loop will end.
        print ("steps:", counter) # Print the word steps + the counter that is keeping track of how many iterations the cycle has been through.
        break # Stops the while loop
    if c0 % 2 != 0: # Is c0 not divisible by 2, i.e. is it odd?
        c0 = 3 * c0 + 1 # If it is, multiply c0 by 3 and add 1.
print (c0) # print the resultant c0 after the equation.
counter += 1 # add a 1 to the counter to mark the number of steps the process has been through. | [
"[email protected]"
] | |
bd443cac36cd8112754d692cea7d260869ab626d | 7789f4c84a250ce45accdecbf73630519bfc4aa1 | /devel/lib/python2.7/dist-packages/rail_manipulation_msgs/srv/_PrepareGrasp.py | de78ffe163fbb53ac02bd322fbc7c0c91ccb1710 | [] | no_license | JerryHu1994/NRI-authoring-Backend | 33610913692c3ba8ac2e7fd47b735d193771526c | d130201224deb8696ae4b2dbc451c251693040d3 | refs/heads/master | 2021-09-15T23:15:01.061392 | 2018-06-12T15:56:40 | 2018-06-12T15:56:40 | 113,100,927 | 0 | 2 | null | 2018-06-12T15:56:41 | 2017-12-04T22:11:56 | HTML | UTF-8 | Python | false | false | 12,030 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rail_manipulation_msgs/PrepareGraspRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class PrepareGraspRequest(genpy.Message):
_md5sum = "f48a95707774bb6708f0bd8158e612f7"
_type = "rail_manipulation_msgs/PrepareGraspRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """geometry_msgs/PoseStamped graspPose
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['graspPose']
_slot_types = ['geometry_msgs/PoseStamped']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
graspPose
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PrepareGraspRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.graspPose is None:
self.graspPose = geometry_msgs.msg.PoseStamped()
else:
self.graspPose = geometry_msgs.msg.PoseStamped()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.graspPose.header.seq, _x.graspPose.header.stamp.secs, _x.graspPose.header.stamp.nsecs))
_x = self.graspPose.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.graspPose.pose.position.x, _x.graspPose.pose.position.y, _x.graspPose.pose.position.z, _x.graspPose.pose.orientation.x, _x.graspPose.pose.orientation.y, _x.graspPose.pose.orientation.z, _x.graspPose.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.graspPose is None:
self.graspPose = geometry_msgs.msg.PoseStamped()
end = 0
_x = self
start = end
end += 12
(_x.graspPose.header.seq, _x.graspPose.header.stamp.secs, _x.graspPose.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.graspPose.header.frame_id = str[start:end].decode('utf-8')
else:
self.graspPose.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.graspPose.pose.position.x, _x.graspPose.pose.position.y, _x.graspPose.pose.position.z, _x.graspPose.pose.orientation.x, _x.graspPose.pose.orientation.y, _x.graspPose.pose.orientation.z, _x.graspPose.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.graspPose.header.seq, _x.graspPose.header.stamp.secs, _x.graspPose.header.stamp.nsecs))
_x = self.graspPose.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.graspPose.pose.position.x, _x.graspPose.pose.position.y, _x.graspPose.pose.position.z, _x.graspPose.pose.orientation.x, _x.graspPose.pose.orientation.y, _x.graspPose.pose.orientation.z, _x.graspPose.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.graspPose is None:
self.graspPose = geometry_msgs.msg.PoseStamped()
end = 0
_x = self
start = end
end += 12
(_x.graspPose.header.seq, _x.graspPose.header.stamp.secs, _x.graspPose.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.graspPose.header.frame_id = str[start:end].decode('utf-8')
else:
self.graspPose.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.graspPose.pose.position.x, _x.graspPose.pose.position.y, _x.graspPose.pose.position.z, _x.graspPose.pose.orientation.x, _x.graspPose.pose.orientation.y, _x.graspPose.pose.orientation.z, _x.graspPose.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_7d = None
def _get_struct_7d():
global _struct_7d
if _struct_7d is None:
_struct_7d = struct.Struct("<7d")
return _struct_7d
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rail_manipulation_msgs/PrepareGraspResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class PrepareGraspResponse(genpy.Message):
_md5sum = "358e233cde0c8a8bcfea4ce193f8fc15"
_type = "rail_manipulation_msgs/PrepareGraspResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool success
"""
__slots__ = ['success']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PrepareGraspResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
else:
self.success = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.success))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.success))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class PrepareGrasp(object):
_type = 'rail_manipulation_msgs/PrepareGrasp'
_md5sum = '8918ac08fe533980834453f23389b29a'
_request_class = PrepareGraspRequest
_response_class = PrepareGraspResponse
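# A minimal client-side sketch for calling this service with rospy; the
# service name '/prepare_grasp' is an assumption -- use whatever name the
# serving node actually advertises.
# import rospy
# from rail_manipulation_msgs.srv import PrepareGrasp
# rospy.wait_for_service('/prepare_grasp')
# prepare = rospy.ServiceProxy('/prepare_grasp', PrepareGrasp)
# response = prepare(graspPose=some_pose_stamped)  # a PrepareGraspResponse with .success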
| [
"[email protected]"
] | |
f779f90897b4b3140cf74c2e61e30e9eb35861fd | c52bf4544fc0224c4db5c6a4a5457d9b2e7fee4b | /core/models.py | c225807f5459e5b00d3326e884fb58339d7da2e5 | [] | no_license | sntciitbhu/website1 | c4efe48479cb092d0f5db0a257563b42d4e343ef | 7063a747abbaec0254fe04a2c2eefde7db656589 | refs/heads/master | 2022-10-13T11:30:58.307699 | 2019-11-09T08:33:51 | 2019-11-09T08:33:51 | 216,588,378 | 1 | 2 | null | 2022-09-23T22:31:37 | 2019-10-21T14:26:24 | CSS | UTF-8 | Python | false | false | 62 | py | from django.db import models
#class Students(models.Model):
| [
"[email protected]"
] | |
c68bcdaaf52094708cc8f1606bed4ef43235f013 | 99b7b8cef0f28aa93e87da82f9f75b65f91208f1 | /Game.py | a45b8541dd8e5733a0952b0e43ddfad28a2a43da | [] | no_license | ghosts1995/fjsss | b3a01115832fac2f0140baacd51ea6b4c3f63489 | 526a69378067e3632cf41d9c1f4943628ce4201a | refs/heads/master | 2022-03-05T18:56:28.358968 | 2019-10-30T10:17:43 | 2019-10-30T10:17:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,160 | py | import sys
from MainPageUi import Ui_MainWindow
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget,QMainWindow,QLabel,QPushButton
from PyQt5.QtGui import QMovie,QPixmap,QCursor,QPalette,QBrush,QFont,QIcon
from PyQt5 import QtCore
import linkServer
import execjs
POKE_SIZE=(50,68)
class Room(QWidget):
def __init__(self):
super(QWidget,self).__init__()
        self.setWindowTitle('Game Room')
# self.setObjectName('GameWindow')
self.setMinimumSize(1200, 811)
self.setMaximumSize(1200, 811)
# self.setStyleSheet("#MainWindow{border-image:url(./src/room_bc1.png);}")
try:
self.initData()
self.initUi()
self.hall=None
except Exception as e:
print(e)
def initUi(self):
        # Note: the order in which the images are loaded matters
        # Load the room background
self.setCursor(QCursor(QPixmap('./src/mouse40.png')))
palette = QPalette()
palette.setBrush(self.backgroundRole(), QBrush(QPixmap('./src/room_bc1.png')))
self.setPalette(palette)
        # Load the table image
table = QLabel(self)
table.setPixmap(QPixmap('./src/桌子.png'))
table.setGeometry(150, 410, 900, 700)
        table.setScaledContents(True)  # let the image scale to the label size
        # Load the avatar
        headFrame = QLabel(self)  # load the avatar frame
headFrame.setPixmap(QPixmap('./src/headTopFrame.png'))
headFrame.setGeometry(295, 700, 100, 100)
        headFrame.setScaledContents(True)  # let the image scale to the label size
        avatar = QLabel(self)  # load the avatar picture; male avatar by default
avatar.setPixmap(QPixmap('./src/headBoy.png'))
avatar.setGeometry(300, 705, 90, 90)
        avatar.setScaledContents(True)  # let the image scale to the label size
        # Lay out the playing-card slots
        self.showMyPai()
        # Load the three buttons: play again, start game and leave game
self.gameAgain=QPushButton(self)
self.gameAgain.setStyleSheet("QPushButton{border-image: url(./src/再来一局.png)}")
self.gameAgain.setGeometry(800, 10, 125, 50)
self.startBtn = QPushButton(self)
self.startBtn.setStyleSheet("QPushButton{border-image: url(./src/开始游戏.png)}")
self.startBtn.setGeometry(540, 400, 125, 60)
self.quitBtn = QPushButton(self)
self.quitBtn.setStyleSheet("QPushButton{border-image: url(./src/top_btn_exit.png)}")
self.quitBtn.setGeometry(0, 8, 60, 62)
self.startBtn.clicked.connect(self.startGame)
self.quitBtn.clicked.connect(self.quitRoom)
self.gameAgain.clicked.connect(self.startGame)
def initData(self):
self.mypokes=[]
self.server=linkServer.Game()
def getUserInfo(self,userInfo):
try:
self.userInfo = userInfo
except Exception as e:
print(e)
def showMyPai(self):
self.pokes=[]
for i in range(13):
x = QPushButton(self)
x.setStyleSheet("QPushButton{border-image: url(./src/cardBack.png)}")
x.setGeometry(450+22*i, 680, 73, 100)
self.pokes.append(x)
def quitRoom(self):
self.close()
def startGame(self):
try:
self.startBtn.close()
print(self.userInfo["token"])
self.mypokes=self.server.openGame(self.userInfo["token"])
            # Deal the cards!
print("token",self.mypokes)
            # Feed the cards into the card-playing logic
for y in self.pokes:
x=self.mypokes[self.pokes.index(y)]
if '$' in x:
index=13*3
elif '&' in x:
index=13*2
elif '*' in x:
index=13*1
else:
index=0
if 'A' in x:
index+=1
elif 'K' in x:
index+=13
elif 'Q' in x:
index+=12
elif 'J' in x:
index+=11
else:
index+=int(x[1:])
try:
# print(index)
url='./src/pokes/Images_Cards_Card_1_'+str(index)+'.png'
y.setStyleSheet("QPushButton{border-image: url("+url+")}")
except Exception as e:
print(e)
            # Play the cards
# card=[]
# one=self.mypokes[0]+' '+self.mypokes[1]+' '+self.mypokes[2]
# two=self.mypokes[3]+' '+self.mypokes[4]+' '+self.mypokes[5]+' '+self.mypokes[6]+' '+self.mypokes[7]
# three=self.mypokes[8]+' '+self.mypokes[9]+' '+self.mypokes[10]+' '+self.mypokes[11]+' '+self.mypokes[12]
# card.append(one)
# card.append(two)
# card.append(three)
# print(card)
self.server.submitPoke(self.userInfo["token"],self.mypokes)
except Exception as e:
print('Game/startgame',e)
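# A standalone sketch of the suit/rank-to-index mapping used in startGame
# above, for clarity; the card encoding ('$'/'&'/'*' suit marks, 'A/K/Q/J'
# plus numeric ranks) is inferred from that method and is an assumption.
def card_to_index(card):
    suit = 39 if '$' in card else 26 if '&' in card else 13 if '*' in card else 0
    if 'A' in card:
        rank = 1
    elif 'K' in card:
        rank = 13
    elif 'Q' in card:
        rank = 12
    elif 'J' in card:
        rank = 11
    else:
        rank = int(card[1:])
    return suit + rank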
if __name__=="__main__":
app = QApplication(sys.argv)
demo = Room()
demo.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
5c2a44299f26b665135279bbbcbf5363c8df7b19 | df2282a02ae0dd788dc6bd5bd6457415cb1e1771 | /scoket/socketserver.py | e9f953d8793a87f13b14a325e3fc2df2e9b09a4d | [] | no_license | randian666/MPython3 | 6f6b6fc31734840c87322d5b0b74e4afd7e2165f | 6de046262ba1ad37a3072ee5d064b6e4cfa57df0 | refs/heads/master | 2021-01-21T01:11:27.491355 | 2018-01-05T10:44:30 | 2018-01-05T10:44:30 | 101,870,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | #!/usr/bin/env python3
'''
Socket server (TCP).
Most connections are reliable TCP connections. When a TCP connection is created,
the side that actively initiates it is called the client, and the side that
passively responds is called the server.
'''
# Import the socket module:
import socket,threading,time
# When creating a socket, AF_INET selects IPv4 (for IPv6 use AF_INET6) and SOCK_STREAM selects the stream-oriented TCP protocol; with that, a socket object is created
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Listen on a port:
s.bind(('127.0.0.1',9999))
# Next, calling listen() starts listening on the port; the argument is the maximum number of pending connections
s.listen(5)
def tcplink(sock, addr):
    print('Accept new connection from %s:%s...' % addr)
    sock.send('hi~ Hello!'.encode('utf-8'))  # send a greeting to the client
    while True:
        data = sock.recv(1024)  # receive a message from the client
        print(data.decode('utf-8'))
        time.sleep(1)
        if not data or data.decode('utf-8') == 'exit':
            break
        send_msg = input('please input: ')
        sock.send(send_msg.encode('utf-8'))  # send a message to the client
    sock.close()
    print('Connection from %s:%s closed.' % addr)
while True:
    (sock, addr) = s.accept()
    # Spawn a new thread to handle this TCP connection:
    t = threading.Thread(target=tcplink, args=(sock, addr))
    t.start()
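# A matching client sketch, meant to run as a separate process; host and
# port mirror the bind() call above.
def demo_client():
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect(('127.0.0.1', 9999))
    print(c.recv(1024).decode('utf-8'))  # the greeting sent by tcplink()
    c.send('hello'.encode('utf-8'))
    print(c.recv(1024).decode('utf-8'))  # the reply typed on the server side
    c.send('exit'.encode('utf-8'))
    c.close()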
| [
"[email protected]"
] | |
1858d23256a8661d9d4359342a7148db7f925449 | b4adeaad18998e818da63e98d58c35a072b7d3d6 | /faktor.py | 0fa83d9076b9d49f4ce086939cf7368db5431fc9 | [] | no_license | mmncit/kattis | c3a7bd72f00ccec7c5bb3cd808789eda3c49372f | fbdee694d9ecc00531ca00bd6d559db65f61e672 | refs/heads/master | 2020-04-26T06:31:45.988378 | 2019-03-07T01:39:08 | 2019-03-07T01:39:08 | 173,367,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | a, i = list(map(int, input().split())) # read input and split
print((a * (i-1)) + 1)
| [
"[email protected]"
] | |
431b065c5c92a9cc3b0e0da46ebaeaac958573ac | 7b2b9d66d3972714e9951d4664773e2d0b2012eb | /processAnnotationforGT.py | 9985d1f883cd89bbda7e4ed61e680fc722ea3d4a | [] | no_license | JASON7033/vigilant-barnacle | ff0f8194333b401994cc44638db7cf82f3eeee14 | 7c379c86f55d938388c3a7d095da04b49d0a7125 | refs/heads/main | 2023-04-20T12:33:39.950484 | 2021-05-30T15:00:02 | 2021-05-30T15:00:02 | 372,241,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | '''
Generate the test set.
Rescale the image info: the width, the height, and the bbox/area sizes.
Drop one category: vehicle.
'''
import json
'''
:type
annotations
images
categories
'''
#Where to write the output file
out_file="panda_test_gt_for_tiny.json"
#Read the annotation file
anno_src_file_panda_coco="anno.json"
src_annos=json.load(open(anno_src_file_panda_coco))
#Build a copy of the target structure to avoid modifying the source file
tiny_test_annos={}
#What needs changing -- images: scale the image size by 0.1; annotations: scale the bbox by 0.1 and the segmentation by 0.1; also recompute area as w*h
resize_src_annos={}
resize_src_annos['images']=src_annos['images']
resize_src_annos['annotations']=src_annos['annotations']
for item in resize_src_annos['images']:
item['height']=(int)(item['height']/10)
item['width']=(int)(item['width']/10)
# print("-"*10,"test output for src_anno['images'] ","-"*10)
# for item in resize_src_annos['images']:
# print("{0} , {1} ".format(item['height'],item['width']))
# print("-"*10,"end test output for src_anno['images'] ","-"*10)
for item in resize_src_annos['annotations']:
    bbox = item['bbox']
    for i in range(len(bbox)):
        item['bbox'][i] = int(item['bbox'][i] / 10)
    seg = item['segmentation']
    for i in range(len(seg[0])):
        item['segmentation'][0][i] = int(item['segmentation'][0][i] / 10)
    item['area'] = int(bbox[2] * bbox[3])  # bbox has already been rescaled here
    # For every annotation, add the logo / uncertain / in_dense_image flags
    # set to False, and set ignore to False as well
    item['ignore'] = False
    item['uncertain'] = False
    item['logo'] = False
    item['in_dense_image'] = False
# print("-"*10,"test output for src_anno['annotations']['bbox']","-"*10)
# for item in resize_src_annos['annotations']:
# print(item['bbox'],item['area'])
# print("-"*10,"end test output for src_anno['annotations']['bbox']","-"*10)
# print("-"*10,"test output for src_anno['annotations']['segmentation']","-"*10)
# for item in resize_src_annos['annotations']:
#     print(item['segmentation'])
# print("-"*10,"end test output for src_anno['annotations']['segmentation']","-"*10)
tiny_test_annos['type'] = src_annos['type']
tiny_test_annos['annotations'] = resize_src_annos['annotations']
tiny_test_annos['images'] = resize_src_annos['images']
tiny_test_annos['categories'] = [src_annos['categories'][0]]  # keep only the first category
with open(out_file, 'w') as f:
    json.dump(tiny_test_annos, f)
print("success!")
| [
"[email protected]"
] | |
aa0fc14548faba731a4e211d376b0d6b65f8d387 | 5ec7a72cab10dd39e0cc877caa1cb97c3cd9f3de | /tests/unit/models/dq/test_operation.py | 2641a03a60137ae33d350e1327c0bbce25d716ca | [] | no_license | raufer/spark-dsl | a1d311263fe48f64859c04cd63a79f48d8cd8fa4 | a0fbf9561ba4567bc5d40bf2c7d289e214712aa6 | refs/heads/main | 2023-04-11T19:29:11.661273 | 2021-01-26T18:34:23 | 2021-01-26T18:34:23 | 367,982,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | import unittest
import pyspark.sql.types as T
import pyspark.sql.functions as F
from garuda.engine.graph.eval import resolve_operation
from garuda.models.dq.operation import Operation
from garuda.constants.argument_types import ARGUMENT_TYPES as AT
from garuda.constants.operations_ids import OPERATION_ID as OID
from garuda.models.dq.argument import Argument
from tests.utils.spark_test_case import SparkTestCase
from tests import spark
class TestOperation(SparkTestCase):
def test_parse(self):
data = {
'id': OID.NOT_NULL,
'arguments': [
{
'type': 'column',
'value': 'age'
}
]
}
operation = Operation(**data)
arguments = [Argument(type=AT.COLUMN, value='age')]
self.assertEqual(operation.id, OID.NOT_NULL)
self.assertListEqual(operation.arguments, arguments)
data = {
"id": OID.IS_BETWEEN,
"arguments": [
{
"type": "column",
"value": "salary"
},
{
"type": "integer",
"value": 70000
},
{
"type": "integer",
"value": 100000
}
]
}
operation = Operation(**data)
arguments = [
Argument(type=AT.COLUMN, value='salary'),
Argument(type=AT.INTEGER, value=70000),
Argument(type=AT.INTEGER, value=100000)
]
self.assertEqual(operation.id, OID.IS_BETWEEN)
self.assertListEqual(operation.arguments, arguments)
def test_call(self):
data = {
'id': OID.NOT_NULL,
'arguments': [
{
'type': 'column',
'value': 'age'
}
]
}
operation = Operation(**data)
data = [
('Joe', 30),
('Sue', None)
]
df = spark.createDataFrame(data, ['name', 'age'])
result = df.withColumn('res', resolve_operation(operation))
data = [
('Joe', 30, True),
('Sue', None, False)
]
schema = T.StructType([
T.StructField('name', T.StringType(), True),
T.StructField('age', T.LongType(), True),
T.StructField('res', T.BooleanType(), False)
])
expected = spark.createDataFrame(data, schema)
self.assertDataFrameEqual(result, expected)
| [
"[email protected]"
] | |
57ed4ed1637ba7d77a5067ac736bd9e3f23767d2 | 992e59b87f87afb950e5eaf8e348e2073e5183af | /ADT/Stack/linked_stack.py | 29d127385c45ecd72b878352179787b9149e703c | [
"MIT"
] | permissive | daesookimds/data-structure | 3167e31046370acf2db7d5f8c2500b0bad50642c | b5bb69e1ae533ff723280e991e6dd8d372368697 | refs/heads/main | 2023-03-14T02:37:19.279377 | 2021-03-02T15:05:24 | 2021-03-02T15:05:24 | 343,374,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | # Stack with Node
class Node(object):
def __init__(self, value=None, pointer=None):
self.value = value
self.pointer = pointer
class Stack(object):
def __init__(self):
self.head = None
self.count = 0
def isEmpty(self):
return not bool(self.head)
def push(self, value):
self.head = Node(value, self.head)
self.count += 1
def pop(self):
if self.count > 0 and self.head:
node = self.head
self.head = node.pointer
self.count -= 1
return node.value
else:
print("Stack is empty.")
def peek(self):
if self.count > 0 and self.head:
return self.head.value
else:
print("Stack is empty.")
    def size(self):
        return self.count  # number of elements currently on the stack
def printList(self):
node = self.head
while node:
print(node.value, end=" ")
node = node.pointer
        print()
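
# A short usage sketch, added for illustration (not part of the original file):
if __name__ == "__main__":
    stack = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    stack.printList()       # 3 2 1   (top of the stack first)
    print(stack.peek())     # 3
    print(stack.pop())      # 3
    print(stack.size())     # 2
    print(stack.isEmpty())  # False
 | [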
"[email protected]"
] | |
8758d8c4012938721a0f27e3b0a175dd8d14cdd9 | 5eb961d4961db6e32f59945edb51af3d76a382a3 | /matrimony/user_management/views.py | edfdb4b40fadbbc72cd26d1c89a17286e9466ec7 | [] | no_license | prakashgun/matrimony-app | e0e9ba4c18cc627886644937cd9eb8f1846f569c | b8f9f96e8c73055162888e9111f805afaf8f87c9 | refs/heads/master | 2023-04-20T20:13:36.244947 | 2021-05-09T05:55:13 | 2021-05-09T05:55:13 | 365,545,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework import generics
from .permissions import IsPostOrIsAuthenticated
from .serializers import UserSerializer, GroupSerializer
class UserList(generics.ListCreateAPIView):
permission_classes = [IsPostOrIsAuthenticated]
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
class GroupList(generics.ListAPIView):
queryset = Group.objects.all()
serializer_class = GroupSerializer
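
# A hypothetical urls.py wiring for these views, added as an illustrative
# sketch -- the route paths are an assumption, not taken from this project:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('users/', views.UserList.as_view()),
#       path('users/<int:pk>/', views.UserDetail.as_view()),
#       path('groups/', views.GroupList.as_view()),
#   ]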
| [
"[email protected]"
] | |
0e57498e17dbb1188ff5803eaac005b6c4135d6c | 07a9efcd778003ca90dba01db64e5e21c02d256d | /project/cultboard/admin.py | 06c9eacb125393eb93dd82bbadc8cfdca28dee87 | [] | no_license | rakeshgithub00/cultboard | 099f1bb326dc61ae446bd9685ef5d170499b8a24 | 290b87ab1c33dd6f5c91a03fe353c7887d1f6a1b | refs/heads/master | 2023-06-11T13:45:46.918673 | 2021-06-25T05:06:27 | 2021-06-25T05:06:27 | 380,130,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from django.contrib import admin
from .models import Note, Detail, TeamMember, GallaryEvent, MajorEvent, Club, UpcomingEvent
# Register your models here.
admin.site.register(Note)
admin.site.register(Detail)
admin.site.register(TeamMember)
admin.site.register(GallaryEvent)
admin.site.register(MajorEvent)
admin.site.register(Club)
admin.site.register(UpcomingEvent) | [
"[email protected]"
] |